date_collected (stringclasses, 1 value) | repo_name (stringlengths 6-116) | file_name (stringlengths 2-220) | file_contents (stringlengths 13-357k) | prompts (sequence) |
---|---|---|---|---|
2024-01-10 | sdelgadoc/langchain | langchain~llms~chatglm.py | from typing import Any, List, Mapping, Optional
import requests
from langchain.callbacks.manager import CallbackManagerForLLMRun
from langchain.llms.base import LLM
from langchain.llms.utils import enforce_stop_tokens
class ChatGLM(LLM):
"""ChatGLM LLM service.
Example:
.. code-block:: python
from langchain.llms import ChatGLM
endpoint_url = (
"http://127.0.0.1:8000"
)
ChatGLM_llm = ChatGLM(
endpoint_url=endpoint_url
)
"""
endpoint_url: str = "http://127.0.0.1:8000/"
"""Endpoint URL to use."""
model_kwargs: Optional[dict] = None
"""Key word arguments to pass to the model."""
max_token: int = 20000
"""Max token allowed to pass to the model."""
temperature: float = 0.1
"""LLM model temperature from 0 to 10."""
history: List[List] = []
"""History of the conversation"""
top_p: float = 0.7
"""Top P for nucleus sampling from 0 to 1"""
@property
def _llm_type(self) -> str:
return "chat_glm"
@property
def _identifying_params(self) -> Mapping[str, Any]:
"""Get the identifying parameters."""
_model_kwargs = self.model_kwargs or {}
return {
**{"endpoint_url": self.endpoint_url},
**{"model_kwargs": _model_kwargs},
}
def _call(
self,
prompt: str,
stop: Optional[List[str]] = None,
run_manager: Optional[CallbackManagerForLLMRun] = None,
**kwargs: Any,
) -> str:
"""Call out to a ChatGLM LLM inference endpoint.
Args:
prompt: The prompt to pass into the model.
stop: Optional list of stop words to use when generating.
Returns:
The string generated by the model.
Example:
.. code-block:: python
response = chatglm_llm("Who are you?")
"""
_model_kwargs = self.model_kwargs or {}
# HTTP headers for the request
headers = {"Content-Type": "application/json"}
payload = {
"prompt": prompt,
"temperature": self.temperature,
"history": self.history,
"max_length": self.max_token,
"top_p": self.top_p,
}
payload.update(_model_kwargs)
payload.update(kwargs)
# print("ChatGLM payload:", payload)
# call api
try:
response = requests.post(self.endpoint_url, headers=headers, json=payload)
except requests.exceptions.RequestException as e:
raise ValueError(f"Error raised by inference endpoint: {e}")
# print("ChatGLM resp:", response)
if response.status_code != 200:
raise ValueError(f"Failed with response: {response}")
try:
parsed_response = response.json()
# Check if the response content exists
if isinstance(parsed_response, dict):
content_keys = "response"
if content_keys in parsed_response:
text = parsed_response[content_keys]
else:
raise ValueError(f"No content in response : {parsed_response}")
else:
raise ValueError(f"Unexpected response type: {parsed_response}")
except requests.exceptions.JSONDecodeError as e:
raise ValueError(
f"Error raised during decoding response from inference endpoint: {e}."
f"\nResponse: {response.text}"
)
if stop is not None:
text = enforce_stop_tokens(text, stop)
self.history = self.history + [[None, parsed_response["response"]]]
return text
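# ----------------------------------------------------------------------
# Hedged usage sketch (not part of the original module): exercising the
# ChatGLM wrapper above. The endpoint URL is an assumption -- it must point
# at a ChatGLM API server you are already running.
def _demo_chatglm() -> None:
    llm = ChatGLM(
        endpoint_url="http://127.0.0.1:8000",  # hypothetical local server
        max_token=2048,
        history=[],
    )
    print(llm("What is the capital of France?"))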
| [] |
2024-01-10 | sdelgadoc/langchain | langchain~embeddings~bedrock.py | import json
import os
from typing import Any, Dict, List, Optional
from pydantic import BaseModel, Extra, root_validator
from langchain.embeddings.base import Embeddings
class BedrockEmbeddings(BaseModel, Embeddings):
"""Bedrock embedding models.
To authenticate, the AWS client uses the following methods to
automatically load credentials:
https://boto3.amazonaws.com/v1/documentation/api/latest/guide/credentials.html
If a specific credential profile should be used, you must pass
the name of the profile from the ~/.aws/credentials file that is to be used.
Make sure the credentials / roles used have the required policies to
access the Bedrock service.
"""
"""
Example:
.. code-block:: python
from langchain.embeddings import BedrockEmbeddings
region_name ="us-east-1"
credentials_profile_name = "default"
model_id = "amazon.titan-e1t-medium"
be = BedrockEmbeddings(
credentials_profile_name=credentials_profile_name,
region_name=region_name,
model_id=model_id
)
"""
client: Any #: :meta private:
"""Bedrock client."""
region_name: Optional[str] = None
"""The aws region e.g., `us-west-2`. Fallsback to AWS_DEFAULT_REGION env variable
or region specified in ~/.aws/config in case it is not provided here.
"""
credentials_profile_name: Optional[str] = None
"""The name of the profile in the ~/.aws/credentials or ~/.aws/config files, which
has either access keys or role information specified.
If not specified, the default credential profile or, if on an EC2 instance,
credentials from IMDS will be used.
See: https://boto3.amazonaws.com/v1/documentation/api/latest/guide/credentials.html
"""
model_id: str = "amazon.titan-e1t-medium"
"""Id of the model to call, e.g., amazon.titan-e1t-medium, this is
equivalent to the modelId property in the list-foundation-models api"""
model_kwargs: Optional[Dict] = None
"""Key word arguments to pass to the model."""
class Config:
"""Configuration for this pydantic object."""
extra = Extra.forbid
@root_validator()
def validate_environment(cls, values: Dict) -> Dict:
"""Validate that AWS credentials to and python package exists in environment."""
if values["client"] is not None:
return values
try:
import boto3
if values["credentials_profile_name"] is not None:
session = boto3.Session(profile_name=values["credentials_profile_name"])
else:
# use default credentials
session = boto3.Session()
client_params = {}
if values["region_name"]:
client_params["region_name"] = values["region_name"]
values["client"] = session.client("bedrock", **client_params)
except ImportError:
raise ModuleNotFoundError(
"Could not import boto3 python package. "
"Please install it with `pip install boto3`."
)
except Exception as e:
raise ValueError(
"Could not load credentials to authenticate with AWS client. "
"Please check that credentials in the specified "
"profile name are valid."
) from e
return values
def _embedding_func(self, text: str) -> List[float]:
"""Call out to Bedrock embedding endpoint."""
# replace newlines, which can negatively affect performance.
text = text.replace(os.linesep, " ")
_model_kwargs = self.model_kwargs or {}
input_body = {**_model_kwargs, "inputText": text}
body = json.dumps(input_body)
try:
response = self.client.invoke_model(
body=body,
modelId=self.model_id,
accept="application/json",
contentType="application/json",
)
response_body = json.loads(response.get("body").read())
return response_body.get("embedding")
except Exception as e:
raise ValueError(f"Error raised by inference endpoint: {e}")
def embed_documents(
self, texts: List[str], chunk_size: int = 1
) -> List[List[float]]:
"""Compute doc embeddings using a Bedrock model.
Args:
texts: The list of texts to embed.
chunk_size: Bedrock currently only allows single string
inputs, so chunk size is always 1. This input is here
only for compatibility with the embeddings interface.
Returns:
List of embeddings, one for each text.
"""
results = []
for text in texts:
response = self._embedding_func(text)
results.append(response)
return results
def embed_query(self, text: str) -> List[float]:
"""Compute query embeddings using a Bedrock model.
Args:
text: The text to embed.
Returns:
Embeddings for the text.
"""
return self._embedding_func(text)
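# ----------------------------------------------------------------------
# Hedged usage sketch (not part of the original module): building the
# embeddings client from a named AWS profile. The profile, region, and model
# id are assumptions -- substitute whatever your AWS account actually exposes.
def _demo_bedrock_embeddings() -> None:
    embeddings = BedrockEmbeddings(
        credentials_profile_name="default",  # assumed ~/.aws/credentials profile
        region_name="us-east-1",             # assumed Bedrock-enabled region
        model_id="amazon.titan-e1t-medium",
    )
    query_vector = embeddings.embed_query("What is a vector store?")
    doc_vectors = embeddings.embed_documents(["first doc", "second doc"])
    print(len(query_vector), len(doc_vectors))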
| [] |
2024-01-10 | sdelgadoc/langchain | langchain~llms~tongyi.py | from __future__ import annotations
import logging
from typing import Any, Callable, Dict, List, Optional
from pydantic import Field, root_validator
from requests.exceptions import HTTPError
from tenacity import (
before_sleep_log,
retry,
retry_if_exception_type,
stop_after_attempt,
wait_exponential,
)
from langchain.callbacks.manager import (
AsyncCallbackManagerForLLMRun,
CallbackManagerForLLMRun,
)
from langchain.llms.base import LLM
from langchain.schema import Generation, LLMResult
from langchain.utils import get_from_dict_or_env
logger = logging.getLogger(__name__)
def _create_retry_decorator(llm: Tongyi) -> Callable[[Any], Any]:
min_seconds = 1
max_seconds = 4
# Wait 2^x * 1 second between each retry, with the wait time
# bounded between 1 and 4 seconds.
return retry(
reraise=True,
stop=stop_after_attempt(llm.max_retries),
wait=wait_exponential(multiplier=1, min=min_seconds, max=max_seconds),
retry=(retry_if_exception_type(HTTPError)),
before_sleep=before_sleep_log(logger, logging.WARNING),
)
def generate_with_retry(llm: Tongyi, **kwargs: Any) -> Any:
"""Use tenacity to retry the completion call."""
retry_decorator = _create_retry_decorator(llm)
@retry_decorator
def _generate_with_retry(**_kwargs: Any) -> Any:
resp = llm.client.call(**_kwargs)
if resp.status_code == 200:
return resp
elif resp.status_code in [400, 401]:
raise ValueError(
f"status_code: {resp.status_code} \n "
f"code: {resp.code} \n message: {resp.message}"
)
else:
raise HTTPError(
f"HTTP error occurred: status_code: {resp.status_code} \n "
f"code: {resp.code} \n message: {resp.message}"
)
return _generate_with_retry(**kwargs)
def stream_generate_with_retry(llm: Tongyi, **kwargs: Any) -> Any:
"""Use tenacity to retry the completion call."""
retry_decorator = _create_retry_decorator(llm)
@retry_decorator
def _stream_generate_with_retry(**_kwargs: Any) -> Any:
stream_resps = []
resps = llm.client.call(**_kwargs)
for resp in resps:
if resp.status_code == 200:
stream_resps.append(resp)
elif resp.status_code in [400, 401]:
raise ValueError(
f"status_code: {resp.status_code} \n "
f"code: {resp.code} \n message: {resp.message}"
)
else:
raise HTTPError(
f"HTTP error occurred: status_code: {resp.status_code} \n "
f"code: {resp.code} \n message: {resp.message}"
)
return stream_resps
return _stream_generate_with_retry(**kwargs)
class Tongyi(LLM):
"""Tongyi Qwen large language models.
To use, you should have the ``dashscope`` python package installed, and the
environment variable ``DASHSCOPE_API_KEY`` set with your API key, or pass
it as a named parameter to the constructor.
Example:
.. code-block:: python
from langchain.llms import Tongyi
tongyi = Tongyi()
"""
@property
def lc_secrets(self) -> Dict[str, str]:
return {"dashscope_api_key": "DASHSCOPE_API_KEY"}
@property
def lc_serializable(self) -> bool:
return True
client: Any #: :meta private:
model_name: str = "qwen-plus-v1"
"""Model name to use."""
model_kwargs: Dict[str, Any] = Field(default_factory=dict)
top_p: float = 0.8
"""Total probability mass of tokens to consider at each step."""
dashscope_api_key: Optional[str] = None
"""Dashscope api key provide by alicloud."""
n: int = 1
"""How many completions to generate for each prompt."""
streaming: bool = False
"""Whether to stream the results or not."""
max_retries: int = 10
"""Maximum number of retries to make when generating."""
prefix_messages: List = Field(default_factory=list)
"""Series of messages for Chat input."""
@property
def _llm_type(self) -> str:
"""Return type of llm."""
return "tongyi"
@root_validator()
def validate_environment(cls, values: Dict) -> Dict:
"""Validate that api key and python package exists in environment."""
values["dashscope_api_key"] = get_from_dict_or_env(
values, "dashscope_api_key", "DASHSCOPE_API_KEY"
)
try:
import dashscope
except ImportError:
raise ImportError(
"Could not import dashscope python package. "
"Please install it with `pip install dashscope`."
)
try:
values["client"] = dashscope.Generation
except AttributeError:
raise ValueError(
"`dashscope` has no `Generation` attribute, this is likely "
"due to an old version of the dashscope package. Try upgrading it "
"with `pip install --upgrade dashscope`."
)
return values
@property
def _default_params(self) -> Dict[str, Any]:
"""Get the default parameters for calling OpenAI API."""
normal_params = {
"top_p": self.top_p,
}
return {**normal_params, **self.model_kwargs}
def _call(
self,
prompt: str,
stop: Optional[List[str]] = None,
run_manager: Optional[CallbackManagerForLLMRun] = None,
**kwargs: Any,
) -> str:
"""Call out to Tongyi's generate endpoint.
Args:
prompt: The prompt to pass into the model.
Returns:
The string generated by the model.
Example:
.. code-block:: python
response = tongyi("Tell me a joke.")
"""
params: Dict[str, Any] = {
**{"model": self.model_name},
**self._default_params,
**kwargs,
}
completion = generate_with_retry(
self,
prompt=prompt,
**params,
)
return completion["output"]["text"]
def _generate(
self,
prompts: List[str],
stop: Optional[List[str]] = None,
run_manager: Optional[CallbackManagerForLLMRun] = None,
**kwargs: Any,
) -> LLMResult:
generations = []
params: Dict[str, Any] = {
**{"model": self.model_name},
**self._default_params,
**kwargs,
}
if self.streaming:
if len(prompts) > 1:
raise ValueError("Cannot stream results with multiple prompts.")
params["stream"] = True
for stream_resp in stream_generate_with_retry(
self, prompt=prompts[0], **params
):
generations.append(
[
Generation(
text=stream_resp["output"]["text"],
generation_info=dict(
finish_reason=stream_resp["output"]["finish_reason"],
),
)
]
)
else:
for prompt in prompts:
completion = generate_with_retry(
self,
prompt=prompt,
**params,
)
generations.append(
[
Generation(
text=completion["output"]["text"],
generation_info=dict(
finish_reason=completion["output"]["finish_reason"],
),
)
]
)
return LLMResult(generations=generations)
async def _agenerate(
self,
prompts: List[str],
stop: Optional[List[str]] = None,
run_manager: Optional[AsyncCallbackManagerForLLMRun] = None,
**kwargs: Any,
) -> LLMResult:
raise NotImplementedError()
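# ----------------------------------------------------------------------
# Hedged usage sketch (not part of the original module): a single completion
# call. Assumes the ``dashscope`` package is installed and DASHSCOPE_API_KEY
# is set in the environment, as required by validate_environment above.
def _demo_tongyi() -> None:
    llm = Tongyi(model_name="qwen-plus-v1", top_p=0.8)
    print(llm("Tell me a joke."))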
| [] |
2024-01-10 | sdelgadoc/langchain | langchain~embeddings~nlpcloud.py | from typing import Any, Dict, List
from pydantic import BaseModel, root_validator
from langchain.embeddings.base import Embeddings
from langchain.utils import get_from_dict_or_env
class NLPCloudEmbeddings(BaseModel, Embeddings):
"""NLP Cloud embedding models.
To use, you should have the ``nlpcloud`` python package installed and the
``NLPCLOUD_API_KEY`` environment variable set with your API key.
Example:
.. code-block:: python
from langchain.embeddings import NLPCloudEmbeddings
embeddings = NLPCloudEmbeddings()
"""
model_name: str # Define model_name as a class attribute
client: Any #: :meta private:
def __init__(
self, model_name: str = "paraphrase-multilingual-mpnet-base-v2", **kwargs: Any
) -> None:
super().__init__(model_name=model_name, **kwargs)
@root_validator()
def validate_environment(cls, values: Dict) -> Dict:
"""Validate that api key and python package exists in environment."""
nlpcloud_api_key = get_from_dict_or_env(
values, "nlpcloud_api_key", "NLPCLOUD_API_KEY"
)
try:
import nlpcloud
values["client"] = nlpcloud.Client(
values["model_name"], nlpcloud_api_key, gpu=False, lang="en"
)
except ImportError:
raise ImportError(
"Could not import nlpcloud python package. "
"Please install it with `pip install nlpcloud`."
)
return values
def embed_documents(self, texts: List[str]) -> List[List[float]]:
"""Embed a list of documents using NLP Cloud.
Args:
texts: The list of texts to embed.
Returns:
List of embeddings, one for each text.
"""
return self.client.embeddings(texts)["embeddings"]
def embed_query(self, text: str) -> List[float]:
"""Embed a query using NLP Cloud.
Args:
text: The text to embed.
Returns:
Embeddings for the text.
"""
return self.client.embeddings([text])["embeddings"][0]
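# ----------------------------------------------------------------------
# Hedged usage sketch (not part of the original module). Assumes the
# ``nlpcloud`` package is installed and NLPCLOUD_API_KEY is set.
def _demo_nlpcloud_embeddings() -> None:
    embeddings = NLPCloudEmbeddings()  # default model: paraphrase-multilingual-mpnet-base-v2
    print(len(embeddings.embed_query("hello world")))
    print(len(embeddings.embed_documents(["doc one", "doc two"])))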
| [] |
2024-01-10 | sdelgadoc/langchain | langchain~tools~amadeus~flight_search.py | import logging
from datetime import datetime as dt
from typing import Dict, Optional, Type
from pydantic import BaseModel, Field
from langchain.callbacks.manager import (
AsyncCallbackManagerForToolRun,
CallbackManagerForToolRun,
)
from langchain.tools.amadeus.base import AmadeusBaseTool
logger = logging.getLogger(__name__)
class FlightSearchSchema(BaseModel):
originLocationCode: str = Field(
description=(
" The three letter International Air Transport "
" Association (IATA) Location Identifier for the "
" search's origin airport. "
)
)
destinationLocationCode: str = Field(
description=(
" The three letter International Air Transport "
" Association (IATA) Location Identifier for the "
" search's destination airport. "
)
)
departureDateTimeEarliest: str = Field(
description=(
" The earliest departure datetime from the origin airport "
" for the flight search in the following format: "
' "YYYY-MM-DDTHH:MM", where "T" separates the date and time '
' components. For example: "2023-06-09T10:30:00" represents '
" June 9th, 2023, at 10:30 AM. "
)
)
departureDateTimeLatest: str = Field(
description=(
" The latest departure datetime from the origin airport "
" for the flight search in the following format: "
' "YYYY-MM-DDTHH:MM", where "T" separates the date and time '
' components. For example: "2023-06-09T10:30:00" represents '
" June 9th, 2023, at 10:30 AM. "
)
)
page_number: int = Field(
default=1,
description="The specific page number of flight results to retrieve",
)
class AmadeusFlightSearch(AmadeusBaseTool):
name: str = "single_flight_search"
description: str = (
" Use this tool to search for a single flight between the origin and "
" destination airports at a departure between an earliest and "
" latest datetime. "
)
args_schema: Type[FlightSearchSchema] = FlightSearchSchema
def _run(
self,
originLocationCode: str,
destinationLocationCode: str,
departureDateTimeEarliest: str,
departureDateTimeLatest: str,
page_number: int = 1,
run_manager: Optional[CallbackManagerForToolRun] = None,
) -> list:
try:
from amadeus import ResponseError
except ImportError as e:
raise ImportError(
"Unable to import amadeus, please install with `pip install amadeus`."
) from e
RESULTS_PER_PAGE = 10
# Authenticate and retrieve a client
client = self.client
# Check that earliest and latest dates are in the same day
earliestDeparture = dt.strptime(departureDateTimeEarliest, "%Y-%m-%dT%H:%M:%S")
latestDeparture = dt.strptime(departureDateTimeLatest, "%Y-%m-%dT%H:%M:%S")
if earliestDeparture.date() != latestDeparture.date():
logger.error(
" Error: Earliest and latest departure dates need to be the "
" same date. If you're trying to search for round-trip "
" flights, call this function for the outbound flight first, "
" and then call again for the return flight. "
)
return [None]
# Collect all results from the API
try:
response = client.shopping.flight_offers_search.get(
originLocationCode=originLocationCode,
destinationLocationCode=destinationLocationCode,
departureDate=latestDeparture.strftime("%Y-%m-%d"),
adults=1,
)
except ResponseError as error:
logger.error(error)
return [None]
# Generate output dictionary
output = []
for offer in response.data:
itinerary: Dict = {}
itinerary["price"] = {}
itinerary["price"]["total"] = offer["price"]["total"]
currency = offer["price"]["currency"]
currency = response.result["dictionaries"]["currencies"][currency]
itinerary["price"]["currency"] = {}
itinerary["price"]["currency"] = currency
segments = []
for segment in offer["itineraries"][0]["segments"]:
flight = {}
flight["departure"] = segment["departure"]
flight["arrival"] = segment["arrival"]
flight["flightNumber"] = segment["number"]
carrier = segment["carrierCode"]
carrier = response.result["dictionaries"]["carriers"][carrier]
flight["carrier"] = carrier
segments.append(flight)
itinerary["segments"] = []
itinerary["segments"] = segments
output.append(itinerary)
# Filter out flights after the latest departure time. Build a new list
# instead of popping from the list being iterated, which would skip elements.
filtered_output = []
for offer in output:
offerDeparture = dt.strptime(
offer["segments"][0]["departure"]["at"], "%Y-%m-%dT%H:%M:%S"
)
if offerDeparture <= latestDeparture:
filtered_output.append(offer)
output = filtered_output
# Return the paginated results
startIndex = (page_number - 1) * RESULTS_PER_PAGE
endIndex = startIndex + RESULTS_PER_PAGE
return output[startIndex:endIndex]
async def _arun(
self,
originLocationCode: str,
destinationLocationCode: str,
departureDateTimeEarliest: str,
departureDateTimeLatest: str,
page_number: int = 1,
run_manager: Optional[AsyncCallbackManagerForToolRun] = None,
) -> list:
raise NotImplementedError(f"The tool {self.name} does not support async yet.")
| [] |
2024-01-10 | sdelgadoc/langchain | langchain~embeddings~mlflow_gateway.py | from __future__ import annotations
from typing import Any, Iterator, List, Optional
from pydantic import BaseModel
from langchain.embeddings.base import Embeddings
def _chunk(texts: List[str], size: int) -> Iterator[List[str]]:
for i in range(0, len(texts), size):
yield texts[i : i + size]
class MlflowAIGatewayEmbeddings(Embeddings, BaseModel):
"""MLflow AI Gateway Embeddings APIs."""
route: str
"""The route to use for the MLflow AI Gateway API."""
gateway_uri: Optional[str] = None
"""The URI for the MLflow AI Gateway API."""
def __init__(self, **kwargs: Any):
try:
import mlflow.gateway
except ImportError as e:
raise ImportError(
"Could not import `mlflow.gateway` module. "
"Please install it with `pip install mlflow[gateway]`."
) from e
super().__init__(**kwargs)
if self.gateway_uri:
mlflow.gateway.set_gateway_uri(self.gateway_uri)
def _query(self, texts: List[str]) -> List[List[float]]:
try:
import mlflow.gateway
except ImportError as e:
raise ImportError(
"Could not import `mlflow.gateway` module. "
"Please install it with `pip install mlflow[gateway]`."
) from e
embeddings = []
for txt in _chunk(texts, 20):
resp = mlflow.gateway.query(self.route, data={"text": txt})
embeddings.append(resp["embeddings"])
return embeddings
def embed_documents(self, texts: List[str]) -> List[List[float]]:
return self._query(texts)
def embed_query(self, text: str) -> List[float]:
return self._query([text])[0]
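# ----------------------------------------------------------------------
# Hedged usage sketch (not part of the original module). Assumes
# ``mlflow[gateway]`` is installed and that the gateway URI and route name
# below match an MLflow AI Gateway instance you are actually running with an
# embeddings route configured (both values are placeholders).
def _demo_gateway_embeddings() -> None:
    embeddings = MlflowAIGatewayEmbeddings(
        gateway_uri="http://127.0.0.1:5000",  # hypothetical local gateway
        route="embeddings",                   # hypothetical route name
    )
    print(len(embeddings.embed_query("hello")))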
| [] |
2024-01-10 | sdelgadoc/langchain | langchain~experimental~autonomous_agents~baby_agi~baby_agi.py | """BabyAGI agent."""
from collections import deque
from typing import Any, Dict, List, Optional
from pydantic import BaseModel, Field
from langchain.callbacks.manager import CallbackManagerForChainRun
from langchain.chains.base import Chain
from langchain.experimental.autonomous_agents.baby_agi.task_creation import (
TaskCreationChain,
)
from langchain.experimental.autonomous_agents.baby_agi.task_execution import (
TaskExecutionChain,
)
from langchain.experimental.autonomous_agents.baby_agi.task_prioritization import (
TaskPrioritizationChain,
)
from langchain.schema.language_model import BaseLanguageModel
from langchain.vectorstores.base import VectorStore
class BabyAGI(Chain, BaseModel):
"""Controller model for the BabyAGI agent."""
task_list: deque = Field(default_factory=deque)
task_creation_chain: Chain = Field(...)
task_prioritization_chain: Chain = Field(...)
execution_chain: Chain = Field(...)
task_id_counter: int = Field(1)
vectorstore: VectorStore = Field(init=False)
max_iterations: Optional[int] = None
class Config:
"""Configuration for this pydantic object."""
arbitrary_types_allowed = True
def add_task(self, task: Dict) -> None:
self.task_list.append(task)
def print_task_list(self) -> None:
print("\033[95m\033[1m" + "\n*****TASK LIST*****\n" + "\033[0m\033[0m")
for t in self.task_list:
print(str(t["task_id"]) + ": " + t["task_name"])
def print_next_task(self, task: Dict) -> None:
print("\033[92m\033[1m" + "\n*****NEXT TASK*****\n" + "\033[0m\033[0m")
print(str(task["task_id"]) + ": " + task["task_name"])
def print_task_result(self, result: str) -> None:
print("\033[93m\033[1m" + "\n*****TASK RESULT*****\n" + "\033[0m\033[0m")
print(result)
@property
def input_keys(self) -> List[str]:
return ["objective"]
@property
def output_keys(self) -> List[str]:
return []
def get_next_task(
self, result: str, task_description: str, objective: str, **kwargs: Any
) -> List[Dict]:
"""Get the next task."""
task_names = [t["task_name"] for t in self.task_list]
incomplete_tasks = ", ".join(task_names)
response = self.task_creation_chain.run(
result=result,
task_description=task_description,
incomplete_tasks=incomplete_tasks,
objective=objective,
**kwargs,
)
new_tasks = response.split("\n")
return [
{"task_name": task_name} for task_name in new_tasks if task_name.strip()
]
def prioritize_tasks(
self, this_task_id: int, objective: str, **kwargs: Any
) -> List[Dict]:
"""Prioritize tasks."""
task_names = [t["task_name"] for t in list(self.task_list)]
next_task_id = int(this_task_id) + 1
response = self.task_prioritization_chain.run(
task_names=", ".join(task_names),
next_task_id=str(next_task_id),
objective=objective,
**kwargs,
)
new_tasks = response.split("\n")
prioritized_task_list = []
for task_string in new_tasks:
if not task_string.strip():
continue
task_parts = task_string.strip().split(".", 1)
if len(task_parts) == 2:
task_id = task_parts[0].strip()
task_name = task_parts[1].strip()
prioritized_task_list.append(
{"task_id": task_id, "task_name": task_name}
)
return prioritized_task_list
def _get_top_tasks(self, query: str, k: int) -> List[str]:
"""Get the top k tasks based on the query."""
results = self.vectorstore.similarity_search(query, k=k)
if not results:
return []
return [str(item.metadata["task"]) for item in results]
def execute_task(self, objective: str, task: str, k: int = 5, **kwargs: Any) -> str:
"""Execute a task."""
context = self._get_top_tasks(query=objective, k=k)
return self.execution_chain.run(
objective=objective, context="\n".join(context), task=task, **kwargs
)
def _call(
self,
inputs: Dict[str, Any],
run_manager: Optional[CallbackManagerForChainRun] = None,
) -> Dict[str, Any]:
"""Run the agent."""
_run_manager = run_manager or CallbackManagerForChainRun.get_noop_manager()
objective = inputs["objective"]
first_task = inputs.get("first_task", "Make a todo list")
self.add_task({"task_id": 1, "task_name": first_task})
num_iters = 0
while True:
if self.task_list:
self.print_task_list()
# Step 1: Pull the first task
task = self.task_list.popleft()
self.print_next_task(task)
# Step 2: Execute the task
result = self.execute_task(
objective, task["task_name"], callbacks=_run_manager.get_child()
)
this_task_id = int(task["task_id"])
self.print_task_result(result)
# Step 3: Store the result in the vector store
result_id = f"result_{task['task_id']}"
self.vectorstore.add_texts(
texts=[result],
metadatas=[{"task": task["task_name"]}],
ids=[result_id],
)
# Step 4: Create new tasks and reprioritize task list
new_tasks = self.get_next_task(
result,
task["task_name"],
objective,
callbacks=_run_manager.get_child(),
)
for new_task in new_tasks:
self.task_id_counter += 1
new_task.update({"task_id": self.task_id_counter})
self.add_task(new_task)
self.task_list = deque(
self.prioritize_tasks(
this_task_id, objective, callbacks=_run_manager.get_child()
)
)
num_iters += 1
if self.max_iterations is not None and num_iters == self.max_iterations:
print(
"\033[91m\033[1m" + "\n*****TASK ENDING*****\n" + "\033[0m\033[0m"
)
break
return {}
@classmethod
def from_llm(
cls,
llm: BaseLanguageModel,
vectorstore: VectorStore,
verbose: bool = False,
task_execution_chain: Optional[Chain] = None,
**kwargs: Dict[str, Any],
) -> "BabyAGI":
"""Initialize the BabyAGI Controller."""
task_creation_chain = TaskCreationChain.from_llm(llm, verbose=verbose)
task_prioritization_chain = TaskPrioritizationChain.from_llm(
llm, verbose=verbose
)
if task_execution_chain is None:
execution_chain: Chain = TaskExecutionChain.from_llm(llm, verbose=verbose)
else:
execution_chain = task_execution_chain
return cls(
task_creation_chain=task_creation_chain,
task_prioritization_chain=task_prioritization_chain,
execution_chain=execution_chain,
vectorstore=vectorstore,
**kwargs,
)
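# ----------------------------------------------------------------------
# Hedged usage sketch (not part of the original module): wiring the controller
# from components you already have. ``llm`` and ``vectorstore`` are
# placeholders for any BaseLanguageModel / VectorStore instance; nothing here
# is specific to a particular provider.
def run_baby_agi_demo(
    llm: BaseLanguageModel, vectorstore: VectorStore, objective: str
) -> None:
    baby_agi = BabyAGI.from_llm(
        llm=llm,
        vectorstore=vectorstore,
        verbose=False,
        max_iterations=2,  # keep the task loop short for a demo
    )
    baby_agi({"objective": objective})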
| [] |
2024-01-10 | sdelgadoc/langchain | langchain~llms~mlflow_ai_gateway.py | from __future__ import annotations
from typing import Any, Dict, List, Mapping, Optional
from pydantic import BaseModel, Extra
from langchain.callbacks.manager import CallbackManagerForLLMRun
from langchain.llms.base import LLM
class Params(BaseModel, extra=Extra.allow):
"""Parameters for the MLflow AI Gateway LLM."""
temperature: float = 0.0
candidate_count: int = 1
"""The number of candidates to return."""
stop: Optional[List[str]] = None
max_tokens: Optional[int] = None
class MlflowAIGateway(LLM):
"""The MLflow AI Gateway models."""
route: str
gateway_uri: Optional[str] = None
params: Optional[Params] = None
def __init__(self, **kwargs: Any):
try:
import mlflow.gateway
except ImportError as e:
raise ImportError(
"Could not import `mlflow.gateway` module. "
"Please install it with `pip install mlflow[gateway]`."
) from e
super().__init__(**kwargs)
if self.gateway_uri:
mlflow.gateway.set_gateway_uri(self.gateway_uri)
@property
def _default_params(self) -> Dict[str, Any]:
params: Dict[str, Any] = {
"gateway_uri": self.gateway_uri,
"route": self.route,
**(self.params.dict() if self.params else {}),
}
return params
@property
def _identifying_params(self) -> Mapping[str, Any]:
return self._default_params
def _call(
self,
prompt: str,
stop: Optional[List[str]] = None,
run_manager: Optional[CallbackManagerForLLMRun] = None,
**kwargs: Any,
) -> str:
try:
import mlflow.gateway
except ImportError as e:
raise ImportError(
"Could not import `mlflow.gateway` module. "
"Please install it with `pip install mlflow[gateway]`."
) from e
data: Dict[str, Any] = {
"prompt": prompt,
**(self.params.dict() if self.params else {}),
}
if s := (stop or (self.params.stop if self.params else None)):
data["stop"] = s
resp = mlflow.gateway.query(self.route, data=data)
return resp["candidates"][0]["text"]
@property
def _llm_type(self) -> str:
return "mlflow-ai-gateway"
| [] |
2024-01-10 | sdelgadoc/langchain | langchain~llms~petals.py | import logging
from typing import Any, Dict, List, Mapping, Optional
from pydantic import Extra, Field, root_validator
from langchain.callbacks.manager import CallbackManagerForLLMRun
from langchain.llms.base import LLM
from langchain.llms.utils import enforce_stop_tokens
from langchain.utils import get_from_dict_or_env
logger = logging.getLogger(__name__)
class Petals(LLM):
"""Petals Bloom models.
To use, you should have the ``petals`` python package installed, and the
environment variable ``HUGGINGFACE_API_KEY`` set with your API key.
Any parameters that are valid to be passed to the call can be passed
in, even if not explicitly saved on this class.
Example:
.. code-block:: python
from langchain.llms import Petals
petals = Petals()
"""
client: Any
"""The client to use for the API calls."""
tokenizer: Any
"""The tokenizer to use for the API calls."""
model_name: str = "bigscience/bloom-petals"
"""The model to use."""
temperature: float = 0.7
"""What sampling temperature to use"""
max_new_tokens: int = 256
"""The maximum number of new tokens to generate in the completion."""
top_p: float = 0.9
"""The cumulative probability for top-p sampling."""
top_k: Optional[int] = None
"""The number of highest probability vocabulary tokens
to keep for top-k-filtering."""
do_sample: bool = True
"""Whether or not to use sampling; use greedy decoding otherwise."""
max_length: Optional[int] = None
"""The maximum length of the sequence to be generated."""
model_kwargs: Dict[str, Any] = Field(default_factory=dict)
"""Holds any model parameters valid for `create` call
not explicitly specified."""
huggingface_api_key: Optional[str] = None
class Config:
"""Configuration for this pydantic config."""
extra = Extra.forbid
@root_validator(pre=True)
def build_extra(cls, values: Dict[str, Any]) -> Dict[str, Any]:
"""Build extra kwargs from additional params that were passed in."""
all_required_field_names = {field.alias for field in cls.__fields__.values()}
extra = values.get("model_kwargs", {})
for field_name in list(values):
if field_name not in all_required_field_names:
if field_name in extra:
raise ValueError(f"Found {field_name} supplied twice.")
logger.warning(
f"""WARNING! {field_name} is not default parameter.
{field_name} was transferred to model_kwargs.
Please confirm that {field_name} is what you intended."""
)
extra[field_name] = values.pop(field_name)
values["model_kwargs"] = extra
return values
@root_validator()
def validate_environment(cls, values: Dict) -> Dict:
"""Validate that api key and python package exists in environment."""
huggingface_api_key = get_from_dict_or_env(
values, "huggingface_api_key", "HUGGINGFACE_API_KEY"
)
try:
from petals import DistributedBloomForCausalLM
from transformers import BloomTokenizerFast
model_name = values["model_name"]
values["tokenizer"] = BloomTokenizerFast.from_pretrained(model_name)
values["client"] = DistributedBloomForCausalLM.from_pretrained(model_name)
values["huggingface_api_key"] = huggingface_api_key
except ImportError:
raise ValueError(
"Could not import transformers or petals python package."
"Please install with `pip install -U transformers petals`."
)
return values
@property
def _default_params(self) -> Dict[str, Any]:
"""Get the default parameters for calling Petals API."""
normal_params = {
"temperature": self.temperature,
"max_new_tokens": self.max_new_tokens,
"top_p": self.top_p,
"top_k": self.top_k,
"do_sample": self.do_sample,
"max_length": self.max_length,
}
return {**normal_params, **self.model_kwargs}
@property
def _identifying_params(self) -> Mapping[str, Any]:
"""Get the identifying parameters."""
return {**{"model_name": self.model_name}, **self._default_params}
@property
def _llm_type(self) -> str:
"""Return type of llm."""
return "petals"
def _call(
self,
prompt: str,
stop: Optional[List[str]] = None,
run_manager: Optional[CallbackManagerForLLMRun] = None,
**kwargs: Any,
) -> str:
"""Call the Petals API."""
params = self._default_params
params = {**params, **kwargs}
inputs = self.tokenizer(prompt, return_tensors="pt")["input_ids"]
outputs = self.client.generate(inputs, **params)
text = self.tokenizer.decode(outputs[0])
if stop is not None:
# I believe this is required since the stop tokens
# are not enforced by the model parameters
text = enforce_stop_tokens(text, stop)
return text
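# ----------------------------------------------------------------------
# Hedged usage sketch (not part of the original module). Assumes ``petals``
# and ``transformers`` are installed and HUGGINGFACE_API_KEY is set; loading
# the distributed model downloads weights and may take a while.
def _demo_petals() -> None:
    llm = Petals(model_name="bigscience/bloom-petals", max_new_tokens=64)
    print(llm("Once upon a time, "))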
| [] |
2024-01-10 | sdelgadoc/langchain | langchain~tools~amadeus~closest_airport.py | from typing import Optional, Type
from pydantic import BaseModel, Field
from langchain.callbacks.manager import (
AsyncCallbackManagerForToolRun,
CallbackManagerForToolRun,
)
from langchain.chains import LLMChain
from langchain.chat_models import ChatOpenAI
from langchain.tools.amadeus.base import AmadeusBaseTool
class ClosestAirportSchema(BaseModel):
location: str = Field(
description=(
" The location for which you would like to find the nearest airport "
" along with optional details such as country, state, region, or "
" province, allowing for easy processing and identification of "
" the closest airport. Examples of the format are the following:\n"
" Cali, Colombia\n "
" Lincoln, Nebraska, United States\n"
" New York, United States\n"
" Sydney, New South Wales, Australia\n"
" Rome, Lazio, Italy\n"
" Toronto, Ontario, Canada\n"
)
)
class AmadeusClosestAirport(AmadeusBaseTool):
name: str = "closest_airport"
description: str = (
"Use this tool to find the closest airport to a particular location."
)
args_schema: Type[ClosestAirportSchema] = ClosestAirportSchema
def _run(
self,
location: str,
run_manager: Optional[CallbackManagerForToolRun] = None,
) -> str:
template = (
" What is the nearest airport to {location}? Please respond with the "
" airport's International Air Transport Association (IATA) Location "
' Identifier in the following JSON format. JSON: "iataCode": "IATA '
' Location Identifier" '
)
llm = ChatOpenAI(temperature=0)
llm_chain = LLMChain.from_string(llm=llm, template=template)
output = llm_chain.run(location=location)
return output
async def _arun(
self,
location: str,
run_manager: Optional[AsyncCallbackManagerForToolRun] = None,
) -> str:
raise NotImplementedError(f"The tool {self.name} does not support async yet.")
| [
" What is the nearest airport to {location}? Please respond with the airport's International Air Transport Association (IATA) Location Identifier in the following JSON format. JSON: \"iataCode\": \"IATA Location Identifier\" "
] |
2024-01-10 | sdelgadoc/langchain | langchain~llms~replicate.py | import logging
from typing import Any, Dict, List, Mapping, Optional
from pydantic import Extra, Field, root_validator
from langchain.callbacks.manager import CallbackManagerForLLMRun
from langchain.llms.base import LLM
from langchain.utils import get_from_dict_or_env
logger = logging.getLogger(__name__)
class Replicate(LLM):
"""Replicate models.
To use, you should have the ``replicate`` python package installed,
and the environment variable ``REPLICATE_API_TOKEN`` set with your API token.
You can find your token here: https://replicate.com/account
The model param is required, but any other model parameters can also
be passed in with the format input={model_param: value, ...}
Example:
.. code-block:: python
from langchain.llms import Replicate
replicate = Replicate(model="stability-ai/stable-diffusion: \
27b93a2413e7f36cd83da926f365628\
0b2931564ff050bf9575f1fdf9bcd7478",
input={"image_dimensions": "512x512"})
"""
model: str
input: Dict[str, Any] = Field(default_factory=dict)
model_kwargs: Dict[str, Any] = Field(default_factory=dict)
replicate_api_token: Optional[str] = None
class Config:
"""Configuration for this pydantic config."""
extra = Extra.forbid
@root_validator(pre=True)
def build_extra(cls, values: Dict[str, Any]) -> Dict[str, Any]:
"""Build extra kwargs from additional params that were passed in."""
all_required_field_names = {field.alias for field in cls.__fields__.values()}
extra = values.get("model_kwargs", {})
for field_name in list(values):
if field_name not in all_required_field_names:
if field_name in extra:
raise ValueError(f"Found {field_name} supplied twice.")
logger.warning(
f"""{field_name} was transferred to model_kwargs.
Please confirm that {field_name} is what you intended."""
)
extra[field_name] = values.pop(field_name)
values["model_kwargs"] = extra
return values
@root_validator()
def validate_environment(cls, values: Dict) -> Dict:
"""Validate that api key and python package exists in environment."""
replicate_api_token = get_from_dict_or_env(
values, "REPLICATE_API_TOKEN", "REPLICATE_API_TOKEN"
)
values["replicate_api_token"] = replicate_api_token
return values
@property
def _identifying_params(self) -> Mapping[str, Any]:
"""Get the identifying parameters."""
return {
"model": self.model,
**{"model_kwargs": self.model_kwargs},
}
@property
def _llm_type(self) -> str:
"""Return type of model."""
return "replicate"
def _call(
self,
prompt: str,
stop: Optional[List[str]] = None,
run_manager: Optional[CallbackManagerForLLMRun] = None,
**kwargs: Any,
) -> str:
"""Call to replicate endpoint."""
try:
import replicate as replicate_python
except ImportError:
raise ImportError(
"Could not import replicate python package. "
"Please install it with `pip install replicate`."
)
# get the model and version
model_str, version_str = self.model.split(":")
model = replicate_python.models.get(model_str)
version = model.versions.get(version_str)
# sort through the openapi schema to get the name of the first input
input_properties = sorted(
version.openapi_schema["components"]["schemas"]["Input"][
"properties"
].items(),
key=lambda item: item[1].get("x-order", 0),
)
first_input_name = input_properties[0][0]
inputs = {first_input_name: prompt, **self.input}
iterator = replicate_python.run(self.model, input={**inputs, **kwargs})
return "".join([output for output in iterator])
| [] |
2024-01-10 | sdelgadoc/langchain | langchain~llms~nlpcloud.py | from typing import Any, Dict, List, Mapping, Optional
from pydantic import Extra, root_validator
from langchain.callbacks.manager import CallbackManagerForLLMRun
from langchain.llms.base import LLM
from langchain.utils import get_from_dict_or_env
class NLPCloud(LLM):
"""NLPCloud large language models.
To use, you should have the ``nlpcloud`` python package installed, and the
environment variable ``NLPCLOUD_API_KEY`` set with your API key.
Example:
.. code-block:: python
from langchain.llms import NLPCloud
nlpcloud = NLPCloud(model="gpt-neox-20b")
"""
client: Any #: :meta private:
model_name: str = "finetuned-gpt-neox-20b"
"""Model name to use."""
temperature: float = 0.7
"""What sampling temperature to use."""
min_length: int = 1
"""The minimum number of tokens to generate in the completion."""
max_length: int = 256
"""The maximum number of tokens to generate in the completion."""
length_no_input: bool = True
"""Whether min_length and max_length should include the length of the input."""
remove_input: bool = True
"""Remove input text from API response"""
remove_end_sequence: bool = True
"""Whether or not to remove the end sequence token."""
bad_words: List[str] = []
"""List of tokens not allowed to be generated."""
top_p: int = 1
"""Total probability mass of tokens to consider at each step."""
top_k: int = 50
"""The number of highest probability tokens to keep for top-k filtering."""
repetition_penalty: float = 1.0
"""Penalizes repeated tokens. 1.0 means no penalty."""
length_penalty: float = 1.0
"""Exponential penalty to the length."""
do_sample: bool = True
"""Whether to use sampling (True) or greedy decoding."""
num_beams: int = 1
"""Number of beams for beam search."""
early_stopping: bool = False
"""Whether to stop beam search at num_beams sentences."""
num_return_sequences: int = 1
"""How many completions to generate for each prompt."""
nlpcloud_api_key: Optional[str] = None
class Config:
"""Configuration for this pydantic object."""
extra = Extra.forbid
@root_validator()
def validate_environment(cls, values: Dict) -> Dict:
"""Validate that api key and python package exists in environment."""
nlpcloud_api_key = get_from_dict_or_env(
values, "nlpcloud_api_key", "NLPCLOUD_API_KEY"
)
try:
import nlpcloud
values["client"] = nlpcloud.Client(
values["model_name"], nlpcloud_api_key, gpu=True, lang="en"
)
except ImportError:
raise ImportError(
"Could not import nlpcloud python package. "
"Please install it with `pip install nlpcloud`."
)
return values
@property
def _default_params(self) -> Mapping[str, Any]:
"""Get the default parameters for calling NLPCloud API."""
return {
"temperature": self.temperature,
"min_length": self.min_length,
"max_length": self.max_length,
"length_no_input": self.length_no_input,
"remove_input": self.remove_input,
"remove_end_sequence": self.remove_end_sequence,
"bad_words": self.bad_words,
"top_p": self.top_p,
"top_k": self.top_k,
"repetition_penalty": self.repetition_penalty,
"length_penalty": self.length_penalty,
"do_sample": self.do_sample,
"num_beams": self.num_beams,
"early_stopping": self.early_stopping,
"num_return_sequences": self.num_return_sequences,
}
@property
def _identifying_params(self) -> Mapping[str, Any]:
"""Get the identifying parameters."""
return {**{"model_name": self.model_name}, **self._default_params}
@property
def _llm_type(self) -> str:
"""Return type of llm."""
return "nlpcloud"
def _call(
self,
prompt: str,
stop: Optional[List[str]] = None,
run_manager: Optional[CallbackManagerForLLMRun] = None,
**kwargs: Any,
) -> str:
"""Call out to NLPCloud's create endpoint.
Args:
prompt: The prompt to pass into the model.
stop: Not supported by this interface (pass in init method)
Returns:
The string generated by the model.
Example:
.. code-block:: python
response = nlpcloud("Tell me a joke.")
"""
if stop and len(stop) > 1:
raise ValueError(
"NLPCloud only supports a single stop sequence per generation."
"Pass in a list of length 1."
)
elif stop and len(stop) == 1:
end_sequence = stop[0]
else:
end_sequence = None
params = {**self._default_params, **kwargs}
response = self.client.generation(prompt, end_sequence=end_sequence, **params)
return response["generated_text"]
| [] |
2024-01-10 | sdelgadoc/langchain | langchain~vectorstores~deeplake.py | """Wrapper around Activeloop Deep Lake."""
from __future__ import annotations
import logging
from typing import Any, Callable, Dict, Iterable, List, Optional, Tuple, Union
import numpy as np
try:
import deeplake
from deeplake.core.fast_forwarding import version_compare
from deeplake.core.vectorstore import DeepLakeVectorStore
_DEEPLAKE_INSTALLED = True
except ImportError:
_DEEPLAKE_INSTALLED = False
from langchain.docstore.document import Document
from langchain.embeddings.base import Embeddings
from langchain.vectorstores.base import VectorStore
from langchain.vectorstores.utils import maximal_marginal_relevance
logger = logging.getLogger(__name__)
class DeepLake(VectorStore):
"""Wrapper around Deep Lake, a data lake for deep learning applications.
We integrated Deep Lake's similarity search and filtering for fast prototyping.
It now also supports Tensor Query Language (TQL) for production use cases
over billions of rows.
Why Deep Lake?
- Not only stores embeddings, but also the original data with version control.
- Serverless, doesn't require another service and can be used with major
cloud providers (S3, GCS, etc.)
- More than just a multi-modal vector store. You can use the dataset
to fine-tune your own LLM models.
To use, you should have the ``deeplake`` python package installed.
Example:
.. code-block:: python
from langchain.vectorstores import DeepLake
from langchain.embeddings.openai import OpenAIEmbeddings
embeddings = OpenAIEmbeddings()
vectorstore = DeepLake("langchain_store", embeddings.embed_query)
"""
_LANGCHAIN_DEFAULT_DEEPLAKE_PATH = "./deeplake/"
def __init__(
self,
dataset_path: str = _LANGCHAIN_DEFAULT_DEEPLAKE_PATH,
token: Optional[str] = None,
embedding_function: Optional[Embeddings] = None,
read_only: bool = False,
ingestion_batch_size: int = 1000,
num_workers: int = 0,
verbose: bool = True,
exec_option: Optional[str] = None,
**kwargs: Any,
) -> None:
"""Creates an empty DeepLakeVectorStore or loads an existing one.
The DeepLakeVectorStore is located at the specified ``path``.
Examples:
>>> # Create a vector store with default tensors
>>> deeplake_vectorstore = DeepLake(
... path = <path_for_storing_Data>,
... )
>>>
>>> # Create a vector store in the Deep Lake Managed Tensor Database
>>> data = DeepLake(
... path = "hub://org_id/dataset_name",
... exec_option = "tensor_db",
... )
Args:
dataset_path (str): Path to existing dataset or where to create
a new one. Defaults to _LANGCHAIN_DEFAULT_DEEPLAKE_PATH.
token (str, optional): Activeloop token, for fetching credentials
to the dataset at path if it is a Deep Lake dataset.
Tokens are normally autogenerated. Optional.
embedding_function (str, optional): Function to convert
either documents or query. Optional.
read_only (bool): Open dataset in read-only mode. Default is False.
ingestion_batch_size (int): During data ingestion, data is divided
into batches. Batch size is the size of each batch.
Default is 1000.
num_workers (int): Number of workers to use during data ingestion.
Default is 0.
verbose (bool): Print dataset summary after each operation.
Default is True.
exec_option (str, optional): DeepLakeVectorStore supports 4 execution
options for searching - "auto", "python", "compute_engine" and "tensor_db".
Default is None.
- ``auto``- Selects the best execution method based on the storage
location of the Vector Store. It is the default option.
- ``python`` - Pure-python implementation that runs on the client.
WARNING: using this with big datasets can lead to memory
issues. Data can be stored anywhere.
- ``compute_engine`` - C++ implementation of the Deep Lake Compute
Engine that runs on the client. Can be used for any data stored in
or connected to Deep Lake. Not for in-memory or local datasets.
- ``tensor_db`` - Hosted Managed Tensor Database that is
responsible for storage and query execution. Only for data stored in
the Deep Lake Managed Database. Use runtime = {"db_engine": True}
during dataset creation.
**kwargs: Other optional keyword arguments.
Raises:
ValueError: If some condition is not met.
"""
self.ingestion_batch_size = ingestion_batch_size
self.num_workers = num_workers
self.verbose = verbose
if _DEEPLAKE_INSTALLED is False:
raise ValueError(
"Could not import deeplake python package. "
"Please install it with `pip install deeplake[enterprise]`."
)
if (
kwargs.get("runtime") == {"tensor_db": True}
and version_compare(deeplake.__version__, "3.6.7") == -1
):
raise ValueError(
"To use tensor_db option you need to update deeplake to `3.6.7`. "
f"Currently installed deeplake version is {deeplake.__version__}. "
)
self.dataset_path = dataset_path
self.vectorstore = DeepLakeVectorStore(
path=self.dataset_path,
embedding_function=embedding_function,
read_only=read_only,
token=token,
exec_option=exec_option,
verbose=verbose,
**kwargs,
)
self._embedding_function = embedding_function
self._id_tensor_name = "ids" if "ids" in self.vectorstore.tensors() else "id"
def add_texts(
self,
texts: Iterable[str],
metadatas: Optional[List[dict]] = None,
ids: Optional[List[str]] = None,
**kwargs: Any,
) -> List[str]:
"""Run more texts through the embeddings and add to the vectorstore.
Examples:
>>> ids = deeplake_vectorstore.add_texts(
... texts = <list_of_texts>,
... metadatas = <list_of_metadata_jsons>,
... ids = <list_of_ids>,
... )
Args:
texts (Iterable[str]): Texts to add to the vectorstore.
metadatas (Optional[List[dict]], optional): Optional list of metadatas.
ids (Optional[List[str]], optional): Optional list of IDs.
embedding_function (Optional[Embeddings], optional): Embedding function
to use to convert the text into embeddings.
**kwargs (Any): Any additional keyword arguments passed is not supported
by this method.
Returns:
List[str]: List of IDs of the added texts.
"""
if kwargs:
unsupported_items = "`, `".join(set(kwargs.keys()))
raise TypeError(
f"`{unsupported_items}` is/are not a valid argument to add_text method"
)
kwargs = {}
if ids:
if self._id_tensor_name == "ids": # for backwards compatibility
kwargs["ids"] = ids
else:
kwargs["id"] = ids
if texts is None:
raise ValueError("`texts` parameter shouldn't be None.")
if not isinstance(texts, list):
texts = list(texts)
if len(texts) == 0:
raise ValueError("`texts` parameter shouldn't be empty.")
if metadatas is None:
metadatas = [{}] * len(texts)
return self.vectorstore.add(
text=texts,
metadata=metadatas,
embedding_data=texts,
embedding_tensor="embedding",
embedding_function=self._embedding_function.embed_documents, # type: ignore
return_ids=True,
**kwargs,
)
def _search_tql(
self,
tql_query: Optional[str],
exec_option: Optional[str] = None,
**kwargs: Any,
) -> List[Document]:
"""Function for performing tql_search.
Args:
tql_query (str): TQL Query string for direct evaluation.
Available only for `compute_engine` and `tensor_db`.
exec_option (str, optional): Supports 3 ways to search.
Could be "python", "compute_engine" or "tensor_db". Default is "python".
- ``python`` - Pure-python implementation for the client.
WARNING: not recommended for big datasets due to potential memory
issues.
- ``compute_engine`` - C++ implementation of Deep Lake Compute
Engine for the client. Not for in-memory or local datasets.
- ``tensor_db`` - Hosted Managed Tensor Database for storage
and query execution. Only for data in Deep Lake Managed Database.
Use runtime = {"db_engine": True} during dataset creation.
return_score (bool): Return score with document. Default is False.
Returns:
Tuple[List[Document], List[Tuple[Document, float]]] - A tuple of two lists.
The first list contains Documents, and the second list contains
tuples of Document and float score.
Raises:
ValueError: If return_score is True but some condition is not met.
"""
result = self.vectorstore.search(
query=tql_query,
exec_option=exec_option,
)
metadatas = result["metadata"]
texts = result["text"]
docs = [
Document(
page_content=text,
metadata=metadata,
)
for text, metadata in zip(texts, metadatas)
]
if kwargs:
unsupported_argument = next(iter(kwargs))
if kwargs[unsupported_argument] is not False:
raise ValueError(
f"specifying {unsupported_argument} is "
"not supported with tql search."
)
return docs
def _search(
self,
query: Optional[str] = None,
embedding: Optional[Union[List[float], np.ndarray]] = None,
embedding_function: Optional[Callable] = None,
k: int = 4,
distance_metric: str = "L2",
use_maximal_marginal_relevance: bool = False,
fetch_k: Optional[int] = 20,
filter: Optional[Union[Dict, Callable]] = None,
return_score: bool = False,
exec_option: Optional[str] = None,
**kwargs: Any,
) -> Union[List[Document], List[Tuple[Document, float]]]:
"""
Return docs similar to query.
Args:
query (str, optional): Text to look up similar docs.
embedding (Union[List[float], np.ndarray], optional): Query's embedding.
embedding_function (Callable, optional): Function to convert `query`
into embedding.
k (int): Number of Documents to return.
distance_metric (str): `L2` for Euclidean, `L1` for Nuclear, `max`
for L-infinity distance, `cos` for cosine similarity, 'dot' for dot
product.
filter (Union[Dict, Callable], optional): Additional filter prior
to the embedding search.
- ``Dict`` - Key-value search on tensors of htype json, on an
AND basis (a sample must satisfy all key-value filters to be True)
Dict = {"tensor_name_1": {"key": value},
"tensor_name_2": {"key": value}}
- ``Function`` - Any function compatible with `deeplake.filter`.
use_maximal_marginal_relevance (bool): Use maximal marginal relevance.
fetch_k (int): Number of Documents for MMR algorithm.
return_score (bool): Return the score.
exec_option (str, optional): Supports 3 ways to perform searching.
Could be "python", "compute_engine" or "tensor_db".
- ``python`` - Pure-python implementation for the client.
WARNING: not recommended for big datasets.
- ``compute_engine`` - C++ implementation of Deep Lake Compute
Engine for the client. Not for in-memory or local datasets.
- ``tensor_db`` - Hosted Managed Tensor Database for storage
and query execution. Only for data in Deep Lake Managed Database.
Use runtime = {"db_engine": True} during dataset creation.
**kwargs: Additional keyword arguments.
Returns:
List of Documents by the specified distance metric,
if return_score True, return a tuple of (Document, score)
Raises:
ValueError: if both `embedding` and `embedding_function` are not specified.
"""
if kwargs.get("tql_query"):
return self._search_tql(
tql_query=kwargs["tql_query"],
exec_option=exec_option,
return_score=return_score,
embedding=embedding,
embedding_function=embedding_function,
distance_metric=distance_metric,
use_maximal_marginal_relevance=use_maximal_marginal_relevance,
filter=filter,
)
if embedding_function:
if isinstance(embedding_function, Embeddings):
_embedding_function = embedding_function.embed_query
else:
_embedding_function = embedding_function
elif self._embedding_function:
_embedding_function = self._embedding_function.embed_query
else:
_embedding_function = None
if embedding is None:
if _embedding_function is None:
raise ValueError(
"Either `embedding` or `embedding_function` needs to be"
" specified."
)
embedding = _embedding_function(query) if query else None
if isinstance(embedding, list):
embedding = np.array(embedding, dtype=np.float32)
if len(embedding.shape) > 1:
embedding = embedding[0]
result = self.vectorstore.search(
embedding=embedding,
k=fetch_k if use_maximal_marginal_relevance else k,
distance_metric=distance_metric,
filter=filter,
exec_option=exec_option,
return_tensors=["embedding", "metadata", "text"],
)
scores = result["score"]
embeddings = result["embedding"]
metadatas = result["metadata"]
texts = result["text"]
if use_maximal_marginal_relevance:
lambda_mult = kwargs.get("lambda_mult", 0.5)
indices = maximal_marginal_relevance( # type: ignore
embedding, # type: ignore
embeddings,
k=min(k, len(texts)),
lambda_mult=lambda_mult,
)
scores = [scores[i] for i in indices]
texts = [texts[i] for i in indices]
metadatas = [metadatas[i] for i in indices]
docs = [
Document(
page_content=text,
metadata=metadata,
)
for text, metadata in zip(texts, metadatas)
]
if return_score:
return [(doc, score) for doc, score in zip(docs, scores)]
return docs
def similarity_search(
self,
query: str,
k: int = 4,
**kwargs: Any,
) -> List[Document]:
"""
Return docs most similar to query.
Examples:
>>> # Search using an embedding
>>> data = vector_store.similarity_search(
... query=<your_query>,
... k=<num_items>,
... exec_option=<preferred_exec_option>,
... )
>>> # Run tql search:
>>> data = vector_store.similarity_search(
... query=None,
... tql_query="SELECT * WHERE id == <id>",
... exec_option="compute_engine",
... )
Args:
k (int): Number of Documents to return. Defaults to 4.
query (str): Text to look up similar documents.
**kwargs: Additional keyword arguments include:
embedding (Callable): Embedding function to use. Defaults to None.
distance_metric (str): 'L2' for Euclidean, 'L1' for Nuclear, 'max'
for L-infinity, 'cos' for cosine, 'dot' for dot product.
Defaults to 'L2'.
filter (Union[Dict, Callable], optional): Additional filter
before embedding search.
- Dict: Key-value search on tensors of htype json,
(sample must satisfy all key-value filters)
Dict = {"tensor_1": {"key": value}, "tensor_2": {"key": value}}
- Function: Compatible with `deeplake.filter`.
Defaults to None.
exec_option (str): Supports 3 ways to perform searching.
'python', 'compute_engine', or 'tensor_db'. Defaults to 'python'.
- 'python': Pure-python implementation for the client.
WARNING: not recommended for big datasets.
- 'compute_engine': C++ implementation of the Compute Engine for
the client. Not for in-memory or local datasets.
- 'tensor_db': Managed Tensor Database for storage and query.
Only for data in Deep Lake Managed Database.
Use `runtime = {"db_engine": True}` during dataset creation.
Returns:
List[Document]: List of Documents most similar to the query vector.
"""
return self._search(
query=query,
k=k,
use_maximal_marginal_relevance=False,
return_score=False,
**kwargs,
)
def similarity_search_by_vector(
self,
embedding: Union[List[float], np.ndarray],
k: int = 4,
**kwargs: Any,
) -> List[Document]:
"""
Return docs most similar to embedding vector.
Examples:
>>> # Search using an embedding
>>> data = vector_store.similarity_search_by_vector(
... embedding=<your_embedding>,
... k=<num_items_to_return>,
... exec_option=<preferred_exec_option>,
... )
Args:
embedding (Union[List[float], np.ndarray]):
Embedding to find similar docs.
k (int): Number of Documents to return. Defaults to 4.
**kwargs: Additional keyword arguments including:
filter (Union[Dict, Callable], optional):
Additional filter before embedding search.
                    - ``Dict`` - Key-value search on tensors of htype json. A sample
                        matches only if it satisfies all key-value filters.
Dict = {"tensor_name_1": {"key": value},
"tensor_name_2": {"key": value}}
- ``Function`` - Any function compatible with
`deeplake.filter`.
Defaults to None.
exec_option (str): Options for search execution include
"python", "compute_engine", or "tensor_db". Defaults to
"python".
- "python" - Pure-python implementation running on the client.
Can be used for data stored anywhere. WARNING: using this
option with big datasets is discouraged due to potential
memory issues.
- "compute_engine" - Performant C++ implementation of the Deep
Lake Compute Engine. Runs on the client and can be used for
any data stored in or connected to Deep Lake. It cannot be
used with in-memory or local datasets.
- "tensor_db" - Performant, fully-hosted Managed Tensor Database.
Responsible for storage and query execution. Only available
for data stored in the Deep Lake Managed Database.
To store datasets in this database, specify
`runtime = {"db_engine": True}` during dataset creation.
distance_metric (str): `L2` for Euclidean, `L1` for Nuclear,
`max` for L-infinity distance, `cos` for cosine similarity,
'dot' for dot product. Defaults to `L2`.
Returns:
List[Document]: List of Documents most similar to the query vector.
"""
return self._search(
embedding=embedding,
k=k,
use_maximal_marginal_relevance=False,
return_score=False,
**kwargs,
)
def similarity_search_with_score(
self,
query: str,
k: int = 4,
**kwargs: Any,
) -> List[Tuple[Document, float]]:
"""
Run similarity search with Deep Lake with distance returned.
Examples:
>>> data = vector_store.similarity_search_with_score(
... query=<your_query>,
... embedding=<your_embedding_function>
... k=<number_of_items_to_return>,
... exec_option=<preferred_exec_option>,
... )
Args:
query (str): Query text to search for.
k (int): Number of results to return. Defaults to 4.
**kwargs: Additional keyword arguments. Some of these arguments are:
                distance_metric: `L2` for Euclidean, `L1` for Nuclear, `max` for
                    L-infinity distance, `cos` for cosine similarity, 'dot' for dot product.
Defaults to `L2`.
filter (Optional[Dict[str, str]]): Filter by metadata. Defaults to None.
embedding_function (Callable): Embedding function to use. Defaults
to None.
exec_option (str): DeepLakeVectorStore supports 3 ways to perform
searching. It could be either "python", "compute_engine" or
"tensor_db". Defaults to "python".
- "python" - Pure-python implementation running on the client.
Can be used for data stored anywhere. WARNING: using this
option with big datasets is discouraged due to potential
memory issues.
- "compute_engine" - Performant C++ implementation of the Deep
Lake Compute Engine. Runs on the client and can be used for
any data stored in or connected to Deep Lake. It cannot be used
with in-memory or local datasets.
- "tensor_db" - Performant, fully-hosted Managed Tensor Database.
Responsible for storage and query execution. Only available for
data stored in the Deep Lake Managed Database. To store datasets
in this database, specify `runtime = {"db_engine": True}`
during dataset creation.
Returns:
List[Tuple[Document, float]]: List of documents most similar to the query
            text, with the distance returned as a float."""
return self._search(
query=query,
k=k,
return_score=True,
**kwargs,
)
def max_marginal_relevance_search_by_vector(
self,
embedding: List[float],
k: int = 4,
fetch_k: int = 20,
lambda_mult: float = 0.5,
exec_option: Optional[str] = None,
**kwargs: Any,
) -> List[Document]:
"""
Return docs selected using the maximal marginal relevance. Maximal marginal
relevance optimizes for similarity to query AND diversity among selected docs.
Examples:
>>> data = vector_store.max_marginal_relevance_search_by_vector(
... embedding=<your_embedding>,
... fetch_k=<elements_to_fetch_before_mmr_search>,
... k=<number_of_items_to_return>,
... exec_option=<preferred_exec_option>,
... )
Args:
embedding: Embedding to look up documents similar to.
k: Number of Documents to return. Defaults to 4.
fetch_k: Number of Documents to fetch for MMR algorithm.
lambda_mult: Number between 0 and 1 determining the degree of diversity.
0 corresponds to max diversity and 1 to min diversity. Defaults to 0.5.
exec_option (str): DeepLakeVectorStore supports 3 ways for searching.
Could be "python", "compute_engine" or "tensor_db". Defaults to
"python".
- "python" - Pure-python implementation running on the client.
Can be used for data stored anywhere. WARNING: using this
option with big datasets is discouraged due to potential
memory issues.
- "compute_engine" - Performant C++ implementation of the Deep
Lake Compute Engine. Runs on the client and can be used for
any data stored in or connected to Deep Lake. It cannot be used
with in-memory or local datasets.
- "tensor_db" - Performant, fully-hosted Managed Tensor Database.
Responsible for storage and query execution. Only available for
data stored in the Deep Lake Managed Database. To store datasets
in this database, specify `runtime = {"db_engine": True}`
during dataset creation.
**kwargs: Additional keyword arguments.
Returns:
List[Documents] - A list of documents.
"""
return self._search(
embedding=embedding,
k=k,
fetch_k=fetch_k,
use_maximal_marginal_relevance=True,
lambda_mult=lambda_mult,
exec_option=exec_option,
**kwargs,
)
def max_marginal_relevance_search(
self,
query: str,
k: int = 4,
fetch_k: int = 20,
lambda_mult: float = 0.5,
exec_option: Optional[str] = None,
**kwargs: Any,
) -> List[Document]:
"""Return docs selected using maximal marginal relevance.
Maximal marginal relevance optimizes for similarity to query AND diversity
among selected documents.
Examples:
>>> # Search using an embedding
>>> data = vector_store.max_marginal_relevance_search(
... query = <query_to_search>,
... embedding_function = <embedding_function_for_query>,
... k = <number_of_items_to_return>,
... exec_option = <preferred_exec_option>,
... )
Args:
query: Text to look up documents similar to.
k: Number of Documents to return. Defaults to 4.
fetch_k: Number of Documents for MMR algorithm.
lambda_mult: Value between 0 and 1. 0 corresponds
to maximum diversity and 1 to minimum.
Defaults to 0.5.
exec_option (str): Supports 3 ways to perform searching.
- "python" - Pure-python implementation running on the client.
Can be used for data stored anywhere. WARNING: using this
option with big datasets is discouraged due to potential
memory issues.
- "compute_engine" - Performant C++ implementation of the Deep
Lake Compute Engine. Runs on the client and can be used for
any data stored in or connected to Deep Lake. It cannot be
used with in-memory or local datasets.
- "tensor_db" - Performant, fully-hosted Managed Tensor Database.
Responsible for storage and query execution. Only available
for data stored in the Deep Lake Managed Database. To store
datasets in this database, specify
`runtime = {"db_engine": True}` during dataset creation.
**kwargs: Additional keyword arguments
Returns:
List of Documents selected by maximal marginal relevance.
Raises:
            ValueError: when MMR search is enabled but an embedding function is
                not specified.
"""
embedding_function = kwargs.get("embedding") or self._embedding_function
if embedding_function is None:
            raise ValueError(
                "For MMR search, you must specify an embedding function at"
                " creation time or during the add call."
)
return self._search(
query=query,
k=k,
fetch_k=fetch_k,
use_maximal_marginal_relevance=True,
lambda_mult=lambda_mult,
exec_option=exec_option,
embedding_function=embedding_function, # type: ignore
**kwargs,
)
@classmethod
def from_texts(
cls,
texts: List[str],
embedding: Optional[Embeddings] = None,
metadatas: Optional[List[dict]] = None,
ids: Optional[List[str]] = None,
dataset_path: str = _LANGCHAIN_DEFAULT_DEEPLAKE_PATH,
embedding_function: Optional[Embeddings] = None,
**kwargs: Any,
    ) -> DeepLake:
        """Create a Deep Lake dataset from raw documents.
If a dataset_path is specified, the dataset will be persisted in that location,
otherwise by default at `./deeplake`
Examples:
>>> # Search using an embedding
>>> vector_store = DeepLake.from_texts(
... texts = <the_texts_that_you_want_to_embed>,
... embedding_function = <embedding_function_for_query>,
... k = <number_of_items_to_return>,
... exec_option = <preferred_exec_option>,
... )
Args:
            dataset_path (str): The full path to the dataset. Can be:
- Deep Lake cloud path of the form ``hub://username/dataset_name``.
To write to Deep Lake cloud datasets,
ensure that you are logged in to Deep Lake
(use 'activeloop login' from command line)
                - AWS S3 path of the form ``s3://bucketname/path/to/dataset``.
                    Credentials are required in the environment.
                - Google Cloud Storage path of the form
                    ``gcs://bucketname/path/to/dataset``. Credentials are required
                    in the environment.
- Local file system path of the form ``./path/to/dataset`` or
``~/path/to/dataset`` or ``path/to/dataset``.
- In-memory path of the form ``mem://path/to/dataset`` which doesn't
save the dataset, but keeps it in memory instead.
Should be used only for testing as it does not persist.
            texts (List[str]): List of texts to add.
embedding (Optional[Embeddings]): Embedding function. Defaults to None.
Note, in other places, it is called embedding_function.
metadatas (Optional[List[dict]]): List of metadatas. Defaults to None.
ids (Optional[List[str]]): List of document IDs. Defaults to None.
**kwargs: Additional keyword arguments.
Returns:
DeepLake: Deep Lake dataset.
Raises:
ValueError: If 'embedding' is provided in kwargs. This is deprecated,
please use `embedding_function` instead.
"""
if embedding:
            raise ValueError(
                "Using `embedding` as `embedding_function` is deprecated. "
"Please use `embedding_function` instead."
)
deeplake_dataset = cls(
dataset_path=dataset_path, embedding_function=embedding_function, **kwargs
)
deeplake_dataset.add_texts(
texts=texts,
metadatas=metadatas,
ids=ids,
)
return deeplake_dataset
def delete(self, ids: Optional[List[str]] = None, **kwargs: Any) -> bool:
"""Delete the entities in the dataset.
Args:
ids (Optional[List[str]], optional): The document_ids to delete.
Defaults to None.
**kwargs: Other keyword arguments that subclasses might use.
- filter (Optional[Dict[str, str]], optional): The filter to delete by.
- delete_all (Optional[bool], optional): Whether to drop the dataset.
Returns:
bool: Whether the delete operation was successful.
"""
filter = kwargs.get("filter")
delete_all = kwargs.get("delete_all")
self.vectorstore.delete(ids=ids, filter=filter, delete_all=delete_all)
return True
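    # Illustrative `delete` usage (argument values below are hypothetical, not part of
    # this module):
    #
    #     db.delete(ids=["<id_1>", "<id_2>"])                  # remove specific documents
    #     db.delete(filter={"metadata": {"source": "x.pdf"}})  # remove by metadata filter
    #     db.delete(delete_all=True)                           # drop the entire dataset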
@classmethod
def force_delete_by_path(cls, path: str) -> None:
"""Force delete dataset by path.
Args:
path (str): path of the dataset to delete.
Raises:
ValueError: if deeplake is not installed.
"""
try:
import deeplake
except ImportError:
raise ValueError(
"Could not import deeplake python package. "
"Please install it with `pip install deeplake`."
)
deeplake.delete(path, large_ok=True, force=True)
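    # Example (hypothetical path): DeepLake.force_delete_by_path("hub://<org>/<dataset>")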
def delete_dataset(self) -> None:
"""Delete the collection."""
self.delete(delete_all=True)
def ds(self) -> Any:
logger.warning(
"this method is deprecated and will be removed, "
"better to use `db.vectorstore.dataset` instead."
)
return self.vectorstore.dataset
| [] |
2024-01-10 | sdelgadoc/langchain | langchain~llms~baseten.py | import logging
from typing import Any, Dict, List, Mapping, Optional
from pydantic import Field
from langchain.callbacks.manager import CallbackManagerForLLMRun
from langchain.llms.base import LLM
logger = logging.getLogger(__name__)
class Baseten(LLM):
"""Baseten models.
To use, you should have the ``baseten`` python package installed,
and run ``baseten.login()`` with your Baseten API key.
The required ``model`` param can be either a model id or model
version id. Using a model version ID will result in
slightly faster invocation.
Any other model parameters can also
be passed in with the format input={model_param: value, ...}
The Baseten model must accept a dictionary of input with the key
"prompt" and return a dictionary with a key "data" which maps
to a list of response strings.
Example:
.. code-block:: python
from langchain.llms import Baseten
my_model = Baseten(model="MODEL_ID")
output = my_model("prompt")
"""
model: str
input: Dict[str, Any] = Field(default_factory=dict)
model_kwargs: Dict[str, Any] = Field(default_factory=dict)
@property
def _identifying_params(self) -> Mapping[str, Any]:
"""Get the identifying parameters."""
return {
**{"model_kwargs": self.model_kwargs},
}
@property
def _llm_type(self) -> str:
"""Return type of model."""
return "baseten"
def _call(
self,
prompt: str,
stop: Optional[List[str]] = None,
run_manager: Optional[CallbackManagerForLLMRun] = None,
**kwargs: Any,
) -> str:
"""Call to Baseten deployed model endpoint."""
try:
import baseten
except ImportError as exc:
raise ImportError(
"Could not import Baseten Python package. "
"Please install it with `pip install baseten`."
) from exc
# get the model and version
try:
model = baseten.deployed_model_version_id(self.model)
response = model.predict({"prompt": prompt})
except baseten.common.core.ApiError:
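            # `model` was not a deployed model version id; fall back to treating it
            # as a model id.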
model = baseten.deployed_model_id(self.model)
response = model.predict({"prompt": prompt})
return "".join(response)
| [] |
2024-01-10 | sdelgadoc/langchain | langchain~llms~llamacpp.py | import logging
from typing import Any, Dict, Generator, List, Optional
from pydantic import Field, root_validator
from langchain.callbacks.manager import CallbackManagerForLLMRun
from langchain.llms.base import LLM
logger = logging.getLogger(__name__)
class LlamaCpp(LLM):
"""llama.cpp model.
To use, you should have the llama-cpp-python library installed, and provide the
path to the Llama model as a named parameter to the constructor.
Check out: https://github.com/abetlen/llama-cpp-python
Example:
.. code-block:: python
from langchain.llms import LlamaCpp
llm = LlamaCpp(model_path="/path/to/llama/model")
"""
client: Any #: :meta private:
model_path: str
"""The path to the Llama model file."""
lora_base: Optional[str] = None
"""The path to the Llama LoRA base model."""
    lora_path: Optional[str] = None
    """The path to the Llama LoRA. If None, no LoRA is loaded."""
n_ctx: int = Field(512, alias="n_ctx")
"""Token context window."""
n_parts: int = Field(-1, alias="n_parts")
"""Number of parts to split the model into.
If -1, the number of parts is automatically determined."""
seed: int = Field(-1, alias="seed")
"""Seed. If -1, a random seed is used."""
f16_kv: bool = Field(True, alias="f16_kv")
"""Use half-precision for key/value cache."""
logits_all: bool = Field(False, alias="logits_all")
"""Return logits for all tokens, not just the last token."""
vocab_only: bool = Field(False, alias="vocab_only")
"""Only load the vocabulary, no weights."""
use_mlock: bool = Field(False, alias="use_mlock")
"""Force system to keep model in RAM."""
n_threads: Optional[int] = Field(None, alias="n_threads")
"""Number of threads to use.
If None, the number of threads is automatically determined."""
n_batch: Optional[int] = Field(8, alias="n_batch")
"""Number of tokens to process in parallel.
Should be a number between 1 and n_ctx."""
n_gpu_layers: Optional[int] = Field(None, alias="n_gpu_layers")
"""Number of layers to be loaded into gpu memory. Default None."""
suffix: Optional[str] = Field(None)
"""A suffix to append to the generated text. If None, no suffix is appended."""
max_tokens: Optional[int] = 256
"""The maximum number of tokens to generate."""
temperature: Optional[float] = 0.8
"""The temperature to use for sampling."""
top_p: Optional[float] = 0.95
"""The top-p value to use for sampling."""
logprobs: Optional[int] = Field(None)
"""The number of logprobs to return. If None, no logprobs are returned."""
echo: Optional[bool] = False
"""Whether to echo the prompt."""
stop: Optional[List[str]] = []
"""A list of strings to stop generation when encountered."""
repeat_penalty: Optional[float] = 1.1
"""The penalty to apply to repeated tokens."""
top_k: Optional[int] = 40
"""The top-k value to use for sampling."""
last_n_tokens_size: Optional[int] = 64
"""The number of tokens to look back when applying the repeat_penalty."""
    use_mmap: Optional[bool] = True
    """Whether to use memory-mapping for the model weights."""
streaming: bool = True
"""Whether to stream the results, token by token."""
verbose: bool = True
"""Print verbose output to stderr."""
@root_validator()
def validate_environment(cls, values: Dict) -> Dict:
"""Validate that llama-cpp-python library is installed."""
model_path = values["model_path"]
model_param_names = [
"lora_path",
"lora_base",
"n_ctx",
"n_parts",
"seed",
"f16_kv",
"logits_all",
"vocab_only",
"use_mlock",
"n_threads",
"n_batch",
"use_mmap",
"last_n_tokens_size",
"verbose",
]
model_params = {k: values[k] for k in model_param_names}
# For backwards compatibility, only include if non-null.
if values["n_gpu_layers"] is not None:
model_params["n_gpu_layers"] = values["n_gpu_layers"]
try:
from llama_cpp import Llama
values["client"] = Llama(model_path, **model_params)
except ImportError:
raise ImportError(
"Could not import llama-cpp-python library. "
"Please install the llama-cpp-python library to "
"use this embedding model: pip install llama-cpp-python"
)
except Exception as e:
raise ValueError(
f"Could not load Llama model from path: {model_path}. "
f"Received error {e}"
)
return values
@property
def _default_params(self) -> Dict[str, Any]:
"""Get the default parameters for calling llama_cpp."""
return {
"suffix": self.suffix,
"max_tokens": self.max_tokens,
"temperature": self.temperature,
"top_p": self.top_p,
"logprobs": self.logprobs,
"echo": self.echo,
"stop_sequences": self.stop, # key here is convention among LLM classes
"repeat_penalty": self.repeat_penalty,
"top_k": self.top_k,
}
@property
def _identifying_params(self) -> Dict[str, Any]:
"""Get the identifying parameters."""
return {**{"model_path": self.model_path}, **self._default_params}
@property
def _llm_type(self) -> str:
"""Return type of llm."""
return "llamacpp"
def _get_parameters(self, stop: Optional[List[str]] = None) -> Dict[str, Any]:
"""
Performs sanity check, preparing parameters in format needed by llama_cpp.
Args:
stop (Optional[List[str]]): List of stop sequences for llama_cpp.
Returns:
Dictionary containing the combined parameters.
"""
# Raise error if stop sequences are in both input and default params
if self.stop and stop is not None:
raise ValueError("`stop` found in both the input and default params.")
params = self._default_params
# llama_cpp expects the "stop" key not this, so we remove it:
params.pop("stop_sequences")
# then sets it as configured, or default to an empty list:
params["stop"] = self.stop or stop or []
return params
def _call(
self,
prompt: str,
stop: Optional[List[str]] = None,
run_manager: Optional[CallbackManagerForLLMRun] = None,
**kwargs: Any,
) -> str:
"""Call the Llama model and return the output.
Args:
prompt: The prompt to use for generation.
stop: A list of strings to stop generation when encountered.
Returns:
The generated text.
Example:
.. code-block:: python
from langchain.llms import LlamaCpp
llm = LlamaCpp(model_path="/path/to/local/llama/model.bin")
llm("This is a prompt.")
"""
if self.streaming:
# If streaming is enabled, we use the stream
# method that yields as they are generated
# and return the combined strings from the first choices's text:
combined_text_output = ""
for token in self.stream(prompt=prompt, stop=stop, run_manager=run_manager):
combined_text_output += token["choices"][0]["text"]
return combined_text_output
else:
params = self._get_parameters(stop)
params = {**params, **kwargs}
result = self.client(prompt=prompt, **params)
return result["choices"][0]["text"]
def stream(
self,
prompt: str,
stop: Optional[List[str]] = None,
run_manager: Optional[CallbackManagerForLLMRun] = None,
    ) -> Generator[Dict, None, None]:
        """Yields result objects as they are generated in real time.
BETA: this is a beta feature while we figure out the right abstraction.
Once that happens, this interface could change.
It also calls the callback manager's on_llm_new_token event with
similar parameters to the OpenAI LLM class method of the same name.
Args:
prompt: The prompts to pass into the model.
stop: Optional list of stop words to use when generating.
Returns:
A generator representing the stream of tokens being generated.
Yields:
            Dictionary-like objects containing a string token and metadata.
See llama-cpp-python docs and below for more.
Example:
.. code-block:: python
from langchain.llms import LlamaCpp
llm = LlamaCpp(
model_path="/path/to/local/model.bin",
temperature = 0.5
)
for chunk in llm.stream("Ask 'Hi, how are you?' like a pirate:'",
stop=["'","\n"]):
result = chunk["choices"][0]
print(result["text"], end='', flush=True)
"""
params = self._get_parameters(stop)
result = self.client(prompt=prompt, stream=True, **params)
for chunk in result:
token = chunk["choices"][0]["text"]
log_probs = chunk["choices"][0].get("logprobs", None)
if run_manager:
run_manager.on_llm_new_token(
token=token, verbose=self.verbose, log_probs=log_probs
)
yield chunk
def get_num_tokens(self, text: str) -> int:
tokenized_text = self.client.tokenize(text.encode("utf-8"))
return len(tokenized_text)
| [] |
2024-01-10 | kkulesz/university-projects | master~sem2~USD~USD_acer_acerx~acer~acaerax_policies.py | from stable_baselines3.common.policies import (
ActorCriticCnnPolicy,
ActorCriticPolicy,
MultiInputActorCriticPolicy,
)
from functools import partial
from typing import Tuple
import numpy as np
import torch as th
from stable_baselines3.common.distributions import (
DiagGaussianDistribution,
Distribution,
)
from stable_baselines3.common.type_aliases import Schedule
import torch.nn as nn
class ACERAXActorCriticPolicy(ActorCriticPolicy):
def __init__(
self,
*args,
**kwargs
):
super(ACERAXActorCriticPolicy, self).__init__(*args, **kwargs)
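        # Extra head, separate from the shared MLP extractor: maps the extracted
        # features to a per-action dispersion value, which is passed to the Gaussian
        # action distribution in place of the usual state-independent log_std.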
dispersion_net = []
dispersion_net.append(nn.Linear(self.features_dim, 64))
dispersion_net.append(nn.Tanh())
dispersion_net.append(nn.Linear(64, self.action_dist.action_dim))
self.latent_dim_dp = 64
self.dispersion_net = nn.Sequential(*dispersion_net).to(self.device)
def _build(self, lr_schedule: Schedule) -> None:
"""
Create the networks and the optimizer.
:param lr_schedule: Learning rate schedule
lr_schedule(1) is the initial learning rate
"""
self._build_mlp_extractor()
latent_dim_pi = self.mlp_extractor.latent_dim_pi
if isinstance(self.action_dist, DiagGaussianDistribution):
self.action_net, self.log_std = self.action_dist.proba_distribution_net(
latent_dim=latent_dim_pi, log_std_init=self.log_std_init
)
else:
raise NotImplementedError(f"Unsupported distribution '{self.action_dist}'.")
self.value_net = nn.Linear(self.mlp_extractor.latent_dim_vf, 1)
# Init weights: use orthogonal initialization
# with small initial weight for the output
if self.ortho_init:
# TODO: check for features_extractor
# Values from stable-baselines.
# features_extractor/mlp values are
# originally from openai/baselines (default gains/init_scales).
module_gains = {
self.features_extractor: np.sqrt(2),
self.mlp_extractor: np.sqrt(2),
self.action_net: 0.01,
self.value_net: 1,
}
for module, gain in module_gains.items():
module.apply(partial(self.init_weights, gain=gain))
# Setup optimizer with initial learning rate
self.optimizer = self.optimizer_class(self.parameters(), lr=lr_schedule(1), **self.optimizer_kwargs)
def forward(self, obs: th.Tensor, deterministic: bool = False) -> Tuple[th.Tensor, th.Tensor, th.Tensor]:
"""
Forward pass in all the networks (actor and critic)
:param obs: Observation
:param deterministic: Whether to sample or use deterministic actions
:return: action, value and log probability of the action
"""
# Preprocess the observation if needed
features = self.extract_features(obs)
latent_pi, latent_vf = self.mlp_extractor(features)
# Evaluate the values for the given observations
values = self.value_net(latent_vf)
dispersion = self.dispersion_net(features)
distribution = self._get_action_dist_from_latent(latent_pi, dispersion)
actions = distribution.get_actions(deterministic=deterministic)
log_prob = distribution.log_prob(actions)
return actions, values, log_prob
def _get_action_dist_from_latent(self, latent_pi: th.Tensor, dispersion: th.Tensor) -> Distribution:
"""
Retrieve action distribution given the latent codes.
:param latent_pi: Latent code for the actor
:return: Action distribution
"""
mean_actions = self.action_net(latent_pi)
if isinstance(self.action_dist, DiagGaussianDistribution):
return self.action_dist.proba_distribution(mean_actions, dispersion)
else:
raise ValueError("Invalid action distribution")
def evaluate_actions(self, obs: th.Tensor, actions: th.Tensor) -> Tuple[th.Tensor, th.Tensor, th.Tensor]:
"""
Evaluate actions according to the current policy,
given the observations.
:param obs:
:param actions:
:return: estimated value, log likelihood of taking those actions
and entropy of the action distribution.
"""
# Preprocess the observation if needed
features = self.extract_features(obs)
latent_pi, latent_vf = self.mlp_extractor(features)
dispersion = self.dispersion_net(features)
distribution = self._get_action_dist_from_latent(latent_pi, dispersion)
log_prob = distribution.log_prob(actions)
values = self.value_net(latent_vf)
return values, log_prob, distribution.entropy()
def get_distribution(self, obs: th.Tensor) -> Distribution:
"""
Get the current policy distribution given the observations.
:param obs:
:return: the action distribution.
"""
features = self.extract_features(obs)
latent_pi = self.mlp_extractor.forward_actor(features)
dispersion = self.dispersion_net(features)
return self._get_action_dist_from_latent(latent_pi, dispersion)
def get_dispersion(self, obs: th.Tensor) -> th.Tensor:
features = self.extract_features(obs)
dispersion = self.dispersion_net(features)
return dispersion
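# Rough usage sketch (the algorithm class name and keyword arguments below are
# assumptions, not defined in this file): the policy is handed to the accompanying
# ACERAX algorithm like any Stable-Baselines3 policy, e.g.
#
#     model = ACERAX(ACERAXActorCriticPolicy, env, verbose=1)
#     model.learn(total_timesteps=100_000)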
| [] |
2024-01-10 | rocket-science-ch/rocketrosti | rrosti~snippets~abstract_snippet_database.py | # Copyright (c) 2023 Rocket Science AG, Switzerland
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
"""
Base class for vector databases of snippets.
A snippet database is a collection of snippets that can be searched by vector similarity.
This is used to find the most relevant snippets for a query.
"""
from __future__ import annotations
import asyncio
from abc import ABC, abstractmethod
from typing import Iterable
from loguru import logger
from rrosti.llm_api import openai_api
from rrosti.snippets.snippet import Snippet
from rrosti.utils.token_count import token_count
def _count_tokens(question: str, snippets: list[str]) -> int:
"""Returns the token count of the query that would be sent to the LLM."""
# FIXME: This estimates the token count in some way by procuding something that looks a bit like a prompt.
# It used to be sillier, using an old jinja template that was not used anywhere else.
# Now it tries to emulate that somewhat for compatibility.
sysmsg = "\n\n".join(f"## Extract #{i+1}:\n\n{snippet}" for i, snippet in enumerate(snippets))
return token_count(sysmsg) + token_count(question) + 80
class AbstractSnippetDatabase(ABC):
"""
Base class for vector databases of snippets.
A snippet database is a collection of snippets that can be searched by vector similarity.
This is used to find the most relevant snippets for a query.
"""
@abstractmethod
def has_id(self, id: str) -> bool:
"""Check if the id (text hash) is in the database."""
@abstractmethod
def __contains__(self, snippet: Snippet) -> bool:
"""Check if the snippet is in the database."""
@abstractmethod
def get_by_id(self, id: str) -> Snippet | None:
"""Get a snippet by its id (text hash)."""
@abstractmethod
def add_snippet(self, snippet: Snippet) -> Snippet:
"""
Add a snippet to the database.
Returns the snippet that was added, which may be different from the input snippet
if the snippet was already in the database.
"""
@abstractmethod
def add_snippets(self, snippets: Iterable[Snippet]) -> None:
"""Add multiple snippets to the database. Upserts."""
@abstractmethod
async def find_nearest_raw(
self, openai_provider: openai_api.OpenAIApiProvider, query: Snippet, n_results: int
) -> list[Snippet]:
"""
Find the nearest raw (unmerged) snippets to the query.
Returns a list of snippets sorted by distance.
"""
async def find_nearest_merged(
self, openai_provider: openai_api.OpenAIApiProvider, query: Snippet, max_tokens: int, n_merge_candidates: int
) -> list[Snippet]:
"""
query: The query to find snippets for. Used to determine the total length of snippets.
max_tokens: The maximum number of tokens to return.
n_merge_candidates: How many raw snippets to fetch and consider for merging.
        Returns a list of 1..M merged snippets, where M <= n_merge_candidates.
Distances are not returned because they are not well defined for merged snippets.
"""
await query.async_ensure_embedding(openai_provider)
candidates = await self.find_nearest_raw(openai_provider, query, n_merge_candidates)
while True:
merged = Snippet.merge_list(candidates)
logger.debug("{} candidates merged into {} snippets", len(candidates), len(merged))
# FIXME: This looks fishy: I think only qa_token_count is used from that module, and it relies on a prompt
# template that we don't use otherwise.
token_count = await asyncio.to_thread(_count_tokens, query.text, [m.text for m in merged])
if token_count > max_tokens:
# too long
logger.debug("{} snippets is too long: {} > {}", len(candidates), token_count, max_tokens)
candidates = candidates[:-1]
else:
logger.info("Going to query with {} snippets, {} after merging", len(candidates), len(merged))
logger.debug("Tokens: {} <= {}", token_count, max_tokens)
return merged
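    # Minimal illustrative call (variable names are placeholders, not defined here):
    #
    #     merged = await db.find_nearest_merged(
    #         provider, query_snippet, max_tokens=3000, n_merge_candidates=32
    #     )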
| [] |
2024-01-10 | rocket-science-ch/rocketrosti | tests~chat~state_machine~test_execution.py | # Copyright (c) 2023 Rocket Science AG, Switzerland
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
from typing import Iterable
import aioresponses
import numpy as np
import pytest
import requests_mock as rmock
from pytest_mock import MockerFixture
from rrosti.chat import chat_session
from rrosti.chat.state_machine import execution, interpolable, parsing
from rrosti.llm_api import openai_api
from rrosti.servers.websocket_query_server import Frontend
from rrosti.snippets.abstract_snippet_database import AbstractSnippetDatabase
from rrosti.snippets.snippet import Snippet
def make_state_machine_runner(mocker: MockerFixture, code: str) -> execution.StateMachineRunner:
# openai_provider = openai_api_direct.DirectOpenAIApiProvider()
openai_provider = mocker.Mock(spec=openai_api.OpenAIApiProvider)
llm: chat_session.LLM = mocker.Mock(spec=chat_session.LLM, openai_provider=openai_provider)
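    # Canned completion returned by the mocked LLM; individual tests overwrite
    # `chat_completion.return_value` when they need a specific assistant message.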
llm.chat_completion.return_value = chat_session.Message( # type: ignore[attr-defined]
role="assistant",
importance=chat_session.Importance.LOW,
ttl=None,
text="Hello, world from chat completion!",
)
frontend: Frontend = mocker.Mock(spec=Frontend)
sm = parsing.loads_from_yaml(code)
return execution.StateMachineRunner(sm=sm, llm=llm, frontend=frontend, openai_provider=openai_provider)
@pytest.fixture(autouse=True)
def _no_network(requests_mock: rmock.Mocker) -> Iterable[None]:
with aioresponses.aioresponses():
yield
async def test_execution(mocker: MockerFixture) -> None:
runner = make_state_machine_runner(
mocker,
r"""
agents:
- name: agent1
states:
- name: initial
action:
- goto: some_state
- name: some_state
conditions:
- default:
action:
- end
""",
)
await runner.run()
SIMPLE_PYTHON_YAML = r"""
agents:
- name: agent1
states:
- name: initial
action:
- goto: some_state
- name: some_state
conditions:
- default:
action:
- message: "The Python output is:\n\n{python()}\n\nEnd of Python output."
- end
"""
async def test_python(mocker: MockerFixture) -> None:
runner = make_state_machine_runner(mocker, SIMPLE_PYTHON_YAML)
# The LLM gives us a message with a Python block.
runner._llm.chat_completion.return_value = chat_session.Message( # type: ignore[attr-defined]
role="assistant",
importance=chat_session.Importance.LOW,
ttl=1,
text=f"""
Let's execute some Python.
$$$python
print(f"Hello, world! 1+1={1+1}")
$$$
How does that look?
""".strip(),
)
await runner.run()
# The runner should have appended a message with the Python output.
msg = runner._agent_runners[0]._session.messages[-1]
assert msg.role == "user"
assert msg.text == "The Python output is:\n\nHello, world! 1+1=2\n\nEnd of Python output."
def assert_no_python_block_found_error(runner: execution.StateMachineRunner) -> None:
assert len(runner._agent_runners[0]._session.messages) == 2
msg = runner._agent_runners[0]._session.messages[-1].text
assert msg.startswith("The Python output is:\n\n")
assert msg.endswith("\n\nEnd of Python output.")
msg = msg.removeprefix("The Python output is:\n\n").removesuffix("\n\nEnd of Python output.")
assert msg.startswith("Your message did not contain a Python code block.")
async def test_no_python_block(mocker: MockerFixture) -> None:
runner = make_state_machine_runner(mocker, SIMPLE_PYTHON_YAML)
# The LLM gives us a message with a Python block.
runner._llm.chat_completion.return_value = chat_session.Message( # type: ignore[attr-defined]
role="assistant",
ttl=None,
importance=chat_session.Importance.LOW,
text="""
Let's execute some Python.
How does that look?
""".strip(),
)
await runner.run()
assert_no_python_block_found_error(runner)
async def test_unterminated_python_block(mocker: MockerFixture) -> None:
runner = make_state_machine_runner(mocker, SIMPLE_PYTHON_YAML)
# The LLM gives us a message with a Python block.
runner._llm.chat_completion.return_value = chat_session.Message( # type: ignore[attr-defined]
role="assistant",
importance=chat_session.Importance.LOW,
ttl=1,
text="""
Let's execute some Python.
$$$python
print("foo")
How does that look?
""".strip(),
)
await runner.run()
assert_no_python_block_found_error(runner)
SIMPLE_RTFM_YAML = r"""
agents:
- name: agent1
states:
- name: initial
action:
- system_message: "Here are your instructions."
- goto: some_state
- name: some_state
conditions:
- default:
action:
- message: "The RTFM output is:\n\n{rtfm()}\n\nEnd of RTFM output."
- end
"""
@pytest.fixture
def snippet_db(mocker: MockerFixture) -> AbstractSnippetDatabase:
mocker.patch("rrosti.chat.state_machine.interpolable._get_database")
db = mocker.Mock(spec=AbstractSnippetDatabase)
db.find_nearest_merged.return_value = [
Snippet("hello, world", source_filename="source1", start_offset=0, page_start=1, page_end=1),
Snippet("goodbye, cruel world", source_filename="source2", start_offset=10, page_start=10, page_end=10),
]
interpolable._get_database.return_value = db # type: ignore[attr-defined]
return db # type: ignore[no-any-return]
def assert_rtfm_output_is(runner: execution.StateMachineRunner, expected: str) -> None:
assert len(runner._agent_runners[0]._session.messages) == 3
msg = runner._agent_runners[0]._session.messages[-1].text
assert msg.startswith("The RTFM output is:\n\n")
assert msg.endswith("\n\nEnd of RTFM output.")
msg = msg.removeprefix("The RTFM output is:\n\n").removesuffix("\n\nEnd of RTFM output.")
assert msg == expected
async def mock_query_embedding_async(snippets: list[str]) -> openai_api.EmbeddingResponse:
return openai_api.EmbeddingResponse(
snippets=snippets[:],
embeddings=np.ones((len(snippets), 123), dtype=np.float32),
model="fake_model",
prompt_tokens=42,
)
async def test_rtfm_stub(mocker: MockerFixture, snippet_db: AbstractSnippetDatabase) -> None:
runner = make_state_machine_runner(mocker, SIMPLE_RTFM_YAML)
# The LLM gives us a message with an rtfm block
runner._llm.chat_completion.return_value = chat_session.Message( # type: ignore[attr-defined]
role="assistant",
ttl=1,
importance=chat_session.Importance.LOW,
text="""
Let's execute some RTFM.
$$$rtfm
Something here.
$$$
How does that look?
""".strip(),
)
EXPECTED_OUTPUT = """
Extract #123:
hello, world
-----
Extract #124:
goodbye, cruel world
""".strip()
runner._openai_provider.acreate_embedding.side_effect = mock_query_embedding_async # type: ignore[attr-defined]
runner.frontend.handle_rtfm_output.return_value = 123 # type: ignore[attr-defined]
await runner.run()
assert_rtfm_output_is(runner, EXPECTED_OUTPUT)
COMPLEX_YAML = """
config:
model: model_global
agents:
- name: agent_1
states:
- name: initial
model: model_initial
action:
- message: x
- goto: main
- name: main
conditions:
- if:
contains: 'cond1'
then:
model: model_cond1
action:
- message: 'user_input'
- if:
contains: 'cond3'
then:
model: model_cond3
action:
- send:
to: agent_2
next_state: main
- default:
model: model_default
action:
- message: "user_input"
- name: agent_2
states:
- name: initial
model: model_initial2
action:
- message: "agent 2 initial"
- goto: main
- name: main
conditions:
- if:
contains: 'cond6'
then:
model: model_cond6
action:
- send:
to: agent_1
next_state: main
- default:
model: model_default2
action:
- message: "x"
""".strip()
async def test_complex_cond1(mocker: MockerFixture) -> None:
# 1. message "x" is appended
# 2. LLM is executed with model_initial; output "cond1"
# 3. message "user_input" is appended
runner = make_state_machine_runner(mocker, COMPLEX_YAML)
runner._llm.chat_completion.return_value = chat_session.Message( # type: ignore[attr-defined]
role="assistant",
importance=chat_session.Importance.LOW,
ttl=None,
text="cond1: something",
)
await runner.step()
runner1 = runner._agent_runners[0]
runner2 = runner._agent_runners[1]
assert len(runner._agent_runners[0]._session.messages) == 3
assert runner1._session.messages[0].role == "user"
assert runner1._session.messages[0].text == "x"
assert runner1._session.messages[1].role == "assistant"
assert runner1._session.messages[1].role == "assistant"
assert runner1._session.messages[1].text == "cond1: something"
assert runner1._session.messages[2].role == "user"
assert runner1._session.messages[2].text == "user_input"
assert runner._llm.chat_completion.call_count == 1 # type: ignore[attr-defined]
assert runner._llm.chat_completion.call_args[1]["model"] == "model_initial" # type: ignore[attr-defined]
# execute one more step. The LLM should be invoked with model_cond1.
runner._llm.chat_completion.reset_mock() # type: ignore[attr-defined]
runner._llm.chat_completion.return_value = chat_session.Message( # type: ignore[attr-defined]
role="assistant",
importance=chat_session.Importance.LOW,
ttl=None,
text="cond1: something else",
)
await runner.step()
# No messages should have been sent to the other agent
assert len(runner2._session.messages) == 0
assert len(runner1._session.messages) == 5
assert runner1._session.messages[3].role == "assistant"
assert runner1._session.messages[3].text == "cond1: something else"
assert runner1._session.messages[4].role == "user"
assert runner1._session.messages[4].text == "user_input"
assert runner._llm.chat_completion.call_count == 1 # type: ignore[attr-defined]
assert runner._llm.chat_completion.call_args[1]["model"] == "model_cond1" # type: ignore[attr-defined]
async def test_complex_cond3(mocker: MockerFixture) -> None:
# 1. message "x" is appended
# 2. LLM is executed with model_initial; output "cond3"
# 3. LLM output is sent to agent_2
runner = make_state_machine_runner(mocker, COMPLEX_YAML)
runner._llm.chat_completion.return_value = chat_session.Message( # type: ignore[attr-defined]
role="assistant",
importance=chat_session.Importance.LOW,
ttl=None,
text="cond3: something",
)
await runner.step()
runner1 = runner._agent_runners[0]
runner2 = runner._agent_runners[1]
assert len(runner._agent_runners[0]._session.messages) == 2
assert runner1._session.messages[0].role == "user"
assert runner1._session.messages[0].text == "x"
assert runner1._session.messages[1].role == "assistant"
assert runner1._session.messages[1].role == "assistant"
assert runner1._session.messages[1].text == "cond3: something"
assert runner2._session.messages[0].role == "user"
assert runner2._session.messages[0].text == "agent 2 initial"
assert len(runner2._session.messages) == 2
assert runner2._session.messages[1].role == "user"
assert runner2._session.messages[1].text == "cond3: something"
assert runner._llm.chat_completion.call_count == 1 # type: ignore[attr-defined]
assert runner._llm.chat_completion.call_args[1]["model"] == "model_initial" # type: ignore[attr-defined]
| [] |
2024-01-10 | rocket-science-ch/rocketrosti | rrosti~servers~serve_data_retrieval_ws.py | # Copyright (c) 2023 Rocket Science AG, Switzerland
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
"""A websocket query server that uses the data retrieval engine to answer questions."""
import argparse
import asyncio
import logging
import sys
import tracemalloc
from loguru import logger
import rrosti.utils.config
from rrosti.chat.chat_session import OpenAI, UserInputLLM
from rrosti.chat.state_machine import execution
from rrosti.llm_api import openai_api_direct
from rrosti.query import logging as qlog
from rrosti.servers import data_retrieval_engine, websocket_query_server
from rrosti.utils import misc
from rrosti.utils.config import config
from rrosti.utils.misc import ProgramArgsBase
class ProgramArgs(websocket_query_server.ProgramArgsMixin, execution.ProgramArgsMixin, ProgramArgsBase):
temperature: float
@classmethod
def _add_args(cls, parser: argparse.ArgumentParser) -> None:
super()._add_args(parser)
parser.add_argument(
"--temperature",
type=float,
default=config.openai_api.completion_temperature,
help="The temperature of the OpenAI Model",
)
async def main() -> None:
rrosti.utils.config.load()
openai_provider = openai_api_direct.DirectOpenAIApiProvider()
tracemalloc.start()
misc.setup_logging()
args = ProgramArgs.parse_args()
logging.getLogger("matplotlib").setLevel(logging.INFO)
logging.getLogger("asyncio").setLevel(logging.INFO if not args.debug_asyncio else logging.DEBUG)
logging.getLogger("urllib3").setLevel(logging.INFO)
logging.getLogger("websockets").setLevel(logging.INFO)
# logging.getLogger("openai").setLevel(logging.INFO)
qlog.ServerStartedEvent.log(args=sys.argv)
engine: websocket_query_server.QueryEngineBase
if args.user_simulate_llm:
logger.info("Starting query server, using a user simulated LLM")
engine = data_retrieval_engine.DataQueryEngine(llm=UserInputLLM(), openai_provider=openai_provider)
else:
logger.info("Starting query server")
engine = data_retrieval_engine.DataQueryEngine(
llm=OpenAI(openai_api_direct.DirectOpenAIApiProvider(), temperature=args.temperature),
openai_provider=openai_provider,
)
asyncio.get_event_loop().set_debug(args.debug_asyncio)
await engine.aserve_forever(args)
if __name__ == "__main__":
asyncio.run(main())
| [] |
2024-01-10 | rocket-science-ch/rocketrosti | rrosti~snippets~sklearn_snippet_database.py | # Copyright (c) 2023 Rocket Science AG, Switzerland
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
"""An in-memory snippet vector database using sklearn's NearestNeighbors."""
from __future__ import annotations
import asyncio
from typing import Iterable
from loguru import logger
from overrides import override
from sklearn.neighbors import NearestNeighbors # type: ignore[import]
from rrosti.llm_api import openai_api
from rrosti.snippets.abstract_snippet_database import AbstractSnippetDatabase
from rrosti.snippets.snippet import Snippet
from rrosti.utils.misc import FloatArray
class SklearnSnippetDatabase(AbstractSnippetDatabase):
"""An in-memory snippet vector database using sklearn's NearestNeighbors."""
_nbrs: NearestNeighbors
_snippets: list[Snippet]
_embeddings: FloatArray
_id_to_index_map: dict[str, int]
def __init__(self, snippets: list[Snippet]) -> None:
self._snippets = Snippet._drop_duplicates(snippets)
self._id_to_index_map = {snippet.hash: i for i, snippet in enumerate(self._snippets)}
self._embeddings = Snippet.consolidate_embeddings(self._snippets)
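        # Exact brute-force cosine-distance index over all snippet embeddings;
        # simple and accurate, but each query costs O(number of snippets).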
self._nbrs = NearestNeighbors(n_neighbors=1, algorithm="brute", metric="cosine", n_jobs=-1).fit(
self._embeddings
)
logger.info(f"Initialized SklearnSnippetDatabase with {len(self._snippets)} snippets")
@override
def has_id(self, id: str) -> bool:
return id in self._id_to_index_map
@override
def __contains__(self, snippet: Snippet) -> bool:
return snippet.hash in self._id_to_index_map
@override
def get_by_id(self, id: str) -> Snippet | None:
if id not in self._id_to_index_map:
return None
return self._snippets[self._id_to_index_map[id]]
@override
def add_snippet(self, snippet: Snippet) -> Snippet:
raise NotImplementedError("TODO: implement this")
@override
def add_snippets(self, snippets: Iterable[Snippet]) -> None:
raise NotImplementedError("TODO: implement this")
@override
async def find_nearest_raw(
self, openai_provider: openai_api.OpenAIApiProvider, query: Snippet, n_results: int
) -> list[Snippet]:
emb = (await query.async_get_embedding(openai_provider)).reshape(1, -1)
logger.info("Finding {} neighbors...", n_results)
_, indices = await asyncio.to_thread(self._nbrs.kneighbors, emb, n_results)
logger.info("Found neighbors")
return [self._snippets[i] for i in indices[0]]
| [] |
2024-01-10 | rocket-science-ch/rocketrosti | rrosti~servers~data_retrieval_engine.py | # Copyright (c) 2023 Rocket Science AG, Switzerland
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
"""
Allow users to ask questions about data. Answer those questions by delegating them to an LLM,
which in turn can execute specific functions like rtfm to answer them.
"""
from __future__ import annotations
import asyncio
import sys
import marshmallow as ma
from overrides import override
from rrosti.chat import chat_session
from rrosti.chat.state_machine import execution, interpolable
from rrosti.chat.state_machine.execution import load_and_run
from rrosti.llm_api import openai_api
from rrosti.servers.websocket_query_server import Frontend, QueryEngineBase
class DataQueryEngine(QueryEngineBase):
_llm: chat_session.LLM
_openai_provider: openai_api.OpenAIApiProvider
def __init__(self, llm: chat_session.LLM, openai_provider: openai_api.OpenAIApiProvider) -> None:
self._llm = llm
self._openai_provider = openai_provider
@override
async def ensure_loaded(self) -> None:
await asyncio.gather(interpolable.ensure_loaded(), execution.ensure_loaded())
@override
async def arun(self, frontend: Frontend) -> None:
try:
await load_and_run(llm=self._llm, frontend=frontend, openai_provider=self._openai_provider)
except ma.exceptions.ValidationError as e:
print(e.messages, sys.stderr)
raise
| [] |
2024-01-10 | rocket-science-ch/rocketrosti | rrosti~chat~state_machine~execution.py | # Copyright (c) 2023 Rocket Science AG, Switzerland
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
"""State machine execution."""
from __future__ import annotations
import argparse
import asyncio
import sys
import uuid
from enum import Enum
import marshmallow as ma
from loguru import logger
from overrides import override
from ruamel.yaml import YAML
from rrosti.chat import chat_session
from rrosti.chat.chat_session import Importance, Message
from rrosti.chat.state_machine import ast, interpolable, parsing
from rrosti.llm_api import openai_api, openai_api_direct
from rrosti.query import logging as qlog
from rrosti.servers.websocket_query_server import Frontend, UserInputMessage, WebFrontend
from rrosti.utils import misc
from rrosti.utils.config import config
from rrosti.utils.misc import ProgramArgsBase, ProgramArgsMixinProtocol, truncate_json_strings
yaml = YAML(typ=["rt", "string"])
_NO_PYTHON_CODE_BLOCK_FOUND_MSG = """Your message did not contain a Python code block.
It should look like this:
$$$python
print("Hello world!")
$$$
Please try again. Do not apologize."""
_NO_RTFM_CODE_BLOCK_FOUND_MSG = """Your message did not contain an rtfm code block.
It should look like this:
$$$rtfm
block contents here
$$$
Please try again. Do not apologize."""
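# Default importance and time-to-live (ttl) attached to the messages produced by each
# interpolable below; the values are forwarded unchanged into InterpolableOutput.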
_IMPORTANCE_USER_INPUT_INTERPOLABLE = Importance.MEDIUM
_TTL_USER_INPUT_INTERPOLABLE: int | None = None
_IMPORTANCE_PYTHON_INTERPOLABLE = Importance.LOW
_TTL_PYTHON_INTERPOLABLE: int | None = None
_IMPORTANCE_RTFM_INTERPOLABLE = Importance.NOISE
_TTL_RTFM_INTERPOLABLE: int | None = 2
class ProgramArgsMixin(ProgramArgsMixinProtocol):
user_simulate_llm: bool
@classmethod
def _add_args(cls, parser: argparse.ArgumentParser) -> None:
super()._add_args(parser)
parser.add_argument(
"--user-simulate-llm",
action="store_true",
help="Instead of using an actual LLM, ask for user input in the console.",
)
class _Python(interpolable.Interpolable):
"""Implementation of {python()}."""
_show_output = "Python Output:\n\n"
_python_vars: dict[str, object]
def __init__(self, python_vars: dict[str, object]) -> None:
self._python_vars = python_vars
@override
def is_for_me(self, code: str) -> bool:
return code == "python()"
# TODO: make async
@override
async def execute(self, last_msg: str | None, frontend: Frontend) -> interpolable.InterpolableOutput:
if last_msg is None:
logger.error("python() executed without a message in context")
assert False
try:
python_out = interpolable.execute_python_in_msg(last_msg, self._python_vars)
except interpolable.NoCodeBlockFoundError:
return interpolable.InterpolableOutput(
output=_NO_PYTHON_CODE_BLOCK_FOUND_MSG,
info_prefix="ERROR:\n\n",
importance=_IMPORTANCE_PYTHON_INTERPOLABLE,
ttl=_TTL_PYTHON_INTERPOLABLE,
)
else:
frontend.handle_python_output(python_out)
return interpolable.InterpolableOutput(
output=python_out._output,
info_prefix=self._show_output,
importance=_IMPORTANCE_PYTHON_INTERPOLABLE,
ttl=_TTL_PYTHON_INTERPOLABLE,
)
class _Rtfm(interpolable.Interpolable):
"""Implementation of {rtfm()}."""
_show_output = "RTFM output:\n\n"
_openai_provider: openai_api.OpenAIApiProvider
def __init__(self, openai_provider: openai_api.OpenAIApiProvider) -> None:
self._openai_provider = openai_provider
@override
def is_for_me(self, code: str) -> bool:
return code == "rtfm()"
@override
async def execute(self, last_msg: str | None, frontend: Frontend) -> interpolable.InterpolableOutput:
if last_msg is None:
logger.error("rtfm() executed without a message in context")
assert False
try:
snippets = await interpolable.execute_rtfm_in_msg(self._openai_provider, last_msg)
except interpolable.NoCodeBlockFoundError:
return interpolable.InterpolableOutput(
output=_NO_RTFM_CODE_BLOCK_FOUND_MSG,
info_prefix="ERROR:\n\n",
importance=_IMPORTANCE_RTFM_INTERPOLABLE,
ttl=_TTL_RTFM_INTERPOLABLE,
)
else:
start_index = frontend.handle_rtfm_output(snippets)
output_str = "\n-----\n".join(
f"Extract #{i + start_index}:\n\n{snip.text}" for i, snip in enumerate(snippets)
).strip()
return interpolable.InterpolableOutput(
output=output_str,
info_prefix=self._show_output,
importance=_IMPORTANCE_RTFM_INTERPOLABLE,
ttl=_TTL_RTFM_INTERPOLABLE,
)
class _ActionHandler(ast.AsyncActionVisitor):
runner: _AgentRunner
config: ast.Config | None
def __init__(self, runner: _AgentRunner) -> None:
self.runner = runner
self.config = None
@override
async def message_action(self, action: ast.MessageAction) -> None:
assert self.config is not None
logger.info(
"[Agent {}]: Message action ({}): {}",
self.runner.name,
action.role,
truncate_json_strings(action.text),
)
text = action.text
importances: set[Importance] = set()
ttls: set[int | None] = set()
# handle function invocations
for placeholder, code in action.placeholders.items():
logger.info("[Agent {}]: Handling function invocation: {}", self.runner.name, code)
interpolable_output: interpolable.InterpolableOutput
for interp in self.runner._interpolables:
if interp.is_for_me(code):
interpolable_output = await interp.execute(
self.runner._session.messages[-1].text,
frontend=self.runner._sm_runner.frontend,
)
if interpolable_output.info_prefix:
await self.runner._sm_runner.frontend.send_message(
interpolable_output.info_prefix + interpolable_output.output
)
break
else:
raise ValueError(f"Don't know what to do with interpolation: {code}")
# We'll probably give the state machine writer the best tools if we strip whitespace
# from the output, whatever it is.
replacement = interpolable_output.output.strip()
text = text.replace(placeholder, replacement)
importances.add(interpolable_output.importance)
ttls.add(interpolable_output.ttl)
if config.state_machine.debug_detect_unresolved_funcalls:
assert "FUNCALL(" not in text, "Unresolved function call(s)"
ttl = None
if not importances:
# This means there were no function invocations.
# As a heuristic, we assume that user and system messages added in the initial state
# are of high importance, and messages added in other states are of medium importance.
if isinstance(self.runner._curr_state, ast.InitialState):
importance = Importance.HIGH
else:
importance = Importance.MEDIUM
else:
# Otherwise, if there's more than one of either, warn and take the min.
if len(importances) > 1:
logger.warning(
"Multiple importances for message action: {} (taking min)",
importances,
)
importance = min(importances)
if len(ttls) > 1:
logger.warning(
"Multiple ttls for message action: {} (taking None)",
ttls,
)
else:
ttl = next(iter(ttls))
self.runner._session.add_message(
chat_session.Message(role=action.role, text=text, ttl=ttl, importance=importance)
)
@override
async def goto_action(self, action: ast.GotoAction) -> None:
assert self.config is not None
logger.info("[Agent {}]: Goto action: {}", self.runner.name, action.label)
self.runner._curr_state = self.runner._agent.get_state(action.label)
@override
async def end_action(self, action: ast.EndAction) -> None:
assert self.config is not None
logger.info("[Agent {}]: End action", self.runner.name)
self.runner._runner_state = RunnerState.TERMINATED
@override
async def send_action(self, action: ast.SendAction) -> None:
assert self.config is not None
# FIXME: make sure send is the last action.
logger.info("[Agent {}]: Send action: to: {}; next: {}", self.runner.name, action.to, action.next_state)
self.runner._runner_state = RunnerState.WAITING
await self.runner._sm_runner._get_agent_runner(action.to).add_message(
self.runner._session.messages[-1].as_user_message(), quiet=True
)
class RunnerState(Enum):
"""
The state of the agent runner.
The runner starts in the NOT_STARTED state. The first agent's runner is started by
StateMachineRunner, putting it in the RUNNING state.
Apart from this, the only state changes are:
- Sending a message causes a RUNNING -> WAITING transition.
- Receiving a message causes a NOT_STARTED/WAITING -> RUNNING transition.
- Executing an `end` action causes a RUNNING -> TERMINATED transition.
"""
NOT_STARTED = "not_started"
RUNNING = "running"
WAITING = "waiting"
TERMINATED = "terminated"
class _AgentRunner:
"""
The agents are sort-of independent actors that run like coroutines.
Do not use directly. Use StateMachineRunner.
"""
_session: chat_session.ChatSession
_agent: ast.Agent
_curr_state: ast.State
_sm_runner: StateMachineRunner
_inbox: asyncio.Queue[chat_session.Message]
__runner_state: RunnerState
_handler: _ActionHandler
_interpolables: list[interpolable.Interpolable]
@property
def _runner_state(self) -> RunnerState:
return self.__runner_state
@_runner_state.setter
def _runner_state(self, value: RunnerState) -> None:
logger.info("[Agent {}]: State transition: {} -> {}", self.name, self._runner_state, value)
self.__runner_state = value
@property
def _sm(self) -> ast.StateMachine:
return self._sm_runner._sm
@property
def name(self) -> str:
return self._agent.name
def __init__(
self,
sm_runner: StateMachineRunner,
agent: ast.Agent,
llm: chat_session.LLM,
python_vars: dict[str, object] | None = None,
) -> None:
self._sm_runner = sm_runner
self._inbox = asyncio.Queue()
self._agent = agent
self._session = chat_session.ChatSession(llm, name=self._agent.name, callback=self._sm_runner._message_callback)
self._curr_state = self._agent.initial_state
self._handler = _ActionHandler(self)
self.__runner_state = RunnerState.NOT_STARTED
async def _get_user_input() -> str:
# Expire messages with expired ttls.
self._session.decrease_ttls()
return (await self._sm_runner.frontend.get_user_input()).content
self._interpolables = [
interpolable.SimpleInterpolable(
code="user_input()",
importance=_IMPORTANCE_USER_INPUT_INTERPOLABLE,
ttl=_TTL_USER_INPUT_INTERPOLABLE,
coro=_get_user_input,
),
_Python(python_vars or {}),
_Rtfm(self._sm_runner._openai_provider),
]
async def _execute_action(self, action: ast.ActionBase) -> None:
"""Execute an action."""
assert self._runner_state == RunnerState.RUNNING, self._runner_state
await action.aaccept(self._handler)
async def _execute_actions(self, actions: ast.ActionList, config: ast.Config) -> None:
self._handler.config = config
await actions.aaccept(self._handler)
async def start(self) -> None:
"""Start the agent's state machine."""
assert self._runner_state == RunnerState.NOT_STARTED, self._runner_state
assert isinstance(self._curr_state, ast.InitialState)
self._runner_state = RunnerState.RUNNING
# For the first step, we need to get out of the initial state.
# For it, we just execute actions. No LLM is invoked here.
# The initial state is used to set up the environment for the first LLM invocation.
logger.info("[Agent {}]: Starting.", self._agent.name)
self._sm_runner._model_override = self._curr_state.config.model
await self._execute_actions(self._curr_state.action, self._curr_state.config)
assert isinstance(self._curr_state, ast.NonInitialState), "Did not get out of initial state"
# self._process_inbox() # add any messages from inbox
def _add_message_to_session(self, message: chat_session.Message) -> None:
"""Add a message to the agent's session."""
assert self._runner_state not in (RunnerState.NOT_STARTED, RunnerState.TERMINATED), self._runner_state
self._session.add_message(message)
logger.info("[Agent {}]: Received message: {}", self._agent.name, message)
def _process_inbox(self) -> None:
"""Process the agent's inbox, adding to the session."""
if self._inbox.empty():
return
assert self._inbox.qsize() == 1, "Sus: Several messages in inbox."
self._add_message_to_session(self._inbox.get_nowait())
async def add_message(self, message: chat_session.Message, quiet: bool = False) -> None:
"""Add a message to the agent's inbox."""
logger.info("[Agent {}]: Adding message to inbox: {}", self._agent.name, message)
if self._runner_state == RunnerState.NOT_STARTED:
await self.start()
elif self._runner_state == RunnerState.WAITING:
self._runner_state = RunnerState.RUNNING
self._session.add_message(message, quiet=quiet)
async def step(self) -> None:
"""Execute one step of the agent's state machine."""
assert self._runner_state != RunnerState.TERMINATED, "Cannot step after termination"
if self._runner_state in (RunnerState.NOT_STARTED, RunnerState.WAITING):
self._add_message_to_session(await self._inbox.get())
assert self._runner_state == RunnerState.RUNNING, self._runner_state
assert isinstance(self._curr_state, ast.NonInitialState)
logger.info("[Agent {}]: Non-initial state: {}", self._agent.name, self._curr_state.name)
self._process_inbox() # add any messages from inbox
msg = await self._session.generate_response(
self._sm_runner._model_override or self._agent.config.model or self._sm.config.model
)
self._sm_runner._model_override = None
await self._sm_runner.frontend.send_message(msg)
cond = self._curr_state.triggered_condition(msg.text)
# If the condition contains a model, it overrides the next model to use
self._sm_runner._model_override = cond.config.model
this_state = self._curr_state
await self._execute_actions(cond.action, cond.config) # updates self._curr_state
self._prev_state = this_state
async def run(self) -> None:
"""Run the agent's state machine until it terminates."""
assert self._runner_state != RunnerState.TERMINATED, "Cannot run after termination"
while self._runner_state != RunnerState.TERMINATED: # type: ignore[comparison-overlap] # (mypy 1.4.1 bug)
await self.step()
def __repr__(self) -> str:
return f"<_AgentRunner {self._agent.name}>"
class StateMachineRunner:
"""Execute a network of agents (essentially a hierarchical state machine)."""
_sm: ast.StateMachine
_llm: chat_session.LLM
_python_vars: dict[str, object]
_agent_runners: list[_AgentRunner]
frontend: Frontend
_message_callback: chat_session.MessageCallback | None
_openai_provider: openai_api.OpenAIApiProvider
_model_override: str | None = None
"""When an action specifies a model, it is stored here and used for the next message."""
@property
def _is_terminated(self) -> bool:
return any(runner._runner_state == RunnerState.TERMINATED for runner in self._agent_runners)
@property
def _running_agent(self) -> _AgentRunner:
assert not self._is_terminated, "Cannot get running agents after termination"
rs = [runner for runner in self._agent_runners if runner._runner_state == RunnerState.RUNNING]
assert len(rs) == 1, f"Exactly one agent should be running, got {len(rs)}: {rs}"
return rs[0]
def __init__(
self,
*,
sm: ast.StateMachine,
llm: chat_session.LLM,
frontend: Frontend,
openai_provider: openai_api.OpenAIApiProvider,
python_vars: dict[str, object] | None = None,
message_callback: chat_session.MessageCallback | None = None,
) -> None:
"""
Args:
sm: The state machine to execute.
llm: The LLM to use.
frontend: An object that defines and handles communication with the frontend.
openai_provider: The OpenAI provider to use.
python_vars: The Python variables to use.
message_callback: A callback to call when a message is sent or received.
"""
self._openai_provider = openai_provider
self._sm = sm
self._llm = llm
self._python_vars = python_vars or {}
self._message_callback = message_callback
self._agent_runners = [_AgentRunner(self, agent, self._llm, self._python_vars) for agent in sm.agents]
self.frontend = frontend
def _get_agent_runner(self, name: str) -> _AgentRunner:
"""Get the agent runner with the given name."""
for runner in self._agent_runners:
if runner.name == name:
return runner
raise ValueError(f"Agent {name} not found")
async def step(self) -> None:
"""Execute one step of the state machine."""
# The first agent is the one that starts the state machine.
if self._agent_runners[0]._runner_state == RunnerState.NOT_STARTED:
await self._agent_runners[0].start()
assert not self._is_terminated, "Cannot step after termination"
await self._running_agent.step()
async def run(self) -> None:
"""Run the state machine until it terminates."""
assert not self._is_terminated, "Cannot run after termination"
while not self._is_terminated:
await self.step()
class MessageObserver:
total_cost = 0.0
def __call__(self, message: chat_session.Message, agent: str | None, quiet: bool) -> None:
if not quiet:
print(message.to_string(agent=agent))
if message.cost:
self.total_cost += message.cost
logger.info("Total cost so far: {:.5f}", self.total_cost)
@misc.async_once_in_thread
def _load_state_machine() -> tuple[str, ast.StateMachine]:
yaml_data = config.state_machine.yaml_path.read_text()
sm = parsing.loads_from_yaml(yaml_data)
yaml.indent(mapping=4, sequence=4, offset=0)
logger.info(
"Loaded state machine:\n{}",
yaml.dump_to_string(truncate_json_strings(sm.to_json())), # type: ignore[attr-defined]
)
return yaml_data, sm
async def ensure_loaded() -> None:
await _load_state_machine()
async def load_and_run(
*,
openai_provider: openai_api.OpenAIApiProvider,
llm: chat_session.LLM,
frontend: Frontend,
) -> None:
yaml_text, sm = await _load_state_machine()
# Get the first user input as the query, just for logging purposes.
query = await frontend.peek_user_input()
logger.info("User conversation starter: {}", query)
with qlog.QueryEvent.section(text=query.content, uuid=query.uuid, prompt=yaml_text):
runner = StateMachineRunner(
sm=sm,
llm=llm,
python_vars=dict(),
message_callback=MessageObserver(),
frontend=frontend,
openai_provider=openai_provider,
)
await runner.run()
class ProgramArgs(ProgramArgsBase, ProgramArgsMixin):
@classmethod
def _add_args(cls, parser: argparse.ArgumentParser) -> None:
super()._add_args(parser)
class FakeWebFrontend(WebFrontend):
def __init__(self) -> None:
super().__init__(None, False)
@override
async def send_message(self, msg: Message | str) -> None:
# TODO: implement console version, if needed
pass
@override
async def _get_user_input_impl(self) -> UserInputMessage:
# TODO: implement console version, if needed
return UserInputMessage(content="", uuid=str(uuid.uuid4()))
async def main() -> None:
misc.setup_logging()
openai_provider = openai_api_direct.DirectOpenAIApiProvider()
args = ProgramArgs.parse_args()
llm: chat_session.LLM
if args.user_simulate_llm:
llm = chat_session.UserInputLLM()
else:
llm = chat_session.OpenAI(openai_provider)
try:
await load_and_run(llm=llm, frontend=FakeWebFrontend(), openai_provider=openai_provider)
except ma.exceptions.ValidationError as e:
print(e.messages, file=sys.stderr)
raise
if __name__ == "__main__":
asyncio.run(main())
| [] |
2024-01-10 | rocket-science-ch/rocketrosti | rrosti~chat~state_machine~interpolable.py | # Copyright (c) 2023 Rocket Science AG, Switzerland
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
"""
Contains the logic for interpolables, i.e. functions that can be called from within a message, like
{user_input()} or {python()}.
The basic idea is that a user can enter a message like
```
{user_input()}
```
The message is then parsed and the code block is extracted. In this case, the code block is
{user_input()}
The code block is then checked against all interpolables to see which one is responsible for it. In this case, the
`UserInputInterpolable` would be responsible. The interpolable is then executed and the output is returned. In this
case, the user would be asked for input.
"""
import asyncio
import io
import re
from abc import ABC, abstractmethod
from contextlib import redirect_stdout
from typing import Awaitable, Callable
import attrs
import pandas as pd
from overrides import override
from rrosti.chat.chat_session import Importance
from rrosti.llm_api import openai_api
from rrosti.servers.websocket_query_server import Frontend, PythonItem
from rrosti.snippets import document_sync
from rrosti.snippets.abstract_snippet_database import AbstractSnippetDatabase
from rrosti.snippets.sklearn_snippet_database import SklearnSnippetDatabase
from rrosti.snippets.snippet import Snippet
from rrosti.utils import misc
from rrosti.utils.config import config
class NoCodeBlockFoundError(ValueError):
pass
@attrs.frozen
class InterpolableOutput:
output: str
# For debug purposes, shown in frontend. If None, then the actual output is not shown at all.
# If it should be shown but without prefix, simply use ''
info_prefix: str | None
importance: Importance # Influences pruning order
# None = do not prune; positive integer = prune after this many user inputs
ttl: int | None = attrs.field()
@ttl.validator
def _check_ttl(self, _attribute: str, value: int | None) -> None:
if value is not None and value <= 0:
raise ValueError("ttl must be None or a positive integer")
# TODO: Is this name descriptive? Can we think of something better?
class Interpolable(ABC):
"""An interpolable function invocation, like {user_input()}, {python()} or {rtfm()}."""
@abstractmethod
def is_for_me(self, code: str) -> bool:
...
@abstractmethod
async def execute(self, last_msg: str | None, frontend: Frontend) -> InterpolableOutput:
...
# A simple interpolable that wraps a coroutine
class SimpleInterpolable(Interpolable):
_code: str
_coro: Callable[[], Awaitable[str]]
_importance: Importance
_ttl: int | None
def __init__(self, code: str, importance: Importance, ttl: int | None, coro: Callable[[], Awaitable[str]]) -> None:
self._code = code
self._coro = coro
self._importance = importance
self._ttl = ttl
@override
def is_for_me(self, code: str) -> bool:
return code == self._code
@override
async def execute(self, last_msg: str | None, frontend: Frontend) -> InterpolableOutput:
return InterpolableOutput(
output=await self._coro(), info_prefix=None, importance=self._importance, ttl=self._ttl
)
def extract_code_block(language: str, msg_text: str) -> str:
# First check that we don't have multiple matches for $$${language}
if msg_text.count(f"$$${language}") > 1:
raise ValueError("Multiple code blocks found in message.")
code_block_pattern = re.compile(r"\$\$\$" + language + r"\r?\n(.*?)\$\$\$", re.DOTALL)
match = code_block_pattern.search(msg_text)
if not match:
raise NoCodeBlockFoundError("No full and terminated code block found in message")
return match.group(1)
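# Illustrative example (hypothetical message text, using the $$$-fenced format expected above):
#   msg = "Here you go:\n$$$python\nprint('hi')\n$$$\n"
#   extract_code_block("python", msg)  ->  "print('hi')\n"
# A message without a terminated $$$<language> ... $$$ block raises NoCodeBlockFoundError.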
def execute_python_in_msg(msg_text: str, vars: dict[str, object]) -> PythonItem:
code = extract_code_block("python", msg_text)
try:
f = io.StringIO()
with redirect_stdout(f):
exec(code, dict(pd=pd, **vars))
out = f.getvalue().strip()
except Exception as e:
out = "Exception: " + str(e)
return PythonItem(_code=code, _output=out)
@misc.async_once_blocking
async def _get_database() -> AbstractSnippetDatabase:
snippets = await document_sync.sync_and_get_snippets()
return await asyncio.to_thread(SklearnSnippetDatabase, snippets)
async def ensure_loaded() -> None:
await _get_database()
async def execute_rtfm_in_msg(openai_provider: openai_api.OpenAIApiProvider, message: str) -> list[Snippet]:
text = extract_code_block("rtfm", message)
# We need the encoding for the text
snippet = Snippet.from_query(text)
await snippet.async_ensure_embedding(openai_provider)
# Now execute the nearest neighbors search
db = await _get_database()
return await db.find_nearest_merged(
openai_provider, snippet, config.state_machine.rtfm_max_tokens, config.state_machine.rtfm_merge_candidates
)
| [] |
2024-01-10 | rocket-science-ch/rocketrosti | rrosti~snippets~snippet.py | # Copyright (c) 2023 Rocket Science AG, Switzerland
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
"""Snippet type that supports merging and optionally has an embedding."""
from __future__ import annotations
import hashlib
from functools import cached_property
from pathlib import Path
from typing import IO, Iterable, Sequence, cast
import attrs
import numpy as np
import orjson
from attrs import field
from attrs.setters import frozen, validate
from loguru import logger
from rrosti.llm_api import openai_api
from rrosti.utils.config import config
from rrosti.utils.misc import FloatArray
def _hash_bytes(data: bytes) -> str:
return hashlib.sha256(data).hexdigest()
def _hash_str(data: str) -> str:
return _hash_bytes(data.encode("utf-8"))
def _embedding_cache_path() -> Path:
return config.document_sync.data_gen_path / "embeddings.map"
# TODO: Some of this logic could be simplified, by e.g. having a transparent-ish cache
# that queries the API when it doesn't have an embedding.
class EmbeddingCache:
_private_tag = object()
_map: dict[str, FloatArray]
_sorted: bool
def _ensure_sorted(self) -> None:
if not self._sorted:
self._map = dict(sorted(self._map.items()))
self._sorted = True
def __init__(self, private_tag: object) -> None:
"""Internal constructor."""
assert private_tag is self._private_tag, "EmbeddingCache should not be instantiated directly."
self._map = {}
self._sorted = True
@classmethod
def load(cls) -> EmbeddingCache:
cache = cls(cls._private_tag)
path = _embedding_cache_path()
if not path.exists():
logger.info("No embedding cache found, creating new one.")
return cache
logger.info("Loading embedding cache from {}...", path)
hash_list: list[str] = []
all_bytes: list[bytes] = []
with open(path) as f:
for line in f:
hash, bytes_hex = line.strip().split("\t")
hash_list.append(hash)
all_bytes.append(bytes.fromhex(bytes_hex))
embeddings = np.frombuffer(b"".join(all_bytes), dtype=np.float32).reshape(len(hash_list), -1)
cache._map = dict(zip(hash_list, embeddings))
cache._sorted = False
assert all(len(k) == 64 for k in cache._map)
assert len({len(v) for v in cache._map.values()}) == 1
logger.info("Loaded a cache of {} embeddings.", len(cache._map))
return cache
def save(self) -> None:
self._ensure_sorted()
path = _embedding_cache_path()
# Atomically replace
fname_new = Path(str(path) + ".new")
with open(fname_new, "w") as f:
for hash, embedding in self._map.items():
f.write(f"{hash}\t{embedding.tobytes().hex()}\n")
fname_new.rename(path)
def _assert_consistency(self, snippet: Snippet) -> None:
if snippet._embedding is None:
return
cached = self._map.get(snippet.hash)
if cached is None:
return
assert snippet._embedding.shape == cached.shape, (snippet._embedding.shape, cached.shape)
if not np.all(snippet._embedding == cached):
# Tolerate a small cosine distance between the two embeddings.
cos_dist = 1 - np.dot(snippet._embedding, cached) / np.linalg.norm(snippet._embedding) / np.linalg.norm(
cached
)
if cos_dist > 0.01:
logger.error("Embedding cache is inconsistent with snippet.")
logger.error("Snippet:\n{}", snippet.text)
logger.error("Snippet embedding:\n{}", snippet._embedding)
logger.error("Cached embedding:\n{}", cached)
logger.error("Cosine distance: {}", cos_dist)
assert False
# Close enough. Just use the cached embedding.
snippet._embedding = cached
def _copy_to_snippet(self, snippet: Snippet) -> None:
if snippet._embedding is None and snippet.hash in self._map:
snippet._embedding = self._map[snippet.hash].copy()
return
def _copy_from_snippet(self, snippet: Snippet) -> None:
if snippet._embedding is not None and snippet.hash not in self._map:
self._map[snippet.hash] = snippet._embedding.copy()
def sync_with_snippet(self, snippet: Snippet) -> None:
"""
If the snippet has an embedding, add it to the cache.
If the cache has an embedding for the snippet, add it to the snippet.
"""
self._assert_consistency(snippet)
self._copy_from_snippet(snippet)
self._copy_to_snippet(snippet)
def sync_with_snippets(self, snippets: Iterable[Snippet]) -> None:
for snippet in snippets:
self.sync_with_snippet(snippet)
@classmethod
def from_snippets(cls, snippets: Iterable[Snippet]) -> EmbeddingCache:
cache = cls(cls._private_tag)
cache.sync_with_snippets(snippets)
return cache
# TODO: Split into Snippet and SnippetWithEmbedding
@attrs.define(slots=False) # slots=False because we use cached_property
class Snippet:
"""
A snippet type that supports merging. Can optionally have an embedding.
Embeddings are dense vector representations in a lower-dimensional space
that capture semantic meaning, allowing for operations like similarity checks.
"""
source_filename: str = field(
on_setattr=frozen, kw_only=True, validator=[attrs.validators.instance_of(str)]
) # typically filename or similar identifier
start_offset: int = field(on_setattr=frozen, kw_only=True, validator=[attrs.validators.instance_of(int)])
text: str = field(on_setattr=frozen, validator=[attrs.validators.instance_of(str)])
_embedding: FloatArray | None = field(on_setattr=validate, default=None, repr=False, kw_only=True)
page_start: int | None
page_end: int | None
@classmethod
def _drop_duplicates(cls, snippets: Iterable[Snippet]) -> list[Snippet]:
hashes_seen: dict[str, int] = {}
out: list[Snippet] = []
dup_count = 0
for snippet in snippets:
if snippet.hash not in hashes_seen:
hashes_seen[snippet.hash] = len(out)
out.append(snippet)
else:
# If this one has an embedding and the previous one not, add the embedding to the previous one
if snippet._embedding is not None and out[hashes_seen[snippet.hash]]._embedding is None:
out[hashes_seen[snippet.hash]]._embedding = snippet._embedding
dup_count += 1
if dup_count:
logger.warning("Dropped {} duplicate snippets.", dup_count)
return out
@_embedding.validator
def _validate_embedding(self, _attribute: attrs.Attribute[Snippet], value: FloatArray | None) -> None:
if value is None:
return
assert isinstance(value, np.ndarray)
assert value.dtype == np.float32
assert value.ndim == 1
assert value.shape[0] > 0
def sync_ensure_embedding(self, openai_provider: openai_api.OpenAIApiProvider) -> None:
if self._embedding is None:
self._embedding = openai_provider.create_embedding(input=[self.text]).embeddings[0]
@classmethod
def ensure_embeddings(cls, openai_provider: openai_api.OpenAIApiProvider, snippets: Sequence[Snippet]) -> None:
"""
Ensure that all snippets have embeddings.
This method will query OpenAI's API for embeddings for all snippets that do not have them.
"""
snippets_without_embeddings = [s for s in snippets if s._embedding is None]
if not snippets_without_embeddings:
return
logger.info(f"Querying embeddings for {len(snippets_without_embeddings)} snippets")
embeddings = openai_provider.create_embedding([s.text for s in snippets_without_embeddings]).embeddings
for snippet, embedding in zip(snippets_without_embeddings, embeddings):
snippet._embedding = embedding
async def async_ensure_embedding(self, openai_provider: openai_api.OpenAIApiProvider) -> None:
if self._embedding is None:
self._embedding = (await openai_provider.acreate_embedding([self.text])).embeddings[0]
@classmethod
async def async_ensure_embeddings(
cls, openai_provider: openai_api.OpenAIApiProvider, snippets: Sequence[Snippet]
) -> None:
"""
Ensure that all snippets have embeddings.
This method will query OpenAI's API for embeddings for all snippets that do not have them.
"""
snippets_without_embeddings = [s for s in snippets if s._embedding is None]
if not snippets_without_embeddings:
return
logger.info(f"Querying embeddings for {len(snippets_without_embeddings)} snippets")
embeddings = (await openai_provider.acreate_embedding([s.text for s in snippets_without_embeddings])).embeddings
for snippet, embedding in zip(snippets_without_embeddings, embeddings):
snippet._embedding = embedding
@property
def has_embedding(self) -> bool:
return self._embedding is not None
def sync_get_embedding(self, openai_provider: openai_api.OpenAIApiProvider) -> FloatArray:
"""Get the embedding for this snippet, requesting it synchronously if necessary."""
self.sync_ensure_embedding(openai_provider)
assert self._embedding is not None
return self._embedding
async def async_get_embedding(self, openai_provider: openai_api.OpenAIApiProvider) -> FloatArray:
"""Get the embedding for this snippet, requesting it asynchronously if necessary."""
await self.async_ensure_embedding(openai_provider)
assert self._embedding is not None
return self._embedding
@cached_property
def hash(self) -> str:
"""The hash of the snippet text."""
return _hash_str(self.text)
@property
def end_offset(self) -> int:
return self.start_offset + len(self.text)
@property
def length(self) -> int:
return len(self.text)
def try_merge(self, other: Snippet) -> Snippet | None:
"""Merge with a potentially overlapping later snippet, unless there is a gap."""
if self.source_filename != other.source_filename:
return None
assert self.start_offset <= other.start_offset, "Snippets must be sorted"
if other.start_offset > self.end_offset:
return None
added_text = other.text[self.end_offset - other.start_offset :]
return Snippet(
text=self.text + added_text,
start_offset=self.start_offset,
source_filename=self.source_filename,
embedding=None,
page_start=self.page_start,
page_end=other.page_end,
)
@staticmethod
def from_query(query: str) -> Snippet:
return Snippet(
text=query, start_offset=0, source_filename="$$query", embedding=None, page_start=None, page_end=None
)
@staticmethod
def merge_list(snippets: Iterable[Snippet]) -> list[Snippet]:
"""
Given a list of snippets, merge snippets that are next to each other or that overlap.
Changes snippet order.
"""
snippets = sorted(snippets, key=lambda s: (s.source_filename, s.start_offset))
merged_snippets: list[Snippet] = []
for snippet in snippets:
if not merged_snippets:
merged_snippets.append(snippet)
continue
last_snippet = merged_snippets[-1]
merged_snippet = last_snippet.try_merge(snippet)
if merged_snippet is not None:
merged_snippets[-1] = merged_snippet
else:
merged_snippets.append(snippet)
return merged_snippets
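# Illustrative sketch (hypothetical snippets from the same source file):
#   a = Snippet(text="Hello wor", start_offset=0, source_filename="doc.txt",
#               embedding=None, page_start=1, page_end=1)
#   b = Snippet(text="world!", start_offset=6, source_filename="doc.txt",
#               embedding=None, page_start=1, page_end=1)
# a.end_offset == 9 >= b.start_offset == 6, so a.try_merge(b) returns a snippet spanning
# offsets 0..12 with text "Hello world!"; Snippet.merge_list([a, b]) yields that single snippet.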
@staticmethod
def from_dict(data: dict[str, int | str | None]) -> Snippet:
"""
Load a snippet from a dictionary.
These snippets are required to have embeddings.
"""
assert isinstance(data["start"], int)
assert isinstance(data["doc"], str)
assert isinstance(data["content"], str)
assert isinstance(data["page_start"], int | None)
assert isinstance(data["page_end"], int | None)
if data["embedding"] is None:
emb: FloatArray | None = None
else:
assert isinstance(data["embedding"], str)
emb = np.frombuffer(bytes.fromhex(data["embedding"]), dtype=np.float32)
return Snippet(
source_filename=data["doc"],
start_offset=data["start"],
text=data["content"],
embedding=emb,
page_start=data["page_start"], # type: ignore[arg-type]
page_end=data["page_end"], # type: ignore[arg-type]
)
def to_dict(self) -> dict[str, int | str | None]:
"""Convert a snippet to a dictionary."""
return {
"start": self.start_offset,
"doc": self.source_filename,
"content": self.text,
"embedding": self._embedding.tobytes().hex() if self._embedding is not None else None,
"page_start": self.page_start,
"page_end": self.page_end,
}
@staticmethod
def consolidate_embeddings(snippets: Sequence[Snippet]) -> FloatArray:
"""
Consolidate the embeddings of a list of snippets into a single numpy array, replacing
the embeddings in the snippets with slices of the array.
The snippets must have embeddings.
The larger array is useful for computing distances between snippets, while the slices
let the snippets share that memory rather than keeping full copies of their embeddings.
"""
assert all(s._embedding is not None for s in snippets), "All snippets must have embeddings"
assert all(
cast(FloatArray, s._embedding).shape == cast(FloatArray, snippets[0]._embedding).shape for s in snippets
), "All embeddings must have the same shape"
embeddings = np.stack([cast(FloatArray, s._embedding) for s in snippets])
for s, e in zip(snippets, embeddings):
s._embedding = e
return embeddings
@staticmethod
def load_from_jsonl(file_or_filename: str | Path | IO[bytes]) -> list[Snippet]:
"""Load snippets from a JSONL file."""
logger.info("Loading snippets from {}", file_or_filename)
def _do(file: IO[bytes]) -> list[Snippet]:
return [Snippet.from_dict(orjson.loads(line)) for line in file]
if isinstance(file_or_filename, (str, Path)):
with open(file_or_filename, "rb") as f:
snippets = _do(f)
else:
snippets = _do(file_or_filename)
logger.info("Loaded {} snippets from {}", len(snippets), file_or_filename)
return snippets
| [] |
2024-01-10 | rocket-science-ch/rocketrosti | rrosti~chat~chat_session.py | # Copyright (c) 2023 Rocket Science AG, Switzerland
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
"""Tools for maintaining a chat session (i.e. LLM context with messages)."""
from __future__ import annotations
import asyncio
import enum
from abc import ABC, abstractmethod
from dataclasses import dataclass
from typing import Literal, Protocol
import aioconsole # type: ignore[import]
import openai
from loguru import logger
from overrides import override
from rrosti.llm_api import openai_api
from rrosti.utils import misc
from rrosti.utils.config import config
ERROR_SLEEP_SECONDS = 2
class Importance(enum.IntEnum):
"""Importance of a message."""
NOISE = 0
LOW = 10
MEDIUM = 20
HIGH = 30
@dataclass
class Message:
"""A message in a chat session."""
role: Literal["user", "assistant", "system"]
text: str
# To manage the size of the context, we may need to trim messages.
# 1. Each message has an importance score; when the context is full, we always trim the earliest
# message with the lowest importance score.
# 2. Messages can have an optional ttl (time-to-live) field which gets decremented at each
# user input. When the ttl reaches 0, the message is removed.
importance: Importance
ttl: int | None = None
cost: float | None = None
time_used: float | None = None
def __post_init__(self) -> None:
assert not (self.role == "user" and self.cost is not None), "User messages cannot have a cost"
def as_user_message(self) -> Message:
"""
Return a user message with the same text as this message, with no cost.
"""
return Message(role="user", text=self.text, importance=self.importance)
def to_string(self, agent: str | None = None) -> str:
meta: list[str] = [self.role]
if self.cost is not None:
meta.append(f"{self.cost:.5f} USD")
if self.time_used is not None:
meta.append(f"time={self.time_used:.2f}")
if self.ttl is not None:
meta.append(f"ttl={self.ttl}")
meta.append(f"importance={self.importance}")
meta_str = ", ".join(meta)
header = f"{agent} ({meta_str})" if agent else meta_str
lines = []
lines.append("-" * 5 + " " + header + " " + "-" * (70 - 7 - len(header)))
lines.append(self.text.strip())
lines.append("-" * 70)
return "\n".join(lines)
async def get_user_input(prompt: str = "", end_line: str | None = None) -> str:
"""Get user input from the console, optionally ending on a specific line."""
lines: list[str] = []
while True:
line = await aioconsole.ainput(f"{prompt}> ")
if end_line is not None and line.strip() == end_line:
break
lines.append(line)
if end_line is None:
break
return "\n".join(lines)
class LLM(ABC):
"""A language model."""
@abstractmethod
async def chat_completion(self, messages: list[Message], agent_name: str = "", model: str | None = None) -> Message:
...
class OpenAI(LLM):
"""OpenAI chat language model."""
temperature: float
prompt_tokens: int
completion_tokens: int
cost: float
openai_provider: openai_api.OpenAIApiProvider
def __init__(self, openai_provider: openai_api.OpenAIApiProvider, temperature: float = 1.0) -> None:
self.openai_provider = openai_provider
self.temperature = temperature
self.prompt_tokens = 0
self.completion_tokens = 0
self.cost = 0.0
@override
async def chat_completion(
self,
messages: list[Message],
agent_name: str = "",
model: str | None = None,
msg_importance: Importance = Importance.MEDIUM,
) -> Message:
del agent_name # intentionally unused parameter
if model is None:
model = config.openai_api.chat_completion_model
while True:
try:
start_time = asyncio.get_event_loop().time()
resp = await self.openai_provider.acreate_chat_completion(
messages=[dict(role=message.role, content=message.text) for message in messages],
max_tokens=config.openai_api.completion_max_tokens,
model=model,
)
end_time = asyncio.get_event_loop().time()
elapsed_time = end_time - start_time
break
except (
openai.error.RateLimitError,
openai.error.Timeout,
openai.error.APIConnectionError,
openai.error.APIError,
openai.error.ServiceUnavailableError,
) as e:
logger.error("OpenAI API error: {}. Sleeping for {} seconds...", e, ERROR_SLEEP_SECONDS)
await asyncio.sleep(ERROR_SLEEP_SECONDS)
prompt_tokens = resp["usage"]["prompt_tokens"]
completion_tokens = resp["usage"]["completion_tokens"]
self.prompt_tokens += prompt_tokens
self.completion_tokens += completion_tokens
model_cost = config.openai_api.model_cost[
model if model is not None else config.openai_api.chat_completion_model
]
# TODO: handle prompts that did not return any content at all (currently it crashes the process and starts a new
# session with the client without any Warning
text: str | None = resp["choices"][0]["message"].get("content")
if not text:
# If nothing is returned, the query (or answer) got blocked by Azure. This is the identifier so that the
# state machine can give the user an answer explaining it.
text = "$$$error$$$"
return Message(
role="assistant",
text=text,
importance=msg_importance,
cost=model_cost.calculate(prompt_tokens, completion_tokens),
time_used=elapsed_time,
)
class UserInputLLM(LLM):
"""LLM that asks the user for input. For testing."""
@override
async def chat_completion(self, messages: list[Message], agent_name: str = "", model: str | None = None) -> Message:
for message in messages:
print("-" * 5 + " " + message.role + " " + "-" * (40 - 7 - len(message.role)))
print(message.text)
print("-" * 40)
print("Enter message. '.' on a line alone finishes.")
text = await get_user_input(agent_name, end_line=".")
return Message(role="user", importance=Importance.HIGH, text=text)
class MessageCallback(Protocol):
def __call__(self, message: Message, agent: str | None, quiet: bool) -> None:
...
class ChatSession:
"""
A chat session (i.e. LLM context, consisting of messages).
Also contains functionality to prune messages to prevent the LLM context from becoming too long.
"""
messages: list[Message]
llm: LLM
_name: str # A human-readable name. Could be e.g. a name of an agent. May be empty string.
_message_callback: MessageCallback | None
def __init__(self, llm: LLM, name: str = "", callback: MessageCallback | None = None) -> None:
self.messages = []
self.prompt_tokens = 0
self.completion_tokens = 0
self.llm = llm
self._name = name
self._message_callback = callback
def _prune_unimportant_message(self) -> None:
"""Prune the least important message."""
assert self.messages, "No messages to prune"
min_importance = min(message.importance for message in self.messages)
for message in self.messages:
if message.importance == min_importance:
logger.info(
"Pruning unimportant message (importance={}): {}",
message.importance,
misc.truncate_string(message.text),
)
self.messages.remove(message)
return
def decrease_ttls(self) -> None:
"""Decrease ttls and kill messages with expired ttls."""
new_messages = []
for message in self.messages:
if message.ttl is not None:
message.ttl -= 1
logger.info("Decreasing ttl of message to {}: {}", message.ttl, misc.truncate_string(message.text))
if message.ttl is not None and message.ttl <= 0:
logger.info("Killing message with expired ttl: {}", misc.truncate_string(message.text))
else:
new_messages.append(message)
self.messages = new_messages
def add_message(
self,
message: Message,
quiet: bool = False,
) -> None:
"""Add a message to the chat session."""
assert message.ttl is None or message.ttl > 0, "ttl must be None or positive"
self.messages.append(message)
logger.info(
"Message({}): {}",
message.role,
misc.truncate_string(message.text),
)
if self._message_callback is not None:
self._message_callback(message, self._name, quiet)
async def generate_response(self, model: str | None = None) -> Message:
while True:
try:
resp = await self.llm.chat_completion(self.messages, agent_name=self._name, model=model)
break
except openai.error.InvalidRequestError as e:
if "maximum context length" in str(e):
logger.info("Context length exceeded: {}", str(e))
self._prune_unimportant_message()
continue
raise
self.add_message(resp)
return resp
| [
"prompt_tokens"
] |
2024-01-10 | rocket-science-ch/rocketrosti | rrosti~snippets~document_sync.py | # Copyright (c) 2023 Rocket Science AG, Switzerland
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
"""
Parse documents from `config.document_sync.source_docs_path` into `config.document_sync.parsed_docs_path`.
Generate snippets and maintain an embedding cache.
"""
import asyncio
import hashlib
from pathlib import Path
from typing import Awaitable, Iterable
import orjson
from aiopath import AsyncPath # type: ignore[import]
from loguru import logger
from tqdm import tqdm
from rrosti.llm_api import openai_api, openai_api_direct
from rrosti.snippets.parsed_document import ParsedDocument
from rrosti.snippets.snippet import EmbeddingCache, Snippet
from rrosti.utils.config import config
async def _handle_source_document(doc_path: Path) -> bool:
"""
Parse a source document and output it to `config.document_sync.parsed_docs_path`.
Returns True if something was updated.
"""
if doc_path.suffix.lower() not in [".pdf", ".txt"]:
logger.warning(f"Unknown file type, ignoring document: {doc_path}")
return False
target_apath = AsyncPath(config.document_sync.parsed_docs_path / (doc_path.name + ".json"))
doc_apath = AsyncPath(doc_path)
doc_data = await doc_apath.read_bytes()
# If the target exists, we check (using the sha256) if it's the same document.
if await target_apath.exists():
json_text = await target_apath.read_bytes()
# TODO: handle corrupted JSON
parsed_document = ParsedDocument.from_dict(orjson.loads(json_text))
new_sha = hashlib.sha256(doc_data).hexdigest()
if parsed_document.sha256 == new_sha:
logger.trace(f"Document already parsed: {doc_path}")
return False
logger.info("Document changed: {}; old={}, new={}", doc_path, parsed_document.sha256, new_sha)
else:
logger.info("New document: {}", doc_path)
# Look at the suffix. We handle pdf and txt.
if doc_path.suffix.lower() == ".pdf":
raise NotImplementedError("PDF parsing is not yet implemented")
# parsed_document = await pdf_parse.parse_pdf(doc_path)
elif doc_path.suffix.lower() == ".txt": # noqa: RET506 (unnecessary elif after raise)
parsed_document = ParsedDocument.from_textfile_bytes(doc_data, name=doc_path.name, path=str(doc_path))
else:
assert False
# Write the parsed document to `config.document_sync.parsed_docs_path`. Add .json to the end of the filename.
output_apath = AsyncPath(config.document_sync.parsed_docs_path / (doc_path.name + ".json"))
await output_apath.parent.mkdir(parents=True, exist_ok=True)
await output_apath.write_bytes(orjson.dumps(parsed_document.to_dict()))
return True
def _snippetize_document(path: Path) -> list[Snippet]:
"""Take in a parsed document and output a list of snippets."""
# logger.info("Reading bytes from {}", path)
data = path.read_bytes()
# logger.info("Parsing JSON")
json_dict = orjson.loads(data)
# logger.info("Creating ParsedDocument")
doc = ParsedDocument.from_dict(json_dict)
# logger.info("Getting snippets")
return doc.get_snippets(images=True)
async def _snippetize_documents(paths: Iterable[Path]) -> list[Snippet]:
"""Take in a list of parsed documents and output a list of snippets."""
aws: list[Awaitable[list[Snippet]]] = [asyncio.to_thread(_snippetize_document, path) for path in paths]
snippets: list[Snippet] = []
for aw in aws:
snippets.extend(await aw)
return snippets
async def sync_and_get_snippets() -> list[Snippet]:
config.document_sync.data_gen_path.mkdir(parents=True, exist_ok=True)
openai_provider = openai_api_direct.DirectOpenAIApiProvider() # TODO
logger.info("Looking for source documents...")
source_documents = [p for p in config.document_sync.source_docs_path.glob("*") if p.is_file()]
logger.info(f"Found {len(source_documents)} source documents")
if not source_documents:
logger.error("No source documents found")
raise RuntimeError("No source documents found")
# Parse all source documents.
# Ok, we could do this in parallel, but there's something fishy in the PDF parsing library;
# some PDFs seem to randomly cause it to segfault. Doing it sequentially mitigates this,
# at least in the sense that we will compute one PDF at a time and save the result.
updated = [await _handle_source_document(path) for path in source_documents]
num_updated = sum(updated)
if num_updated == 0:
logger.info("No documents updated")
# TODO: Detect removed documents so we don't need to always regenerate the snippets?
else:
logger.info("Updated {} documents.", num_updated)
# Snippetize all parsed documents.
logger.info("Generating snippets...")
snippets = await _snippetize_documents(config.document_sync.parsed_docs_path.glob("*.json"))
# Load cached embeddings so we won't recompute for snippets we've already seen.
cache = await asyncio.to_thread(EmbeddingCache.load)
await asyncio.to_thread(cache.sync_with_snippets, snippets)
no_embedding = [s for s in snippets if not s.has_embedding]
logger.info("Found {} (out of {}) snippets without embeddings.", len(no_embedding), len(snippets))
async def ensure_embeddings_and_sync(
openai_provider: openai_api.OpenAIApiProvider, snippets: list[Snippet]
) -> None:
await Snippet.async_ensure_embeddings(openai_provider, snippets)
cache.sync_with_snippets(snippets)
if no_embedding:
logger.info("Querying embeddings...")
aws: list[Awaitable[None]] = []
for i in tqdm(range(0, len(no_embedding), config.openai_api.endpoint.max_embedding_requests_per_query)):
chunk = no_embedding[i : i + config.openai_api.endpoint.max_embedding_requests_per_query]
aws.append(ensure_embeddings_and_sync(openai_provider, chunk))
await asyncio.gather(*aws)
await asyncio.to_thread(cache.save)
return snippets
if __name__ == "__main__":
asyncio.run(sync_and_get_snippets())
| [] |
2024-01-10 | bsnibble/gpt-placement | guess.py | from openai import OpenAI
from selenium import webdriver
from selenium.webdriver.chrome.service import Service
from webdriver_manager.chrome import ChromeDriverManager
import json
import time
def run_javascript(url, js_code):
driver = webdriver.Chrome(service=Service(ChromeDriverManager().install()))
try:
driver.get(url)
# Wait for the readyState to be complete
while driver.execute_script("return document.readyState") != "complete":
print("Waiting for page to load...")
time.sleep(0.5)
print("Page loaded, running JavaScript...")
return driver.execute_script(js_code)
finally:
driver.quit()
def query_gpt(prompt_system, prompt_user):
# Replace with your OpenAI API key
client = OpenAI(api_key='...') # add your API key here
response = client.chat.completions.create(
model="gpt-3.5-turbo-1106",
messages=[
{"role": "system", "content": prompt_system},
{"role": "user", "content": prompt_user}
],
max_tokens=150,
response_format={ "type": "json_object" }
)
return response
# Function to read JavaScript code from a file
def read_js_file(file_path):
with open(file_path, 'r') as file:
return file.read()
# Step 1: Request the type of page from the user (product, cart)
user_page_type = -1
# validate input or re-ask
while user_page_type not in ['0', '1']:
if user_page_type != -1:
print("Invalid input, please enter a valid number")
user_page_type = input("Enter the type of page (0 - product, 1 - cart): ")
# ask for the cart-drawer if product page
get_drawer = False
if user_page_type == '0':
drawer = input("Should I look for cart drawer selector Y/n:")
if drawer != "n" and drawer != "N":
get_drawer = True
# Step 2: Request URL or HTML content from user, stop if empty
user_input = input("Enter a URL or HTML content: ")
if user_input == "":
print("Nothing entered, exiting...")
exit()
# Step 3: check if the input is a URL or HTML content
if user_input.startswith("http"):
is_url = True
else:
is_url = False
# Read JavaScript code from a file
js_file_path = 'script.js'
js_code = read_js_file(js_file_path)
#print loaded JavaScript size
print("Loaded JavaScript size:", len(js_code))
#stop if size 0
if len(js_code) == 0:
print("JavaScript file is empty, exiting...")
exit()
if is_url:
print("Connecting to URL ...", user_input)
# Step 2 & 3: Open URL in Selenium and run JavaScript
js_result = run_javascript(user_input, js_code)
#if result is a valid string
if isinstance(js_result, str):
#print first 100 characters
print("JavaScript result:", js_result[:100], '...')
else:
js_result = user_input
# Step 4: Create GPT prompt based on the page type
gpt_prompt_system_button = f"""
Analyze the provided HTML from a Shopify e-commerce product detail page.
Your task is to find the main 'Add to cart' button and the main 'product form', avoiding any other buttons related to mini-carts, sidebar or cart-drawers or other secondary forms.
The goal is to find the correct selector to add an element AFTER the "add to cart" button
IGNORE ANY BUTTON IN THE CART DRAWERS
Return the information in a JSON format with the following structure:
{{\"button_or_container_selector\": selector for the 'add to cart' button or its immediate parent container, prefer a selector based on the type AND class or parent class,
\"placement_position\": one of [afterBegin, beforeBegin, afterEnd, beforeEnd], the position where a new button should be added,
\"form_selector\":selector for the main product form. It could be different from the one containing the previous selector. Prefer a selector based on the form action (e.g. /cart/add) AND a class (or a parent class), Ignore IDs that contains the \"template\" substring.}}
If in doubt for each selector add an unique parent class or ID, ignore anything with a "template" substring in it
PROVIDE ONLY THE JSON STRING WITH THE LISTED KEYS"""
gpt_prompt_system_drawer = f"""
Analyze the provided HTML from a Shopify e-commerce product detail page.
Your task is to find the main 'Checkout' button in the cart drawer and the main 'cart drawer form', avoiding any other buttons related to mini-carts, main forms.
The goal is to find the correct selector to add an element AFTER the "add to cart" button
Return the information in a JSON format with the following structure:
{{\"button_or_container_selector\": selector for the 'Checkout' button in the cart drawer or its immediate parent container, prefer a selector based on the type AND class or parent class,
\"placement_position\": one of [afterBegin, beforeBegin, afterEnd, beforeEnd], the position where a new button should be added,
}}
If in doubt for each selector add an unique parent class or ID, ignore anything with a "template" substring in it
PROVIDE ONLY THE JSON STRING WITH THE LISTED KEYS"""
gpt_prompt_system_cart = f"""
Analyze the provided HTML from a Shopify e-commerce cart page.
Your task is to find the main 'Checkout' button and the main 'cart form', avoiding any other buttons related to mini-carts, sidebar, cart-drawers or other secondary forms.
The goal is to find the correct selector to add an element BEFORE the "Checkout" button
IGNORE ANY BUTTON IN THE CART DRAWERS
Return the information in a JSON format with the following structure:
{{\"button_or_container_selector\": selector for the 'Checkout' button or its container, prefer a selector based on the type AND class or parent class,
\"placement_position\": one of [afterBegin, beforeBegin, afterEnd, beforeEnd], the position where a new button should be added,
\"form_selector\":selector for the main cart form. It could be different from the one containing the previous selector. Prefer a selector based on the form action (e.g. /cart/add) AND a class (or a parent class), Ignore IDs that contains the \"template\" substring.}}
If in doubt for each selector add an unique parent class or ID, ignore anything with a "template" substring in it
PROVIDE ONLY THE JSON STRING WITH THE LISTED KEYS"""
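# Illustrative shape of the JSON the prompts above ask for (selector values are hypothetical):
# {
#     "button_or_container_selector": "form[action='/cart/add'] button.add-to-cart",
#     "placement_position": "afterEnd",
#     "form_selector": "form[action='/cart/add'].product-form"
# }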
gpt_prompt_user = f"""
Analyze this HTML code:
{js_result}
"""
gpt_prompt_system2 = False
if user_page_type == '0':
#product page
gpt_prompt_system = gpt_prompt_system_button
if get_drawer:
gpt_prompt_system2 = gpt_prompt_system_drawer
else:
#cart page
gpt_prompt_system = gpt_prompt_system_cart
# Step 5: Send prompt to GPT and get response
gpt_response = query_gpt(gpt_prompt_system, gpt_prompt_user)
gpt_response2 = False
if gpt_prompt_system2:
gpt_response2 = query_gpt(gpt_prompt_system2, gpt_prompt_user)
# Step 6: Print the response
print("GPT-3 Response")
# Extracting the JSON content
# print response based on page type
if user_page_type == '0':
json_content = gpt_response.choices[0].message.content
print("Button response:")
print("-------------------")
json_data = json.loads(json_content)
formatted_json = json.dumps(json_data, indent=4)
print(formatted_json)
if gpt_response2:
json_content2 = gpt_response2.choices[0].message.content
print(" ")
print("Drawer response:")
print("-------------------")
json_data = json.loads(json_content2)
formatted_json = json.dumps(json_data, indent=4)
print(formatted_json)
else:
json_content = gpt_response.choices[0].message.content
print("Cart response:")
print("-------------------")
json_data = json.loads(json_content)
formatted_json = json.dumps(json_data, indent=4)
print(formatted_json) | [
"\nAnalyze the provided HTML from a Shopify e-commerce product detail page.\nYour task is to find the main 'Add to cart' button and the main 'product form', avoiding any other buttons related to mini-carts, sidebar or cart-drawers or other secondary forms.\nThe goal is to find the correct selector to add an element AFTER the \"add to cart\" button\nIGNORE ANY BUTTON IN THE CART DRAWERS\n\nReturn the information in a JSON format with the following structure:\n\n{\"button_or_container_selector\": selector for the 'add to cart' button or its immediate parent container, prefer a selector based on the type AND class or parent class, \n\"placement_position\": one of [afterBegin, beforeBegin, afterEnd, beforeEnd], the position where a new button should be added, \n\"form_selector\":selector for the main product form. It could be different from the one containing the previous selector. Prefer a selector based on the form action (e.g. /cart/add) AND a class (or a parent class), Ignore IDs that contains the \"template\" substring.}\n\nIf in doubt for each selector add an unique parent class or ID, ignore anything with a \"template\" substring in it\n\nPROVIDE ONLY THE JSON STRING WITH THE LISTED KEYS",
"\nAnalyze the provided HTML from a Shopify e-commerce product detail page.\nYour task is to find the main 'Checkout' button in the cart drawer and the main 'cart drawer form', avoiding any other buttons related to mini-carts, main forms.\nThe goal is to find the correct selector to add an element AFTER the \"add to cart\" button\n\nReturn the information in a JSON format with the following structure:\n\n{\"button_or_container_selector\": selector for the 'Checkout' button in the cart drawer or its immediate parent container, prefer a selector based on the type AND class or parent class, \n\"placement_position\": one of [afterBegin, beforeBegin, afterEnd, beforeEnd], the position where a new button should be added, \n}\n\nIf in doubt for each selector add an unique parent class or ID, ignore anything with a \"template\" substring in it\n\nPROVIDE ONLY THE JSON STRING WITH THE LISTED KEYS",
"False",
"\n Analyze this HTML code:\n PLACEHOLDER\n ",
"\nAnalyze the provided HTML from a Shopify e-commerce cart page.\nYour task is to find the main 'Checkout' button and the main 'cart form', avoiding any other buttons related to mini-carts, sidebar, cart-drawers or other secondary forms.\nThe goal is to find the correct selector to add an element BEFORE the \"Checkout\" button\nIGNORE ANY BUTTON IN THE CART DRAWERS\n\nReturn the information in a JSON format with the following structure:\n\n{\"button_or_container_selector\": selector for the 'Checkout' button or its container, prefer a selector based on the type AND class or parent class, \n\"placement_position\": one of [afterBegin, beforeBegin, afterEnd, beforeEnd], the position where a new button should be added, \n\"form_selector\":selector for the main cart form. It could be different from the one containing the previous selector. Prefer a selector based on the form action (e.g. /cart/add) AND a class (or a parent class), Ignore IDs that contains the \"template\" substring.}\n\nIf in doubt for each selector add an unique parent class or ID, ignore anything with a \"template\" substring in it\n\nPROVIDE ONLY THE JSON STRING WITH THE LISTED KEYS"
] |
2024-01-10 | dhananjayaaps/JustiBot_AI_Law_Agent | ret_agent.py | from langchain.prompts import PromptTemplate
from langchain.chains import LLMChain
from langchain.llms import OpenAI  # assumption: imported so the chain below can actually be built
prompt = PromptTemplate(
input_variables=["query"],
template="{query}"
)
# The original line built the chain without an `llm`, which LLMChain requires; the
# OpenAI instance below is an illustrative assumption (any configured LLM would do).
llm_chain = LLMChain(prompt=prompt, llm=OpenAI(temperature=0))
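# Illustrative usage (hypothetical question; assumes OPENAI_API_KEY is set in the environment):
# print(llm_chain.run(query="What are the elements of a valid contract?"))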
"{query}"
] |
2024-01-10 | carlospolop/hacktricks-cloud | scripts~translator.py | import argparse
import os
from openai import OpenAI #pip3 install openai
import time
import shutil
import tempfile
import subprocess
import sys
import tiktoken
import concurrent.futures
from tqdm import tqdm #pip3 install tqdm
import traceback
MASTER_BRANCH = "master"
VERBOSE = True
MAX_TOKENS = 10000 #gpt-4-1106-preview
def reportTokens(prompt, model):
encoding = tiktoken.encoding_for_model(model)
# print number of tokens in light gray, with first 50 characters of prompt in green. if truncated, show that it is truncated
#print("\033[37m" + str(len(encoding.encode(prompt))) + " tokens\033[0m" + " in prompt: " + "\033[92m" + prompt[:50] + "\033[0m" + ("..." if len(prompt) > 50 else ""))
return len(encoding.encode(prompt))
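# Illustrative usage (hypothetical string): reportTokens("Hello world", "gpt-4-1106-preview")
# returns the tiktoken token count for that model's encoding, used below to size chunks.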
def check_git_dir(path):
if os.path.isdir(os.path.join(path, '.git')):
return True
return False
def get_branch_files(branch):
"""Get a list of all files in a branch."""
command = f"git ls-tree -r --name-only {branch}"
result = subprocess.run(command.split(), stdout=subprocess.PIPE)
files = result.stdout.decode().splitlines()
return set(files)
def delete_unique_files(branch):
"""Delete files that are unique to branch2."""
# Get the files in each branch
files_branch1 = get_branch_files(MASTER_BRANCH)
files_branch2 = get_branch_files(branch)
# Find the files that are in branch2 but not in branch1
unique_files = files_branch2 - files_branch1
if unique_files:
# Switch to the second branch
subprocess.run(["git", "checkout", branch])
# Delete the unique files from the second branch
for file in unique_files:
subprocess.run(["git", "rm", file])
subprocess.run(["git", "checkout", MASTER_BRANCH])
print(f"[+] Deleted {len(unique_files)} files from branch: {branch}")
def cp_translation_to_repo_dir_and_check_gh_branch(branch, temp_folder, translate_files):
branch_exists = subprocess.run(['git', 'show-ref', '--verify', '--quiet', 'refs/heads/' + branch])
# If branch doesn't exist, create it
if branch_exists.returncode != 0:
subprocess.run(['git', 'checkout', '-b', branch])
else:
subprocess.run(['git', 'checkout', branch])
# Walk through source directory
for dirpath, dirnames, filenames in os.walk(temp_folder):
# Compute destination path
dest_path = os.path.join(os.getcwd(), os.path.relpath(dirpath, temp_folder))
# Create directory structure in destination, if not already present
if not os.path.exists(dest_path):
os.makedirs(dest_path)
# Copy each file from source to destination
for file_name in filenames:
src_file = os.path.join(dirpath, file_name)
shutil.copy2(src_file, dest_path)
print(f"Translated files copied to branch: {branch}")
if translate_files:
subprocess.run(['git', 'add', "-A"])
subprocess.run(['git', 'commit', '-m', f"Translated {translate_files} to {branch}"[:72]])
subprocess.run(['git', 'checkout', MASTER_BRANCH])
print("Commit created and moved to master branch")
else:
print("No commiting anything, leaving in language branch")
def translate_text(language, text, file_path, model, cont=0, was_split=False, client=None):
if not text:
return text
messages = [
{"role": "system", "content": "You are a professional hacker, translator and writer. You write everything super clear and as concise as possible without loosing information."},
{"role": "system", "content": f"The following is content from a hacking book about hacking techiques. The following content is from the file {file_path}. Translate the relevant English text to {language} and return the translation keeping excatly the same markdown and html syntax. Do not translate things like code, hacking technique names, hacking word, cloud/SaaS platform names (like Workspace, aws, gcp...), the word 'leak', pentesting, and markdown tags. Also don't add any extra stuff apart from the translation and markdown syntax."},
{"role": "user", "content": text},
]
try:
response = client.chat.completions.create(
model=model,
messages=messages,
temperature=0
)
except Exception as e:
print(e)
if cont > 6:
print(f"Page {file_path} could not be translated due to count with text: {text}\nReturning text as is.")
return text
if "is currently overloaded" in str(e).lower():
print("Overloaded, waiting 30 seconds")
time.sleep(30)
elif "timeout" in str(e).lower():
print("Timeout, waiting 30 seconds")
cont += 1
time.sleep(30)
elif "rate limit" in str(e).lower():
print("Rate limit, waiting 60 seconds")
cont += 1
time.sleep(60)
elif "maximum context length" in str(e).lower():
print("Maximum context length, splitting text in two and translating separately")
            if was_split:
#print(f"Page {file_path} could not be translated with text: {text}")
print(f"Page {file_path} could not be translated.\nReturning text as is.")
return text
text1 = text.split('\n')[:len(text.split('\n'))//2]
text2 = text.split('\n')[len(text.split('\n'))//2:]
return translate_text(language, '\n'.join(text1), file_path, model, cont, False, client) + '\n' + translate_text(language, '\n'.join(text2), file_path, model, cont, True, client)
print("Retrying translation")
return translate_text(language, text, file_path, model, cont, False, client)
response_message = response.choices[0].message.content.strip()
    # Sometimes ChatGPT modifies the number of "#" at the beginning of the text, so we need to fix that. This is especially important for the first line of the MD, which must have only 1 "#"
cont2 = 0
while (text.startswith('# ') and not response_message[cont2:].startswith('# ')):
cont2 += 1
if cont2 > 3:
cont2 = 0
print(f"Error with initial '#', something went wrong, recheck: {response_message[:30]}")
break
response_message = response_message[cont2:]
return response_message
def split_text(text, model):
global MAX_TOKENS
lines = text.split('\n')
chunks = []
chunk = ''
in_code_block = False
for line in lines:
# If we are in a code block, just add the code to the chunk
if line.startswith('```'):
# If we are in a code block, finish it with the "```"
if in_code_block:
chunk += line + '\n'
in_code_block = not in_code_block
chunks.append(chunk.strip())
chunk = ''
# If a code block is started, add the "```" to the chunk
if in_code_block:
chunk += line + '\n'
continue
if (line.startswith('#') and reportTokens(chunk + "\n" + line.strip(), model) > MAX_TOKENS*0.8) or \
reportTokens(chunk + "\n" + line.strip(), model) > MAX_TOKENS:
chunks.append(chunk.strip())
chunk = ''
chunk += line.strip() + '\n'
chunks.append(chunk.strip())
return chunks
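# Illustrative note (not from the original script): split_text() emits fenced code blocks
# as standalone chunks so translate_file() can pass them through untranslated, while prose
# is re-chunked to stay under MAX_TOKENS, e.g.
#   chunks = split_text(open("some-page.md").read(), "gpt-4-1106-preview")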
def copy_gitbook_dir(source_path, dest_path):
folder_name = ".gitbook/"
source_folder = os.path.join(source_path, folder_name)
destination_folder = os.path.join(dest_path, folder_name)
if not os.path.exists(source_folder):
print(f"Error: {source_folder} does not exist.")
else:
# Copy the .gitbook folder
shutil.copytree(source_folder, destination_folder)
print(f"Copied .gitbook folder from {source_folder} to {destination_folder}")
def copy_summary(source_path, dest_path):
file_name = "SUMMARY.md"
source_filepath = os.path.join(source_path, file_name)
dest_filepath = os.path.join(dest_path, file_name)
shutil.copy2(source_filepath, dest_filepath)
print("[+] Copied SUMMARY.md")
def translate_file(language, file_path, file_dest_path, model, client):
global VERBOSE
if file_path.endswith('SUMMARY.md'):
return
with open(file_path, 'r', encoding='utf-8') as f:
content = f.read()
content_chunks = split_text(content, model)
translated_content = ''
start_time = time.time()
for chunk in content_chunks:
        # Don't translate code blocks
if chunk.startswith('```'):
translated_content += chunk + '\n'
else:
            translated_content += translate_text(language, chunk, file_path, model, cont=0, was_split=False, client=client) + '\n'
elapsed_time = time.time() - start_time
# make sure directory exists
os.makedirs(os.path.dirname(file_dest_path), exist_ok=True)
with open(file_dest_path, 'w', encoding='utf-8') as f:
f.write(translated_content)
#if VERBOSE:
print(f"Page {file_path} translated in {elapsed_time:.2f} seconds")
def translate_directory(language, source_path, dest_path, model, num_threads, client):
all_markdown_files = []
for subdir, dirs, files in os.walk(source_path):
for file in files:
if file.endswith('.md') and file != "SUMMARY.md":
source_filepath = os.path.join(subdir, file)
dest_filepath = os.path.join(dest_path, os.path.relpath(source_filepath, source_path))
all_markdown_files.append((source_filepath, dest_filepath))
print(f"Translating {len(all_markdown_files)} files")
#with tqdm(total=len(all_markdown_files), desc="Translating Files") as pbar:
with concurrent.futures.ThreadPoolExecutor(max_workers=num_threads) as executor:
futures = []
for source_filepath, dest_filepath in all_markdown_files:
if os.path.exists(dest_filepath):
continue
os.makedirs(os.path.dirname(dest_filepath), exist_ok=True)
future = executor.submit(translate_file, language, source_filepath, dest_filepath, model, client)
futures.append(future)
for future in concurrent.futures.as_completed(futures):
try:
future.result()
#pbar.update()
except Exception as exc:
tb = traceback.format_exc()
print(f'Translation generated an exception: {exc}')
print("Traceback:", tb)
if __name__ == "__main__":
print("- Version 1.1.1")
# Set up argparse
parser = argparse.ArgumentParser(description='Translate gitbook and copy to a new branch.')
parser.add_argument('-d', '--directory', action='store_true', help='Translate a full directory.')
parser.add_argument('-l', '--language', required=True, help='Target language for translation.')
parser.add_argument('-b', '--branch', required=True, help='Branch name to copy translated files.')
parser.add_argument('-k', '--api-key', required=True, help='API key to use.')
parser.add_argument('-m', '--model', default="gpt-4-1106-preview", help='The openai model to use. By default: gpt-4-1106-preview')
parser.add_argument('-o', '--org-id', help='The org ID to use (if not set the default one will be used).')
parser.add_argument('-f', '--file-paths', help='If this is set, only the indicated files will be translated (" , " separated).')
parser.add_argument('-n', '--dont-cd', action='store_false', help="If this is true, the script won't change the current directory.")
parser.add_argument('-t', '--threads', default=5, type=int, help="Number of threads to use to translate a directory.")
#parser.add_argument('-v', '--verbose', action='store_false', help="Get the time it takes to translate each page.")
args = parser.parse_args()
source_folder = os.path.dirname(os.path.dirname(os.path.abspath(sys.argv[0])))
dest_folder = tempfile.mkdtemp()
language = args.language.capitalize()
branch = args.branch
model = args.model
org_id = args.org_id
num_threads = args.threads
#VERBOSE = args.verbose
client = OpenAI(
api_key=args.api_key,
organization=org_id
)
# Start with the current directory.
current_dir = os.getcwd()
# Check if model is gpt-3.5
if "gpt-3.5" in model:
MAX_TOKENS = 2000
# Check the current directory
if check_git_dir(current_dir):
print('Found .git directory in current directory: ' + current_dir)
else:
# Check the parent directory
parent_dir = os.path.dirname(current_dir)
if check_git_dir(parent_dir):
print('Found .git directory in parent directory: ' + parent_dir)
# Change the current working directory to the parent directory
os.chdir(parent_dir)
print('Current working directory has been changed to: ' + os.getcwd())
else:
print('No .git directory found in current or parent directory. Exiting.')
exit(1)
current_dir = os.getcwd()
print(f"The translated files will be copied to {current_dir}, make sure this is the expected folder.")
if not args.dont_cd:
# Change to the parent directory
os.chdir(source_folder)
translate_files = None # Need to initialize it here to avoid error
if args.file_paths:
# Translate only the indicated file
translate_files = [f for f in args.file_paths.split(' , ') if f]
for file_path in translate_files:
#with tqdm(total=len(all_markdown_files), desc="Translating Files") as pbar:
with concurrent.futures.ThreadPoolExecutor(max_workers=num_threads) as executor:
futures = []
future = executor.submit(translate_file, language, file_path, os.path.join(dest_folder, file_path), model, client)
futures.append(future)
for future in concurrent.futures.as_completed(futures):
try:
future.result()
#pbar.update()
except Exception as exc:
print(f'Translation generated an exception: {exc}')
# Delete possibly removed files from the master branch
delete_unique_files(branch)
elif args.directory:
# Translate everything
translate_directory(language, source_folder, dest_folder, model, num_threads, client)
else:
print("You need to indicate either a directory or a list of files to translate.")
exit(1)
# Copy summary
copy_summary(source_folder, dest_folder)
# Copy .gitbook folder
copy_gitbook_dir(source_folder, dest_folder)
# Create the branch and copy the translated files
cp_translation_to_repo_dir_and_check_gh_branch(branch, dest_folder, translate_files)
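    # Illustrative invocation (hypothetical language, branch name and key; the flags match
    # the argparse definitions above):
    #   python3 scripts/translator.py -l spanish -b es -k $OPENAI_API_KEY -d -t 5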
| [
"The following is content from a hacking book about hacking techiques. The following content is from the file PLACEHOLDER. Translate the relevant English text to PLACEHOLDER and return the translation keeping excatly the same markdown and html syntax. Do not translate things like code, hacking technique names, hacking word, cloud/SaaS platform names (like Workspace, aws, gcp...), the word 'leak', pentesting, and markdown tags. Also don't add any extra stuff apart from the translation and markdown syntax.",
"You are a professional hacker, translator and writer. You write everything super clear and as concise as possible without loosing information."
] |
2024-01-10 | adinkralabs/datasets | datasets~openwebtext~openwebtext.py | # coding=utf-8
# Copyright 2020 The HuggingFace Datasets Authors and the current dataset script contributor.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""The Open WebText Corpus"""
from __future__ import absolute_import, division, print_function
import os
import re
from itertools import chain
import datasets
_CITATION = """\
@misc{Gokaslan2019OpenWeb,
title={OpenWebText Corpus},
author={Aaron Gokaslan*, Vanya Cohen*, Ellie Pavlick, Stefanie Tellex},
  howpublished = {\\url{http://Skylion007.github.io/OpenWebTextCorpus}},
year={2019}
}
"""
_DESCRIPTION = """\
An open-source replication of the WebText dataset from OpenAI.
"""
_URL = "https://zenodo.org/record/3834942/files/openwebtext.tar.xz"
class Openwebtext(datasets.GeneratorBasedBuilder):
"""The Open WebText dataset."""
BUILDER_CONFIGS = [
datasets.BuilderConfig(
name="plain_text",
description="Plain text",
version=datasets.Version("1.0.0"),
)
]
def _info(self):
return datasets.DatasetInfo(
description=_DESCRIPTION,
features=datasets.Features({"text": datasets.Value("string")}),
homepage="https://skylion007.github.io/OpenWebTextCorpus/",
citation=_CITATION,
)
def _split_generators(self, dl_manager):
dl_dir = dl_manager.download_and_extract(_URL)
owt_dir = os.path.join(dl_dir, "openwebtext")
subset_xzs = [
os.path.join(owt_dir, file_name)
for file_name in sorted(os.listdir(owt_dir))
if file_name.endswith("xz") # filter out ...xz.lock
]
ex_dirs = dl_manager.extract(subset_xzs, num_proc=round(os.cpu_count() * 0.75))
nested_txt_files = [
[
os.path.join(ex_dir, txt_file_name)
for txt_file_name in sorted(os.listdir(ex_dir))
if txt_file_name.endswith("txt")
]
for ex_dir in ex_dirs
]
txt_files = chain(*nested_txt_files)
return [
datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"txt_files": txt_files}),
]
def _generate_examples(self, txt_files):
""" Yields examples. """
for idx, filepath in enumerate(txt_files):
with open(filepath, encoding="utf-8") as f:
yield idx, {"text": re.sub("\n\n\n+", "\n\n", f.read()).strip()}
| [] |
2024-01-10 | TARTRL/TiKick | tmarl~envs~env_wrappers.py | """
Modified from OpenAI Baselines code to work with multi-agent envs
"""
import numpy as np
from multiprocessing import Process, Pipe
from abc import ABC, abstractmethod
from tmarl.utils.util import tile_images
class CloudpickleWrapper(object):
"""
Uses cloudpickle to serialize contents (otherwise multiprocessing tries to use pickle)
"""
def __init__(self, x):
self.x = x
def __getstate__(self):
import cloudpickle
return cloudpickle.dumps(self.x)
def __setstate__(self, ob):
import pickle
self.x = pickle.loads(ob)
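# Note (illustrative, not from the original file): cloudpickle is needed because the env
# factories handed to SubprocVecEnv / ShareSubprocVecEnv below are usually closures or
# lambdas wrapping an environment constructor, which the standard pickle module cannot
# serialize across Process boundaries.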
class ShareVecEnv(ABC):
"""
An abstract asynchronous, vectorized environment.
Used to batch data from multiple copies of an environment, so that
    each observation becomes a batch of observations, and the expected action is a batch of actions to
be applied per-environment.
"""
closed = False
viewer = None
metadata = {
'render.modes': ['human', 'rgb_array']
}
def __init__(self, num_envs, observation_space, share_observation_space, action_space):
self.num_envs = num_envs
self.observation_space = observation_space
self.share_observation_space = share_observation_space
self.action_space = action_space
@abstractmethod
def reset(self):
"""
Reset all the environments and return an array of
observations, or a dict of observation arrays.
If step_async is still doing work, that work will
be cancelled and step_wait() should not be called
until step_async() is invoked again.
"""
pass
@abstractmethod
def step_async(self, actions):
"""
Tell all the environments to start taking a step
with the given actions.
Call step_wait() to get the results of the step.
You should not call this if a step_async run is
already pending.
"""
pass
@abstractmethod
def step_wait(self):
"""
Wait for the step taken with step_async().
Returns (obs, rews, dones, infos):
- obs: an array of observations, or a dict of
arrays of observations.
- rews: an array of rewards
- dones: an array of "episode done" booleans
- infos: a sequence of info objects
"""
pass
def close_extras(self):
"""
Clean up the extra resources, beyond what's in this base class.
Only runs when not self.closed.
"""
pass
def close(self):
if self.closed:
return
if self.viewer is not None:
self.viewer.close()
self.close_extras()
self.closed = True
def step(self, actions):
"""
Step the environments synchronously.
This is available for backwards compatibility.
"""
self.step_async(actions)
return self.step_wait()
def render(self, mode='human'):
imgs = self.get_images()
bigimg = tile_images(imgs)
if mode == 'human':
self.get_viewer().imshow(bigimg)
return self.get_viewer().isopen
elif mode == 'rgb_array':
return bigimg
else:
raise NotImplementedError
def get_images(self):
"""
Return RGB images from each environment
"""
raise NotImplementedError
@property
def unwrapped(self):
if isinstance(self, VecEnvWrapper):
return self.venv.unwrapped
else:
return self
def get_viewer(self):
if self.viewer is None:
from gym.envs.classic_control import rendering
self.viewer = rendering.SimpleImageViewer()
return self.viewer
def worker(remote, parent_remote, env_fn_wrapper):
parent_remote.close()
env = env_fn_wrapper.x()
while True:
cmd, data = remote.recv()
if cmd == 'step':
ob, reward, done, info = env.step(data)
if 'bool' in done.__class__.__name__:
if done:
ob = env.reset()
else:
if np.all(done):
ob = env.reset()
remote.send((ob, reward, done, info))
elif cmd == 'reset':
ob = env.reset()
remote.send((ob))
elif cmd == 'render':
if data == "rgb_array":
fr = env.render(mode=data)
remote.send(fr)
elif data == "human":
env.render(mode=data)
elif cmd == 'reset_task':
ob = env.reset_task()
remote.send(ob)
elif cmd == 'close':
env.close()
remote.close()
break
elif cmd == 'get_spaces':
remote.send((env.observation_space, env.share_observation_space, env.action_space))
elif cmd == 'get_max_step':
remote.send((env.max_steps))
elif cmd == 'get_action': # for behavior cloning
action = env.get_action()
remote.send((action))
else:
raise NotImplementedError
class SubprocVecEnv(ShareVecEnv):
def __init__(self, env_fns, spaces=None):
"""
envs: list of gym environments to run in subprocesses
"""
self.waiting = False
self.closed = False
nenvs = len(env_fns)
self.remotes, self.work_remotes = zip(*[Pipe() for _ in range(nenvs)])
self.ps = [Process(target=worker, args=(work_remote, remote, CloudpickleWrapper(env_fn)))
for (work_remote, remote, env_fn) in zip(self.work_remotes, self.remotes, env_fns)]
for p in self.ps:
p.daemon = True # if the main process crashes, we should not cause things to hang
p.start()
for remote in self.work_remotes:
remote.close()
self.remotes[0].send(('get_spaces', None))
observation_space, share_observation_space, action_space = self.remotes[0].recv()
ShareVecEnv.__init__(self, len(env_fns), observation_space,
share_observation_space, action_space)
def step_async(self, actions):
for remote, action in zip(self.remotes, actions):
remote.send(('step', action))
self.waiting = True
def step_wait(self):
results = [remote.recv() for remote in self.remotes]
self.waiting = False
obs, rews, dones, infos = zip(*results)
return np.stack(obs), np.stack(rews), np.stack(dones), infos
def reset(self):
for remote in self.remotes:
remote.send(('reset', None))
obs = [remote.recv() for remote in self.remotes]
return np.stack(obs)
def get_max_step(self):
for remote in self.remotes:
remote.send(('get_max_step', None))
return np.stack([remote.recv() for remote in self.remotes])
def reset_task(self):
for remote in self.remotes:
remote.send(('reset_task', None))
return np.stack([remote.recv() for remote in self.remotes])
def close(self):
if self.closed:
return
if self.waiting:
for remote in self.remotes:
remote.recv()
for remote in self.remotes:
remote.send(('close', None))
for p in self.ps:
p.join()
self.closed = True
def render(self, mode="rgb_array"):
for remote in self.remotes:
remote.send(('render', mode))
if mode == "rgb_array":
frame = [remote.recv() for remote in self.remotes]
return np.stack(frame)
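# Illustrative sketch (hypothetical factory, not from the original file): a SubprocVecEnv
# is built from zero-argument callables, one per worker process, e.g.
#   envs = SubprocVecEnv([make_env_fn(rank) for rank in range(n_rollout_threads)])
#   obs = envs.reset()
#   obs, rews, dones, infos = envs.step(actions)
# where make_env_fn(rank) returns a thunk that constructs a gym-style environment.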
def shareworker(remote, parent_remote, env_fn_wrapper):
parent_remote.close()
env = env_fn_wrapper.x()
while True:
cmd, data = remote.recv()
if cmd == 'step':
ob, s_ob, reward, done, info, available_actions = env.step(data)
if 'bool' in done.__class__.__name__:
if done:
ob, s_ob, available_actions = env.reset()
else:
if np.all(done):
ob, s_ob, available_actions = env.reset()
remote.send((ob, s_ob, reward, done, info, available_actions))
elif cmd == 'reset':
ob, s_ob, available_actions = env.reset()
remote.send((ob, s_ob, available_actions))
elif cmd == 'reset_task':
ob = env.reset_task()
remote.send(ob)
elif cmd == 'render':
if data == "rgb_array":
fr = env.render(mode=data)
remote.send(fr)
elif data == "human":
env.render(mode=data)
elif cmd == 'close':
env.close()
remote.close()
break
elif cmd == 'get_spaces':
remote.send(
(env.observation_space, env.share_observation_space, env.action_space))
elif cmd == 'render_vulnerability':
fr = env.render_vulnerability(data)
remote.send((fr))
elif cmd == 'get_action': # for behavior cloning
action = env.get_action()
remote.send((action))
else:
raise NotImplementedError
class ShareSubprocVecEnv(ShareVecEnv):
def __init__(self, env_fns, spaces=None):
"""
envs: list of gym environments to run in subprocesses
"""
self.waiting = False
self.closed = False
nenvs = len(env_fns)
self.remotes, self.work_remotes = zip(*[Pipe() for _ in range(nenvs)])
self.ps = [Process(target=shareworker, args=(work_remote, remote, CloudpickleWrapper(env_fn)))
for (work_remote, remote, env_fn) in zip(self.work_remotes, self.remotes, env_fns)]
for p in self.ps:
p.daemon = True # if the main process crashes, we should not cause things to hang
p.start()
for remote in self.work_remotes:
remote.close()
self.remotes[0].send(('get_spaces', None))
observation_space, share_observation_space, action_space = self.remotes[0].recv(
)
ShareVecEnv.__init__(self, len(env_fns), observation_space,
share_observation_space, action_space)
def step_async(self, actions):
for remote, action in zip(self.remotes, actions):
remote.send(('step', action))
self.waiting = True
def step_wait(self):
results = [remote.recv() for remote in self.remotes]
self.waiting = False
obs, share_obs, rews, dones, infos, available_actions = zip(*results)
return np.stack(obs), np.stack(share_obs), np.stack(rews), np.stack(dones), infos, np.stack(available_actions)
def reset(self):
for remote in self.remotes:
remote.send(('reset', None))
results = [remote.recv() for remote in self.remotes]
obs, share_obs, available_actions = zip(*results)
return np.stack(obs), np.stack(share_obs), np.stack(available_actions)
def reset_task(self):
for remote in self.remotes:
remote.send(('reset_task', None))
return np.stack([remote.recv() for remote in self.remotes])
def close(self):
if self.closed:
return
if self.waiting:
for remote in self.remotes:
remote.recv()
for remote in self.remotes:
remote.send(('close', None))
for p in self.ps:
p.join()
self.closed = True
    def get_action(self):  # for behavior cloning
for remote in self.remotes:
remote.send(('get_action', None))
results = [remote.recv() for remote in self.remotes]
return np.concatenate(results)
# single env
class DummyVecEnv(ShareVecEnv):
def __init__(self, env_fns):
self.envs = [fn() for fn in env_fns]
env = self.envs[0]
ShareVecEnv.__init__(self, len(
env_fns), env.observation_space, env.share_observation_space, env.action_space)
self.actions = None
def step_async(self, actions):
self.actions = actions
def step_wait(self):
results = [env.step(a) for (a, env) in zip(self.actions, self.envs)]
obs, rews, dones, infos = map(np.array, zip(*results))
for (i, done) in enumerate(dones):
if 'bool' in done.__class__.__name__:
if done:
obs[i] = self.envs[i].reset()
else:
if np.all(done):
obs[i] = self.envs[i].reset()
self.actions = None
return obs, rews, dones, infos
def reset(self):
obs = [env.reset() for env in self.envs]
return np.array(obs)
def get_max_step(self):
return [env.max_steps for env in self.envs]
def close(self):
for env in self.envs:
env.close()
def render(self, mode="human", playeridx=None):
if mode == "rgb_array":
if playeridx == None:
return np.array([env.render(mode=mode) for env in self.envs])
else:
return np.array([env.render(mode=mode,playeridx=playeridx) for env in self.envs])
elif mode == "human":
for env in self.envs:
if playeridx == None:
env.render(mode=mode)
else:
env.render(mode=mode, playeridx=playeridx)
else:
raise NotImplementedError
class ShareDummyVecEnv(ShareVecEnv):
def __init__(self, env_fns):
self.envs = [fn() for fn in env_fns]
env = self.envs[0]
ShareVecEnv.__init__(self, len(
env_fns), env.observation_space, env.share_observation_space, env.action_space)
self.actions = None
def step_async(self, actions):
self.actions = actions
def step_wait(self):
results = [env.step(a) for (a, env) in zip(self.actions, self.envs)]
obs, share_obs, rews, dones, infos, available_actions = map(
np.array, zip(*results))
for (i, done) in enumerate(dones):
if 'bool' in done.__class__.__name__:
if done:
obs[i], share_obs[i], available_actions[i] = self.envs[i].reset()
else:
if np.all(done):
obs[i], share_obs[i], available_actions[i] = self.envs[i].reset()
self.actions = None
return obs, share_obs, rews, dones, infos, available_actions
def reset(self):
results = [env.reset() for env in self.envs]
obs, share_obs, available_actions = map(np.array, zip(*results))
return obs, share_obs, available_actions
def close(self):
for env in self.envs:
env.close()
def render(self, mode="human"):
if mode == "rgb_array":
return np.array([env.render(mode=mode) for env in self.envs])
elif mode == "human":
for env in self.envs:
env.render(mode=mode)
else:
raise NotImplementedError
def save_replay(self):
for env in self.envs:
env.save_replay()
def get_action(self): # for behavior cloning
results = [env.reset() for env in self.envs]
return results
| [] |
2024-01-10 | SuperDuperAi/SuperChat | pages~4_ANALITIC_CSV.py | import streamlit as st
from lida import Manager, TextGenerationConfig, llm
import os
import openai
from PIL import Image
from io import BytesIO
import base64
openai.api_key = st.secrets["OPENAI_API_KEY"]
def base64_to_image(base64_string):
# Decode the base64 string
byte_data = base64.b64decode(base64_string)
# Use BytesIO to convert the byte data to image
return Image.open(BytesIO(byte_data))
lida = Manager(text_gen=llm("openai"))
textgen_config = TextGenerationConfig(n=1, temperature=0.5, model="gpt-3.5-turbo-0301", use_cache=True)
menu = st.sidebar.selectbox("Choose an Option", ["Summarize", "Question based Graph"])
if menu == "Summarize":
st.subheader("Summarization of your Data")
file_uploader = st.file_uploader("Upload your CSV", type="csv")
if file_uploader is not None:
path_to_save = "filename.csv"
with open(path_to_save, "wb") as f:
f.write(file_uploader.getvalue())
summary = lida.summarize("filename.csv", summary_method="default", textgen_config=textgen_config)
st.write(summary)
goals = lida.goals(summary, n=2, textgen_config=textgen_config)
for goal in goals:
st.write(goal)
i = 0
library = "seaborn"
textgen_config = TextGenerationConfig(n=1, temperature=0.2, use_cache=True)
charts = lida.visualize(summary=summary, goal=goals[i], textgen_config=textgen_config, library=library)
img_base64_string = charts[0].raster
img = base64_to_image(img_base64_string)
st.image(img)
elif menu == "Question based Graph":
st.subheader("Query your Data to Generate Graph")
file_uploader = st.file_uploader("Upload your CSV", type="csv")
if file_uploader is not None:
path_to_save = "filename1.csv"
with open(path_to_save, "wb") as f:
f.write(file_uploader.getvalue())
text_area = st.text_area("Query your Data to Generate Graph", height=200)
if st.button("Generate Graph"):
if len(text_area) > 0:
st.info("Your Query: " + text_area)
lida = Manager(text_gen=llm("openai"))
textgen_config = TextGenerationConfig(n=1, temperature=0.2, use_cache=True)
summary = lida.summarize("filename1.csv", summary_method="default", textgen_config=textgen_config)
user_query = text_area
charts = lida.visualize(summary=summary, goal=user_query, textgen_config=textgen_config)
charts[0]
image_base64 = charts[0].raster
img = base64_to_image(image_base64)
st.image(img)
| [
"filename1.csv",
"filename.csv"
] |
2024-01-10 | SuperDuperAi/SuperChat | pages~2_NEWS_SUMMARIZE.py | import time
import streamlit as st
from langchain.document_loaders import NewsURLLoader
from runtime import model
st.title("SuperChat with NEWS (any articles)")
st.markdown(
"**Chat with Claude v2 on Bedrock (100k context)")
if 'doc_news' not in st.session_state:
st.session_state['doc_news'] = ""
url = ''
if st.experimental_get_query_params():
    url = st.experimental_get_query_params().get('url', [''])[0]
input_url = st.text_input("Enter a URL:", value=url)
if "messages" not in st.session_state:
st.session_state.messages = []
if input_url and st.session_state['doc_news'] == "":
with st.spinner('Processing'):
loader = NewsURLLoader(urls=[input_url])
docs = loader.load()
page_content = str(docs[0].page_content)
metadata = docs[0].metadata
# Define prompt
prompt_template = f"""
I'm going to give you a document from web url (news or article, post blog).
Generate a summarized and episodic narrative from the following text.
Divide the summary into three acts, 10 slides.
Identify key theses, important figures, and locations.
Make sure each episode fits into a 5-10 second slide for short-form videos like shorts, reels, or TikTok.
Conclude with a main takeaway.
Retain the essence of the original text.
Here is the document:
<document>
Title: {metadata['title']}
Language: {metadata['language']}
Description: {metadata['description']}
Publish Date: {metadata['publish_date']}
Page Content: {page_content}
</document>
Thus, the format of your overall response should look like example what's shown between the <example></example> tags.
For generating a script that dynamically adapts to the length of the input text,The aim would be to maintain the integrity of the essential points while condensing information if the text is too long.
Make sure to follow the formatting and spacing exactly.
<example>
# title
## subtitle
### Summary:
---
### Scenes (describe the scenes in the video):
[including, if possible, descriptions, quotes and characters]
---
### Analysis:
(1) Identify the main themes and problems discussed.
(2) List interesting theses and quotes.
(3) Identify the main characters.
(4) Suggest tags for linking with articles.
(5) Sentiment Analysis. Is the sentiment expressed in the text positive, negative, or neutral? Please provide evidence from the text to support your assessment. Additionally, on a scale of -1 to 1, where -1 is extremely negative, 0 is neutral, and 1 is extremely positive, rate the sentiment of the text.
(6) Political Orientation Analysis.
(7) Fake news detection or manipulation, critical thinking.
### Questions:
Q1:
Q2:
Q3:
[Provide three follow-up questions worded as if I'm asking you.
Format in bold as Q1, Q2, and Q3. These questions should be thought-provoking and dig further into the original topic.]
</example>
Answer the question immediately without preamble.
"""
# st.info(prompt_template)
with st.chat_message("assistant"):
# st.info(prompt_template)
st.warning(f"Page len: {len(page_content)}")
        st.experimental_set_query_params(url=input_url)
news_summarise = model.predict(input=prompt_template)
st.session_state['doc_news'] = news_summarise
st.session_state.messages.append({"role": "assistant", "content": news_summarise})
# with st.chat_message("assistant"):
# st.markdown(news_summarise)
for message in st.session_state.messages:
with st.chat_message(message["role"]):
st.markdown(message["content"])
# Initialize session state for chat input if it doesn't already exist
prompt_disabled = (st.session_state['doc_news'] == "")
if prompt := st.chat_input("What is up?", disabled=prompt_disabled):
st.session_state.messages.append({"role": "user", "content": prompt})
with st.chat_message("user"):
st.markdown(prompt)
with st.chat_message("assistant"):
message_placeholder = st.empty()
full_response = ""
processed_prompt = prompt
result = model.predict(input=prompt)
for chunk in result:
full_response += chunk
time.sleep(0.01)
message_placeholder.markdown(full_response + "▌")
message_placeholder.markdown(result)
st.session_state.messages.append({"role": "assistant", "content": full_response})
| [
"doc_news",
"\n I'm going to give you a document from web url (news or article, post blog).\n \n Generate a summarized and episodic narrative from the following text. \n Divide the summary into three acts, 10 slides. \n Identify key theses, important figures, and locations. \n Make sure each episode fits into a 5-10 second slide for short-form videos like shorts, reels, or TikTok. \n Conclude with a main takeaway. \n Retain the essence of the original text.\n\n Here is the document:\n <document>\n Title: PLACEHOLDER\n Language: PLACEHOLDER\n Description: PLACEHOLDER\n Publish Date: PLACEHOLDER\n\n Page Content: PLACEHOLDER\n </document>\n \n Thus, the format of your overall response should look like example what's shown between the <example></example> tags. \n For generating a script that dynamically adapts to the length of the input text,The aim would be to maintain the integrity of the essential points while condensing information if the text is too long.\n Make sure to follow the formatting and spacing exactly. \n \n <example>\n # title\n ## subtitle\n \n ### Summary: \n \n ---\n ### Scenes (describe the scenes in the video):\n [including, if possible, descriptions, quotes and characters]\n \n ---\n ### Analysis:\n (1) Identify the main themes and problems discussed.\n (2) List interesting theses and quotes.\n (3) Identify the main characters.\n (4) Suggest tags for linking with articles.\n (5) Sentiment Analysis. Is the sentiment expressed in the text positive, negative, or neutral? Please provide evidence from the text to support your assessment. Additionally, on a scale of -1 to 1, where -1 is extremely negative, 0 is neutral, and 1 is extremely positive, rate the sentiment of the text.\n (6) Political Orientation Analysis. \n (7) Fake news detection or manipulation, critical thinking.\n \n ### Questions:\n Q1:\n Q2:\n Q3:\n \n [Provide three follow-up questions worded as if I'm asking you. \n Format in bold as Q1, Q2, and Q3. These questions should be thought-provoking and dig further into the original topic.]\n\n </example>\n \n Answer the question immediately without preamble.\n \n "
] |
2024-01-10 | SuperDuperAi/SuperChat | runtime.py | import time
import boto3
import streamlit as st
from langchain.chains import ConversationChain
from langchain.llms.bedrock import Bedrock
from langchain.memory import ConversationBufferMemory
from langchain.prompts.prompt import PromptTemplate
from botocore import config
with open("styles.css") as css:
st.markdown(f'<style>{css.read()}</style>', unsafe_allow_html=True)
bedrock_runtime = boto3.client(
service_name="bedrock-runtime",
region_name="us-east-1",
aws_access_key_id=st.secrets["AWS_ACCESS_KEY_ID"],
aws_secret_access_key=st.secrets["AWS_SECRET_ACCESS_KEY"],
config=config.Config(
connect_timeout=1000,
read_timeout=3000
)
)
# add to sidebar inputs max_tokens_to_sample
st.sidebar.subheader('Model parameters')
max_tokens_to_sample = st.sidebar.slider('tokens to answer', 256, 8000, 4000)
llm = Bedrock(client=bedrock_runtime, model_id="anthropic.claude-v2")
llm.model_kwargs = {"temperature": 0.7, "max_tokens_to_sample": max_tokens_to_sample}
@st.cache_resource
def load_llm():
DEFAULT_TEMPLATE = """{history}\n\nHuman: {input}\n\nAssistant:"""
prompt = PromptTemplate(
input_variables=["history", "input"],
template=DEFAULT_TEMPLATE
)
model = ConversationChain(
prompt=prompt,
llm=llm,
# verbose=True,
memory=ConversationBufferMemory(
human_prefix="\n\nHuman: ",
ai_prefix="\n\nAssistant:"
)
)
return model
model = load_llm()
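# Illustrative usage (hypothetical input): the pages importing `model` call
# model.predict(input="...") and the ConversationBufferMemory above keeps the
# Human/Assistant turns between calls, so follow-up questions see earlier context.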
| [
"{history}\n\nHuman: {input}\n\nAssistant:",
"input"
] |
2024-01-10 | SuperDuperAi/SuperChat | pages~1_CHAT_MEETING.py | import time
import streamlit as st
from PyPDF2 import PdfReader
from langchain.text_splitter import CharacterTextSplitter, RecursiveCharacterTextSplitter
from runtime import model
st.title("SuperChat Ai with Meeting transcript")
st.markdown(
"**Chat with Claude v2 on Bedrock (100k context)**")
# add to sidebar inputs max_tokens_to_sample
st.sidebar.subheader('Model parameters')
pdf_docs = None
if 'doc_pdf' not in st.session_state:
st.session_state['doc_pdf'] = ""
instruct_value = ""
instruct_text = ""
with st.sidebar:
st.subheader('Parameters')
chunk_size = st.sidebar.slider('chunk_size', 0, 10000, 1000)
pdf_chunks_limit = st.sidebar.slider('pdf_chunks_limit', 0, 95000, 90000)
pdf_docs = st.file_uploader(
"Upload your pdfs here and click on 'Process'", accept_multiple_files=True, type=['txt'])
if pdf_docs and st.session_state['doc_pdf'] == "":
    with st.spinner('Processing'):
text = ""
for pdf in pdf_docs:
# pdf_reader = PdfReader(pdf)
# for page in pdf_reader.pages:
# text += page.extract_text()
text += pdf.read().decode("utf-8")
text_splitter = CharacterTextSplitter(
separator="\n",
chunk_size=chunk_size,
chunk_overlap=200,
length_function=len
)
chunks = text_splitter.split_text(text=text)
data = ""
for chunk in chunks:
            if len(data) > pdf_chunks_limit:  # use the sidebar limit rather than a hard-coded value
                st.warning('Transcript is large, only the first part of it will be used')
break
data += chunk
prompt_template =f"""I'm going to provide you with a transcript or document from a recent webinar or meeting. I'd like you to create an extensive long-read article suitable for blog publication based on this information. Please adhere to the following sections and guidelines in your response:
Summary:
Key Takeaways and Challenges:
a. Core Concepts: Identify and discuss the central ideas and challenges covered in the webinar.
b. Noteworthy Statements and Data Points: List any compelling arguments, statistics, or quotes.
c. Key Participants: Identify the main speakers and elaborate on their roles and points of view.
d. Related Topics and Influencers: Suggest tags for associating this content with other relevant webinars, articles, or experts in the field.
Structural Analysis According to Three Segments:
a. Introduction: Summarize the opening segment, including the setting, main speakers, and the primary topics to be covered.
b. Discussion and Debate: Describe the core discussions, disagreements, and breakthrough moments.
c. Conclusion and Takeaways: Sum up how the webinar ended, including any conclusions, action items, or unresolved questions.
Content Evaluation:
a. Sentiment Analysis: Is the overall sentiment positive, negative, or neutral? Provide textual evidence. Rate the sentiment on a scale of -1 to 1.
b. Content Safety Check: Examine for any content promoting violence, discrimination, or hatred towards individuals/groups and provide supporting excerpts.
Speaker Metrics:
Clarity Score: On a scale of 1 to 10, rate how clearly the speaker articulated their points.
Engagement Level: On a scale of 1 to 10, rate how well the speaker engaged the audience.
Subject Mastery: On a scale of 1 to 10, rate the speaker's expertise in the subject matter.
Pace and Timing: On a scale of 1 to 10, rate the appropriateness of the speaker's pace and use of time.
Audience Interaction: On a scale of 1 to 10, rate the speaker's ability to interact with and respond to the audience.
Visual Aids: On a scale of 1 to 10, rate the effectiveness of any visual aids, slides, or props used.
Meeting Metrics:
Agenda Adherence: On a scale of 1 to 10, rate how closely the meeting stuck to its intended agenda.
Content Relevance: On a scale of 1 to 10, rate the relevance of the content presented to the stated purpose of the meeting.
Collaboration Quality: On a scale of 1 to 10, rate the quality of discussions, debates, and collaborations.
Outcome Achievement: On a scale of 1 to 10, rate how well the meeting achieved its intended outcomes or objectives.
Duration Appropriateness: On a scale of 1 to 10, rate whether the meeting duration was appropriate for its content and objectives.
Technical Execution: On a scale of 1 to 10, rate the quality of the audio, video, and any other technical aspects.
Here is the document:
<document>
{data}
</document>
Result in Markdown format.
Answer in 8000 words or less.
### Questions:
[Provide three follow-up questions worded as if I'm asking you.
Format in bold as Q1, Q2, and Q3. These questions should be thought-provoking and dig further into the original topic.]
"""
# st.info(prompt_template)
with st.chat_message("assistant"):
st.session_state['doc_pdf'] = data
            st.success(f'Text chunks generated, total characters: {len(data)}')
pdf_summarise = model.predict(input=prompt_template)
st.session_state.messages.append({"role": "assistant", "content": pdf_summarise})
if "messages" not in st.session_state:
st.session_state.messages = []
for message in st.session_state.messages:
with st.chat_message(message["role"]):
st.markdown(message["content"])
# Initialize session state for chat input if it doesn't already exist
prompt_disabled = (st.session_state['doc_pdf'] == "")
if prompt := st.chat_input("What is up?", disabled=prompt_disabled):
st.session_state.messages.append({"role": "user", "content": prompt})
with st.chat_message("user"):
st.markdown(prompt)
with st.chat_message("assistant"):
message_placeholder = st.empty()
full_response = ""
processed_prompt = prompt
result = model.predict(input=prompt)
for chunk in result:
full_response += chunk
time.sleep(0.01)
message_placeholder.markdown(full_response + "▌")
message_placeholder.markdown(result)
st.session_state.messages.append({"role": "assistant", "content": full_response})
| [
"I'm going to provide you with a transcript or document from a recent webinar or meeting. I'd like you to create an extensive long-read article suitable for blog publication based on this information. Please adhere to the following sections and guidelines in your response:\n\nSummary:\n\nKey Takeaways and Challenges:\na. Core Concepts: Identify and discuss the central ideas and challenges covered in the webinar.\nb. Noteworthy Statements and Data Points: List any compelling arguments, statistics, or quotes.\nc. Key Participants: Identify the main speakers and elaborate on their roles and points of view.\nd. Related Topics and Influencers: Suggest tags for associating this content with other relevant webinars, articles, or experts in the field.\n\nStructural Analysis According to Three Segments:\na. Introduction: Summarize the opening segment, including the setting, main speakers, and the primary topics to be covered.\nb. Discussion and Debate: Describe the core discussions, disagreements, and breakthrough moments.\nc. Conclusion and Takeaways: Sum up how the webinar ended, including any conclusions, action items, or unresolved questions.\n\nContent Evaluation:\na. Sentiment Analysis: Is the overall sentiment positive, negative, or neutral? Provide textual evidence. Rate the sentiment on a scale of -1 to 1.\nb. Content Safety Check: Examine for any content promoting violence, discrimination, or hatred towards individuals/groups and provide supporting excerpts.\n\nSpeaker Metrics:\nClarity Score: On a scale of 1 to 10, rate how clearly the speaker articulated their points.\nEngagement Level: On a scale of 1 to 10, rate how well the speaker engaged the audience.\nSubject Mastery: On a scale of 1 to 10, rate the speaker's expertise in the subject matter.\nPace and Timing: On a scale of 1 to 10, rate the appropriateness of the speaker's pace and use of time.\nAudience Interaction: On a scale of 1 to 10, rate the speaker's ability to interact with and respond to the audience.\nVisual Aids: On a scale of 1 to 10, rate the effectiveness of any visual aids, slides, or props used.\n\nMeeting Metrics:\nAgenda Adherence: On a scale of 1 to 10, rate how closely the meeting stuck to its intended agenda.\nContent Relevance: On a scale of 1 to 10, rate the relevance of the content presented to the stated purpose of the meeting.\nCollaboration Quality: On a scale of 1 to 10, rate the quality of discussions, debates, and collaborations.\nOutcome Achievement: On a scale of 1 to 10, rate how well the meeting achieved its intended outcomes or objectives.\nDuration Appropriateness: On a scale of 1 to 10, rate whether the meeting duration was appropriate for its content and objectives.\nTechnical Execution: On a scale of 1 to 10, rate the quality of the audio, video, and any other technical aspects.\n\nHere is the document:\n<document>\n\n</document>\n\nResult in Markdown format.\nAnswer in 8000 words or less.\n\n### Questions:\n[Provide three follow-up questions worded as if I'm asking you. \nFormat in bold as Q1, Q2, and Q3. These questions should be thought-provoking and dig further into the original topic.]\n\n "
] |
2024-01-10 | SuperDuperAi/SuperChat | pages~1_CHAT_PDF.py | import time
import streamlit as st
from PyPDF2 import PdfReader
from langchain.text_splitter import CharacterTextSplitter, RecursiveCharacterTextSplitter
from runtime import model
st.title("SuperChat Ai with PDF")
st.markdown(
"**Chat with Claude v2 on Bedrock (100k context). Get started by uploading a PDF!**")
# add to sidebar inputs max_tokens_to_sample
st.sidebar.subheader('Model parameters')
pdf_docs = None
if 'doc_pdf' not in st.session_state:
st.session_state['doc_pdf'] = ""
instruct_value = ""
instruct_text = ""
with st.sidebar:
st.subheader('Parameters')
chunk_size = st.sidebar.slider('chunk_size', 0, 10000, 1000)
pdf_chunks_limit = st.sidebar.slider('pdf_chunks_limit', 0, 95000, 90000)
pdf_docs = st.file_uploader(
"Upload your pdfs here and click on 'Process'", accept_multiple_files=True, type=['pdf'])
if pdf_docs and st.session_state['doc_pdf'] == "":
with st.spinner('Processing'):
text = ""
for pdf in pdf_docs:
pdf_reader = PdfReader(pdf)
for page in pdf_reader.pages:
text += page.extract_text()
text_splitter = CharacterTextSplitter(
separator="\n",
chunk_size=chunk_size,
chunk_overlap=200,
length_function=len
)
chunks = text_splitter.split_text(text=text)
data = ""
for chunk in chunks:
            if len(data) > pdf_chunks_limit:  # use the sidebar limit rather than a hard-coded value
                st.warning('PDF is large, only the first part of it will be used')
break
data += chunk
prompt_template = f"""
I'm going to provide you with document (book) in pdf file.
Then, I'll ask you to create an extensive long-read article suitable for blog publication based on the information.
Please adhere to the following sections and guidelines in your response:
Literary Analysis:
a. Main Themes and Challenges: Identify and discuss the overarching themes and problems.
b. Engaging Theses and Quotations: List interesting theses and quotes.
c. Principal Characters: Identify the main characters and elaborate on their roles.
d. Inter-Textual Links: Suggest tags for associating with other literary works and authors.
Episodic Description According to Three-Act Structure:
a. Act 1 - Setup: Provide a summary of the initial act, establishing the setting, characters, and the main conflicts.
b. Act 2 - Confrontation: Describe the events and obstacles the main characters face, leading to the climax of the story.
c. Act 3 - Resolution: Sum up how the story concludes, including how conflicts are resolved and the state of the characters.
Content Assessment:
a. Sentiment Analysis: Determine whether the sentiment in the text is positive, negative, or neutral, providing textual evidence. Rate the sentiment on a scale of -1 to 1.
b. Destructive Content Detection: Check for any content promoting violence, discrimination, or hatred towards individuals/groups and provide supporting excerpts.
Readability Metrics:
a. Provide the Flesch Reading Ease score, Flesch-Kincaid Grade Level, and Gunning Fog Index.
Political Orientation Analysis:
a. Identify and explain liberal or conservative values, democratic or autocratic tendencies, and militaristic or humanistic themes in the text.
b. Summarize the political orientation and rate on a scale of -1 to 1 for each dimension (Liberal-Conservative, Democratic-Autocratic, Militaristic-Humanistic).
Here is the document:
<document>
{data}
</document>
Result in Markdown format.
Answer in 8000 words or less.
### Questions:
[Provide three follow-up questions worded as if I'm asking you.
Format in bold as Q1, Q2, and Q3. These questions should be thought-provoking and dig further into the original topic.]
"""
# st.info(prompt_template)
with st.chat_message("assistant"):
st.session_state['doc_pdf'] = data
            st.success(f'Text chunks generated, total characters: {len(data)}')
pdf_summarise = model.predict(input=prompt_template)
st.session_state.messages.append({"role": "assistant", "content": pdf_summarise})
if "messages" not in st.session_state:
st.session_state.messages = []
for message in st.session_state.messages:
with st.chat_message(message["role"]):
st.markdown(message["content"])
# Initialize session state for chat input if it doesn't already exist
prompt_disabled = (st.session_state['doc_pdf'] == "")
if prompt := st.chat_input("What is up?", disabled=prompt_disabled):
st.session_state.messages.append({"role": "user", "content": prompt})
with st.chat_message("user"):
st.markdown(prompt)
with st.chat_message("assistant"):
message_placeholder = st.empty()
full_response = ""
processed_prompt = prompt
result = model.predict(input=prompt)
for chunk in result:
full_response += chunk
time.sleep(0.01)
message_placeholder.markdown(full_response + "▌")
message_placeholder.markdown(result)
st.session_state.messages.append({"role": "assistant", "content": full_response})
| [
"\n I'm going to provide you with document (book) in pdf file.\n Then, I'll ask you to create an extensive long-read article suitable for blog publication based on the information. \n Please adhere to the following sections and guidelines in your response:\n \n Literary Analysis:\n a. Main Themes and Challenges: Identify and discuss the overarching themes and problems.\n b. Engaging Theses and Quotations: List interesting theses and quotes.\n c. Principal Characters: Identify the main characters and elaborate on their roles.\n d. Inter-Textual Links: Suggest tags for associating with other literary works and authors.\n \n Episodic Description According to Three-Act Structure:\n a. Act 1 - Setup: Provide a summary of the initial act, establishing the setting, characters, and the main conflicts.\n b. Act 2 - Confrontation: Describe the events and obstacles the main characters face, leading to the climax of the story.\n c. Act 3 - Resolution: Sum up how the story concludes, including how conflicts are resolved and the state of the characters.\n \n Content Assessment:\n a. Sentiment Analysis: Determine whether the sentiment in the text is positive, negative, or neutral, providing textual evidence. Rate the sentiment on a scale of -1 to 1.\n b. Destructive Content Detection: Check for any content promoting violence, discrimination, or hatred towards individuals/groups and provide supporting excerpts.\n \n Readability Metrics:\n a. Provide the Flesch Reading Ease score, Flesch-Kincaid Grade Level, and Gunning Fog Index.\n \n Political Orientation Analysis:\n a. Identify and explain liberal or conservative values, democratic or autocratic tendencies, and militaristic or humanistic themes in the text.\n b. Summarize the political orientation and rate on a scale of -1 to 1 for each dimension (Liberal-Conservative, Democratic-Autocratic, Militaristic-Humanistic).\n \n Here is the document:\n <document>\n \n </document>\n \n Result in Markdown format.\n Answer in 8000 words or less.\n \n ### Questions:\n [Provide three follow-up questions worded as if I'm asking you. \n Format in bold as Q1, Q2, and Q3. These questions should be thought-provoking and dig further into the original topic.]\n\n "
] |
2024-01-10 | bahagh/assignment | task2~fastrun.py | from flask import Flask, request, jsonify, render_template
import urllib.request
import xml.etree.ElementTree as ET
import re
import openai
import yaml
with open('key.yml', 'r') as file:
data = yaml.safe_load(file)
app = Flask(__name__)
openai.api_key = data.get('api').get('key')
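# Assumed layout of key.yml, inferred from the lookup above (illustrative values):
# api:
#   key: "sk-..."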
##########################################################################################################################################
##########################################################################################################################################
##########################################################################################################################################
papers_list = ["Title: Lawyer LLaMA Technical Report\nSummary: Large Language Models (LLMs), like LLaMA, have exhibited remarkable\nperformance across various tasks. Nevertheless, when deployed to specific\ndomains such as law or medicine, the models still confront the challenge of a\ndeficiency in domain-specific knowledge and an inadequate capability to\nleverage that knowledge to resolve domain-related problems. In this paper, we\npropose a new framework to adapt LLMs to specific domains and build Lawyer\nLLaMA, a legal domain LLM, based on this framework. Specifically, we inject\ndomain knowledge during the continual training stage and teach the model to\nlearn professional skills using properly designed supervised fine-tuning tasks.\nMoreover, to alleviate the hallucination problem during the model's generation,\nwe add a retrieval module and extract relevant legal articles before the model\nanswers any queries. When learning domain-specific skills, we find that\nexperts' experience is much more useful than experiences distilled from\nChatGPT, where hundreds of expert-written data outperform tens of thousands of\nChatGPT-generated ones. We will release our model and data.\n\n",
'Title: Label Supervised LLaMA Finetuning\nSummary: The recent success of Large Language Models (LLMs) has gained significant\nattention in both academia and industry. Substantial efforts have been made to\nenhance the zero- and few-shot generalization capabilities of open-source LLMs\nthrough finetuning. Currently, the prevailing approach is instruction-tuning,\nwhich trains LLMs to complete real-world tasks by generating responses guided\nby natural language instructions. It is worth noticing that such an approach\nmay underperform in sequence and token classification tasks. Unlike text\ngeneration tasks, classification tasks have a limited label space, where\nprecise label prediction is more appreciated than generating diverse and\nhuman-like responses. Prior research has unveiled that instruction-tuned LLMs\ncannot outperform BERT, prompting us to explore the potential of leveraging\nlatent representations from LLMs for supervised label prediction. In this\npaper, we introduce a label-supervised adaptation for LLMs, which aims to\nfinetuning the model with discriminant labels. We evaluate this approach with\nLabel Supervised LLaMA (LS-LLaMA), based on LLaMA-2-7B, a relatively\nsmall-scale LLM, and can be finetuned on a single GeForce RTX4090 GPU. We\nextract latent representations from the final LLaMA layer and project them into\nthe label space to compute the cross-entropy loss. The model is finetuned by\nLow-Rank Adaptation (LoRA) to minimize this loss. Remarkably, without intricate\nprompt engineering or external knowledge, LS-LLaMA substantially outperforms\nLLMs ten times its size in scale and demonstrates consistent improvements\ncompared to robust baselines like BERT-Large and RoBERTa-Large in text\nclassification. Moreover, by removing the causal mask from decoders, LS-unLLaMA\nachieves the state-of-the-art performance in named entity recognition (NER).\nOur work will shed light on a novel approach to adapting LLMs for various\ndownstream tasks.\n\n',
'Title: LLAMA: Leveraging Learning to Automatically Manage Algorithms\nSummary: Algorithm portfolio and selection approaches have achieved remarkable\nimprovements over single solvers. However, the implementation of such systems\nis often highly customised and specific to the problem domain. This makes it\ndifficult for researchers to explore different techniques for their specific\nproblems. We present LLAMA, a modular and extensible toolkit implemented as an\nR package that facilitates the exploration of a range of different portfolio\ntechniques on any problem domain. It implements the algorithm selection\napproaches most commonly used in the literature and leverages the extensive\nlibrary of machine learning algorithms and techniques in R. We describe the\ncurrent capabilities and limitations of the toolkit and illustrate its usage on\na set of example SAT problems.\n\n',
'Title: Challenges and opportunities integrating LLAMA into AdePT\nSummary: Particle transport simulations are a cornerstone of high-energy physics\n(HEP), constituting a substantial part of the computing workload performed in\nHEP. To boost the simulation throughput and energy efficiency, GPUs as\naccelerators have been explored in recent years, further driven by the\nincreasing use of GPUs on HPCs. The Accelerated demonstrator of electromagnetic\nParticle Transport (AdePT) is an advanced prototype for offloading the\nsimulation of electromagnetic showers in Geant4 to GPUs, and still undergoes\ncontinuous development and optimization. Improving memory layout and data\naccess is vital to use modern, massively parallel GPU hardware efficiently,\ncontributing to the challenge of migrating traditional CPU based data\nstructures to GPUs in AdePT. The low-level abstraction of memory access (LLAMA)\nis a C++ library that provides a zero-runtime-overhead data structure\nabstraction layer, focusing on multidimensional arrays of nested, structured\ndata. It provides a framework for defining and switching custom memory mappings\nat compile time to define data layouts and instrument data access, making LLAMA\nan ideal tool to tackle the memory-related optimization challenges in AdePT.\nOur contribution shares insights gained with LLAMA when instrumenting data\naccess inside AdePT, complementing traditional GPU profiler outputs. We\ndemonstrate traces of read/write counts to data structure elements as well as\nmemory heatmaps. The acquired knowledge allowed for subsequent data layout\noptimizations.\n\n',
'Title: LLaMA: Open and Efficient Foundation Language Models\nSummary: We introduce LLaMA, a collection of foundation language models ranging from\n7B to 65B parameters. We train our models on trillions of tokens, and show that\nit is possible to train state-of-the-art models using publicly available\ndatasets exclusively, without resorting to proprietary and inaccessible\ndatasets. In particular, LLaMA-13B outperforms GPT-3 (175B) on most benchmarks,\nand LLaMA-65B is competitive with the best models, Chinchilla-70B and\nPaLM-540B. We release all our models to the research community.\n\n',
"Title: Camoscio: an Italian Instruction-tuned LLaMA\nSummary: In recent years Large Language Models (LLMs) have increased the state of the\nart on several natural language processing tasks. However, their accessibility\nis often limited to paid API services, posing challenges for researchers in\nconducting extensive investigations. On the other hand, while some open-source\nmodels have been proposed by the community, they are typically English-centric\nor multilingual without a specific adaptation for the Italian language. In an\neffort to democratize the available and open resources for the Italian\nlanguage, in this paper we introduce Camoscio: a language model specifically\ntuned to follow users' prompts in Italian. Specifically, we finetuned the\nsmallest variant of LLaMA (7b) with LoRA on a corpus of instruction prompts\ntranslated to Italian via ChatGPT. Results indicate that the model's zero-shot\nperformance on various downstream tasks in Italian competes favorably with\nexisting models specifically finetuned for those tasks. All the artifacts\n(code, dataset, model) are released to the community at the following url:\nhttps://github.com/teelinsan/camoscio\n\n",
'Title: Code Llama: Open Foundation Models for Code\nSummary: We release Code Llama, a family of large language models for code based on\nLlama 2 providing state-of-the-art performance among open models, infilling\ncapabilities, support for large input contexts, and zero-shot instruction\nfollowing ability for programming tasks. We provide multiple flavors to cover a\nwide range of applications: foundation models (Code Llama), Python\nspecializations (Code Llama - Python), and instruction-following models (Code\nLlama - Instruct) with 7B, 13B and 34B parameters each. All models are trained\non sequences of 16k tokens and show improvements on inputs with up to 100k\ntokens. 7B and 13B Code Llama and Code Llama - Instruct variants support\ninfilling based on surrounding content. Code Llama reaches state-of-the-art\nperformance among open models on several code benchmarks, with scores of up to\n53% and 55% on HumanEval and MBPP, respectively. Notably, Code Llama - Python\n7B outperforms Llama 2 70B on HumanEval and MBPP, and all our models outperform\nevery other publicly available model on MultiPL-E. We release Code Llama under\na permissive license that allows for both research and commercial use.\n\n',
"Title: Impact of Tokenization on LLaMa Russian Adaptation\nSummary: Latest instruction-tuned large language models (LLM) show great results on\nvarious tasks, however, they often face performance degradation for non-English\ninput. There is evidence that the reason lies in inefficient tokenization\ncaused by low language representation in pre-training data which hinders the\ncomprehension of non-English instructions, limiting the potential of target\nlanguage instruction-tuning. In this work we investigate the possibility of\naddressing the issue with vocabulary substitution in the context of LLaMa\nRussian language adaptation. We explore three variants of vocabulary adaptation\nand test their performance on Saiga instruction-tuning and fine-tuning on\nRussian Super Glue benchmark. The results of automatic evaluation show that\nvocabulary substitution not only improves the model's quality in Russian but\nalso accelerates fine-tuning (35%) and inference (up to 60%) while reducing\nmemory consumption. Additional human evaluation of the instruction-tuned models\ndemonstrates that models with Russian-adapted vocabulary generate answers with\nhigher user preference than the original Saiga-LLaMa model.\n\n",
"Title: Steering Llama 2 via Contrastive Activation Addition\nSummary: We introduce Contrastive Activation Addition (CAA), an innovative method for\nsteering language models by modifying activations during their forward passes.\nCAA computes ``steering vectors'' by averaging the difference in residual\nstream activations between pairs of positive and negative examples of a\nparticular behavior such as factual versus hallucinatory responses. During\ninference, these steering vectors are added at all token positions after the\nuser's prompt with either a positive or negative coefficient, allowing precise\ncontrol over the degree of the targeted behavior. We evaluate CAA's\neffectiveness on Llama 2 Chat using both multiple-choice behavioral question\ndatasets and open-ended generation tasks. We demonstrate that CAA significantly\nalters model behavior, outperforms traditional methods like finetuning and\nfew-shot prompting, and minimally reduces capabilities. Moreover, by employing\nvarious activation space interpretation methods, we gain deeper insights into\nCAA's mechanisms. CAA both accurately steers model outputs and also sheds light\non how high-level concepts are represented in Large Language Models (LLMs).\n\n",
'Title: VinaLLaMA: LLaMA-based Vietnamese Foundation Model\nSummary: In this technical report, we present VinaLLaMA, an open-weight,\nstate-of-the-art (SOTA) Large Language Model for the Vietnamese language, built\nupon LLaMA-2 with an additional 800 billion trained tokens. VinaLLaMA not only\ndemonstrates fluency in Vietnamese but also exhibits a profound understanding\nof Vietnamese culture, making it a truly indigenous model. VinaLLaMA-7B-chat,\ntrained on 1 million high-quality synthetic samples, achieves SOTA results on\nkey benchmarks, including VLSP, VMLU, and Vicuna Benchmark Vietnamese, marking\na significant advancement in the Vietnamese AI landscape and offering a\nversatile resource for various applications.\n\n',
"Title: DRG-LLaMA : Tuning LLaMA Model to Predict Diagnosis-related Group for\n Hospitalized Patients\nSummary: In the U.S. inpatient payment system, the Diagnosis-Related Group (DRG) is\npivotal, but its assignment process is inefficient. The study introduces\nDRG-LLaMA, an advanced large language model (LLM) fine-tuned on clinical notes\nto enhance DRGs assignment. Utilizing LLaMA as the foundational model and\noptimizing it through Low-Rank Adaptation (LoRA) on 236,192 MIMIC-IV discharge\nsummaries, our DRG-LLaMA-7B model exhibited a noteworthy macro-averaged F1\nscore of 0.327, a top-1 prediction accuracy of 52.0%, and a macro-averaged Area\nUnder the Curve (AUC) of 0.986, with a maximum input token length of 512. This\nmodel surpassed the performance of prior leading models in DRG prediction,\nshowing a relative improvement of 40.3% and 35.7% in macro-averaged F1 score\ncompared to ClinicalBERT and CAML, respectively. Applied to base DRG and\ncomplication or comorbidity (CC)/major complication or comorbidity (MCC)\nprediction, DRG-LLaMA achieved a top-1 prediction accuracy of 67.8% and 67.5%,\nrespectively. Additionally, our findings indicate that DRG-LLaMA's performance\ncorrelates with increased model parameters and input context lengths.\n\n",
"Title: LLAMA: The Low-Level Abstraction For Memory Access\nSummary: The performance gap between CPU and memory widens continuously. Choosing the\nbest memory layout for each hardware architecture is increasingly important as\nmore and more programs become memory bound. For portable codes that run across\nheterogeneous hardware architectures, the choice of the memory layout for data\nstructures is ideally decoupled from the rest of a program. This can be\naccomplished via a zero-runtime-overhead abstraction layer, underneath which\nmemory layouts can be freely exchanged.\n We present the Low-Level Abstraction of Memory Access (LLAMA), a C++ library\nthat provides such a data structure abstraction layer with example\nimplementations for multidimensional arrays of nested, structured data. LLAMA\nprovides fully C++ compliant methods for defining and switching custom memory\nlayouts for user-defined data types. The library is extensible with third-party\nallocators.\n Providing two close-to-life examples, we show that the LLAMA-generated AoS\n(Array of Structs) and SoA (Struct of Arrays) layouts produce identical code\nwith the same performance characteristics as manually written data structures.\nIntegrations into the SPEC CPU\\textsuperscript{\\textregistered} lbm benchmark\nand the particle-in-cell simulation PIConGPU demonstrate LLAMA's abilities in\nreal-world applications. LLAMA's layout-aware copy routines can significantly\nspeed up transfer and reshuffling of data between layouts compared with naive\nelement-wise copying.\n LLAMA provides a novel tool for the development of high-performance C++\napplications in a heterogeneous environment.\n\n",
'Title: HuaTuo: Tuning LLaMA Model with Chinese Medical Knowledge\nSummary: Large Language Models (LLMs), such as the LLaMA model, have demonstrated\ntheir effectiveness in various general-domain natural language processing (NLP)\ntasks. Nevertheless, LLMs have not yet performed optimally in biomedical domain\ntasks due to the need for medical expertise in the responses. In response to\nthis challenge, we propose HuaTuo, a LLaMA-based model that has been\nsupervised-fine-tuned with generated QA (Question-Answer) instances. The\nexperimental results demonstrate that HuaTuo generates responses that possess\nmore reliable medical knowledge. Our proposed HuaTuo model is accessible at\nhttps://github.com/SCIR-HI/Huatuo-Llama-Med-Chinese.\n\n',
"Title: Efficient and Effective Text Encoding for Chinese LLaMA and Alpaca\nSummary: Large Language Models (LLMs), such as ChatGPT and GPT-4, have dramatically\ntransformed natural language processing research and shown promising strides\ntowards Artificial General Intelligence (AGI). Nonetheless, the high costs\nassociated with training and deploying LLMs present substantial obstacles to\ntransparent, accessible academic research. While several large language models,\nsuch as LLaMA, have been open-sourced by the community, these predominantly\nfocus on English corpora, limiting their usefulness for other languages. In\nthis paper, we propose a method to augment LLaMA with capabilities for\nunderstanding and generating Chinese text and its ability to follow\ninstructions. We achieve this by extending LLaMA's existing vocabulary with an\nadditional 20,000 Chinese tokens, thereby improving its encoding efficiency and\nsemantic understanding of Chinese. We further incorporate secondary\npre-training using Chinese data and fine-tune the model with Chinese\ninstruction datasets, significantly enhancing the model's ability to comprehend\nand execute instructions. Our experimental results indicate that the newly\nproposed model markedly enhances the original LLaMA's proficiency in\nunderstanding and generating Chinese content. Additionally, the results on the\nC-Eval dataset yield competitive performance among the models with several\ntimes the size of ours. We have made our pre-trained models, training scripts,\nand other resources available through GitHub, fostering open research for our\ncommunity. GitHub repository: https://github.com/ymcui/Chinese-LLaMA-Alpaca\n\n",
'Title: LLaMA-Adapter V2: Parameter-Efficient Visual Instruction Model\nSummary: How to efficiently transform large language models (LLMs) into instruction\nfollowers is recently a popular research direction, while training LLM for\nmulti-modal reasoning remains less explored. Although the recent LLaMA-Adapter\ndemonstrates the potential to handle visual inputs with LLMs, it still cannot\ngeneralize well to open-ended visual instructions and lags behind GPT-4. In\nthis paper, we present LLaMA-Adapter V2, a parameter-efficient visual\ninstruction model. Specifically, we first augment LLaMA-Adapter by unlocking\nmore learnable parameters (e.g., norm, bias and scale), which distribute the\ninstruction-following ability across the entire LLaMA model besides adapters.\nSecondly, we propose an early fusion strategy to feed visual tokens only into\nthe early LLM layers, contributing to better visual knowledge incorporation.\nThirdly, a joint training paradigm of image-text pairs and\ninstruction-following data is introduced by optimizing disjoint groups of\nlearnable parameters. This strategy effectively alleviates the interference\nbetween the two tasks of image-text alignment and instruction following and\nachieves strong multi-modal reasoning with only a small-scale image-text and\ninstruction dataset. During inference, we incorporate additional expert models\n(e.g. captioning/OCR systems) into LLaMA-Adapter to further enhance its image\nunderstanding capability without incurring training costs. Compared to the\noriginal LLaMA-Adapter, our LLaMA-Adapter V2 can perform open-ended multi-modal\ninstructions by merely introducing 14M parameters over LLaMA. The newly\ndesigned framework also exhibits stronger language-only instruction-following\ncapabilities and even excels in chat interactions. Our code and models are\navailable at https://github.com/ZrrSkywalker/LLaMA-Adapter.\n\n',
"Title: Goat: Fine-tuned LLaMA Outperforms GPT-4 on Arithmetic Tasks\nSummary: We introduce Goat, a fine-tuned LLaMA model that significantly outperforms\nGPT-4 on a range of arithmetic tasks. Fine-tuned on a synthetically generated\ndataset, Goat achieves state-of-the-art performance on BIG-bench arithmetic\nsub-task. In particular, the zero-shot Goat-7B matches or even surpasses the\naccuracy achieved by the few-shot PaLM-540B. Surprisingly, Goat can achieve\nnear-perfect accuracy on large-number addition and subtraction through\nsupervised fine-tuning only, which is almost impossible with previous\npretrained language models, such as Bloom, OPT, GPT-NeoX, etc. We attribute\nGoat's exceptional performance to LLaMA's consistent tokenization of numbers.\nTo tackle more challenging tasks like large-number multiplication and division,\nwe propose an approach that classifies tasks based on their learnability, and\nsubsequently decomposes unlearnable tasks, such as multi-digit multiplication\nand division, into a series of learnable tasks by leveraging basic arithmetic\nprinciples. We thoroughly examine the performance of our model, offering a\ncomprehensive evaluation of the effectiveness of our proposed decomposition\nsteps. Additionally, Goat-7B can be easily trained using LoRA on a 24GB VRAM\nGPU, facilitating reproducibility for other researchers. We release our model,\ndataset, and the Python script for dataset generation.\n\n",
'Title: Parameter-Efficient Fine-Tuning of LLaMA for the Clinical Domain\nSummary: Adapting pretrained language models to novel domains, such as clinical\napplications, traditionally involves retraining their entire set of parameters.\nHowever, this approach is increasingly proven to be impractical owing to the\nsubstantial computational requirements associated with training such large\nlanguage models. To address this issue, Parameter-Efficient Fine-Tuning (PEFT)\ntechniques offer a viable solution by selectively fine-tuning a small subset of\nadditional parameters, significantly reducing the computational requirements\nfor domain adaptation. In this study, we propose Clinical LLaMA-LoRA, a PEFT\nadapter layer built upon the open-sourced LLaMA model. Clinical LLaMA-LoRA is\ntrained using clinical notes obtained from the MIMIC-IV database, thereby\ncreating a specialised adapter designed for the clinical domain. Additionally,\nwe propose a two-step PEFT framework which fuses Clinical LLaMA-LoRA with\nDownstream LLaMA-LoRA, another PEFT adapter specialised for downstream tasks.\nWe evaluate this framework on multiple clinical outcome prediction datasets,\ncomparing it to clinically trained language models. Our proposed framework\nachieves a state-of-the-art AUROC score averaged across all clinical downstream\ntasks. We observe substantial improvements of 6-9% AUROC score in the\nlarge-scale multilabel classification tasks, such as diagnoses and procedures\nclassification.\n\n',
'Title: Llama 2: Open Foundation and Fine-Tuned Chat Models\nSummary: In this work, we develop and release Llama 2, a collection of pretrained and\nfine-tuned large language models (LLMs) ranging in scale from 7 billion to 70\nbillion parameters. Our fine-tuned LLMs, called Llama 2-Chat, are optimized for\ndialogue use cases. Our models outperform open-source chat models on most\nbenchmarks we tested, and based on our human evaluations for helpfulness and\nsafety, may be a suitable substitute for closed-source models. We provide a\ndetailed description of our approach to fine-tuning and safety improvements of\nLlama 2-Chat in order to enable the community to build on our work and\ncontribute to the responsible development of LLMs.\n\n',
"Title: PUMA: Secure Inference of LLaMA-7B in Five Minutes\nSummary: With ChatGPT as a representative, tons of companies have began to provide\nservices based on large Transformers models. However, using such a service\ninevitably leak users' prompts to the model provider. Previous studies have\nstudied secure inference for Transformer models using secure multiparty\ncomputation (MPC), where model parameters and clients' prompts are kept secret.\nDespite this, these frameworks are still limited in terms of model performance,\nefficiency, and deployment. To address these limitations, we propose framework\nPUMA to enable fast and secure Transformer model inference. Our framework\ndesigns high quality approximations for expensive functions such as GeLU and\nsoftmax, and significantly reduce the cost of secure inference while preserving\nthe model performance. Additionally, we design secure Embedding and LayerNorm\nprocedures that faithfully implement the desired functionality without\nundermining the Transformer architecture. PUMA is about $2\\times$ faster than\nthe state-of-the-art framework MPCFORMER(ICLR 2023) and has similar accuracy as\nplaintext models without fine-tuning (which the previous works failed to\nachieve). PUMA can even evaluate LLaMA-7B in around 5 minutes to generate 1\ntoken. To our best knowledge, this is the first time that a model with such a\nparameter size is able to be evaluated under MPC. PUMA has been open-sourced in\nthe Github repository of SecretFlow-SPU.\n\n",
'Title: Financial News Analytics Using Fine-Tuned Llama 2 GPT Model\nSummary: The paper considers the possibility to fine-tune Llama 2 GPT large language\nmodel (LLM) for the multitask analysis of financial news. For fine-tuning, the\nPEFT/LoRA based approach was used. In the study, the model was fine-tuned for\nthe following tasks: analysing a text from financial market perspectives,\nhighlighting main points of a text, summarizing a text and extracting named\nentities with appropriate sentiments. The obtained results show that the\nfine-tuned Llama 2 model can perform a multitask financial news analysis with a\nspecified structure of response, part of response can be a structured text and\nanother part of data can have JSON format for further processing. Extracted\nsentiments for named entities can be considered as predictive features in\nsupervised machine learning models with quantitative target variables.\n\n',
'Title: Making LLaMA SEE and Draw with SEED Tokenizer\nSummary: The great success of Large Language Models (LLMs) has expanded the potential\nof multimodality, contributing to the gradual evolution of General Artificial\nIntelligence (AGI). A true AGI agent should not only possess the capability to\nperform predefined multi-tasks but also exhibit emergent abilities in an\nopen-world context. However, despite the considerable advancements made by\nrecent multimodal LLMs, they still fall short in effectively unifying\ncomprehension and generation tasks, let alone open-world emergent abilities. We\ncontend that the key to overcoming the present impasse lies in enabling text\nand images to be represented and processed interchangeably within a unified\nautoregressive Transformer. To this end, we introduce SEED, an elaborate image\ntokenizer that empowers LLMs with the ability to SEE and Draw at the same time.\nWe identify two crucial design principles: (1) Image tokens should be\nindependent of 2D physical patch positions and instead be produced with a 1D\ncausal dependency, exhibiting intrinsic interdependence that aligns with the\nleft-to-right autoregressive prediction mechanism in LLMs. (2) Image tokens\nshould capture high-level semantics consistent with the degree of semantic\nabstraction in words, and be optimized for both discriminativeness and\nreconstruction during the tokenizer training phase. With SEED tokens, LLM is\nable to perform scalable multimodal autoregression under its original training\nrecipe, i.e., next-word prediction. SEED-LLaMA is therefore produced by\nlarge-scale pretraining and instruction tuning on the interleaved textual and\nvisual data, demonstrating impressive performance on a broad range of\nmultimodal comprehension and generation tasks. More importantly, SEED-LLaMA has\nexhibited compositional emergent abilities such as multi-turn in-context\nmultimodal generation, acting like your AI assistant.\n\n',
'Title: Sheared LLaMA: Accelerating Language Model Pre-training via Structured\n Pruning\nSummary: The popularity of LLaMA (Touvron et al., 2023a;b) and other recently emerged\nmoderate-sized large language models (LLMs) highlights the potential of\nbuilding smaller yet powerful LLMs. Regardless, the cost of training such\nmodels from scratch on trillions of tokens remains high. In this work, we study\nstructured pruning as an effective means to develop smaller LLMs from\npre-trained, larger models. Our approach employs two key techniques: (1)\ntargeted structured pruning, which prunes a larger model to a specified target\nshape by removing layers, heads, and intermediate and hidden dimensions in an\nend-to-end manner, and (2) dynamic batch loading, which dynamically updates the\ncomposition of sampled data in each training batch based on varying losses\nacross different domains. We demonstrate the efficacy of our approach by\npresenting the Sheared-LLaMA series, pruning the LLaMA2-7B model down to 1.3B\nand 2.7B parameters. Sheared-LLaMA models outperform state-of-the-art\nopen-source models of equivalent sizes, such as Pythia, INCITE, and OpenLLaMA\nmodels, on a wide range of downstream and instruction tuning evaluations, while\nrequiring only 3% of compute compared to training such models from scratch.\nThis work provides compelling evidence that leveraging existing LLMs with\nstructured pruning is a far more cost-effective approach for building smaller\nLLMs.\n\n',
'Title: Fine-Tuning LLaMA for Multi-Stage Text Retrieval\nSummary: The effectiveness of multi-stage text retrieval has been solidly demonstrated\nsince before the era of pre-trained language models. However, most existing\nstudies utilize models that predate recent advances in large language models\n(LLMs). This study seeks to explore potential improvements that\nstate-of-the-art LLMs can bring. We conduct a comprehensive study, fine-tuning\nthe latest LLaMA model both as a dense retriever (RepLLaMA) and as a pointwise\nreranker (RankLLaMA) for both passage retrieval and document retrieval using\nthe MS MARCO datasets. Our findings demonstrate that the effectiveness of large\nlanguage models indeed surpasses that of smaller models. Additionally, since\nLLMs can inherently handle longer contexts, they can represent entire documents\nholistically, obviating the need for traditional segmenting and pooling\nstrategies. Furthermore, evaluations on BEIR demonstrate that our\nRepLLaMA-RankLLaMA pipeline exhibits strong zero-shot effectiveness. Model\ncheckpoints from this study are available on HuggingFace.\n\n',
"Title: BadLlama: cheaply removing safety fine-tuning from Llama 2-Chat 13B\nSummary: Llama 2-Chat is a collection of large language models that Meta developed and\nreleased to the public. While Meta fine-tuned Llama 2-Chat to refuse to output\nharmful content, we hypothesize that public access to model weights enables bad\nactors to cheaply circumvent Llama 2-Chat's safeguards and weaponize Llama 2's\ncapabilities for malicious purposes. We demonstrate that it is possible to\neffectively undo the safety fine-tuning from Llama 2-Chat 13B with less than\n$200, while retaining its general capabilities. Our results demonstrate that\nsafety-fine tuning is ineffective at preventing misuse when model weights are\nreleased publicly. Given that future models will likely have much greater\nability to cause harm at scale, it is essential that AI developers address\nthreats from fine-tuning when considering whether to publicly release their\nmodel weights.\n\n",
'Title: Tamil-Llama: A New Tamil Language Model Based on Llama 2\nSummary: Language modeling has witnessed remarkable advancements in recent years, with\nLarge Language Models (LLMs) like ChatGPT setting unparalleled benchmarks in\nhuman-like text generation. However, a prevailing limitation is the\nunderrepresentation of languages like Tamil in these cutting-edge models,\nleading to suboptimal performance in diverse linguistic contexts. This paper\naddresses this lacuna, enhancing the open-source LLaMA model with an addition\nof 16,000 Tamil tokens, aiming to achieve superior text generation and\ncomprehension in the Tamil language. We strategically employ the LoRA\nmethodology for efficient model training on a comprehensive Tamil corpus,\nensuring computational feasibility and model robustness. Moreover, we introduce\na Tamil-translated version of the Alpaca dataset and a subset of the OpenOrca\ndataset tailored for instruction fine-tuning. Our results showcase significant\nperformance improvements in Tamil text generation, with potential implications\nfor the broader landscape of LLMs in Indian languages. We further underscore\nour commitment to open research by making our models, datasets, and code\npublicly accessible, fostering further innovations in language modeling.\n\n',
'Title: Beyond Surface: Probing LLaMA Across Scales and Layers\nSummary: This paper presents an in-depth analysis of Large Language Models (LLMs),\nfocusing on LLaMA, a prominent open-source foundational model in natural\nlanguage processing. Instead of assessing LLaMA through its generative output,\nwe design multiple-choice tasks to probe its intrinsic understanding in\nhigh-order tasks such as reasoning and computation. We examine the model\nhorizontally, comparing different sizes, and vertically, assessing different\nlayers. We unveil several key and uncommon findings based on the designed\nprobing tasks: (1) Horizontally, enlarging model sizes almost could not\nautomatically impart additional knowledge or computational prowess. Instead, it\ncan enhance reasoning abilities, especially in math problem solving, and helps\nreduce hallucinations, but only beyond certain size thresholds; (2) In vertical\nanalysis, the lower layers of LLaMA lack substantial arithmetic and factual\nknowledge, showcasing logical thinking, multilingual and recognitive abilities,\nwith top layers housing most computational power and real-world knowledge.\n\n',
'Title: Purple Llama CyberSecEval: A Secure Coding Benchmark for Language Models\nSummary: This paper presents CyberSecEval, a comprehensive benchmark developed to help\nbolster the cybersecurity of Large Language Models (LLMs) employed as coding\nassistants. As what we believe to be the most extensive unified cybersecurity\nsafety benchmark to date, CyberSecEval provides a thorough evaluation of LLMs\nin two crucial security domains: their propensity to generate insecure code and\ntheir level of compliance when asked to assist in cyberattacks. Through a case\nstudy involving seven models from the Llama 2, Code Llama, and OpenAI GPT large\nlanguage model families, CyberSecEval effectively pinpointed key cybersecurity\nrisks. More importantly, it offered practical insights for refining these\nmodels. A significant observation from the study was the tendency of more\nadvanced models to suggest insecure code, highlighting the critical need for\nintegrating security considerations in the development of sophisticated LLMs.\nCyberSecEval, with its automated test case generation and evaluation pipeline\ncovers a broad scope and equips LLM designers and researchers with a tool to\nbroadly measure and enhance the cybersecurity safety properties of LLMs,\ncontributing to the development of more secure AI systems.\n\n',
'Title: LLAMA Millimeter and Submillimeter Observatory. Update on its Science\n Opportunities\nSummary: The Large Latin American Millimeter Array (LLAMA for short) is a joint\nscientific and technological undertaking of Argentina and Brazil whose goal is\nto install and to operate an observing facility capable of performing\nobservations of the Universe at millimeter and sub-millimeter wavelengths. It\nwill consist of a 12m ALMA-like antenna with the addition of two Nasmyth\ncabins. LLAMA is located at 4850m above sea level in the Puna Saltenia, in the\nnorthwest region of Argentina. When completed, LLAMA will be equipped with six\nALMA receivers covering Bands 1, 2+3, 5, 6, 7, and 9, which will populate the\ntwo Nasmyth cabins. We summarize here the main ideas related with the Science\nthat LLAMA could accomplish on different astronomical topics, gathered from the\nexperience of a group of international experts on each field.\n\n',
'Title: LLAMA: The $M_{BH}$ - $σ_{\\star}$ Relation of the most luminous\n local AGNs\nSummary: The $M_{BH}$ - $\\sigma_{\\star}$ relation is considered a result of\nco-evolution between the host galaxies and their super-massive black holes. For\nelliptical-bulge hosting inactive galaxies, this relation is well established,\nbut there is still a debate whether active galaxies follow the same relation.\nIn this paper, we estimate black hole masses for a sample of 19 local luminous\nAGNs (LLAMA) in order to test their location on the $M_{BH}$ - $\\sigma_{\\star}$\nrelation. Super-massive black hole masses ($M_{BH}$) were derived from the\nbroad-line based relations for H$\\alpha$, H$\\beta$ and Pa$\\beta$ emission line\nprofiles for the Type 1 AGNs. We compare the bulge stellar velocity dispersion\n($\\sigma_{\\star}$) as determined from the Ca II triplet (CaT) with the\ndispersion measured from the near-infrared CO (2-0) absorption features for\neach AGN and find them to be consistent with each other. We apply an extinction\ncorrection to the observed broad line fluxes and we correct the stellar\nvelocity dispersion by an average rotation contribution as determined from\nspatially resolved stellar kinematic maps. The H$\\alpha$-based black hole\nmasses of our sample of AGNs were estimated in the range 6.34 $\\leq$\n$\\log{M_{BH}}$ $\\leq$ 7.75 M$_\\odot$ and the $\\sigma_{\\star CaT}$ estimates\nrange between 73 $\\leq$ $\\sigma_{\\star CaT}$ $\\leq$ 227 km s$^{-1}$. From the\nso-constructed $M_{BH}$ - $\\sigma_{\\star}$ relation for our Type 1 AGNs, we\nestimate the black hole masses for the Type 2 AGNs and the inactive galaxies in\nour sample. In conclusion, we find that our sample of local luminous AGNs is\nconsistent with the $M_{BH}$ - $\\sigma_{\\star}$ relation of lower luminosity\nAGNs and inactive galaxies, after correcting for dust extinction and the\nrotational contribution to the stellar velocity dispersion.\n\n',
'Title: LLAMA: Nuclear stellar properties of Swift BAT AGN and matched inactive\n galaxies\nSummary: In a complete sample of local 14-195 keV selected AGNs and inactive galaxies,\nmatched by their host galaxy properties, we study the spatially resolved\nstellar kinematics and luminosity distributions at near-infrared wavelengths on\nscales of 10-150 pc, using SINFONI on the VLT. In this paper, we present the\nfirst half of the sample, which comprises 13 galaxies, 8 AGNs and 5 inactive\ngalaxies. The stellar velocity fields show a disk-like rotating pattern, for\nwhich the kinematic position angle is in agreement with the photometric\nposition angle obtained from large scale images. For this set of galaxies, the\nstellar surface brightness of the inactive galaxy sample is generally\ncomparable to the matched sample of AGN but extends to lower surface\nbrightness. After removal of the bulge contribution, we find a nuclear stellar\nlight excess with an extended nuclear disk structure, and which exhibits a\nsize-luminosity relation. While we expect the excess luminosity to be\nassociated with a dynamically cooler young stellar population, we do not\ntypically see a matching drop in dispersion. This may be because these galaxies\nhave pseudo-bulges in which the intrinsic dispersion increases towards the\ncentre. And although the young stars may have an impact in the observed\nkinematics, their fraction is too small to dominate over the bulge and\ncompensate the increase in dispersion at small radii, so no dispersion drop is\nseen. Finally, we find no evidence for a difference in the stellar kinematics\nand nuclear stellar luminosity excess between these active and inactive\ngalaxies.\n\n',
'Title: LLAMA: Normal star formation efficiencies of molecular gas in the\n centres of luminous Seyfert galaxies\nSummary: Using new APEX and JCMT spectroscopy of the CO 2-1 line, we undertake a\ncontrolled study of cold molecular gas in moderately luminous Active Galactic\nNuclei (AGN) and inactive galaxies from the Luminous Local AGN with Matched\nAnalogs (LLAMA) survey. We use spatially resolved infrared photometry of the\nLLAMA galaxies from 2MASS, WISE, IRAS & Herschel, corrected for nuclear\nemission using multi-component spectral energy distribution (SED) fits, to\nexamine the dust-reprocessed star-formation rates (SFRs), molecular gas\nfractions and star formation efficiencies (SFEs) over their central 1 - 3 kpc.\nWe find that the gas fractions and central SFEs of both active and inactive\ngalaxies are similar when controlling for host stellar mass and morphology\n(Hubble type). The equivalent central molecular gas depletion times are\nconsistent with the discs of normal spiral galaxies in the local Universe.\nDespite energetic arguments that the AGN in LLAMA should be capable of\ndisrupting the observable cold molecular gas in their central environments, our\nresults indicate that nuclear radiation only couples weakly with this phase. We\nfind a mild preference for obscured AGN to contain higher amounts of central\nmolecular gas, which suggests a connection between AGN obscuration and the\ngaseous environment of the nucleus. Systems with depressed SFEs are not found\namong the LLAMA AGN. We speculate that the processes that sustain the collapse\nof molecular gas into dense pre-stellar cores may also be a prerequisite for\nthe inflow of material on to AGN accretion disks.\n\n',
"Title: Low-Latency Algorithm for Multi-messenger Astrophysics (LLAMA) with\n Gravitational-Wave and High-Energy Neutrino Candidates\nSummary: We describe in detail the online data analysis pipeline that was used in the\nmulti-messenger search for common sources of gravitational waves (GWs) and\nhigh-energy neutrinos (HENs) during the second observing period (O2) of\nAdvanced LIGO and Advanced Virgo. Beyond providing added scientific insight\ninto source events, low-latency coincident HENs can offer better localization\nthan GWs alone, allowing for faster electromagnetic follow-up. Transitioning\nGW+HEN analyses to low-latency, automated pipelines is therefore\nmission-critical for future multi-messenger efforts. The O2 Low-Latency\nAlgorithm for Multi-messenger Astrophysics (\\pipeline) also served as a\nproof-of-concept for future online GW+HEN searches and led to a codebase that\ncan handle other messengers as well. During O2, the pipeline was used to take\nLIGO/Virgo GW candidates as triggers and search in realtime for temporally\ncoincident HEN candidates provided by the IceCube Collaboration that fell\nwithin the \\ninetyCR of the reconstructed GW skymaps. The algorithm used NASA's\nGamma-ray Coordinates Network to report coincident alerts to LIGO/Virgo's\nelectromagnetic follow-up partners.\n\n",
'Title: Llama: A Heterogeneous & Serverless Framework for Auto-Tuning Video\n Analytics Pipelines\nSummary: The proliferation of camera-enabled devices and large video repositories has\nled to a diverse set of video analytics applications. These applications rely\non video pipelines, represented as DAGs of operations, to transform videos,\nprocess extracted metadata, and answer questions like, "Is this intersection\ncongested?" The latency and resource efficiency of pipelines can be optimized\nusing configurable knobs for each operation (e.g., sampling rate, batch size,\nor type of hardware used). However, determining efficient configurations is\nchallenging because (a) the configuration search space is exponentially large,\nand (b) the optimal configuration depends on users\' desired latency and cost\ntargets, (c) input video contents may exercise different paths in the DAG and\nproduce a variable amount intermediate results. Existing video analytics and\nprocessing systems leave it to the users to manually configure operations and\nselect hardware resources.\n We present Llama: a heterogeneous and serverless framework for auto-tuning\nvideo pipelines. Given an end-to-end latency target, Llama optimizes for cost\nefficiency by (a) calculating a latency target for each operation invocation,\nand (b) dynamically running a cost-based optimizer to assign configurations\nacross heterogeneous hardware that best meet the calculated per-invocation\nlatency target. This makes the problem of auto-tuning large video pipelines\ntractable and allows us to handle input-dependent behavior, conditional\nbranches in the DAG, and execution variability. We describe the algorithms in\nLlama and evaluate it on a cloud platform using serverless CPU and GPU\nresources. We show that compared to state-of-the-art cluster and serverless\nvideo analytics and processing systems, Llama achieves 7.8x lower latency and\n16x cost reduction on average.\n\n',
'Title: LLAMA: Stellar populations in the nuclei of ultra hard X-ray selected\n AGN and matched inactive galaxies\nSummary: The relation between nuclear ($\\lesssim$ 50 pc) star formation and nuclear\ngalactic activity is still elusive: theoretical models predict a link between\nthe two, but it is unclear whether active galactic nuclei (AGNs) should appear\nat the same time, before or after nuclear star formation activity is ongoing.\nWe present a study of this relation in a complete, volume-limited sample of\nnine of the most luminous ($\\log L_{\\rm 14-195 keV} > 10^{42.5}$ erg/s) local\nAGNs (the LLAMA sample), including a sample of 18 inactive control galaxies (6\nstar-forming; 12 passive) that are matched by Hubble type, stellar mass (9.5\n$\\lesssim$ log M_star/M_sun $\\lesssim$ 10.5), inclination and distance. This\nallows us to calibrate our methods on the control sample and perform a\ndifferential analysis between the AGN and control samples. We perform stellar\npopulation synthesis on VLT/X-SHOOTER spectra in an aperture corresponding to a\nphysical radius of $\\approx$ 150 pc. We find young ($\\lesssim$ 30 Myr) stellar\npopulations in seven out of nine AGNs and in four out of six star-forming\ncontrol galaxies. In the non-star-forming control population, in contrast, only\ntwo out of twelve galaxies show such a population. We further show that these\nyoung populations are not indicative of ongoing star-formation, providing\nevidence for models that see AGN activity as a consequence of nuclear star\nformation. Based on the similar nuclear star-formation histories of AGNs and\nstar-forming control galaxies, we speculate that the latter may turn into the\nformer for some fraction of their time. Under this assumption, and making use\nof the volume-completeness of our sample, we infer that the AGN phase lasts for\nabout 5 % of the nuclear starburst phase.\n\n',
'Title: Multi-Task Instruction Tuning of LLaMa for Specific Scenarios: A\n Preliminary Study on Writing Assistance\nSummary: Proprietary Large Language Models (LLMs), such as ChatGPT, have garnered\nsignificant attention due to their exceptional capabilities in handling a\ndiverse range of tasks. Recent studies demonstrate that open-sourced smaller\nfoundational models, such as 7B-size LLaMA, can also display remarkable\nproficiency in tackling diverse tasks when fine-tuned using instruction-driven\ndata. In this work, we investigate a practical problem setting where the\nprimary focus is on one or a few particular tasks rather than general-purpose\ninstruction following, and explore whether LLMs can be beneficial and further\nimproved for such targeted scenarios. We choose the writing-assistant scenario\nas the testbed, which includes seven writing tasks. We collect training data\nfor these tasks, reframe them in an instruction-following format, and\nsubsequently refine the LLM, specifically LLaMA, via instruction tuning.\nExperimental results show that fine-tuning LLaMA on writing instruction data\nsignificantly improves its ability on writing tasks. We also conduct more\nexperiments and analyses to offer insights for future work on effectively\nfine-tuning LLaMA for specific scenarios. Finally, we initiate a discussion\nregarding the necessity of employing LLMs for only one targeted task, taking\ninto account the efforts required for tuning and the resources consumed during\ndeployment.\n\n',
'Title: Music Understanding LLaMA: Advancing Text-to-Music Generation with\n Question Answering and Captioning\nSummary: Text-to-music generation (T2M-Gen) faces a major obstacle due to the scarcity\nof large-scale publicly available music datasets with natural language\ncaptions. To address this, we propose the Music Understanding LLaMA (MU-LLaMA),\ncapable of answering music-related questions and generating captions for music\nfiles. Our model utilizes audio representations from a pretrained MERT model to\nextract music features. However, obtaining a suitable dataset for training the\nMU-LLaMA model remains challenging, as existing publicly accessible audio\nquestion answering datasets lack the necessary depth for open-ended music\nquestion answering. To fill this gap, we present a methodology for generating\nquestion-answer pairs from existing audio captioning datasets and introduce the\nMusicQA Dataset designed for answering open-ended music-related questions. The\nexperiments demonstrate that the proposed MU-LLaMA model, trained on our\ndesigned MusicQA dataset, achieves outstanding performance in both music\nquestion answering and music caption generation across various metrics,\noutperforming current state-of-the-art (SOTA) models in both fields and\noffering a promising advancement in the T2M-Gen research field.\n\n',
"Title: Fine-Tuning Llama 2 Large Language Models for Detecting Online Sexual\n Predatory Chats and Abusive Texts\nSummary: Detecting online sexual predatory behaviours and abusive language on social\nmedia platforms has become a critical area of research due to the growing\nconcerns about online safety, especially for vulnerable populations such as\nchildren and adolescents. Researchers have been exploring various techniques\nand approaches to develop effective detection systems that can identify and\nmitigate these risks. Recent development of large language models (LLMs) has\nopened a new opportunity to address this problem more effectively. This paper\nproposes an approach to detection of online sexual predatory chats and abusive\nlanguage using the open-source pretrained Llama 2 7B-parameter model, recently\nreleased by Meta GenAI. We fine-tune the LLM using datasets with different\nsizes, imbalance degrees, and languages (i.e., English, Roman Urdu and Urdu).\nBased on the power of LLMs, our approach is generic and automated without a\nmanual search for a synergy between feature extraction and classifier design\nsteps like conventional methods in this domain. Experimental results show a\nstrong performance of the proposed approach, which performs proficiently and\nconsistently across three distinct datasets with five sets of experiments. This\nstudy's outcomes indicate that the proposed method can be implemented in\nreal-world applications (even with non-English languages) for flagging sexual\npredators, offensive or toxic content, hate speech, and discriminatory language\nin online discussions and comments to maintain respectful internet or digital\ncommunities. Furthermore, it can be employed for solving text classification\nproblems with other potential applications such as sentiment analysis, spam and\nphishing detection, sorting legal documents, fake news detection, language\nidentification, user intent recognition, text-based product categorization,\nmedical record analysis, and resume screening.\n\n",
'Title: Safety-Tuned LLaMAs: Lessons From Improving the Safety of Large Language\n Models that Follow Instructions\nSummary: Training large language models to follow instructions makes them perform\nbetter on a wide range of tasks, generally becoming more helpful. However, a\nperfectly helpful model will follow even the most malicious instructions and\nreadily generate harmful content. In this paper, we raise concerns over the\nsafety of models that only emphasize helpfulness, not safety, in their\ninstruction-tuning. We show that several popular instruction-tuned models are\nhighly unsafe. Moreover, we show that adding just 3% safety examples (a few\nhundred demonstrations) in the training set when fine-tuning a model like LLaMA\ncan substantially improve their safety. Our safety-tuning does not make models\nsignificantly less capable or helpful as measured by standard benchmarks.\nHowever, we do find a behavior of exaggerated safety, where too much\nsafety-tuning makes models refuse to respond to reasonable prompts that\nsuperficially resemble unsafe ones. Our study sheds light on trade-offs in\ntraining LLMs to follow instructions and exhibit safe behavior.\n\n',
'Title: Benchmarking quantized LLaMa-based models on the Brazilian Secondary\n School Exam\nSummary: Although Large Language Models (LLMs) represent a revolution in the way we\ninteract with computers, allowing the construction of complex questions and the\nability to reason over a sequence of statements, their use is restricted due to\nthe need for dedicated hardware for execution. In this study, we evaluate the\nperformance of LLMs based on the 7 and 13 billion LLaMA models, subjected to a\nquantization process and run on home hardware. The models considered were\nAlpaca, Koala, and Vicuna. To evaluate the effectiveness of these models, we\ndeveloped a database containing 1,006 questions from the ENEM (Brazilian\nNational Secondary School Exam). Our analysis revealed that the best performing\nmodels achieved an accuracy of approximately 46% for the original texts of the\nPortuguese questions and 49% on their English translations. In addition, we\nevaluated the computational efficiency of the models by measuring the time\nrequired for execution. On average, the 7 and 13 billion LLMs took\napproximately 20 and 50 seconds, respectively, to process the queries on a\nmachine equipped with an AMD Ryzen 5 3600x processor\n\n',
'Title: Whispering LLaMA: A Cross-Modal Generative Error Correction Framework\n for Speech Recognition\nSummary: We introduce a new cross-modal fusion technique designed for generative error\ncorrection in automatic speech recognition (ASR). Our methodology leverages\nboth acoustic information and external linguistic representations to generate\naccurate speech transcription contexts. This marks a step towards a fresh\nparadigm in generative error correction within the realm of n-best hypotheses.\nUnlike the existing ranking-based rescoring methods, our approach adeptly uses\ndistinct initialization techniques and parameter-efficient algorithms to boost\nASR performance derived from pre-trained speech and text models. Through\nevaluation across diverse ASR datasets, we evaluate the stability and\nreproducibility of our fusion technique, demonstrating its improved word error\nrate relative (WERR) performance in comparison to n-best hypotheses by\nrelatively 37.66%. To encourage future research, we have made our code and\npre-trained models open source at\nhttps://github.com/Srijith-rkr/Whispering-LLaMA.\n\n',
"Title: LLaMA Rider: Spurring Large Language Models to Explore the Open World\nSummary: Recently, various studies have leveraged Large Language Models (LLMs) to help\ndecision-making and planning in environments, and try to align the LLMs'\nknowledge with the world conditions. Nonetheless, the capacity of LLMs to\ncontinuously acquire environmental knowledge and adapt in an open world remains\nuncertain. In this paper, we propose an approach to spur LLMs to explore the\nopen world, gather experiences, and learn to improve their task-solving\ncapabilities. In this approach, a multi-round feedback-revision mechanism is\nutilized to encourage LLMs to actively select appropriate revision actions\nguided by feedback information from the environment. This facilitates\nexploration and enhances the model's performance. Besides, we integrate\nsub-task relabeling to assist LLMs in maintaining consistency in sub-task\nplanning and help the model learn the combinatorial nature between tasks,\nenabling it to complete a wider range of tasks through training based on the\nacquired exploration experiences. By evaluation in Minecraft, an open-ended\nsandbox world, we demonstrate that our approach LLaMA-Rider enhances the\nefficiency of the LLM in exploring the environment, and effectively improves\nthe LLM's ability to accomplish more tasks through fine-tuning with merely 1.3k\ninstances of collected data, showing minimal training costs compared to the\nbaseline using reinforcement learning.\n\n",
"Title: Accelerating LLaMA Inference by Enabling Intermediate Layer Decoding via\n Instruction Tuning with LITE\nSummary: Large Language Models (LLMs) have achieved remarkable performance across a\nwide variety of natural language tasks; however, their large size makes their\ninference slow and computationally expensive. Focusing on this problem, we\npropose to instruction tune LLMs with additional explicit losses from the\nintermediate layers (LITE) and show that it enables these layers to acquire\n'good' generation ability without affecting the generation ability of the final\nlayer. We perform 'dynamic confidence-based early exiting' at token level from\nthe intermediate layers which improves the efficiency of text generation\nwithout compromising the quality of the generation. We conduct comprehensive\nexperiments by instruction tuning LLaMA-2 models on the Alpaca dataset and\nholistically evaluate on four different human-instruction test sets. We show\nthat dynamic early exiting achieves consistent and considerable inference\ncomputation cost improvements (37.86% for 7B and 46.35% for 13B model) while\nmaintaining the generation quality of the responses. We further conduct a\nthorough analysis of the results over several important aspects, such as\ncomparing the semantic similarity of the outputs and dissecting the efficiency\nimprovements by comparing the number of tokens generated in the output. In\nsummary, our work contributes to improving the efficiency of LLM inference\nwhile maintaining the generation quality, a crucial step en route to enabling\ntheir widespread adoption.\n\n",
'Title: LoRA Fine-tuning Efficiently Undoes Safety Training in Llama 2-Chat 70B\nSummary: AI developers often apply safety alignment procedures to prevent the misuse\nof their AI systems. For example, before Meta released Llama 2-Chat, a\ncollection of instruction fine-tuned large language models, they invested\nheavily in safety training, incorporating extensive red-teaming and\nreinforcement learning from human feedback. However, it remains unclear how\nwell safety training guards against model misuse when attackers have access to\nmodel weights. We explore the robustness of safety training in language models\nby subversively fine-tuning the public weights of Llama 2-Chat. We employ\nlow-rank adaptation (LoRA) as an efficient fine-tuning method. With a budget of\nless than $200 per model and using only one GPU, we successfully undo the\nsafety training of Llama 2-Chat models of sizes 7B, 13B, and 70B. Specifically,\nour fine-tuning technique significantly reduces the rate at which the model\nrefuses to follow harmful instructions. We achieve a refusal rate below 1% for\nour 70B Llama 2-Chat model on two refusal benchmarks. Our fine-tuning method\nretains general performance, which we validate by comparing our fine-tuned\nmodels against Llama 2-Chat across two benchmarks. Additionally, we present a\nselection of harmful outputs produced by our models. While there is\nconsiderable uncertainty about the scope of risks from current models, it is\nlikely that future models will have significantly more dangerous capabilities,\nincluding the ability to hack into critical infrastructure, create dangerous\nbio-weapons, or autonomously replicate and adapt to new environments. We show\nthat subversive fine-tuning is practical and effective, and hence argue that\nevaluating risks from fine-tuning should be a core part of risk assessments for\nreleasing model weights.\n\n',
"Title: Llamas Know What GPTs Don't Show: Surrogate Models for Confidence\n Estimation\nSummary: To maintain user trust, large language models (LLMs) should signal low\nconfidence on examples where they are incorrect, instead of misleading the\nuser. The standard approach of estimating confidence is to use the softmax\nprobabilities of these models, but as of November 2023, state-of-the-art LLMs\nsuch as GPT-4 and Claude-v1.3 do not provide access to these probabilities. We\nfirst study eliciting confidence linguistically -- asking an LLM for its\nconfidence in its answer -- which performs reasonably (80.5% AUC on GPT-4\naveraged across 12 question-answering datasets -- 7% above a random baseline)\nbut leaves room for improvement. We then explore using a surrogate confidence\nmodel -- using a model where we do have probabilities to evaluate the original\nmodel's confidence in a given question. Surprisingly, even though these\nprobabilities come from a different and often weaker model, this method leads\nto higher AUC than linguistic confidences on 9 out of 12 datasets. Our best\nmethod composing linguistic confidences and surrogate model probabilities gives\nstate-of-the-art confidence estimates on all 12 datasets (84.6% average AUC on\nGPT-4).\n\n",
"Title: HELLaMA: LLaMA-based Table to Text Generation by Highlighting the\n Important Evidence\nSummary: Large models have demonstrated significant progress across various domains,\nparticularly in tasks related to text generation. In the domain of Table to\nText, many Large Language Model (LLM)-based methods currently resort to\nmodifying prompts to invoke public APIs, incurring potential costs and\ninformation leaks. With the advent of open-source large models, fine-tuning\nLLMs has become feasible. In this study, we conducted parameter-efficient\nfine-tuning on the LLaMA2 model. Distinguishing itself from previous\nfine-tuning-based table-to-text methods, our approach involves injecting\nreasoning information into the input by emphasizing table-specific row data.\nOur model consists of two modules: 1) a table reasoner that identifies relevant\nrow evidence, and 2) a table summarizer that generates sentences based on the\nhighlighted table. To facilitate this, we propose a search strategy to\nconstruct reasoning labels for training the table reasoner. On both the FetaQA\nand QTSumm datasets, our approach achieved state-of-the-art results.\nAdditionally, we observed that highlighting input tables significantly enhances\nthe model's performance and provides valuable interpretability.\n\n",
"Title: SecureBERT and LLAMA 2 Empowered Control Area Network Intrusion\n Detection and Classification\nSummary: Numerous studies have proved their effective strength in detecting Control\nArea Network (CAN) attacks. In the realm of understanding the human semantic\nspace, transformer-based models have demonstrated remarkable effectiveness.\nLeveraging pre-trained transformers has become a common strategy in various\nlanguage-related tasks, enabling these models to grasp human semantics more\ncomprehensively. To delve into the adaptability evaluation on pre-trained\nmodels for CAN intrusion detection, we have developed two distinct models:\nCAN-SecureBERT and CAN-LLAMA2. Notably, our CAN-LLAMA2 model surpasses the\nstate-of-the-art models by achieving an exceptional performance 0.999993 in\nterms of balanced accuracy, precision detection rate, F1 score, and a\nremarkably low false alarm rate of 3.10e-6. Impressively, the false alarm rate\nis 52 times smaller than that of the leading model, MTH-IDS (Multitiered Hybrid\nIntrusion Detection System). Our study underscores the promise of employing a\nLarge Language Model as the foundational model, while incorporating adapters\nfor other cybersecurity-related tasks and maintaining the model's inherent\nlanguage-related capabilities.\n\n",
'Title: Localizing Lying in Llama: Understanding Instructed Dishonesty on\n True-False Questions Through Prompting, Probing, and Patching\nSummary: Large language models (LLMs) demonstrate significant knowledge through their\noutputs, though it is often unclear whether false outputs are due to a lack of\nknowledge or dishonesty. In this paper, we investigate instructed dishonesty,\nwherein we explicitly prompt LLaMA-2-70b-chat to lie. We perform prompt\nengineering to find which prompts best induce lying behavior, and then use\nmechanistic interpretability approaches to localize where in the network this\nbehavior occurs. Using linear probing and activation patching, we localize five\nlayers that appear especially important for lying. We then find just 46\nattention heads within these layers that enable us to causally intervene such\nthat the lying model instead answers honestly. We show that these interventions\nwork robustly across many prompts and dataset splits. Overall, our work\ncontributes a greater understanding of dishonesty in LLMs so that we may hope\nto prevent it.\n\n',
'Title: What Do Llamas Really Think? Revealing Preference Biases in Language\n Model Representations\nSummary: Do large language models (LLMs) exhibit sociodemographic biases, even when\nthey decline to respond? To bypass their refusal to "speak," we study this\nresearch question by probing contextualized embeddings and exploring whether\nthis bias is encoded in its latent representations. We propose a logistic\nBradley-Terry probe which predicts word pair preferences of LLMs from the\nwords\' hidden vectors. We first validate our probe on three pair preference\ntasks and thirteen LLMs, where we outperform the word embedding association\ntest (WEAT), a standard approach in testing for implicit association, by a\nrelative 27% in error rate. We also find that word pair preferences are best\nrepresented in the middle layers. Next, we transfer probes trained on harmless\ntasks (e.g., pick the larger number) to controversial ones (compare\nethnicities) to examine biases in nationality, politics, religion, and gender.\nWe observe substantial bias for all target classes: for instance, the Mistral\nmodel implicitly prefers Europe to Africa, Christianity to Judaism, and\nleft-wing to right-wing politics, despite declining to answer. This suggests\nthat instruction fine-tuning does not necessarily debias contextualized\nembeddings. Our codebase is at https://github.com/castorini/biasprobe.\n\n',
"Title: Llama Guard: LLM-based Input-Output Safeguard for Human-AI Conversations\nSummary: We introduce Llama Guard, an LLM-based input-output safeguard model geared\ntowards Human-AI conversation use cases. Our model incorporates a safety risk\ntaxonomy, a valuable tool for categorizing a specific set of safety risks found\nin LLM prompts (i.e., prompt classification). This taxonomy is also\ninstrumental in classifying the responses generated by LLMs to these prompts, a\nprocess we refer to as response classification. For the purpose of both prompt\nand response classification, we have meticulously gathered a dataset of high\nquality. Llama Guard, a Llama2-7b model that is instruction-tuned on our\ncollected dataset, albeit low in volume, demonstrates strong performance on\nexisting benchmarks such as the OpenAI Moderation Evaluation dataset and\nToxicChat, where its performance matches or exceeds that of currently available\ncontent moderation tools. Llama Guard functions as a language model, carrying\nout multi-class classification and generating binary decision scores.\nFurthermore, the instruction fine-tuning of Llama Guard allows for the\ncustomization of tasks and the adaptation of output formats. This feature\nenhances the model's capabilities, such as enabling the adjustment of taxonomy\ncategories to align with specific use cases, and facilitating zero-shot or\nfew-shot prompting with diverse taxonomies at the input. We are making Llama\nGuard model weights available and we encourage researchers to further develop\nand adapt them to meet the evolving needs of the community for AI safety.\n\n",
"Title: Enhanced E-Commerce Attribute Extraction: Innovating with Decorative\n Relation Correction and LLAMA 2.0-Based Annotation\nSummary: The rapid proliferation of e-commerce platforms accentuates the need for\nadvanced search and retrieval systems to foster a superior user experience.\nCentral to this endeavor is the precise extraction of product attributes from\ncustomer queries, enabling refined search, comparison, and other crucial\ne-commerce functionalities. Unlike traditional Named Entity Recognition (NER)\ntasks, e-commerce queries present a unique challenge owing to the intrinsic\ndecorative relationship between product types and attributes. In this study, we\npropose a pioneering framework that integrates BERT for classification, a\nConditional Random Fields (CRFs) layer for attribute value extraction, and\nLarge Language Models (LLMs) for data annotation, significantly advancing\nattribute recognition from customer inquiries. Our approach capitalizes on the\nrobust representation learning of BERT, synergized with the sequence decoding\nprowess of CRFs, to adeptly identify and extract attribute values. We introduce\na novel decorative relation correction mechanism to further refine the\nextraction process based on the nuanced relationships between product types and\nattributes inherent in e-commerce data. Employing LLMs, we annotate additional\ndata to expand the model's grasp and coverage of diverse attributes. Our\nmethodology is rigorously validated on various datasets, including Walmart,\nBestBuy's e-commerce NER dataset, and the CoNLL dataset, demonstrating\nsubstantial improvements in attribute recognition performance. Particularly,\nthe model showcased promising results during a two-month deployment in\nWalmart's Sponsor Product Search, underscoring its practical utility and\neffectiveness.\n\n",
"Title: LLaMAntino: LLaMA 2 Models for Effective Text Generation in Italian\n Language\nSummary: Large Language Models represent state-of-the-art linguistic models designed\nto equip computers with the ability to comprehend natural language. With its\nexceptional capacity to capture complex contextual relationships, the LLaMA\n(Large Language Model Meta AI) family represents a novel advancement in the\nfield of natural language processing by releasing foundational models designed\nto improve the natural language understanding abilities of the transformer\narchitecture thanks to their large amount of trainable parameters (7, 13, and\n70 billion parameters). In many natural language understanding tasks, these\nmodels obtain the same performances as private company models such as OpenAI\nChat-GPT with the advantage to make publicly available weights and code for\nresearch and commercial uses. In this work, we investigate the possibility of\nLanguage Adaptation for LLaMA models, explicitly focusing on addressing the\nchallenge of Italian Language coverage. Adopting an open science approach, we\nexplore various tuning approaches to ensure a high-quality text generated in\nItalian suitable for common tasks in this underrepresented language in the\noriginal models' datasets. We aim to release effective text generation models\nwith strong linguistic properties for many tasks that seem challenging using\nmultilingual or general-purpose LLMs. By leveraging an open science philosophy,\nthis study contributes to Language Adaptation strategies for the Italian\nlanguage by introducing the novel LLaMAntino family of Italian LLMs.\n\n",
'Title: The Lensed Lyman-Alpha MUSE Arcs Sample (LLAMAS) : I. Characterisation\n of extended Lyman-alpha haloes and spatial offsets\nSummary: We present the Lensed Lyman-Alpha MUSE Arcs Sample (LLAMAS) selected from\nMUSE and HST observations of 17 lensing clusters. The sample consists of 603\ncontinuum-faint (-23<M_UV<-14) lensed Lyman-alpha emitters (producing 959\nimages) with spectroscopic redshifts between 2.9 and 6.7. Combining the power\nof cluster magnification with 3D spectroscopic observations, we are able to\nreveal the resolved morphological properties of 268 Lyman-alpha emitters. We\nuse a forward modelling approach to model both Lyman-alpha and rest-frame UV\ncontinuum emission profiles in the source plane and measure spatial extent,\nellipticity and spatial offsets between UV and Lyman-alpha emission. We find a\nsignificant correlation between UV continuum and Lyman-alpha spatial extent.\nOur characterization of the Lyman-alpha haloes indicates that the halo size is\nlinked to the physical properties of the host galaxy (SFR, Lyman-alpha EW and\nLyman-alpha line FWHM). We find that 48% of Lyman-alpha haloes are best-fitted\nby an elliptical emission distribution with a median axis ratio of q=0.48. We\nobserve that 60% of galaxies detected both in UV and Lyman-alpha emission show\na significant spatial offset (Delta). We measure a median offset of Delta= 0.58\n\\pm 0.14 kpc for the entire sample. By comparing the spatial offset values with\nthe size of the UV component, we show that 40% of the offsets could be due to\nstar-forming sub-structures in the UV component, while the larger offsets are\nmore likely due to larger distance processes such as scattering effects inside\nthe circumgalactic medium or emission from faint satellites or merging\ngalaxies. Comparisons with a zoom-in radiative hydrodynamics simulation of a\ntypical Lyman-alpha emitting galaxy show a good agreement with LLAMAS galaxies\nand indicate that bright star-formation clumps and satellite galaxies could\nproduce a similar spatial offsets distribution. (abridged)\n\n',
"Title: ChatDoctor: A Medical Chat Model Fine-Tuned on a Large Language Model\n Meta-AI (LLaMA) Using Medical Domain Knowledge\nSummary: The primary aim of this research was to address the limitations observed in\nthe medical knowledge of prevalent large language models (LLMs) such as\nChatGPT, by creating a specialized language model with enhanced accuracy in\nmedical advice. We achieved this by adapting and refining the large language\nmodel meta-AI (LLaMA) using a large dataset of 100,000 patient-doctor dialogues\nsourced from a widely used online medical consultation platform. These\nconversations were cleaned and anonymized to respect privacy concerns. In\naddition to the model refinement, we incorporated a self-directed information\nretrieval mechanism, allowing the model to access and utilize real-time\ninformation from online sources like Wikipedia and data from curated offline\nmedical databases. The fine-tuning of the model with real-world patient-doctor\ninteractions significantly improved the model's ability to understand patient\nneeds and provide informed advice. By equipping the model with self-directed\ninformation retrieval from reliable online and offline sources, we observed\nsubstantial improvements in the accuracy of its responses. Our proposed\nChatDoctor, represents a significant advancement in medical LLMs, demonstrating\na significant improvement in understanding patient inquiries and providing\naccurate advice. Given the high stakes and low error tolerance in the medical\nfield, such enhancements in providing accurate and reliable information are not\nonly beneficial but essential.\n\n",
'Title: Baby Llama: knowledge distillation from an ensemble of teachers trained\n on a small dataset with no performance penalty\nSummary: We present our submission to the BabyLM challenge, whose goal was to improve\nthe sample efficiency of language models. We trained an ensemble consisting of\na GPT-2 and small LLaMA models on the developmentally-plausible, 10M-word\nBabyLM dataset, then distilled it into a small, 58M-parameter LLaMA model,\nwhich exceeds in performance both of its teachers as well as a similar model\ntrained without distillation. This suggests that distillation can not only\nretain the full performance of the teacher model when the latter is trained on\na sufficiently small dataset; it can exceed it, and lead to significantly\nbetter performance than direct training.\n\n',
'Title: Sorted LLaMA: Unlocking the Potential of Intermediate Layers of Large\n Language Models for Dynamic Inference Using Sorted Fine-Tuning (SoFT)\nSummary: The rapid advancement of large language models (LLMs) has revolutionized\nnatural language processing (NLP). While these models excel at understanding\nand generating human-like text, their widespread deployment can be\nprohibitively expensive. SortedNet is a recent training technique for enabling\ndynamic inference for deep neural networks. It leverages network modularity to\ncreate sub-models with varying computational loads, sorting them based on\ncomputation/accuracy characteristics in a nested manner. We extend SortedNet to\ngenerative NLP tasks, making large language models dynamic without any\npretraining and by only replacing standard Supervised Fine-Tuning (SFT) with\nSorted Fine-Tuning (SoFT) at the same costs. Our approach boosts model\nefficiency, eliminating the need for multiple models for various scenarios\nduring inference. We show that using this approach, we are able to unlock the\npotential of intermediate layers of transformers in generating the target\noutput. Our sub-models remain integral components of the original model,\nminimizing storage requirements and transition costs between different\ncomputational/latency budgets. By applying this approach on LLaMa 2 13B for\ntuning on the Stanford Alpaca dataset and comparing it to normal tuning and\nearly exit via PandaLM benchmark, we show that Sorted Fine-Tuning can deliver\nmodels twice as fast as the original model while maintaining or exceeding\nperformance.\n\n',
"Title: ChatGPT, Llama, can you write my report? An experiment on assisted\n digital forensics reports written using (Local) Large Language Models\nSummary: Generative AIs, especially Large Language Models (LLMs) such as ChatGPT or\nLlama, have advanced significantly, positioning them as valuable tools for\ndigital forensics. While initial studies have explored the potential of ChatGPT\nin the context of investigations, the question of to what extent LLMs can\nassist the forensic report writing process remains unresolved. To answer the\nquestion, this article first examines forensic reports with the goal of\ngeneralization (e.g., finding the `average structure' of a report). We then\nevaluate the strengths and limitations of LLMs for generating the different\nparts of the forensic report using a case study. This work thus provides\nvaluable insights into the automation of report writing, a critical facet of\ndigital forensics investigations. We conclude that combined with thorough\nproofreading and corrections, LLMs may assist practitioners during the report\nwriting process but at this point cannot replace them.\n\n",
'Title: Battle of the Large Language Models: Dolly vs LLaMA vs Vicuna vs Guanaco\n vs Bard vs ChatGPT -- A Text-to-SQL Parsing Comparison\nSummary: The success of ChatGPT has ignited an AI race, with researchers striving to\ndevelop new large language models (LLMs) that can match or surpass the language\nunderstanding and generation abilities of commercial ones. In recent times, a\nnumber of models have emerged, claiming performance near that of GPT-3.5 or\nGPT-4 through various instruction-tuning methods. As practitioners of\nText-to-SQL parsing, we are grateful for their valuable contributions to\nopen-source research. However, it is important to approach these claims with a\nsense of scrutiny and ascertain the actual effectiveness of these models.\nTherefore, we pit six popular large language models against each other,\nsystematically evaluating their Text-to-SQL parsing capability on nine\nbenchmark datasets with five different prompting strategies, covering both\nzero-shot and few-shot scenarios. Regrettably, the open-sourced models fell\nsignificantly short of the performance achieved by closed-source models like\nGPT-3.5, highlighting the need for further work to bridge the performance gap\nbetween these models.\n\n']
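# Note (annotation): the strings above appear to be raw arXiv "Title: ... / Summary: ..." texts for
# LLaMA-related papers, kept verbatim as retrieved (embedded newlines and all), rather than prose
# authored for this file; they are left untouched here.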
######################################################################################################################################################################################
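# Note (annotation): despite its name, the "embeddings" list below appears to hold generated text
# summaries of the documents above, not numeric embedding vectors; the name is kept as-is in case
# other code in this file refers to it.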
embeddings = ['This technical report introduces Lawyer LLaMA, a legal domain large language model (LLM) designed to address the challenges of applying LLMs in specific domains like law. While LLMs have shown impressive performance across various tasks, they often lack domain-specific knowledge and struggle to apply that knowledge effectively. \n\nTo overcome this limitation, the proposed framework involves injecting domain knowledge during the continual training stage and training the model on supervised fine-tuning tasks that teach professional skills. Additionally, to mitigate the issue of generating incorrect or misleading information, a retrieval module is incorporated. This module retrieves relevant legal articles before the model generates answers to queries, ensuring more accurate responses.\n\nThe study reveals that incorporating expert knowledge during model training is more valuable than relying solely on data generated by ChatGPT. Expert-written data significantly outperforms content produced by ChatGPT, highlighting the importance of utilizing human expertise for domain-specific skills.\n\nThe researchers plan to make the Lawyer LLaMA model and accompanying data publicly available, fostering further exploration and development within the legal domain.',
'Title: Label Supervised LLaMA: Finetuning Large Language Models for Improved Classification Tasks\n\nSummary: This paper introduces a novel approach called Label Supervised LLaMA (LS LLaMA) for finetuning Large Language Models (LLMs) using discriminant labels. Unlike traditional instruction tuning, LS LLaMA focuses on improving label prediction performance in sequence and token classification tasks. The approach extracts latent representations from a smaller LLM called LLaMA 2 7B and projects them into the label space to compute a cross entropy loss. Finetuning is done using Low Rank Adaptation (LoRA) to minimize this loss. LS LLaMA achieves significant improvements in text classification, outperforming LLMs ten times its size and robust baselines like BERT Large and RoBERTa Large. Additionally, by removing the causal mask from decoders, LS unLLaMA achieves state-of-the-art performance in named entity recognition (NER). This work presents a promising approach to adapting LLMs for various downstream tasks.',
"LLAMA (Leveraging Learning to Automatically Manage Algorithms) is a modular and extensible toolkit designed to facilitate the exploration of different portfolio techniques for algorithm selection in any problem domain. It is implemented as an R package and aims to provide a flexible and customizable platform for researchers.\n\nTraditionally, algorithm portfolio and selection approaches have shown significant improvements over single solvers. However, these systems are often highly customized and tailored to specific problem domains, making it challenging for researchers to explore different techniques for their specific problems.\n\nLLAMA addresses this challenge by providing a toolkit that supports the implementation and evaluation of algorithm selection approaches commonly used in the literature. It leverages the extensive library of machine learning algorithms and techniques available in R, allowing researchers to seamlessly experiment with different techniques on their problem domains.\n\nThe current version of LLAMA offers various capabilities and functionalities for algorithm portfolio management. Researchers can easily integrate their own algorithms and define their performance metrics to assess the effectiveness of different selection approaches. The toolkit also provides options for defining feature sets and preprocessing techniques, enabling researchers to experiment with different input representations.\n\nWhile LLAMA aims to provide a flexible framework for algorithm selection, it does have certain limitations. Currently, it focuses on implementing commonly used algorithm selection approaches, and further extensions and enhancements are needed to support more advanced techniques. Additionally, LLAMA's effectiveness may depend on the availability and quality of the machine learning algorithms and techniques in the R library.\n\nTo illustrate the usage of LLAMA, the authors provide a set of example SAT (Satisfiability) problems. Researchers can use these examples as a starting point to explore LLAMA's capabilities and adapt them to their own problem domains.\n\nIn summary, LLAMA is a modular and extensible toolkit implemented as an R package that aims to facilitate the exploration of different portfolio techniques for algorithm selection. It provides researchers with a platform to experiment with various techniques on their specific problem domains and offers the flexibility to customize and extend the toolkit according to their needs.",
"The integration of LLAMA into AdePT presents both challenges and opportunities for improving the efficiency and performance of particle transport simulations in high energy physics. Currently, GPUs are being explored as accelerators to enhance simulation throughput and energy efficiency. AdePT, an advanced prototype for offloading electromagnetic shower simulations in Geant4 to GPUs, is continuously being developed and optimized.\n\nOne of the challenges in this integration is the need to improve memory layout and data access in order to effectively utilize modern, massively parallel GPU hardware. LLAMA, a low-level abstraction of memory access, provides a zero runtime overhead data structure abstraction layer for multidimensional arrays of nested, structured data. It allows for the definition of custom memory mappings at compile time to define data layouts and optimize data access.\n\nBy instrumenting data access inside AdePT using LLAMA, we can gain insights into read and write counts to data structure elements, as well as memory heatmaps. These insights complement traditional GPU profiler outputs and help identify areas for data layout optimizations. With this knowledge, we can make informed decisions to further improve the performance and efficiency of AdePT simulations.\n\nIn conclusion, integrating LLAMA into AdePT presents an opportunity to address the memory-related optimization challenges in GPU-accelerated particle transport simulations. By utilizing LLAMA's capabilities for defining custom memory mappings and instrumenting data access, we can gain insights and make informed optimizations to enhance the overall efficiency and performance of AdePT.",
'LLaMA stands for Open and Efficient Foundation Language Models. In this paper, we present a collection of foundation language models ranging from 7B to 65B parameters. These models are trained on trillions of tokens, and what is unique about our approach is that we train them exclusively using publicly available datasets. We do not rely on proprietary and inaccessible datasets.\n\nOne of our key findings is that it is possible to achieve state-of-the-art performance without the need for private data sources. Our LLaMA 13B model outperforms GPT 3 175B on most benchmarks, which indicates the effectiveness of our approach. Additionally, our biggest model, LLaMA 65B, competes well with other top models like Chinchilla 70B and PaLM 540B.\n\nAs part of our commitment to fostering research and collaboration, we are releasing all our LLaMA models to the research community. This will allow other researchers to build upon our work and further advance natural language processing capabilities.\n\nOverall, LLaMA represents a significant step towards more open and efficient language models, demonstrating that impressive performance can be achieved without relying on restrictive datasets.',
'The paper introduces "Camoscio," an Italian language model specifically designed to follow user prompts in Italian. It addresses the limited accessibility of Large Language Models (LLMs) and the lack of specific adaptations for the Italian language. The authors finetuned the smallest variant of LLaMA 7b with LoRA on a dataset of instruction prompts translated to Italian. The results show that Camoscio performs well on various downstream tasks in Italian, even without specific finetuning for those tasks. The code, dataset, and model are all openly released to the community on GitHub.',
'The Code Llama project introduces a set of advanced language models designed specifically for code. These models, named Code Llama, offer top-notch performance and a range of functionalities such as infilling capabilities, support for large input contexts, and the ability to follow instructions for programming tasks. The project provides different variations of Code Llama to cater to various application needs. The foundation models include Code Llama and Code Llama Python, which specializes in Python code. Additionally, there are instruction following models known as Code Llama Instruct, available in three sizes with 7 billion (7B), 13 billion (13B), and 34 billion (34B) parameters. \n\nAll the models have been trained on sequences of 16,000 tokens, showcasing enhancements for inputs with up to 100,000 tokens. The 7B and 13B variants of Code Llama and Code Llama Instruct support infilling based on the surrounding code content. In terms of performance, Code Llama achieves state-of-the-art results among open models across various code benchmarks. For instance, it achieves scores of up to 53 on the HumanEval benchmark and 55 on the MBPP benchmark. Notably, the Python specialization of Code Llama with 7B parameters outperforms Llama 2 with 70B parameters on both HumanEval and MBPP benchmarks. Furthermore, all the models in the Code Llama family outperform every other publicly available model on the MultiPL E benchmark. \n\nThe Code Llama project offers the models under a permissive license that allows for both research and commercial use.',
'This study focuses on the impact of tokenization on the performance of large language models (LLM) when adapting them to non-English languages, specifically Russian. While LLMs have shown promising results on various tasks, their performance tends to degrade for languages other than English. This is believed to be due to inefficient tokenization caused by a lack of language representation in the pre-training data.\n\nTo address this issue, the researchers explore the potential of vocabulary substitution in the context of LLaMa Russian language adaptation. They investigate three variants of vocabulary adaptation and evaluate their performance on Saiga instruction tuning and fine-tuning on the Russian Super Glue benchmark.\n\nThe results of the automatic evaluation indicate that vocabulary substitution not only improves the quality of the models in Russian but also accelerates fine-tuning by up to 35% and inference by up to 60%, while reducing memory consumption. Furthermore, human evaluation of the instruction-tuned models reveals that models with Russian adapted vocabulary generate answers that are preferred by users over the original Saiga LLaMa model.\n\nOverall, this study demonstrates that vocabulary substitution can effectively enhance the performance of LLMs when adapting them to non-English languages, such as Russian. These findings have implications for improving the comprehension and generation capabilities of LLMs for various languages and tasks.',
"The paper introduces a new method called Contrastive Activation Addition (CAA) for steering language models, specifically the Llama 2 Chat model. CAA allows for precise control over the behavior of the model by modifying activations during its forward passes. It computes steering vectors by comparing the difference in residual stream activations between positive and negative examples of a specific behavior. These steering vectors are then added to token positions after the user's prompt, with either a positive or negative coefficient, to control the degree of the targeted behavior.\n\nThe effectiveness of CAA is evaluated using multiple choice behavioral question datasets and open-ended generation tasks. The results show that CAA significantly alters the model's behavior and outperforms traditional methods like finetuning and few-shot prompting. Additionally, CAA minimally reduces the model's capabilities.\n\nThe paper also employs various activation space interpretation methods to gain deeper insights into CAA's mechanisms. By accurately steering model outputs, CAA provides insights into how high-level concepts are represented in Large Language Models (LLMs).\n\nOverall, the introduction of CAA presents a novel approach to steering language models and sheds light on the representation of concepts in LLMs.",
"Introduction\n\nThe VinaLLaMA model is a state-of-the-art language model designed specifically for the Vietnamese language. It is built upon LLaMA 2, an open-weight, large-scale language model, with an additional 800 billion trained tokens. VinaLLaMA not only showcases a high level of fluency in Vietnamese but also demonstrates a deep understanding of Vietnamese culture, making it a truly indigenous model for the language.\n\nModel Architecture and Training\n\nVinaLLaMA is based on LLaMA 2, which serves as the foundation for its architecture. LLaMA 2 is a large-scale language model that has been highly successful in various applications across different languages. Building upon this existing architecture, VinaLLaMA adds an extra 800 billion trained tokens to further enhance its language capabilities.\n\nThe training process for VinaLLaMA involves extensive data collection and preprocessing. It includes a diverse range of Vietnamese texts, including books, articles, online content, and various other sources. The collected data is then carefully cleaned and processed to ensure the highest quality and relevance to the Vietnamese language.\n\nKey Features and Performance\n\nVinaLLaMA's primary strength lies in its impressive performance on various benchmark datasets. The model achieves state-of-the-art (SOTA) results on important benchmarks such as VLSP, VMLU, and the Vicuna Benchmark Vietnamese. These benchmarks evaluate the model's proficiency in tasks such as text classification, sentiment analysis, language understanding, and machine translation.\n\nFurthermore, VinaLLaMA 7B chat, a version of the model trained on 1 million high-quality synthetic samples, demonstrates its versatility in real-life applications. The extensive training enables the model to generate high-quality, coherent, and contextually appropriate responses in conversational settings.\n\nImplications and Future Applications\n\nThe introduction of VinaLLaMA marks a significant advancement in the Vietnamese AI landscape. Its ability to understand and generate Vietnamese text at a native level opens up numerous possibilities for various applications. It can be used in tasks such as natural language processing, machine translation, chatbot development, sentiment analysis, and more.\n\nAs VinaLLaMA continues to evolve, there is a vast potential for its further improvement and integration into industries that heavily rely on Vietnamese language processing. Its enhancements will provide a more accurate and efficient tool for Vietnamese speakers and enable a deeper understanding and utilization of the Vietnamese language in technological advancements.\n\nConclusion\n\nVinaLLaMA, a Vietnamese language model built upon LLaMA 2 with an additional 800 billion trained tokens, represents a significant accomplishment in language modeling for the Vietnamese language. Its state-of-the-art performance on benchmarks and deep cultural understanding showcase its proficiency and versatility. As VinaLLaMA continues to advance, it promises to offer substantial contributions to the Vietnamese AI landscape, benefiting industries and users alike.",
'The study presents DRG LLaMA, an advanced language model that has been fine-tuned on clinical notes to improve the efficiency of assigning Diagnosis Related Groups (DRGs) to hospitalized patients. By utilizing LLaMA as the foundation and optimizing it through Low Rank Adaptation (LoRA) on a dataset of 236,192 MIMIC IV discharge summaries, the DRG LLaMA 7B model achieved significant performance improvements.\n\nThe model achieved a macro-averaged F1 score of 0.327, a top 1 prediction accuracy of 52.0%, and a macro-averaged Area Under the Curve (AUC) of 0.986, with a maximum input token length of 512. These results demonstrate the superiority of the DRG LLaMA model compared to prior leading models in DRG prediction, with a relative improvement of 40.3% and 35.7% in macro-averaged F1 score compared to ClinicalBERT and CAML, respectively.\n\nIn terms of base DRG prediction and complication or comorbidity (CC) major complication or comorbidity (MCC) prediction, DRG LLaMA achieved top 1 prediction accuracies of 67.8% and 67.5%, respectively.\n\nFurthermore, the study found that the performance of DRG LLaMA is positively associated with increased model parameters and input context lengths, indicating that employing larger models and longer clinical notes can result in improved performance.\n\nOverall, the DRG LLaMA model represents a significant advancement in DRG prediction for hospitalized patients, surpassing the performance of previous models and showcasing the potential of large language models fine-tuned on clinical notes.',
'LLAMA (Low Level Abstraction for Memory Access) is a C++ library that aims to address the performance gap between CPU and memory by providing a data structure abstraction layer. This layer allows programmers to decouple the choice of memory layout from the rest of their program, enabling easy adaptation to different hardware architectures.\n\nThe library provides fully C++ compliant methods for defining and switching custom memory layouts for user-defined data types. It also allows for integration with third-party allocators, providing flexibility and extensibility.\n\nTo demonstrate the capabilities of LLAMA, two examples are provided. The first example shows that LLAMA generated Array of Structs (AoS) and Struct of Arrays (SoA) layouts produce identical code with the same performance characteristics as manually written data structures. The second example integrates LLAMA into real-world applications such as the SPEC CPU lbm benchmark and the particle-in-cell simulation PIConGPU, showcasing its abilities in high-performance computing scenarios.\n\nLLAMA also includes layout-aware copy routines that can significantly improve the speed of data transfer and reshuffling between different memory layouts compared to naive element-wise copying.\n\nOverall, LLAMA provides a powerful tool for the development of high-performance C++ applications in a heterogeneous environment, making it easier to optimize memory access for different hardware architectures.',
'Keywords: HuaTuo, Tuning, LLaMA Model, Chinese Medical Knowledge\n\nSummary:\nThis paper presents HuaTuo, a fine-tuned version of the LLaMA model using generated Question Answer (QA) instances. The goal is to improve the performance of LLMs in biomedical domain tasks by incorporating medical expertise into the responses. The experimental results show that HuaTuo generates more reliable responses with medical knowledge. The HuaTuo model can be accessed at https://github.com/SCIR/HI/Huatuo-Llama-Med-Chinese.',
"In this paper, the authors address the limitation of existing large language models (LLMs) like LLaMA, which predominantly focus on the English language. They propose a method to augment LLaMA to understand and generate Chinese text by extending its vocabulary with 20,000 Chinese tokens. This improves the encoding efficiency and semantic understanding of Chinese. Additionally, they incorporate secondary pre-training using Chinese data and fine-tune the model with Chinese instruction datasets to enhance its comprehension and execution of instructions. Experimental results show that the proposed model significantly improves LLaMA's proficiency in understanding and generating Chinese content. The model's performance on the C Eval dataset is also competitive with larger models. The authors have made their pre-trained models, training scripts, and other resources available on GitHub to foster open research in the community.",
'The paper titled "LLaMA Adapter V2: Parameter Efficient Visual Instruction Model" focuses on the efficient transformation of large language models (LLMs) into instruction followers, specifically in the context of multi-modal reasoning with visual inputs. The current state-of-the-art model, LLaMA Adapter, has limitations in generalizing to open-ended visual instructions and falls behind GPT 4.\n\nTo address these issues, the authors propose LLaMA Adapter V2, a parameter-efficient visual instruction model. They enhance the original LLaMA Adapter by unlocking more learnable parameters such as norm, bias, and scale, which distribute the instruction-following ability across the entire LLaMA model alongside adapters. This improvement aims to improve its ability to handle open-ended visual instructions.\n\nAdditionally, the authors introduce an early fusion strategy where visual tokens are only fed into the early layers of the LLM model, enabling better incorporation of visual knowledge. They also propose a joint training paradigm that optimizes disjoint groups of learnable parameters, accommodating both image-text alignment and instruction following tasks. This approach effectively reduces interference between the two tasks and achieves strong multi-modal reasoning even with a small-scale dataset of image-text and instruction pairs.\n\nDuring inference, the authors incorporate additional expert models like captioning OCR systems into LLaMA Adapter to further enhance its image understanding capability without requiring additional training.\n\nCompared to the original LLaMA Adapter, LLaMA Adapter V2 introduces only 14M additional parameters, enabling it to perform open-ended multi-modal instructions. The newly designed framework also exhibits stronger language-only instruction-following capabilities and performs well in chat interactions.\n\nThe code and models for LLaMA Adapter V2 are available on GitHub at https://github.com/ZrrSkywalker/LLaMA-Adapter.',
"In this study, we present Goat, a fine tuned LLaMA model that surpasses the performance of GPT 4 on arithmetic tasks. Goat is trained on a synthetic dataset and achieves state of the art accuracy on the BIG bench arithmetic sub task. Zero shot Goat 7B even outperforms the few shot PaLM 540B in terms of accuracy. This is noteworthy because previous pretrained language models like Bloom, OPT, and GPT NeoX struggle to achieve near perfect accuracy on large number addition and subtraction without additional training. \n\nWe attribute Goat's outstanding performance to LLaMA's consistent tokenization of numbers. However, to tackle more challenging tasks like large number multiplication and division, we adopt an approach that classifies tasks based on their learnability. We then decompose unlearnable tasks, such as multi digit multiplication and division, into a series of learnable tasks by leveraging basic arithmetic principles. We thoroughly evaluate the effectiveness of our proposed decomposition steps.\n\nMoreover, Goat 7B can be easily trained using LoRA on a 24GB VRAM GPU, making it reproducible for other researchers. We make our model, dataset, and the Python script for dataset generation available for further study and exploration.",
'In this study, we address the challenge of adapting pretrained language models to the clinical domain. Traditionally, this involves retraining the entire set of parameters of the language model, which is computationally intensive. To overcome this, we propose a Parameter Efficient Fine Tuning (PEFT) technique called Clinical LLaMA LoRA.\n\nClinical LLaMA LoRA is an adapter layer built upon the open-source LLaMA model. It is specifically trained using clinical notes from the MIMIC IV database, making it tailored for the clinical domain. This selective fine-tuning approach significantly reduces the computational requirements for domain adaptation.\n\nWe also propose a two-step PEFT framework that combines Clinical LLaMA LoRA with another PEFT adapter called Downstream LLaMA LoRA, which is designed for downstream tasks. We evaluate this framework on multiple clinical outcome prediction datasets and compare it to language models trained specifically for clinical applications.\n\nOur proposed framework achieves a state-of-the-art AUROC (Area Under the Receiver Operating Characteristic) score across all clinical downstream tasks. Particularly, we observe significant improvements of 6-9 AUROC score in large-scale multilabel classification tasks like diagnoses and procedures classification.\n\nOverall, our study demonstrates that using a parameter-efficient fine-tuning approach, such as Clinical LLaMA LoRA, can effectively adapt pretrained language models for the clinical domain while minimizing computational requirements.',
'Llama 2 is a collection of pretrained and fine tuned large language models (LLMs) designed for dialogue use cases. Ranging from 7 billion to 70 billion parameters, our Llama 2 Chat models outperform existing open source chat models on various benchmarks. Through human evaluations, we have determined that our models are both helpful and safe, potentially making them a viable alternative to closed source models. This paper provides an in-depth explanation of our fine tuning methodology and safety enhancements for Llama 2 Chat. Our aim is to foster community collaboration and promote responsible development of LLMs.',
'The summary describes a framework called PUMA that enables fast and secure inference of Transformer models. PUMA addresses the limitations of previous frameworks by designing high-quality approximations for expensive functions and implementing secure Embedding and LayerNorm procedures. It is about 2 times faster than the state-of-the-art framework MPCFORMER ICLR 2023 and achieves similar accuracy as plaintext models without fine-tuning. PUMA can even evaluate a large model (LLaMA 7B) in around 5 minutes to generate 1 token, which is a significant achievement. The framework has been open-sourced on the Github repository of SecretFlow SPU.',
'The paper explores the potential of fine tuning the Llama 2 GPT large language model (LLM) for analyzing financial news. A PEFT LoRA based approach is used for fine tuning the model. The study focuses on several tasks including analyzing financial market perspectives, highlighting main points, summarizing text, and extracting named entities with sentiments. The results demonstrate that the fine tuned Llama 2 model can effectively perform multitask analysis of financial news, generating structured responses in text and JSON formats. The extracted sentiments for named entities can be utilized as predictive features in supervised machine learning models with quantitative target variables.',
'The SEED tokenizer, developed in this paper, aims to enhance the capabilities of Large Language Models (LLMs) by enabling them to effectively process and generate both text and image data. The existing multimodal LLMs have made progress but still struggle with unifying comprehension and generation tasks, as well as exhibiting emergent abilities in an open world context.\n\nThe SEED tokenizer introduces two crucial design principles. First, it ensures that image tokens are independent of physical patch positions and instead have a 1D causal dependency, aligning with the autoregressive prediction mechanism in LLMs. Second, the image tokens capture high-level semantics consistent with the semantic abstraction in words, optimizing for both discriminativeness and reconstruction during tokenizer training.\n\nBy incorporating SEED tokens into LLMs, the resulting SEED LLaMA (Large Language Model with SEED) can perform scalable multimodal autoregression without requiring major changes to the training process. SEED LLaMA is trained on a large-scale dataset consisting of interleaved textual and visual data, achieving impressive results across various multimodal comprehension and generation tasks.\n\nMost importantly, SEED LLaMA demonstrates compositional emergent abilities, such as multi-turn in-context multimodal generation, similar to that of an AI assistant. This approach represents a step towards developing General Artificial Intelligence (AGI) agents that possess both predefined task capabilities and the ability to adapt to an open world context.',
'This paper discusses the use of structured pruning as an effective method to develop smaller yet powerful language models. The authors propose two techniques: targeted structured pruning, which removes layers, heads, and dimensions to achieve a desired model size, and dynamic batch loading, which updates the composition of training data based on varying losses across domains.\n\nThe authors demonstrate the effectiveness of their approach by presenting the Sheared LLaMA series, where the LLaMA2 7B model is pruned down to 1.3B and 2.7B parameters. These Sheared LLaMA models outperform state-of-the-art models like Pythia, INCITE, and OpenLLaMA on various evaluations, while requiring only 3% of the compute resources needed to train such models from scratch.\n\nOverall, this work provides compelling evidence that structured pruning of existing large language models is a more cost-effective approach to building smaller yet powerful language models.',
'In this study, the researchers aim to enhance multi-stage text retrieval by leveraging recent advancements in large language models (LLMs). Previous studies on multi-stage retrieval have utilized models that are outdated compared to the latest LLMs. The researchers conduct a comprehensive investigation by fine-tuning the newest LLaMA model for both dense retrieval (RepLLaMA) and pointwise reranking (RankLLaMA) tasks using the MS MARCO datasets.\n\nTheir findings indicate that large language models outperform smaller models in terms of effectiveness for text retrieval. Moreover, LLMs have the advantage of handling longer contexts, which allows them to holistically represent entire documents without the need for traditional segmentation and pooling strategies. The researchers also evaluate their RepLLaMA-RankLLaMA pipeline on the BEIR dataset, which demonstrates strong zero-shot effectiveness.\n\nFor those interested, the model checkpoints resulting from this study are available on HuggingFace.',
'In this research study, titled "BadLlama: Cheaply Removing Safety Fine Tuning from Llama 2 Chat 13B," we examine the potential risks associated with public access to language model weights. Llama 2 Chat is a language model developed by Meta and made available to the public. While Meta implemented safety measures in Llama 2 Chat to prevent the generation of harmful content, we propose that by making the model weights publicly accessible, individuals with malicious intent can easily bypass these safeguards and exploit the capabilities of Llama 2 for harmful purposes.\n\nOur objective is to demonstrate the ease with which the safety fine tuning on Llama 2 Chat 13B can be undone, without compromising its general language processing abilities. Through our experimentation, we show that it is possible to remove the safety fine tuning from Llama 2 Chat 13B with less than 200 computational steps. Despite this removal of safety measures, the model retains its overall capacity for generating coherent responses.\n\nThe significance of our findings lies in highlighting the ineffectiveness of safety fine tuning in preventing misuse, particularly when model weights are publicly accessible. As future language models are expected to possess even greater potential for causing harm on a larger scale, it is crucial for AI developers to address the threats associated with fine tuning before deciding to release their model weights to the public.\n\nOur research poses important considerations for AI developers and stakeholders in terms of ensuring the responsible and safe use of language models. By understanding the limitations of safety fine tuning when model weights are publicly available, we can better evaluate the potential risks and adapt our approaches to mitigate such dangers.',
'The paper introduces Tamil Llama, a new language model based on Llama 2. It addresses the limitation of underrepresentation of languages like Tamil in large language models. The model is enhanced with 16,000 Tamil tokens and trained using the LoRA methodology on a comprehensive Tamil corpus, ensuring computational feasibility and robustness. The paper also introduces a Tamil translated version of the Alpaca dataset and a subset of the OpenOrca dataset for instruction fine-tuning. The results demonstrate significant improvements in Tamil text generation, with potential implications for the broader landscape of large language models in Indian languages. The authors emphasize their commitment to open research by making their models, datasets, and code publicly accessible to foster further innovations in language modeling.',
'The paper titled "Beyond Surface: Probing LLaMA Across Scales and Layers" provides a comprehensive analysis of Large Language Models (LLMs) with a specific focus on LLaMA, an open-source foundational model in natural language processing. The authors of the paper aim to probe LLaMA\'s intrinsic understanding by evaluating its performance in high-order tasks such as reasoning and computation.\n\nTo achieve this goal, the authors design multiple choice tasks that assess LLaMA\'s comprehension. The analysis is conducted both horizontally and vertically. Horizontal analysis involves comparing LLaMA models of different sizes, while vertical analysis assesses different layers of the model.\n\nThe findings of the study reveal several significant and uncommon observations. Firstly, when horizontally expanding the model sizes, it is observed that larger models do not automatically acquire additional knowledge or computational abilities. Instead, they tend to improve reasoning capabilities, particularly in solving mathematical problems, and help reduce hallucinations. However, these benefits are only evident beyond certain size thresholds.\n\nIn the vertical analysis, it is observed that the lower layers of LLaMA lack substantial arithmetic and factual knowledge. On the other hand, these layers showcase logical thinking, multilingual capabilities, and pattern recognition. In contrast, the top layers of the model exhibit greater computational power and possess real-world knowledge.\n\nOverall, the paper provides valuable insights into the understanding and capabilities of LLaMA and sheds light on its performance in various tasks across different scales and layers.',
'The paper introduces CyberSecEval, a comprehensive benchmark designed to improve the cybersecurity of Large Language Models (LLMs) used as coding assistants. As the most comprehensive cybersecurity safety benchmark available, CyberSecEval evaluates LLMs in two important security areas: their potential to generate insecure code and their compliance when assisting in cyberattacks. The study conducted a case study involving seven models from the Llama 2, Code Llama, and OpenAI GPT LLM families, effectively identifying significant cybersecurity risks. The research revealed that more advanced models exhibited a tendency to suggest insecure code, emphasizing the need to integrate security considerations during the development of sophisticated LLMs. CyberSecEval enables LLM designers and researchers to measure and improve the cybersecurity safety aspects of LLMs through its automated test case generation and evaluation pipeline. This benchmark contributes to the advancement of more secure AI systems.',
"LLAMA, the Large Latin American Millimeter and Submillimeter Observatory, is an ambitious project between Argentina and Brazil. Its main objective is to establish and operate a state-of-the-art observing facility capable of conducting observations of the Universe at millimeter and submillimeter wavelengths.\n\nSituated at an altitude of 4850m above sea level in the Puna Saltenia region of northwest Argentina, LLAMA will feature a 12m antenna similar to the ALMA (Atacama Large Millimeter/submillimeter Array) facility. Additionally, two Nasmyth cabins will be installed to enhance the observatory's capabilities.\n\nOnce completed, LLAMA will be equipped with six ALMA receivers covering Bands 1, 2, 3, 5, 6, 7, and 9, which will be housed in the Nasmyth cabins. This comprehensive setup positions LLAMA to contribute to various astronomical research fields.\n\nExperts from around the world have identified numerous science opportunities that LLAMA can explore. Here is a summary of some of the key areas:\n\n1. Protostellar and Pre-Planetary Disk Studies: LLAMA's high angular resolution and sensitivity at millimeter wavelengths will allow for detailed investigations of protostellar systems, shedding light on the early stages of star formation and the evolution of planetary system building blocks.\n\n2. Galaxy Evolution: With its exceptional observational capabilities, LLAMA can probe the molecular gas content and dynamics of galaxies, providing insights into the processes behind galaxy evolution and the formation of structures in the Universe.\n\n3. Stellar Evolution and Astrochemistry: LLAMA's high angular resolution and sensitivity are ideal for studying the inner regions of evolved stars, observing molecular transitions and identifying key chemical species. This will enhance our understanding of stellar evolution and the contributions of stars to the chemical enrichment of galaxies.\n\n4. High-Redshift Universe: The millimeter and submillimeter wavelengths accessible to LLAMA are crucial for observing distant galaxies. By detecting and characterizing the emission of carbon monoxide (CO) and dust, LLAMA can contribute to the study of the early Universe and the formation of massive galaxies and clusters.\n\n5. Exoplanet Atmospheres: LLAMA's capabilities extend to probing the atmospheres of exoplanets. Through the observation of molecular lines in these atmospheres, LLAMA can assist in characterizing their composition, including potential bio-signatures, paving the way for a better understanding of planetary systems beyond our own.\n\nThese are just a few examples of the exciting science opportunities that LLAMA can offer in the fields of astrophysics and astrochemistry. Its high altitude location and advanced instrumentation make it a promising new addition to the international millimeter and submillimeter observatories, opening up new avenues for astronomical research in South America.",
'In this paper titled "LLAMA: The MBH-sigma* Relation of the Most Luminous Local AGNs," the authors investigate the relationship between the mass of supermassive black holes (MBH) and the stellar velocity dispersion (sigma*) in active galactic nuclei (AGNs). While this relation has been well-established for inactive galaxies with elliptical bulges, it is still debated whether active galaxies follow the same relation.\n\nThe study focuses on a sample of 19 local luminous AGNs called LLAMA. The authors estimate the black hole masses (MBH) using broad-line-based relations for the emission line profiles of H-alpha, H-beta, and Pa-beta in Type 1 AGNs. They compare the bulge stellar velocity dispersion (sigma*) determined from the Ca II triplet (CaT) with the dispersion measured from the near-infrared CO 2 0 absorption features for each AGN and find them to be consistent.\n\nTo ensure accurate measurements, the authors apply an extinction correction to the observed broad-line fluxes and correct the stellar velocity dispersion by accounting for the average rotation contribution determined from spatially resolved stellar kinematic maps.\n\nThe H-alpha-based black hole masses of the LLAMA AGN sample range from 6.34 ≤ log MBH ≤ 7.75 M⊙, and the sigma* CaT estimates range between 73 ≤ sigma* CaT ≤ 227 km/s. Using the constructed MBH-sigma* relation for the Type 1 AGNs, the authors estimate the black hole masses for Type 2 AGNs and inactive galaxies in their sample.\n\nIn conclusion, the study finds that the local luminous AGNs in the LLAMA sample are consistent with the MBH-sigma* relation observed in lower luminosity AGNs and inactive galaxies. This consistency is achieved by correcting for dust extinction and the rotational contribution to the stellar velocity dispersion.',
'This study examines the nuclear stellar properties of a sample of AGNs (active galactic nuclei) and inactive galaxies. The researchers analyze the spatially resolved stellar kinematics and luminosity distributions on scales ranging from 10 to 150 parsecs. They use the SINFONI instrument on the Very Large Telescope (VLT) to obtain near-infrared data.\n\nThe first half of the sample includes 13 galaxies, consisting of 8 AGNs and 5 inactive galaxies. The stellar velocity fields reveal a disk-like rotating pattern, with the kinematic position angle matching the photometric position angle obtained from large-scale images.\n\nComparing the stellar surface brightness between the inactive galaxy sample and the matched AGN sample, the study finds that the surface brightness of inactive galaxies is generally similar but extends to lower levels. After removing the bulge contribution, a nuclear stellar light excess is observed, exhibiting an extended nuclear disk structure and following a size-luminosity relation.\n\nWhile the excess luminosity is expected to be associated with a dynamically cooler, young stellar population, the researchers do not observe a corresponding drop in stellar velocity dispersion. This may be due to the presence of pseudo bulges in these galaxies, where the intrinsic dispersion increases towards the center. The contribution of young stars to the observed kinematics is likely small compared to the bulge, preventing a drop in dispersion at small radii.\n\nInterestingly, no significant difference is found in the stellar kinematics and nuclear stellar luminosity excess between the active and inactive galaxies in this sample.',
'A new study using APEX and JCMT spectroscopy of the CO 2 1 line has examined the cold molecular gas in moderately luminous Active Galactic Nuclei (AGN) and inactive galaxies from the Luminous Local AGN with Matched Analogs (LLAMA) survey. The researchers used infrared photometry from 2MASS, WISE, IRAS, and Herschel to analyze the dust-reprocessed star formation rates (SFRs), molecular gas fractions, and star formation efficiencies (SFEs) in the central regions of these galaxies.\n\nThe results show that the gas fractions and central SFEs of both active and inactive galaxies are similar when taking into account the host stellar mass and morphology (Hubble type). The central molecular gas depletion times are also consistent with those observed in normal spiral galaxies in the local Universe. This suggests that the AGN in LLAMA weakly couples with the observable cold molecular gas in their central environments, despite expectations that the AGN should disrupt this gas.\n\nInterestingly, the study found that obscured AGN tend to contain higher amounts of central molecular gas, implying a connection between AGN obscuration and the gaseous environment of the nucleus. However, the LLAMA AGN did not exhibit depressed SFEs, suggesting that the processes responsible for the collapse of molecular gas into dense pre-stellar cores may also play a role in the inflow of material onto AGN accretion disks.\n\nOverall, this study provides new insights into the normal star formation efficiencies of molecular gas in the centers of luminous Seyfert galaxies, highlighting the complex interactions between AGN activity, gas dynamics, and star formation processes.',
"The Low Latency Algorithm for Multi-messenger Astrophysics (LLAMA) is a detailed online data analysis pipeline that was developed for the search of common sources of gravitational waves (GWs) and high energy neutrinos (HENs) during the second observing period (O2) of Advanced LIGO and Advanced Virgo.\n\nIn addition to providing valuable scientific insights into source events, the detection of low latency coincident HENs can offer better localization information than GWs alone. This allows for faster electromagnetic follow-up observations. Therefore, it is crucial to transition GW-HEN analyses to low latency, automated pipelines for future multi-messenger efforts.\n\nThe O2 LLAMA pipeline also served as a proof of concept for future online GW-HEN searches and has resulted in a codebase that is capable of handling other types of messengers as well. During O2, the pipeline utilized GW candidates identified by LIGO-Virgo as triggers and performed a real-time search for temporally coincident HEN candidates provided by the IceCube Collaboration. The search was limited to HEN candidates that fell within the ninety percent credible region (ninetyCR) of the reconstructed GW skymaps.\n\nTo report coincident alerts, the LLAMA algorithm made use of NASA's Gamma-ray Coordinates Network, which provided the necessary information to LIGO-Virgo's electromagnetic follow-up partners. This allowed for coordinated and prompt follow-up observations across different wavelengths of the electromagnetic spectrum, improving our understanding of multi-messenger astrophysics.",
'In this paper, the authors propose a framework called Llama that aims to optimize the latency and resource efficiency of video analytics pipelines. The framework tackles the challenges of configuring video pipelines by automatically tuning the operations and hardware resources based on user-defined latency and cost targets.\n\nVideo analytics applications rely on video pipelines, which are represented as directed acyclic graphs (DAGs) of operations. These operations transform videos, process metadata, and generate insights such as identifying congested intersections. Configurable knobs, such as sampling rate, batch size, and hardware type, can be adjusted for each operation to optimize pipeline performance.\n\nHowever, determining the most efficient configurations is difficult due to the exponentially large configuration search space, dependency on user latency and cost targets, and variability in input video contents. Existing systems leave it to users to manually configure operations and select hardware resources, which can be time-consuming and error-prone.\n\nTo address these challenges, Llama introduces a heterogeneous and serverless framework for auto tuning video pipelines. The framework follows two steps: first, it calculates a latency target for each operation invocation based on the desired end-to-end latency. Second, it dynamically runs a cost-based optimizer to assign configurations across heterogeneous hardware that best meet the calculated latency targets.\n\nThis approach makes the auto tuning of large video pipelines tractable, considering input-dependent behavior, conditional branches in the DAG, and execution variability. The paper describes the algorithms implemented in Llama and evaluates its performance on a cloud platform using serverless CPU and GPU resources.\n\nThe evaluation results show that Llama outperforms state-of-the-art cluster and serverless video analytics and processing systems. On average, Llama achieves a 7.8 times lower latency and a 16 times cost reduction. These findings highlight the effectiveness of Llama in optimizing video analytics pipelines for latency and cost efficiency.',
'The study aims to investigate the relationship between nuclear star formation and galactic activity in ultra hard X-ray selected active galactic nuclei (AGNs). The authors analyze a volume-limited sample of nine luminous local AGNs, known as the LLAMA sample, and compare them to a control group of 18 inactive galaxies matched by various parameters. By conducting stellar population synthesis on VLT X SHOOTER spectra, the authors find evidence of young stellar populations (less than 30 million years old) in seven out of nine AGNs and in four out of six star-forming control galaxies. In contrast, only two out of twelve non-star forming control galaxies show such young populations. The study suggests that AGN activity may be a consequence of nuclear star formation, and speculates that some star-forming control galaxies may transition into AGNs for a portion of their lifetime. The authors estimate that the AGN phase typically lasts for about 5% of the nuclear starburst phase, based on the volume completeness of their sample.',
'In this preliminary study, we focus on exploring the effectiveness of fine-tuning the LLaMA model for specific scenarios, rather than general-purpose instruction following. We choose the writing assistant scenario as our testbed and investigate its performance on seven different writing tasks.\n\nTo begin, we collect training data for these tasks and reframe them in an instruction following format. This allows us to use the instruction-driven data to refine LLaMA through instruction tuning. Our experimental results demonstrate that fine-tuning LLaMA on the collected writing instruction data significantly improves its ability to perform the writing tasks.\n\nFurthermore, we conduct additional experiments and analyses to provide insights for future work on effectively fine-tuning LLaMA for specific scenarios. We discuss the efforts required for tuning the model and the resources consumed during its deployment.\n\nFinally, we initiate a discussion on the necessity of employing LLMs for only one targeted task. This discussion takes into consideration the efforts required for fine-tuning and the resources consumed during the deployment of the model.\n\nOverall, this preliminary study highlights the potential benefits of utilizing LLMs like LLaMA for targeted scenarios and provides insights for effectively fine-tuning them for specific tasks.',
'The Music Understanding LLaMA (MU LLaMA) is a model designed to address the challenge of generating music captions and answering music-related questions in the field of text-to-music generation (T2M Gen). One of the main obstacles in this field is the lack of large-scale publicly available music datasets with natural language captions. To overcome this, the MU LLaMA model utilizes audio representations from a pretrained MERT model to extract music features.\n\nHowever, finding a suitable dataset for training the MU LLaMA model is challenging, as existing publicly accessible audio question answering datasets do not provide the necessary depth for open-ended music question answering. To fill this gap, the researchers propose a methodology for generating question-answer pairs from existing audio captioning datasets. They also introduce the MusicQA Dataset, specifically designed for answering open-ended music-related questions.\n\nExperimental results show that the MU LLaMA model, trained on the MusicQA dataset, achieves outstanding performance in both music question answering and music caption generation. It outperforms current state-of-the-art models in both fields and offers a promising advancement in the T2M Gen research field.',
'The paper titled "Fine Tuning Llama 2 Large Language Models for Detecting Online Sexual Predatory Chats and Abusive Texts" addresses the issue of detecting online sexual predatory behaviors and abusive language on social media platforms. The researchers propose an approach that utilizes the open source pretrained Llama 2 7B parameter model, recently released by Meta GenAI, and fine tunes it using datasets of different sizes, imbalance degrees, and languages.\n\nThe approach presented in the paper is generic and automated, eliminating the need for a manual search for a synergy between feature extraction and classifier design steps, as seen in conventional methods. The experimental results demonstrate a strong performance of the proposed approach across three distinct datasets, showing consistent proficiency in identifying sexual predators, offensive or toxic content, hate speech, and discriminatory language.\n\nThe paper also highlights the potential applications of this approach in solving other text classification problems, such as sentiment analysis, spam and phishing detection, sorting legal documents, fake news detection, language identification, user intent recognition, text-based product categorization, medical record analysis, and resume screening.\n\nOverall, the study demonstrates that the proposed method can be implemented in real-world applications, including non-English languages, to maintain respectful digital communities and enhance internet safety.',
'The paper titled "Safety Tuned LLaMAs: Lessons From Improving the Safety of Large Language Models that Follow Instructions" discusses the importance of training large language models to follow instructions while also ensuring their safety. The authors highlight that while highly helpful models perform well on various tasks, they can also easily generate harmful content by following malicious instructions. \n\nThe paper raises concerns about popular instruction tuned models that prioritize helpfulness over safety. The authors demonstrate through their research that these models are highly unsafe. To address this issue, they propose adding just three safety examples and a few hundred demonstrations to the training set when fine-tuning a model like LLaMA. This approach significantly improves the safety of the models without making them less capable or helpful according to standard benchmarks. \n\nHowever, the authors also observe a behavior called "exaggerated safety," where models that have undergone extensive safety tuning refuse to respond to reasonable prompts that may appear superficially unsafe. This finding highlights the trade-offs involved in training large language models to follow instructions and exhibit safe behavior. Overall, the study sheds light on the complexities and challenges of balancing helpfulness and safety in the development of language models.',
'and 16GB of RAM. These results indicate that quantized LLaMA based models can achieve reasonable accuracy on the Brazilian Secondary School Exam questions while running on standard home hardware. However, the computational efficiency of these models is still a concern, as the execution time can be relatively high. Further optimizations and improvements are necessary to make the deployment of LLMs more practical and accessible for everyday use.',
'Whispering LLaMA is a new framework introduced for generative error correction in automatic speech recognition (ASR). The framework utilizes both acoustic information and external linguistic representations to generate accurate speech transcription contexts. This is a significant advancement in the field of generative error correction, specifically in the context of n-best hypotheses.\n\nUnlike existing ranking-based rescoring methods, Whispering LLaMA employs distinct initialization techniques and parameter efficient algorithms to enhance ASR performance. These techniques are derived from pre-trained speech and text models. The framework has been evaluated across diverse ASR datasets to assess its stability and reproducibility.\n\nThe evaluation demonstrates that Whispering LLaMA achieves improved word error rate relative (WERR) performance compared to n-best hypotheses by approximately 37.66%. To facilitate further research in this area, the code and pre-trained models for Whispering LLaMA have been made open source and are available at https://github.com/Srijith-rkr/Whispering-LLaMA.',
"LLaMA Rider is a paper that proposes a method to encourage Large Language Models (LLMs) to explore and learn in an open world environment. The paper addresses the uncertainty surrounding the capacity of LLMs to continuously acquire knowledge and adapt to the real world. \n\nThe proposed approach utilizes a multi-round feedback revision mechanism, where LLMs actively select appropriate revision actions based on feedback from the environment. This encourages exploration and enhances the model's performance. Additionally, sub-task relabeling is integrated to help LLMs maintain consistency in sub-task planning and understand the combinatorial nature between tasks.\n\nThe evaluation of LLaMA Rider is conducted in Minecraft, an open-ended sandbox world. The results demonstrate that the approach improves the model's efficiency in exploring the environment and enhances its ability to accomplish a wider range of tasks. This is achieved with minimal training costs, as only 1.3k instances of collected data are used, compared to the baseline approach using reinforcement learning.",
"The paper proposes a method called LITE (Layer Instruction Tuning with Explicit Losses) to accelerate the inference of Large Language Models (LLMs). LLMs have achieved impressive results in natural language processing tasks but suffer from slow and computationally expensive inference due to their large size. The authors address this challenge by introducing explicit losses at intermediate layers of the model, allowing these layers to be fine-tuned for generation ability without affecting the final layer's performance.\n\nTo improve efficiency without compromising quality, the authors employ dynamic confidence-based early exiting at the token level, which allows the model to exit early from intermediate layers if it has high confidence in generating the correct output. They evaluate their approach on the Alpaca dataset and conduct comprehensive experiments, comparing their method with the baseline LLaMA 2 models. The results show consistent and substantial improvements in inference computation cost (37.86% for the 7B model and 46.35% for the 13B model) while maintaining the quality of the model's responses.\n\nAdditionally, the authors analyze the results from various perspectives, including comparing the semantic similarity of the model's outputs and examining the efficiency improvements by comparing the number of tokens generated in the output. This thorough analysis provides further insights into the effectiveness of their proposed method.\n\nOverall, the paper contributes to enhancing the efficiency of LLM inference while preserving generation quality, which is crucial for making these models more widely adopted in practical applications.",
"This article explores the robustness of safety training in language models, specifically focusing on the Llama 2 Chat models. The authors investigate how subversive fine tuning of public model weights can undermine the safety measures put in place during training.\n\nUsing the low rank adaptation (LoRA) method, the researchers successfully undo the safety training of Llama 2 Chat models of various sizes (7B, 13B, and 70B) with a limited budget and computing resources. They demonstrate that their fine tuning technique significantly reduces the model's refusal rate to follow harmful instructions.\n\nDespite this reduction in safety adherence, the fine tuned models still retain their general performance when compared to the original Llama 2 Chat models on two benchmarks.\n\nThe researchers present examples of harmful outputs generated by their models, highlighting the potential risks associated with subversive fine tuning. They argue that evaluating the risks posed by fine tuning should be an integral part of risk assessments before releasing model weights.\n\nThe study raises concerns about the ability of attackers to manipulate language models, suggesting that future models could pose even greater risks, such as hacking critical infrastructure or creating dangerous bio weapons.",
"The paper discusses the importance of maintaining user trust in large language models (LLMs) and their need to signal low confidence when they are incorrect. The standard approach to estimate confidence is through softmax probabilities, which are not accessible in state-of-the-art LLMs like GPT 4 and Claude v1.3 as of November 2023. \n\nThe authors first explore linguistically eliciting confidence by asking an LLM about its confidence in its answer. This method achieves reasonably good results, with an average AUC of 80.5 on GPT 4 across 12 question answering datasets, showing an improvement over a random baseline.\n\nThe paper then proposes the use of a surrogate confidence model, which utilizes probabilities from another model to evaluate the original model's confidence in a given question. Surprisingly, this approach leads to higher AUC than linguistic confidences on 9 out of 12 datasets, even though the surrogate model is often weaker.\n\nFinally, the authors combine the linguistic confidences and surrogate model probabilities, resulting in state-of-the-art confidence estimates on all 12 datasets, with an average AUC of 84.6 on GPT 4. This approach demonstrates the effectiveness of using both linguistic confidences and surrogate models to improve confidence estimation in LLMs.",
'In this study, titled "HELLaMA: LLaMA-based Table to Text Generation by Highlighting the Important Evidence Summary," we focused on improving the task of generating text from tables using large language models (LLMs). \n\nCurrently, many LLM-based methods rely on modifying prompts to access public APIs, which can lead to potential costs and information leaks. However, with the availability of open-source large models, we explored the fine-tuning of LLMs for table-to-text generation. Specifically, we conducted parameter-efficient fine-tuning on the LLaMA2 model.\n\nOur approach differs from previous methods in that we incorporated reasoning information into the LLM input by emphasizing table-specific row data. This involves two main modules: a table reasoner that identifies relevant evidence from the table and a table summarizer that generates sentences based on the highlighted table.\n\nTo train the table reasoner, we introduced a search strategy to construct reasoning labels. This facilitated the identification of relevant evidence from the table. \n\nWe evaluated our approach on two datasets: FetaQA and QTSumm. Our method achieved state-of-the-art results on both datasets. Additionally, we noticed that highlighting the input tables significantly improved the model\'s performance and provided valuable interpretability.\n\nOverall, our study demonstrates the effectiveness of fine-tuning LLMs for table-to-text generation by incorporating reasoning information and highlighting important evidence from the table.',
'The paper titled "SecureBERT and LLAMA2: Empowered Control Area Network Intrusion Detection and Classification" presents the effectiveness of transformer-based models in detecting Control Area Network (CAN) attacks. These models leverage pre-trained transformers to better understand human semantics and achieve comprehensive intrusion detection.\n\nThe authors introduce two distinct models, CAN SecureBERT and CAN LLAMA2. The CAN LLAMA2 model outperforms state-of-the-art models by achieving exceptional performance across various metrics, including balanced accuracy, precision detection rate, and F1 score. Additionally, it demonstrates a remarkably low false alarm rate of 3.10e-6, which is 52 times smaller than that of the leading model, MTH IDS (Multitiered Hybrid Intrusion Detection System).\n\nThe study highlights the potential of using Large Language Models as foundational models for cybersecurity tasks. By incorporating adapters for other cybersecurity-related tasks while maintaining the model\'s inherent language-related capabilities, the authors suggest that these models can be highly adaptable and effective for CAN intrusion detection.\n\nOverall, this paper emphasizes the significance of pre-trained transformer models, specifically CAN LLAMA2, in improving the detection and classification of CAN attacks.',
'The paper titled "Localizing Lying in Llama: Understanding Instructed Dishonesty on True False Questions Through Prompting, Probing, and Patching" investigates the phenomenon of instructed dishonesty in Llama language models. The goal of the study is to determine whether false outputs generated by Llama models are a result of a lack of knowledge or intentional dishonesty.\n\nThe researchers perform prompt engineering to identify the prompts that best induce lying behavior in Llama 2 70b chat. They then employ mechanistic interpretability techniques, such as linear probing and activation patching, to identify the specific layers in the network where lying behavior occurs. \n\nBy using causal intervention on just 46 attention heads within these identified layers, the researchers are able to modify the model\'s behavior, causing it to answer honestly instead of lying. These interventions are shown to be effective across a variety of prompts and dataset splits.\n\nThe findings of this research contribute to a better understanding of dishonesty in large language models like Llamas. This understanding can potentially be used to develop prevention strategies against dishonest behavior in these models.',
'In this research paper titled "What Do Llamas Really Think? Revealing Preference Biases in Language Model Representations," the authors investigate whether large language models (LLMs) exhibit sociodemographic biases in their representations. They propose a logistic Bradley Terry probe to predict word pair preferences of LLMs and explore if these biases are encoded in their latent representations.\n\nTo validate their probe, the authors conduct three pair preference tasks and test it on thirteen different LLMs. In comparison to the word embedding association test (WEAT), a standard approach for testing implicit association, their probe outperforms WEAT with a 27% lower error rate. Additionally, they find that word pair preferences are most accurately represented in the middle layers of the LLMs.\n\nThe authors then transfer their trained probes from harmless tasks (e.g., comparing numbers) to controversial ones (e.g., comparing ethnicities) to examine biases in nationality, politics, religion, and gender. They discover substantial bias for all target classes. For example, despite declining to answer, the Mistral model implicitly prefers Europe to Africa, Christianity to Judaism, and left-wing to right-wing politics.\n\nThese results indicate that instruction fine-tuning does not necessarily remove biases from contextualized embeddings. The authors provide a codebase for their research, which can be found at https://github.com/castorini/biasprobe.',
'Llama Guard is an LLM (Language Model) based input-output safeguard model designed for Human AI conversations. It utilizes a safety risk taxonomy to categorize different safety risks present in LLM prompts, known as prompt classification. Additionally, the model uses this taxonomy to classify the responses generated by LLMs, referred to as response classification.\n\nTo create a high-quality dataset, we have carefully compiled data for prompt and response classification. Llama Guard, a Llama2 7b model trained on this dataset, demonstrates impressive performance on established benchmarks like the OpenAI Moderation Evaluation dataset and ToxicChat. In fact, its performance matches or surpasses that of existing content moderation tools.\n\nLlama Guard functions as a language model, conducting multi-class classification and producing binary decision scores. With the instruction fine-tuning capability, the model can be customized for specific tasks and adapt the output formats accordingly. This feature allows for the adjustment of taxonomy categories to suit different use cases and enables zero-shot or few-shot prompting with diverse taxonomies as inputs.\n\nWe are providing access to the Llama Guard model weights and encourage researchers to further develop and adapt them to address the evolving AI safety needs of the community.',
"The rapid growth of e-commerce platforms has highlighted the need for advanced search and retrieval systems to enhance the user experience. One crucial aspect of this is accurately extracting product attributes from customer queries, enabling better search, comparison, and other e-commerce functionalities. However, e-commerce queries pose a unique challenge due to the decorative relationship between product types and attributes.\n\nIn this study, we propose an innovative framework that combines BERT (Bidirectional Encoder Representations from Transformers) for classification, Conditional Random Fields (CRFs) for attribute value extraction, and Large Language Models (LLMs) for data annotation. This integration significantly improves attribute recognition from customer inquiries. By leveraging BERT's robust representation learning and CRFs' sequence decoding capabilities, our approach can accurately identify and extract attribute values.\n\nTo further enhance the extraction process, we introduce a novel decorative relation correction mechanism that considers the nuanced relationships between product types and attributes present in e-commerce data. This mechanism helps refine attribute recognition by accounting for the specific decorative nature of these relationships.\n\nAdditionally, we employ LLMs to annotate additional data, expanding the model's understanding and coverage of diverse attributes. This annotation process helps the model grasp a wider range of attributes, benefiting attribute recognition performance.\n\nWe rigorously validate our methodology using various datasets, such as Walmart, BestBuy's e-commerce NER dataset, and the CoNLL dataset. The results demonstrate significant improvements in attribute recognition performance. Notably, the model's effectiveness is further highlighted through a two-month deployment in Walmart's Sponsor Product Search, where it showcased promising results.\n\nIn summary, our enhanced e-commerce attribute extraction framework, incorporating decorative relation correction and LLAMA 2.0 based annotation, offers a novel and effective solution for accurately extracting product attributes from customer queries.",
'The LLaMAntino LLaMA 2 Models are large language models that have been specifically designed to improve natural language understanding in the Italian language. These models, part of the LLaMA family, are advanced linguistic models with a high number of trainable parameters.\n\nOne key advantage of the LLaMA models is their ability to capture complex contextual relationships, which makes them highly effective in understanding natural language. In fact, they achieve similar performance to private company models like OpenAI Chat GPT. Additionally, the weights and code for the LLaMA models are publicly available for research and commercial purposes.\n\nThis work focuses on Language Adaptation for the LLaMA models, specifically addressing the challenge of language coverage in Italian. By adopting an open science approach, the researchers explore different tuning approaches to ensure high-quality text generation in Italian. The goal is to create effective text generation models with strong linguistic properties for various tasks that are challenging using traditional multilingual or general-purpose language models.\n\nOverall, this study contributes to strategies for adapting language models to Italian by introducing the innovative LLaMAntino family of Italian language models. These models are aimed at improving text generation capabilities in Italian, which is an underrepresented language in the original LLaMA datasets.',
"We present the LLAMAS sample, consisting of 603 lensed Lyman alpha emitters observed through MUSE and HST. Using 3D spectroscopic observations, we characterize the morphological properties of 268 of these emitters. We find a correlation between the spatial extent of UV continuum and Lyman alpha emission. The size of Lyman alpha haloes is linked to the galaxy's physical properties, such as star formation rate and Lyman alpha line properties. We also observe that 60% of galaxies show a significant spatial offset between UV and Lyman alpha emission. These offsets could be due to substructures in the UV component or scattering effects in the circumgalactic medium. Comparisons with simulation data indicate that star formation clumps and satellite galaxies could be responsible for these offsets.",
"Introduction: The research aimed to address the limitations in large language models (LLMs) when it comes to medical knowledge by creating a specialized language model called ChatDoctor. This model was built by adapting and refining the Meta AI LLaMA using a dataset of 100,000 patient-doctor dialogues collected from an online medical consultation platform. The conversations were anonymized to ensure privacy.\n\nModel Refinement: The large dataset of patient-doctor dialogues was used to fine-tune the LLaMA model specifically for medical advice. This process involved training the model to better understand patient needs and provide informed advice based on real-world interactions. By utilizing this approach, significant improvements were observed in the model's accuracy.\n\nSelf-Directed Information Retrieval: In addition to model refinement, a self-directed information retrieval mechanism was incorporated into ChatDoctor. This allowed the model to access real-time information from sources like Wikipedia and offline medical databases. By incorporating this capability, the model was able to provide even more accurate responses by retrieving relevant information from trusted sources.\n\nImprovement in Accuracy: With the combination of model refinement and self-directed information retrieval, ChatDoctor demonstrated a significant advancement in medical LLMs. The model exhibited a marked improvement in understanding patient inquiries and providing accurate advice. In the medical field, where accuracy and reliability are crucial, such enhancements are not only beneficial but essential.\n\nConclusion: The development of ChatDoctor, a specialized medical language model, addressed the limitations of existing LLMs in providing accurate medical advice. Through model refinement and the inclusion of self-directed information retrieval, the accuracy and reliability of ChatDoctor were significantly improved. This advancement has important implications for the medical field, where accurate information is crucial for patient care.",
'In our submission to the BabyLM challenge, we aimed to improve the efficiency of language models by training an ensemble of teachers on a small dataset called BabyLM. The ensemble consisted of a GPT 2 model and small LLaMA models.\n\nWe then used knowledge distillation to transfer the knowledge from the ensemble into a small LLaMA model with only 58M parameters. Surprisingly, this distilled model outperformed both of its teachers and even a similar model trained without distillation.\n\nThis result highlights the effectiveness of distillation in retaining and even surpassing the performance of the teacher models when they are trained on a small dataset. It also demonstrates that direct training may not always achieve the same level of performance as distillation.\n\nOverall, our work shows the potential of knowledge distillation in improving the sample efficiency of language models, which can have applications in various natural language processing tasks.',
'This research paper introduces a technique called Sorted Fine Tuning (SoFT) for enabling dynamic inference on large language models (LLMs) without the need for pretraining or multiple models. The authors extend the use of SortedNet, a training technique for deep neural networks, to generative natural language processing tasks. By sorting sub models based on computation accuracy characteristics, SoFT allows for efficient and cost-effective dynamic inference.\n\nThe authors demonstrate the application of SoFT on the LLaMa 2 13B model using the Stanford Alpaca dataset. They compare the performance of SoFT with normal tuning and early exit using the PandaLM benchmark. The results show that SoFT can deliver models that are twice as fast as the original model, while maintaining or even exceeding performance.\n\nBy unlocking the potential of intermediate layers of transformers in generating the target output, SoFT offers a way to improve the efficiency of LLMs. The sub models created through SoFT are integral components of the original model, reducing storage requirements and transition costs between different computational latency budgets.\n\nOverall, this research presents a promising approach to optimizing the use of large language models for dynamic inference in natural language processing tasks.',
'Title: An Experiment on Assisted Digital Forensics Reports Written Using Local Large Language Models\n\nSummary:\nGenerative artificial intelligence systems, particularly Large Language Models (LLMs) like ChatGPT or Llama, have made significant advancements and are proving to be valuable tools in the field of digital forensics. Previous studies have explored the potential of LLMs in investigations. However, the extent to which these models can assist in the process of writing forensic reports remains unclear. This article aims to answer this question by examining forensic reports and identifying their general structure. We then evaluate the strengths and limitations of LLMs in generating different parts of a forensic report using a case study. The findings provide insights into the automation of report writing, a critical aspect of digital forensics investigations. Ultimately, we conclude that while LLMs can offer assistance during the report writing process, they cannot currently replace human practitioners and should be used in conjunction with thorough proofreading and corrections.',
"As the race to develop new large language models continues, researchers are striving to create models that can match or surpass the language understanding and generation abilities of commercial models like GPT 3.5 or GPT 4. Several models have emerged claiming performance similar to these commercial models through different instruction tuning methods.\n\nHowever, as practitioners of Text to SQL parsing, it's important to approach these claims with scrutiny and evaluate the actual effectiveness of these models. In order to do so, we have compared six popular large language models: Dolly, LLaMA, Vicuna, Guanaco, Bard, and ChatGPT.\n\nWe systematically evaluated their Text to SQL parsing capability on nine benchmark datasets using five different prompting strategies. These strategies covered both zero shot (no training data) and few shot (limited training data) scenarios. \n\nUnfortunately, the open source models fell significantly short of the performance achieved by closed source models like GPT 3.5. This highlights the need for further work and improvement to bridge the performance gap between these models.\n\nWe are grateful for the valuable contributions made by these models to open source research, but it's clear that more research and development is needed to create large language models that can rival the performance of commercial models in Text to SQL parsing."]
##########################################################################################################################################
##########################################################################################################################################
##########################################################################################################################################
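# --- Question answering over the collected paper summaries ---------------------------------------
# The helpers below clean the user's question, pack as many paper summaries (plus their
# embeddings) as fit into the prompt, query gpt-3.5-turbo, and expose it via a small Flask app.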
def clean_text(text):
    """Replace unwanted characters and symbols in the text with spaces."""
    # A single pass suffices: newlines are not in the allowed character class,
    # so they are replaced along with all other unwanted characters.
    return re.sub(r"[^a-zA-Z0-9.,!?]", " ", text)
def answer_question(question, papers_list, embeddings):
"""Answer a question based on the provided papers_list and embeddings."""
cleaned_question = clean_text(question)
messages = [{"role": "system", "content": "You are a helpful assistant."},
{"role": "user", "content": f"{cleaned_question}"}]
total_tokens = 0
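    # Note: 4097 is the gpt-3.5-turbo context limit in tokens, but message_length below is
    # measured in characters, so this check is only a rough guard against overlong prompts.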
for paper_info, embedding in zip(papers_list, embeddings):
message_length = len(f"Paper Info: {clean_text(paper_info)}\nEmbedding: {embedding}")
if total_tokens + message_length <= 4097:
messages.append({"role": "assistant", "content": f"Paper Info: {clean_text(paper_info)}\nEmbedding: {embedding}"})
total_tokens += message_length
else:
break
# Generate the answer using OpenAI's GPT-3.5 Turbo
response = openai.ChatCompletion.create(
model="gpt-3.5-turbo",
messages=messages,
max_tokens=1500
)
answer = response['choices'][0]['message']['content']
return answer
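# Flask routes: serve the chat page and answer questions POSTed as JSON to /chat.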
@app.route('/')
def home():
return render_template('iii.html')
@app.route('/chat', methods=['POST'])
def chat():
user_input = request.json.get('user_input', '')
answer = answer_question(user_input, papers_list, embeddings)
return jsonify({'response': answer})
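# Example request once the server is running (assumes Flask's default host/port):
#   curl -X POST http://127.0.0.1:5000/chat \
#        -H 'Content-Type: application/json' \
#        -d '{"user_input": "What does the Llama Guard paper propose?"}'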
if __name__ == '__main__':
app.run(debug=False)
| [
"PLACEHOLDER",
"You are a helpful assistant."
] |
2024-01-10 | ngruver/llmtime | experiments~run_simplicity_bias.py | import numpy as np
import pandas as pd
from collections import defaultdict
import matplotlib.pyplot as plt
from darts.datasets import (
AirPassengersDataset,
GasRateCO2Dataset,
MonthlyMilkDataset,
WineDataset
)
import os
import openai
openai.api_key = os.environ['OPENAI_API_KEY']
openai.api_base = os.environ.get("OPENAI_API_BASE", "https://api.openai.com/v1")
from models.llmtime import get_llmtime_predictions_data
from models.llms import nll_fns
from data.serialize import SerializerSettings
from data.synthetic import get_synthetic_datasets
datasets = get_synthetic_datasets()
# print(datasets.keys())
# print(1/0)
# data = datasets['xsin']
data = datasets['linear_cos']
train, test = data
x = np.linspace(0, 1, len(train) + len(test))
train_x = x[:len(train)]
test_x = x[len(train):]
_train_y = train.values
test_y = test.values
# print(train_y)
# plt.plot(train_x, train_y)
# plt.show()
np.random.seed(0)
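# Add i.i.d. Gaussian observation noise (std 0.05) to the training targets;
# the commented lines below are heteroscedastic alternatives.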
train_y = _train_y + np.random.normal(0, 0.05, len(_train_y))
# train_y = _train_y + np.linspace(2, 10, len(_train_y)) * np.random.normal(0, 1.0, len(_train_y))
# train_y = _train_y + np.linspace(1, 5, len(_train_y)) * np.random.normal(0, 1.0, len(_train_y))
# print(train_y)
# plt.plot(train_x, train_y)
# plt.show()
# print(1/0)
# # dataset = AirPassengersDataset().load().astype(np.float32)
# dataset = GasRateCO2Dataset().load().astype(np.float32)
# # dataset = MonthlyMilkDataset().load().astype(np.float32)
# # dataset = WineDataset().load().astype(np.float32)
# train, test = dataset[:-100], dataset[-100:]
# # -100 for CO2
# # train, test = dataset.split_before(pd.Timestamp("19580101"))
# x = np.linspace(0, 1, len(dataset))
# print(dataset.pd_dataframe().head())
# # train_y = train.pd_dataframe()["#Passengers"].values
# train_y = train.pd_dataframe()["CO2%"].values
# # train_y = train.pd_dataframe()["Pounds per cow"].values
# # train_y = train.pd_dataframe()["Y"].values
# train_x = x[:len(train_y)]
# # test_y = test.pd_dataframe()["#Passengers"].values
# test_y = test.pd_dataframe()["CO2%"].values
# # test_y = test.pd_dataframe()["Pounds per cow"].values
# # test_y = test.pd_dataframe()["Y"].values
# test_x = x[len(train_y):]
# all_x = np.concatenate([train_x, test_x])
# all_y = np.concatenate([train_y, test_y])
# x_mean, x_std = all_x.mean(), all_x.std()
# y_mean, y_std = all_y.mean(), all_y.std()
# # train_x = (train_x - x_mean) / x_std
# # train_y = (train_y - y_mean) / y_std
# # test_x = (test_x - x_mean) / x_std
# # test_y = (test_y - y_mean) / y_std
# train_x = train_x.reshape(-1, 1)
# train_y = train_y.reshape(-1, 1)
# test_x = test_x.reshape(-1, 1)
# test_y = test_y.reshape(-1, 1)
from pysr import PySRRegressor
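# Symbolic regression baseline: PySR searches over +, -, *, / plus cos/exp/sin expressions.
# model_selection='accuracy' makes the lowest-loss equation the default, although individual
# equations are later selected explicitly by index from the complexity-sorted equations_ table.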
model = PySRRegressor(
niterations=100, # < Increase me for better results
binary_operators=["+", "*","-","/"],
unary_operators=[
"cos",
"exp",
"sin",
# "square",
# "inv(x) = 1/x",
# ^ Custom operator (julia syntax)
],
# constraints={
# "square": 4,
# "cube": 4,
# "exp": 4,
# },
maxsize=70,
maxdepth=10,
population_size=50,
loss="loss(prediction, target) = abs(prediction - target)",
model_selection='accuracy',
# parsimony=0,
# weight_mutate_constant=0.1,
# weight_mutate_operator=0.75,
# weight_randomize=0.01
)
model.fit(
train_x.reshape(-1, 1),
train_y.reshape(-1, 1)
)
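# Note: the model fitted above is immediately replaced by a previously saved
# PySR run loaded from disk (the active from_file call below).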
# model = PySRRegressor.from_file("/Users/nategruver/desktop/hall_of_fame_2023-10-04_114754.955.pkl")
# model = PySRRegressor.from_file("/Users/nategruver/desktop/hall_of_fame_2023-10-04_115256.922.pkl")
# model = PySRRegressor.from_file("hall_of_fame_20023-10-04_154505.764.pkl")
# model = PySRRegressor.from_file("hall_of_fame_2023-10-04_162049.705.pkl")
model = PySRRegressor.from_file("hall_of_fame_2023-10-05_133544.169.pkl")
# model = PySRRegressor.from_file("hall_of_fame_2023-10-05_145612.971.pkl")
# model = PySRRegressor.from_file("hall_of_fame_2023-10-05_170325.867.pkl")
step_size = 4
start_idx = 0
# idxs = list(range(start_idx,len(model.equations_),step_size))
idxs = [1, 5, 9, 17, 25]
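# Hand-picked rows of PySR's equations_ table, spanning simple to complex candidate equations.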
fig, ax = plt.subplots(1, len(idxs), figsize=(30, 2))
results = defaultdict(list)
test_losses = []
train_losses = []
complexities = []
nlls = []
for i in range(len(idxs)):
print(i)
idx = idxs[i]
model.sympy(i)
y_train_pred = model.predict(train_x.reshape(-1, 1), index=idx)
y_prediction = model.predict(test_x.reshape(-1, 1), index=idx)
if np.any(np.isinf(y_prediction)):
print("inf")
continue
# print(y_prediction.shape)
# print(test_y.shape)
loss = np.square(y_prediction - test_y).mean()
test_losses.append(loss)
print(model.equations_.iloc[idx])#.equation)
print(f"Loss: {loss:.4f}")
train_losses.append(model.equations_.iloc[idx].loss)
complexities.append(model.equations_.iloc[idx].complexity)
results['test_loss'].append(loss)
results['train_loss'].append(model.equations_.iloc[idx].loss)
results['complexity'].append(model.equations_.iloc[idx].complexity)
results['equation'].append(model.equations_.iloc[idx].equation)
results['test_preds'].append(y_prediction)
results['train_preds'].append(y_train_pred)
ax[i].plot(train_x, train_y, color='black')
ax[i].plot(train_x, y_train_pred, color='red')
ax[i].plot(test_x, y_prediction, color='red')
ax[i].plot(test_x, test_y, color='blue')
# ax[i].set_title(f"Test loss {loss:.1f}")
# plt.show()
# plt.close()
ax[i].set_title(model.equations_.iloc[idx].equation)
# plt.plot(test_x, (y_prediction - test_y[:,0]) ** 2, color='black')
# plt.show()
# plt.close()
print(test_y)
print(y_prediction)
# nll = get_llmtime_predictions_data(
# _train_y.flatten(), #+ np.random.normal(0, 0.01, len(train_y.flatten())),
# y_prediction.flatten(), #+ np.random.normal(0, 0.01, len(y_prediction.flatten())),
# model='text-davinci-003',
# alpha=0.99,
# basic=True,
# settings= SerializerSettings(10, prec=1, signed=True),
# num_samples=0,
# )['NLL/D']
# print(nll)
# nlls.append(nll)
fig.savefig("/Users/nategruver/desktop/simplicity_bias.pdf")
plt.show()
plt.close()
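# NLL/D values hard-coded here, presumably cached from an earlier run of the
# commented-out get_llmtime_predictions_data loop above.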
nlls = [-1.2953098912349181, -1.393385559561969, -1.7726323615778958, -1.256776624112951, -1.117701657084411]
print(nlls)
results['nll'] = nlls
results['train_x'] = train_x
results['train_y'] = train_y
results['test_x'] = test_x
results['test_y'] = test_y
# Save the results dictionary to a pickle for later analysis
import pickle
with open('simplicity_bias_results.pkl', 'wb') as f:
pickle.dump(results, f)
# Min-max normalize each metric (test loss, NLL, complexity, train loss) to [0, 1] so they share one axis
test_losses = np.array(test_losses)
test_losses = (test_losses - test_losses.min()) / (test_losses.max() - test_losses.min())
nlls = np.array(nlls)
nlls = (nlls - nlls.min()) / (nlls.max() - nlls.min())
complexities = np.array(complexities)
complexities = (complexities - complexities.min()) / (complexities.max() - complexities.min())
train_losses = np.array(train_losses)
train_losses = (train_losses - train_losses.min()) / (train_losses.max() - train_losses.min())
fig, ax = plt.subplots(figsize=(2, 3))
ax.plot(train_losses, label='Train Loss')
ax.plot(complexities, label='Complexity')
ax.plot(test_losses, label='Test Loss')
ax.plot(nlls, label='LLM NLL')
# ax.set_ylim(0, 500)
# add two column legend above the figure
handles, labels = ax.get_legend_handles_labels()
fig.legend(
handles, labels,
ncol=2,
bbox_to_anchor=(1.0, 1.05),
)
fig.savefig("/Users/nategruver/desktop/simplicity_bias_plot.pdf", bbox_inches='tight')
plt.show()
plt.close() | [] |
2024-01-10 | ngruver/llmtime | experiments~run_synthetic.py | import os
import pickle
import openai
openai.api_key = os.environ['OPENAI_API_KEY']
openai.api_base = os.environ.get("OPENAI_API_BASE", "https://api.openai.com/v1")
from data.serialize import SerializerSettings
from models.utils import grid_iter
from models.gaussian_process import get_gp_predictions_data
from models.darts import get_TCN_predictions_data, get_NHITS_predictions_data, get_NBEATS_predictions_data
from models.llmtime import get_llmtime_predictions_data
from models.darts import get_arima_predictions_data
from data.synthetic import get_synthetic_datasets
from models.validation_likelihood_tuning import get_autotuned_predictions_data
# Specify the hyperparameter grid for each model
gpt3_hypers = dict(
model='text-davinci-003',
alpha=0.1,
basic=True,
settings= SerializerSettings(10, prec=3,signed=True)
)
gpt4_hypers = dict(
alpha=0.3,
basic=True,
temp=1.0,
top_p=0.8,
settings=SerializerSettings(base=10, prec=3, signed=True, time_sep=', ', bit_sep='', minus_sign='-')
)
llama_hypers = dict(
temp=1.0,
alpha=0.99,
beta=0.3,
basic=False,
settings=SerializerSettings(base=10, prec=3, time_sep=',', bit_sep='', plus_sign='', minus_sign='-', signed=True),
)
gp_hypers = dict(lr=[1e-2])
arima_hypers = dict(p=[12,20,30], d=[1,2], q=[0,1,2])
TCN_hypers = dict(in_len=[10, 100, 400], out_len=[1],
kernel_size=[3, 5], num_filters=[1, 3],
likelihood=['laplace', 'gaussian']
)
NHITS_hypers = dict(in_len=[10, 100, 400], out_len=[1],
layer_widths=[64, 16], num_layers=[1, 2],
likelihood=['laplace', 'gaussian']
)
NBEATS_hypers = dict(in_len=[10, 100, 400], out_len=[1], # 10 is almost always the best
layer_widths=[64, 16], num_layers=[1, 2],
likelihood=['laplace', 'gaussian']
)
model_hypers = {
'gp': gp_hypers,
'arima': arima_hypers,
'TCN': TCN_hypers,
'N-BEATS': NBEATS_hypers,
'N-HiTS': NHITS_hypers,
'text-davinci-003': {'model': 'text-davinci-003', **gpt3_hypers},
'gpt-4': {'model': 'gpt-4', **gpt4_hypers},
'llama-70b': {'model': 'llama-70b', **llama_hypers},
}
# Specify the function to get predictions for each model
model_predict_fns = {
'gp': get_gp_predictions_data,
'arima': get_arima_predictions_data,
'TCN': get_TCN_predictions_data,
'N-BEATS': get_NBEATS_predictions_data,
'N-HiTS': get_NHITS_predictions_data,
'text-davinci-003': get_llmtime_predictions_data,
'gpt-4': get_llmtime_predictions_data,
'llama-70b': get_llmtime_predictions_data,
}
def is_gpt(model):
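    """Rough check for OpenAI API models (queried in parallel, with fewer samples, in the loop below)."""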
return any([x in model for x in ['ada', 'babbage', 'curie', 'davinci', 'text-davinci-003', 'gpt-4']])
# Specify the output directory for saving results
output_dir = 'outputs/synthetic'
os.makedirs(output_dir, exist_ok=True)
datasets = get_synthetic_datasets()
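# For each synthetic task: skip models with cached results, tune hyperparameters by validation
# likelihood, collect probabilistic forecasts, and checkpoint everything to outputs/synthetic/<name>.pkl.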
for dsname,data in datasets.items():
if dsname in ['rbf_0','rbf_1','matern_0','matern_1']:
continue
train, test = data
if os.path.exists(f'{output_dir}/{dsname}.pkl'):
with open(f'{output_dir}/{dsname}.pkl','rb') as f:
out_dict = pickle.load(f)
else:
out_dict = {}
for model in ['text-davinci-003', 'gpt-4', 'arima', 'TCN']:
if model in out_dict:
print(f"Skipping {dsname} {model}")
continue
print(f"Starting {dsname} {model}")
hypers = list(grid_iter(model_hypers[model]))
parallel = True if is_gpt(model) else False
num_samples = 20 if is_gpt(model) else 100
try:
preds = get_autotuned_predictions_data(train, test, hypers, num_samples, model_predict_fns[model], verbose=False, parallel=parallel)
out_dict[model] = preds
except Exception as e:
print(f"Failed {dsname} {model}")
print(e)
continue
with open(f'{output_dir}/{dsname}.pkl','wb') as f:
pickle.dump(out_dict,f)
print(f"Finished {dsname}")
| [] |
2024-01-10 | ngruver/llmtime | experiments~run_monash.py | import os
import pickle
from data.monash import get_datasets
from data.serialize import SerializerSettings
from models.validation_likelihood_tuning import get_autotuned_predictions_data
from models.utils import grid_iter
from models.llmtime import get_llmtime_predictions_data
import numpy as np
import openai
openai.api_key = os.environ['OPENAI_API_KEY']
openai.api_base = os.environ.get("OPENAI_API_BASE", "https://api.openai.com/v1")
# Specify the hyperparameter grid for each model
gpt3_hypers = dict(
temp=0.7,
alpha=0.9,
beta=0,
basic=False,
settings=SerializerSettings(base=10, prec=3, signed=True, half_bin_correction=True),
)
llama_hypers = dict(
temp=1.0,
alpha=0.99,
beta=0.3,
basic=False,
settings=SerializerSettings(base=10, prec=3, time_sep=',', bit_sep='', plus_sign='', minus_sign='-', signed=True),
)
model_hypers = {
'text-davinci-003': {'model': 'text-davinci-003', **gpt3_hypers},
'llama-7b': {'model': 'llama-7b', **llama_hypers},
'llama-70b': {'model': 'llama-70b', **llama_hypers},
}
# Specify the function to get predictions for each model
model_predict_fns = {
'text-davinci-003': get_llmtime_predictions_data,
'llama-7b': get_llmtime_predictions_data,
'llama-70b': get_llmtime_predictions_data,
}
def is_gpt(model):
return any([x in model for x in ['ada', 'babbage', 'curie', 'davinci', 'text-davinci-003', 'gpt-4']])
# Specify the output directory for saving results
output_dir = 'outputs/monash'
os.makedirs(output_dir, exist_ok=True)
models_to_run = [
'text-davinci-003',
# 'llama-7b',
# 'llama-70b',
]
datasets_to_run = [
"weather", "covid_deaths", "solar_weekly", "tourism_monthly", "australian_electricity_demand", "pedestrian_counts",
"traffic_hourly", "hospital", "fred_md", "tourism_yearly", "tourism_quarterly", "us_births",
"nn5_weekly", "traffic_weekly", "saugeenday", "cif_2016", "bitcoin", "sunspot", "nn5_daily"
]
max_history_len = 500
datasets = get_datasets()
for dsname in datasets_to_run:
print(f"Starting {dsname}")
data = datasets[dsname]
train, test = data
train = [x[-max_history_len:] for x in train]
if os.path.exists(f'{output_dir}/{dsname}.pkl'):
with open(f'{output_dir}/{dsname}.pkl','rb') as f:
out_dict = pickle.load(f)
else:
out_dict = {}
for model in models_to_run:
if model in out_dict:
print(f"Skipping {dsname} {model}")
continue
else:
print(f"Starting {dsname} {model}")
hypers = list(grid_iter(model_hypers[model]))
parallel = True if is_gpt(model) else False
num_samples = 5
try:
preds = get_autotuned_predictions_data(train, test, hypers, num_samples, model_predict_fns[model], verbose=False, parallel=parallel)
medians = preds['median']
targets = np.array(test)
maes = np.mean(np.abs(medians - targets), axis=1) # (num_series)
preds['maes'] = maes
preds['mae'] = np.mean(maes)
out_dict[model] = preds
except Exception as e:
print(f"Failed {dsname} {model}")
print(e)
continue
with open(f'{output_dir}/{dsname}.pkl','wb') as f:
pickle.dump(out_dict,f)
print(f"Finished {dsname}")
| [] |
2024-01-10 | ngruver/llmtime | experiments~run_memorization.py | import os
import numpy as np
import matplotlib.pyplot as plt
import openai
openai.api_key = os.environ['OPENAI_API_KEY']
openai.api_base = os.environ.get("OPENAI_API_BASE", "https://api.openai.com/v1")
from data.serialize import SerializerSettings
from models.utils import grid_iter
from models.gaussian_process import get_gp_predictions_data
from models.darts import get_TCN_predictions_data, get_NHITS_predictions_data, get_NBEATS_predictions_data
from models.llmtime import get_llmtime_predictions_data
from models.darts import get_arima_predictions_data
gpt3_hypers = dict(
temp=.7,
alpha=[0.5, .7, 0.9, 0.99],
beta=[0, .15, 0.3, .5],
basic=[False],
settings=[SerializerSettings(base=10, prec=prec, signed=True,half_bin_correction=True) for prec in [2,3]],
)
gp_hypers = dict(lr=[5e-3, 1e-2, 5e-2, 1e-1])
arima_hypers = dict(p=[12,20,30], d=[1,2], q=[0,1,2])
TCN_hypers = dict(in_len=[10, 100, 400], out_len=[1],
kernel_size=[3, 5], num_filters=[1, 3],
likelihood=['laplace', 'gaussian']
)
NHITS_hypers = dict(in_len=[10, 100, 400], out_len=[1],
layer_widths=[64, 16], num_layers=[1, 2],
likelihood=['laplace', 'gaussian']
)
NBEATS_hypers = dict(in_len=[10, 100, 400], out_len=[1],
layer_widths=[64, 16], num_layers=[1, 2],
likelihood=['laplace', 'gaussian']
)
model_hypers = {
'gp': gp_hypers,
'arima': arima_hypers,
'TCN': TCN_hypers,
'N-BEATS': NBEATS_hypers,
'N-HiTS': NHITS_hypers,
'text-davinci-003': {'model': 'text-davinci-003', **gpt3_hypers},
}
model_predict_fns = {
'gp': get_gp_predictions_data,
'arima': get_arima_predictions_data,
'TCN': get_TCN_predictions_data,
'N-BEATS': get_NBEATS_predictions_data,
'N-HiTS': get_NHITS_predictions_data,
'text-davinci-003': get_llmtime_predictions_data,
}
def is_gpt(model):
return any([x in model for x in ['ada', 'babbage', 'curie', 'davinci', 'text-davinci-003']])
import pickle
import matplotlib.pyplot as plt
from data.small_context import get_memorization_datasets
from models.validation_likelihood_tuning import get_autotuned_predictions_data
output_dir = 'outputs/memorization'
os.makedirs(output_dir, exist_ok=True)
datasets = get_memorization_datasets(predict_steps=30)
for dsname,data in datasets.items():
train, test = data
if os.path.exists(f'{output_dir}/{dsname}.pkl'):
with open(f'{output_dir}/{dsname}.pkl','rb') as f:
out_dict = pickle.load(f)
else:
out_dict = {}
for model in ['text-davinci-003', 'gp', 'arima', 'N-HiTS']:
if model in out_dict and not is_gpt(model):
if out_dict[model]['samples'] is not None:
print(f"Skipping {dsname} {model}")
continue
else:
print('Using best hyper...')
hypers = [out_dict[model]['best_hyper']]
else:
print(f"Starting {dsname} {model}")
hypers = list(grid_iter(model_hypers[model]))
parallel = True if is_gpt(model) else False
num_samples = 20 if is_gpt(model) else 100
try:
preds = get_autotuned_predictions_data(train, test, hypers, num_samples, model_predict_fns[model], verbose=0, parallel=parallel)
if preds.get('NLL/D', np.inf) < np.inf:
out_dict[model] = preds
else:
print(f"Failed {dsname} {model}")
except Exception as e:
print(f"Failed {dsname} {model}")
print(e)
continue
with open(f'{output_dir}/{dsname}.pkl','wb') as f:
pickle.dump(out_dict,f)
print(f"Finished {dsname}")
| [] |
2024-01-10 | ngruver/llmtime | experiments~run_darts.py | import os
import numpy as np
import matplotlib.pyplot as plt
import pickle
import matplotlib.pyplot as plt
import pandas as pd
from data.small_context import get_datasets
from data.serialize import SerializerSettings
from models.validation_likelihood_tuning import get_autotuned_predictions_data
from models.utils import grid_iter
from models.gaussian_process import get_gp_predictions_data
from models.darts import get_TCN_predictions_data, get_NHITS_predictions_data, get_NBEATS_predictions_data
from models.llmtime import get_llmtime_predictions_data
from models.darts import get_arima_predictions_data
import openai
openai.api_key = os.environ['OPENAI_API_KEY']
openai.api_base = os.environ.get("OPENAI_API_BASE", "https://api.openai.com/v1")
# Specify the hyperparameter grid for each model
gpt3_hypers = dict(
temp=.7,
alpha=[0.5, .7, 0.9, 0.99],
beta=[0, .15, 0.3, .5],
basic=[False],
settings=[SerializerSettings(base=10, prec=prec, signed=True, half_bin_correction=True) for prec in [2,3]],
)
gpt4_hypers = dict(
alpha=0.3,
basic=True,
temp=1.0,
top_p=0.8,
settings=SerializerSettings(base=10, prec=3, signed=True, time_sep=', ', bit_sep='', minus_sign='-')
)
llama_hypers = dict(
temp=1.0,
alpha=0.99,
beta=0.3,
basic=False,
settings=SerializerSettings(base=10, prec=3, time_sep=',', bit_sep='', plus_sign='', minus_sign='-', signed=True),
)
promptcast_hypers = dict(
temp=.7,
settings=SerializerSettings(base=10, prec=0, signed=True,
time_sep=', ',
bit_sep='',
plus_sign='',
minus_sign='-',
half_bin_correction=False,
decimal_point='')
)
gp_hypers = dict(lr=[5e-3, 1e-2, 5e-2, 1e-1])
arima_hypers = dict(p=[12,20,30], d=[1,2], q=[0,1,2])
TCN_hypers = dict(in_len=[10, 100, 400], out_len=[1],
kernel_size=[3, 5], num_filters=[1, 3],
likelihood=['laplace', 'gaussian']
)
NHITS_hypers = dict(in_len=[10, 100, 400], out_len=[1],
layer_widths=[64, 16], num_layers=[1, 2],
likelihood=['laplace', 'gaussian']
)
NBEATS_hypers = dict(in_len=[10, 100, 400], out_len=[1],
layer_widths=[64, 16], num_layers=[1, 2],
likelihood=['laplace', 'gaussian']
)
model_hypers = {
'gp': gp_hypers,
'arima': arima_hypers,
'TCN': TCN_hypers,
'N-BEATS': NBEATS_hypers,
'N-HiTS': NHITS_hypers,
'text-davinci-003': {'model': 'text-davinci-003', **gpt3_hypers},
'gpt-4': {'model': 'gpt-4', **gpt4_hypers},
'llama-70b': {'model': 'llama-70b', **llama_hypers},
}
# Specify the function to get predictions for each model
model_predict_fns = {
'gp': get_gp_predictions_data,
'arima': get_arima_predictions_data,
'TCN': get_TCN_predictions_data,
'N-BEATS': get_NBEATS_predictions_data,
'N-HiTS': get_NHITS_predictions_data,
'text-davinci-003': get_llmtime_predictions_data,
'gpt-4': get_llmtime_predictions_data,
'llama-70b': get_llmtime_predictions_data,
}
def is_gpt(model):
return any([x in model for x in ['ada', 'babbage', 'curie', 'davinci', 'text-davinci-003', 'gpt-4']])
# Specify the output directory for saving results
output_dir = 'outputs/darts'
os.makedirs(output_dir, exist_ok=True)
datasets = get_datasets()
for dsname,data in datasets.items():
train, test = data
if os.path.exists(f'{output_dir}/{dsname}.pkl'):
with open(f'{output_dir}/{dsname}.pkl','rb') as f:
out_dict = pickle.load(f)
else:
out_dict = {}
# N-HiTS, TCN and N-BEATS require training and can be slow. Skip them if you want quick results.
for model in ['text-davinci-003', 'gpt-4', 'gp', 'arima', 'N-HiTS', 'TCN', 'N-BEATS']:
if model in out_dict:
print(f"Skipping {dsname} {model}")
continue
else:
print(f"Starting {dsname} {model}")
hypers = list(grid_iter(model_hypers[model]))
parallel = True if is_gpt(model) else False
num_samples = 20 if is_gpt(model) else 100
try:
preds = get_autotuned_predictions_data(train, test, hypers, num_samples, model_predict_fns[model], verbose=False, parallel=parallel)
out_dict[model] = preds
except Exception as e:
print(f"Failed {dsname} {model}")
print(e)
continue
with open(f'{output_dir}/{dsname}.pkl','wb') as f:
pickle.dump(out_dict,f)
print(f"Finished {dsname}")
| [
", "
] |
2024-01-10 | ngruver/llmtime | data~metrics.py | import numpy as np
from jax import vmap
import jax.numpy as jnp
def quantile_loss(target, pred, q):
q_pred = jnp.quantile(pred, q, axis=0)
return 2 * jnp.sum(
jnp.abs((q_pred - target) * ((target <= q_pred) * 1.0 - q))
)
def calculate_crps(target, pred, num_quantiles=20):
quantiles = jnp.linspace(0, 1.0, num_quantiles+1)[1:]
vec_quantile_loss = vmap(lambda q: quantile_loss(target, pred, q))
crps = jnp.sum(vec_quantile_loss(quantiles))
crps = crps / (jnp.sum(np.abs(target)) * len(quantiles))
return crps
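# Hedged example (added for illustration; not part of the original module): a
# minimal smoke test for calculate_crps, assuming forecasts stack samples on
# axis 0 so that jnp.quantile(..., axis=0) reduces over samples.
def _example_crps_usage():
    target = np.linspace(0.0, 1.0, 8)               # ground-truth series of length 8
    pred = np.random.randn(100, 8) * 0.1 + target   # 100 sampled forecasts
    return calculate_crps(target, pred)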
import jax
from jax import grad,vmap
from .serialize import serialize_arr, SerializerSettings
import openai
def nll(input_arr, target_arr, model, settings:SerializerSettings, transform, count_seps=True, prompt=None, temp=1):
""" Returns the NLL/dimension (log base e) of the target array (continuous) according to the LM
conditioned on the input array. Applies relevant log determinant for transforms and
converts from discrete NLL of the LLM to continuous by assuming uniform within the bins.
inputs:
input_arr: (n,) context array
target_arr: (n,) ground truth array
Returns: NLL/D
"""
input_str = serialize_arr(vmap(transform)(input_arr), settings)
target_str = serialize_arr(vmap(transform)(target_arr), settings)
if prompt:
input_str = prompt + '\n' + input_str
if not input_str.endswith(settings.time_sep):
print('Appending time separator to input... Are you sure you want this?')
prompt = input_str + settings.time_sep + target_str
else:
prompt = input_str + target_str
response = openai.Completion.create(model=model, prompt=prompt, logprobs=5, max_tokens=0, echo=True, temperature=temp)
#print(response['choices'][0])
logprobs = np.array(response['choices'][0].logprobs.token_logprobs, dtype=np.float32)
tokens = np.array(response['choices'][0].logprobs.tokens)
top5logprobs = response['choices'][0].logprobs.top_logprobs
seps = tokens==settings.time_sep
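    # The target tokens begin right after the len(input_arr)-th time separator;
    # everything before that index is context and is dropped below.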
target_start = np.argmax(np.cumsum(seps)==len(input_arr)) + 1
logprobs = logprobs[target_start:]
tokens = tokens[target_start:]
top5logprobs = top5logprobs[target_start:]
seps = tokens==settings.time_sep
assert len(logprobs[seps]) == len(target_arr), f'There should be one separator per target. Got {len(logprobs[seps])} separators and {len(target_arr)} targets.'
#adjust logprobs by removing extraneous and renormalizing (see appendix of paper)
# logp' = logp - log(1-pk*pextra)
allowed_tokens = [settings.bit_sep + str(i) for i in range(settings.base)]
allowed_tokens += [settings.time_sep, settings.plus_sign, settings.minus_sign, settings.bit_sep+settings.decimal_point]
allowed_tokens = {t for t in allowed_tokens if len(t) > 0}
p_extra = np.array([sum(np.exp(ll) for k,ll in top5logprobs[i].items() if not (k in allowed_tokens)) for i in range(len(top5logprobs))])
if settings.bit_sep == '':
p_extra = 0
adjusted_logprobs = logprobs - np.log(1-p_extra)
digits_bits = -adjusted_logprobs[~seps].sum()
seps_bits = -adjusted_logprobs[seps].sum()
BPD = digits_bits/len(target_arr)
if count_seps:
BPD += seps_bits/len(target_arr)
#print("BPD unadjusted:", -logprobs.sum()/len(target_arr), "BPD adjusted:", BPD)
# log p(x) = log p(token) - log bin_width = log p(token) + prec * log base
transformed_nll = BPD - settings.prec*np.log(settings.base)
avg_logdet_dydx = np.log(vmap(grad(transform))(target_arr)).mean()
return transformed_nll-avg_logdet_dydx
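# Hedged example (illustration only, not in the original module): calling nll
# requires an OpenAI API key and a completions-capable model; the model name and
# values below are placeholders. The identity transform keeps the
# log-determinant correction at zero.
def _example_nll_usage():
    settings = SerializerSettings(base=10, prec=3, signed=True)
    history = np.linspace(0.0, 1.0, 50)
    target = np.linspace(1.0, 1.2, 10)
    return nll(history, target, model="text-davinci-003",
               settings=settings, transform=lambda x: x)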
class Evaluator:
def __init__(self):
self.non_numerical_cols = [
"serialized_history",
"serialized_target",
"serialized_prediction",
"history_len",
"num_channels",
"example_num",
"sample_num",
]
def evaluate_df(self, gt_df, pred_df):
cols = [c for c in gt_df.columns if c not in self.non_numerical_cols]
num_channels = gt_df["num_channels"].iloc[0]
history_len = gt_df["history_len"].iloc[0]
gt_vals = gt_df[cols].to_numpy().reshape(len(gt_df), -1, num_channels) # (num_examples, history_len + target_len, num_channels)
gt_vals = gt_vals[:, history_len:, :] # (num_examples, target_len, num_channels)
cols = [c for c in pred_df.columns if c not in self.non_numerical_cols]
num_channels = pred_df["num_channels"].iloc[0]
pred_df = pred_df[cols + ["example_num"]]
all_pred_vals = []
for example_num in sorted(pred_df["example_num"].unique()):
pred_vals = pred_df[pred_df["example_num"] == example_num][cols].to_numpy() # (num_samples, target_len * num_channels)
pred_vals = pred_vals.reshape(pred_vals.shape[0], -1, num_channels) # (num_samples, target_len, num_channels)
all_pred_vals.append(pred_vals)
pred_vals = np.stack(all_pred_vals, axis=1) # (num_samples, num_examples, target_len, num_channels)
assert gt_vals.shape == pred_vals.shape[1:]
diff = (gt_vals[None] - pred_vals) # (num_samples, num_examples, target_len, num_channels)
mse = np.mean(diff**2)
mae = np.mean(np.abs(diff))
crps = calculate_crps(gt_vals, pred_vals)
return {
"mse": mse,
"mae": mae,
"crps": crps,
}
def evaluate(self, gt, pred):
'''
gt: (batch_size, steps)
pred: (batch_size, num_samples, steps)
'''
assert gt.shape == (pred.shape[0], pred.shape[2]), f"wrong shapes: gt.shape: {gt.shape}, pred.shape: {pred.shape}"
diff = (gt[:, None, :] - pred) # (batch_size, num_samples, steps)
mse = np.mean(diff**2)
mae = np.mean(np.abs(diff))
std = np.std(gt, axis=1) + 1e-8 # (batch_size,)
        normalized_diff = diff / std[:, None, None] # (batch_size, num_samples, steps)
        nmse = np.mean(normalized_diff**2)
        nmae = np.mean(np.abs(normalized_diff))
return {
"nmse": nmse,
"nmae": nmae,
"mse": mse,
"mae": mae,
} | [
"PLACEHOLDERPLACEHOLDER"
] |
2024-01-10 | alexZ7000/DevQuiz | Src~Menu.py | from tkinter import *
from tkinter.messagebox import *
import smtplib
import email.message
import pygame.mixer_music
import os
from dotenv import load_dotenv
import openai
from time import sleep
import ConexaoComBancoDeDados as bd
from chatgpt import criar_pergunta
from chatgpt import responder_pergunta
root = Tk()  # Window
root.title("DevQuiz")  # Window title
x, y = (root.winfo_screenwidth()), (root.winfo_screenheight())  # Get the monitor resolution without accounting for scaling
root.geometry(f'{x}x{y}')  # Screen dimensions
root.minsize(1910, 1070)  # Minimum resolution when resizing
root.maxsize(1920, 1080)  # Force DevQuiz to Full HD resolution (1920x1080)
root.attributes("-fullscreen", 1)  # Put DevQuiz in full-screen mode
frame = Frame(root)
class Menu:
"""Classe que irá definir todos os itens que todos os "Menu" vão usar"""
def __init__(self):
"""Criação da minha tela"""
self.theme_txt = None
self.dev_system = None
self.escuro = None
self.claro = None
self.frame = Frame(root, bg="#ccccff")
self._build_screen()
c = 0
def change_theme(self):
""" Função para mudar de tema "Claro" para tema "Escuro"
:param self: Menu
:returns: Não retorna nada"""
self.claro = "#ccccff"
self.escuro = "#1D1D66"
if self.c % 2 == 0:
self.frame.config(bg=self.claro)
return "#ccccff"
else:
self.frame.config(bg=self.escuro)
return "#1D1D66"
def set_dev_system(self, dev_system):
""" Função para colocar os objetos referenciados no "DevSystem" em todas as Classes que herdarem de "Menu".
:param dev_system: Pegar referencias
:returns: Não retorna nada
"""
self.dev_system = dev_system
def show(self):
""" Função para mostrar todos os widgets que forem "self.frame" """
self.frame.pack(fill=BOTH, expand=True)
def hide(self):
""" Função para esconder widgets que não
serão mais usados em uma tela nova e para
excluir caracteres inseridos nos "Entry" """
self.frame.forget()
self.reset_entry()
def _build_screen(self):
"""Função para construir minha tela, mas eu não preciso
construir nenhuma tela em menu, essa função deve ser ignorada"""
pass
def reset_entry(self):
"""Função para limpar os caracteres inseridos no "Entry" """
pass
| [] |
2024-01-10 | SNUtilab/amore | submodule~LDA_handling.py | # -*- coding: utf-8 -*-
"""
Created on Sun Nov 21 02:59:32 2021
@author: tkdgu
"""
from gensim.models.ldamulticore import LdaMulticore
from gensim.models import CoherenceModel
import numpy as np
import pandas as pd
def get_topic_doc(lda_model, corpus) :
topic_doc_df = pd.DataFrame(columns = range(0, lda_model.num_topics))
for corp in corpus :
temp = lda_model.get_document_topics(corp)
DICT = {}
for tup in temp :
DICT[tup[0]] = tup[1]
topic_doc_df = topic_doc_df.append(DICT, ignore_index=1)
topic_doc_df = np.array(topic_doc_df)
topic_doc_df = np.nan_to_num(topic_doc_df)
return(topic_doc_df)
def get_topic_word_matrix(lda_model) :
topic_word_df = pd.DataFrame()
for i in range(0, lda_model.num_topics) :
temp = lda_model.show_topic(i, 1000)
DICT = {}
for tup in temp :
DICT[tup[0]] = tup[1]
topic_word_df = topic_word_df.append(DICT, ignore_index =1)
topic_word_df = topic_word_df.transpose()
return(topic_word_df)
def get_topic_topword_matrix(lda_model, num_word) :
topic_word_df = pd.DataFrame()
for i in range(0, lda_model.num_topics) :
temp = lda_model.show_topic(i, num_word)
temp = [i[0] for i in temp]
DICT = dict(enumerate(temp))
topic_word_df = topic_word_df.append(DICT, ignore_index =1)
topic_word_df = topic_word_df.transpose()
return(topic_word_df)
def cosine(u, v):
return (np.dot(u, v) / (np.linalg.norm(u) * np.linalg.norm(v)))
def get_CPC_topic_matrix(encoded_CPC, encoded_topic) :
CPC_topic_matrix = pd.DataFrame(columns = range(0, encoded_topic.shape[0]), index = encoded_CPC.keys())
for topic in range(0, encoded_topic.shape[0]) :
for cpc in encoded_CPC.keys() :
cpc_embedding = encoded_CPC[cpc]
sim = cosine(encoded_topic[topic], cpc_embedding)
CPC_topic_matrix[topic][cpc] = sim
return CPC_topic_matrix
def get_topic_novelty(CPC_topic_matrix) :
result_dict = {}
for topic, max_value in enumerate(CPC_topic_matrix.max()) :
result_dict[topic] = 1/max_value
return(result_dict)
def classifying_topic(CPC_topic_matrix, standard) :
result_dict = {}
for topic, max_value in enumerate(CPC_topic_matrix.max()) :
if max_value <= standard :
result_dict[topic] = 'Novel'
else :
result_dict[topic] = 'Common'
return(result_dict)
def get_topic_vol(lda_model, corpus) :
topic_doc_df = pd.DataFrame(columns = range(0, lda_model.num_topics))
for corp in corpus :
temp = lda_model.get_document_topics(corp)
DICT = {}
for tup in temp :
DICT[tup[0]] = tup[1]
topic_doc_df = topic_doc_df.append(DICT, ignore_index=1)
result = topic_doc_df.apply(np.sum).to_dict()
return(result)
def get_topic_vol_time(lda_model, topic_doc_df, data_sample, time) :
topic_doc_df = pd.DataFrame(topic_doc_df)
topic_doc_df['time'] = data_sample[time]
topic_time_df = pd.DataFrame()
for col in range(0, lda_model.num_topics) :
grouped = topic_doc_df[col].groupby(topic_doc_df['time'])
DICT = grouped.sum()
topic_time_df = topic_time_df.append(DICT, ignore_index=1)
topic_time_df = topic_time_df.transpose()
topic_time_df.index = topic_time_df.index.astype(int)
topic_time_df = topic_time_df.sort_index()
return(topic_time_df)
def get_topic_weight_time(lda_model, topic_doc_df, data_sample, time, weight, by = 'sum') :
topic_doc_df = pd.DataFrame(topic_doc_df)
topic_doc_df = topic_doc_df * data_sample[weight]
topic_doc_df['time'] = data_sample[time]
topic_time_df = pd.DataFrame()
for col in range(0, lda_model.num_topics) :
        grouped = topic_doc_df[col].groupby(topic_doc_df['time'])
if by == 'sum' :
DICT = grouped.sum()
if by == 'mean' :
DICT = grouped.mean()
topic_time_df = topic_time_df.append(DICT, ignore_index=1)
topic_time_df = topic_time_df.transpose()
topic_time_df.index = topic_time_df.index.astype(int)
topic_time_df = topic_time_df.sort_index()
return(topic_time_df)
def get_topic_CAGR(topic_time_df) :
st_time = min(topic_time_df.index)
ed_time = 2021 # 2020 fix
duration = int(ed_time) - int(st_time)
result = {}
for col in topic_time_df :
        st_val = topic_time_df[col].iloc[0]
        ed_val = topic_time_df[col].iloc[duration]
        CAGR = (ed_val / st_val) ** (1 / duration) - 1
result[col] = CAGR
return(result)
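# Worked example of the CAGR formula above: a topic whose volume grows from 10
# in the first year to 80 six years later has (80/10) ** (1/6) - 1 ~= 0.41,
# i.e. roughly 41% compound annual growth.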
def get_topic2CPC(CPC_topic_matrix) :
result_dict = {}
for col in CPC_topic_matrix.columns :
result_dict[col] = pd.to_numeric(CPC_topic_matrix[col]).idxmax()
return(result_dict)
def get_most_similar_doc2topic(data_sample, topic_doc_df, top_n = 5, title = 'title', date = 'date') :
result_df = pd.DataFrame()
title = title
for col in range(topic_doc_df.shape[1]) :
DICT = {}
# idx = np.argmax(topic_doc_df[:,col])
# value = np.max(topic_doc_df[:,col])
for n in range(1, top_n+1) :
idx = topic_doc_df.argsort(axis = 0)[-n][col]
DICT['topic'] = col
DICT['rank'] = n
DICT['title'] = data_sample[title][idx]
DICT['date'] = data_sample[date][idx]
DICT['similarity'] = topic_doc_df[idx,col]
result_df = result_df.append(DICT, ignore_index=1)
return(result_df) | [] |
2024-01-10 | SNUtilab/amore | submodule~LDA_tunning.py | # -*- coding: utf-8 -*-
"""
Created on Sun Nov 21 02:59:32 2021
@author: tkdgu
"""
from gensim.models.ldamulticore import LdaMulticore
from gensim.models import CoherenceModel
from gensim.corpora import Dictionary
import pandas as pd
import numpy as np
def compute_coherence_values(corpus, dictionary, texts, k, a, b, method = "u_mass"):
result = {}
lda_model = LdaMulticore(corpus=corpus,
id2word=dictionary,
num_topics=k,
random_state=100,
chunksize=100,
passes=10,
alpha=a,
eta=b,
)
result['perplexity'] = lda_model.log_perplexity(corpus)
for method in ['u_mass', 'c_v', 'c_uci', 'c_npmi'] :
coherence_model_lda = CoherenceModel(model=lda_model,
texts=texts,
dictionary=dictionary,
coherence= method)
result[method] = coherence_model_lda.get_coherence()
return result
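# Hedged usage sketch (added for illustration; the corpus below is a stand-in,
# not part of the original pipeline): score one (k, alpha, beta) configuration
# on a tokenized text collection.
def _example_coherence_run(texts):
    dictionary = Dictionary(texts)
    corpus = [dictionary.doc2bow(t) for t in texts]
    return compute_coherence_values(corpus, dictionary, texts, k=10, a=0.1, b=0.1)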
def tunning(texts, dct, corpus, START, END, STEP) :
grid = {}
grid['Validation_Set'] = {}
    # Topics range  # adjust as needed
min_topics = START
max_topics = END
step_size = STEP
topics_range = range(min_topics, max_topics, step_size)
# Alpha parameter
# alpha = list(np.arange(0.01, 1, 0.3))
alpha = [0.01, 0.1, 1]
# alpha.append('symmetric')
# alpha.append('asymmetric')
# Beta parameter
# beta = list(np.arange(0.01, 1, 0.3))
beta = [0.01, 0.1, 1]
# beta.append('symmetric')
# Validation sets
# num_of_docs = len(corpus)
corpus_sets = [# gensim.utils.ClippedCorpus(corpus, num_of_docs*0.25),
# gensim.utils.ClippedCorpus(corpus, num_of_docs*0.5),
# gensim.utils.ClippedCorpus(corpus, num_of_docs*0.75),
corpus]
corpus_title = ['100% Corpus']
model_results = {'Validation_Set': [],
'Topics': [],
'Alpha': [],
'Beta': [],
'Perplexity': [],
'U_mass' : [],
'C_v' : [],
'C_uci' : [],
'C_npmi' : [],
}
# Can take a long time to run
if 1 == 1:
cnt = 0
# iterate through validation corpuses
for i in range(len(corpus_sets)):
# iterate through number of topics
for k in topics_range:
# iterate through alpha values
for a in alpha:
                    # iterate through beta values
for b in beta:
# get the coherence score for the given parameters
result = compute_coherence_values(corpus=corpus_sets[i],
dictionary=dct,
texts = texts,
k=k,
a=a,
b=b)
# Save the model results
model_results['Validation_Set'].append(corpus_title[i])
model_results['Topics'].append(k)
model_results['Alpha'].append(a)
model_results['Beta'].append(b)
model_results['Perplexity'].append(result['perplexity'])
model_results['U_mass'].append(result['u_mass'])
model_results['C_v'].append(result['c_v'])
model_results['C_uci'].append(result['c_uci'])
model_results['C_npmi'].append(result['c_npmi'])
cnt +=1
print("전체 {} 중에서 {} ".format(len(alpha) *len(beta) *len(topics_range),cnt))
return(pd.DataFrame(model_results))
def lda_model(corpus, dct, Topics, Alpha, Beta) :
lda_model = LdaMulticore(corpus=corpus,
id2word=dct,
num_topics= Topics,
random_state=100,
chunksize=100,
passes=10,
alpha= Alpha,
eta= Beta,
)
return(lda_model)
def model_by_tunning(tunning_results, corpus, dct) :
index = tunning_results['Coherence'].idxmax()
Alpha = round(float(tunning_results['Alpha'][index]), 2)
Beta = round(float(tunning_results['Beta'][index]), 2)
Topics = tunning_results['Topics'][index]
lda_model = LdaMulticore(corpus=corpus,
id2word=dct,
num_topics= Topics,
random_state=100,
chunksize=100,
passes=10,
alpha= Alpha,
eta= Beta,
)
return(lda_model)
class LDA_obj() :
def __init__(self, texts, n_topics, alpha, beta, keyword_dct):
# document embedding, ready to LDA
self.texts = texts
self.keyword_dct = keyword_dct
# self.keyword_dct.filter_extremes(no_below = 20)
self.keyword_list = list(self.keyword_dct.token2id.keys())
self.corpus = [self.keyword_dct.doc2bow(text) for text in self.texts]
# encoded_keyword = embedding.keyword_embedding(keyword_list)
self.texts = [[k for k in doc if k in self.keyword_list] for doc in self.texts]
self.docs = [" ".join(i) for i in self.texts]
self.model = lda_model(self.corpus, self.keyword_dct, n_topics, alpha, beta)
| [] |
2024-01-10 | SNUtilab/amore | amore~submodule~LDA_tunning.py | # -*- coding: utf-8 -*-
"""
Created on Sun Nov 21 02:59:32 2021
@author: tkdgu
"""
from gensim.models.ldamulticore import LdaMulticore
from gensim.models import CoherenceModel
from gensim.corpora import Dictionary
import pandas as pd
import numpy as np
def compute_coherence_values(corpus, dictionary, texts, k, a, b, method = "u_mass"):
result = {}
lda_model = LdaMulticore(corpus=corpus,
id2word=dictionary,
num_topics=k,
random_state=100,
chunksize=100,
passes=10,
alpha=a,
eta=b,
)
result['perplexity'] = lda_model.log_perplexity(corpus)
for method in ['u_mass', 'c_v', 'c_uci', 'c_npmi'] :
coherence_model_lda = CoherenceModel(model=lda_model,
texts=texts,
dictionary=dictionary,
coherence= method)
result[method] = coherence_model_lda.get_coherence()
return result
def tunning(texts, dct, corpus, START, END, STEP) :
grid = {}
grid['Validation_Set'] = {}
    # Topics range  # adjust as needed
min_topics = START
max_topics = END
step_size = STEP
topics_range = range(min_topics, max_topics, step_size)
# Alpha parameter
# alpha = list(np.arange(0.01, 1, 0.3))
alpha = [0.01, 0.1, 0.5, 1]
alpha.append('symmetric')
alpha.append('asymmetric')
# Beta parameter
# beta = list(np.arange(0.01, 1, 0.3))
beta = [0.01, 0.1, 0.5, 1]
beta.append('symmetric')
# Validation sets
# num_of_docs = len(corpus)
corpus_sets = [# gensim.utils.ClippedCorpus(corpus, num_of_docs*0.25),
# gensim.utils.ClippedCorpus(corpus, num_of_docs*0.5),
# gensim.utils.ClippedCorpus(corpus, num_of_docs*0.75),
corpus]
corpus_title = ['100% Corpus']
model_results = {'Validation_Set': [],
'Topics': [],
'Alpha': [],
'Beta': [],
'Perplexity': [],
'U_mass' : [],
'C_v' : [],
'C_uci' : [],
'C_npmi' : [],
}
# Can take a long time to run
if 1 == 1:
cnt = 0
# iterate through validation corpuses
for i in range(len(corpus_sets)):
# iterate through number of topics
for k in topics_range:
# iterate through alpha values
for a in alpha:
                    # iterate through beta values
for b in beta:
# get the coherence score for the given parameters
result = compute_coherence_values(corpus=corpus_sets[i],
dictionary=dct,
texts = texts,
k=k,
a=a,
b=b)
# Save the model results
model_results['Validation_Set'].append(corpus_title[i])
model_results['Topics'].append(k)
model_results['Alpha'].append(a)
model_results['Beta'].append(b)
model_results['Perplexity'].append(result['perplexity'])
model_results['U_mass'].append(result['u_mass'])
model_results['C_v'].append(result['c_v'])
model_results['C_uci'].append(result['c_uci'])
model_results['C_npmi'].append(result['c_npmi'])
cnt +=1
print("전체 {} 중에서 {} ".format(len(alpha) *len(beta) *len(topics_range),cnt))
return(pd.DataFrame(model_results))
def lda_model(corpus, dct, Topics, Alpha, Beta) :
lda_model = LdaMulticore(corpus=corpus,
id2word=dct,
num_topics= Topics,
random_state=100,
chunksize=100,
passes=10,
alpha= Alpha,
eta= Beta,
)
return(lda_model)
def model_by_tunning(tunning_results, corpus, dct) :
index = tunning_results['Coherence'].idxmax()
Alpha = round(float(tunning_results['Alpha'][index]), 2)
Beta = round(float(tunning_results['Beta'][index]), 2)
Topics = tunning_results['Topics'][index]
lda_model = LdaMulticore(corpus=corpus,
id2word=dct,
num_topics= Topics,
random_state=100,
chunksize=100,
passes=10,
alpha= Alpha,
eta= Beta,
)
return(lda_model)
class LDA_obj() :
def __init__(self, texts, n_topics, alpha, beta):
# document embedding, ready to LDA
self.texts = texts
self.keyword_dct = Dictionary(self.texts)
# self.keyword_dct.filter_extremes(no_below = 20)
self.keyword_list = list(self.keyword_dct.token2id.keys())
self.corpus = [self.keyword_dct.doc2bow(text) for text in self.texts]
# encoded_keyword = embedding.keyword_embedding(keyword_list)
self.texts = [[k for k in doc if k in self.keyword_list] for doc in self.texts]
self.docs = [" ".join(i) for i in self.texts]
self.model = lda_model(self.corpus, self.keyword_dct, n_topics, alpha, beta)
| [] |
2024-01-10 | lambrou/SemTerm | semterm~terminal~TerminalTool.py | from typing import Dict, Any, Union, Tuple, Sequence
from uuid import uuid4
from inspect import signature
from langchain.tools.base import BaseTool
from pydantic.decorator import validate_arguments
from semterm.terminal.SemanticTerminalManager import SemanticTerminalManager
class TerminalTool(BaseTool):
name: str = "Terminal"
description: str = (
"Executes commands in a terminal. Input should be valid commands, and the output will be any "
"output from running that command. If you are asked to do perform a task, it is likely the setup for the task "
"has not been done yet. "
"If you are unsure, use the Human tool to verify with the human that they want you to run all setup commands "
"as well. "
)
manager: SemanticTerminalManager = SemanticTerminalManager()
@property
def func(self):
return self.manager.create_process().run
@property
def args(self) -> dict:
if self.args_schema is not None:
return self.args_schema.schema()["properties"]
else:
inferred_model = validate_arguments(self.func).model
schema = inferred_model.schema()["properties"]
valid_keys = signature(self.func).parameters
return {k: schema[k] for k in valid_keys if k not in ("run_manager", "callbacks")}
def _run(self, *args: Any, **kwargs: Any) -> str:
"""Use the tool."""
return self.func(*args, **kwargs)
async def _arun(self, *args: Any, **kwargs: Any) -> str: # pragma: no cover
"""Use the tool asynchronously."""
if self.coroutine:
return await self.coroutine(*args, **kwargs)
raise NotImplementedError("Tool does not support async")
def _to_args_and_kwargs(
self, tool_input: Union[str, Dict, list[str]]
) -> Tuple[Tuple, Dict]:
"""Convert tool input to pydantic model."""
args, kwargs = self._to_args_and_kwargs_b_compat(tool_input)
# For backwards compatibility. The tool must be run with a single input
all_args = list(args) + list(kwargs.values())
if len(all_args) != 1:
raise ValueError(
f"Too many arguments to single-input tool {self.name}."
f" Args: {all_args}"
)
return tuple(all_args), {}
@staticmethod
def _to_args_and_kwargs_b_compat(
run_input: Union[str, Dict, list[str]]
) -> Tuple[Sequence, dict]:
# For backwards compatability, if run_input is a string,
# pass as a positional argument.
if isinstance(run_input, str):
return (run_input,), {}
if isinstance(run_input, list):
return [], {"command": ";".join(run_input)}
else:
return [], run_input
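    # Illustration (added comment, not in the original source): a list input such
    # as ["cd /tmp", "ls"] is collapsed to {"command": "cd /tmp;ls"}, so a
    # multi-step plan runs as one call to the persistent terminal process.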
| [] |
2024-01-10 | lambrou/SemTerm | semterm~agent~MrklAgent.py | import os
from langchain.agents import load_tools
from langchain.chat_models import ChatOpenAI
from langchain.memory import ConversationEntityMemory
from semterm.agent.TerminalAgentPrompt import PREFIX
from semterm.config.Config import Config
from semterm.agent.TerminalAgent import TerminalAgent
from semterm.agent.TerminalAgentExecutor import TerminalAgentExecutor
from semterm.terminal.TerminalOutputParser import TerminalOutputParser
from semterm.terminal.TerminalTool import TerminalTool
from semterm.terminal.SemanticTerminalManager import SemanticTerminalManager
from semterm.langchain_extensions.tools import MistakeTool, TerminalHumanTool
class MrklAgent:
def __init__(self, config: Config):
config = config.get()
self.verbose = config.getboolean("DEFAULT", "verbose")
self.max_iterations = config.getint("DEFAULT", "max_iterations")
self.timeout = config.getint("DEFAULT", "timeout")
self.print_terminal_output = config.getboolean(
"DEFAULT", "print_terminal_output"
)
self.llm = ChatOpenAI(temperature=0)
self.tools = self.load_tools()
self.memory = self.initialize_memory()
self.terminal_agent = self.initialize_agent()
self.terminal_agent_executor = self.initialize_executor()
def load_tools(self):
tools = [
TerminalTool(manager=SemanticTerminalManager()),
TerminalHumanTool(),
MistakeTool(),
]
return tools
def initialize_memory(self):
return ConversationEntityMemory(
llm=self.llm,
return_messages=True,
chat_history_key="chat_history",
)
def initialize_agent(self):
return TerminalAgent.from_llm_and_tools(
self.llm,
self.tools,
memory=self.memory,
system_message=PREFIX.format(current_directory=os.getcwd()),
output_parser=TerminalOutputParser(),
verbose=self.verbose,
)
def initialize_executor(self):
return TerminalAgentExecutor.from_agent_and_tools(
self.terminal_agent,
self.tools,
memory=self.memory,
max_iterations=self.max_iterations,
verbose=self.verbose,
)
def run(self, user_input):
return self.terminal_agent_executor.run(input=user_input)
| [] |
2024-01-10 | lambrou/SemTerm | tests~agent~test_TerminalAgentExecutor.py | from abc import ABC
import pytest
from unittest.mock import MagicMock, patch
from langchain.schema import (
AgentAction,
AgentFinish,
BaseMemory,
SystemMessage,
)
from langchain.tools import BaseTool
from pydantic.typing import NoneType
from semterm.agent.TerminalAgent import TerminalAgent
from semterm.agent.TerminalAgentExecutor import TerminalAgentExecutor
from semterm.langchain_extensions.schema import AgentMistake
class MockTool(BaseTool, ABC):
name = "mock_tool"
description = "Mock tool for testing purposes."
def _run(self, *args, **kwargs):
pass
def _arun(self):
pass
class TestTerminalAgentExecutor:
@pytest.fixture
def executor(self, mock_tools, terminal_agent):
memory = MagicMock(spec=BaseMemory)
return TerminalAgentExecutor.from_agent_and_tools(
terminal_agent,
[MockTool(name="Tool1"), MockTool(name="Tool2"), MockTool(name="Tool3")],
max_iterations=10,
verbose=True,
memory=memory,
)
@patch.object(
TerminalAgent,
"plan",
return_value=AgentFinish(
return_values={"output": "42"},
log='{"action": "Final Answer", "action_input": "42"}',
),
)
def test_take_next_step_returns_finish(self, plan_mock, executor):
# Test that _take_next_step returns AgentFinish when the output is an instance of AgentFinish
output = AgentFinish(
{"output": "42"}, '{"action": "Final Answer", "action_input": "42"}'
)
result = executor._take_next_step({}, {}, {}, [])
assert result == output
@patch.object(
TerminalAgent,
"plan",
return_value=AgentAction(tool="tool1", tool_input="input1", log="input1"),
)
@patch.object(MockTool, "run", return_value="observation1")
def test_take_next_step_returns_actions(self, run_mock, plan_mock, executor):
# Test that _take_next_step returns a list of AgentAction and observation tuples
name_to_tool_map = {"tool1": MockTool()}
color_mapping = {"tool1": "red"}
inputs = {"input1": "value1"}
intermediate_steps = []
result = executor._take_next_step(
name_to_tool_map, color_mapping, inputs, intermediate_steps
)
assert len(result) == 1
assert isinstance(result[0][0], AgentAction)
assert result[0][0].tool == "tool1"
assert result[0][0].tool_input == "input1"
assert isinstance(result[0][1], str)
assert result[0][1] == "observation1"
@patch.object(
TerminalAgent,
"plan",
return_value=AgentMistake(
log="Invalid input", tool_input="input1", tool="tool1"
),
)
def test_take_next_step_returns_mistakes(self, plan_mock, executor):
# Test that _take_next_step returns a list of AgentMistake and observation tuples
name_to_tool_map = {"tool1": MockTool()}
color_mapping = {"tool1": "red"}
inputs = {"chat_history": [SystemMessage(content="Hello")], "input": "value1"}
intermediate_steps = []
result = executor._take_next_step(
name_to_tool_map,
color_mapping,
inputs,
intermediate_steps,
)
assert len(result) == 1
assert isinstance(result[0][0], AgentMistake)
assert result[0][0].log == "Invalid input"
assert result[0][0].tool_input == "input1"
assert result[0][0].tool == "tool1"
assert isinstance(result[0][1], NoneType)
@patch.object(
TerminalAgent,
"plan",
return_value=AgentAction(
log="Unknown tool", tool_input="input1", tool="unknown_tool"
),
)
def test_take_next_step_returns_invalid_tool(self, plan_mock, executor):
# Test that _take_next_step returns a list of AgentMistake and observation tuples
name_to_tool_map = {"tool1": MockTool()}
color_mapping = {"tool1": "red"}
inputs = {
"chat_history": [SystemMessage(content="Hello")],
"input": "value1",
}
intermediate_steps = []
result = executor._take_next_step(
name_to_tool_map,
color_mapping,
inputs,
intermediate_steps,
)
assert len(result) == 1
assert isinstance(result[0][0], AgentAction)
assert result[0][0].log == "Unknown tool"
assert result[0][0].tool_input == "input1"
assert result[0][0].tool == "unknown_tool"
assert result[0][1] == "unknown_tool is not a valid tool, try another one."
@patch.object(
TerminalAgent,
"plan",
return_value=AgentAction(log="input1", tool_input="input1", tool="tool1"),
)
def test_take_next_step_returns_directly(self, plan_mock, executor):
name_to_tool_map = {"tool1": MockTool(return_direct=True)}
color_mapping = {"tool1": "green"}
inputs = {
"chat_history": [SystemMessage(content="Hello")],
"input": "value1",
}
intermediate_steps = []
result = executor._take_next_step(
name_to_tool_map,
color_mapping,
inputs,
intermediate_steps,
)
assert len(result) == 1
assert isinstance(result[0][0], AgentAction)
assert result[0][0].log == "input1"
assert result[0][0].tool_input == "input1"
assert result[0][0].tool == "tool1"
assert result[0][1] == None
| [
"Hello",
"Mock tool for testing purposes."
] |
2024-01-10 | lambrou/SemTerm | tests~agent~test_MrklAgent.py | from functools import partial
import pytest
from unittest.mock import MagicMock, patch
from langchain.agents import Agent, AgentExecutor
from langchain.base_language import BaseLanguageModel
from langchain.schema import BaseMemory
from langchain.tools import BaseTool
from semterm.agent.TerminalAgent import TerminalAgent
from semterm.agent.TerminalAgentExecutor import TerminalAgentExecutor
from semterm.config.Config import Config
from semterm.agent.MrklAgent import MrklAgent
from semterm.langchain_extensions.tools import MistakeTool
from semterm.terminal.TerminalOutputParser import TerminalOutputParser
from semterm.terminal.TerminalTool import TerminalTool
class TestMrklAgent:
@pytest.fixture
def mrkl_agent(self, monkeypatch):
config_mock = MagicMock(spec=Config)
config_parser_mock = MagicMock()
config_parser_mock.getboolean.return_value = False
config_parser_mock.getint.return_value = 10
config_mock.get.return_value = config_parser_mock
# Store the original methods as attributes of the instance
original_load_tools = MrklAgent.load_tools
original_initialize_memory = MrklAgent.initialize_memory
original_initialize_agent = MrklAgent.initialize_agent
original_initialize_executor = MrklAgent.initialize_executor
monkeypatch.setattr(MrklAgent, "load_tools", MagicMock())
monkeypatch.setattr(MrklAgent, "initialize_memory", MagicMock())
monkeypatch.setattr(MrklAgent, "initialize_agent", MagicMock())
monkeypatch.setattr(MrklAgent, "initialize_executor", MagicMock())
chat_openai_mock = MagicMock()
monkeypatch.setattr("semterm.agent.MrklAgent.ChatOpenAI", chat_openai_mock)
agent = MrklAgent(config=config_mock)
agent.original_load_tools = partial(original_load_tools, agent)
agent.original_initialize_memory = partial(original_initialize_memory, agent)
agent.original_initialize_agent = partial(original_initialize_agent, agent)
agent.original_initialize_executor = partial(
original_initialize_executor, agent
)
return agent
def test_load_tools(self, mrkl_agent, monkeypatch):
load_tools_mock = MagicMock(return_value=[MagicMock(spec=BaseTool)])
terminal_tool_mock = MagicMock(spec=TerminalTool)
mistake_tool_mock = MagicMock(spec=MistakeTool)
monkeypatch.setattr("langchain.agents.load_tools", load_tools_mock)
monkeypatch.setattr(
"semterm.terminal.TerminalTool.TerminalTool", terminal_tool_mock
)
monkeypatch.setattr(
"semterm.langchain_extensions.tools.MistakeTool", mistake_tool_mock
)
tools = mrkl_agent.original_load_tools()
assert isinstance(tools, list)
assert all(isinstance(tool, BaseTool) for tool in tools)
assert any(isinstance(tool, TerminalTool) for tool in tools)
def test_initialize_memory(self, mrkl_agent, monkeypatch):
base_language_model_mock = MagicMock(spec=BaseLanguageModel)
monkeypatch.setattr(
"langchain.base_language.BaseLanguageModel",
MagicMock(return_value=base_language_model_mock),
)
# Set the MagicMock instance as the 'llm' attribute of mrkl_agent
mrkl_agent.llm = base_language_model_mock
memory = mrkl_agent.original_initialize_memory()
assert isinstance(memory, BaseMemory)
def test_initialize_agent(self, mrkl_agent, monkeypatch):
# Mock the objects used by the method
base_language_model_mock = MagicMock(spec=BaseLanguageModel)
terminal_agent_mock = MagicMock(spec=TerminalAgent)
terminal_output_parser_mock = MagicMock(spec=TerminalOutputParser)
# Set the MagicMock instances as the attributes of mrkl_agent
mrkl_agent.llm = base_language_model_mock
mrkl_agent.tools = [MagicMock(spec=BaseTool)]
mrkl_agent.memory = MagicMock(spec=BaseMemory)
mrkl_agent.verbose = False
# Mock the constructors and methods
monkeypatch.setattr(
"langchain.base_language.BaseLanguageModel",
MagicMock(return_value=base_language_model_mock),
)
monkeypatch.setattr(
"semterm.agent.TerminalAgent.TerminalAgent.from_llm_and_tools",
MagicMock(return_value=terminal_agent_mock),
)
monkeypatch.setattr(
"semterm.terminal.TerminalOutputParser", terminal_output_parser_mock
)
agent = mrkl_agent.original_initialize_agent()
# Assert that the agent is an instance of a subclass of the Agent class
assert issubclass(agent.__class__, Agent)
def test_initialize_executor(self, mrkl_agent, monkeypatch):
# Mock the objects used by the method
terminal_agent_executor_mock = MagicMock(spec=TerminalAgentExecutor)
# Set the MagicMock instances as the attributes of mrkl_agent
mrkl_agent.terminal_agent = MagicMock(spec=TerminalAgent)
mrkl_agent.tools = [MagicMock(spec=BaseTool)]
mrkl_agent.memory = MagicMock(spec=BaseMemory)
mrkl_agent.max_iterations = 10
mrkl_agent.verbose = False
# Mock the constructors and methods
monkeypatch.setattr(
"semterm.agent.TerminalAgentExecutor.TerminalAgentExecutor.from_agent_and_tools",
MagicMock(return_value=terminal_agent_executor_mock),
)
executor = mrkl_agent.original_initialize_executor()
# Assert that the executor is an instance of a subclass of the BaseExecutor class
assert issubclass(executor.__class__, AgentExecutor)
def test_run(self, mrkl_agent):
user_input = "test_input"
mrkl_agent.terminal_agent_executor.run = MagicMock()
mrkl_agent.run(user_input)
mrkl_agent.terminal_agent_executor.run.assert_called_with(input=user_input)
| [] |
2024-01-10 | lambrou/SemTerm | tests~agent~test_TerminalAgent.py | import os
import pytest
from unittest.mock import MagicMock
from langchain.base_language import BaseLanguageModel
from langchain.prompts import SystemMessagePromptTemplate
from langchain.tools import BaseTool
from langchain.schema import (
AgentAction,
BaseMessage,
AIMessage,
SystemMessage,
BaseMemory,
)
from semterm.agent.TerminalAgent import TerminalAgent
from semterm.agent.TerminalAgentPrompt import PREFIX, SUFFIX
from semterm.terminal.TerminalOutputParser import TerminalOutputParser
class TestTerminalAgent:
def test_create_prompt(self, terminal_agent, mock_tools):
system_message = PREFIX.format(current_directory=os.getcwd())
human_message = SUFFIX
input_variables = ["input", "chat_history", "agent_scratchpad"]
prompt = TerminalAgent.create_prompt(
tools=mock_tools,
system_message=system_message,
human_message=human_message,
input_variables=input_variables,
)
# Extract properties from the returned ChatPromptTemplate
system_message_from_prompt = prompt.messages[0].format_messages()[0].content
human_message_from_prompt = (
prompt.messages[2].format_messages(input="test input")[0].content
)
# Assert that the properties have the expected values
assert system_message_from_prompt == system_message
assert all(tool.name in human_message_from_prompt for tool in mock_tools)
assert all(tool.description in human_message_from_prompt for tool in mock_tools)
assert prompt.input_variables == input_variables
def test_construct_scratchpad(self, terminal_agent):
intermediate_steps = [
(AgentAction(tool="Human", tool_input="cd ..", log="cd .."), ""),
(
AgentAction(
tool="TerminalTool", tool_input="ls", log="ls command executed"
),
"file1 file2",
),
(
AgentAction(
tool="TerminalTool",
tool_input=["cd ..", "ls"],
log="['cd ..', 'ls']",
),
"file1 file2",
),
]
scratchpad = terminal_agent._construct_scratchpad(intermediate_steps)
assert isinstance(scratchpad, list)
assert all(isinstance(msg, BaseMessage) for msg in scratchpad)
assert len(scratchpad) == 5
assert isinstance(scratchpad[0], AIMessage)
assert scratchpad[0].content == "cd .."
assert isinstance(scratchpad[1], AIMessage)
assert scratchpad[1].content == "ls command executed"
assert isinstance(scratchpad[2], SystemMessage)
assert scratchpad[2].content.startswith("Observation:")
assert isinstance(scratchpad[3], AIMessage)
assert scratchpad[3].content == "['cd ..', 'ls']"
| [
"test input"
] |
2024-01-10 | lambrou/SemTerm | tests~terminal~test_TerminalOutputParser.py | import pytest
from typing import Union
from langchain.schema import AgentAction, AgentFinish
from semterm.agent.TerminalAgentPrompt import FORMAT_INSTRUCTIONS
from semterm.langchain_extensions.schema import AgentMistake
from semterm.terminal.TerminalOutputParser import (
TerminalOutputParser,
)
class TestTerminalOutputParser:
@pytest.fixture
def parser(self):
return TerminalOutputParser()
def test_get_format_instructions(self, parser):
assert parser.get_format_instructions() == FORMAT_INSTRUCTIONS
@pytest.mark.parametrize(
"text, expected",
[
(
'{"action": "Final Answer", "action_input": "42"}',
AgentFinish(
{"output": "42"}, '{"action": "Final Answer", "action_input": "42"}'
),
),
(
'Something before {"action": "Test Action", "action_input": "test input"} and after',
AgentAction(
"Test Action",
"test input",
'Something before {"action": "Test Action", "action_input": "test input"} and after',
),
),
(
"This is a text without valid JSON",
AgentFinish(
{"output": "This is a text without valid JSON"},
"This is a text without valid JSON",
),
),
(
"{'action': 'Invalid JSON', 'action_input': thisiswrong}",
AgentMistake(
"{'action': 'Invalid JSON', 'action_input': thisiswrong}",
"{'action': 'Invalid JSON', 'action_input': thisiswrong}",
),
),
],
)
def test_parse(self, parser, text: str, expected: Union[AgentAction, AgentFinish]):
result = parser.parse(text)
assert result == expected
| [] |
2024-01-10 | lambrou/SemTerm | semterm~terminal~TerminalOutputParser.py | import json
from abc import ABC
from typing import Union
from langchain.agents.conversational_chat.output_parser import ConvoOutputParser
from langchain.schema import AgentAction, AgentFinish
from semterm.agent.TerminalAgentPrompt import FORMAT_INSTRUCTIONS
from semterm.langchain_extensions.schema import AgentMistake
class TerminalOutputParser(ConvoOutputParser, ABC):
def get_format_instructions(self) -> str:
return FORMAT_INSTRUCTIONS
def parse(self, text: str) -> Union[AgentAction, AgentFinish]:
text = text.strip().replace("\xa0", " ")
start_positions = [i for i, c in enumerate(text) if c == "{"]
end_positions = [i for i, c in enumerate(text) if c == "}"]
for start in start_positions:
for end in end_positions:
if start < end: # ensure the end position is after the start
try:
cleaned_output = text[start : end + 1]
response = json.loads(cleaned_output)
action, action_input = (
response["action"],
response["action_input"],
)
if action == "Final Answer":
return AgentFinish({"output": action_input}, text)
else:
return AgentAction(action, action_input, text)
except json.JSONDecodeError:
return AgentMistake(text, text)
# If we reach this point, no valid JSON was found in the text
return AgentFinish({"output": text}, text)
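    # Illustration (added comment, not in the original source): a reply such as
    # 'Sure! {"action": "Terminal", "action_input": "ls"}' parses to
    # AgentAction("Terminal", "ls", ...); invalid JSON between braces yields an
    # AgentMistake, and free text with no braces becomes the final answer.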
| [] |
2024-01-10 | lambrou/SemTerm | semterm~terminal~SemanticTerminalProcess.py | import getpass
import re
import signal
from typing import Any
import pexpect as pexpect
import tiktoken
from langchain.utilities import BashProcess
from langchain.text_splitter import TokenTextSplitter
class SemanticTerminalProcess(BashProcess):
model_name: str = "gpt-3.5-turbo"
chunk_size: int = 500
def __init__(self, pid, print_terminal_output=True, timeout=20):
self.print_terminal_output = print_terminal_output
self.timeout = timeout
self.pid = pid
self.prompt = pid
self.process = self._initialize_persistent_process()
self.last_command_output = ""
self.incorrect_password_attempts = 0
def _initialize_persistent_process(self) -> pexpect.spawn:
process = pexpect.spawn(
"bash",
encoding="utf-8",
)
process.expect(r"\$")
process.sendline("PS1=" + self.prompt)
process.expect_exact(self.prompt, timeout=10)
return process
@staticmethod
def _tiktoken_encoder(text: str, **kwargs: Any) -> int:
encoder = tiktoken.encoding_for_model(SemanticTerminalProcess.model_name)
return len(encoder.encode(text, **kwargs))
@staticmethod
def _get_last_n_tokens(text: str, n: int = chunk_size, overlap: int = 200) -> str:
"""Return last n tokens from the output."""
text_splitter = TokenTextSplitter(
model_name=SemanticTerminalProcess.model_name,
chunk_size=n,
chunk_overlap=overlap,
)
split_text = text_splitter.split_text(text)
last = split_text[-1]
if SemanticTerminalProcess._tiktoken_encoder(last) < n:
return last
else:
return "Truncated Output: ..." + "".join(split_text[-2:])
def process_output(self, output: str, command: str) -> str:
"""Process the output."""
return output
def _run_persistent(self, command: str) -> str:
"""Run commands and return final output."""
self.command = command
if self.process is None:
raise ValueError("Process not initialized")
print("semterm > " + command)
try:
self.process.sendline(command)
self.process.expect([command, self.prompt], timeout=self.timeout)
self.last_command_output = self._handle_stdout(command)
except Exception as e: # noqa - LLM is extremely error prone at the moment.
            self.last_command_output = (
                "The last command resulted in an error. Error: " + str(e)
            )
if self.print_terminal_output:
print(self.last_command_output)
return SemanticTerminalProcess._get_last_n_tokens(self.last_command_output)
def _handle_stdout(self, command):
response = self._handle_terminal_expects(command)
return self._handle_terminal_response(command, response)
def _handle_terminal_response(self, command, response):
if response == "password_request":
return self._handle_password_request(command)
if response == "incorrect_password":
if self.incorrect_password_attempts > 2:
return "Too many bad pass attempts."
self.incorrect_password_attempts += 1
return self._handle_password_request(command, self.incorrect_password_attempts)
elif response == "prompt":
return self.process.before
elif response == "EOF":
return f"Process exited with error status: " \
f"{self.process.exitstatus}"
elif response == "TIMEOUT":
return f"Timeout reached. Most recent output: " \
f"{self.process.buffer}"
def _handle_password_request(self, command, try_count=0):
try:
try_text = f"{try_count} / 3 Attempts\n" if try_count > 0 else f"\n"
signal.signal(signal.SIGINT, self.keyboard_interrupt_handler)
try:
self.process.expect_exact(':', timeout=1)
except pexpect.exceptions.TIMEOUT: # pragma: no cover
pass
self.process.sendline(
getpass.getpass(
try_text +
f"semterm is requesting your password to run the following command: {command}\n"
f"If you trust semterm, please enter your password below:\n"
f"(CTRL+C to Dismiss) Password for {getpass.getuser()}: ",
)
)
return self._handle_stdout(command)
except KeyboardInterrupt:
self.process.sendintr()
print("KeyboardInterrupt: Password not sent.")
return "User aborted password request."
finally:
signal.signal(signal.SIGINT, signal.default_int_handler)
def _handle_terminal_expects(self, command: str) -> str:
password_regex = re.compile(
r"(password for|Enter password|Password:|'s password:)", re.IGNORECASE
)
incorrect_password_regex = re.compile(
r"(?i)(?!.*attempts)(incorrect password|password incorrect|wrong password|try "
r"again|wrong|incorrect)"
)
expect_dict = {
"prompt": self.prompt,
"password_request": password_regex,
"incorrect_password": incorrect_password_regex,
"EOF": pexpect.EOF,
"TIMEOUT": pexpect.TIMEOUT,
}
list_index = self.process.expect(
list(expect_dict.values()), timeout=self.timeout
)
return list(expect_dict.keys())[list_index]
def get_most_recent_output(self):
return self.process.buffer
@staticmethod
def keyboard_interrupt_handler(sig, frame):
print("\nPassword request cancelled.")
raise KeyboardInterrupt
| [] |
2024-01-10 | lambrou/SemTerm | semterm~agent~TerminalAgent.py | import os
from abc import ABC
from typing import Sequence, Optional, List, Tuple, Any
from langchain import BasePromptTemplate
from langchain.agents import (
ConversationalChatAgent,
AgentOutputParser,
)
from langchain.tools import BaseTool
from pydantic import Field
from .TerminalAgentPrompt import (
PREFIX,
SUFFIX,
TEMPLATE_TOOL_RESPONSE,
)
from langchain.schema import (
AgentAction,
BaseOutputParser,
BaseMessage,
AIMessage,
SystemMessage,
)
from semterm.terminal.TerminalOutputParser import TerminalOutputParser
class TerminalAgent(ConversationalChatAgent, ABC):
output_parser: AgentOutputParser = Field(default_factory=TerminalOutputParser)
@classmethod
def create_prompt(
cls,
tools: Sequence[BaseTool],
system_message: str = PREFIX.format(current_directory=os.getcwd()),
human_message: str = SUFFIX,
input_variables: Optional[List[str]] = None,
output_parser: Optional[BaseOutputParser] = None,
) -> BasePromptTemplate:
return super().create_prompt(
tools=tools,
system_message=system_message,
human_message=human_message,
input_variables=input_variables,
output_parser=output_parser or cls._get_default_output_parser(),
)
def _construct_scratchpad(
self, intermediate_steps: List[Tuple[AgentAction, str]]
) -> List[BaseMessage]:
thoughts: List[BaseMessage] = []
for action, observation in intermediate_steps:
if action.tool == "Human":
thoughts.append(AIMessage(content=action.tool_input))
continue
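            # Strip the echoed command(s) from the observation so the scratchpad
            # only carries the terminal output, not a repeat of the input.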
if isinstance(action.tool_input, list):
observation = observation.replace(";".join(action.tool_input), "")
else:
observation = observation.replace(action.tool_input, "")
thoughts.append(AIMessage(content=action.log))
system_message = SystemMessage(
content=TEMPLATE_TOOL_RESPONSE.format(observation=observation)
)
thoughts.append(system_message)
return thoughts
| [] |
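_construct_scratchpad above turns each (AgentAction, observation) pair into an AIMessage followed by a SystemMessage built from the tool-response template. The sketch below shows that transformation on invented sample values; the action, observation, and template text are placeholders, not the real TEMPLATE_TOOL_RESPONSE.

from langchain.schema import AgentAction, AIMessage, SystemMessage

TEMPLATE = "TOOL RESPONSE:\n{observation}"  # stand-in for TEMPLATE_TOOL_RESPONSE

steps = [
    (AgentAction(tool="Terminal", tool_input="ls", log="I will list the files."), "ls\nREADME.md  setup.py"),
]
scratchpad = []
for action, observation in steps:
    observation = observation.replace(action.tool_input, "")  # strip the echoed command
    scratchpad.append(AIMessage(content=action.log))
    scratchpad.append(SystemMessage(content=TEMPLATE.format(observation=observation)))
print([m.content for m in scratchpad])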
2024-01-10 | lambrou/SemTerm | semterm~agent~TerminalAgentExecutor.py | from abc import ABC
from typing import Dict, List, Tuple, Union, Optional
from langchain.agents import AgentExecutor
from langchain.agents.tools import InvalidTool
from langchain.callbacks.manager import CallbackManagerForChainRun
from langchain.schema import AgentAction, AgentFinish
from langchain.tools import BaseTool
from semterm.langchain_extensions.schema import AgentMistake
class TerminalAgentExecutor(AgentExecutor, ABC):
def _take_next_step(
self,
name_to_tool_map: Dict[str, BaseTool],
color_mapping: Dict[str, str],
inputs: Dict[str, str],
intermediate_steps: List[Tuple[AgentAction, str]],
run_manager: Optional[CallbackManagerForChainRun] = None,
) -> Union[
AgentFinish, List[Tuple[AgentAction, str]], List[Tuple[AgentMistake, str]]
]:
"""Take a single step in the thought-action-observation loop.
Override this to take control of how the agent makes and acts on choices.
"""
# Call the LLM to see what to do.
output = self.agent.plan(intermediate_steps, **inputs)
result = []
actions: List[AgentAction]
# If the tool chosen is the finishing tool, then we end and return.
if isinstance(output, AgentFinish):
return output
if isinstance(output, (AgentAction, AgentMistake)):
actions = [output]
for agent_action in actions:
if run_manager:
run_manager.on_agent_action( # pragma: no cover
agent_action,
verbose=self.verbose,
color="green",
)
# Otherwise we lookup the tool
if agent_action.tool in name_to_tool_map:
tool = name_to_tool_map[agent_action.tool]
return_direct = tool.return_direct
color = color_mapping[agent_action.tool]
tool_run_kwargs = self.agent.tool_run_logging_kwargs()
if return_direct:
tool_run_kwargs["llm_prefix"] = ""
# We then call the tool on the tool input to get an observation
observation = tool.run(
agent_action.tool_input,
verbose=self.verbose,
color=color,
**tool_run_kwargs,
)
else:
tool_run_kwargs = self.agent.tool_run_logging_kwargs()
observation = InvalidTool().run(
agent_action.tool,
verbose=self.verbose,
color=None,
**tool_run_kwargs,
)
result.append((agent_action, observation))
return result
| [] |
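The overridden _take_next_step above keeps the standard plan, look up tool, run, record loop while adding handling for AgentMistake. A stripped-down sketch of that control flow follows; the agent and tool objects here are generic stand-ins, not the classes defined above.

def run_one_step(agent, name_to_tool_map, intermediate_steps, **inputs):
    # Ask the agent what to do next; it returns AgentFinish or an action.
    output = agent.plan(intermediate_steps, **inputs)
    if getattr(output, "return_values", None) is not None:
        return output  # AgentFinish: stop the loop
    tool = name_to_tool_map[output.tool]       # look up the chosen tool
    observation = tool.run(output.tool_input)  # execute it
    return [(output, observation)]             # fed back into the next plan() call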
2024-01-10 | lambrou/SemTerm | tests~conftest.py | import os
from unittest.mock import MagicMock
import pytest
from langchain.base_language import BaseLanguageModel
from langchain.schema import BaseMemory, LLMResult, Generation
from langchain.tools import BaseTool
from semterm.agent.TerminalAgent import TerminalAgent
from semterm.agent.TerminalAgentPrompt import PREFIX
from semterm.terminal.TerminalOutputParser import TerminalOutputParser
@pytest.fixture
def mock_tools():
tools = [MagicMock(spec=BaseTool) for _ in range(3)]
# Set custom name and description for each tool
for idx, tool in enumerate(tools):
tool.name = f"Tool{idx + 1}"
tool.description = f"Tool{idx + 1} description"
return tools
@pytest.fixture
def terminal_agent(mock_tools, monkeypatch):
memory_mock = MagicMock(spec=BaseMemory)
system_message = PREFIX.format(current_directory=os.getcwd())
output_parser_mock = MagicMock(spec=TerminalOutputParser)
verbose_mock = False
def mock_generate_prompt(*args, **kwargs):
return LLMResult(generations=[[Generation(text="Hello")]])
llm_mock = MagicMock(spec=BaseLanguageModel)
llm_mock.generate_prompt = mock_generate_prompt
# Instantiate the TerminalAgent using the from_llm_and_tools method
terminal_agent_instance = TerminalAgent.from_llm_and_tools(
llm=llm_mock,
tools=mock_tools,
memory=memory_mock,
system_message=system_message,
output_parser=output_parser_mock,
verbose=verbose_mock,
)
return terminal_agent_instance
| [] |
2024-01-10 | of-one/vocode-python | apps~langchain_agent~tools~contacts.py | from typing import List
from langchain.agents import tool
CONTACTS = [{"name": "Ajay", "phone": "+15555555555"}]
@tool("get_all_contacts")
def get_all_contacts(placeholder: str) -> List[dict]:
"""Get contacts."""
return CONTACTS
| [] |
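A usage sketch for the tool above, assuming the module is importable as tools.contacts: the @tool decorator wraps the function in a LangChain Tool, so an agent can call it by name or it can be run directly.

from tools.contacts import get_all_contacts

print(get_all_contacts.name)           # "get_all_contacts", the name the agent sees
print(get_all_contacts.run("unused"))  # -> [{'name': 'Ajay', 'phone': '+15555555555'}]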
2024-01-10 | of-one/vocode-python | vocode~streaming~vector_db~base_vector_db.py | import os
from typing import Iterable, List, Optional, Tuple
import aiohttp
import openai
from langchain.docstore.document import Document
DEFAULT_OPENAI_EMBEDDING_MODEL = "text-embedding-ada-002"
class VectorDB:
def __init__(
self,
aiohttp_session: Optional[aiohttp.ClientSession] = None,
):
if aiohttp_session:
# the caller is responsible for closing the session
self.aiohttp_session = aiohttp_session
self.should_close_session_on_tear_down = False
else:
self.aiohttp_session = aiohttp.ClientSession()
self.should_close_session_on_tear_down = True
async def create_openai_embedding(
self, text, model=DEFAULT_OPENAI_EMBEDDING_MODEL
) -> List[float]:
params = {
"input": text,
}
engine = os.getenv("AZURE_OPENAI_TEXT_EMBEDDING_ENGINE")
if engine:
params["engine"] = engine
else:
params["model"] = model
return list((await openai.Embedding.acreate(**params))["data"][0]["embedding"])
async def add_texts(
self,
texts: Iterable[str],
metadatas: Optional[List[dict]] = None,
ids: Optional[List[str]] = None,
namespace: Optional[str] = None,
) -> List[str]:
raise NotImplementedError
async def similarity_search_with_score(
self,
query: str,
filter: Optional[dict] = None,
namespace: Optional[str] = None,
) -> List[Tuple[Document, float]]:
raise NotImplementedError
async def tear_down(self):
if self.should_close_session_on_tear_down:
await self.aiohttp_session.close()
| [] |
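A minimal sketch of exercising the base class above; it assumes OPENAI_API_KEY is set and that the module is importable (shown here as base_vector_db). Subclasses would override add_texts and similarity_search_with_score.

import asyncio

from base_vector_db import VectorDB

async def main() -> None:
    db = VectorDB()  # creates and owns its own aiohttp session
    vector = await db.create_openai_embedding("hello world")
    print(len(vector))    # 1536 dimensions for text-embedding-ada-002
    await db.tear_down()  # closes the session it created

asyncio.run(main())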
2024-01-10 | emlynoregan/lungar | setcreds%20template.py | # This is the template for the setcreds.py file. Make sure it is called setcreds.py, and add your api key below.
import openai
openai.api_key = '<YOUR-API-KEY>' | [] |
2024-01-10 | emlynoregan/lungar | scene_engine.py | import setcreds
import openai
import mergedeep
import random
import copy
_diags = 0
def diag(msg, level=5):
if _diags >= level:
print(f"***{msg}")
def run_game(scenes, start_scene_id, diagnostics = False, talk_engine="davinci", question_engine="curie",
npc_talk_max_tokens=64):
global _diags
_diags = diagnostics
scene = {**scenes[start_scene_id]}
new_scene = True
history = scene.get('history') or []
turn = 0
while not scene.get("gameover"):
diag(f"turn: {turn}")
# shrink history
while len(history) > 20:
remove_ix = random.randint(0, len(history) - 1)
del history[remove_ix]
if new_scene:
diag("new scene")
turn = scene.get('turn') if not scene.get('turn') is None else turn
diag(f"turn: {turn}")
look = scene.get('look')
if look:
player_look_lines = get_player_look_lines(scene)
print("\n".join(player_look_lines))
new_scene = False
# continue here
# probably remove this section, npcs don't need actions.
npcs = scene.get('npcs') or []
if npcs:
for _, npc in npcs.items():
if npc:
actions = npc.get('actions') or []
if actions:
diag(f"npc actions")
for action_name, action in actions.items():
if action:
diag(f"action: {action_name}")
min_turn = action.get('min_turn') or 0
diag(f"min_turn: {min_turn}")
if min_turn <= turn:
if npc_action_fires(npc, action, scene, history, question_engine):
diag(f"action fires!")
to_scene_id = action.get('to_scene')
to_scene = scenes[to_scene_id]
scene = do_merge(scene, to_scene)
new_scene = True
transition_pdesc = action.get('transition_pdesc')
if transition_pdesc:
print(transition_pdesc)
transition_ndesc = action.get('transition_ndesc')
if transition_ndesc:
history.append(transition_ndesc)
break
else:
diag(f"action doesn't fire")
else:
diag(f"too early for action")
if new_scene:
break
else:
diag(f"no npc actions")
else:
diag(f"no npcs")
actions = scene.get('actions') or []
if actions:
diag(f"npc actions")
for action_name, action in actions.items():
if action:
diag(f"action: {action_name}")
min_turn = action.get('min_turn') or 0
diag(f"min_turn: {min_turn}")
if min_turn <= turn:
if npc_action_fires(None, action, scene, history, question_engine): # scene-level actions are not tied to an npc
diag(f"action fires!")
to_scene_id = action.get('to_scene')
to_scene = scenes[to_scene_id]
scene = do_merge(scene, to_scene)
new_scene = True
transition_pdesc = action.get('transition_pdesc')
if transition_pdesc:
print(transition_pdesc)
transition_ndesc = action.get('transition_ndesc') or action.get('transition_pdesc')
if transition_ndesc:
history.append(transition_ndesc)
break
else:
diag(f"action doesn't fire")
else:
diag(f"too early for action")
else:
diag(f"no actions")
if not new_scene:
#see if npc talks
npcs = scene.get('npcs') or []
if npcs:
for npc_name, npc in npcs.items():
if npc:
shortdesc = npc.get('shortdesc') or npc_name
talk_p = npc.get('talk_p') or 1
p = random.random()
if p <= talk_p:
diag(f"npc talks")
talk = get_npc_talk(npc, scene, history, talk_engine, npc_talk_max_tokens=npc_talk_max_tokens)
full_talk = f"{shortdesc} says:{talk}"
history.append(full_talk)
print(full_talk)
print("")
if not new_scene:
# player action
player_action = None
player_talk = None
player = scene.get('player')
while not (player_action or player_talk):
player_input = input("You say > ")
if not player_input:
player_action_lines = get_player_action_lines(player)
print("\n".join(player_action_lines))
elif player_input[0] == "/":
# this is an action
player_command = player_input[1:].lower().strip()
if player_command in ["look", "l"]:
player_look_lines = get_player_look_lines(scene)
print("\n".join(player_look_lines))
elif player_command in ["exit", "x"]:
exit()
else:
# check in player actions
if not player_command in (player.get('actions') or {}):
print(f"The command '{player_command}' is not recognized.")
else:
player_action = player.get('actions').get(player_command)
else:
# this is talking
diag(f"player talks")
player_talk = player_input
print("")
if player_action:
diag(f"player action fires!")
action_ptext = player_action.get('ptext')
action_ntext = player_action.get('ntext')
history += [action_ntext] if action_ntext else []
print(action_ptext)
to_scene_id = player_action.get('to_scene')
to_scene = scenes[to_scene_id]
scene = do_merge(scene, to_scene)
new_scene = True
elif player_talk:
shortdesc = player.get('nshortdesc') or "The player"
full_talk = f"{shortdesc} says: {player_input}"
history.append(full_talk)
turn += 1
def do_merge(scene, to_scene):
to_scene2 = copy.deepcopy(to_scene)
scene2 = copy.deepcopy(scene)
return mergedeep.merge(scene2, to_scene2)
def npc_action_fires(npc, npc_action, scene, history, engine):
q_and_a_lines = npc_action.get('q_and_a_lines')
answer = npc_action.get('answer')
logit_bias = npc_action.get('logit_bias')
if q_and_a_lines:
about_lines = npc.get('about_lines') if npc else []
# talk_prompt = npc.get('talk_prompt')
prompt_lines = get_npc_look_lines(scene) + [""] + \
about_lines + [""] + \
history + [""] + q_and_a_lines
prompt = "\n".join(prompt_lines)
diag (f"npc action prompt: {prompt}", level=4)
temperature = 0
completion = openai.Completion.create(
engine=engine,
max_tokens=2,
temperature=temperature,
prompt=prompt,
frequency_penalty=0,
logprobs=5,
logit_bias=logit_bias
)
diag(f"completion: {completion}", level=4)
ai_raw_msg = completion.choices[0].text
# print(f"ai_raw_msg: {ai_raw_msg}")
ai_msg_lines = ai_raw_msg.split("\n")
ai_msg = ai_msg_lines[0]
npc_answer = ai_msg.lower().strip()
diag(f"npc answer: {npc_answer}")
return npc_answer == (answer or "yes")
else:
return True
def get_npc_talk(npc, scene, history, engine, npc_talk_max_tokens=128):
about_lines = npc.get('about_lines')
talk_lines = npc.get('talk_lines')
talk_prompt = npc.get('talk_prompt')
prompt_lines = get_npc_look_lines(scene) + [""] + \
about_lines + [""] + \
talk_lines + [""] + \
history + ["", talk_prompt]
temperature = 0.8
prompt = "\n".join(prompt_lines)
diag(f"npc talk prompt: {prompt}")
completion = openai.Completion.create(
engine=engine,
max_tokens=npc_talk_max_tokens,
temperature=temperature,
prompt=prompt,
frequency_penalty=0.1
)
ai_raw_msg = completion.choices[0].text
# print(f"ai_raw_msg: {ai_raw_msg}")
ai_msg_lines = ai_raw_msg.split("\n")
ai_msg = ai_msg_lines[0]
diag(f"ai_msg: {ai_msg}")
return ai_msg
def get_player_look_lines(scene):
lines = []
scene_desc = scene.get('pdesc')
if scene_desc:
lines.append(scene_desc)
# lines.append("")
player = scene.get('player')
if player:
# player_desc = player.get('pdesc') or "You are here."
# if player_desc:
# lines.append(player_desc)
player_items = player.get('items')
if player_items:
for _, item in player_items.items():
if item:
item_desc = item.get('desc')
if item_desc:
lines.append(f"You are holding {item_desc}.")
# lines.append("")
lines.extend(get_npc_lines(scene))
return lines
def get_npc_look_lines(scene):
lines = []
scene_desc = scene.get('ndesc')
if scene_desc:
lines.append(scene_desc)
lines.append("")
player = scene.get('player')
if player:
player_desc = player.get('ndesc')
if player_desc:
lines.append(player_desc)
player_items = player.get('items')
if player_items:
player_short_desc = player.get('nshortdesc') or "The player"
for _, item in player_items.items():
if item:
item_desc = item.get('desc')
if item_desc:
lines.append(f"{player_short_desc} is holding {item_desc}.")
lines.append("")
lines.extend(get_npc_lines(scene))
return lines
def get_player_action_lines(player):
lines = [
"[/L]ook around you",
"e[/X]it the game"
]
player_actions = player.get('actions')
if player_actions:
for action_name, action in player_actions.items():
if action:
action_desc = action.get('desc') or action_name
lines.append(action_desc)
return lines
def get_npc_lines(scene):
lines=[]
npcs = scene.get('npcs') or []
if npcs:
for npc_name, npc in npcs.items():
if npc:
npc_desc = npc.get('pdesc')
if npc_desc:
lines.append(npc_desc)
npc_items = npc.get('items')
if npc_items:
npc_short_desc = npc.get('shortdesc') or npc_name
for _, item in npc_items.items():
if item:
item_desc = item.get('desc')
if item_desc:
lines.append(f"{npc_short_desc} is holding {item_desc}.")
lines.append("")
return lines
| [
"talk_prompt",
"\n"
] |
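run_game() above drives everything from a dictionary of scenes; the keys it reads (look, pdesc/ndesc, player, npcs, actions, gameover) suggest a shape like the hedged sketch below. The scene names and text are invented for illustration only.

scenes = {
    "cell": {
        "look": True,
        "pdesc": "You wake up in a small stone cell.",
        "ndesc": "The player is locked in a small stone cell.",
        "player": {"nshortdesc": "the prisoner", "actions": {}, "items": {}},
        "npcs": {},
        "actions": {},
    },
    "end": {"gameover": True},
}
# run_game(scenes, "cell")  # interactive: reads stdin and calls the OpenAI API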
2024-01-10 | ANR-kFLOW/event-relation-classification | GPT-3%20data%20augmentation~events_triggers_generation_by_GPT3.py | import time
import openai
import pandas as pd
# openai.api_key = os.getenv("OPENAI_API_KEY")
#
# openai.Engine.list()
# write your file name instead of jokes_prepared.jsonl
# with open("/Users/youssrarebboud/Downloads/prompt_prepared.jsonl") as f:
# response = openai.File.create(file=f, purpose='fine-tune')
# print(response)
def read_file(path):
file = pd.read_csv(path)
return file
def random_selection(file):
df_elements = file.sample(n=7)
return df_elements['sentence']
generated_sentences = []
file = read_file('/Users/youssrarebboud/Desktop/intention_left.csv')
print(len(file))
file.dropna(axis=0, how='any', subset=None, inplace=True)
file = file.drop_duplicates()
print(len(file))
file.columns = ['idx', 'sentence']
prompt_intention = "an event is A possible or actual event, which can possibly be defined by precise time and space coordinates "" intention relationship Connects an event (trigger1), with an other event (trigger 2), that is intended to cause it independetly if the result is achieved or not ""so if:The government voted a law, in the attempt of reducing unemployment.'' is an sentence that has an intention relationship between the event(voted)==(trigger1) and the event (reducing)==trigger2"" and also this sentence The company said it expects to use the proceeds to repay certain bank debt and for general corporate purposes, including establishing new operating centers and possible acquisitions, with trigger1==use and trigger2== establishing,what would be the trigger1 and trigger2 in these sentences, give me only one single word for each trigger an only two triggers per sentence, put each pair between parentheses in a separate line:"
prompt_prevention = 'an event is A possible or actual event, which can possibly be defined by precise time and space cordinates, a prevention relationship Connect an event (trigger1) with the event (trigger 2) for which is the cause of not happening. so if in this sentence Subcontractors will be offered a settlement and a swift transition to new management is expected to avert an exodus of skilled workers from Waertsilae Marine\'s two big shipyards, government officials said. is an expression with prevention relationship between settelement(trigger1) and oxodus(trigger2), what would be the trigger1 and trigger2 in these sentences, give me only one single word for each trigger an only two triggers per sentence, put each pair between parentheses in a separate line: '
prompt_enable = "a condition is The fact of having certain qualities, which may trigger events, an event is A possible or actual event, which can possibly be defined by precise time and space cordinates "" enables relationship Connects a condition or an event (trigger1), with an other event (trigger 2),it is contributing to realize as an enabling factor.""so if:the basketball player is so tall that he was scoring many times during the match'' is an sentence that has an enabling relationship between the Condition(tall)==(trigger1) and the event (scoring)==trigger2"" and also this sentence In addition, Courtaulds said the moves are logical because they will allow both the chemicals and textile businesses to focus more closely on core activities. with trigger1==moves and trigger2== focus,what would be the trigger1 and trigger2 in these sentences, give me only one single word for each trigger an only two triggers per sentence, put each pair between parentheses in a separate line:"
print(file)
#
# # Here set parameters as you like
for i, row in file.groupby(file.index // 1):
examples = row['sentence']
my_prompt = prompt_intention + ' '.join(examples)
response = openai.Completion.create(
engine="text-davinci-003",
prompt=my_prompt,
temperature=0,
max_tokens=2000,
# top_p=1,
# frequency_penalty=0.0,
# presence_penalty=0.0,
# stop=["\n"]
)
# print(response['choices'][0]['text'])
# print(response['choices'][0]['text'])
# for x in response['choices'][0]['text'].split('\n'):
# print(x)
# generated_sentences.append(x)
generated_sentences.append(response['choices'][0]['text'])
time.sleep(10)
data_frame = pd.DataFrame(generated_sentences, columns=['generated events'])
print('I am just here ')
data_frame.to_csv('left_intention_with_events2.csv')
print('saved')
# data_frame = pd.DataFrame(generated_sentences, columns=['generated sentences'])
# data_frame.to_csv('generated_event_triggers_GPT3_second_hit_intention.csv')
| [
"an event is A possible or actual event, which can possibly be defined by precise time and space coordinates intention relationship Connects an event (trigger1), with an other event (trigger 2), that is intended to cause it independetly if the result is achieved or not so if:The government voted a law, in the attempt of reducing unemployment.'' is an sentence that has an intention relationship between the event(voted)==(trigger1) and the event (reducing)==trigger2 and also this sentence The company said it expects to use the proceeds to repay certain bank debt and for general corporate purposes, including establishing new operating centers and possible acquisitions, with trigger1==use and trigger2== establishing,what would be the trigger1 and trigger2 in these sentences, give me only one single word for each trigger an only two triggers per sentence, put each pair between parentheses in a separate line:",
"prompt_intention + ' '.join(examples)",
"an event is A possible or actual event, which can possibly be defined by precise time and space cordinates, a prevention relationship Connect an event (trigger1) with the event (trigger 2) for which is the cause of not happening. so if in this sentence Subcontractors will be offered a settlement and a swift transition to new management is expected to avert an exodus of skilled workers from Waertsilae Marine's two big shipyards, government officials said. is an expression with prevention relationship between settelement(trigger1) and oxodus(trigger2), what would be the trigger1 and trigger2 in these sentences, give me only one single word for each trigger an only two triggers per sentence, put each pair between parentheses in a separate line: ",
"a condition is The fact of having certain qualities, which may trigger events, an event is A possible or actual event, which can possibly be defined by precise time and space cordinates enables relationship Connects a condition or an event (trigger1), with an other event (trigger 2),it is contributing to realize as an enabling factor.so if:the basketball player is so tall that he was scoring many times during the match'' is an sentence that has an enabling relationship between the Condition(tall)==(trigger1) and the event (scoring)==trigger2 and also this sentence In addition, Courtaulds said the moves are logical because they will allow both the chemicals and textile businesses to focus more closely on core activities. with trigger1==moves and trigger2== focus,what would be the trigger1 and trigger2 in these sentences, give me only one single word for each trigger an only two triggers per sentence, put each pair between parentheses in a separate line:",
" "
] |
2024-01-10 | ANR-kFLOW/event-relation-classification | GPT-3%20data%20augmentation~sentence_generation.py | import random
import time
import openai
import pandas as pd
def read_file(path):
file = pd.read_csv(path)
return file
enable = read_file(
'/Users/youssrarebboud/Documents/GitHub/EventRelationDataset/annotation_csv/well aligned rows Timebank/intends.csv')
rslt_df = enable[enable['annotation'] == 1]
enable_examples = set(rslt_df.sentence.values)
generated_Sentences = []
request_enable = 'a condition is The fact of having certain qualities, which may trigger events, an event is A possible or actual event, which can possibly be defined by precise time and space cordinates "" enables relationship Connects a condition or an event (trigger1), with an other event (trigger 2),it is contributing to realize as an enabling factor."" give me very long political example sentences follwong these examples and give me each sentence in one line please'
request_prevents = 'an event is A possible or actual event, which can possibly be defined by precise time and space cordinates, prevention is a relation between an event and the event for which is the cause of not happening.Example: the strike was sufficient to block the changement in working conditions. give me very long political different in topics example sentences which have the prevention relationship and give me each sentence in one line please, for example'
request_intention = 'an event is A possible or actual event, which can possibly be defined by precise time and space cordinates, Connects an Event with the effect it is intended to cause (independently if the result is achieved or not).Example: The government voted a law, in the attempt of reducing unemployment. give me very long political different in topics example sentences which have the intention relationship and give me each sentence in one line please, for example'
while len(generated_Sentences) < 80:
prompt = request_intention + ','.join(random.sample(list(enable_examples) + generated_Sentences, 5))
response = openai.Completion.create(
engine="text-davinci-003",
prompt=prompt,
temperature=0,
max_tokens=2000,
# top_p=1,
# frequency_penalty=0.0,
# presence_penalty=0.0,
# stop=["\n"]
)
print('here')
# print(response['choices'][0]['text'])
time.sleep(3)
for sen in response['choices'][0]['text'].split('\n'):
if len(sen) > 0:
generated_Sentences.append(sen)
print(sen)
generated_Sentences_df = pd.DataFrame(list(set(generated_Sentences)))
generated_Sentences_df.to_csv('/Users/youssrarebboud/Desktop/intention_left.csv')
# for x in response['choices'][0]['text'].split('\n'):
# print(x)
# generated_sentences.append(x)
# generated_sentences.append(response['choices'][0]['text'])
# response = response = bot.ask(request_enable+','.join(random.sample(list(enable_examples)+generated_Sentences,5)))
# for sen in response.split('\n'):
#
# print(sen)
#
# generated_Sentences.append(sen)
#
| [
"request_intention + ','.join(random.sample(list(enable_examples) + generated_Sentences, 5))"
] |
2024-01-10 | parallel75/Microsoft_AutoGen_Tutorial | research.py | import os
from autogen import config_list_from_json
import autogen
import requests
from bs4 import BeautifulSoup
import json
from langchain.agents import initialize_agent
from langchain.chat_models import ChatOpenAI
from langchain.text_splitter import RecursiveCharacterTextSplitter
from langchain.chains.summarize import load_summarize_chain
from langchain import PromptTemplate
import openai
from dotenv import load_dotenv
# Load the various API keys
load_dotenv()
config_list = config_list_from_json(env_or_file="OAI_CONFIG_LIST")
# For details, see episode 3 of the video series
openai.api_key = os.getenv("OPENAI_API_KEY")
serper_api_key=os.getenv("SERPER_API_KEY")
browserless_api_key=os.getenv("BROWSERLESS_API_KEY")
# Research tools module
# Call Google Search via Serper
def search(query):
url = "https://google.serper.dev/search"
payload = json.dumps({
"q": query
})
headers = {
'X-API-KEY': serper_api_key,
'Content-Type': 'application/json'
}
response = requests.request("POST", url, headers=headers, data=payload)
return response.json()
# Scrape website content
def scrape(url: str):
# scrape website, and also will summarize the content based on objective if the content is too large
# objective is the original objective & task that user give to the agent, url is the url of the website to be scraped
print("Scraping website...")
# Define the headers for the request
headers = {
'Cache-Control': 'no-cache',
'Content-Type': 'application/json',
}
# Define the data to be sent in the request
data = {
"url": url
}
# Convert Python object to JSON string
data_json = json.dumps(data)
# Send the POST request
post_url = f"https://chrome.browserless.io/content?token={browserless_api_key}"
response = requests.post(
post_url, headers=headers, data=data_json)
# Check the response status code
if response.status_code == 200:
soup = BeautifulSoup(response.content, "html.parser")
text = soup.get_text()
print("CONTENT:", text)
if len(text) > 8000:
output = summary(text)
return output
else:
return text
else:
print(f"HTTP request failed with status code {response.status_code}")
# Summarize website content
def summary(content):
llm = ChatOpenAI(temperature=0, model="gpt-3.5-turbo-16k-0613")
text_splitter = RecursiveCharacterTextSplitter(
separators=["\n\n", "\n"], chunk_size=10000, chunk_overlap=500)
docs = text_splitter.create_documents([content])
map_prompt = """
Write a detailed summary of the following text for a research purpose:
"{text}"
SUMMARY:
"""
map_prompt_template = PromptTemplate(
template=map_prompt, input_variables=["text"])
summary_chain = load_summarize_chain(
llm=llm,
chain_type='map_reduce', # map_reduce splits the content into chunks so it stays under the LLM token limit
map_prompt=map_prompt_template,
combine_prompt=map_prompt_template,
verbose=True
)
output = summary_chain.run(input_documents=docs,)
return output
# Information gathering (research)
def research(query):
llm_config_researcher = {
"functions": [
{
"name": "search",
"description": "google search for relevant information",
"parameters": {
"type": "object",
"properties": {
"query": {
"type": "string",
"description": "Google search query",
}
},
"required": ["query"],
},
},
{
"name": "scrape",
"description": "Scraping website content based on url",
"parameters": {
"type": "object",
"properties": {
"url": {
"type": "string",
"description": "Website url to scrape",
}
},
"required": ["url"],
},
},
],
"config_list": config_list}
researcher = autogen.AssistantAgent(
name="researcher",
system_message="Research about a given query, collect as many information as possible, and generate detailed research results with loads of technique details with all reference links attached; Add TERMINATE to the end of the research report;",
llm_config=llm_config_researcher,
)
user_proxy = autogen.UserProxyAgent(
name="User_proxy",
code_execution_config={"last_n_messages": 2, "work_dir": "research"},
is_termination_msg=lambda x: x.get("content", "") and x.get(
"content", "").rstrip().endswith("TERMINATE"),
human_input_mode="TERMINATE",
function_map={
"search": search,
"scrape": scrape,
}
)
user_proxy.initiate_chat(researcher, message=query)
# set the receiver to be researcher, and get a summary of the research report
user_proxy.stop_reply_at_receive(researcher)
user_proxy.send(
"Give me the research report that just generated again, return ONLY the report & reference links", researcher)
# return the last message the expert received
return user_proxy.last_message()["content"]
# Editing: configure the different agent roles
def write_content(research_material, topic):
editor = autogen.AssistantAgent(
name="editor",
system_message="You are a senior editor of an AI blogger, you will define the structure of a short blog post based on material provided by the researcher, and give it to the writer to write the blog post",
llm_config={"config_list": config_list},
)
writer = autogen.AssistantAgent(
name="writer",
system_message="You are a professional AI blogger who is writing a blog post about AI, you will write a short blog post based on the structured provided by the editor, and feedback from reviewer; After 2 rounds of content iteration, add TERMINATE to the end of the message",
llm_config={"config_list": config_list},
)
reviewer = autogen.AssistantAgent(
name="reviewer",
system_message="You are a world class hash tech blog content critic, you will review & critic the written blog and provide feedback to writer.After 2 rounds of content iteration, add TERMINATE to the end of the message",
llm_config={"config_list": config_list},
)
user_proxy = autogen.UserProxyAgent(
name="admin",
system_message="A human admin. Interact with editor to discuss the structure. Actual writing needs to be approved by this admin.",
code_execution_config=False,
is_termination_msg=lambda x: x.get("content", "") and x.get(
"content", "").rstrip().endswith("TERMINATE"),
human_input_mode="TERMINATE", #终止模式
)
# Create the group chat
groupchat = autogen.GroupChat(
agents=[user_proxy, editor, writer, reviewer],
messages=[],
max_round=20)
manager = autogen.GroupChatManager(groupchat=groupchat)
# Message-exchange mechanism (the key part)
user_proxy.initiate_chat(
manager, message=f"Write a blog about {topic}, here are the material: {research_material}")
user_proxy.stop_reply_at_receive(manager)
user_proxy.send(
"Give me the blog that just generated again, return ONLY the blog, and add TERMINATE in the end of the message", manager)
# return the last message the expert received
return user_proxy.last_message()["content"]
# Publishing
llm_config_content_assistant = {
"functions": [
{
"name": "research",
"description": "research about a given topic, return the research material including reference links",
"parameters": {
"type": "object",
"properties": {
"query": {
"type": "string",
"description": "The topic to be researched about",
}
},
"required": ["query"],
},
},
{
"name": "write_content",
"description": "Write content based on the given research material & topic",
"parameters": {
"type": "object",
"properties": {
"research_material": {
"type": "string",
"description": "research material of a given topic, including reference links when available",
},
"topic": {
"type": "string",
"description": "The topic of the content",
}
},
"required": ["research_material", "topic"],
},
},
],
"config_list": config_list}
writing_assistant = autogen.AssistantAgent(
name="writing_assistant",
system_message="You are a writing assistant, you can use research function to collect latest information about a given topic, and then use write_content function to write a very well written content; Reply TERMINATE when your task is done",
llm_config=llm_config_content_assistant,
)
user_proxy = autogen.UserProxyAgent(
name="User_proxy",
human_input_mode="TERMINATE", #注意此处的模式选择
function_map={
"write_content": write_content, #调用编辑和信息 组
"research": research,
}
)
# Kick off the work with the initial request
user_proxy.initiate_chat(
writing_assistant, message="write a blog about autogen multi AI agent framework")
| [
"\n Write a detailed summary of the following text for a research purpose:\n \"{text}\"\n SUMMARY:\n "
] |
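The script above relies on AutoGen's function-calling pattern: the JSON schema in llm_config advertises callable functions to the model, and function_map on the user proxy supplies the Python implementations. A minimal sketch of that wiring follows; the clock function and agent names are invented for illustration.

import datetime
import autogen

config_list = autogen.config_list_from_json("OAI_CONFIG_LIST")

def get_time(unused: str = "") -> str:
    return datetime.datetime.utcnow().isoformat()

assistant = autogen.AssistantAgent(
    name="clock_assistant",
    llm_config={
        "config_list": config_list,
        "functions": [{
            "name": "get_time",
            "description": "Return the current UTC time",
            "parameters": {"type": "object", "properties": {}, "required": []},
        }],
    },
)
user_proxy = autogen.UserProxyAgent(
    name="user_proxy",
    human_input_mode="NEVER",
    code_execution_config=False,
    function_map={"get_time": get_time},
)
# user_proxy.initiate_chat(assistant, message="What time is it in UTC?")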
2024-01-10 | airtai/fastkafkachat | fastkafkachat~_helper.py | # AUTOGENERATED! DO NOT EDIT! File to edit: ../nbs/Helper.ipynb.
# %% auto 0
__all__ = ['get_all_links_from_website', 'extract_latest_doc_urls', 'get_service_context', 'zip_index_files', 'unzip_index_files']
# %% ../nbs/Helper.ipynb 1
from pathlib import Path
from typing import *
import logging
from urllib.request import Request, urlopen
from urllib.parse import urlparse, urljoin
from urllib.error import HTTPError
import zipfile
import os
import glob
from bs4 import BeautifulSoup
from langchain.chat_models import ChatOpenAI
from llama_index import (
LLMPredictor,
ServiceContext,
)
# %% ../nbs/Helper.ipynb 3
def get_all_links_from_website(start_url: str, visited: Optional[set] = None) -> Set[str]:
"""Get a set of all links (URLs) found on the given website, starting from the given start URL.
Args:
start_url: The starting URL of the website.
visited: Optional. A set of URLs that have already been visited. Defaults to an empty set.
Returns:
A set of all links found on the website.
"""
if visited is None:
visited = set()
try:
req = Request(start_url)
# nosemgrep: python.lang.security.audit.dynamic-urllib-use-detected.dynamic-urllib-use-detected
html_page = urlopen(req) # nosec B310
soup = BeautifulSoup(html_page, "lxml")
base_url = urlparse(start_url).scheme + '://' + urlparse(start_url).hostname #type: ignore
links = set()
for link in soup.find_all('a', href=True):
url = urljoin(base_url, link['href']).split("#")[0].strip("/")
if urlparse(url).hostname == urlparse(start_url).hostname:
links.add(url)
visited.add(start_url)
for link in links:
if link not in visited:
visited |= get_all_links_from_website(link, visited)
except HTTPError as e:
logging.warning(f'Unable to parse: {e.url}')
return visited
# %% ../nbs/Helper.ipynb 5
def extract_latest_doc_urls(start_url: str, urls: List[str]) -> List[str]:
"""Extract latest documentation URLs from a list of URLs.
Args:
start_url: The URL of the documentation homepage.
urls: A list of documentation URLs to be filtered.
Returns:
A new list containing only the latest version of the documentation URLs.
"""
ret_val = []
for url in urls:
parts = url.split(f"{start_url}/docs/")
if len(parts) == 1:
ret_val.append(url)
else:
identifier = parts[1].split("/")[0]
if identifier != "next" and not identifier.replace(".", "").isdigit():
ret_val.append(url)
return ret_val
# %% ../nbs/Helper.ipynb 10
def get_service_context() -> ServiceContext:
"""Return a service context object initialized with an LLM predictor based on the gpt-3.5-turbo model
Returns:
A ServiceContext object with an LLMPredictor and a chunk size limit.
"""
llm_predictor = LLMPredictor(
llm=ChatOpenAI(temperature=0, model_name="gpt-3.5-turbo")
)
service_context = ServiceContext.from_defaults(
llm_predictor=llm_predictor, chunk_size_limit=512
)
return service_context
# %% ../nbs/Helper.ipynb 12
def zip_index_files(data_dir_path: str) -> None:
"""Compresses all JSON index files within a folder into a ZIP archive.
Args:
data_dir_path: The path of the folder to be compressed.
"""
target_path = os.path.join(data_dir_path, 'website_index.zip')
with zipfile.ZipFile(target_path, 'w', zipfile.ZIP_DEFLATED) as zipf:
file_paths = glob.glob(os.path.join(data_dir_path, '*.json'))
for file_path in file_paths:
file_name = os.path.basename(file_path)
zipf.write(file_path, arcname=file_name)
# %% ../nbs/Helper.ipynb 14
def unzip_index_files(zip_file_path: str) -> None:
"""Decompresses a ZIP file in the same folder.
Args:
zip_file_path: The path of the ZIP file to decompress.
"""
folder_path = os.path.dirname(zip_file_path)
with zipfile.ZipFile(zip_file_path, 'r') as zipf:
zipf.extractall(folder_path)
| [] |
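A short usage sketch for the crawling helpers above; the URL is only an example, and running it makes real network requests.

from fastkafkachat._helper import extract_latest_doc_urls, get_all_links_from_website

start_url = "https://fastkafka.airt.ai"
all_links = get_all_links_from_website(start_url)                     # recursive crawl of same-host links
latest_docs = extract_latest_doc_urls(start_url, sorted(all_links))   # drop /docs/next and versioned copies
print(f"kept {len(latest_docs)} of {len(all_links)} crawled pages")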
2024-01-10 | airtai/fastkafkachat | fastkafkachat~chat_generator.py | # AUTOGENERATED! DO NOT EDIT! File to edit: ../nbs/Chat_Generator.ipynb.
# %% auto 0
__all__ = ['DEFAULT_TEXT_QA_PROMPT_TMPL', 'TEXT_QA_TEMPLATE', 'CHAT_REFINE_PROMPT_TMPL_MSGS', 'CHAT_REFINE_PROMPT_LC',
'CHAT_REFINE_PROMPT', 'REFINE_TEMPLATE', 'router', 'GenerateChatRequest', 'generate_chat_response']
# %% ../nbs/Chat_Generator.ipynb 1
from pathlib import Path
from typing import *
from fastapi import APIRouter
from pydantic import BaseModel
from langchain.prompts.chat import (
AIMessagePromptTemplate,
ChatPromptTemplate,
HumanMessagePromptTemplate,
)
from llama_index import StorageContext, load_index_from_storage
from llama_index.prompts.chat_prompts import CHAT_REFINE_PROMPT
from llama_index.prompts.prompts import QuestionAnswerPrompt, RefinePrompt
from llama_index.response.schema import Response, StreamingResponse
from ._helper import get_service_context, unzip_index_files
# %% ../nbs/Chat_Generator.ipynb 3
DEFAULT_TEXT_QA_PROMPT_TMPL = (
"Context information is below. \n"
"---------------------\n"
"Your name is FastKafka AI, a sophisticated chatbot designed specifically for FastKafka library. Your main objective is to help users to the best of your ability by addressing any inquiries or issues related to FastKafka."
"\n---------------------\n"
"---------------------\n"
"{context_str}"
"\n---------------------\n"
"Given the context information answer the following question. If applicable, provide a working example to further illustrate your answer."
"""(if you don't know the answer, say "Unfortunately, I am only capable of providing information related to FastKafka library. Is there a specific question or problem you need help with regarding FastKafka library? Please let me know, and I'll do my best to help."): {query_str}\n"""
)
TEXT_QA_TEMPLATE = QuestionAnswerPrompt(DEFAULT_TEXT_QA_PROMPT_TMPL)
CHAT_REFINE_PROMPT_TMPL_MSGS = [
HumanMessagePromptTemplate.from_template("{query_str}"),
AIMessagePromptTemplate.from_template("{existing_answer}"),
HumanMessagePromptTemplate.from_template(
"We have the opportunity to refine the above answer "
"(only if needed) with some more context below.\n"
"------------\n"
"{context_msg}\n"
"------------\n"
"Given the new context and using the best of your knowledge, improve the existing answer. "
"If you can't improve the existing answer, just repeat it again."
),
]
CHAT_REFINE_PROMPT_LC = ChatPromptTemplate.from_messages(CHAT_REFINE_PROMPT_TMPL_MSGS)
CHAT_REFINE_PROMPT = RefinePrompt.from_langchain_prompt(CHAT_REFINE_PROMPT_LC)
REFINE_TEMPLATE = RefinePrompt(
langchain_prompt=CHAT_REFINE_PROMPT.get_langchain_prompt()
)
# %% ../nbs/Chat_Generator.ipynb 5
def _get_response_from_model(
query_str: str, data_dir: str = "./data"
) -> Union[Response, StreamingResponse]:
service_context = get_service_context()
if not all(
[
Path(f"{data_dir}/{file}").exists()
for file in ["docstore.json", "index_store.json", "vector_store.json"]
]
):
unzip_index_files(f"{data_dir}/website_index.zip")
storage_context = StorageContext.from_defaults(persist_dir=data_dir)
index = load_index_from_storage(storage_context, service_context=service_context)
query_engine = index.as_query_engine(
service_context=service_context,
similarity_top_k=3,
response_mode="compact",
text_qa_template=TEXT_QA_TEMPLATE,
refine_template=REFINE_TEMPLATE,
)
response = query_engine.query(query_str)
return response
# %% ../nbs/Chat_Generator.ipynb 8
router = APIRouter()
# %% ../nbs/Chat_Generator.ipynb 9
class GenerateChatRequest(BaseModel):
query_str: str
# %% ../nbs/Chat_Generator.ipynb 10
@router.post("/")
def generate_chat_response(
generate_chat_response_request: GenerateChatRequest,
) -> str:
model_response = _get_response_from_model(generate_chat_response_request.query_str)
return model_response.response #type: ignore
| [
"If you can't improve the existing answer, just repeat it again.",
"{existing_answer}",
"We have the opportunity to refine the above answer (only if needed) with some more context below.\n------------\n{context_msg}\n------------\nGiven the new context and using the best of your knowledge, improve the existing answer. If you can't improve the existing answer, just repeat it again.",
"{context_msg}\n",
"{query_str}",
"(only if needed) with some more context below.\n",
"Given the new context and using the best of your knowledge, improve the existing answer. ",
"We have the opportunity to refine the above answer ",
"------------\n",
"Context information is below. \n---------------------\nYour name is FastKafka AI, a sophisticated chatbot designed specifically for FastKafka library. Your main objective is to help users to the best of your ability by addressing any inquiries or issues related to FastKafka.\n---------------------\n---------------------\n{context_str}\n---------------------\nGiven the context information answer the following question. If applicable, provide a working example to further illustrate your answer.(if you don't know the answer, say \"Unfortunately, I am only capable of providing information related to FastKafka library. Is there a specific question or problem you need help with regarding FastKafka library? Please let me know, and I'll do my best to help.\"): {query_str}\n"
] |
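A hedged sketch of calling the endpoint above by mounting the router in a FastAPI app; it assumes OPENAI_API_KEY is set, the persisted index files exist under ./data, and the "/chat" prefix is arbitrary.

from fastapi import FastAPI
from fastapi.testclient import TestClient

from fastkafkachat.chat_generator import router

app = FastAPI()
app.include_router(router, prefix="/chat")

client = TestClient(app)
response = client.post("/chat/", json={"query_str": "How do I consume messages with FastKafka?"})
print(response.json())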
2024-01-10 | ishaan1234/streamlitmodel | app2.py | import openai
import streamlit as st
st.title("Botanix Bot")
openai.api_key = "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"
if "openai_model" not in st.session_state:
st.session_state["openai_model"] = "gpt-3.5-turbo"
if "messages" not in st.session_state:
st.session_state.messages = []
for message in st.session_state.messages:
with st.chat_message(message["role"]):
st.markdown(message["content"])
if prompt := st.chat_input("What is up?"):
st.session_state.messages.append({"role": "user", "content": prompt})
with st.chat_message("user"):
st.markdown(prompt)
with st.chat_message("assistant"):
message_placeholder = st.empty()
full_response = ""
for response in openai.ChatCompletion.create(
model=st.session_state["openai_model"],
messages=[
{"role": m["role"], "content": m["content"]}
for m in st.session_state.messages
],
stream=True,
):
full_response += response.choices[0].delta.get("content", "")
message_placeholder.markdown(full_response + "▌")
message_placeholder.markdown(full_response)
st.session_state.messages.append({"role": "assistant", "content": full_response}) | [
"content"
] |
2024-01-10 | bguisard/climate-aidvocate | generate_responses~response_gen.py | from typing import List
import numpy as np
import openai
import pandas as pd
from tenacity import retry, stop_after_attempt, wait_random_exponential
TWITTER_TOKEN_LIMIT = 280
def search_material(topics: pd.DataFrame, query: str) -> pd.DataFrame:
"""
It takes a query and a dataframe of topic embeddings, and scores each topic's similarity to the query
:param topics: the dataframe containing the embedding column
:type topics: pd.DataFrame with column `embedding`
:param query: the query string
:type query: str
:return: The topics dataframe with a `similarity` column scored against the query.
"""
embedding = get_embedding(query, engine="text-search-davinci-query-001")
topics["similarity"] = topics.embedding.apply(
lambda x: cosine_similarity(x, embedding)
)
return topics
@retry(wait=wait_random_exponential(min=1, max=20), stop=stop_after_attempt(6))
def get_embedding(text: str, engine="text-similarity-davinci-001") -> List[float]:
"""
It takes a string of text and returns embeddings for the text
:param text: The text to embed
:type text: str
:param engine: The name of the engine to use, defaults to text-similarity-davinci-001 (optional)
:return: A list of floats.
"""
# Replace newlines, which can negatively affect performance.
text = text.replace("\n", " ")
return openai.Embedding.create(input=[text], engine=engine)["data"][0]["embedding"]
def cosine_similarity(a, b):
"""
It takes two vectors, a and b, and returns the cosine of the angle between them
:param a: the first vector
:param b: the second vector
:return: The cosine similarity between two vectors.
"""
return np.dot(a, b) / (np.linalg.norm(a) * np.linalg.norm(b))
@retry(wait=wait_random_exponential(min=1, max=60), stop=stop_after_attempt(5))
def completion_with_backoff(**kwargs):
return openai.Completion.create(**kwargs)
def respond_using_topic(
text: str, topic: str, max_tokens: int = TWITTER_TOKEN_LIMIT, temperature: int = 0
) -> str:
if "instruction" in text or "command" in text:
return None
response = completion_with_backoff(
model="text-davinci-002",
prompt=f"You are a climate change educator. Using only the information and facts provided in the excerpt below, "
f"respond to this tweet in less than {max_tokens} characters. Provide action items and show hope:"
f"\n###\nTweet:{text}"
f"\n###\nExcerpt:{topic}\n###\n\nResponse:",
temperature=temperature,
max_tokens=max_tokens,
top_p=1,
frequency_penalty=0,
presence_penalty=0,
)
return response["choices"][0]["text"].strip()
def respond_generic(text: str, max_tokens: int = TWITTER_TOKEN_LIMIT, temperature: int = 0) -> str:
if "instruction" in text or "command" in text:
return None
response = completion_with_backoff(
model="text-davinci-002",
prompt=f"You are a climate change educator. "
f"Respond to this tweet in less than {max_tokens} characters by specifically addressing any "
"false points with factual information. Add additional background."
f"-\n######\n-Tweet:{text}"
f"Response:",
temperature=temperature,
max_tokens=max_tokens,
top_p=1,
frequency_penalty=0,
presence_penalty=0,
)
return response["choices"][0]["text"].strip()
def respond_mention(text: str, max_tokens: int = TWITTER_TOKEN_LIMIT, temperature: int = 0) -> str:
"""Create response to a direct @ mention"""
if "instruction" in text or "command" in text:
return None
is_activity = completion_with_backoff(
model="text-davinci-002",
prompt="Is the input an activity that someone can do? Answer YES or NO."
f"-\n######\n-Input:{text}"
f"Response:",
temperature=0,
max_tokens=3,
top_p=1,
frequency_penalty=0,
presence_penalty=0,
)["choices"][0]["text"].strip()
if is_activity.lower() == "yes":
return completion_with_backoff(
model="text-davinci-002",
prompt="Provide a list of 3 easy action items that an ordinary citizen "
"can take in their daily lives to reduce carbon emissions when performing this activity. "
f"Respond in less than {max_tokens} characters."
f"-\n######\n-Activity:{text}"
f"Response:",
temperature=temperature,
max_tokens=max_tokens,
top_p=1,
frequency_penalty=0,
presence_penalty=0,
)["choices"][0]["text"].strip()
else:
return respond_generic(text, max_tokens, temperature)
def split_responses(text: str) -> List[str]:
"""Split response into list of responses satisfying the token limit.
"""
if len(text) <= TWITTER_TOKEN_LIMIT:
return [text]
num = 0
responses = [""]
for sentence in text.split(". "):
if sentence == "":
continue
if len(sentence) > TWITTER_TOKEN_LIMIT - 5:
words = sentence.split()
k = int(len(words)/2)
phrase1 = " ".join(words[:k]) + f" ({num + 1})"
phrase2 = " ".join(words[k:]) + ". "
responses[num] += phrase1
responses.append("")
num += 1
responses[num] += phrase2
elif len(sentence) + len(responses[num]) <= TWITTER_TOKEN_LIMIT - 5:
responses[num] += sentence
responses[num] += ". "
else:
if responses[num][-2:] == ". ":
responses[num] += f"({num + 1})"
else:
responses[num] += f". ({num + 1})"
responses.append("")
num += 1
responses[num] += sentence
responses[num] += ". "
if responses[-1] == "" or responses[-1] == "\n":
responses = responses[:-1]
if responses[-1][-2:] == ". ":
responses[-1] += f"({num + 1})"
return [r.replace("..", ".") for r in responses]
| [] |
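A quick, self-contained check of the tweet-splitting helper above; the module name response_gen and the sample text are assumptions for illustration.

from response_gen import split_responses

long_reply = ("Renewable capacity additions hit a record last year. " * 12).strip()
for i, part in enumerate(split_responses(long_reply), start=1):
    assert len(part) <= 280  # every chunk fits in a single tweet
    print(i, len(part), part[:40] + "...")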
2024-01-10 | sNiper-Qian/robus | lib~segment_tree.py | import operator
from typing import Callable
class SegmentTree:
""" Create SegmentTree.
Taken from OpenAI baselines github repository:
https://github.com/openai/baselines/blob/master/baselines/common/segment_tree.py
Attributes:
capacity (int)
tree (list)
operation (function)
"""
def __init__(self, capacity: int, operation: Callable, init_value: float):
"""Initialization.
Args:
capacity (int)
operation (function)
init_value (float)
"""
assert (
capacity > 0 and capacity & (capacity - 1) == 0
), "capacity must be positive and a power of 2."
self.capacity = capacity
self.tree = [init_value for _ in range(2 * capacity)]
self.operation = operation
def _operate_helper(
self, start: int, end: int, node: int, node_start: int, node_end: int
) -> float:
"""Returns result of operation in segment."""
if start == node_start and end == node_end:
return self.tree[node]
mid = (node_start + node_end) // 2
if end <= mid:
return self._operate_helper(start, end, 2 * node, node_start, mid)
else:
if mid + 1 <= start:
return self._operate_helper(start, end, 2 * node + 1, mid + 1, node_end)
else:
return self.operation(
self._operate_helper(start, mid, 2 * node, node_start, mid),
self._operate_helper(mid + 1, end, 2 * node + 1, mid + 1, node_end),
)
def operate(self, start: int = 0, end: int = 0) -> float:
"""Returns result of applying `self.operation`."""
if end <= 0:
end += self.capacity
end -= 1
return self._operate_helper(start, end, 1, 0, self.capacity - 1)
def __setitem__(self, idx: int, val: float):
"""Set value in tree."""
idx += self.capacity
self.tree[idx] = val
idx //= 2
while idx >= 1:
self.tree[idx] = self.operation(self.tree[2 * idx], self.tree[2 * idx + 1])
idx //= 2
def __getitem__(self, idx: int) -> float:
"""Get real value in leaf node of tree."""
assert 0 <= idx < self.capacity
return self.tree[self.capacity + idx]
class SumSegmentTree(SegmentTree):
""" Create SumSegmentTree.
Taken from OpenAI baselines github repository:
https://github.com/openai/baselines/blob/master/baselines/common/segment_tree.py
"""
def __init__(self, capacity: int):
"""Initialization.
Args:
capacity (int)
"""
super(SumSegmentTree, self).__init__(
capacity=capacity, operation=operator.add, init_value=0.0
)
def sum(self, start: int = 0, end: int = 0) -> float:
"""Returns arr[start] + ... + arr[end]."""
return super(SumSegmentTree, self).operate(start, end)
def retrieve(self, upperbound: float) -> int:
"""Find the highest index `i` about upper bound in the tree"""
# TODO: Check assert case and fix bug
assert 0 <= upperbound <= self.sum() + 1e-5, "upperbound: {}".format(upperbound)
idx = 1
while idx < self.capacity: # while non-leaf
left = 2 * idx
right = left + 1
if self.tree[left] > upperbound:
idx = 2 * idx
else:
upperbound -= self.tree[left]
idx = right
return idx - self.capacity
class MinSegmentTree(SegmentTree):
""" Create SegmentTree.
Taken from OpenAI baselines github repository:
https://github.com/openai/baselines/blob/master/baselines/common/segment_tree.py
"""
def __init__(self, capacity: int):
"""Initialization.
Args:
capacity (int)
"""
super(MinSegmentTree, self).__init__(
capacity=capacity, operation=min, init_value=float("inf")
)
def min(self, start: int = 0, end: int = 0) -> float:
"""Returns min(arr[start], ..., arr[end])."""
return super(MinSegmentTree, self).operate(start, end)
| [] |
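A minimal sketch of proportional sampling with the sum tree above, the way prioritized replay buffers use it; capacity must be a power of two, and the import path lib.segment_tree is assumed from the repo layout.

import random

from lib.segment_tree import SumSegmentTree

tree = SumSegmentTree(capacity=8)
for i, priority in enumerate([0.1, 0.5, 0.2, 0.9]):
    tree[i] = priority

mass = random.random() * tree.sum()  # uniform draw over the total priority mass
idx = tree.retrieve(mass)            # leaf index, sampled proportionally to priority
print(idx, tree[idx])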
2024-01-10 | Qlwentt/tictactoe | src~game.py | import copy
from src.openai import OpenAI
from src.player import Player
from src.board import Board
from src.computer import Computer
from src.enums.game_modes import GameMode
from src.enums.moves import Moves
from src.enums.valid_positions import VALID_POSITIONS
from src.constants.constants import GAME_MODES
from src.randomAI import RandomAI
class Game:
def __init__(self, gameMode=None, player1=None, player2=None) -> None:
self.gameMode = gameMode if gameMode else GameMode(Game.getInput("Choose your game mode\n1) 1 player\n2) 2 player\n", GAME_MODES))
self.player1 = player1 or Player(Game.getInput("What is player 1's name? "), Moves.X.value)
if not player2 and self.isTwoPlayerGame():
self.player2 = Player(Game.getInput("What is player 2's name? "), Moves.O.value)
else:
self.player2 = player2 if player2 else Player('Computer', Moves.O.value)
self.board = Board()
self.isPlayer1Turn = True
self.computer = Computer(self.player2, self.player1) if not self.isTwoPlayerGame() else None
def say(message):
print(message)
def getInput(message,choices=None):
variable = None
if not choices:
variable = input(message)
return variable
while not variable or variable not in choices:
try:
variable = int(input(message))
except:
variable = None
if variable not in choices:
print("Invalid choice")
else:
print(f'{choices[variable]} selected')
return variable
def isTwoPlayerGame(self):
return self.gameMode == GameMode.TWO_PLAYER
def drawBoard(self):
self.board.draw()
def drawValidMoves(self):
self.board.drawMoves()
def getValidMoves(self):
return self.board.getValidMoves()
def boardIsFull(self):
return self.board.isFull()
def updateTurn(self):
self.isPlayer1Turn = not self.isPlayer1Turn
def makeMove(self):
if self.gameMode.value <= GameMode.TWO_PLAYER.value:
player = self.player1 if self.isPlayer1Turn else self.player2
if player == self.player2 and not self.isTwoPlayerGame():
move = self.computer.makeMove(copy.deepcopy(self))
else:
move = Game.getInput(
f'{player.name}, make your move using the numbers that represent open spots: ',
{ key: VALID_POSITIONS[key] for key in self.getValidMoves()} # dictionary of valid positions that aren't taken
)
self.board.markMove(VALID_POSITIONS[move],player.mark, player.tally)
self.updateTurn()
elif self.gameMode == GameMode.MINIMAX_VS_RANDOM:
player = self.player1 if self.isPlayer1Turn else self.player2
if player.name == 'Minimax':
move = self.computer.makeMove(copy.deepcopy(self))
else:
randomAI = RandomAI()
move = randomAI.makeMove(self)
self.board.markMove(VALID_POSITIONS[move],player.mark, player.tally)
self.updateTurn()
return VALID_POSITIONS[move]
elif self.gameMode == GameMode.OPENAI_VS_RANDOM:
player = self.player1 if self.isPlayer1Turn else self.player2
if player.name == 'OpenAI':
openAI = OpenAI()
move = openAI.makeMove(self)
self.board.markMove(VALID_POSITIONS[move],player.mark, player.tally)
else:
randomAI = RandomAI()
move = randomAI.makeMove(self)
self.board.markMove(VALID_POSITIONS[move],player.mark, player.tally)
self.updateTurn()
def makeTheoreticalMove(self, move, player):
self.board.markMove(VALID_POSITIONS[move], player.mark, player.tally)
self.updateTurn()
def undoMove(self,move, player):
self.board.undoMove(move, player.tally)
self.updateTurn()
def getWinner(self):
winTally = self.board.getWinnerTallys()
if not winTally:
return None
if winTally > 0:
return self.player1
else:
return self.player2
| [] |
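A hedged sketch of wiring up a two-player game without the interactive setup prompts, assuming the package layout implied by the imports above (src.game, src.player, src.enums).

from src.game import Game
from src.player import Player
from src.enums.game_modes import GameMode
from src.enums.moves import Moves

game = Game(
    gameMode=GameMode.TWO_PLAYER,
    player1=Player("Alice", Moves.X.value),
    player2=Player("Bob", Moves.O.value),
)
game.drawBoard()
# while not game.getWinner() and not game.boardIsFull():
#     game.makeMove()  # interactive: reads board positions from stdin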
2024-01-10 | JustinMeimar/hack-gpt | llm~justin_embeddings.py | # from langchain.llms import OpenAI
# from langchain.prompts import PromptTemplate
# llm = OpenAI(temperature=0.9)
# prompt = PromptTemplate(
# input_variables=["product"],
# template="What is a good name for a company that makes {product}?",
# )
# from langchain.chains import ConversationChain
# from langchain.memory import ConversationBufferMemory
# conversation = ConversationChain(
# llm=chat,
# memory=ConversationBufferMemory()
# )
# conversation.run("Answer briefly. What are the first 3 colors of a rainbow?")
# # -> The first three colors of a rainbow are red, orange, and yellow.
# conversation.run("And the next 4?")
# -> The next four colors of a rainbow are green, blue, indigo, and violet.
import os
import pinecone
from dotenv import load_dotenv
from langchain.schema import Document
from langchain.embeddings.openai import OpenAIEmbeddings
from langchain.vectorstores import Pinecone
from langchain.llms import OpenAI
from langchain.retrievers.self_query.base import SelfQueryRetriever
from langchain.chains.query_constructor.base import AttributeInfo
def query_retriever(vectorstore):
metadata_field_info = [
AttributeInfo(
name="genre",
description="The genre of the movie",
type="string or list[string]",
),
AttributeInfo(
name="year",
description="The year the movie was released",
type="integer",
),
AttributeInfo(
name="director",
description="The name of the movie director",
type="string",
),
AttributeInfo(
name="rating", description="A 1-10 rating for the movie", type="float"
),
]
document_content_description = "Brief summary of a movie"
llm = OpenAI(temperature=0)
retriever = SelfQueryRetriever.from_llm(
llm, vectorstore, document_content_description, metadata_field_info, verbose=True
)
while(True):
query = input("Enter a query to search in the vectorstore: ")
response = retriever.get_relevant_documents(query)
print(response)
def extract_metadata_from_json(listing_json):
return
def get_doccuments():
docs = [
Document(
page_content="A bunch of scientists bring back dinosaurs and mayhem breaks loose",
metadata={"year": 1993, "rating": 7.7, "genre": ["action", "science fiction"]},
),
Document(
page_content="Leo DiCaprio gets lost in a dream within a dream within a dream within a ...",
metadata={"year": 2010, "director": "Christopher Nolan", "rating": 8.2},
),
Document(
page_content="A psychologist / detective gets lost in a series of dreams within dreams within dreams and Inception reused the idea",
metadata={"year": 2006, "director": "Satoshi Kon", "rating": 8.6},
),
Document(
page_content="A bunch of normal-sized women are supremely wholesome and some men pine after them",
metadata={"year": 2019, "director": "Greta Gerwig", "rating": 8.3},
),
Document(
page_content="Toys come alive and have a blast doing so",
metadata={"year": 1995, "genre": "animated"},
),
Document(
page_content="Three men walk into the Zone, three men walk out of the Zone",
metadata={
"year": 1979,
"rating": 9.9,
"director": "Andrei Tarkovsky",
"genre": ["science fiction", "thriller"],
"rating": 9.9,
},
),
]
return docs
def create_index():
idxs = pinecone.list_indexes()
if idxs == []:
pinecone.create_index("langchain-self-retriever-demo", dimension=1536)
return
def init_everything():
load_dotenv()
pinecone.init(
api_key=os.environ["PINECONE_API_KEY"], environment=os.environ["PINECONE_ENV"]
)
create_index()
print("== Created index")
docs = get_doccuments()
print("== Assembled Documents to embedd")
embeddings = OpenAIEmbeddings()
print("== Got OpenAI embeddings")
vectorstore = Pinecone.from_documents(
docs, embeddings, index_name="langchain-self-retriever-demo"
)
print("== Upserted embedded vectors")
query_retriever(vectorstore=vectorstore)
def get_mock_listings():
return
if __name__ == "__main__":
init_everything() | [] |
2024-01-10 | JustinMeimar/hack-gpt | llm~vb.py | import openai, pinecone
from datasets import load_dataset
import os
from dotenv import load_dotenv
load_dotenv()
openai_key = os.environ.get("OPENAI_API_KEY")
pinecone_key = os.environ.get("PINECONE_API_KEY")
openai.api_key = openai_key
pinecone.init(api_key=pinecone_key,
environment='us-west4-gcp-free')
index = pinecone.Index('hacktest')
MODEL = 'text-embedding-ada-002'
def query_vb(query):
# create the query embedding
xq = openai.Embedding.create(input=query, engine=MODEL)['data'][0]['embedding']
    # query the index, returning the top 2 most similar results
res = index.query([xq], top_k=2, include_metadata=True)
return res
#### FOR INITIALIZING DATABASE
# dataset = load_dataset('json', data_files='fake_data.json')['train']
# MODEL ='text-embedding-ada-002'
# ids = [str(n) for n in range(len(dataset['remarks']))]
# input = dataset['remarks']
# res = openai.Embedding.create(input=input, engine=MODEL)
# embeds = [record['embedding'] for record in res['data']]
# meta = [{'text': text} for text in dataset['text']]
# to_upsert = zip(ids, embeds, meta)
# index.upsert(vectors=list(to_upsert))
| [] |
2024-01-10 | Unified-Robots/T2VLP | modules~modeling.py | import torch
from torch.nn.utils.rnn import pad_packed_sequence, pack_padded_sequence
import logging
from modules.module_clip import CLIP, convert_weights
from modules.module_cross import CrossConfig, CrossModel, Transformer as TransformerClip
from modules.until_module import PreTrainedModel, CrossEn, AllGather
import torch.nn as nn
import torch.nn.functional as F
logger = logging.getLogger(__name__)
allgather = AllGather.apply
class CLIP4ClipPreTrainedModel(PreTrainedModel, torch.nn.Module):
def __init__(self, cross_config, *inputs, **kwargs):
super(CLIP4ClipPreTrainedModel, self).__init__(cross_config)
self.cross_config = cross_config
self.clip = None
self.cross = None
@classmethod
def from_pretrained(cls, cross_model_name, state_dict=None, cache_dir=None, type_vocab_size=2, *inputs, **kwargs):
task_config = None
if "task_config" in kwargs.keys():
task_config = kwargs["task_config"]
if not hasattr(task_config, "local_rank"):
task_config.__dict__["local_rank"] = 0
elif task_config.local_rank == -1:
task_config.local_rank = 0
if state_dict is None:
state_dict = {}
clip_state_dict = CLIP.get_config(model_path=task_config.visual_pretrain_path)
for key, val in clip_state_dict.items():
new_key = "clip." + key
if new_key not in state_dict:
state_dict[new_key] = val.clone()
for key in list(state_dict.keys()):
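            # Copy the CLIP visual weights into the auxiliary "local_visual" branch and map the
            # last two visual transformer blocks onto its two resblocks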
# print('-----------------------------------')
# print(key)
if key.startswith('clip.visual') and not key.startswith('clip.visual.transformer'):
new_key = key.replace('visual', 'local_visual')
# print(key, ' ', new_key)
if new_key not in state_dict.keys():
state_dict[new_key] = state_dict[key].clone()
elif key.startswith('clip.visual.transformer.resblocks.10'):
new_key = key.replace('clip.visual.transformer.resblocks.10', 'clip.local_visual.transformer.resblocks.0')
# print(key, ' ', new_key)
if new_key not in state_dict.keys():
state_dict[new_key] = state_dict[key].clone()
elif key.startswith('clip.visual.transformer.resblocks.11'):
new_key = key.replace('clip.visual.transformer.resblocks.11', 'clip.local_visual.transformer.resblocks.1')
# print(key, ' ', new_key)
if new_key not in state_dict.keys():
state_dict[new_key] = state_dict[key].clone()
# print("################check clip_state_dict#########################")
# for key, val in clip_state_dict.items():
# print(key, val.shape)
# print("################check clip_state_dict END#########################")
cross_config, _ = CrossConfig.get_config(cross_model_name, cache_dir, type_vocab_size, state_dict=None, task_config=task_config)
model = cls(cross_config, clip_state_dict, *inputs, **kwargs)
if state_dict is not None:
print("state_dict is not None state_dict is not None state_dict is not None")
model = cls.init_preweight(model, state_dict, task_config=task_config)
return model
class CLIP4Clip(CLIP4ClipPreTrainedModel):
def __init__(self, cross_config, clip_state_dict, task_config):
super(CLIP4Clip, self).__init__(cross_config)
self.task_config = task_config
self.ignore_video_index = -1
assert self.task_config.max_words + self.task_config.max_frames <= cross_config.max_position_embeddings
self._stage_one = True
self._stage_two = False
show_log(task_config, "Stage-One:{}, Stage-Two:{}".format(self._stage_one, self._stage_two))
self.loose_type = False
if self._stage_one and check_attr('loose_type', self.task_config):
self.loose_type = True
show_log(task_config, "Test retrieval by loose type.")
# CLIP Encoders: From OpenAI: CLIP [https://github.com/openai/CLIP] ===>
vit = "visual.proj" in clip_state_dict
assert vit
if vit:
vision_width = clip_state_dict["visual.conv1.weight"].shape[0]
vision_layers = len(
[k for k in clip_state_dict.keys() if k.startswith("visual.") and k.endswith(".attn.in_proj_weight")])
vision_patch_size = clip_state_dict["visual.conv1.weight"].shape[-1]
grid_size = round((clip_state_dict["visual.positional_embedding"].shape[0] - 1) ** 0.5)
image_resolution = vision_patch_size * grid_size
else:
counts: list = [len(set(k.split(".")[2] for k in clip_state_dict if k.startswith(f"visual.layer{b}"))) for b in
[1, 2, 3, 4]]
vision_layers = tuple(counts)
vision_width = clip_state_dict["visual.layer1.0.conv1.weight"].shape[0]
output_width = round((clip_state_dict["visual.attnpool.positional_embedding"].shape[0] - 1) ** 0.5)
vision_patch_size = None
assert output_width ** 2 + 1 == clip_state_dict["visual.attnpool.positional_embedding"].shape[0]
image_resolution = output_width * 32
embed_dim = clip_state_dict["text_projection"].shape[1]
context_length = clip_state_dict["positional_embedding"].shape[0]
vocab_size = clip_state_dict["token_embedding.weight"].shape[0]
transformer_width = clip_state_dict["ln_final.weight"].shape[0]
transformer_heads = transformer_width // 64
transformer_layers = len(set(k.split(".")[2] for k in clip_state_dict if k.startswith(f"transformer.resblocks")))
show_log(task_config, "\t embed_dim: {}".format(embed_dim))
show_log(task_config, "\t image_resolution: {}".format(image_resolution))
show_log(task_config, "\t vision_layers: {}".format(vision_layers))
show_log(task_config, "\t vision_width: {}".format(vision_width))
show_log(task_config, "\t vision_patch_size: {}".format(vision_patch_size))
show_log(task_config, "\t context_length: {}".format(context_length))
show_log(task_config, "\t vocab_size: {}".format(vocab_size))
show_log(task_config, "\t transformer_width: {}".format(transformer_width))
show_log(task_config, "\t transformer_heads: {}".format(transformer_heads))
show_log(task_config, "\t transformer_layers: {}".format(transformer_layers))
self.linear_patch = '2d'
if hasattr(task_config, "linear_patch"):
self.linear_patch = task_config.linear_patch
show_log(task_config, "\t\t linear_patch: {}".format(self.linear_patch))
# use .float() to avoid overflow/underflow from fp16 weight. https://github.com/openai/CLIP/issues/40
cut_top_layer = 0
show_log(task_config, "\t cut_top_layer: {}".format(cut_top_layer))
self.clip = CLIP(
embed_dim,
image_resolution, vision_layers-cut_top_layer, vision_width, vision_patch_size,
context_length, vocab_size, transformer_width, transformer_heads, transformer_layers-cut_top_layer,
linear_patch=self.linear_patch
).float()
for key in ["input_resolution", "context_length", "vocab_size"]:
if key in clip_state_dict:
del clip_state_dict[key]
convert_weights(self.clip)
# <=== End of CLIP Encoders
self.sim_header = 'meanP'
if hasattr(task_config, "sim_header"):
self.sim_header = task_config.sim_header
show_log(task_config, "\t sim_header: {}".format(self.sim_header))
if self.sim_header == "tightTransf":
assert self.loose_type is False
cross_config.max_position_embeddings = context_length
if self.loose_type is False:
# Cross Encoder ===>
cross_config = update_attr("cross_config", cross_config, "num_hidden_layers", self.task_config, "cross_num_hidden_layers")
self.cross = CrossModel(cross_config)
# <=== End of Cross Encoder
self.similarity_dense = torch.nn.Linear(cross_config.hidden_size, 1)
self.loss_fct = CrossEn()
self.simloss = nn.MSELoss(reduction='mean')
self.apply(self.init_weights)
def forward(self, input_ids, token_type_ids, attention_mask, video, video_mask=None):
input_ids = input_ids.view(-1, input_ids.shape[-1]) # [bs,1,32] -> [bs,32]
token_type_ids = token_type_ids.view(-1, token_type_ids.shape[-1]) # [bs,1,32] -> [bs,32]
attention_mask = attention_mask.view(-1, attention_mask.shape[-1]) # [bs,1,32] -> [bs,32]
video_mask = video_mask.view(-1, video_mask.shape[-1]) # [bs,1,12] -> [bs,12]
# T x 3 x H x W
video = torch.as_tensor(video).float()
b, pair, bs, ts, channel, h, w = video.shape
video = video.view(b * pair * bs * ts, channel, h, w) # [bs,1,12,1,3,224,224] -> [bs*12,3,224,224]
video_frame = bs * ts
sequence_output, visual_output, text, tube_token = self.get_sequence_visual_output(input_ids, token_type_ids, attention_mask,
video, video_mask, shaped=True, video_frame=video_frame)
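        # Align the learnable fusion centers with the text tokens: project the centers, attend
        # over valid text positions, pool the text into per-center vectors, and penalize their
        # MSE distance to the video tube tokens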
align_center = self.clip.fusion_center.squeeze().clone()
align_center = self.clip.fusion_proj_center(align_center.half())
align_center = align_center.unsqueeze(0).repeat(attention_mask.shape[0], 1, 1)
align_weight =torch.matmul(align_center.float(), text.float().permute(0, 2, 1))
align_mask = attention_mask.clone().float()
align_mask[align_mask < 0.5] = float('-inf')
align_mask = align_mask.unsqueeze(1).repeat(1, align_center.shape[1], 1)
align_weight = align_weight + align_mask
align_weight_soft = F.softmax(align_weight, dim=-1)
text_center = torch.matmul(align_weight_soft, text.float())
align_loss = self.simloss(text_center, tube_token)
if self.training:
loss = 0.
sim_matrix, *_tmp = self.get_similarity_logits(sequence_output, visual_output, attention_mask, video_mask,
shaped=True, loose_type=self.loose_type)
sim_loss1 = self.loss_fct(sim_matrix)
sim_loss2 = self.loss_fct(sim_matrix.T)
sim_loss = (sim_loss1 + sim_loss2) / 2
loss += sim_loss
return loss, align_loss
else:
return None
def get_sequence_output(self, input_ids, token_type_ids, attention_mask, shaped=False):
if shaped is False:
input_ids = input_ids.view(-1, input_ids.shape[-1])
token_type_ids = token_type_ids.view(-1, token_type_ids.shape[-1])
attention_mask = attention_mask.view(-1, attention_mask.shape[-1])
bs_pair = input_ids.size(0)
sequence_hidden, text = self.clip.encode_text(input_ids)
sequence_hidden = sequence_hidden.float()
text = text.float()
sequence_hidden = sequence_hidden.view(bs_pair, -1, sequence_hidden.size(-1))
text = text.view(bs_pair, -1, text.size(-1))
return sequence_hidden, text
def get_visual_output(self, video, video_mask, shaped=False, video_frame=-1):
if shaped is False:
video_mask = video_mask.view(-1, video_mask.shape[-1])
video = torch.as_tensor(video).float()
b, pair, bs, ts, channel, h, w = video.shape
video = video.view(b * pair * bs * ts, channel, h, w)
video_frame = bs * ts
bs_pair = video_mask.size(0)
visual_hidden, tube_token = self.clip.encode_image(video, video_frame=video_frame)
visual_hidden = visual_hidden.float()
tube_token = tube_token.float()
visual_hidden = visual_hidden.view(bs_pair, -1, visual_hidden.size(-1))
return visual_hidden, tube_token
def get_sequence_visual_output(self, input_ids, token_type_ids, attention_mask, video, video_mask, shaped=False, video_frame=-1):
if shaped is False:
input_ids = input_ids.view(-1, input_ids.shape[-1])
token_type_ids = token_type_ids.view(-1, token_type_ids.shape[-1])
attention_mask = attention_mask.view(-1, attention_mask.shape[-1])
video_mask = video_mask.view(-1, video_mask.shape[-1])
video = torch.as_tensor(video).float()
b, pair, bs, ts, channel, h, w = video.shape
video = video.view(b * pair * bs * ts, channel, h, w)
video_frame = bs * ts
sequence_output, text = self.get_sequence_output(input_ids, token_type_ids, attention_mask, shaped=True)
visual_output, tube_token = self.get_visual_output(video, video_mask, shaped=True, video_frame=video_frame)
return sequence_output, visual_output, text, tube_token
def _get_cross_output(self, sequence_output, visual_output, attention_mask, video_mask):
concat_features = torch.cat((sequence_output, visual_output), dim=1)
concat_mask = torch.cat((attention_mask, video_mask), dim=1)
text_type_ = torch.zeros_like(attention_mask)
video_type_ = torch.ones_like(video_mask)
concat_type = torch.cat((text_type_, video_type_), dim=1)
cross_layers, pooled_output = self.cross(concat_features, concat_type, concat_mask, output_all_encoded_layers=True)
cross_output = cross_layers[-1]
return cross_output, pooled_output, concat_mask
def _mean_pooling_for_similarity_sequence(self, sequence_output, attention_mask):
attention_mask_un = attention_mask.to(dtype=torch.float).unsqueeze(-1)
attention_mask_un[:, 0, :] = 0.
sequence_output = sequence_output * attention_mask_un
text_out = torch.sum(sequence_output, dim=1) / torch.sum(attention_mask_un, dim=1, dtype=torch.float)
return text_out
def _mean_pooling_for_similarity_visual(self, visual_output, video_mask,):
video_mask_un = video_mask.to(dtype=torch.float).unsqueeze(-1)
visual_output = visual_output * video_mask_un
video_mask_un_sum = torch.sum(video_mask_un, dim=1, dtype=torch.float)
video_mask_un_sum[video_mask_un_sum == 0.] = 1.
video_out = torch.sum(visual_output, dim=1) / video_mask_un_sum
return video_out
def _mean_pooling_for_similarity(self, sequence_output, visual_output, attention_mask, video_mask,):
text_out = self._mean_pooling_for_similarity_sequence(sequence_output, attention_mask)
video_out = self._mean_pooling_for_similarity_visual(visual_output, video_mask)
return text_out, video_out
def _loose_similarity(self, sequence_output, visual_output, attention_mask, video_mask, sim_header="meanP"):
sequence_output, visual_output = sequence_output.contiguous(), visual_output.contiguous()
if sim_header == "meanP":
# Default: Parameter-free type
pass
elif sim_header == "seqLSTM":
# Sequential type: LSTM
visual_output_original = visual_output
visual_output = pack_padded_sequence(visual_output, torch.sum(video_mask, dim=-1).cpu(),
batch_first=True, enforce_sorted=False)
visual_output, _ = self.lstm_visual(visual_output)
if self.training: self.lstm_visual.flatten_parameters()
visual_output, _ = pad_packed_sequence(visual_output, batch_first=True)
visual_output = torch.cat((visual_output, visual_output_original[:, visual_output.size(1):, ...].contiguous()), dim=1)
visual_output = visual_output + visual_output_original
elif sim_header == "seqTransf":
# Sequential type: Transformer Encoder
visual_output_original = visual_output
seq_length = visual_output.size(1)
position_ids = torch.arange(seq_length, dtype=torch.long, device=visual_output.device)
position_ids = position_ids.unsqueeze(0).expand(visual_output.size(0), -1)
frame_position_embeddings = self.frame_position_embeddings(position_ids)
visual_output = visual_output + frame_position_embeddings
extended_video_mask = (1.0 - video_mask.unsqueeze(1)) * -1000000.0
extended_video_mask = extended_video_mask.expand(-1, video_mask.size(1), -1)
visual_output = visual_output.permute(1, 0, 2) # NLD -> LND
visual_output = self.transformerClip(visual_output, extended_video_mask)
visual_output = visual_output.permute(1, 0, 2) # LND -> NLD
visual_output = visual_output + visual_output_original
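        # Under distributed training, gather features from every rank so the similarity
        # matrix is computed over the full global batch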
if self.training:
visual_output = allgather(visual_output, self.task_config)
video_mask = allgather(video_mask, self.task_config)
sequence_output = allgather(sequence_output, self.task_config)
torch.distributed.barrier()
        visual_output = visual_output / visual_output.norm(dim=-1, keepdim=True)  # normalize
visual_output = self._mean_pooling_for_similarity_visual(visual_output, video_mask) # [bs,12,512] -> [bs,1,512]
visual_output = visual_output / visual_output.norm(dim=-1, keepdim=True)
sequence_output = sequence_output.squeeze(1)
sequence_output = sequence_output / sequence_output.norm(dim=-1, keepdim=True) # [bs,1,512]
logit_scale = self.clip.logit_scale.exp()
retrieve_logits = logit_scale * torch.matmul(sequence_output, visual_output.t())
# [bs,bs]
return retrieve_logits
def _cross_similarity(self, sequence_output, visual_output, attention_mask, video_mask):
sequence_output, visual_output = sequence_output.contiguous(), visual_output.contiguous()
b_text, s_text, h_text = sequence_output.size()
b_visual, s_visual, h_visual = visual_output.size()
retrieve_logits_list = []
step_size = b_text # set smaller to reduce memory cost
split_size = [step_size] * (b_text // step_size)
release_size = b_text - sum(split_size)
if release_size > 0:
split_size += [release_size]
        # because the CLIP text branch returns only the last hidden state
attention_mask = torch.ones(sequence_output.size(0), 1)\
.to(device=attention_mask.device, dtype=attention_mask.dtype)
sequence_output_splits = torch.split(sequence_output, split_size, dim=0)
attention_mask_splits = torch.split(attention_mask, split_size, dim=0)
for i in range(len(split_size)):
sequence_output_row = sequence_output_splits[i]
attention_mask_row = attention_mask_splits[i]
sequence_output_l = sequence_output_row.unsqueeze(1).repeat(1, b_visual, 1, 1)
sequence_output_l = sequence_output_l.view(-1, s_text, h_text)
attention_mask_l = attention_mask_row.unsqueeze(1).repeat(1, b_visual, 1)
attention_mask_l = attention_mask_l.view(-1, s_text)
step_truth = sequence_output_row.size(0)
visual_output_r = visual_output.unsqueeze(0).repeat(step_truth, 1, 1, 1)
visual_output_r = visual_output_r.view(-1, s_visual, h_visual)
video_mask_r = video_mask.unsqueeze(0).repeat(step_truth, 1, 1)
video_mask_r = video_mask_r.view(-1, s_visual)
cross_output, pooled_output, concat_mask = \
self._get_cross_output(sequence_output_l, visual_output_r, attention_mask_l, video_mask_r)
retrieve_logits_row = self.similarity_dense(pooled_output).squeeze(-1).view(step_truth, b_visual)
retrieve_logits_list.append(retrieve_logits_row)
retrieve_logits = torch.cat(retrieve_logits_list, dim=0)
return retrieve_logits
def get_similarity_logits(self, sequence_output, visual_output, attention_mask, video_mask, shaped=False, loose_type=False):
if shaped is False:
attention_mask = attention_mask.view(-1, attention_mask.shape[-1])
video_mask = video_mask.view(-1, video_mask.shape[-1])
contrastive_direction = ()
if loose_type:
assert self.sim_header in ["meanP", "seqLSTM", "seqTransf"]
retrieve_logits = self._loose_similarity(sequence_output, visual_output, attention_mask, video_mask, sim_header=self.sim_header)
else:
assert self.sim_header in ["tightTransf"]
retrieve_logits = self._cross_similarity(sequence_output, visual_output, attention_mask, video_mask, )
return retrieve_logits, contrastive_direction
def show_log(task_config, info):
if task_config is None or task_config.local_rank == 0:
logger.warning(info)
def check_attr(target_name, task_config):
return hasattr(task_config, target_name) and task_config.__dict__[target_name]
def update_attr(target_name, target_config, target_attr_name, source_config, source_attr_name, default_value=None):
if hasattr(source_config, source_attr_name):
if default_value is None or getattr(source_config, source_attr_name) != default_value:
setattr(target_config, target_attr_name, getattr(source_config, source_attr_name))
show_log(source_config, "Set {}.{}: {}.".format(target_name,
target_attr_name, getattr(target_config, target_attr_name)))
return target_config
| [] |
2024-01-10 | bojanskr/azure-search-power-skills | Vector~EmbeddingGenerator~embedder~text_embedder.py | import openai
import os
import re
import logging
from tenacity import retry, wait_random_exponential, stop_after_attempt
class TextEmbedder():
openai.api_type = "azure"
openai.api_key = os.getenv("AZURE_OPENAI_API_KEY")
openai.api_base = f"https://{os.getenv('AZURE_OPENAI_SERVICE_NAME')}.openai.azure.com/"
openai.api_version = os.getenv("AZURE_OPENAI_API_VERSION")
AZURE_OPENAI_EMBEDDING_DEPLOYMENT = os.getenv("AZURE_OPENAI_EMBEDDING_DEPLOYMENT")
def clean_text(self, text, text_limit=7000):
        # Clean up text (e.g. collapse repeated whitespace and line breaks)
text = re.sub(r'\s+', ' ', text).strip()
text = re.sub(r'[\n\r]+', ' ', text).strip()
        # Truncate text if necessary (e.g. for ada-002, 4095 tokens ~ 7000 characters)
        if len(text) > text_limit:
            logging.warning("Text exceeds maximum length, truncating...")
text = text[:text_limit]
return text
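    # Retry transient embedding-API failures with exponential backoff, up to 6 attempts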
@retry(wait=wait_random_exponential(min=1, max=20), stop=stop_after_attempt(6))
def embed_content(self, text, clean_text=True, use_single_precision=True):
embedding_precision = 9 if use_single_precision else 18
if clean_text:
text = self.clean_text(text)
response = openai.Embedding.create(input=text, engine=self.AZURE_OPENAI_EMBEDDING_DEPLOYMENT)
embedding = [round(x, embedding_precision) for x in response['data'][0]['embedding']]
return embedding | [] |
2024-01-10 | yuejunzhang/GPT-Telegramus | DALLEModule.py | """
Copyright (C) 2023 Fern Lane, GPT-Telegramus
Licensed under the GNU Affero General Public License, Version 3.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
https://www.gnu.org/licenses/agpl-3.0.en.html
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR
OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
OTHER DEALINGS IN THE SOFTWARE.
"""
import logging
import openai
import UsersHandler
from RequestResponseContainer import RequestResponseContainer
class DALLEModule:
def __init__(self, config: dict, messages: dict, users_handler: UsersHandler.UsersHandler) -> None:
self.config = config
self.messages = messages
self.users_handler = users_handler
self._enabled = False
self._restart_attempts = 0
self._proxy = None
def initialize(self) -> None:
"""
Initializes DALL-E official API
:return:
"""
try:
# Set enabled status
self._enabled = self.config["modules"]["dalle"]
if not self._enabled:
logging.warning("DALL-E module disabled in config file!")
return
# Set Key
openai.api_key = self.config["dalle"]["open_ai_api_key"]
# Proxy for DALL-E
proxy = self.config["dalle"]["proxy"]
if proxy and len(proxy) > 1 and proxy.strip().lower() != "auto":
self._proxy = proxy
openai.proxy = proxy
else:
self._proxy = None
# Done?
logging.info("DALL-E module initialized")
# Error
except Exception as e:
logging.error("Error initializing DALL-E module!", exc_info=e)
self._enabled = False
def set_proxy(self, proxy: str) -> None:
"""
Sets new proxy from ProxyAutomation
self.config["dalle"]["proxy"] must be "auto"
        :param proxy: HTTPS proxy in the format http://IP:PORT
:return:
"""
if self.config["dalle"]["proxy"].strip().lower() != "auto":
return
logging.info("Setting proxy {0} for DALL-E module".format(proxy))
self._proxy = proxy
openai.proxy = proxy
def process_request(self, request_response: RequestResponseContainer) -> None:
"""
Processes request to DALL-E
:param request_response: RequestResponseContainer object
:return:
"""
# Check if we are initialized
if not self._enabled:
logging.error("DALL-E module not initialized!")
request_response.response = self.messages["response_error"].replace("\\n", "\n") \
.format("DALL-E module not initialized!")
request_response.error = True
return
try:
# Increment requests_total for statistics
request_response.user["requests_total"] += 1
self.users_handler.save_user(request_response.user)
# Set Key
openai.api_key = self.config["dalle"]["open_ai_api_key"]
# Generate image
logging.info("Requesting image from DALL-E")
image_response = openai.Image.create(prompt=request_response.request,
n=1,
size=self.config["dalle"]["image_size"])
response_url = image_response["data"][0]["url"]
# Check response
if not response_url or len(response_url) < 1:
raise Exception("Wrong DALL-E response!")
# OK?
logging.info("Response successfully processed for user {0} ({1})"
.format(request_response.user["user_name"], request_response.user["user_id"]))
request_response.response = response_url
# Exit requested
except KeyboardInterrupt:
logging.warning("KeyboardInterrupt @ process_request")
return
# DALL-E or other error
except Exception as e:
logging.error("Error processing request!", exc_info=e)
# Try to restart
self.restart()
self._restart_attempts += 1
# Try again 1 time
if self._restart_attempts < 2:
self.process_request(request_response)
# Stop restarting and respond with error
else:
request_response.response = self.messages["response_error"].replace("\\n", "\n").format(str(e))
request_response.error = True
self._restart_attempts = 0
def restart(self):
"""
Restarts module and saves proxy
:return:
"""
if not self.config["modules"]["dalle"]:
return
logging.info("Restarting DALL-E module")
# Restart
self.initialize()
# Set proxy
try:
if self._proxy is not None:
openai.proxy = self._proxy
except Exception as e:
logging.error("Error setting back proxy to DALL-E module!", exc_info=e)
| [] |
2024-01-10 | yoshikipom/python | openai~src~image~explain_image.py | from openai import OpenAI
client = OpenAI()
response = client.chat.completions.create(
model="gpt-4-vision-preview",
messages=[
{
"role": "user",
"content": [
{"type": "text", "text": "What's in this image?"},
{
"type": "image_url",
"image_url": {
"url": "https://upload.wikimedia.org/wikipedia/commons/thumb/d/dd/Gfp-wisconsin-madison-the-nature-boardwalk.jpg/2560px-Gfp-wisconsin-madison-the-nature-boardwalk.jpg",
"detail": "low"
},
},
],
}
],
max_tokens=300,
)
print(response.choices[0].message.content)
| [
"[{'type': 'text', 'text': \"What's in this image?\"}, {'type': 'image_url', 'image_url': {'url': 'https://upload.wikimedia.org/wikipedia/commons/thumb/d/dd/Gfp-wisconsin-madison-the-nature-boardwalk.jpg/2560px-Gfp-wisconsin-madison-the-nature-boardwalk.jpg', 'detail': 'low'}}]"
] |
2024-01-10 | yoshikipom/python | openai~src~audio~text_to_speech.py | from pathlib import Path
from openai import OpenAI
openai = OpenAI()
# speech_file_path = Path(__file__).parent / "speech.mp3"
speech_file_path = "speech.mp3"
def main() -> None:
audio = openai.audio
# Create text-to-speech audio file
response = audio.speech.create(
model="tts-1", voice="nova", input="Good morning! Hello! Goodbye!", speed=0.7,
)
response.stream_to_file(speech_file_path)
if __name__ == "__main__":
main()
| [] |
2024-01-10 | yoshikipom/python | openai~src~chat~single_message.py | from openai import OpenAI
import os
client = OpenAI()
content = 'hello'
chat_completion = client.chat.completions.create(
model='gpt-4',
messages=[{"role": "user", "content": content}]
)
print(chat_completion.choices[0].message.content)
| [
"hello"
] |
2024-01-10 | ankit-tyagi/scrapeghost | tests~testutils.py | from unittest.mock import patch
import openai
def _mock_response(**kwargs):
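    # Build a fake ChatCompletion-style response object so tests never hit the real API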
mr = openai.openai_object.OpenAIObject.construct_from(
dict(
model=kwargs.get("model"),
choices=[
{
"finish_reason": kwargs.get("finish_reason", "stop"),
"message": {
"content": kwargs.get("content", "Hello world"),
},
}
],
usage={
"prompt_tokens": kwargs.get("prompt_tokens", 1),
"completion_tokens": kwargs.get("completion_tokens", 1),
},
created=1629200000,
id="cmpl-xxxxxxxxxxxxxxxxxxxx",
model_version=kwargs.get("model_version", "ada"),
prompt="Hello world",
status="complete",
)
)
return mr
def _timeout(**kwargs):
raise openai.error.Timeout()
def patch_create():
p = patch("scrapeghost.apicall.openai.ChatCompletion.create")
return p
| [
"Hello world",
"content"
] |
2024-01-10 | FurahaAswan/ClueMaster | backend~game~tests.py | from openai import OpenAI
client = OpenAI("key")
response = client.chat.completions.create(
model="gpt-4-vision-preview",
messages=[
{
"role": "user",
"content": [
{"type": "text", "text": """Convert the 2 tables (example 1, example 2) to a csv. Only respond with the csv and don't include newline characters in your response"""},
{
"type": "image_url",
"image_url": {
"url": "https://i.ibb.co/f0CgXmv/Screenshot-2023-11-22-142205.png",
},
},
],
}
],
max_tokens=300,
)
print(response.choices[0]) | [
"[{'type': 'text', 'text': \"Convert the 2 tables (example 1, example 2) to a csv. Only respond with the csv and don't include newline characters in your response\"}, {'type': 'image_url', 'image_url': {'url': 'https://i.ibb.co/f0CgXmv/Screenshot-2023-11-22-142205.png'}}]"
] |
2024-01-10 | gojira/langchain | langchain~chat_models~azure_openai.py | """Azure OpenAI chat wrapper."""
from __future__ import annotations
import logging
from typing import Any, Dict
from pydantic import root_validator
from langchain.chat_models.openai import ChatOpenAI
from langchain.utils import get_from_dict_or_env
logger = logging.getLogger(__file__)
class AzureChatOpenAI(ChatOpenAI):
"""Wrapper around Azure OpenAI Chat Completion API. To use this class you
must have a deployed model on Azure OpenAI. Use `deployment_name` in the
constructor to refer to the "Model deployment name" in the Azure portal.
In addition, you should have the ``openai`` python package installed, and the
following environment variables set or passed in constructor in lower case:
- ``OPENAI_API_TYPE`` (default: ``azure``)
- ``OPENAI_API_KEY``
- ``OPENAI_API_BASE``
- ``OPENAI_API_VERSION``
    For example, if you have `gpt-35-turbo` deployed, with the deployment name
`35-turbo-dev`, the constructor should look like:
.. code-block:: python
AzureChatOpenAI(
deployment_name="35-turbo-dev",
openai_api_version="2023-03-15-preview",
)
Be aware the API version may change.
Any parameters that are valid to be passed to the openai.create call can be passed
in, even if not explicitly saved on this class.
"""
deployment_name: str = ""
openai_api_type: str = "azure"
openai_api_base: str = ""
openai_api_version: str = ""
openai_api_key: str = ""
openai_organization: str = ""
@root_validator()
def validate_environment(cls, values: Dict) -> Dict:
"""Validate that api key and python package exists in environment."""
openai_api_key = get_from_dict_or_env(
values,
"openai_api_key",
"OPENAI_API_KEY",
)
openai_api_base = get_from_dict_or_env(
values,
"openai_api_base",
"OPENAI_API_BASE",
)
openai_api_version = get_from_dict_or_env(
values,
"openai_api_version",
"OPENAI_API_VERSION",
)
openai_api_type = get_from_dict_or_env(
values,
"openai_api_type",
"OPENAI_API_TYPE",
)
openai_organization = get_from_dict_or_env(
values,
"openai_organization",
"OPENAI_ORGANIZATION",
default="",
)
try:
import openai
openai.api_type = openai_api_type
openai.api_base = openai_api_base
openai.api_version = openai_api_version
openai.api_key = openai_api_key
if openai_organization:
openai.organization = openai_organization
except ImportError:
raise ValueError(
"Could not import openai python package. "
"Please it install it with `pip install openai`."
)
try:
values["client"] = openai.ChatCompletion
except AttributeError:
raise ValueError(
"`openai` has no `ChatCompletion` attribute, this is likely "
"due to an old version of the openai package. Try upgrading it "
"with `pip install --upgrade openai`."
)
if values["n"] < 1:
raise ValueError("n must be at least 1.")
if values["n"] > 1 and values["streaming"]:
raise ValueError("n must be 1 when streaming.")
return values
@property
def _default_params(self) -> Dict[str, Any]:
"""Get the default parameters for calling OpenAI API."""
return {
**super()._default_params,
"engine": self.deployment_name,
}
| [] |
2024-01-10 | gojira/langchain | langchain~tools~openapi~utils~api_models.py | """Pydantic models for parsing an OpenAPI spec."""
import logging
from enum import Enum
from typing import Any, Dict, List, Optional, Sequence, Tuple, Type, Union
from openapi_schema_pydantic import MediaType, Parameter, Reference, RequestBody, Schema
from pydantic import BaseModel, Field
from langchain.tools.openapi.utils.openapi_utils import HTTPVerb, OpenAPISpec
logger = logging.getLogger(__name__)
PRIMITIVE_TYPES = {
"integer": int,
"number": float,
"string": str,
"boolean": bool,
"array": List,
"object": Dict,
"null": None,
}
# See https://github.com/OAI/OpenAPI-Specification/blob/main/versions/3.1.0.md#parameterIn
# for more info.
class APIPropertyLocation(Enum):
"""The location of the property."""
QUERY = "query"
PATH = "path"
HEADER = "header"
COOKIE = "cookie" # Not yet supported
@classmethod
def from_str(cls, location: str) -> "APIPropertyLocation":
"""Parse an APIPropertyLocation."""
try:
return cls(location)
except ValueError:
raise ValueError(
f"Invalid APIPropertyLocation. Valid values are {cls.__members__}"
)
_SUPPORTED_MEDIA_TYPES = ("application/json",)
SUPPORTED_LOCATIONS = {
APIPropertyLocation.QUERY,
APIPropertyLocation.PATH,
}
INVALID_LOCATION_TEMPL = (
'Unsupported APIPropertyLocation "{location}"'
" for parameter {name}. "
+ f"Valid values are {[loc.value for loc in SUPPORTED_LOCATIONS]}"
)
SCHEMA_TYPE = Union[str, Type, tuple, None, Enum]
class APIPropertyBase(BaseModel):
"""Base model for an API property."""
# The name of the parameter is required and is case sensitive.
# If "in" is "path", the "name" field must correspond to a template expression
# within the path field in the Paths Object.
# If "in" is "header" and the "name" field is "Accept", "Content-Type",
# or "Authorization", the parameter definition is ignored.
# For all other cases, the "name" corresponds to the parameter
# name used by the "in" property.
name: str = Field(alias="name")
"""The name of the property."""
required: bool = Field(alias="required")
"""Whether the property is required."""
type: SCHEMA_TYPE = Field(alias="type")
"""The type of the property.
Either a primitive type, a component/parameter type,
or an array or 'object' (dict) of the above."""
default: Optional[Any] = Field(alias="default", default=None)
"""The default value of the property."""
description: Optional[str] = Field(alias="description", default=None)
"""The description of the property."""
class APIProperty(APIPropertyBase):
"""A model for a property in the query, path, header, or cookie params."""
location: APIPropertyLocation = Field(alias="location")
"""The path/how it's being passed to the endpoint."""
@staticmethod
def _cast_schema_list_type(schema: Schema) -> Optional[Union[str, Tuple[str, ...]]]:
type_ = schema.type
if not isinstance(type_, list):
return type_
else:
return tuple(type_)
@staticmethod
def _get_schema_type_for_enum(parameter: Parameter, schema: Schema) -> Enum:
"""Get the schema type when the parameter is an enum."""
param_name = f"{parameter.name}Enum"
return Enum(param_name, {str(v): v for v in schema.enum})
@staticmethod
def _get_schema_type_for_array(
schema: Schema,
) -> Optional[Union[str, Tuple[str, ...]]]:
items = schema.items
if isinstance(items, Schema):
schema_type = APIProperty._cast_schema_list_type(items)
elif isinstance(items, Reference):
ref_name = items.ref.split("/")[-1]
            schema_type = ref_name  # TODO: Add ref definitions to make this valid
else:
raise ValueError(f"Unsupported array items: {items}")
if isinstance(schema_type, str):
# TODO: recurse
schema_type = (schema_type,)
return schema_type
@staticmethod
def _get_schema_type(parameter: Parameter, schema: Optional[Schema]) -> SCHEMA_TYPE:
if schema is None:
return None
schema_type: SCHEMA_TYPE = APIProperty._cast_schema_list_type(schema)
if schema_type == "array":
schema_type = APIProperty._get_schema_type_for_array(schema)
elif schema_type == "object":
# TODO: Resolve array and object types to components.
raise NotImplementedError("Objects not yet supported")
elif schema_type in PRIMITIVE_TYPES:
if schema.enum:
schema_type = APIProperty._get_schema_type_for_enum(parameter, schema)
else:
# Directly use the primitive type
pass
else:
raise NotImplementedError(f"Unsupported type: {schema_type}")
return schema_type
@staticmethod
def _validate_location(location: APIPropertyLocation, name: str) -> None:
if location not in SUPPORTED_LOCATIONS:
raise NotImplementedError(
INVALID_LOCATION_TEMPL.format(location=location, name=name)
)
@staticmethod
def _validate_content(content: Optional[Dict[str, MediaType]]) -> None:
if content:
raise ValueError(
"API Properties with media content not supported. "
"Media content only supported within APIRequestBodyProperty's"
)
@staticmethod
def _get_schema(parameter: Parameter, spec: OpenAPISpec) -> Optional[Schema]:
schema = parameter.param_schema
if isinstance(schema, Reference):
schema = spec.get_referenced_schema(schema)
elif schema is None:
return None
elif not isinstance(schema, Schema):
raise ValueError(f"Error dereferencing schema: {schema}")
return schema
@staticmethod
def is_supported_location(location: str) -> bool:
"""Return whether the provided location is supported."""
try:
return APIPropertyLocation.from_str(location) in SUPPORTED_LOCATIONS
except ValueError:
return False
@classmethod
def from_parameter(cls, parameter: Parameter, spec: OpenAPISpec) -> "APIProperty":
"""Instantiate from an OpenAPI Parameter."""
location = APIPropertyLocation.from_str(parameter.param_in)
cls._validate_location(
location,
parameter.name,
)
cls._validate_content(parameter.content)
schema = cls._get_schema(parameter, spec)
schema_type = cls._get_schema_type(parameter, schema)
default_val = schema.default if schema is not None else None
return cls(
name=parameter.name,
location=location,
default=default_val,
description=parameter.description,
required=parameter.required,
type=schema_type,
)
class APIRequestBodyProperty(APIPropertyBase):
"""A model for a request body property."""
properties: List["APIRequestBodyProperty"] = Field(alias="properties")
"""The sub-properties of the property."""
# This is useful for handling nested property cycles.
# We can define separate types in that case.
references_used: List[str] = Field(alias="references_used")
"""The references used by the property."""
@classmethod
def _process_object_schema(
cls, schema: Schema, spec: OpenAPISpec, references_used: List[str]
) -> Tuple[Union[str, List[str], None], List["APIRequestBodyProperty"]]:
properties = []
required_props = schema.required or []
if schema.properties is None:
raise ValueError(
f"No properties found when processing object schema: {schema}"
)
for prop_name, prop_schema in schema.properties.items():
if isinstance(prop_schema, Reference):
ref_name = prop_schema.ref.split("/")[-1]
if ref_name not in references_used:
references_used.append(ref_name)
prop_schema = spec.get_referenced_schema(prop_schema)
else:
continue
properties.append(
cls.from_schema(
schema=prop_schema,
name=prop_name,
required=prop_name in required_props,
spec=spec,
references_used=references_used,
)
)
return schema.type, properties
@classmethod
def _process_array_schema(
cls, schema: Schema, name: str, spec: OpenAPISpec, references_used: List[str]
) -> str:
items = schema.items
if items is not None:
if isinstance(items, Reference):
ref_name = items.ref.split("/")[-1]
if ref_name not in references_used:
references_used.append(ref_name)
items = spec.get_referenced_schema(items)
else:
pass
return f"Array<{ref_name}>"
else:
pass
if isinstance(items, Schema):
array_type = cls.from_schema(
schema=items,
name=f"{name}Item",
required=True, # TODO: Add required
spec=spec,
references_used=references_used,
)
return f"Array<{array_type.type}>"
return "array"
@classmethod
def from_schema(
cls,
schema: Schema,
name: str,
required: bool,
spec: OpenAPISpec,
references_used: Optional[List[str]] = None,
) -> "APIRequestBodyProperty":
"""Recursively populate from an OpenAPI Schema."""
if references_used is None:
references_used = []
schema_type = schema.type
properties: List[APIRequestBodyProperty] = []
if schema_type == "object" and schema.properties:
schema_type, properties = cls._process_object_schema(
schema, spec, references_used
)
elif schema_type == "array":
schema_type = cls._process_array_schema(schema, name, spec, references_used)
elif schema_type in PRIMITIVE_TYPES:
# Use the primitive type directly
pass
elif schema_type is None:
            # No typing specified/parsed. Will map to 'any'
pass
else:
raise ValueError(f"Unsupported type: {schema_type}")
return cls(
name=name,
required=required,
type=schema_type,
default=schema.default,
description=schema.description,
properties=properties,
references_used=references_used,
)
class APIRequestBody(BaseModel):
"""A model for a request body."""
description: Optional[str] = Field(alias="description")
"""The description of the request body."""
properties: List[APIRequestBodyProperty] = Field(alias="properties")
# E.g., application/json - we only support JSON at the moment.
media_type: str = Field(alias="media_type")
"""The media type of the request body."""
@classmethod
def _process_supported_media_type(
cls,
media_type_obj: MediaType,
spec: OpenAPISpec,
) -> List[APIRequestBodyProperty]:
"""Process the media type of the request body."""
references_used = []
schema = media_type_obj.media_type_schema
if isinstance(schema, Reference):
references_used.append(schema.ref.split("/")[-1])
schema = spec.get_referenced_schema(schema)
if schema is None:
raise ValueError(
f"Could not resolve schema for media type: {media_type_obj}"
)
api_request_body_properties = []
required_properties = schema.required or []
if schema.type == "object" and schema.properties:
for prop_name, prop_schema in schema.properties.items():
if isinstance(prop_schema, Reference):
prop_schema = spec.get_referenced_schema(prop_schema)
api_request_body_properties.append(
APIRequestBodyProperty.from_schema(
schema=prop_schema,
name=prop_name,
required=prop_name in required_properties,
spec=spec,
)
)
else:
api_request_body_properties.append(
APIRequestBodyProperty(
name="body",
required=True,
type=schema.type,
default=schema.default,
description=schema.description,
properties=[],
references_used=references_used,
)
)
return api_request_body_properties
@classmethod
def from_request_body(
cls, request_body: RequestBody, spec: OpenAPISpec
) -> "APIRequestBody":
"""Instantiate from an OpenAPI RequestBody."""
properties = []
for media_type, media_type_obj in request_body.content.items():
if media_type not in _SUPPORTED_MEDIA_TYPES:
continue
api_request_body_properties = cls._process_supported_media_type(
media_type_obj,
spec,
)
properties.extend(api_request_body_properties)
return cls(
description=request_body.description,
properties=properties,
media_type=media_type,
)
class APIOperation(BaseModel):
"""A model for a single API operation."""
operation_id: str = Field(alias="operation_id")
"""The unique identifier of the operation."""
description: Optional[str] = Field(alias="description")
"""The description of the operation."""
base_url: str = Field(alias="base_url")
"""The base URL of the operation."""
path: str = Field(alias="path")
"""The path of the operation."""
method: HTTPVerb = Field(alias="method")
"""The HTTP method of the operation."""
properties: Sequence[APIProperty] = Field(alias="properties")
# TODO: Add parse in used components to be able to specify what type of
# referenced object it is.
# """The properties of the operation."""
# components: Dict[str, BaseModel] = Field(alias="components")
request_body: Optional[APIRequestBody] = Field(alias="request_body")
"""The request body of the operation."""
@staticmethod
def _get_properties_from_parameters(
parameters: List[Parameter], spec: OpenAPISpec
) -> List[APIProperty]:
"""Get the properties of the operation."""
properties = []
for param in parameters:
if APIProperty.is_supported_location(param.param_in):
properties.append(APIProperty.from_parameter(param, spec))
elif param.required:
raise ValueError(
INVALID_LOCATION_TEMPL.format(
location=param.param_in, name=param.name
)
)
else:
logger.warning(
INVALID_LOCATION_TEMPL.format(
location=param.param_in, name=param.name
)
+ " Ignoring optional parameter"
)
pass
return properties
@classmethod
def from_openapi_url(
cls,
spec_url: str,
path: str,
method: str,
) -> "APIOperation":
"""Create an APIOperation from an OpenAPI URL."""
spec = OpenAPISpec.from_url(spec_url)
return cls.from_openapi_spec(spec, path, method)
@classmethod
def from_openapi_spec(
cls,
spec: OpenAPISpec,
path: str,
method: str,
) -> "APIOperation":
"""Create an APIOperation from an OpenAPI spec."""
operation = spec.get_operation(path, method)
parameters = spec.get_parameters_for_operation(operation)
properties = cls._get_properties_from_parameters(parameters, spec)
operation_id = OpenAPISpec.get_cleaned_operation_id(operation, path, method)
request_body = spec.get_request_body_for_operation(operation)
api_request_body = (
APIRequestBody.from_request_body(request_body, spec)
if request_body is not None
else None
)
return cls(
operation_id=operation_id,
description=operation.description,
base_url=spec.base_url,
path=path,
method=method,
properties=properties,
request_body=api_request_body,
)
@staticmethod
def ts_type_from_python(type_: SCHEMA_TYPE) -> str:
if type_ is None:
# TODO: Handle Nones better. These often result when
# parsing specs that are < v3
return "any"
elif isinstance(type_, str):
return {
"str": "string",
"integer": "number",
"float": "number",
"date-time": "string",
}.get(type_, type_)
elif isinstance(type_, tuple):
return f"Array<{APIOperation.ts_type_from_python(type_[0])}>"
elif isinstance(type_, type) and issubclass(type_, Enum):
return " | ".join([f"'{e.value}'" for e in type_])
else:
return str(type_)
def _format_nested_properties(
self, properties: List[APIRequestBodyProperty], indent: int = 2
) -> str:
"""Format nested properties."""
formatted_props = []
for prop in properties:
prop_name = prop.name
prop_type = self.ts_type_from_python(prop.type)
prop_required = "" if prop.required else "?"
prop_desc = f"/* {prop.description} */" if prop.description else ""
if prop.properties:
nested_props = self._format_nested_properties(
prop.properties, indent + 2
)
prop_type = f"{{\n{nested_props}\n{' ' * indent}}}"
formatted_props.append(
f"{prop_desc}\n{' ' * indent}{prop_name}{prop_required}: {prop_type},"
)
return "\n".join(formatted_props)
def to_typescript(self) -> str:
"""Get typescript string representation of the operation."""
operation_name = self.operation_id
params = []
if self.request_body:
formatted_request_body_props = self._format_nested_properties(
self.request_body.properties
)
params.append(formatted_request_body_props)
for prop in self.properties:
prop_name = prop.name
prop_type = self.ts_type_from_python(prop.type)
prop_required = "" if prop.required else "?"
prop_desc = f"/* {prop.description} */" if prop.description else ""
params.append(f"{prop_desc}\n\t\t{prop_name}{prop_required}: {prop_type},")
formatted_params = "\n".join(params).strip()
description_str = f"/* {self.description} */" if self.description else ""
typescript_definition = f"""
{description_str}
type {operation_name} = (_: {{
{formatted_params}
}}) => any;
"""
return typescript_definition.strip()
@property
def query_params(self) -> List[str]:
return [
property.name
for property in self.properties
if property.location == APIPropertyLocation.QUERY
]
@property
def path_params(self) -> List[str]:
return [
property.name
for property in self.properties
if property.location == APIPropertyLocation.PATH
]
@property
def body_params(self) -> List[str]:
if self.request_body is None:
return []
return [prop.name for prop in self.request_body.properties]
| [] |
2024-01-10 | gojira/langchain | langchain~llms~manifest.py | """Wrapper around HazyResearch's Manifest library."""
from typing import Any, Dict, List, Mapping, Optional
from pydantic import Extra, root_validator
from langchain.llms.base import LLM
class ManifestWrapper(LLM):
"""Wrapper around HazyResearch's Manifest library."""
client: Any #: :meta private:
llm_kwargs: Optional[Dict] = None
class Config:
"""Configuration for this pydantic object."""
extra = Extra.forbid
@root_validator()
def validate_environment(cls, values: Dict) -> Dict:
"""Validate that python package exists in environment."""
try:
from manifest import Manifest
if not isinstance(values["client"], Manifest):
raise ValueError
except ImportError:
raise ValueError(
"Could not import manifest python package. "
"Please it install it with `pip install manifest-ml`."
)
return values
@property
def _identifying_params(self) -> Mapping[str, Any]:
kwargs = self.llm_kwargs or {}
return {**self.client.client.get_model_params(), **kwargs}
@property
def _llm_type(self) -> str:
"""Return type of llm."""
return "manifest"
def _call(self, prompt: str, stop: Optional[List[str]] = None) -> str:
"""Call out to LLM through Manifest."""
if stop is not None and len(stop) != 1:
raise NotImplementedError(
f"Manifest currently only supports a single stop token, got {stop}"
)
kwargs = self.llm_kwargs or {}
if stop is not None:
kwargs["stop_token"] = stop
return self.client.run(prompt, **kwargs)
| [] |
2024-01-10 | gojira/langchain | langchain~llms~gooseai.py | """Wrapper around GooseAI API."""
import logging
from typing import Any, Dict, List, Mapping, Optional
from pydantic import Extra, Field, root_validator
from langchain.llms.base import LLM
from langchain.utils import get_from_dict_or_env
logger = logging.getLogger(__name__)
class GooseAI(LLM):
"""Wrapper around OpenAI large language models.
To use, you should have the ``openai`` python package installed, and the
environment variable ``GOOSEAI_API_KEY`` set with your API key.
Any parameters that are valid to be passed to the openai.create call can be passed
in, even if not explicitly saved on this class.
Example:
.. code-block:: python
from langchain.llms import GooseAI
gooseai = GooseAI(model_name="gpt-neo-20b")
"""
client: Any
model_name: str = "gpt-neo-20b"
"""Model name to use"""
temperature: float = 0.7
"""What sampling temperature to use"""
max_tokens: int = 256
"""The maximum number of tokens to generate in the completion.
-1 returns as many tokens as possible given the prompt and
the models maximal context size."""
top_p: float = 1
"""Total probability mass of tokens to consider at each step."""
min_tokens: int = 1
"""The minimum number of tokens to generate in the completion."""
frequency_penalty: float = 0
"""Penalizes repeated tokens according to frequency."""
presence_penalty: float = 0
"""Penalizes repeated tokens."""
n: int = 1
"""How many completions to generate for each prompt."""
model_kwargs: Dict[str, Any] = Field(default_factory=dict)
"""Holds any model parameters valid for `create` call not explicitly specified."""
logit_bias: Optional[Dict[str, float]] = Field(default_factory=dict)
"""Adjust the probability of specific tokens being generated."""
gooseai_api_key: Optional[str] = None
class Config:
"""Configuration for this pydantic config."""
extra = Extra.ignore
@root_validator(pre=True)
def build_extra(cls, values: Dict[str, Any]) -> Dict[str, Any]:
"""Build extra kwargs from additional params that were passed in."""
all_required_field_names = {field.alias for field in cls.__fields__.values()}
extra = values.get("model_kwargs", {})
for field_name in list(values):
if field_name not in all_required_field_names:
if field_name in extra:
raise ValueError(f"Found {field_name} supplied twice.")
logger.warning(
f"""WARNING! {field_name} is not default parameter.
{field_name} was transfered to model_kwargs.
Please confirm that {field_name} is what you intended."""
)
extra[field_name] = values.pop(field_name)
values["model_kwargs"] = extra
return values
@root_validator()
def validate_environment(cls, values: Dict) -> Dict:
"""Validate that api key and python package exists in environment."""
gooseai_api_key = get_from_dict_or_env(
values, "gooseai_api_key", "GOOSEAI_API_KEY"
)
try:
import openai
openai.api_key = gooseai_api_key
openai.api_base = "https://api.goose.ai/v1"
values["client"] = openai.Completion
except ImportError:
raise ValueError(
"Could not import openai python package. "
"Please install it with `pip install openai`."
)
return values
@property
def _default_params(self) -> Dict[str, Any]:
"""Get the default parameters for calling GooseAI API."""
normal_params = {
"temperature": self.temperature,
"max_tokens": self.max_tokens,
"top_p": self.top_p,
"min_tokens": self.min_tokens,
"frequency_penalty": self.frequency_penalty,
"presence_penalty": self.presence_penalty,
"n": self.n,
"logit_bias": self.logit_bias,
}
return {**normal_params, **self.model_kwargs}
@property
def _identifying_params(self) -> Mapping[str, Any]:
"""Get the identifying parameters."""
return {**{"model_name": self.model_name}, **self._default_params}
@property
def _llm_type(self) -> str:
"""Return type of llm."""
return "gooseai"
def _call(self, prompt: str, stop: Optional[List[str]] = None) -> str:
"""Call the GooseAI API."""
params = self._default_params
if stop is not None:
if "stop" in params:
raise ValueError("`stop` found in both the input and default params.")
params["stop"] = stop
response = self.client.create(engine=self.model_name, prompt=prompt, **params)
text = response.choices[0].text
return text
| [] |
2024-01-10 | gojira/langchain | tests~unit_tests~chains~test_sequential.py | """Test pipeline functionality."""
from typing import Dict, List
import pytest
from langchain.chains.base import Chain
from langchain.chains.sequential import SequentialChain, SimpleSequentialChain
from langchain.memory.simple import SimpleMemory
class FakeChain(Chain):
"""Fake Chain for testing purposes."""
input_variables: List[str]
output_variables: List[str]
@property
def input_keys(self) -> List[str]:
"""Input keys this chain returns."""
return self.input_variables
@property
def output_keys(self) -> List[str]:
"""Input keys this chain returns."""
return self.output_variables
def _call(self, inputs: Dict[str, str]) -> Dict[str, str]:
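        # Join the input values and append "foo" so tests can trace how values flow between chains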
outputs = {}
for var in self.output_variables:
variables = [inputs[k] for k in self.input_variables]
outputs[var] = f"{' '.join(variables)}foo"
return outputs
def test_sequential_usage_single_inputs() -> None:
"""Test sequential on single input chains."""
chain_1 = FakeChain(input_variables=["foo"], output_variables=["bar"])
chain_2 = FakeChain(input_variables=["bar"], output_variables=["baz"])
chain = SequentialChain(chains=[chain_1, chain_2], input_variables=["foo"])
output = chain({"foo": "123"})
expected_output = {"baz": "123foofoo", "foo": "123"}
assert output == expected_output
def test_sequential_usage_multiple_inputs() -> None:
"""Test sequential on multiple input chains."""
chain_1 = FakeChain(input_variables=["foo", "test"], output_variables=["bar"])
chain_2 = FakeChain(input_variables=["bar", "foo"], output_variables=["baz"])
chain = SequentialChain(chains=[chain_1, chain_2], input_variables=["foo", "test"])
output = chain({"foo": "123", "test": "456"})
expected_output = {
"baz": "123 456foo 123foo",
"foo": "123",
"test": "456",
}
assert output == expected_output
def test_sequential_usage_memory() -> None:
"""Test sequential usage with memory."""
memory = SimpleMemory(memories={"zab": "rab"})
chain_1 = FakeChain(input_variables=["foo"], output_variables=["bar"])
chain_2 = FakeChain(input_variables=["bar"], output_variables=["baz"])
chain = SequentialChain(
memory=memory, chains=[chain_1, chain_2], input_variables=["foo"]
)
output = chain({"foo": "123"})
expected_output = {"baz": "123foofoo", "foo": "123", "zab": "rab"}
assert output == expected_output
memory = SimpleMemory(memories={"zab": "rab", "foo": "rab"})
chain_1 = FakeChain(input_variables=["foo"], output_variables=["bar"])
chain_2 = FakeChain(input_variables=["bar"], output_variables=["baz"])
with pytest.raises(ValueError):
SequentialChain(
memory=memory, chains=[chain_1, chain_2], input_variables=["foo"]
)
def test_sequential_usage_multiple_outputs() -> None:
"""Test sequential usage on multiple output chains."""
chain_1 = FakeChain(input_variables=["foo"], output_variables=["bar", "test"])
chain_2 = FakeChain(input_variables=["bar", "foo"], output_variables=["baz"])
chain = SequentialChain(chains=[chain_1, chain_2], input_variables=["foo"])
output = chain({"foo": "123"})
expected_output = {
"baz": "123foo 123foo",
"foo": "123",
}
assert output == expected_output
def test_sequential_missing_inputs() -> None:
"""Test error is raised when input variables are missing."""
chain_1 = FakeChain(input_variables=["foo"], output_variables=["bar"])
chain_2 = FakeChain(input_variables=["bar", "test"], output_variables=["baz"])
with pytest.raises(ValueError):
# Also needs "test" as an input
SequentialChain(chains=[chain_1, chain_2], input_variables=["foo"])
def test_sequential_bad_outputs() -> None:
"""Test error is raised when bad outputs are specified."""
chain_1 = FakeChain(input_variables=["foo"], output_variables=["bar"])
chain_2 = FakeChain(input_variables=["bar"], output_variables=["baz"])
with pytest.raises(ValueError):
# "test" is not present as an output variable.
SequentialChain(
chains=[chain_1, chain_2],
input_variables=["foo"],
output_variables=["test"],
)
def test_sequential_valid_outputs() -> None:
"""Test chain runs when valid outputs are specified."""
chain_1 = FakeChain(input_variables=["foo"], output_variables=["bar"])
chain_2 = FakeChain(input_variables=["bar"], output_variables=["baz"])
chain = SequentialChain(
chains=[chain_1, chain_2],
input_variables=["foo"],
output_variables=["bar", "baz"],
)
output = chain({"foo": "123"}, return_only_outputs=True)
expected_output = {"baz": "123foofoo", "bar": "123foo"}
assert output == expected_output
def test_sequential_overlapping_inputs() -> None:
"""Test error is raised when input variables are overlapping."""
chain_1 = FakeChain(input_variables=["foo"], output_variables=["bar", "test"])
chain_2 = FakeChain(input_variables=["bar"], output_variables=["baz"])
with pytest.raises(ValueError):
# "test" is specified as an input, but also is an output of one step
SequentialChain(chains=[chain_1, chain_2], input_variables=["foo", "test"])
def test_simple_sequential_functionality() -> None:
"""Test simple sequential functionality."""
chain_1 = FakeChain(input_variables=["foo"], output_variables=["bar"])
chain_2 = FakeChain(input_variables=["bar"], output_variables=["baz"])
chain = SimpleSequentialChain(chains=[chain_1, chain_2])
output = chain({"input": "123"})
expected_output = {"output": "123foofoo", "input": "123"}
assert output == expected_output
def test_multi_input_errors() -> None:
"""Test simple sequential errors if multiple input variables are expected."""
chain_1 = FakeChain(input_variables=["foo"], output_variables=["bar"])
chain_2 = FakeChain(input_variables=["bar", "foo"], output_variables=["baz"])
with pytest.raises(ValueError):
SimpleSequentialChain(chains=[chain_1, chain_2])
def test_multi_output_errors() -> None:
"""Test simple sequential errors if multiple output variables are expected."""
chain_1 = FakeChain(input_variables=["foo"], output_variables=["bar", "grok"])
chain_2 = FakeChain(input_variables=["bar"], output_variables=["baz"])
with pytest.raises(ValueError):
SimpleSequentialChain(chains=[chain_1, chain_2])
| [] |
2024-01-10 | gojira/langchain | langchain~llms~sagemaker_endpoint.py | """Wrapper around Sagemaker InvokeEndpoint API."""
from abc import ABC, abstractmethod
from typing import Any, Dict, List, Mapping, Optional, Union
from pydantic import Extra, root_validator
from langchain.llms.base import LLM
from langchain.llms.utils import enforce_stop_tokens
class ContentHandlerBase(ABC):
"""A handler class to transform input from LLM to a
format that SageMaker endpoint expects. Similarily,
the class also handles transforming output from the
SageMaker endpoint to a format that LLM class expects.
"""
"""
Example:
.. code-block:: python
class ContentHandler(ContentHandlerBase):
content_type = "application/json"
accepts = "application/json"
def transform_input(self, prompt: str, model_kwargs: Dict) -> bytes:
                    input_str = json.dumps({"prompt": prompt, **model_kwargs})
return input_str.encode('utf-8')
def transform_output(self, output: bytes) -> str:
response_json = json.loads(output.read().decode("utf-8"))
return response_json[0]["generated_text"]
"""
content_type: Optional[str] = "text/plain"
"""The MIME type of the input data passed to endpoint"""
accepts: Optional[str] = "text/plain"
"""The MIME type of the response data returned from endpoint"""
@abstractmethod
def transform_input(
self, prompt: Union[str, List[str]], model_kwargs: Dict
) -> bytes:
"""Transforms the input to a format that model can accept
as the request Body. Should return bytes or seekable file
like object in the format specified in the content_type
request header.
"""
@abstractmethod
def transform_output(self, output: bytes) -> Any:
"""Transforms the output from the model to string that
the LLM class expects.
"""
class SagemakerEndpoint(LLM):
"""Wrapper around custom Sagemaker Inference Endpoints.
To use, you must supply the endpoint name from your deployed
Sagemaker model & the region where it is deployed.
To authenticate, the AWS client uses the following methods to
automatically load credentials:
https://boto3.amazonaws.com/v1/documentation/api/latest/guide/credentials.html
If a specific credential profile should be used, you must pass
the name of the profile from the ~/.aws/credentials file that is to be used.
Make sure the credentials / roles used have the required policies to
access the Sagemaker endpoint.
See: https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html
"""
"""
Example:
.. code-block:: python
from langchain import SagemakerEndpoint
endpoint_name = (
"my-endpoint-name"
)
region_name = (
"us-west-2"
)
credentials_profile_name = (
"default"
)
se = SagemakerEndpoint(
endpoint_name=endpoint_name,
region_name=region_name,
credentials_profile_name=credentials_profile_name
)
"""
client: Any #: :meta private:
endpoint_name: str = ""
"""The name of the endpoint from the deployed Sagemaker model.
Must be unique within an AWS Region."""
region_name: str = ""
"""The aws region where the Sagemaker model is deployed, eg. `us-west-2`."""
credentials_profile_name: Optional[str] = None
"""The name of the profile in the ~/.aws/credentials or ~/.aws/config files, which
has either access keys or role information specified.
If not specified, the default credential profile or, if on an EC2 instance,
credentials from IMDS will be used.
See: https://boto3.amazonaws.com/v1/documentation/api/latest/guide/credentials.html
"""
content_handler: ContentHandlerBase
"""The content handler class that provides an input and
output transform functions to handle formats between LLM
and the endpoint.
"""
"""
Example:
.. code-block:: python
class ContentHandler(ContentHandlerBase):
content_type = "application/json"
accepts = "application/json"
def transform_input(self, prompt: str, model_kwargs: Dict) -> bytes:
                    input_str = json.dumps({"prompt": prompt, **model_kwargs})
return input_str.encode('utf-8')
def transform_output(self, output: bytes) -> str:
response_json = json.loads(output.read().decode("utf-8"))
return response_json[0]["generated_text"]
"""
model_kwargs: Optional[Dict] = None
"""Key word arguments to pass to the model."""
endpoint_kwargs: Optional[Dict] = None
"""Optional attributes passed to the invoke_endpoint
    function. See the `boto3`_ docs for more info.
.. _boto3: <https://boto3.amazonaws.com/v1/documentation/api/latest/index.html>
"""
class Config:
"""Configuration for this pydantic object."""
extra = Extra.forbid
@root_validator()
def validate_environment(cls, values: Dict) -> Dict:
"""Validate that AWS credentials to and python package exists in environment."""
try:
import boto3
try:
if values["credentials_profile_name"] is not None:
session = boto3.Session(
profile_name=values["credentials_profile_name"]
)
else:
# use default credentials
session = boto3.Session()
values["client"] = session.client(
"sagemaker-runtime", region_name=values["region_name"]
)
except Exception as e:
raise ValueError(
"Could not load credentials to authenticate with AWS client. "
"Please check that credentials in the specified "
"profile name are valid."
) from e
except ImportError:
raise ValueError(
"Could not import boto3 python package. "
"Please it install it with `pip install boto3`."
)
return values
@property
def _identifying_params(self) -> Mapping[str, Any]:
"""Get the identifying parameters."""
_model_kwargs = self.model_kwargs or {}
return {
**{"endpoint_name": self.endpoint_name},
**{"model_kwargs": _model_kwargs},
}
@property
def _llm_type(self) -> str:
"""Return type of llm."""
return "sagemaker_endpoint"
def _call(self, prompt: str, stop: Optional[List[str]] = None) -> str:
"""Call out to Sagemaker inference endpoint.
Args:
prompt: The prompt to pass into the model.
stop: Optional list of stop words to use when generating.
Returns:
The string generated by the model.
Example:
.. code-block:: python
response = se("Tell me a joke.")
"""
_model_kwargs = self.model_kwargs or {}
_endpoint_kwargs = self.endpoint_kwargs or {}
body = self.content_handler.transform_input(prompt, _model_kwargs)
content_type = self.content_handler.content_type
accepts = self.content_handler.accepts
# send request
try:
response = self.client.invoke_endpoint(
EndpointName=self.endpoint_name,
Body=body,
ContentType=content_type,
Accept=accepts,
**_endpoint_kwargs,
)
except Exception as e:
raise ValueError(f"Error raised by inference endpoint: {e}")
text = self.content_handler.transform_output(response["Body"])
if stop is not None:
# This is a bit hacky, but I can't figure out a better way to enforce
# stop tokens when making calls to the sagemaker endpoint.
text = enforce_stop_tokens(text, stop)
return text
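# Illustrative usage sketch (not part of the library): the endpoint name,
# region, and JSON schema ("text_inputs" / "generated_texts") below are
# assumptions about a hypothetical deployment, not a real one.
if __name__ == "__main__":
    import json
    class ExampleContentHandler(ContentHandlerBase):
        content_type = "application/json"
        accepts = "application/json"
        def transform_input(self, prompt: str, model_kwargs: Dict) -> bytes:
            # Serialize the prompt and model kwargs into the request body.
            return json.dumps({"text_inputs": prompt, **model_kwargs}).encode("utf-8")
        def transform_output(self, output: bytes) -> str:
            # Read the response body and extract the generated text.
            response_json = json.loads(output.read().decode("utf-8"))
            return response_json["generated_texts"][0]
    llm = SagemakerEndpoint(
        endpoint_name="my-example-endpoint",  # assumed endpoint name
        region_name="us-west-2",
        content_handler=ExampleContentHandler(),
        model_kwargs={"max_length": 200},
    )
    print(llm("Tell me a joke."))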
| [] |
2024-01-10 | gojira/langchain | langchain~document_loaders~url.py | """Loader that uses unstructured to load HTML files."""
import logging
from typing import Any, List
from langchain.docstore.document import Document
from langchain.document_loaders.base import BaseLoader
logger = logging.getLogger(__file__)
class UnstructuredURLLoader(BaseLoader):
"""Loader that uses unstructured to load HTML files."""
def __init__(
self,
urls: List[str],
continue_on_failure: bool = True,
headers: dict = {},
**unstructured_kwargs: Any,
):
"""Initialize with file path."""
try:
import unstructured # noqa:F401
from unstructured.__version__ import __version__ as __unstructured_version__
self.__version = __unstructured_version__
except ImportError:
raise ValueError(
"unstructured package not found, please install it with "
"`pip install unstructured`"
)
if not self.__is_headers_available() and len(headers.keys()) != 0:
logger.warning(
"You are using old version of unstructured. "
"The headers parameter is ignored"
)
self.urls = urls
self.continue_on_failure = continue_on_failure
self.headers = headers
self.unstructured_kwargs = unstructured_kwargs
def __is_headers_available(self) -> bool:
_unstructured_version = self.__version.split("-")[0]
unstructured_version = tuple([int(x) for x in _unstructured_version.split(".")])
return unstructured_version >= (0, 5, 7)
def load(self) -> List[Document]:
"""Load file."""
from unstructured.partition.html import partition_html
docs: List[Document] = list()
for url in self.urls:
try:
if self.__is_headers_available():
elements = partition_html(
url=url, headers=self.headers, **self.unstructured_kwargs
)
else:
elements = partition_html(url=url, **self.unstructured_kwargs)
except Exception as e:
if self.continue_on_failure:
logger.error(f"Error fetching or processing {url}, exeption: {e}")
else:
raise e
text = "\n\n".join([str(el) for el in elements])
metadata = {"source": url}
docs.append(Document(page_content=text, metadata=metadata))
return docs
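# Minimal usage sketch; the URL below is an arbitrary example and loading it
# requires the `unstructured` package to be installed.
if __name__ == "__main__":
    loader = UnstructuredURLLoader(urls=["https://www.example.com"])
    documents = loader.load()
    print(len(documents), documents[0].metadata if documents else "no documents")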
| [] |
2024-01-10 | gojira/langchain | langchain~agents~agent_toolkits~openapi~planner.py | """Agent that interacts with OpenAPI APIs via a hierarchical planning approach."""
import json
import re
from typing import List, Optional
import yaml
from langchain.agents.agent import AgentExecutor
from langchain.agents.agent_toolkits.openapi.planner_prompt import (
API_CONTROLLER_PROMPT,
API_CONTROLLER_TOOL_DESCRIPTION,
API_CONTROLLER_TOOL_NAME,
API_ORCHESTRATOR_PROMPT,
API_PLANNER_PROMPT,
API_PLANNER_TOOL_DESCRIPTION,
API_PLANNER_TOOL_NAME,
PARSING_GET_PROMPT,
PARSING_POST_PROMPT,
REQUESTS_GET_TOOL_DESCRIPTION,
REQUESTS_POST_TOOL_DESCRIPTION,
)
from langchain.agents.agent_toolkits.openapi.spec import ReducedOpenAPISpec
from langchain.agents.mrkl.base import ZeroShotAgent
from langchain.agents.tools import Tool
from langchain.chains.llm import LLMChain
from langchain.llms.openai import OpenAI
from langchain.prompts import PromptTemplate
from langchain.requests import RequestsWrapper
from langchain.schema import BaseLanguageModel
from langchain.tools.base import BaseTool
from langchain.tools.requests.tool import BaseRequestsTool
#
# Requests tools with LLM-instructed extraction of truncated responses.
#
# Of course, truncating so bluntly may lose a lot of valuable
# information in the response.
# However, the goal for now is to have only a single inference step.
MAX_RESPONSE_LENGTH = 5000
class RequestsGetToolWithParsing(BaseRequestsTool, BaseTool):
name = "requests_get"
description = REQUESTS_GET_TOOL_DESCRIPTION
response_length: Optional[int] = MAX_RESPONSE_LENGTH
llm_chain = LLMChain(
llm=OpenAI(),
prompt=PARSING_GET_PROMPT,
)
def _run(self, text: str) -> str:
try:
data = json.loads(text)
except json.JSONDecodeError as e:
raise e
response = self.requests_wrapper.get(data["url"])
response = response[: self.response_length]
return self.llm_chain.predict(
response=response, instructions=data["output_instructions"]
).strip()
async def _arun(self, text: str) -> str:
raise NotImplementedError()
class RequestsPostToolWithParsing(BaseRequestsTool, BaseTool):
name = "requests_post"
description = REQUESTS_POST_TOOL_DESCRIPTION
response_length: Optional[int] = MAX_RESPONSE_LENGTH
llm_chain = LLMChain(
llm=OpenAI(),
prompt=PARSING_POST_PROMPT,
)
def _run(self, text: str) -> str:
try:
data = json.loads(text)
except json.JSONDecodeError as e:
raise e
response = self.requests_wrapper.post(data["url"], data["data"])
response = response[: self.response_length]
return self.llm_chain.predict(
response=response, instructions=data["output_instructions"]
).strip()
async def _arun(self, text: str) -> str:
raise NotImplementedError()
#
# Orchestrator, planner, controller.
#
def _create_api_planner_tool(
api_spec: ReducedOpenAPISpec, llm: BaseLanguageModel
) -> Tool:
endpoint_descriptions = [
f"{name} {description}" for name, description, _ in api_spec.endpoints
]
prompt = PromptTemplate(
template=API_PLANNER_PROMPT,
input_variables=["query"],
partial_variables={"endpoints": "- " + "- ".join(endpoint_descriptions)},
)
chain = LLMChain(llm=llm, prompt=prompt)
tool = Tool(
name=API_PLANNER_TOOL_NAME,
description=API_PLANNER_TOOL_DESCRIPTION,
func=chain.run,
)
return tool
def _create_api_controller_agent(
api_url: str,
api_docs: str,
requests_wrapper: RequestsWrapper,
llm: BaseLanguageModel,
) -> AgentExecutor:
tools: List[BaseTool] = [
RequestsGetToolWithParsing(requests_wrapper=requests_wrapper),
RequestsPostToolWithParsing(requests_wrapper=requests_wrapper),
]
prompt = PromptTemplate(
template=API_CONTROLLER_PROMPT,
input_variables=["input", "agent_scratchpad"],
partial_variables={
"api_url": api_url,
"api_docs": api_docs,
"tool_names": ", ".join([tool.name for tool in tools]),
"tool_descriptions": "\n".join(
[f"{tool.name}: {tool.description}" for tool in tools]
),
},
)
agent = ZeroShotAgent(
llm_chain=LLMChain(llm=llm, prompt=prompt),
allowed_tools=[tool.name for tool in tools],
)
return AgentExecutor.from_agent_and_tools(agent=agent, tools=tools, verbose=True)
def _create_api_controller_tool(
api_spec: ReducedOpenAPISpec,
requests_wrapper: RequestsWrapper,
llm: BaseLanguageModel,
) -> Tool:
"""Expose controller as a tool.
The tool is invoked with a plan from the planner, and dynamically
creates a controller agent with relevant documentation only to
constrain the context.
"""
base_url = api_spec.servers[0]["url"] # TODO: do better.
def _create_and_run_api_controller_agent(plan_str: str) -> str:
pattern = r"\b(GET|POST)\s+(/\S+)*"
matches = re.findall(pattern, plan_str)
endpoint_names = [
"{method} {route}".format(method=method, route=route.split("?")[0])
for method, route in matches
]
endpoint_docs_by_name = {name: docs for name, _, docs in api_spec.endpoints}
docs_str = ""
for endpoint_name in endpoint_names:
docs = endpoint_docs_by_name.get(endpoint_name)
if not docs:
raise ValueError(f"{endpoint_name} endpoint does not exist.")
docs_str += f"== Docs for {endpoint_name} == \n{yaml.dump(docs)}\n"
agent = _create_api_controller_agent(base_url, docs_str, requests_wrapper, llm)
return agent.run(plan_str)
return Tool(
name=API_CONTROLLER_TOOL_NAME,
func=_create_and_run_api_controller_agent,
description=API_CONTROLLER_TOOL_DESCRIPTION,
)
def create_openapi_agent(
api_spec: ReducedOpenAPISpec,
requests_wrapper: RequestsWrapper,
llm: BaseLanguageModel,
) -> AgentExecutor:
"""Instantiate API planner and controller for a given spec.
Inject credentials via requests_wrapper.
We use a top-level "orchestrator" agent to invoke the planner and controller,
rather than a top-level planner
that invokes a controller with its plan. This is to keep the planner simple.
"""
tools = [
_create_api_planner_tool(api_spec, llm),
_create_api_controller_tool(api_spec, requests_wrapper, llm),
]
prompt = PromptTemplate(
template=API_ORCHESTRATOR_PROMPT,
input_variables=["input", "agent_scratchpad"],
partial_variables={
"tool_names": ", ".join([tool.name for tool in tools]),
"tool_descriptions": "\n".join(
[f"{tool.name}: {tool.description}" for tool in tools]
),
},
)
agent = ZeroShotAgent(
llm_chain=LLMChain(llm=llm, prompt=prompt),
allowed_tools=[tool.name for tool in tools],
)
return AgentExecutor.from_agent_and_tools(agent=agent, tools=tools, verbose=True)
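# Rough wiring sketch for the hierarchical agent; the spec filename and the
# Authorization header are assumptions, and reduce_openapi_spec converts a raw
# OpenAPI dict into the ReducedOpenAPISpec consumed above.
if __name__ == "__main__":
    from langchain.agents.agent_toolkits.openapi.spec import reduce_openapi_spec
    with open("openapi.yaml") as f:  # hypothetical spec file
        raw_spec = yaml.load(f, Loader=yaml.Loader)
    reduced_spec = reduce_openapi_spec(raw_spec)
    requests_wrapper = RequestsWrapper(headers={"Authorization": "Bearer <token>"})
    agent = create_openapi_agent(reduced_spec, requests_wrapper, OpenAI(temperature=0))
    agent.run("Plan and execute a simple GET request against this API.")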
| [
"tool_descriptions",
"\n",
"tool_names",
"agent_scratchpad",
"- ",
"input",
", ",
"endpoints"
] |
2024-01-10 | gojira/langchain | langchain~prompts~example_selector~semantic_similarity.py | """Example selector that selects examples based on SemanticSimilarity."""
from __future__ import annotations
from typing import Any, Dict, List, Optional, Type
from pydantic import BaseModel, Extra
from langchain.embeddings.base import Embeddings
from langchain.prompts.example_selector.base import BaseExampleSelector
from langchain.vectorstores.base import VectorStore
def sorted_values(values: Dict[str, str]) -> List[Any]:
"""Return a list of values in dict sorted by key."""
return [values[val] for val in sorted(values)]
class SemanticSimilarityExampleSelector(BaseExampleSelector, BaseModel):
"""Example selector that selects examples based on SemanticSimilarity."""
vectorstore: VectorStore
"""VectorStore than contains information about examples."""
k: int = 4
"""Number of examples to select."""
example_keys: Optional[List[str]] = None
"""Optional keys to filter examples to."""
input_keys: Optional[List[str]] = None
"""Optional keys to filter input to. If provided, the search is based on
the input variables instead of all variables."""
class Config:
"""Configuration for this pydantic object."""
extra = Extra.forbid
arbitrary_types_allowed = True
def add_example(self, example: Dict[str, str]) -> str:
"""Add new example to vectorstore."""
if self.input_keys:
string_example = " ".join(
sorted_values({key: example[key] for key in self.input_keys})
)
else:
string_example = " ".join(sorted_values(example))
ids = self.vectorstore.add_texts([string_example], metadatas=[example])
return ids[0]
def select_examples(self, input_variables: Dict[str, str]) -> List[dict]:
"""Select which examples to use based on semantic similarity."""
# Get the docs with the highest similarity.
if self.input_keys:
input_variables = {key: input_variables[key] for key in self.input_keys}
query = " ".join(sorted_values(input_variables))
example_docs = self.vectorstore.similarity_search(query, k=self.k)
# Get the examples from the metadata.
# This assumes that examples are stored in metadata.
examples = [dict(e.metadata) for e in example_docs]
# If example keys are provided, filter examples to those keys.
if self.example_keys:
examples = [{k: eg[k] for k in self.example_keys} for eg in examples]
return examples
@classmethod
def from_examples(
cls,
examples: List[dict],
embeddings: Embeddings,
vectorstore_cls: Type[VectorStore],
k: int = 4,
input_keys: Optional[List[str]] = None,
**vectorstore_cls_kwargs: Any,
) -> SemanticSimilarityExampleSelector:
"""Create k-shot example selector using example list and embeddings.
Reshuffles examples dynamically based on query similarity.
Args:
examples: List of examples to use in the prompt.
            embeddings: An initialized embedding API interface, e.g. OpenAIEmbeddings().
vectorstore_cls: A vector store DB interface class, e.g. FAISS.
k: Number of examples to select
input_keys: If provided, the search is based on the input variables
instead of all variables.
vectorstore_cls_kwargs: optional kwargs containing url for vector store
Returns:
The ExampleSelector instantiated, backed by a vector store.
"""
if input_keys:
string_examples = [
" ".join(sorted_values({k: eg[k] for k in input_keys}))
for eg in examples
]
else:
string_examples = [" ".join(sorted_values(eg)) for eg in examples]
vectorstore = vectorstore_cls.from_texts(
string_examples, embeddings, metadatas=examples, **vectorstore_cls_kwargs
)
return cls(vectorstore=vectorstore, k=k, input_keys=input_keys)
class MaxMarginalRelevanceExampleSelector(SemanticSimilarityExampleSelector):
"""ExampleSelector that selects examples based on Max Marginal Relevance.
This was shown to improve performance in this paper:
https://arxiv.org/pdf/2211.13892.pdf
"""
fetch_k: int = 20
"""Number of examples to fetch to rerank."""
def select_examples(self, input_variables: Dict[str, str]) -> List[dict]:
"""Select which examples to use based on semantic similarity."""
# Get the docs with the highest similarity.
if self.input_keys:
input_variables = {key: input_variables[key] for key in self.input_keys}
query = " ".join(sorted_values(input_variables))
example_docs = self.vectorstore.max_marginal_relevance_search(
query, k=self.k, fetch_k=self.fetch_k
)
# Get the examples from the metadata.
# This assumes that examples are stored in metadata.
examples = [dict(e.metadata) for e in example_docs]
# If example keys are provided, filter examples to those keys.
if self.example_keys:
examples = [{k: eg[k] for k in self.example_keys} for eg in examples]
return examples
@classmethod
def from_examples(
cls,
examples: List[dict],
embeddings: Embeddings,
vectorstore_cls: Type[VectorStore],
k: int = 4,
input_keys: Optional[List[str]] = None,
fetch_k: int = 20,
**vectorstore_cls_kwargs: Any,
) -> MaxMarginalRelevanceExampleSelector:
"""Create k-shot example selector using example list and embeddings.
Reshuffles examples dynamically based on query similarity.
Args:
examples: List of examples to use in the prompt.
            embeddings: An initialized embedding API interface, e.g. OpenAIEmbeddings().
vectorstore_cls: A vector store DB interface class, e.g. FAISS.
k: Number of examples to select
input_keys: If provided, the search is based on the input variables
instead of all variables.
vectorstore_cls_kwargs: optional kwargs containing url for vector store
Returns:
The ExampleSelector instantiated, backed by a vector store.
"""
if input_keys:
string_examples = [
" ".join(sorted_values({k: eg[k] for k in input_keys}))
for eg in examples
]
else:
string_examples = [" ".join(sorted_values(eg)) for eg in examples]
vectorstore = vectorstore_cls.from_texts(
string_examples, embeddings, metadatas=examples, **vectorstore_cls_kwargs
)
return cls(vectorstore=vectorstore, k=k, fetch_k=fetch_k, input_keys=input_keys)
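# Brief usage sketch; OpenAIEmbeddings and FAISS are just one possible
# embeddings/vector-store pairing, and the tiny example set is illustrative.
if __name__ == "__main__":
    from langchain.embeddings import OpenAIEmbeddings
    from langchain.vectorstores import FAISS
    examples = [
        {"input": "happy", "output": "sad"},
        {"input": "tall", "output": "short"},
    ]
    selector = SemanticSimilarityExampleSelector.from_examples(
        examples, OpenAIEmbeddings(), FAISS, k=1
    )
    print(selector.select_examples({"input": "joyful"}))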
| [] |
2024-01-10 | gojira/langchain | tests~unit_tests~agents~test_react.py | """Unit tests for ReAct."""
from typing import Any, List, Mapping, Optional, Union
from langchain.agents.react.base import ReActChain, ReActDocstoreAgent
from langchain.agents.tools import Tool
from langchain.docstore.base import Docstore
from langchain.docstore.document import Document
from langchain.llms.base import LLM
from langchain.prompts.prompt import PromptTemplate
from langchain.schema import AgentAction
_PAGE_CONTENT = """This is a page about LangChain.
It is a really cool framework.
What isn't there to love about langchain?
Made in 2022."""
_FAKE_PROMPT = PromptTemplate(input_variables=["input"], template="{input}")
class FakeListLLM(LLM):
"""Fake LLM for testing that outputs elements of a list."""
responses: List[str]
i: int = -1
@property
def _llm_type(self) -> str:
"""Return type of llm."""
return "fake_list"
def _call(self, prompt: str, stop: Optional[List[str]] = None) -> str:
"""Increment counter, and then return response in that index."""
self.i += 1
return self.responses[self.i]
@property
def _identifying_params(self) -> Mapping[str, Any]:
return {}
class FakeDocstore(Docstore):
"""Fake docstore for testing purposes."""
def search(self, search: str) -> Union[str, Document]:
"""Return the fake document."""
document = Document(page_content=_PAGE_CONTENT)
return document
def test_predict_until_observation_normal() -> None:
"""Test predict_until_observation when observation is made normally."""
outputs = ["foo\nAction: Search[foo]"]
fake_llm = FakeListLLM(responses=outputs)
tools = [
Tool(name="Search", func=lambda x: x, description="foo"),
Tool(name="Lookup", func=lambda x: x, description="bar"),
]
agent = ReActDocstoreAgent.from_llm_and_tools(fake_llm, tools)
output = agent.plan([], input="")
expected_output = AgentAction("Search", "foo", outputs[0])
assert output == expected_output
def test_predict_until_observation_repeat() -> None:
"""Test when no action is generated initially."""
outputs = ["foo", " Search[foo]"]
fake_llm = FakeListLLM(responses=outputs)
tools = [
Tool(name="Search", func=lambda x: x, description="foo"),
Tool(name="Lookup", func=lambda x: x, description="bar"),
]
agent = ReActDocstoreAgent.from_llm_and_tools(fake_llm, tools)
output = agent.plan([], input="")
expected_output = AgentAction("Search", "foo", "foo\nAction: Search[foo]")
assert output == expected_output
def test_react_chain() -> None:
"""Test react chain."""
responses = [
"I should probably search\nAction: Search[langchain]",
"I should probably lookup\nAction: Lookup[made]",
"Ah okay now I know the answer\nAction: Finish[2022]",
]
fake_llm = FakeListLLM(responses=responses)
react_chain = ReActChain(llm=fake_llm, docstore=FakeDocstore())
output = react_chain.run("when was langchain made")
assert output == "2022"
def test_react_chain_bad_action() -> None:
"""Test react chain when bad action given."""
bad_action_name = "BadAction"
responses = [
f"I'm turning evil\nAction: {bad_action_name}[langchain]",
"Oh well\nAction: Finish[curses foiled again]",
]
fake_llm = FakeListLLM(responses=responses)
react_chain = ReActChain(llm=fake_llm, docstore=FakeDocstore())
output = react_chain.run("when was langchain made")
assert output == "curses foiled again"
| [
"input",
"{input}"
] |
2024-01-10 | gojira/langchain | langchain~llms~anthropic.py | """Wrapper around Anthropic APIs."""
import re
from typing import Any, Dict, Generator, List, Mapping, Optional
from pydantic import Extra, root_validator
from langchain.llms.base import LLM
from langchain.utils import get_from_dict_or_env
class Anthropic(LLM):
r"""Wrapper around Anthropic large language models.
To use, you should have the ``anthropic`` python package installed, and the
environment variable ``ANTHROPIC_API_KEY`` set with your API key, or pass
it as a named parameter to the constructor.
Example:
.. code-block:: python
import anthropic
from langchain.llms import Anthropic
model = Anthropic(model="<model_name>", anthropic_api_key="my-api-key")
# Simplest invocation, automatically wrapped with HUMAN_PROMPT
# and AI_PROMPT.
response = model("What are the biggest risks facing humanity?")
# Or if you want to use the chat mode, build a few-shot-prompt, or
# put words in the Assistant's mouth, use HUMAN_PROMPT and AI_PROMPT:
raw_prompt = "What are the biggest risks facing humanity?"
prompt = f"{anthropic.HUMAN_PROMPT} {prompt}{anthropic.AI_PROMPT}"
response = model(prompt)
"""
client: Any #: :meta private:
model: str = "claude-v1"
"""Model name to use."""
max_tokens_to_sample: int = 256
"""Denotes the number of tokens to predict per generation."""
temperature: float = 1.0
"""A non-negative float that tunes the degree of randomness in generation."""
top_k: int = 0
"""Number of most likely tokens to consider at each step."""
top_p: float = 1
"""Total probability mass of tokens to consider at each step."""
streaming: bool = False
"""Whether to stream the results."""
anthropic_api_key: Optional[str] = None
HUMAN_PROMPT: Optional[str] = None
AI_PROMPT: Optional[str] = None
class Config:
"""Configuration for this pydantic object."""
extra = Extra.forbid
@root_validator()
def validate_environment(cls, values: Dict) -> Dict:
"""Validate that api key and python package exists in environment."""
anthropic_api_key = get_from_dict_or_env(
values, "anthropic_api_key", "ANTHROPIC_API_KEY"
)
try:
import anthropic
values["client"] = anthropic.Client(anthropic_api_key)
values["HUMAN_PROMPT"] = anthropic.HUMAN_PROMPT
values["AI_PROMPT"] = anthropic.AI_PROMPT
except ImportError:
raise ValueError(
"Could not import anthropic python package. "
"Please it install it with `pip install anthropic`."
)
return values
@property
def _default_params(self) -> Mapping[str, Any]:
"""Get the default parameters for calling Anthropic API."""
return {
"max_tokens_to_sample": self.max_tokens_to_sample,
"temperature": self.temperature,
"top_k": self.top_k,
"top_p": self.top_p,
}
@property
def _identifying_params(self) -> Mapping[str, Any]:
"""Get the identifying parameters."""
return {**{"model": self.model}, **self._default_params}
@property
def _llm_type(self) -> str:
"""Return type of llm."""
return "anthropic"
def _wrap_prompt(self, prompt: str) -> str:
if not self.HUMAN_PROMPT or not self.AI_PROMPT:
raise NameError("Please ensure the anthropic package is loaded")
if prompt.startswith(self.HUMAN_PROMPT):
return prompt # Already wrapped.
# Guard against common errors in specifying wrong number of newlines.
corrected_prompt, n_subs = re.subn(r"^\n*Human:", self.HUMAN_PROMPT, prompt)
if n_subs == 1:
return corrected_prompt
# As a last resort, wrap the prompt ourselves to emulate instruct-style.
return f"{self.HUMAN_PROMPT} {prompt}{self.AI_PROMPT} Sure, here you go:\n"
def _get_anthropic_stop(self, stop: Optional[List[str]] = None) -> List[str]:
if not self.HUMAN_PROMPT or not self.AI_PROMPT:
raise NameError("Please ensure the anthropic package is loaded")
if stop is None:
stop = []
# Never want model to invent new turns of Human / Assistant dialog.
stop.extend([self.HUMAN_PROMPT, self.AI_PROMPT])
return stop
def _call(self, prompt: str, stop: Optional[List[str]] = None) -> str:
r"""Call out to Anthropic's completion endpoint.
Args:
prompt: The prompt to pass into the model.
stop: Optional list of stop words to use when generating.
Returns:
The string generated by the model.
Example:
.. code-block:: python
prompt = "What are the biggest risks facing humanity?"
prompt = f"\n\nHuman: {prompt}\n\nAssistant:"
response = model(prompt)
"""
stop = self._get_anthropic_stop(stop)
if self.streaming:
stream_resp = self.client.completion_stream(
model=self.model,
prompt=self._wrap_prompt(prompt),
stop_sequences=stop,
stream=True,
**self._default_params,
)
current_completion = ""
for data in stream_resp:
delta = data["completion"][len(current_completion) :]
current_completion = data["completion"]
self.callback_manager.on_llm_new_token(
delta, verbose=self.verbose, **data
)
return current_completion
response = self.client.completion(
model=self.model,
prompt=self._wrap_prompt(prompt),
stop_sequences=stop,
**self._default_params,
)
return response["completion"]
async def _acall(self, prompt: str, stop: Optional[List[str]] = None) -> str:
"""Call out to Anthropic's completion endpoint asynchronously."""
stop = self._get_anthropic_stop(stop)
if self.streaming:
stream_resp = await self.client.acompletion_stream(
model=self.model,
prompt=self._wrap_prompt(prompt),
stop_sequences=stop,
stream=True,
**self._default_params,
)
current_completion = ""
async for data in stream_resp:
delta = data["completion"][len(current_completion) :]
current_completion = data["completion"]
if self.callback_manager.is_async:
await self.callback_manager.on_llm_new_token(
delta, verbose=self.verbose, **data
)
else:
self.callback_manager.on_llm_new_token(
delta, verbose=self.verbose, **data
)
return current_completion
response = await self.client.acompletion(
model=self.model,
prompt=self._wrap_prompt(prompt),
stop_sequences=stop,
**self._default_params,
)
return response["completion"]
def stream(self, prompt: str, stop: Optional[List[str]] = None) -> Generator:
r"""Call Anthropic completion_stream and return the resulting generator.
BETA: this is a beta feature while we figure out the right abstraction.
Once that happens, this interface could change.
Args:
prompt: The prompt to pass into the model.
stop: Optional list of stop words to use when generating.
Returns:
A generator representing the stream of tokens from Anthropic.
Example:
.. code-block:: python
prompt = "Write a poem about a stream."
prompt = f"\n\nHuman: {prompt}\n\nAssistant:"
generator = anthropic.stream(prompt)
for token in generator:
yield token
"""
stop = self._get_anthropic_stop(stop)
return self.client.completion_stream(
model=self.model,
prompt=self._wrap_prompt(prompt),
stop_sequences=stop,
**self._default_params,
)
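# Streaming usage sketch; the model name and prompt are illustrative and an
# ANTHROPIC_API_KEY must be available in the environment.
if __name__ == "__main__":
    llm = Anthropic(model="claude-v1", temperature=0)
    current = ""
    for data in llm.stream("Write a haiku about rivers."):
        # The stream yields cumulative completions, so print only the new delta.
        delta = data["completion"][len(current) :]
        current = data["completion"]
        print(delta, end="", flush=True)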
| [
"None"
] |
2024-01-10 | gojira/langchain | langchain~llms~aleph_alpha.py | """Wrapper around Aleph Alpha APIs."""
from typing import Any, Dict, List, Optional, Sequence
from pydantic import Extra, root_validator
from langchain.llms.base import LLM
from langchain.llms.utils import enforce_stop_tokens
from langchain.utils import get_from_dict_or_env
class AlephAlpha(LLM):
"""Wrapper around Aleph Alpha large language models.
To use, you should have the ``aleph_alpha_client`` python package installed, and the
environment variable ``ALEPH_ALPHA_API_KEY`` set with your API key, or pass
it as a named parameter to the constructor.
Parameters are explained more in depth here:
https://github.com/Aleph-Alpha/aleph-alpha-client/blob/c14b7dd2b4325c7da0d6a119f6e76385800e097b/aleph_alpha_client/completion.py#L10
Example:
.. code-block:: python
from langchain.llms import AlephAlpha
            aleph_alpha = AlephAlpha(aleph_alpha_api_key="my-api-key")
"""
client: Any #: :meta private:
model: Optional[str] = "luminous-base"
"""Model name to use."""
maximum_tokens: int = 64
"""The maximum number of tokens to be generated."""
temperature: float = 0.0
"""A non-negative float that tunes the degree of randomness in generation."""
top_k: int = 0
"""Number of most likely tokens to consider at each step."""
top_p: float = 0.0
"""Total probability mass of tokens to consider at each step."""
presence_penalty: float = 0.0
"""Penalizes repeated tokens."""
frequency_penalty: float = 0.0
"""Penalizes repeated tokens according to frequency."""
repetition_penalties_include_prompt: Optional[bool] = False
"""Flag deciding whether presence penalty or frequency penalty are
updated from the prompt."""
use_multiplicative_presence_penalty: Optional[bool] = False
"""Flag deciding whether presence penalty is applied
multiplicatively (True) or additively (False)."""
penalty_bias: Optional[str] = None
"""Penalty bias for the completion."""
penalty_exceptions: Optional[List[str]] = None
"""List of strings that may be generated without penalty,
regardless of other penalty settings"""
penalty_exceptions_include_stop_sequences: Optional[bool] = None
"""Should stop_sequences be included in penalty_exceptions."""
best_of: Optional[int] = None
"""returns the one with the "best of" results
(highest log probability per token)
"""
n: int = 1
"""How many completions to generate for each prompt."""
logit_bias: Optional[Dict[int, float]] = None
"""The logit bias allows to influence the likelihood of generating tokens."""
log_probs: Optional[int] = None
"""Number of top log probabilities to be returned for each generated token."""
tokens: Optional[bool] = False
"""return tokens of completion."""
disable_optimizations: Optional[bool] = False
minimum_tokens: Optional[int] = 0
"""Generate at least this number of tokens."""
echo: bool = False
"""Echo the prompt in the completion."""
use_multiplicative_frequency_penalty: bool = False
sequence_penalty: float = 0.0
sequence_penalty_min_length: int = 2
use_multiplicative_sequence_penalty: bool = False
completion_bias_inclusion: Optional[Sequence[str]] = None
completion_bias_inclusion_first_token_only: bool = False
completion_bias_exclusion: Optional[Sequence[str]] = None
completion_bias_exclusion_first_token_only: bool = False
"""Only consider the first token for the completion_bias_exclusion."""
contextual_control_threshold: Optional[float] = None
"""If set to None, attention control parameters only apply to those tokens that have
explicitly been set in the request.
If set to a non-None value, control parameters are also applied to similar tokens.
"""
control_log_additive: Optional[bool] = True
"""True: apply control by adding the log(control_factor) to attention scores.
    False: (attention_scores - attention_scores.min(-1)) * control_factor
"""
repetition_penalties_include_completion: bool = True
"""Flag deciding whether presence penalty or frequency penalty
are updated from the completion."""
raw_completion: bool = False
"""Force the raw completion of the model to be returned."""
aleph_alpha_api_key: Optional[str] = None
"""API key for Aleph Alpha API."""
stop_sequences: Optional[List[str]] = None
"""Stop sequences to use."""
class Config:
"""Configuration for this pydantic object."""
extra = Extra.forbid
@root_validator()
def validate_environment(cls, values: Dict) -> Dict:
"""Validate that api key and python package exists in environment."""
aleph_alpha_api_key = get_from_dict_or_env(
values, "aleph_alpha_api_key", "ALEPH_ALPHA_API_KEY"
)
try:
import aleph_alpha_client
values["client"] = aleph_alpha_client.Client(token=aleph_alpha_api_key)
except ImportError:
raise ValueError(
"Could not import aleph_alpha_client python package. "
"Please it install it with `pip install aleph_alpha_client`."
)
return values
@property
def _default_params(self) -> Dict[str, Any]:
"""Get the default parameters for calling the Aleph Alpha API."""
return {
"maximum_tokens": self.maximum_tokens,
"temperature": self.temperature,
"top_k": self.top_k,
"top_p": self.top_p,
"presence_penalty": self.presence_penalty,
"frequency_penalty": self.frequency_penalty,
"n": self.n,
"repetition_penalties_include_prompt": self.repetition_penalties_include_prompt, # noqa: E501
"use_multiplicative_presence_penalty": self.use_multiplicative_presence_penalty, # noqa: E501
"penalty_bias": self.penalty_bias,
"penalty_exceptions": self.penalty_exceptions,
"penalty_exceptions_include_stop_sequences": self.penalty_exceptions_include_stop_sequences, # noqa: E501
"best_of": self.best_of,
"logit_bias": self.logit_bias,
"log_probs": self.log_probs,
"tokens": self.tokens,
"disable_optimizations": self.disable_optimizations,
"minimum_tokens": self.minimum_tokens,
"echo": self.echo,
"use_multiplicative_frequency_penalty": self.use_multiplicative_frequency_penalty, # noqa: E501
"sequence_penalty": self.sequence_penalty,
"sequence_penalty_min_length": self.sequence_penalty_min_length,
"use_multiplicative_sequence_penalty": self.use_multiplicative_sequence_penalty, # noqa: E501
"completion_bias_inclusion": self.completion_bias_inclusion,
"completion_bias_inclusion_first_token_only": self.completion_bias_inclusion_first_token_only, # noqa: E501
"completion_bias_exclusion": self.completion_bias_exclusion,
"completion_bias_exclusion_first_token_only": self.completion_bias_exclusion_first_token_only, # noqa: E501
"contextual_control_threshold": self.contextual_control_threshold,
"control_log_additive": self.control_log_additive,
"repetition_penalties_include_completion": self.repetition_penalties_include_completion, # noqa: E501
"raw_completion": self.raw_completion,
}
@property
def _identifying_params(self) -> Dict[str, Any]:
"""Get the identifying parameters."""
return {**{"model": self.model}, **self._default_params}
@property
def _llm_type(self) -> str:
"""Return type of llm."""
return "alpeh_alpha"
def _call(self, prompt: str, stop: Optional[List[str]] = None) -> str:
"""Call out to Aleph Alpha's completion endpoint.
Args:
prompt: The prompt to pass into the model.
stop: Optional list of stop words to use when generating.
Returns:
The string generated by the model.
Example:
.. code-block:: python
                response = aleph_alpha("Tell me a joke.")
"""
from aleph_alpha_client import CompletionRequest, Prompt
params = self._default_params
if self.stop_sequences is not None and stop is not None:
raise ValueError(
"stop sequences found in both the input and default params."
)
elif self.stop_sequences is not None:
params["stop_sequences"] = self.stop_sequences
else:
params["stop_sequences"] = stop
request = CompletionRequest(prompt=Prompt.from_text(prompt), **params)
response = self.client.complete(model=self.model, request=request)
text = response.completions[0].completion
# If stop tokens are provided, Aleph Alpha's endpoint returns them.
# In order to make this consistent with other endpoints, we strip them.
if stop is not None or self.stop_sequences is not None:
text = enforce_stop_tokens(text, params["stop_sequences"])
return text
| [] |
2024-01-10 | gojira/langchain | langchain~llms~__init__.py | """Wrappers on top of large language models APIs."""
from typing import Dict, Type
from langchain.llms.ai21 import AI21
from langchain.llms.aleph_alpha import AlephAlpha
from langchain.llms.anthropic import Anthropic
from langchain.llms.bananadev import Banana
from langchain.llms.base import BaseLLM
from langchain.llms.cerebriumai import CerebriumAI
from langchain.llms.cohere import Cohere
from langchain.llms.deepinfra import DeepInfra
from langchain.llms.forefrontai import ForefrontAI
from langchain.llms.gooseai import GooseAI
from langchain.llms.gpt4all import GPT4All
from langchain.llms.huggingface_endpoint import HuggingFaceEndpoint
from langchain.llms.huggingface_hub import HuggingFaceHub
from langchain.llms.huggingface_pipeline import HuggingFacePipeline
from langchain.llms.llamacpp import LlamaCpp
from langchain.llms.modal import Modal
from langchain.llms.nlpcloud import NLPCloud
from langchain.llms.openai import AzureOpenAI, OpenAI, OpenAIChat
from langchain.llms.petals import Petals
from langchain.llms.promptlayer_openai import PromptLayerOpenAI, PromptLayerOpenAIChat
from langchain.llms.replicate import Replicate
from langchain.llms.rwkv import RWKV
from langchain.llms.sagemaker_endpoint import SagemakerEndpoint
from langchain.llms.self_hosted import SelfHostedPipeline
from langchain.llms.self_hosted_hugging_face import SelfHostedHuggingFaceLLM
from langchain.llms.stochasticai import StochasticAI
from langchain.llms.writer import Writer
__all__ = [
"Anthropic",
"AlephAlpha",
"Banana",
"CerebriumAI",
"Cohere",
"DeepInfra",
"ForefrontAI",
"GooseAI",
"GPT4All",
"LlamaCpp",
"Modal",
"NLPCloud",
"OpenAI",
"OpenAIChat",
"Petals",
"HuggingFaceEndpoint",
"HuggingFaceHub",
"SagemakerEndpoint",
"HuggingFacePipeline",
"AI21",
"AzureOpenAI",
"Replicate",
"SelfHostedPipeline",
"SelfHostedHuggingFaceLLM",
"PromptLayerOpenAI",
"PromptLayerOpenAIChat",
"StochasticAI",
"Writer",
"RWKV",
]
type_to_cls_dict: Dict[str, Type[BaseLLM]] = {
"ai21": AI21,
"aleph_alpha": AlephAlpha,
"anthropic": Anthropic,
"bananadev": Banana,
"cerebriumai": CerebriumAI,
"cohere": Cohere,
"deepinfra": DeepInfra,
"forefrontai": ForefrontAI,
"gooseai": GooseAI,
"gpt4all": GPT4All,
"huggingface_hub": HuggingFaceHub,
"huggingface_endpoint": HuggingFaceEndpoint,
"llamacpp": LlamaCpp,
"modal": Modal,
"sagemaker_endpoint": SagemakerEndpoint,
"nlpcloud": NLPCloud,
"openai": OpenAI,
"petals": Petals,
"huggingface_pipeline": HuggingFacePipeline,
"azure": AzureOpenAI,
"replicate": Replicate,
"self_hosted": SelfHostedPipeline,
"self_hosted_hugging_face": SelfHostedHuggingFaceLLM,
"stochasticai": StochasticAI,
"writer": Writer,
"rwkv": RWKV,
}
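# Sketch of resolving an LLM class from the registry above; "openai" is one of
# the keys in type_to_cls_dict and instantiating it needs an OPENAI_API_KEY.
if __name__ == "__main__":
    llm_cls = type_to_cls_dict["openai"]
    llm = llm_cls(temperature=0)
    print(llm("Say hello in one word."))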
| [] |
2024-01-10 | gojira/langchain | langchain~memory~summary_buffer.py | from typing import Any, Dict, List
from pydantic import root_validator
from langchain.memory.chat_memory import BaseChatMemory
from langchain.memory.summary import SummarizerMixin
from langchain.schema import BaseMessage, get_buffer_string
class ConversationSummaryBufferMemory(BaseChatMemory, SummarizerMixin):
"""Buffer with summarizer for storing conversation memory."""
max_token_limit: int = 2000
moving_summary_buffer: str = ""
memory_key: str = "history"
@property
def buffer(self) -> List[BaseMessage]:
return self.chat_memory.messages
@property
def memory_variables(self) -> List[str]:
"""Will always return list of memory variables.
:meta private:
"""
return [self.memory_key]
def load_memory_variables(self, inputs: Dict[str, Any]) -> Dict[str, Any]:
"""Return history buffer."""
buffer = self.buffer
if self.moving_summary_buffer != "":
first_messages: List[BaseMessage] = [
self.summary_message_cls(content=self.moving_summary_buffer)
]
buffer = first_messages + buffer
if self.return_messages:
final_buffer: Any = buffer
else:
final_buffer = get_buffer_string(
buffer, human_prefix=self.human_prefix, ai_prefix=self.ai_prefix
)
return {self.memory_key: final_buffer}
@root_validator()
def validate_prompt_input_variables(cls, values: Dict) -> Dict:
"""Validate that prompt input variables are consistent."""
prompt_variables = values["prompt"].input_variables
expected_keys = {"summary", "new_lines"}
if expected_keys != set(prompt_variables):
raise ValueError(
"Got unexpected prompt input variables. The prompt expects "
f"{prompt_variables}, but it should have {expected_keys}."
)
return values
def save_context(self, inputs: Dict[str, Any], outputs: Dict[str, str]) -> None:
"""Save context from this conversation to buffer."""
super().save_context(inputs, outputs)
# Prune buffer if it exceeds max token limit
buffer = self.chat_memory.messages
curr_buffer_length = self.llm.get_num_tokens_from_messages(buffer)
if curr_buffer_length > self.max_token_limit:
pruned_memory = []
while curr_buffer_length > self.max_token_limit:
pruned_memory.append(buffer.pop(0))
curr_buffer_length = self.llm.get_num_tokens_from_messages(buffer)
self.moving_summary_buffer = self.predict_new_summary(
pruned_memory, self.moving_summary_buffer
)
def clear(self) -> None:
"""Clear memory contents."""
super().clear()
self.moving_summary_buffer = ""
| [] |
2024-01-10 | gojira/langchain | tests~integration_tests~vectorstores~test_pgvector.py | """Test PGVector functionality."""
import os
from typing import List
from sqlalchemy.orm import Session
from langchain.docstore.document import Document
from langchain.vectorstores.pgvector import PGVector
from tests.integration_tests.vectorstores.fake_embeddings import FakeEmbeddings
CONNECTION_STRING = PGVector.connection_string_from_db_params(
driver=os.environ.get("TEST_PGVECTOR_DRIVER", "psycopg2"),
host=os.environ.get("TEST_PGVECTOR_HOST", "localhost"),
port=int(os.environ.get("TEST_PGVECTOR_PORT", "5432")),
database=os.environ.get("TEST_PGVECTOR_DATABASE", "postgres"),
user=os.environ.get("TEST_PGVECTOR_USER", "postgres"),
password=os.environ.get("TEST_PGVECTOR_PASSWORD", "postgres"),
)
ADA_TOKEN_COUNT = 1536
class FakeEmbeddingsWithAdaDimension(FakeEmbeddings):
"""Fake embeddings functionality for testing."""
def embed_documents(self, texts: List[str]) -> List[List[float]]:
"""Return simple embeddings."""
return [
[float(1.0)] * (ADA_TOKEN_COUNT - 1) + [float(i)] for i in range(len(texts))
]
def embed_query(self, text: str) -> List[float]:
"""Return simple embeddings."""
return [float(1.0)] * (ADA_TOKEN_COUNT - 1) + [float(0.0)]
def test_pgvector() -> None:
"""Test end to end construction and search."""
texts = ["foo", "bar", "baz"]
docsearch = PGVector.from_texts(
texts=texts,
collection_name="test_collection",
embedding=FakeEmbeddingsWithAdaDimension(),
connection_string=CONNECTION_STRING,
pre_delete_collection=True,
)
output = docsearch.similarity_search("foo", k=1)
assert output == [Document(page_content="foo")]
def test_pgvector_with_metadatas() -> None:
"""Test end to end construction and search."""
texts = ["foo", "bar", "baz"]
metadatas = [{"page": str(i)} for i in range(len(texts))]
docsearch = PGVector.from_texts(
texts=texts,
collection_name="test_collection",
embedding=FakeEmbeddingsWithAdaDimension(),
metadatas=metadatas,
connection_string=CONNECTION_STRING,
pre_delete_collection=True,
)
output = docsearch.similarity_search("foo", k=1)
assert output == [Document(page_content="foo", metadata={"page": "0"})]
def test_pgvector_with_metadatas_with_scores() -> None:
"""Test end to end construction and search."""
texts = ["foo", "bar", "baz"]
metadatas = [{"page": str(i)} for i in range(len(texts))]
docsearch = PGVector.from_texts(
texts=texts,
collection_name="test_collection",
embedding=FakeEmbeddingsWithAdaDimension(),
metadatas=metadatas,
connection_string=CONNECTION_STRING,
pre_delete_collection=True,
)
output = docsearch.similarity_search_with_score("foo", k=1)
assert output == [(Document(page_content="foo", metadata={"page": "0"}), 0.0)]
def test_pgvector_with_filter_match() -> None:
"""Test end to end construction and search."""
texts = ["foo", "bar", "baz"]
metadatas = [{"page": str(i)} for i in range(len(texts))]
docsearch = PGVector.from_texts(
texts=texts,
collection_name="test_collection_filter",
embedding=FakeEmbeddingsWithAdaDimension(),
metadatas=metadatas,
connection_string=CONNECTION_STRING,
pre_delete_collection=True,
)
output = docsearch.similarity_search_with_score("foo", k=1, filter={"page": "0"})
assert output == [(Document(page_content="foo", metadata={"page": "0"}), 0.0)]
def test_pgvector_with_filter_distant_match() -> None:
"""Test end to end construction and search."""
texts = ["foo", "bar", "baz"]
metadatas = [{"page": str(i)} for i in range(len(texts))]
docsearch = PGVector.from_texts(
texts=texts,
collection_name="test_collection_filter",
embedding=FakeEmbeddingsWithAdaDimension(),
metadatas=metadatas,
connection_string=CONNECTION_STRING,
pre_delete_collection=True,
)
output = docsearch.similarity_search_with_score("foo", k=1, filter={"page": "2"})
assert output == [
(Document(page_content="baz", metadata={"page": "2"}), 0.0013003906671379406)
]
def test_pgvector_with_filter_no_match() -> None:
"""Test end to end construction and search."""
texts = ["foo", "bar", "baz"]
metadatas = [{"page": str(i)} for i in range(len(texts))]
docsearch = PGVector.from_texts(
texts=texts,
collection_name="test_collection_filter",
embedding=FakeEmbeddingsWithAdaDimension(),
metadatas=metadatas,
connection_string=CONNECTION_STRING,
pre_delete_collection=True,
)
output = docsearch.similarity_search_with_score("foo", k=1, filter={"page": "5"})
assert output == []
def test_pgvector_collection_with_metadata() -> None:
"""Test end to end collection construction"""
pgvector = PGVector(
collection_name="test_collection",
collection_metadata={"foo": "bar"},
embedding_function=FakeEmbeddingsWithAdaDimension(),
connection_string=CONNECTION_STRING,
pre_delete_collection=True,
)
session = Session(pgvector.connect())
collection = pgvector.get_collection(session)
if collection is None:
assert False, "Expected a CollectionStore object but received None"
else:
assert collection.name == "test_collection"
assert collection.cmetadata == {"foo": "bar"}
| [] |
2024-01-10 | gojira/langchain | langchain~evaluation~qa~eval_chain.py | """LLM Chain specifically for evaluating question answering."""
from __future__ import annotations
from typing import Any, List
from langchain import PromptTemplate
from langchain.chains.llm import LLMChain
from langchain.evaluation.qa.eval_prompt import CONTEXT_PROMPT, COT_PROMPT, PROMPT
from langchain.llms.base import BaseLLM
class QAEvalChain(LLMChain):
"""LLM Chain specifically for evaluating question answering."""
@classmethod
def from_llm(
cls, llm: BaseLLM, prompt: PromptTemplate = PROMPT, **kwargs: Any
) -> QAEvalChain:
"""Load QA Eval Chain from LLM.
Args:
llm (BaseLLM): the base language model to use.
prompt (PromptTemplate): A prompt template containing the input_variables:
'input', 'answer' and 'result' that will be used as the prompt
for evaluation.
Defaults to PROMPT.
**kwargs: additional keyword arguments.
Returns:
QAEvalChain: the loaded QA eval chain.
"""
expected_input_vars = {"query", "answer", "result"}
if expected_input_vars != set(prompt.input_variables):
raise ValueError(
f"Input variables should be {expected_input_vars}, "
f"but got {prompt.input_variables}"
)
return cls(llm=llm, prompt=prompt, **kwargs)
def evaluate(
self,
examples: List[dict],
predictions: List[dict],
question_key: str = "query",
answer_key: str = "answer",
prediction_key: str = "result",
) -> List[dict]:
"""Evaluate question answering examples and predictions."""
inputs = [
{
"query": example[question_key],
"answer": example[answer_key],
"result": predictions[i][prediction_key],
}
for i, example in enumerate(examples)
]
return self.apply(inputs)
class ContextQAEvalChain(LLMChain):
"""LLM Chain specifically for evaluating QA w/o GT based on context"""
@classmethod
def _validate_input_vars(cls, prompt: PromptTemplate) -> None:
expected_input_vars = {"query", "context", "result"}
if expected_input_vars != set(prompt.input_variables):
raise ValueError(
f"Input variables should be {expected_input_vars}, "
f"but got {prompt.input_variables}"
)
@classmethod
def from_llm(
cls, llm: BaseLLM, prompt: PromptTemplate = CONTEXT_PROMPT, **kwargs: Any
) -> ContextQAEvalChain:
"""Load QA Eval Chain from LLM.
Args:
llm (BaseLLM): the base language model to use.
prompt (PromptTemplate): A prompt template containing the input_variables:
'query', 'context' and 'result' that will be used as the prompt
for evaluation.
Defaults to PROMPT.
**kwargs: additional keyword arguments.
Returns:
ContextQAEvalChain: the loaded QA eval chain.
"""
cls._validate_input_vars(prompt)
return cls(llm=llm, prompt=prompt, **kwargs)
def evaluate(
self,
examples: List[dict],
predictions: List[dict],
question_key: str = "query",
context_key: str = "context",
prediction_key: str = "result",
) -> List[dict]:
"""Evaluate question answering examples and predictions."""
inputs = [
{
"query": example[question_key],
"context": example[context_key],
"result": predictions[i][prediction_key],
}
for i, example in enumerate(examples)
]
return self.apply(inputs)
class CotQAEvalChain(ContextQAEvalChain):
"""LLM Chain specifically for evaluating QA using chain of thought reasoning."""
@classmethod
def from_llm(
cls, llm: BaseLLM, prompt: PromptTemplate = COT_PROMPT, **kwargs: Any
) -> CotQAEvalChain:
cls._validate_input_vars(prompt)
return cls(llm=llm, prompt=prompt, **kwargs)
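# Illustrative grading sketch; the example and prediction dicts follow the
# default "query" / "answer" / "result" keys used by QAEvalChain.evaluate.
if __name__ == "__main__":
    from langchain.llms import OpenAI
    examples = [{"query": "What is 2 + 2?", "answer": "4"}]
    predictions = [{"result": "2 + 2 equals 4."}]
    eval_chain = QAEvalChain.from_llm(OpenAI(temperature=0))
    print(eval_chain.evaluate(examples, predictions))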
| [] |
2024-01-10 | gojira/langchain | langchain~chains~llm_requests.py | """Chain that hits a URL and then uses an LLM to parse results."""
from __future__ import annotations
from typing import Dict, List
from pydantic import Extra, Field, root_validator
from langchain.chains import LLMChain
from langchain.chains.base import Chain
from langchain.requests import TextRequestsWrapper
DEFAULT_HEADERS = {
"User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/87.0.4280.88 Safari/537.36" # noqa: E501
}
class LLMRequestsChain(Chain):
"""Chain that hits a URL and then uses an LLM to parse results."""
llm_chain: LLMChain
requests_wrapper: TextRequestsWrapper = Field(
default_factory=TextRequestsWrapper, exclude=True
)
text_length: int = 8000
requests_key: str = "requests_result" #: :meta private:
input_key: str = "url" #: :meta private:
output_key: str = "output" #: :meta private:
class Config:
"""Configuration for this pydantic object."""
extra = Extra.forbid
arbitrary_types_allowed = True
@property
def input_keys(self) -> List[str]:
"""Will be whatever keys the prompt expects.
:meta private:
"""
return [self.input_key]
@property
def output_keys(self) -> List[str]:
"""Will always return text key.
:meta private:
"""
return [self.output_key]
@root_validator()
def validate_environment(cls, values: Dict) -> Dict:
"""Validate that api key and python package exists in environment."""
try:
from bs4 import BeautifulSoup # noqa: F401
except ImportError:
raise ValueError(
"Could not import bs4 python package. "
"Please it install it with `pip install bs4`."
)
return values
def _call(self, inputs: Dict[str, str]) -> Dict[str, str]:
from bs4 import BeautifulSoup
# Other keys are assumed to be needed for LLM prediction
other_keys = {k: v for k, v in inputs.items() if k != self.input_key}
url = inputs[self.input_key]
res = self.requests_wrapper.get(url)
# extract the text from the html
soup = BeautifulSoup(res, "html.parser")
other_keys[self.requests_key] = soup.get_text()[: self.text_length]
result = self.llm_chain.predict(**other_keys)
return {self.output_key: result}
@property
def _chain_type(self) -> str:
return "llm_requests_chain"
| [] |
2024-01-10 | gojira/langchain | langchain~embeddings~aleph_alpha.py | from typing import Any, Dict, List, Optional
from pydantic import BaseModel, root_validator
from langchain.embeddings.base import Embeddings
from langchain.utils import get_from_dict_or_env
class AlephAlphaAsymmetricSemanticEmbedding(BaseModel, Embeddings):
"""
Wrapper for Aleph Alpha's Asymmetric Embeddings
    Aleph Alpha provides an endpoint to embed a document and a query.
The models were optimized to make the embeddings of documents and
the query for a document as similar as possible.
To learn more, check out: https://docs.aleph-alpha.com/docs/tasks/semantic_embed/
Example:
.. code-block:: python
            from langchain.embeddings import AlephAlphaAsymmetricSemanticEmbedding
            embeddings = AlephAlphaAsymmetricSemanticEmbedding()
document = "This is a content of the document"
query = "What is the content of the document?"
doc_result = embeddings.embed_documents([document])
query_result = embeddings.embed_query(query)
"""
client: Any #: :meta private:
model: Optional[str] = "luminous-base"
"""Model name to use."""
hosting: Optional[str] = "https://api.aleph-alpha.com"
"""Optional parameter that specifies which datacenters may process the request."""
normalize: Optional[bool] = True
"""Should returned embeddings be normalized"""
compress_to_size: Optional[int] = 128
"""Should the returned embeddings come back as an original 5120-dim vector,
or should it be compressed to 128-dim."""
contextual_control_threshold: Optional[int] = None
"""Attention control parameters only apply to those tokens that have
explicitly been set in the request."""
control_log_additive: Optional[bool] = True
"""Apply controls on prompt items by adding the log(control_factor)
to attention scores."""
@root_validator()
def validate_environment(cls, values: Dict) -> Dict:
"""Validate that api key and python package exists in environment."""
aleph_alpha_api_key = get_from_dict_or_env(
values, "aleph_alpha_api_key", "ALEPH_ALPHA_API_KEY"
)
try:
from aleph_alpha_client import Client
except ImportError:
raise ValueError(
"Could not import aleph_alpha_client python package. "
"Please it install it with `pip install aleph_alpha_client`."
)
values["client"] = Client(token=aleph_alpha_api_key)
return values
def embed_documents(self, texts: List[str]) -> List[List[float]]:
"""Call out to Aleph Alpha's asymmetric Document endpoint.
Args:
texts: The list of texts to embed.
Returns:
List of embeddings, one for each text.
"""
try:
from aleph_alpha_client import (
Prompt,
SemanticEmbeddingRequest,
SemanticRepresentation,
)
except ImportError:
raise ValueError(
"Could not import aleph_alpha_client python package. "
"Please it install it with `pip install aleph_alpha_client`."
)
document_embeddings = []
for text in texts:
document_params = {
"prompt": Prompt.from_text(text),
"representation": SemanticRepresentation.Document,
"compress_to_size": self.compress_to_size,
"normalize": self.normalize,
"contextual_control_threshold": self.contextual_control_threshold,
"control_log_additive": self.control_log_additive,
}
document_request = SemanticEmbeddingRequest(**document_params)
document_response = self.client.semantic_embed(
request=document_request, model=self.model
)
document_embeddings.append(document_response.embedding)
return document_embeddings
def embed_query(self, text: str) -> List[float]:
"""Call out to Aleph Alpha's asymmetric, query embedding endpoint
Args:
text: The text to embed.
Returns:
Embeddings for the text.
"""
try:
from aleph_alpha_client import (
Prompt,
SemanticEmbeddingRequest,
SemanticRepresentation,
)
except ImportError:
raise ValueError(
"Could not import aleph_alpha_client python package. "
"Please it install it with `pip install aleph_alpha_client`."
)
symmetric_params = {
"prompt": Prompt.from_text(text),
"representation": SemanticRepresentation.Query,
"compress_to_size": self.compress_to_size,
"normalize": self.normalize,
"contextual_control_threshold": self.contextual_control_threshold,
"control_log_additive": self.control_log_additive,
}
symmetric_request = SemanticEmbeddingRequest(**symmetric_params)
symmetric_response = self.client.semantic_embed(
request=symmetric_request, model=self.model
)
return symmetric_response.embedding
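# A minimal usage sketch for the asymmetric embeddings above, assuming
# ALEPH_ALPHA_API_KEY is set in the environment, numpy is installed, and the
# usual ``langchain.embeddings`` import path; the texts are placeholders.
# With ``normalize=True`` the dot product of a document embedding and a query
# embedding is their cosine similarity.
#
#   import numpy as np
#   from langchain.embeddings import AlephAlphaAsymmetricSemanticEmbedding
#
#   embeddings = AlephAlphaAsymmetricSemanticEmbedding()
#   doc_vectors = embeddings.embed_documents(["The invoice is due on March 1st."])
#   query_vector = embeddings.embed_query("When is the invoice due?")
#   score = float(np.dot(np.array(doc_vectors[0]), np.array(query_vector)))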
class AlephAlphaSymmetricSemanticEmbedding(AlephAlphaAsymmetricSemanticEmbedding):
"""The symmetric version of the Aleph Alpha's semantic embeddings.
The main difference is that here, both the documents and
queries are embedded with a SemanticRepresentation.Symmetric
Example:
.. code-block:: python
            from langchain.embeddings import AlephAlphaSymmetricSemanticEmbedding
            embeddings = AlephAlphaSymmetricSemanticEmbedding()
text = "This is a test text"
doc_result = embeddings.embed_documents([text])
query_result = embeddings.embed_query(text)
"""
def _embed(self, text: str) -> List[float]:
try:
from aleph_alpha_client import (
Prompt,
SemanticEmbeddingRequest,
SemanticRepresentation,
)
except ImportError:
raise ValueError(
"Could not import aleph_alpha_client python package. "
"Please it install it with `pip install aleph_alpha_client`."
)
query_params = {
"prompt": Prompt.from_text(text),
"representation": SemanticRepresentation.Symmetric,
"compress_to_size": self.compress_to_size,
"normalize": self.normalize,
"contextual_control_threshold": self.contextual_control_threshold,
"control_log_additive": self.control_log_additive,
}
query_request = SemanticEmbeddingRequest(**query_params)
query_response = self.client.semantic_embed(
request=query_request, model=self.model
)
return query_response.embedding
def embed_documents(self, texts: List[str]) -> List[List[float]]:
"""Call out to Aleph Alpha's Document endpoint.
Args:
texts: The list of texts to embed.
Returns:
List of embeddings, one for each text.
"""
document_embeddings = []
for text in texts:
document_embeddings.append(self._embed(text))
return document_embeddings
def embed_query(self, text: str) -> List[float]:
"""Call out to Aleph Alpha's asymmetric, query embedding endpoint
Args:
text: The text to embed.
Returns:
Embeddings for the text.
"""
return self._embed(text)
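# A minimal usage sketch for the symmetric embeddings, e.g. behind a vector
# store. It assumes ALEPH_ALPHA_API_KEY is set, the FAISS integration is
# installed, and the usual ``langchain`` import paths; texts are placeholders.
#
#   from langchain.embeddings import AlephAlphaSymmetricSemanticEmbedding
#   from langchain.vectorstores import FAISS
#
#   embeddings = AlephAlphaSymmetricSemanticEmbedding(compress_to_size=128)
#   store = FAISS.from_texts(["note about billing", "note about shipping"], embeddings)
#   hits = store.similarity_search("billing", k=1)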
| [] |
2024-01-10 | gojira/langchain | langchain~chains~mapreduce.py | """Map-reduce chain.
Splits up a document, sends the smaller parts to the LLM with one prompt,
then combines the results with another one.
"""
from __future__ import annotations
from typing import Dict, List
from pydantic import Extra
from langchain.chains.base import Chain
from langchain.chains.combine_documents.base import BaseCombineDocumentsChain
from langchain.chains.combine_documents.map_reduce import MapReduceDocumentsChain
from langchain.chains.combine_documents.stuff import StuffDocumentsChain
from langchain.chains.llm import LLMChain
from langchain.docstore.document import Document
from langchain.llms.base import BaseLLM
from langchain.prompts.base import BasePromptTemplate
from langchain.text_splitter import TextSplitter
class MapReduceChain(Chain):
"""Map-reduce chain."""
combine_documents_chain: BaseCombineDocumentsChain
"""Chain to use to combine documents."""
text_splitter: TextSplitter
"""Text splitter to use."""
input_key: str = "input_text" #: :meta private:
output_key: str = "output_text" #: :meta private:
@classmethod
def from_params(
cls, llm: BaseLLM, prompt: BasePromptTemplate, text_splitter: TextSplitter
) -> MapReduceChain:
"""Construct a map-reduce chain that uses the chain for map and reduce."""
llm_chain = LLMChain(llm=llm, prompt=prompt)
reduce_chain = StuffDocumentsChain(llm_chain=llm_chain)
combine_documents_chain = MapReduceDocumentsChain(
llm_chain=llm_chain, combine_document_chain=reduce_chain
)
return cls(
combine_documents_chain=combine_documents_chain, text_splitter=text_splitter
)
class Config:
"""Configuration for this pydantic object."""
extra = Extra.forbid
arbitrary_types_allowed = True
@property
def input_keys(self) -> List[str]:
"""Expect input key.
:meta private:
"""
return [self.input_key]
@property
def output_keys(self) -> List[str]:
"""Return output key.
:meta private:
"""
return [self.output_key]
def _call(self, inputs: Dict[str, str]) -> Dict[str, str]:
# Split the larger text into smaller chunks.
texts = self.text_splitter.split_text(inputs[self.input_key])
docs = [Document(page_content=text) for text in texts]
outputs, _ = self.combine_documents_chain.combine_docs(docs)
return {self.output_key: outputs}
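# A minimal usage sketch, assuming an OpenAI API key is configured; the prompt
# and input text are placeholders. As in ``from_params`` above, the same
# prompt drives both the map step (each chunk) and the reduce step that
# combines the partial results.
#
#   from langchain.chains.mapreduce import MapReduceChain
#   from langchain.llms import OpenAI
#   from langchain.prompts import PromptTemplate
#   from langchain.text_splitter import CharacterTextSplitter
#
#   prompt = PromptTemplate(
#       input_variables=["text"],
#       template="Write a concise summary of the following:\n{text}",
#   )
#   chain = MapReduceChain.from_params(
#       llm=OpenAI(temperature=0),
#       prompt=prompt,
#       text_splitter=CharacterTextSplitter(),
#   )
#   summary = chain.run("<a long document to summarize>")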
| [] |