date_collected (stringclasses, 1 value) | repo_name (stringlengths 6-116) | file_name (stringlengths 2-220) | file_contents (stringlengths 13-357k) | prompts (sequence)
---|---|---|---|---|
2024-01-10 | jerhadf/langflow-jh | src~backend~langflow~components~utilities~GetRequest.py | from langflow import CustomComponent
from langchain.schema import Document
from langflow.services.database.models.base import orjson_dumps
import requests
from typing import Optional
class GetRequest(CustomComponent):
display_name: str = "GET Request"
description: str = "Make a GET request to the given URL."
output_types: list[str] = ["Document"]
documentation: str = "https://docs.langflow.org/components/utilities#get-request"
beta = True
field_config = {
"url": {
"display_name": "URL",
"info": "The URL to make the request to",
"is_list": True,
},
"headers": {
"display_name": "Headers",
"info": "The headers to send with the request.",
},
"code": {"show": False},
"timeout": {
"display_name": "Timeout",
"field_type": "int",
"info": "The timeout to use for the request.",
"value": 5,
},
}
def get_document(
self, session: requests.Session, url: str, headers: Optional[dict], timeout: int
) -> Document:
try:
response = session.get(url, headers=headers, timeout=int(timeout))
try:
response_json = response.json()
result = orjson_dumps(response_json, indent_2=False)
except Exception:
result = response.text
self.repr_value = result
return Document(
page_content=result,
metadata={
"source": url,
"headers": headers,
"status_code": response.status_code,
},
)
except requests.Timeout:
return Document(
page_content="Request Timed Out",
metadata={"source": url, "headers": headers, "status_code": 408},
)
except Exception as exc:
return Document(
page_content=str(exc),
metadata={"source": url, "headers": headers, "status_code": 500},
)
def build(
self,
url: str,
headers: Optional[dict] = None,
timeout: int = 5,
) -> list[Document]:
if headers is None:
headers = {}
urls = url if isinstance(url, list) else [url]
with requests.Session() as session:
documents = [self.get_document(session, u, headers, timeout) for u in urls]
self.repr_value = documents
return documents
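# Illustrative usage sketch (assumes the component can be instantiated and invoked
# directly outside the Langflow UI; not part of the original file):
#   component = GetRequest()
#   docs = component.build(url="https://example.com", timeout=5)
#   print(docs[0].metadata["status_code"])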
| [] |
2024-01-10 | takkat14/RL-assignments | hw3~atari_wrappers.py | # taken from OpenAI baselines.
import numpy as np
import gym
class MaxAndSkipEnv(gym.Wrapper):
def __init__(self, env, skip=4):
"""Return only every `skip`-th frame"""
gym.Wrapper.__init__(self, env)
# most recent raw observations (for max pooling across time steps)
self._obs_buffer = np.zeros(
(2,) + env.observation_space.shape, dtype=np.uint8)
self._skip = skip
def step(self, action):
"""Repeat action, sum reward, and max over last observations."""
total_reward = 0.0
done = None
for i in range(self._skip):
obs, reward, done, info, _ = self.env.step(action)
if i == self._skip - 2:
self._obs_buffer[0] = obs
if i == self._skip - 1:
self._obs_buffer[1] = obs
total_reward += reward
if done:
break
# Note that the observation on the done=True frame
# doesn't matter
max_frame = self._obs_buffer.max(axis=0)
return max_frame, total_reward, done, info, {}
def reset(self, **kwargs):
return self.env.reset(**kwargs)
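# Illustrative usage sketch (assumed Atari env id; the wrapper keeps the five-tuple
# step convention used throughout this file):
#   env = MaxAndSkipEnv(gym.make("BreakoutNoFrameskip-v4"), skip=4)
#   env.reset()
#   obs, reward, done, info, _ = env.step(env.action_space.sample())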
class ClipRewardEnv(gym.RewardWrapper):
def __init__(self, env):
gym.RewardWrapper.__init__(self, env)
def reward(self, reward):
"""Bin reward to {+1, 0, -1} by its sign."""
return np.sign(reward)
class FireResetEnv(gym.Wrapper):
def __init__(self, env):
"""Take action on reset for environments that are fixed until firing."""
gym.Wrapper.__init__(self, env)
assert env.unwrapped.get_action_meanings()[1] == 'FIRE'
assert len(env.unwrapped.get_action_meanings()) >= 3
def reset(self, **kwargs):
self.env.reset(**kwargs)
obs, _, done, _, _ = self.env.step(1)
if done:
self.env.reset(**kwargs)
obs, _, done, _, _ = self.env.step(2)
if done:
self.env.reset(**kwargs)
return obs, None
def step(self, ac):
return self.env.step(ac)
class EpisodicLifeEnv(gym.Wrapper):
def __init__(self, env):
"""Make end-of-life == end-of-episode, but only reset on true game over.
Done by DeepMind for the DQN and co. since it helps value estimation.
"""
gym.Wrapper.__init__(self, env)
self.lives = 0
self.was_real_done = True
def step(self, action):
obs, reward, done, info, _ = self.env.step(action)
self.was_real_done = done
# check current lives, make loss of life terminal,
# then update lives to handle bonus lives
lives = self.env.unwrapped.ale.lives()
if lives < self.lives and lives > 0:
# for Qbert sometimes we stay in lives == 0 condition for a few frames
# so it's important to keep lives > 0, so that we only reset once
# the environment advertises done.
done = True
self.lives = lives
return obs, reward, done, info, {}
def reset(self, **kwargs):
"""Reset only when lives are exhausted.
This way all states are still reachable even though lives are episodic,
and the learner need not know about any of this behind-the-scenes.
"""
if self.was_real_done:
obs = self.env.reset(**kwargs)
else:
# no-op step to advance from terminal/lost life state
obs, _, _, _, _ = self.env.step(0)
self.lives = self.env.unwrapped.ale.lives()
return obs, None
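# Illustrative DeepMind-style composition of the wrappers above (assumed env id,
# order follows the usual baselines convention; not part of the original file):
#   env = gym.make("BreakoutNoFrameskip-v4")
#   env = MaxAndSkipEnv(env, skip=4)
#   env = EpisodicLifeEnv(env)
#   env = FireResetEnv(env)
#   env = ClipRewardEnv(env)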
# in torch imgs have shape [c, h, w] instead of common [h, w, c]
class AntiTorchWrapper(gym.ObservationWrapper):
def __init__(self, env):
gym.ObservationWrapper.__init__(self, env)
self.img_size = [env.observation_space.shape[i]
for i in [1, 2, 0]
]
self.observation_space = gym.spaces.Box(0.0, 1.0, self.img_size)
def observation(self, img):
"""what happens to each observation"""
img = img.transpose(1, 2, 0)
return img
| [] |
2024-01-10 | 17600164659/dify | api~services~hit_testing_service.py | import logging
import time
from typing import List
import numpy as np
from flask import current_app
from langchain.embeddings.base import Embeddings
from langchain.schema import Document
from sklearn.manifold import TSNE
from core.embedding.cached_embedding import CacheEmbedding
from core.index.vector_index.vector_index import VectorIndex
from core.model_providers.model_factory import ModelFactory
from extensions.ext_database import db
from models.account import Account
from models.dataset import Dataset, DocumentSegment, DatasetQuery
class HitTestingService:
@classmethod
def retrieve(cls, dataset: Dataset, query: str, account: Account, limit: int = 10) -> dict:
if dataset.available_document_count == 0:
return {
"query": {
"content": query,
"tsne_position": {'x': 0, 'y': 0},
},
"records": []
}
embedding_model = ModelFactory.get_embedding_model(
tenant_id=dataset.tenant_id,
model_provider_name=dataset.embedding_model_provider,
model_name=dataset.embedding_model
)
embeddings = CacheEmbedding(embedding_model)
vector_index = VectorIndex(
dataset=dataset,
config=current_app.config,
embeddings=embeddings
)
start = time.perf_counter()
documents = vector_index.search(
query,
search_type='similarity_score_threshold',
search_kwargs={
'k': 10
}
)
end = time.perf_counter()
logging.debug(f"Hit testing retrieve in {end - start:0.4f} seconds")
dataset_query = DatasetQuery(
dataset_id=dataset.id,
content=query,
source='hit_testing',
created_by_role='account',
created_by=account.id
)
db.session.add(dataset_query)
db.session.commit()
return cls.compact_retrieve_response(dataset, embeddings, query, documents)
@classmethod
def compact_retrieve_response(cls, dataset: Dataset, embeddings: Embeddings, query: str, documents: List[Document]):
text_embeddings = [
embeddings.embed_query(query)
]
text_embeddings.extend(embeddings.embed_documents([document.page_content for document in documents]))
tsne_position_data = cls.get_tsne_positions_from_embeddings(text_embeddings)
query_position = tsne_position_data.pop(0)
i = 0
records = []
for document in documents:
index_node_id = document.metadata['doc_id']
segment = db.session.query(DocumentSegment).filter(
DocumentSegment.dataset_id == dataset.id,
DocumentSegment.enabled == True,
DocumentSegment.status == 'completed',
DocumentSegment.index_node_id == index_node_id
).first()
if not segment:
i += 1
continue
record = {
"segment": segment,
"score": document.metadata['score'],
"tsne_position": tsne_position_data[i]
}
records.append(record)
i += 1
return {
"query": {
"content": query,
"tsne_position": query_position,
},
"records": records
}
@classmethod
def get_tsne_positions_from_embeddings(cls, embeddings: list):
embedding_length = len(embeddings)
if embedding_length <= 1:
return [{'x': 0, 'y': 0}]
concatenate_data = np.array(embeddings).reshape(embedding_length, -1)
# concatenate_data = np.concatenate(embeddings)
perplexity = embedding_length / 2 + 1
if perplexity >= embedding_length:
perplexity = max(embedding_length - 1, 1)
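# scikit-learn's TSNE requires perplexity < n_samples, hence the clamp above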
tsne = TSNE(n_components=2, perplexity=perplexity, early_exaggeration=12.0)
data_tsne = tsne.fit_transform(concatenate_data)
tsne_position_data = []
for i in range(len(data_tsne)):
tsne_position_data.append({'x': float(data_tsne[i][0]), 'y': float(data_tsne[i][1])})
return tsne_position_data
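# Illustrative call with dummy 4-dimensional embeddings (not part of the original file):
#   HitTestingService.get_tsne_positions_from_embeddings(
#       [[0.1, 0.2, 0.3, 0.4], [0.4, 0.3, 0.2, 0.1], [0.5, 0.5, 0.5, 0.5]]
#   )
#   # -> three {'x': ..., 'y': ...} dicts, one per embedding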
| [] |
2024-01-10 | 17600164659/dify | api~tasks~enable_segment_to_index_task.py | import datetime
import logging
import time
import click
from celery import shared_task
from langchain.schema import Document
from werkzeug.exceptions import NotFound
from core.index.index import IndexBuilder
from extensions.ext_database import db
from extensions.ext_redis import redis_client
from models.dataset import DocumentSegment
@shared_task(queue='dataset')
def enable_segment_to_index_task(segment_id: str):
"""
Async enable segment to index
:param segment_id:
Usage: enable_segment_to_index_task.delay(segment_id)
"""
logging.info(click.style('Start enable segment to index: {}'.format(segment_id), fg='green'))
start_at = time.perf_counter()
segment = db.session.query(DocumentSegment).filter(DocumentSegment.id == segment_id).first()
if not segment:
raise NotFound('Segment not found')
if segment.status != 'completed':
return
indexing_cache_key = 'segment_{}_indexing'.format(segment.id)
try:
document = Document(
page_content=segment.content,
metadata={
"doc_id": segment.index_node_id,
"doc_hash": segment.index_node_hash,
"document_id": segment.document_id,
"dataset_id": segment.dataset_id,
}
)
dataset = segment.dataset
if not dataset:
logging.info(click.style('Segment {} has no dataset, pass.'.format(segment.id), fg='cyan'))
return
dataset_document = segment.document
if not dataset_document:
logging.info(click.style('Segment {} has no document, pass.'.format(segment.id), fg='cyan'))
return
if not dataset_document.enabled or dataset_document.archived or dataset_document.indexing_status != 'completed':
logging.info(click.style('Segment {} document status is invalid, pass.'.format(segment.id), fg='cyan'))
return
# save vector index
index = IndexBuilder.get_index(dataset, 'high_quality')
if index:
index.add_texts([document], duplicate_check=True)
# save keyword index
index = IndexBuilder.get_index(dataset, 'economy')
if index:
index.add_texts([document])
end_at = time.perf_counter()
logging.info(click.style('Segment enabled to index: {} latency: {}'.format(segment.id, end_at - start_at), fg='green'))
except Exception as e:
logging.exception("enable segment to index failed")
segment.enabled = False
segment.disabled_at = datetime.datetime.utcnow()
segment.status = 'error'
segment.error = str(e)
db.session.commit()
finally:
redis_client.delete(indexing_cache_key)
| [] |
2024-01-10 | 17600164659/dify | api~core~model_providers~providers~xinference_provider.py | import json
from typing import Type
from langchain.llms import Xinference
from core.helper import encrypter
from core.model_providers.models.embedding.xinference_embedding import XinferenceEmbedding
from core.model_providers.models.entity.model_params import KwargRule, ModelKwargsRules, ModelType
from core.model_providers.models.llm.xinference_model import XinferenceModel
from core.model_providers.providers.base import BaseModelProvider, CredentialsValidateFailedError
from core.model_providers.models.base import BaseProviderModel
from models.provider import ProviderType
class XinferenceProvider(BaseModelProvider):
@property
def provider_name(self):
"""
Returns the name of a provider.
"""
return 'xinference'
def _get_fixed_model_list(self, model_type: ModelType) -> list[dict]:
return []
def get_model_class(self, model_type: ModelType) -> Type[BaseProviderModel]:
"""
Returns the model class.
:param model_type:
:return:
"""
if model_type == ModelType.TEXT_GENERATION:
model_class = XinferenceModel
elif model_type == ModelType.EMBEDDINGS:
model_class = XinferenceEmbedding
else:
raise NotImplementedError
return model_class
def get_model_parameter_rules(self, model_name: str, model_type: ModelType) -> ModelKwargsRules:
"""
get model parameter rules.
:param model_name:
:param model_type:
:return:
"""
return ModelKwargsRules(
temperature=KwargRule[float](min=0, max=2, default=1),
top_p=KwargRule[float](min=0, max=1, default=0.7),
presence_penalty=KwargRule[float](min=-2, max=2, default=0),
frequency_penalty=KwargRule[float](min=-2, max=2, default=0),
max_tokens=KwargRule[int](min=10, max=4000, default=256),
)
@classmethod
def is_model_credentials_valid_or_raise(cls, model_name: str, model_type: ModelType, credentials: dict):
"""
check model credentials valid.
:param model_name:
:param model_type:
:param credentials:
"""
if 'server_url' not in credentials:
raise CredentialsValidateFailedError('Xinference Server URL must be provided.')
if 'model_uid' not in credentials:
raise CredentialsValidateFailedError('Xinference Model UID must be provided.')
try:
credential_kwargs = {
'server_url': credentials['server_url'],
'model_uid': credentials['model_uid'],
}
llm = Xinference(
**credential_kwargs
)
llm("ping", generate_config={'max_tokens': 10})
except Exception as ex:
raise CredentialsValidateFailedError(str(ex))
@classmethod
def encrypt_model_credentials(cls, tenant_id: str, model_name: str, model_type: ModelType,
credentials: dict) -> dict:
"""
encrypt model credentials for save.
:param tenant_id:
:param model_name:
:param model_type:
:param credentials:
:return:
"""
credentials['server_url'] = encrypter.encrypt_token(tenant_id, credentials['server_url'])
return credentials
def get_model_credentials(self, model_name: str, model_type: ModelType, obfuscated: bool = False) -> dict:
"""
get credentials for llm use.
:param model_name:
:param model_type:
:param obfuscated:
:return:
"""
if self.provider.provider_type != ProviderType.CUSTOM.value:
raise NotImplementedError
provider_model = self._get_provider_model(model_name, model_type)
if not provider_model.encrypted_config:
return {
'server_url': None,
'model_uid': None,
}
credentials = json.loads(provider_model.encrypted_config)
if credentials['server_url']:
credentials['server_url'] = encrypter.decrypt_token(
self.provider.tenant_id,
credentials['server_url']
)
if obfuscated:
credentials['server_url'] = encrypter.obfuscated_token(credentials['server_url'])
return credentials
@classmethod
def is_provider_credentials_valid_or_raise(cls, credentials: dict):
return
@classmethod
def encrypt_provider_credentials(cls, tenant_id: str, credentials: dict) -> dict:
return {}
def get_provider_credentials(self, obfuscated: bool = False) -> dict:
return {}
| [] |
2024-01-10 | 17600164659/dify | api~core~docstore~dataset_docstore.py | from typing import Any, Dict, Optional, Sequence
from langchain.schema import Document
from sqlalchemy import func
from core.model_providers.model_factory import ModelFactory
from extensions.ext_database import db
from models.dataset import Dataset, DocumentSegment
class DatesetDocumentStore:
def __init__(
self,
dataset: Dataset,
user_id: str,
document_id: Optional[str] = None,
):
self._dataset = dataset
self._user_id = user_id
self._document_id = document_id
@classmethod
def from_dict(cls, config_dict: Dict[str, Any]) -> "DatesetDocumentStore":
return cls(**config_dict)
def to_dict(self) -> Dict[str, Any]:
"""Serialize to dict."""
return {
"dataset_id": self._dataset.id,
}
@property
def dateset_id(self) -> Any:
return self._dataset.id
@property
def user_id(self) -> Any:
return self._user_id
@property
def docs(self) -> Dict[str, Document]:
document_segments = db.session.query(DocumentSegment).filter(
DocumentSegment.dataset_id == self._dataset.id
).all()
output = {}
for document_segment in document_segments:
doc_id = document_segment.index_node_id
output[doc_id] = Document(
page_content=document_segment.content,
metadata={
"doc_id": document_segment.index_node_id,
"doc_hash": document_segment.index_node_hash,
"document_id": document_segment.document_id,
"dataset_id": document_segment.dataset_id,
}
)
return output
def add_documents(
self, docs: Sequence[Document], allow_update: bool = True
) -> None:
max_position = db.session.query(func.max(DocumentSegment.position)).filter(
DocumentSegment.document_id == self._document_id
).scalar()
if max_position is None:
max_position = 0
embedding_model = ModelFactory.get_embedding_model(
tenant_id=self._dataset.tenant_id,
model_provider_name=self._dataset.embedding_model_provider,
model_name=self._dataset.embedding_model
)
for doc in docs:
if not isinstance(doc, Document):
raise ValueError("doc must be a Document")
segment_document = self.get_document(doc_id=doc.metadata['doc_id'], raise_error=False)
# NOTE: doc could already exist in the store, but we overwrite it
if not allow_update and segment_document:
raise ValueError(
f"doc_id {doc.metadata['doc_id']} already exists. "
"Set allow_update to True to overwrite."
)
# calc embedding use tokens
tokens = embedding_model.get_num_tokens(doc.page_content)
if not segment_document:
max_position += 1
segment_document = DocumentSegment(
tenant_id=self._dataset.tenant_id,
dataset_id=self._dataset.id,
document_id=self._document_id,
index_node_id=doc.metadata['doc_id'],
index_node_hash=doc.metadata['doc_hash'],
position=max_position,
content=doc.page_content,
word_count=len(doc.page_content),
tokens=tokens,
created_by=self._user_id,
)
if 'answer' in doc.metadata and doc.metadata['answer']:
segment_document.answer = doc.metadata.pop('answer', '')
db.session.add(segment_document)
else:
segment_document.content = doc.page_content
if 'answer' in doc.metadata and doc.metadata['answer']:
segment_document.answer = doc.metadata.pop('answer', '')
segment_document.index_node_hash = doc.metadata['doc_hash']
segment_document.word_count = len(doc.page_content)
segment_document.tokens = tokens
db.session.commit()
def document_exists(self, doc_id: str) -> bool:
"""Check if document exists."""
result = self.get_document_segment(doc_id)
return result is not None
def get_document(
self, doc_id: str, raise_error: bool = True
) -> Optional[Document]:
document_segment = self.get_document_segment(doc_id)
if document_segment is None:
if raise_error:
raise ValueError(f"doc_id {doc_id} not found.")
else:
return None
return Document(
page_content=document_segment.content,
metadata={
"doc_id": document_segment.index_node_id,
"doc_hash": document_segment.index_node_hash,
"document_id": document_segment.document_id,
"dataset_id": document_segment.dataset_id,
}
)
def delete_document(self, doc_id: str, raise_error: bool = True) -> None:
document_segment = self.get_document_segment(doc_id)
if document_segment is None:
if raise_error:
raise ValueError(f"doc_id {doc_id} not found.")
else:
return None
db.session.delete(document_segment)
db.session.commit()
def set_document_hash(self, doc_id: str, doc_hash: str) -> None:
"""Set the hash for a given doc_id."""
document_segment = self.get_document_segment(doc_id)
if document_segment is None:
return None
document_segment.index_node_hash = doc_hash
db.session.commit()
def get_document_hash(self, doc_id: str) -> Optional[str]:
"""Get the stored hash for a document, if it exists."""
document_segment = self.get_document_segment(doc_id)
if document_segment is None:
return None
return document_segment.index_node_hash
def get_document_segment(self, doc_id: str) -> DocumentSegment:
document_segment = db.session.query(DocumentSegment).filter(
DocumentSegment.dataset_id == self._dataset.id,
DocumentSegment.index_node_id == doc_id
).first()
return document_segment
| [] |
2024-01-10 | 17600164659/dify | api~core~model_providers~models~llm~wenxin_model.py | import decimal
from typing import List, Optional, Any
from langchain.callbacks.manager import Callbacks
from langchain.schema import LLMResult
from core.model_providers.error import LLMBadRequestError
from core.model_providers.models.llm.base import BaseLLM
from core.model_providers.models.entity.message import PromptMessage, MessageType
from core.model_providers.models.entity.model_params import ModelMode, ModelKwargs
from core.third_party.langchain.llms.wenxin import Wenxin
class WenxinModel(BaseLLM):
model_mode: ModelMode = ModelMode.COMPLETION
def _init_client(self) -> Any:
provider_model_kwargs = self._to_model_kwargs_input(self.model_rules, self.model_kwargs)
# TODO load price_config from configs(db)
return Wenxin(
streaming=self.streaming,
callbacks=self.callbacks,
**self.credentials,
**provider_model_kwargs
)
def _run(self, messages: List[PromptMessage],
stop: Optional[List[str]] = None,
callbacks: Callbacks = None,
**kwargs) -> LLMResult:
"""
run predict by prompt messages and stop words.
:param messages:
:param stop:
:param callbacks:
:return:
"""
prompts = self._get_prompt_from_messages(messages)
return self._client.generate([prompts], stop, callbacks)
def get_num_tokens(self, messages: List[PromptMessage]) -> int:
"""
get num tokens of prompt messages.
:param messages:
:return:
"""
prompts = self._get_prompt_from_messages(messages)
return max(self._client.get_num_tokens(prompts), 0)
def _set_model_kwargs(self, model_kwargs: ModelKwargs):
provider_model_kwargs = self._to_model_kwargs_input(self.model_rules, model_kwargs)
for k, v in provider_model_kwargs.items():
if hasattr(self.client, k):
setattr(self.client, k, v)
def handle_exceptions(self, ex: Exception) -> Exception:
return LLMBadRequestError(f"Wenxin: {str(ex)}")
@classmethod
def support_streaming(cls):
return False
| [] |
2024-01-10 | 17600164659/dify | api~core~model_providers~models~llm~minimax_model.py | import decimal
from typing import List, Optional, Any
from langchain.callbacks.manager import Callbacks
from langchain.llms import Minimax
from langchain.schema import LLMResult
from core.model_providers.error import LLMBadRequestError
from core.model_providers.models.llm.base import BaseLLM
from core.model_providers.models.entity.message import PromptMessage, MessageType
from core.model_providers.models.entity.model_params import ModelMode, ModelKwargs
class MinimaxModel(BaseLLM):
model_mode: ModelMode = ModelMode.COMPLETION
def _init_client(self) -> Any:
provider_model_kwargs = self._to_model_kwargs_input(self.model_rules, self.model_kwargs)
return Minimax(
model=self.name,
model_kwargs={
'stream': False
},
callbacks=self.callbacks,
**self.credentials,
**provider_model_kwargs
)
def _run(self, messages: List[PromptMessage],
stop: Optional[List[str]] = None,
callbacks: Callbacks = None,
**kwargs) -> LLMResult:
"""
run predict by prompt messages and stop words.
:param messages:
:param stop:
:param callbacks:
:return:
"""
prompts = self._get_prompt_from_messages(messages)
return self._client.generate([prompts], stop, callbacks)
def get_num_tokens(self, messages: List[PromptMessage]) -> int:
"""
get num tokens of prompt messages.
:param messages:
:return:
"""
prompts = self._get_prompt_from_messages(messages)
return max(self._client.get_num_tokens(prompts), 0)
def get_currency(self):
return 'RMB'
def _set_model_kwargs(self, model_kwargs: ModelKwargs):
provider_model_kwargs = self._to_model_kwargs_input(self.model_rules, model_kwargs)
for k, v in provider_model_kwargs.items():
if hasattr(self.client, k):
setattr(self.client, k, v)
def handle_exceptions(self, ex: Exception) -> Exception:
if isinstance(ex, ValueError):
return LLMBadRequestError(f"Minimax: {str(ex)}")
else:
return ex
| [] |
2024-01-10 | 17600164659/dify | api~core~generator~llm_generator.py | import logging
from langchain.schema import OutputParserException
from core.model_providers.error import LLMError, ProviderTokenNotInitError
from core.model_providers.model_factory import ModelFactory
from core.model_providers.models.entity.message import PromptMessage, MessageType
from core.model_providers.models.entity.model_params import ModelKwargs
from core.prompt.output_parser.rule_config_generator import RuleConfigGeneratorOutputParser
from core.prompt.output_parser.suggested_questions_after_answer import SuggestedQuestionsAfterAnswerOutputParser
from core.prompt.prompt_template import JinjaPromptTemplate, OutLinePromptTemplate
from core.prompt.prompts import CONVERSATION_TITLE_PROMPT, CONVERSATION_SUMMARY_PROMPT, INTRODUCTION_GENERATE_PROMPT, \
GENERATOR_QA_PROMPT
class LLMGenerator:
@classmethod
def generate_conversation_name(cls, tenant_id: str, query, answer):
prompt = CONVERSATION_TITLE_PROMPT
if len(query) > 2000:
query = query[:300] + "...[TRUNCATED]..." + query[-300:]
prompt = prompt.format(query=query)
model_instance = ModelFactory.get_text_generation_model(
tenant_id=tenant_id,
model_kwargs=ModelKwargs(
max_tokens=50
)
)
prompts = [PromptMessage(content=prompt)]
response = model_instance.run(prompts)
answer = response.content
return answer.strip()
@classmethod
def generate_conversation_summary(cls, tenant_id: str, messages):
max_tokens = 200
model_instance = ModelFactory.get_text_generation_model(
tenant_id=tenant_id,
model_kwargs=ModelKwargs(
max_tokens=max_tokens
)
)
prompt = CONVERSATION_SUMMARY_PROMPT
prompt_with_empty_context = prompt.format(context='')
prompt_tokens = model_instance.get_num_tokens([PromptMessage(content=prompt_with_empty_context)])
max_context_token_length = model_instance.model_rules.max_tokens.max
max_context_token_length = max_context_token_length if max_context_token_length else 1500
rest_tokens = max_context_token_length - prompt_tokens - max_tokens - 1
context = ''
for message in messages:
if not message.answer:
continue
if len(message.query) > 2000:
query = message.query[:300] + "...[TRUNCATED]..." + message.query[-300:]
else:
query = message.query
if len(message.answer) > 2000:
answer = message.answer[:300] + "...[TRUNCATED]..." + message.answer[-300:]
else:
answer = message.answer
message_qa_text = "\n\nHuman:" + query + "\n\nAssistant:" + answer
if rest_tokens - model_instance.get_num_tokens([PromptMessage(content=context + message_qa_text)]) > 0:
context += message_qa_text
if not context:
return '[message too long, no summary]'
prompt = prompt.format(context=context)
prompts = [PromptMessage(content=prompt)]
response = model_instance.run(prompts)
answer = response.content
return answer.strip()
@classmethod
def generate_introduction(cls, tenant_id: str, pre_prompt: str):
prompt = INTRODUCTION_GENERATE_PROMPT
prompt = prompt.format(prompt=pre_prompt)
model_instance = ModelFactory.get_text_generation_model(
tenant_id=tenant_id
)
prompts = [PromptMessage(content=prompt)]
response = model_instance.run(prompts)
answer = response.content
return answer.strip()
@classmethod
def generate_suggested_questions_after_answer(cls, tenant_id: str, histories: str):
output_parser = SuggestedQuestionsAfterAnswerOutputParser()
format_instructions = output_parser.get_format_instructions()
prompt = JinjaPromptTemplate(
template="{{histories}}\n{{format_instructions}}\nquestions:\n",
input_variables=["histories"],
partial_variables={"format_instructions": format_instructions}
)
_input = prompt.format_prompt(histories=histories)
try:
model_instance = ModelFactory.get_text_generation_model(
tenant_id=tenant_id,
model_kwargs=ModelKwargs(
max_tokens=256,
temperature=0
)
)
except ProviderTokenNotInitError:
return []
prompts = [PromptMessage(content=_input.to_string())]
try:
output = model_instance.run(prompts)
questions = output_parser.parse(output.content)
except LLMError:
questions = []
except Exception as e:
logging.exception(e)
questions = []
return questions
@classmethod
def generate_rule_config(cls, tenant_id: str, audiences: str, hoping_to_solve: str) -> dict:
output_parser = RuleConfigGeneratorOutputParser()
prompt = OutLinePromptTemplate(
template=output_parser.get_format_instructions(),
input_variables=["audiences", "hoping_to_solve"],
partial_variables={
"variable": '{variable}',
"lanA": '{lanA}',
"lanB": '{lanB}',
"topic": '{topic}'
},
validate_template=False
)
_input = prompt.format_prompt(audiences=audiences, hoping_to_solve=hoping_to_solve)
model_instance = ModelFactory.get_text_generation_model(
tenant_id=tenant_id,
model_kwargs=ModelKwargs(
max_tokens=512,
temperature=0
)
)
prompts = [PromptMessage(content=_input.to_string())]
try:
output = model_instance.run(prompts)
rule_config = output_parser.parse(output.content)
except LLMError as e:
raise e
except OutputParserException:
raise ValueError('Please give a valid input for intended audience or hoping to solve problems.')
except Exception as e:
logging.exception(e)
rule_config = {
"prompt": "",
"variables": [],
"opening_statement": ""
}
return rule_config
@classmethod
def generate_qa_document(cls, tenant_id: str, query, document_language: str):
prompt = GENERATOR_QA_PROMPT.format(language=document_language)
model_instance = ModelFactory.get_text_generation_model(
tenant_id=tenant_id,
model_kwargs=ModelKwargs(
max_tokens=2000
)
)
prompts = [
PromptMessage(content=prompt, type=MessageType.SYSTEM),
PromptMessage(content=query)
]
response = model_instance.run(prompts)
answer = response.content
return answer.strip()
| [
"{{histories}}\n{{format_instructions}}\nquestions:\n",
"format_instructions",
"PLACEHOLDERPLACEHOLDER",
"{lanA}",
"audiences",
"False",
"{lanB}",
"lanB",
"lanA",
"hoping_to_solve",
"{variable}"
] |
2024-01-10 | 17600164659/dify | api~core~tool~web_reader_tool.py | import hashlib
import json
import os
import re
import site
import subprocess
import tempfile
import unicodedata
from contextlib import contextmanager
from typing import Type
import requests
from bs4 import BeautifulSoup, NavigableString, Comment, CData
from langchain.base_language import BaseLanguageModel
from langchain.chains.summarize import load_summarize_chain
from langchain.schema import Document
from langchain.text_splitter import RecursiveCharacterTextSplitter
from langchain.tools.base import BaseTool
from newspaper import Article
from pydantic import BaseModel, Field
from regex import regex
from core.data_loader import file_extractor
from core.data_loader.file_extractor import FileExtractor
FULL_TEMPLATE = """
TITLE: {title}
AUTHORS: {authors}
PUBLISH DATE: {publish_date}
TOP_IMAGE_URL: {top_image}
TEXT:
{text}
"""
class WebReaderToolInput(BaseModel):
url: str = Field(..., description="URL of the website to read")
summary: bool = Field(
default=False,
description="When the user's question requires extracting the summarizing content of the webpage, "
"set it to true."
)
cursor: int = Field(
default=0,
description="Start reading from this character."
"Use when the first response was truncated"
"and you want to continue reading the page."
"The value cannot exceed 24000.",
)
class WebReaderTool(BaseTool):
"""Reader tool for getting website title and contents. Gives more control than SimpleReaderTool."""
name: str = "web_reader"
args_schema: Type[BaseModel] = WebReaderToolInput
description: str = "use this to read a website. " \
"If you can answer the question based on the information provided, " \
"there is no need to use."
page_contents: str = None
url: str = None
max_chunk_length: int = 4000
summary_chunk_tokens: int = 4000
summary_chunk_overlap: int = 0
summary_separators: list[str] = ["\n\n", "。", ".", " ", ""]
continue_reading: bool = True
llm: BaseLanguageModel = None
def _run(self, url: str, summary: bool = False, cursor: int = 0) -> str:
try:
if not self.page_contents or self.url != url:
page_contents = get_url(url)
self.page_contents = page_contents
self.url = url
else:
page_contents = self.page_contents
except Exception as e:
return f'Read this website failed, caused by: {str(e)}.'
if summary and self.llm:
character_splitter = RecursiveCharacterTextSplitter.from_tiktoken_encoder(
chunk_size=self.summary_chunk_tokens,
chunk_overlap=self.summary_chunk_overlap,
separators=self.summary_separators
)
texts = character_splitter.split_text(page_contents)
docs = [Document(page_content=t) for t in texts]
docs = docs[1:]
# only use first 5 docs
if len(docs) > 5:
docs = docs[:5]
chain = load_summarize_chain(self.llm, chain_type="refine", callbacks=self.callbacks)
try:
page_contents = chain.run(docs)
# todo use cache
except Exception as e:
return f'Read this website failed, caused by: {str(e)}.'
else:
page_contents = page_result(page_contents, cursor, self.max_chunk_length)
if self.continue_reading and len(page_contents) >= self.max_chunk_length:
page_contents += f"\nPAGE WAS TRUNCATED. IF YOU FIND INFORMATION THAT CAN ANSWER QUESTION " \
f"THEN DIRECT ANSWER AND STOP INVOKING web_reader TOOL, OTHERWISE USE " \
f"CURSOR={cursor+len(page_contents)} TO CONTINUE READING."
return page_contents
async def _arun(self, url: str) -> str:
raise NotImplementedError
def page_result(text: str, cursor: int, max_length: int) -> str:
"""Page through `text` and return a substring of `max_length` characters starting from `cursor`."""
return text[cursor: cursor + max_length]
def get_url(url: str) -> str:
"""Fetch URL and return the contents as a string."""
headers = {
"User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/91.0.4472.124 Safari/537.36"
}
supported_content_types = file_extractor.SUPPORT_URL_CONTENT_TYPES + ["text/html"]
head_response = requests.head(url, headers=headers, allow_redirects=True, timeout=(5, 10))
if head_response.status_code != 200:
return "URL returned status code {}.".format(head_response.status_code)
# check content-type
main_content_type = head_response.headers.get('Content-Type').split(';')[0].strip()
if main_content_type not in supported_content_types:
return "Unsupported content-type [{}] of URL.".format(main_content_type)
if main_content_type in file_extractor.SUPPORT_URL_CONTENT_TYPES:
return FileExtractor.load_from_url(url, return_text=True)
response = requests.get(url, headers=headers, allow_redirects=True, timeout=(5, 30))
a = extract_using_readabilipy(response.text)
if not a['plain_text'] or not a['plain_text'].strip():
return get_url_from_newspaper3k(url)
res = FULL_TEMPLATE.format(
title=a['title'],
authors=a['byline'],
publish_date=a['date'],
top_image="",
text=a['plain_text'] if a['plain_text'] else "",
)
return res
def get_url_from_newspaper3k(url: str) -> str:
a = Article(url)
a.download()
a.parse()
res = FULL_TEMPLATE.format(
title=a.title,
authors=a.authors,
publish_date=a.publish_date,
top_image=a.top_image,
text=a.text,
)
return res
def extract_using_readabilipy(html):
with tempfile.NamedTemporaryFile(delete=False, mode='w+') as f_html:
f_html.write(html)
f_html.close()
html_path = f_html.name
# Call Mozilla's Readability.js Readability.parse() function via node, writing output to a temporary file
article_json_path = html_path + ".json"
jsdir = os.path.join(find_module_path('readabilipy'), 'javascript')
with chdir(jsdir):
subprocess.check_call(["node", "ExtractArticle.js", "-i", html_path, "-o", article_json_path])
# Read output of call to Readability.parse() from JSON file and return as Python dictionary
with open(article_json_path, "r", encoding="utf-8") as json_file:
input_json = json.loads(json_file.read())
# Deleting files after processing
os.unlink(article_json_path)
os.unlink(html_path)
article_json = {
"title": None,
"byline": None,
"date": None,
"content": None,
"plain_content": None,
"plain_text": None
}
# Populate article fields from readability fields where present
if input_json:
if "title" in input_json and input_json["title"]:
article_json["title"] = input_json["title"]
if "byline" in input_json and input_json["byline"]:
article_json["byline"] = input_json["byline"]
if "date" in input_json and input_json["date"]:
article_json["date"] = input_json["date"]
if "content" in input_json and input_json["content"]:
article_json["content"] = input_json["content"]
article_json["plain_content"] = plain_content(article_json["content"], False, False)
article_json["plain_text"] = extract_text_blocks_as_plain_text(article_json["plain_content"])
if "textContent" in input_json and input_json["textContent"]:
article_json["plain_text"] = input_json["textContent"]
article_json["plain_text"] = re.sub(r'\n\s*\n', '\n', article_json["plain_text"])
return article_json
def find_module_path(module_name):
for package_path in site.getsitepackages():
potential_path = os.path.join(package_path, module_name)
if os.path.exists(potential_path):
return potential_path
return None
@contextmanager
def chdir(path):
"""Change directory in context and return to original on exit"""
# From https://stackoverflow.com/a/37996581, couldn't find a built-in
original_path = os.getcwd()
os.chdir(path)
try:
yield
finally:
os.chdir(original_path)
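# used above as `with chdir(jsdir): subprocess.check_call([...])`; the original cwd is restored on exit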
def extract_text_blocks_as_plain_text(paragraph_html):
# Load article as DOM
soup = BeautifulSoup(paragraph_html, 'html.parser')
# Select all lists
list_elements = soup.find_all(['ul', 'ol'])
# Prefix text in all list items with "* " and make lists paragraphs
for list_element in list_elements:
plain_items = "".join(list(filter(None, [plain_text_leaf_node(li)["text"] for li in list_element.find_all('li')])))
list_element.string = plain_items
list_element.name = "p"
# Select all text blocks
text_blocks = [s.parent for s in soup.find_all(string=True)]
text_blocks = [plain_text_leaf_node(block) for block in text_blocks]
# Drop empty paragraphs
text_blocks = list(filter(lambda p: p["text"] is not None, text_blocks))
return text_blocks
def plain_text_leaf_node(element):
# Extract all text, stripped of any child HTML elements and normalise it
plain_text = normalise_text(element.get_text())
if plain_text != "" and element.name == "li":
plain_text = "* {}, ".format(plain_text)
if plain_text == "":
plain_text = None
if "data-node-index" in element.attrs:
plain = {"node_index": element["data-node-index"], "text": plain_text}
else:
plain = {"text": plain_text}
return plain
def plain_content(readability_content, content_digests, node_indexes):
# Load article as DOM
soup = BeautifulSoup(readability_content, 'html.parser')
# Make all elements plain
elements = plain_elements(soup.contents, content_digests, node_indexes)
if node_indexes:
# Add node index attributes to nodes
elements = [add_node_indexes(element) for element in elements]
# Replace article contents with plain elements
soup.contents = elements
return str(soup)
def plain_elements(elements, content_digests, node_indexes):
# Get plain content versions of all elements
elements = [plain_element(element, content_digests, node_indexes)
for element in elements]
if content_digests:
# Add content digest attribute to nodes
elements = [add_content_digest(element) for element in elements]
return elements
def plain_element(element, content_digests, node_indexes):
# For lists, we make each item plain text
if is_leaf(element):
# For leaf node elements, extract the text content, discarding any HTML tags
# 1. Get element contents as text
plain_text = element.get_text()
# 2. Normalise the extracted text string to a canonical representation
plain_text = normalise_text(plain_text)
# 3. Update element content to be plain text
element.string = plain_text
elif is_text(element):
if is_non_printing(element):
# The simplified HTML may have come from Readability.js so might
# have non-printing text (e.g. Comment or CData). In this case, we
# keep the structure, but ensure that the string is empty.
element = type(element)("")
else:
plain_text = element.string
plain_text = normalise_text(plain_text)
element = type(element)(plain_text)
else:
# If not a leaf node or leaf type call recursively on child nodes, replacing
element.contents = plain_elements(element.contents, content_digests, node_indexes)
return element
def add_node_indexes(element, node_index="0"):
# Can't add attributes to string types
if is_text(element):
return element
# Add index to current element
element["data-node-index"] = node_index
# Add index to child elements
for local_idx, child in enumerate(
[c for c in element.contents if not is_text(c)], start=1):
# Can't add attributes to leaf string types
child_index = "{stem}.{local}".format(
stem=node_index, local=local_idx)
add_node_indexes(child, node_index=child_index)
return element
def normalise_text(text):
"""Normalise unicode and whitespace."""
# Normalise unicode first to try and standardise whitespace characters as much as possible before normalising them
text = strip_control_characters(text)
text = normalise_unicode(text)
text = normalise_whitespace(text)
return text
def strip_control_characters(text):
"""Strip out unicode control characters which might break the parsing."""
# Unicode control characters
# [Cc]: Other, Control [includes new lines]
# [Cf]: Other, Format
# [Cn]: Other, Not Assigned
# [Co]: Other, Private Use
# [Cs]: Other, Surrogate
control_chars = set(['Cc', 'Cf', 'Cn', 'Co', 'Cs'])
retained_chars = ['\t', '\n', '\r', '\f']
# Remove non-printing control characters
return "".join(["" if (unicodedata.category(char) in control_chars) and (char not in retained_chars) else char for char in text])
def normalise_unicode(text):
"""Normalise unicode such that things that are visually equivalent map to the same unicode string where possible."""
normal_form = "NFKC"
text = unicodedata.normalize(normal_form, text)
return text
def normalise_whitespace(text):
"""Replace runs of whitespace characters with a single space as this is what happens when HTML text is displayed."""
text = regex.sub(r"\s+", " ", text)
# Remove leading and trailing whitespace
text = text.strip()
return text
def is_leaf(element):
return (element.name in ['p', 'li'])
def is_text(element):
return isinstance(element, NavigableString)
def is_non_printing(element):
return any(isinstance(element, _e) for _e in [Comment, CData])
def add_content_digest(element):
if not is_text(element):
element["data-content-digest"] = content_digest(element)
return element
def content_digest(element):
if is_text(element):
# Hash
trimmed_string = element.string.strip()
if trimmed_string == "":
digest = ""
else:
digest = hashlib.sha256(trimmed_string.encode('utf-8')).hexdigest()
else:
contents = element.contents
num_contents = len(contents)
if num_contents == 0:
# No hash when no child elements exist
digest = ""
elif num_contents == 1:
# If single child, use digest of child
digest = content_digest(contents[0])
else:
# Build content digest from the "non-empty" digests of child nodes
digest = hashlib.sha256()
child_digests = list(
filter(lambda x: x != "", [content_digest(content) for content in contents]))
for child in child_digests:
digest.update(child.encode('utf-8'))
digest = digest.hexdigest()
return digest
| [
"\nTITLE: {title}\nAUTHORS: {authors}\nPUBLISH DATE: {publish_date}\nTOP_IMAGE_URL: {top_image}\nTEXT:\n\n{text}\n",
"None"
] |
2024-01-10 | 17600164659/dify | api~core~model_providers~models~llm~xinference_model.py | from typing import List, Optional, Any
from langchain.callbacks.manager import Callbacks
from langchain.llms import Xinference
from langchain.schema import LLMResult
from core.model_providers.error import LLMBadRequestError
from core.model_providers.models.llm.base import BaseLLM
from core.model_providers.models.entity.message import PromptMessage
from core.model_providers.models.entity.model_params import ModelMode, ModelKwargs
class XinferenceModel(BaseLLM):
model_mode: ModelMode = ModelMode.COMPLETION
def _init_client(self) -> Any:
self.provider_model_kwargs = self._to_model_kwargs_input(self.model_rules, self.model_kwargs)
client = Xinference(
**self.credentials,
)
client.callbacks = self.callbacks
return client
def _run(self, messages: List[PromptMessage],
stop: Optional[List[str]] = None,
callbacks: Callbacks = None,
**kwargs) -> LLMResult:
"""
run predict by prompt messages and stop words.
:param messages:
:param stop:
:param callbacks:
:return:
"""
prompts = self._get_prompt_from_messages(messages)
return self._client.generate(
[prompts],
stop,
callbacks,
generate_config={
"stop": stop,
"stream": self.streaming,
**self.provider_model_kwargs,
}
)
def get_num_tokens(self, messages: List[PromptMessage]) -> int:
"""
get num tokens of prompt messages.
:param messages:
:return:
"""
prompts = self._get_prompt_from_messages(messages)
return max(self._client.get_num_tokens(prompts), 0)
def _set_model_kwargs(self, model_kwargs: ModelKwargs):
pass
def handle_exceptions(self, ex: Exception) -> Exception:
return LLMBadRequestError(f"Xinference: {str(ex)}")
@classmethod
def support_streaming(cls):
return True
| [] |
2024-01-10 | 17600164659/dify | api~tests~integration_tests~models~llm~test_huggingface_hub_model.py | import json
import os
from unittest.mock import patch, MagicMock
from langchain.schema import Generation
from core.model_providers.models.entity.message import PromptMessage, MessageType
from core.model_providers.models.entity.model_params import ModelKwargs, ModelType
from core.model_providers.models.llm.huggingface_hub_model import HuggingfaceHubModel
from core.model_providers.providers.huggingface_hub_provider import HuggingfaceHubProvider
from models.provider import Provider, ProviderType, ProviderModel
def get_mock_provider():
return Provider(
id='provider_id',
tenant_id='tenant_id',
provider_name='huggingface_hub',
provider_type=ProviderType.CUSTOM.value,
encrypted_config='',
is_valid=True,
)
def get_mock_model(model_name, huggingfacehub_api_type, mocker):
model_kwargs = ModelKwargs(
max_tokens=10,
temperature=0.01
)
valid_api_key = os.environ['HUGGINGFACE_API_KEY']
endpoint_url = os.environ['HUGGINGFACE_ENDPOINT_URL']
model_provider = HuggingfaceHubProvider(provider=get_mock_provider())
credentials = {
'huggingfacehub_api_type': huggingfacehub_api_type,
'huggingfacehub_api_token': valid_api_key
}
if huggingfacehub_api_type == 'inference_endpoints':
credentials['huggingfacehub_endpoint_url'] = endpoint_url
mock_query = MagicMock()
mock_query.filter.return_value.first.return_value = ProviderModel(
provider_name='huggingface_hub',
model_name=model_name,
model_type=ModelType.TEXT_GENERATION.value,
encrypted_config=json.dumps(credentials),
is_valid=True,
)
mocker.patch('extensions.ext_database.db.session.query', return_value=mock_query)
return HuggingfaceHubModel(
model_provider=model_provider,
name=model_name,
model_kwargs=model_kwargs
)
def decrypt_side_effect(tenant_id, encrypted_api_key):
return encrypted_api_key
@patch('huggingface_hub.hf_api.ModelInfo')
@patch('core.helper.encrypter.decrypt_token', side_effect=decrypt_side_effect)
def test_hosted_inference_api_get_num_tokens(mock_decrypt, mock_model_info, mocker):
mock_model_info.return_value = MagicMock(pipeline_tag='text2text-generation')
mocker.patch('langchain.llms.huggingface_hub.HuggingFaceHub._call', return_value="abc")
model = get_mock_model(
'tiiuae/falcon-40b',
'hosted_inference_api',
mocker
)
rst = model.get_num_tokens([
PromptMessage(type=MessageType.HUMAN, content='Who is your manufacturer?')
])
assert rst == 5
@patch('huggingface_hub.hf_api.ModelInfo')
@patch('core.helper.encrypter.decrypt_token', side_effect=decrypt_side_effect)
def test_inference_endpoints_get_num_tokens(mock_decrypt, mock_model_info, mocker):
mock_model_info.return_value = MagicMock(pipeline_tag='text2text-generation')
mocker.patch('langchain.llms.huggingface_hub.HuggingFaceHub._call', return_value="abc")
model = get_mock_model(
'',
'inference_endpoints',
mocker
)
rst = model.get_num_tokens([
PromptMessage(type=MessageType.HUMAN, content='Who is your manufacturer?')
])
assert rst == 5
@patch('core.helper.encrypter.decrypt_token', side_effect=decrypt_side_effect)
def test_hosted_inference_api_run(mock_decrypt, mocker):
mocker.patch('core.model_providers.providers.base.BaseModelProvider.update_last_used', return_value=None)
model = get_mock_model(
'google/flan-t5-base',
'hosted_inference_api',
mocker
)
rst = model.run(
[PromptMessage(content='Human: Are you Really Human? you MUST only answer `y` or `n`? \nAssistant: ')],
stop=['\nHuman:'],
)
assert len(rst.content) > 0
assert rst.content.strip() == 'n'
@patch('core.helper.encrypter.decrypt_token', side_effect=decrypt_side_effect)
def test_inference_endpoints_run(mock_decrypt, mocker):
mocker.patch('core.model_providers.providers.base.BaseModelProvider.update_last_used', return_value=None)
model = get_mock_model(
'',
'inference_endpoints',
mocker
)
rst = model.run(
[PromptMessage(content='Answer the following yes/no question. Can you write a whole Haiku in a single tweet?')],
)
assert len(rst.content) > 0
| [
"Answer the following yes/no question. Can you write a whole Haiku in a single tweet?",
"Who is your manufacturer?",
"Human: Are you Really Human? you MUST only answer `y` or `n`? \nAssistant: "
] |
2024-01-10 | 17600164659/dify | api~core~tool~dataset_retriever_tool.py | import re
from typing import Type
from flask import current_app
from langchain.tools import BaseTool
from pydantic import Field, BaseModel
from core.callback_handler.index_tool_callback_handler import DatasetIndexToolCallbackHandler
from core.embedding.cached_embedding import CacheEmbedding
from core.index.keyword_table_index.keyword_table_index import KeywordTableIndex, KeywordTableConfig
from core.index.vector_index.vector_index import VectorIndex
from core.model_providers.error import LLMBadRequestError, ProviderTokenNotInitError
from core.model_providers.model_factory import ModelFactory
from extensions.ext_database import db
from models.dataset import Dataset, DocumentSegment
class DatasetRetrieverToolInput(BaseModel):
dataset_id: str = Field(..., description="ID of dataset to be queried. MUST be UUID format.")
query: str = Field(..., description="Query for the dataset to be used to retrieve the dataset.")
class DatasetRetrieverTool(BaseTool):
"""Tool for querying a Dataset."""
name: str = "dataset"
args_schema: Type[BaseModel] = DatasetRetrieverToolInput
description: str = "use this to retrieve a dataset. "
tenant_id: str
dataset_id: str
k: int = 3
@classmethod
def from_dataset(cls, dataset: Dataset, **kwargs):
description = dataset.description
if not description:
description = 'useful for when you want to answer queries about the ' + dataset.name
description = description.replace('\n', '').replace('\r', '')
description += '\nID of dataset MUST be ' + dataset.id
return cls(
tenant_id=dataset.tenant_id,
dataset_id=dataset.id,
description=description,
**kwargs
)
def _run(self, dataset_id: str, query: str) -> str:
pattern = r'\b[0-9a-f]{8}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{12}\b'
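# e.g. matches "c0ffee00-1234-4abc-8def-0123456789ab" anywhere inside the tool input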
match = re.search(pattern, dataset_id, re.IGNORECASE)
if match:
dataset_id = match.group()
dataset = db.session.query(Dataset).filter(
Dataset.tenant_id == self.tenant_id,
Dataset.id == dataset_id
).first()
if not dataset:
return f'[{self.name} failed to find dataset with id {dataset_id}.]'
if dataset.indexing_technique == "economy":
# use keyword table query
kw_table_index = KeywordTableIndex(
dataset=dataset,
config=KeywordTableConfig(
max_keywords_per_chunk=5
)
)
documents = kw_table_index.search(query, search_kwargs={'k': self.k})
return str("\n".join([document.page_content for document in documents]))
else:
try:
embedding_model = ModelFactory.get_embedding_model(
tenant_id=dataset.tenant_id,
model_provider_name=dataset.embedding_model_provider,
model_name=dataset.embedding_model
)
except LLMBadRequestError:
return ''
except ProviderTokenNotInitError:
return ''
embeddings = CacheEmbedding(embedding_model)
vector_index = VectorIndex(
dataset=dataset,
config=current_app.config,
embeddings=embeddings
)
if self.k > 0:
documents = vector_index.search(
query,
search_type='similarity',
search_kwargs={
'k': self.k
}
)
else:
documents = []
hit_callback = DatasetIndexToolCallbackHandler(dataset.id)
hit_callback.on_tool_end(documents)
document_context_list = []
index_node_ids = [document.metadata['doc_id'] for document in documents]
segments = DocumentSegment.query.filter(DocumentSegment.completed_at.isnot(None),
DocumentSegment.status == 'completed',
DocumentSegment.enabled == True,
DocumentSegment.index_node_id.in_(index_node_ids)
).all()
if segments:
index_node_id_to_position = {id: position for position, id in enumerate(index_node_ids)}
sorted_segments = sorted(segments,
key=lambda segment: index_node_id_to_position.get(segment.index_node_id,
float('inf')))
for segment in sorted_segments:
if segment.answer:
document_context_list.append(f'question:{segment.content} \nanswer:{segment.answer}')
else:
document_context_list.append(segment.content)
return str("\n".join(document_context_list))
async def _arun(self, tool_input: str) -> str:
raise NotImplementedError()
| [
"use this to retrieve a dataset. "
] |
2024-01-10 | 17600164659/dify | api~core~model_providers~models~embedding~azure_openai_embedding.py | import decimal
import logging
import openai
import tiktoken
from langchain.embeddings import OpenAIEmbeddings
from core.model_providers.error import LLMBadRequestError, LLMAuthorizationError, LLMRateLimitError, \
LLMAPIUnavailableError, LLMAPIConnectionError
from core.model_providers.models.embedding.base import BaseEmbedding
from core.model_providers.providers.base import BaseModelProvider
AZURE_OPENAI_API_VERSION = '2023-07-01-preview'
class AzureOpenAIEmbedding(BaseEmbedding):
def __init__(self, model_provider: BaseModelProvider, name: str):
self.credentials = model_provider.get_model_credentials(
model_name=name,
model_type=self.type
)
client = OpenAIEmbeddings(
deployment=name,
openai_api_type='azure',
openai_api_version=AZURE_OPENAI_API_VERSION,
chunk_size=16,
max_retries=1,
openai_api_key=self.credentials.get('openai_api_key'),
openai_api_base=self.credentials.get('openai_api_base')
)
super().__init__(model_provider, client, name)
@property
def base_model_name(self) -> str:
"""
get base model name (not deployment)
:return: str
"""
return self.credentials.get("base_model_name")
def get_num_tokens(self, text: str) -> int:
"""
get num tokens of text.
:param text:
:return:
"""
if len(text) == 0:
return 0
enc = tiktoken.encoding_for_model(self.credentials.get('base_model_name'))
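# e.g. a base model of "text-embedding-ada-002" resolves to the cl100k_base encoding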
tokenized_text = enc.encode(text)
# calculate the number of tokens in the encoded text
return len(tokenized_text)
def handle_exceptions(self, ex: Exception) -> Exception:
if isinstance(ex, openai.error.InvalidRequestError):
logging.warning("Invalid request to Azure OpenAI API.")
return LLMBadRequestError(str(ex))
elif isinstance(ex, openai.error.APIConnectionError):
logging.warning("Failed to connect to Azure OpenAI API.")
return LLMAPIConnectionError(ex.__class__.__name__ + ":" + str(ex))
elif isinstance(ex, (openai.error.APIError, openai.error.ServiceUnavailableError, openai.error.Timeout)):
logging.warning("Azure OpenAI service unavailable.")
return LLMAPIUnavailableError(ex.__class__.__name__ + ":" + str(ex))
elif isinstance(ex, openai.error.RateLimitError):
return LLMRateLimitError('Azure ' + str(ex))
elif isinstance(ex, openai.error.AuthenticationError):
return LLMAuthorizationError('Azure ' + str(ex))
elif isinstance(ex, openai.error.OpenAIError):
return LLMBadRequestError('Azure ' + ex.__class__.__name__ + ":" + str(ex))
else:
return ex
| [] |
2024-01-10 | 17600164659/dify | api~core~model_providers~models~llm~replicate_model.py | import decimal
from functools import wraps
from typing import List, Optional, Any
from langchain.callbacks.manager import Callbacks
from langchain.schema import LLMResult, get_buffer_string
from replicate.exceptions import ReplicateError, ModelError
from core.model_providers.providers.base import BaseModelProvider
from core.model_providers.error import LLMBadRequestError
from core.third_party.langchain.llms.replicate_llm import EnhanceReplicate
from core.model_providers.models.llm.base import BaseLLM
from core.model_providers.models.entity.message import PromptMessage, MessageType
from core.model_providers.models.entity.model_params import ModelMode, ModelKwargs
class ReplicateModel(BaseLLM):
def __init__(self, model_provider: BaseModelProvider,
name: str,
model_kwargs: ModelKwargs,
streaming: bool = False,
callbacks: Callbacks = None):
self.model_mode = ModelMode.CHAT if name.endswith('-chat') else ModelMode.COMPLETION
super().__init__(model_provider, name, model_kwargs, streaming, callbacks)
def _init_client(self) -> Any:
provider_model_kwargs = self._to_model_kwargs_input(self.model_rules, self.model_kwargs)
return EnhanceReplicate(
model=self.name + ':' + self.credentials.get('model_version'),
input=provider_model_kwargs,
streaming=self.streaming,
replicate_api_token=self.credentials.get('replicate_api_token'),
callbacks=self.callbacks,
)
def _run(self, messages: List[PromptMessage],
stop: Optional[List[str]] = None,
callbacks: Callbacks = None,
**kwargs) -> LLMResult:
"""
run predict by prompt messages and stop words.
:param messages:
:param stop:
:param callbacks:
:return:
"""
prompts = self._get_prompt_from_messages(messages)
extra_kwargs = {}
if isinstance(prompts, list):
system_messages = [message for message in messages if message.type == 'system']
if system_messages:
system_message = system_messages[0]
extra_kwargs['system_prompt'] = system_message.content
prompts = [message for message in messages if message.type != 'system']
prompts = get_buffer_string(prompts)
# The maximum length the generated tokens can have.
# Corresponds to the length of the input prompt + max_new_tokens.
if 'max_length' in self._client.input:
self._client.input['max_length'] = min(
self._client.input['max_length'] + self.get_num_tokens(messages),
self.model_rules.max_tokens.max
)
return self._client.generate([prompts], stop, callbacks, **extra_kwargs)
def get_num_tokens(self, messages: List[PromptMessage]) -> int:
"""
get num tokens of prompt messages.
:param messages:
:return:
"""
prompts = self._get_prompt_from_messages(messages)
if isinstance(prompts, list):
prompts = get_buffer_string(prompts)
return self._client.get_num_tokens(prompts)
def _set_model_kwargs(self, model_kwargs: ModelKwargs):
provider_model_kwargs = self._to_model_kwargs_input(self.model_rules, model_kwargs)
self.client.input = provider_model_kwargs
def handle_exceptions(self, ex: Exception) -> Exception:
if isinstance(ex, (ModelError, ReplicateError)):
return LLMBadRequestError(f"Replicate: {str(ex)}")
else:
return ex
@classmethod
def support_streaming(cls):
return True | [] |
2024-01-10 | 17600164659/dify | api~core~model_providers~models~llm~anthropic_model.py | import decimal
import logging
from functools import wraps
from typing import List, Optional, Any
import anthropic
from langchain.callbacks.manager import Callbacks
from langchain.chat_models import ChatAnthropic
from langchain.schema import LLMResult
from core.model_providers.error import LLMBadRequestError, LLMAPIConnectionError, LLMAPIUnavailableError, \
LLMRateLimitError, LLMAuthorizationError
from core.model_providers.models.llm.base import BaseLLM
from core.model_providers.models.entity.message import PromptMessage, MessageType
from core.model_providers.models.entity.model_params import ModelMode, ModelKwargs
class AnthropicModel(BaseLLM):
model_mode: ModelMode = ModelMode.CHAT
def _init_client(self) -> Any:
provider_model_kwargs = self._to_model_kwargs_input(self.model_rules, self.model_kwargs)
return ChatAnthropic(
model=self.name,
streaming=self.streaming,
callbacks=self.callbacks,
default_request_timeout=60,
**self.credentials,
**provider_model_kwargs
)
def _run(self, messages: List[PromptMessage],
stop: Optional[List[str]] = None,
callbacks: Callbacks = None,
**kwargs) -> LLMResult:
"""
        Run prediction for the given prompt messages and stop words.
:param messages:
:param stop:
:param callbacks:
:return:
"""
prompts = self._get_prompt_from_messages(messages)
return self._client.generate([prompts], stop, callbacks)
def get_num_tokens(self, messages: List[PromptMessage]) -> int:
"""
get num tokens of prompt messages.
:param messages:
:return:
"""
prompts = self._get_prompt_from_messages(messages)
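        # get_num_tokens_from_messages includes per-message formatting overhead; subtracting len(prompts) looks like a rough correction so only content tokens are counted.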
return max(self._client.get_num_tokens_from_messages(prompts) - len(prompts), 0)
def _set_model_kwargs(self, model_kwargs: ModelKwargs):
provider_model_kwargs = self._to_model_kwargs_input(self.model_rules, model_kwargs)
for k, v in provider_model_kwargs.items():
if hasattr(self.client, k):
setattr(self.client, k, v)
def handle_exceptions(self, ex: Exception) -> Exception:
if isinstance(ex, anthropic.APIConnectionError):
logging.warning("Failed to connect to Anthropic API.")
return LLMAPIConnectionError(f"Anthropic: The server could not be reached, cause: {ex.__cause__}")
elif isinstance(ex, anthropic.RateLimitError):
return LLMRateLimitError("Anthropic: A 429 status code was received; we should back off a bit.")
elif isinstance(ex, anthropic.AuthenticationError):
return LLMAuthorizationError(f"Anthropic: {ex.message}")
elif isinstance(ex, anthropic.BadRequestError):
return LLMBadRequestError(f"Anthropic: {ex.message}")
elif isinstance(ex, anthropic.APIStatusError):
return LLMAPIUnavailableError(f"Anthropic: code: {ex.status_code}, cause: {ex.message}")
else:
return ex
@classmethod
def support_streaming(cls):
return True
| [] |
2024-01-10 | 17600164659/dify | api~core~model_providers~models~llm~azure_openai_model.py | import decimal
import logging
from functools import wraps
from typing import List, Optional, Any
import openai
from langchain.callbacks.manager import Callbacks
from langchain.schema import LLMResult
from core.model_providers.providers.base import BaseModelProvider
from core.third_party.langchain.llms.azure_chat_open_ai import EnhanceAzureChatOpenAI
from core.third_party.langchain.llms.azure_open_ai import EnhanceAzureOpenAI
from core.model_providers.error import LLMBadRequestError, LLMAPIConnectionError, LLMAPIUnavailableError, \
LLMRateLimitError, LLMAuthorizationError
from core.model_providers.models.llm.base import BaseLLM
from core.model_providers.models.entity.message import PromptMessage, MessageType
from core.model_providers.models.entity.model_params import ModelMode, ModelKwargs
AZURE_OPENAI_API_VERSION = '2023-07-01-preview'
class AzureOpenAIModel(BaseLLM):
def __init__(self, model_provider: BaseModelProvider,
name: str,
model_kwargs: ModelKwargs,
streaming: bool = False,
callbacks: Callbacks = None):
if name == 'text-davinci-003':
self.model_mode = ModelMode.COMPLETION
else:
self.model_mode = ModelMode.CHAT
super().__init__(model_provider, name, model_kwargs, streaming, callbacks)
def _init_client(self) -> Any:
provider_model_kwargs = self._to_model_kwargs_input(self.model_rules, self.model_kwargs)
if self.name == 'text-davinci-003':
client = EnhanceAzureOpenAI(
deployment_name=self.name,
streaming=self.streaming,
request_timeout=60,
openai_api_type='azure',
openai_api_version=AZURE_OPENAI_API_VERSION,
openai_api_key=self.credentials.get('openai_api_key'),
openai_api_base=self.credentials.get('openai_api_base'),
callbacks=self.callbacks,
**provider_model_kwargs
)
else:
extra_model_kwargs = {
'top_p': provider_model_kwargs.get('top_p'),
'frequency_penalty': provider_model_kwargs.get('frequency_penalty'),
'presence_penalty': provider_model_kwargs.get('presence_penalty'),
}
client = EnhanceAzureChatOpenAI(
deployment_name=self.name,
temperature=provider_model_kwargs.get('temperature'),
max_tokens=provider_model_kwargs.get('max_tokens'),
model_kwargs=extra_model_kwargs,
streaming=self.streaming,
request_timeout=60,
openai_api_type='azure',
openai_api_version=AZURE_OPENAI_API_VERSION,
openai_api_key=self.credentials.get('openai_api_key'),
openai_api_base=self.credentials.get('openai_api_base'),
callbacks=self.callbacks,
)
return client
def _run(self, messages: List[PromptMessage],
stop: Optional[List[str]] = None,
callbacks: Callbacks = None,
**kwargs) -> LLMResult:
"""
        Run prediction for the given prompt messages and stop words.
:param messages:
:param stop:
:param callbacks:
:return:
"""
prompts = self._get_prompt_from_messages(messages)
return self._client.generate([prompts], stop, callbacks)
@property
def base_model_name(self) -> str:
"""
        Get the base model name (not the Azure deployment name).
:return: str
"""
return self.credentials.get("base_model_name")
def get_num_tokens(self, messages: List[PromptMessage]) -> int:
"""
get num tokens of prompt messages.
:param messages:
:return:
"""
prompts = self._get_prompt_from_messages(messages)
if isinstance(prompts, str):
return self._client.get_num_tokens(prompts)
else:
return max(self._client.get_num_tokens_from_messages(prompts) - len(prompts), 0)
def _set_model_kwargs(self, model_kwargs: ModelKwargs):
provider_model_kwargs = self._to_model_kwargs_input(self.model_rules, model_kwargs)
if self.name == 'text-davinci-003':
for k, v in provider_model_kwargs.items():
if hasattr(self.client, k):
setattr(self.client, k, v)
else:
extra_model_kwargs = {
'top_p': provider_model_kwargs.get('top_p'),
'frequency_penalty': provider_model_kwargs.get('frequency_penalty'),
'presence_penalty': provider_model_kwargs.get('presence_penalty'),
}
self.client.temperature = provider_model_kwargs.get('temperature')
self.client.max_tokens = provider_model_kwargs.get('max_tokens')
self.client.model_kwargs = extra_model_kwargs
def handle_exceptions(self, ex: Exception) -> Exception:
if isinstance(ex, openai.error.InvalidRequestError):
logging.warning("Invalid request to Azure OpenAI API.")
return LLMBadRequestError(str(ex))
elif isinstance(ex, openai.error.APIConnectionError):
logging.warning("Failed to connect to Azure OpenAI API.")
return LLMAPIConnectionError(ex.__class__.__name__ + ":" + str(ex))
elif isinstance(ex, (openai.error.APIError, openai.error.ServiceUnavailableError, openai.error.Timeout)):
logging.warning("Azure OpenAI service unavailable.")
return LLMAPIUnavailableError(ex.__class__.__name__ + ":" + str(ex))
elif isinstance(ex, openai.error.RateLimitError):
return LLMRateLimitError('Azure ' + str(ex))
elif isinstance(ex, openai.error.AuthenticationError):
return LLMAuthorizationError('Azure ' + str(ex))
elif isinstance(ex, openai.error.OpenAIError):
return LLMBadRequestError('Azure ' + ex.__class__.__name__ + ":" + str(ex))
else:
return ex
@classmethod
def support_streaming(cls):
return True | [] |
2024-01-10 | 17600164659/dify | api~core~model_providers~models~llm~tongyi_model.py | import decimal
from functools import wraps
from typing import List, Optional, Any
from langchain.callbacks.manager import Callbacks
from langchain.schema import LLMResult
from requests import HTTPError
from core.model_providers.error import LLMBadRequestError
from core.model_providers.models.llm.base import BaseLLM
from core.model_providers.models.entity.message import PromptMessage, MessageType
from core.model_providers.models.entity.model_params import ModelMode, ModelKwargs
from core.third_party.langchain.llms.tongyi_llm import EnhanceTongyi
class TongyiModel(BaseLLM):
model_mode: ModelMode = ModelMode.COMPLETION
def _init_client(self) -> Any:
provider_model_kwargs = self._to_model_kwargs_input(self.model_rules, self.model_kwargs)
del provider_model_kwargs['max_tokens']
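        # max_tokens is dropped before constructing the client (the Tongyi wrapper apparently does not accept it); _set_model_kwargs does the same.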
return EnhanceTongyi(
model_name=self.name,
max_retries=1,
streaming=self.streaming,
callbacks=self.callbacks,
**self.credentials,
**provider_model_kwargs
)
def _run(self, messages: List[PromptMessage],
stop: Optional[List[str]] = None,
callbacks: Callbacks = None,
**kwargs) -> LLMResult:
"""
        Run prediction for the given prompt messages and stop words.
:param messages:
:param stop:
:param callbacks:
:return:
"""
prompts = self._get_prompt_from_messages(messages)
return self._client.generate([prompts], stop, callbacks)
def get_num_tokens(self, messages: List[PromptMessage]) -> int:
"""
get num tokens of prompt messages.
:param messages:
:return:
"""
prompts = self._get_prompt_from_messages(messages)
return max(self._client.get_num_tokens(prompts), 0)
def get_currency(self):
return 'RMB'
def _set_model_kwargs(self, model_kwargs: ModelKwargs):
provider_model_kwargs = self._to_model_kwargs_input(self.model_rules, model_kwargs)
del provider_model_kwargs['max_tokens']
for k, v in provider_model_kwargs.items():
if hasattr(self.client, k):
setattr(self.client, k, v)
def handle_exceptions(self, ex: Exception) -> Exception:
if isinstance(ex, (ValueError, HTTPError)):
return LLMBadRequestError(f"Tongyi: {str(ex)}")
else:
return ex
@classmethod
def support_streaming(cls):
return True
| [] |
2024-01-10 | 17600164659/dify | api~tests~integration_tests~models~llm~test_azure_openai_model.py | import json
import os
from unittest.mock import patch, MagicMock
import pytest
from langchain.schema import ChatGeneration, AIMessage
from core.model_providers.models.entity.model_params import ModelKwargs, ModelType
from core.model_providers.models.llm.azure_openai_model import AzureOpenAIModel
from core.model_providers.models.entity.message import PromptMessage, MessageType
from core.model_providers.providers.azure_openai_provider import AzureOpenAIProvider
from models.provider import Provider, ProviderType, ProviderModel
def get_mock_provider():
return Provider(
id='provider_id',
tenant_id='tenant_id',
provider_name='azure_openai',
provider_type=ProviderType.CUSTOM.value,
encrypted_config='',
is_valid=True,
)
def get_mock_azure_openai_model(model_name, mocker):
model_kwargs = ModelKwargs(
max_tokens=10,
temperature=0
)
valid_openai_api_base = os.environ['AZURE_OPENAI_API_BASE']
valid_openai_api_key = os.environ['AZURE_OPENAI_API_KEY']
provider = AzureOpenAIProvider(provider=get_mock_provider())
mock_query = MagicMock()
mock_query.filter.return_value.first.return_value = ProviderModel(
provider_name='azure_openai',
model_name=model_name,
model_type=ModelType.TEXT_GENERATION.value,
encrypted_config=json.dumps({
'openai_api_base': valid_openai_api_base,
'openai_api_key': valid_openai_api_key,
'base_model_name': model_name
}),
is_valid=True,
)
mocker.patch('extensions.ext_database.db.session.query', return_value=mock_query)
return AzureOpenAIModel(
model_provider=provider,
name=model_name,
model_kwargs=model_kwargs
)
def decrypt_side_effect(tenant_id, encrypted_openai_api_key):
return encrypted_openai_api_key
@patch('core.helper.encrypter.decrypt_token', side_effect=decrypt_side_effect)
def test_get_num_tokens(mock_decrypt, mocker):
openai_model = get_mock_azure_openai_model('text-davinci-003', mocker)
rst = openai_model.get_num_tokens([PromptMessage(content='you are a kindness Assistant.')])
assert rst == 6
@patch('core.helper.encrypter.decrypt_token', side_effect=decrypt_side_effect)
def test_chat_get_num_tokens(mock_decrypt, mocker):
openai_model = get_mock_azure_openai_model('gpt-35-turbo', mocker)
rst = openai_model.get_num_tokens([
PromptMessage(type=MessageType.SYSTEM, content='you are a kindness Assistant.'),
PromptMessage(type=MessageType.HUMAN, content='Who is your manufacturer?')
])
assert rst == 22
@patch('core.helper.encrypter.decrypt_token', side_effect=decrypt_side_effect)
def test_run(mock_decrypt, mocker):
mocker.patch('core.model_providers.providers.base.BaseModelProvider.update_last_used', return_value=None)
openai_model = get_mock_azure_openai_model('gpt-35-turbo', mocker)
messages = [PromptMessage(content='Human: Are you Human? you MUST only answer `y` or `n`? \nAssistant: ')]
rst = openai_model.run(
messages,
stop=['\nHuman:'],
)
assert len(rst.content) > 0
| [
"Who is your manufacturer?",
"Human: Are you Human? you MUST only answer `y` or `n`? \nAssistant: ",
"you are a kindness Assistant."
] |
2024-01-10 | 17600164659/dify | api~core~model_providers~models~embedding~xinference_embedding.py | from langchain.embeddings import XinferenceEmbeddings
from replicate.exceptions import ModelError, ReplicateError
from core.model_providers.error import LLMBadRequestError
from core.model_providers.providers.base import BaseModelProvider
from core.model_providers.models.embedding.base import BaseEmbedding
class XinferenceEmbedding(BaseEmbedding):
def __init__(self, model_provider: BaseModelProvider, name: str):
credentials = model_provider.get_model_credentials(
model_name=name,
model_type=self.type
)
client = XinferenceEmbeddings(
**credentials,
)
super().__init__(model_provider, client, name)
def handle_exceptions(self, ex: Exception) -> Exception:
if isinstance(ex, (ModelError, ReplicateError)):
return LLMBadRequestError(f"Xinference embedding: {str(ex)}")
else:
return ex
| [] |
2024-01-10 | 17600164659/dify | api~core~indexing_runner.py | import datetime
import json
import logging
import re
import threading
import time
import uuid
from typing import Optional, List, cast
from flask import current_app, Flask
from flask_login import current_user
from langchain.schema import Document
from langchain.text_splitter import RecursiveCharacterTextSplitter, TextSplitter
from core.data_loader.file_extractor import FileExtractor
from core.data_loader.loader.notion import NotionLoader
from core.docstore.dataset_docstore import DatesetDocumentStore
from core.generator.llm_generator import LLMGenerator
from core.index.index import IndexBuilder
from core.model_providers.error import ProviderTokenNotInitError
from core.model_providers.model_factory import ModelFactory
from core.model_providers.models.entity.message import MessageType
from core.spiltter.fixed_text_splitter import FixedRecursiveCharacterTextSplitter
from extensions.ext_database import db
from extensions.ext_redis import redis_client
from extensions.ext_storage import storage
from libs import helper
from models.dataset import Document as DatasetDocument
from models.dataset import Dataset, DocumentSegment, DatasetProcessRule
from models.model import UploadFile
from models.source import DataSourceBinding
class IndexingRunner:
def __init__(self):
self.storage = storage
def run(self, dataset_documents: List[DatasetDocument]):
"""Run the indexing process."""
for dataset_document in dataset_documents:
try:
# get dataset
dataset = Dataset.query.filter_by(
id=dataset_document.dataset_id
).first()
if not dataset:
raise ValueError("no dataset found")
# load file
text_docs = self._load_data(dataset_document)
# get the process rule
processing_rule = db.session.query(DatasetProcessRule). \
filter(DatasetProcessRule.id == dataset_document.dataset_process_rule_id). \
first()
# get splitter
splitter = self._get_splitter(processing_rule)
# split to documents
documents = self._step_split(
text_docs=text_docs,
splitter=splitter,
dataset=dataset,
dataset_document=dataset_document,
processing_rule=processing_rule
)
self._build_index(
dataset=dataset,
dataset_document=dataset_document,
documents=documents
)
except DocumentIsPausedException:
raise DocumentIsPausedException('Document paused, document id: {}'.format(dataset_document.id))
except ProviderTokenNotInitError as e:
dataset_document.indexing_status = 'error'
dataset_document.error = str(e.description)
dataset_document.stopped_at = datetime.datetime.utcnow()
db.session.commit()
except Exception as e:
logging.exception("consume document failed")
dataset_document.indexing_status = 'error'
dataset_document.error = str(e)
dataset_document.stopped_at = datetime.datetime.utcnow()
db.session.commit()
def format_split_text(self, text):
regex = r"Q\d+:\s*(.*?)\s*A\d+:\s*([\s\S]*?)(?=Q|$)"
matches = re.findall(regex, text, re.MULTILINE)
result = []
for match in matches:
q = match[0]
a = match[1]
if q and a:
result.append({
"question": q,
"answer": re.sub(r"\n\s*", "\n", a.strip())
})
return result
def run_in_splitting_status(self, dataset_document: DatasetDocument):
"""Run the indexing process when the index_status is splitting."""
try:
# get dataset
dataset = Dataset.query.filter_by(
id=dataset_document.dataset_id
).first()
if not dataset:
raise ValueError("no dataset found")
# get exist document_segment list and delete
document_segments = DocumentSegment.query.filter_by(
dataset_id=dataset.id,
document_id=dataset_document.id
).all()
            for document_segment in document_segments:
                db.session.delete(document_segment)
db.session.commit()
# load file
text_docs = self._load_data(dataset_document)
# get the process rule
processing_rule = db.session.query(DatasetProcessRule). \
filter(DatasetProcessRule.id == dataset_document.dataset_process_rule_id). \
first()
# get splitter
splitter = self._get_splitter(processing_rule)
# split to documents
documents = self._step_split(
text_docs=text_docs,
splitter=splitter,
dataset=dataset,
dataset_document=dataset_document,
processing_rule=processing_rule
)
# build index
self._build_index(
dataset=dataset,
dataset_document=dataset_document,
documents=documents
)
except DocumentIsPausedException:
raise DocumentIsPausedException('Document paused, document id: {}'.format(dataset_document.id))
except ProviderTokenNotInitError as e:
dataset_document.indexing_status = 'error'
dataset_document.error = str(e.description)
dataset_document.stopped_at = datetime.datetime.utcnow()
db.session.commit()
except Exception as e:
logging.exception("consume document failed")
dataset_document.indexing_status = 'error'
dataset_document.error = str(e)
dataset_document.stopped_at = datetime.datetime.utcnow()
db.session.commit()
def run_in_indexing_status(self, dataset_document: DatasetDocument):
"""Run the indexing process when the index_status is indexing."""
try:
# get dataset
dataset = Dataset.query.filter_by(
id=dataset_document.dataset_id
).first()
if not dataset:
raise ValueError("no dataset found")
# get exist document_segment list and delete
document_segments = DocumentSegment.query.filter_by(
dataset_id=dataset.id,
document_id=dataset_document.id
).all()
documents = []
if document_segments:
for document_segment in document_segments:
# transform segment to node
if document_segment.status != "completed":
document = Document(
page_content=document_segment.content,
metadata={
"doc_id": document_segment.index_node_id,
"doc_hash": document_segment.index_node_hash,
"document_id": document_segment.document_id,
"dataset_id": document_segment.dataset_id,
}
)
documents.append(document)
# build index
self._build_index(
dataset=dataset,
dataset_document=dataset_document,
documents=documents
)
except DocumentIsPausedException:
raise DocumentIsPausedException('Document paused, document id: {}'.format(dataset_document.id))
except ProviderTokenNotInitError as e:
dataset_document.indexing_status = 'error'
dataset_document.error = str(e.description)
dataset_document.stopped_at = datetime.datetime.utcnow()
db.session.commit()
except Exception as e:
logging.exception("consume document failed")
dataset_document.indexing_status = 'error'
dataset_document.error = str(e)
dataset_document.stopped_at = datetime.datetime.utcnow()
db.session.commit()
def file_indexing_estimate(self, tenant_id: str, file_details: List[UploadFile], tmp_processing_rule: dict,
doc_form: str = None, doc_language: str = 'English', dataset_id: str = None) -> dict:
"""
        Estimate the indexing consumption (segments, tokens, price) for the given files.
"""
if dataset_id:
dataset = Dataset.query.filter_by(
id=dataset_id
).first()
if not dataset:
raise ValueError('Dataset not found.')
embedding_model = ModelFactory.get_embedding_model(
tenant_id=dataset.tenant_id,
model_provider_name=dataset.embedding_model_provider,
model_name=dataset.embedding_model
)
else:
embedding_model = ModelFactory.get_embedding_model(
tenant_id=tenant_id
)
tokens = 0
preview_texts = []
total_segments = 0
for file_detail in file_details:
# load data from file
text_docs = FileExtractor.load(file_detail)
processing_rule = DatasetProcessRule(
mode=tmp_processing_rule["mode"],
rules=json.dumps(tmp_processing_rule["rules"])
)
# get splitter
splitter = self._get_splitter(processing_rule)
# split to documents
documents = self._split_to_documents_for_estimate(
text_docs=text_docs,
splitter=splitter,
processing_rule=processing_rule
)
total_segments += len(documents)
for document in documents:
if len(preview_texts) < 5:
preview_texts.append(document.page_content)
tokens += embedding_model.get_num_tokens(self.filter_string(document.page_content))
if doc_form and doc_form == 'qa_model':
text_generation_model = ModelFactory.get_text_generation_model(
tenant_id=tenant_id
)
if len(preview_texts) > 0:
# qa model document
response = LLMGenerator.generate_qa_document(current_user.current_tenant_id, preview_texts[0], doc_language)
document_qa_list = self.format_split_text(response)
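            # The figures below appear to be rough heuristics for QA-mode cost: about 20 generated segments and 2000 tokens per source segment (assumption inferred from the multipliers).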
return {
"total_segments": total_segments * 20,
"tokens": total_segments * 2000,
"total_price": '{:f}'.format(
text_generation_model.calc_tokens_price(total_segments * 2000, MessageType.HUMAN)),
"currency": embedding_model.get_currency(),
"qa_preview": document_qa_list,
"preview": preview_texts
}
return {
"total_segments": total_segments,
"tokens": tokens,
"total_price": '{:f}'.format(embedding_model.calc_tokens_price(tokens)),
"currency": embedding_model.get_currency(),
"preview": preview_texts
}
def notion_indexing_estimate(self, tenant_id: str, notion_info_list: list, tmp_processing_rule: dict,
doc_form: str = None, doc_language: str = 'English', dataset_id: str = None) -> dict:
"""
        Estimate the indexing consumption (segments, tokens, price) for the given Notion pages.
"""
if dataset_id:
dataset = Dataset.query.filter_by(
id=dataset_id
).first()
if not dataset:
raise ValueError('Dataset not found.')
embedding_model = ModelFactory.get_embedding_model(
tenant_id=dataset.tenant_id,
model_provider_name=dataset.embedding_model_provider,
model_name=dataset.embedding_model
)
else:
embedding_model = ModelFactory.get_embedding_model(
tenant_id=tenant_id
)
# load data from notion
tokens = 0
preview_texts = []
total_segments = 0
for notion_info in notion_info_list:
workspace_id = notion_info['workspace_id']
data_source_binding = DataSourceBinding.query.filter(
db.and_(
DataSourceBinding.tenant_id == current_user.current_tenant_id,
DataSourceBinding.provider == 'notion',
DataSourceBinding.disabled == False,
DataSourceBinding.source_info['workspace_id'] == f'"{workspace_id}"'
)
).first()
if not data_source_binding:
raise ValueError('Data source binding not found.')
for page in notion_info['pages']:
loader = NotionLoader(
notion_access_token=data_source_binding.access_token,
notion_workspace_id=workspace_id,
notion_obj_id=page['page_id'],
notion_page_type=page['type']
)
documents = loader.load()
processing_rule = DatasetProcessRule(
mode=tmp_processing_rule["mode"],
rules=json.dumps(tmp_processing_rule["rules"])
)
# get splitter
splitter = self._get_splitter(processing_rule)
# split to documents
documents = self._split_to_documents_for_estimate(
text_docs=documents,
splitter=splitter,
processing_rule=processing_rule
)
total_segments += len(documents)
for document in documents:
if len(preview_texts) < 5:
preview_texts.append(document.page_content)
tokens += embedding_model.get_num_tokens(document.page_content)
if doc_form and doc_form == 'qa_model':
text_generation_model = ModelFactory.get_text_generation_model(
tenant_id=tenant_id
)
if len(preview_texts) > 0:
# qa model document
response = LLMGenerator.generate_qa_document(current_user.current_tenant_id, preview_texts[0], doc_language)
document_qa_list = self.format_split_text(response)
return {
"total_segments": total_segments * 20,
"tokens": total_segments * 2000,
"total_price": '{:f}'.format(
text_generation_model.calc_tokens_price(total_segments * 2000, MessageType.HUMAN)),
"currency": embedding_model.get_currency(),
"qa_preview": document_qa_list,
"preview": preview_texts
}
return {
"total_segments": total_segments,
"tokens": tokens,
"total_price": '{:f}'.format(embedding_model.calc_tokens_price(tokens)),
"currency": embedding_model.get_currency(),
"preview": preview_texts
}
def _load_data(self, dataset_document: DatasetDocument) -> List[Document]:
# load file
if dataset_document.data_source_type not in ["upload_file", "notion_import"]:
return []
data_source_info = dataset_document.data_source_info_dict
text_docs = []
if dataset_document.data_source_type == 'upload_file':
if not data_source_info or 'upload_file_id' not in data_source_info:
raise ValueError("no upload file found")
file_detail = db.session.query(UploadFile). \
filter(UploadFile.id == data_source_info['upload_file_id']). \
one_or_none()
text_docs = FileExtractor.load(file_detail)
elif dataset_document.data_source_type == 'notion_import':
loader = NotionLoader.from_document(dataset_document)
text_docs = loader.load()
# update document status to splitting
self._update_document_index_status(
document_id=dataset_document.id,
after_indexing_status="splitting",
extra_update_params={
DatasetDocument.word_count: sum([len(text_doc.page_content) for text_doc in text_docs]),
DatasetDocument.parsing_completed_at: datetime.datetime.utcnow()
}
)
# replace doc id to document model id
text_docs = cast(List[Document], text_docs)
for text_doc in text_docs:
# remove invalid symbol
text_doc.page_content = self.filter_string(text_doc.page_content)
text_doc.metadata['document_id'] = dataset_document.id
text_doc.metadata['dataset_id'] = dataset_document.dataset_id
return text_docs
def filter_string(self, text):
text = re.sub(r'<\|', '<', text)
text = re.sub(r'\|>', '>', text)
text = re.sub(r'[\x00-\x08\x0B\x0C\x0E-\x1F\x7F\x80-\xFF]', '', text)
return text
def _get_splitter(self, processing_rule: DatasetProcessRule) -> TextSplitter:
"""
        Get the text splitter according to the processing rule.
"""
if processing_rule.mode == "custom":
# The user-defined segmentation rule
rules = json.loads(processing_rule.rules)
segmentation = rules["segmentation"]
if segmentation["max_tokens"] < 50 or segmentation["max_tokens"] > 1000:
raise ValueError("Custom segment length should be between 50 and 1000.")
separator = segmentation["separator"]
if separator:
separator = separator.replace('\\n', '\n')
character_splitter = FixedRecursiveCharacterTextSplitter.from_tiktoken_encoder(
chunk_size=segmentation["max_tokens"],
chunk_overlap=0,
fixed_separator=separator,
separators=["\n\n", "。", ".", " ", ""]
)
else:
# Automatic segmentation
character_splitter = RecursiveCharacterTextSplitter.from_tiktoken_encoder(
chunk_size=DatasetProcessRule.AUTOMATIC_RULES['segmentation']['max_tokens'],
chunk_overlap=0,
separators=["\n\n", "。", ".", " ", ""]
)
return character_splitter
def _step_split(self, text_docs: List[Document], splitter: TextSplitter,
dataset: Dataset, dataset_document: DatasetDocument, processing_rule: DatasetProcessRule) \
-> List[Document]:
"""
        Split the text documents into chunks and save them as document segments.
"""
documents = self._split_to_documents(
text_docs=text_docs,
splitter=splitter,
processing_rule=processing_rule,
tenant_id=dataset.tenant_id,
document_form=dataset_document.doc_form,
document_language=dataset_document.doc_language
)
# save node to document segment
doc_store = DatesetDocumentStore(
dataset=dataset,
user_id=dataset_document.created_by,
document_id=dataset_document.id
)
# add document segments
doc_store.add_documents(documents)
# update document status to indexing
cur_time = datetime.datetime.utcnow()
self._update_document_index_status(
document_id=dataset_document.id,
after_indexing_status="indexing",
extra_update_params={
DatasetDocument.cleaning_completed_at: cur_time,
DatasetDocument.splitting_completed_at: cur_time,
}
)
# update segment status to indexing
self._update_segments_by_document(
dataset_document_id=dataset_document.id,
update_params={
DocumentSegment.status: "indexing",
DocumentSegment.indexing_at: datetime.datetime.utcnow()
}
)
return documents
def _split_to_documents(self, text_docs: List[Document], splitter: TextSplitter,
processing_rule: DatasetProcessRule, tenant_id: str,
document_form: str, document_language: str) -> List[Document]:
"""
Split the text documents into nodes.
"""
all_documents = []
all_qa_documents = []
for text_doc in text_docs:
# document clean
document_text = self._document_clean(text_doc.page_content, processing_rule)
text_doc.page_content = document_text
# parse document to nodes
documents = splitter.split_documents([text_doc])
split_documents = []
for document_node in documents:
doc_id = str(uuid.uuid4())
hash = helper.generate_text_hash(document_node.page_content)
document_node.metadata['doc_id'] = doc_id
document_node.metadata['doc_hash'] = hash
split_documents.append(document_node)
all_documents.extend(split_documents)
# processing qa document
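        # QA generation is fanned out in batches of 10 documents, each batch running on worker threads that push their own Flask app context.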
if document_form == 'qa_model':
for i in range(0, len(all_documents), 10):
threads = []
sub_documents = all_documents[i:i + 10]
for doc in sub_documents:
document_format_thread = threading.Thread(target=self.format_qa_document, kwargs={
'flask_app': current_app._get_current_object(),
'tenant_id': tenant_id, 'document_node': doc, 'all_qa_documents': all_qa_documents,
'document_language': document_language})
threads.append(document_format_thread)
document_format_thread.start()
for thread in threads:
thread.join()
return all_qa_documents
return all_documents
def format_qa_document(self, flask_app: Flask, tenant_id: str, document_node, all_qa_documents, document_language):
format_documents = []
if document_node.page_content is None or not document_node.page_content.strip():
return
with flask_app.app_context():
try:
# qa model document
response = LLMGenerator.generate_qa_document(tenant_id, document_node.page_content, document_language)
document_qa_list = self.format_split_text(response)
qa_documents = []
for result in document_qa_list:
qa_document = Document(page_content=result['question'], metadata=document_node.metadata.copy())
doc_id = str(uuid.uuid4())
hash = helper.generate_text_hash(result['question'])
qa_document.metadata['answer'] = result['answer']
qa_document.metadata['doc_id'] = doc_id
qa_document.metadata['doc_hash'] = hash
qa_documents.append(qa_document)
format_documents.extend(qa_documents)
except Exception as e:
logging.exception(e)
all_qa_documents.extend(format_documents)
def _split_to_documents_for_estimate(self, text_docs: List[Document], splitter: TextSplitter,
processing_rule: DatasetProcessRule) -> List[Document]:
"""
Split the text documents into nodes.
"""
all_documents = []
for text_doc in text_docs:
# document clean
document_text = self._document_clean(text_doc.page_content, processing_rule)
text_doc.page_content = document_text
# parse document to nodes
documents = splitter.split_documents([text_doc])
split_documents = []
for document in documents:
if document.page_content is None or not document.page_content.strip():
continue
doc_id = str(uuid.uuid4())
hash = helper.generate_text_hash(document.page_content)
document.metadata['doc_id'] = doc_id
document.metadata['doc_hash'] = hash
split_documents.append(document)
all_documents.extend(split_documents)
return all_documents
def _document_clean(self, text: str, processing_rule: DatasetProcessRule) -> str:
"""
Clean the document text according to the processing rules.
"""
if processing_rule.mode == "automatic":
rules = DatasetProcessRule.AUTOMATIC_RULES
else:
rules = json.loads(processing_rule.rules) if processing_rule.rules else {}
if 'pre_processing_rules' in rules:
pre_processing_rules = rules["pre_processing_rules"]
for pre_processing_rule in pre_processing_rules:
if pre_processing_rule["id"] == "remove_extra_spaces" and pre_processing_rule["enabled"] is True:
# Remove extra spaces
pattern = r'\n{3,}'
text = re.sub(pattern, '\n\n', text)
pattern = r'[\t\f\r\x20\u00a0\u1680\u180e\u2000-\u200a\u202f\u205f\u3000]{2,}'
text = re.sub(pattern, ' ', text)
elif pre_processing_rule["id"] == "remove_urls_emails" and pre_processing_rule["enabled"] is True:
# Remove email
pattern = r'([a-zA-Z0-9_.+-]+@[a-zA-Z0-9-]+\.[a-zA-Z0-9-.]+)'
text = re.sub(pattern, '', text)
# Remove URL
pattern = r'https?://[^\s]+'
text = re.sub(pattern, '', text)
return text
def format_split_text(self, text):
        regex = r"Q\d+:\s*(.*?)\s*A\d+:\s*([\s\S]*?)(?=Q|$)"  # regex matching Q/A pairs
        matches = re.findall(regex, text, re.MULTILINE)  # collect all matches
        result = []  # holds the final result
for match in matches:
q = match[0]
a = match[1]
if q and a:
                # keep the pair only when both the question and the answer are present
result.append({
"question": q,
"answer": re.sub(r"\n\s*", "\n", a.strip())
})
return result
def _build_index(self, dataset: Dataset, dataset_document: DatasetDocument, documents: List[Document]) -> None:
"""
Build the index for the document.
"""
vector_index = IndexBuilder.get_index(dataset, 'high_quality')
keyword_table_index = IndexBuilder.get_index(dataset, 'economy')
embedding_model = ModelFactory.get_embedding_model(
tenant_id=dataset.tenant_id,
model_provider_name=dataset.embedding_model_provider,
model_name=dataset.embedding_model
)
# chunk nodes by chunk size
indexing_start_at = time.perf_counter()
tokens = 0
chunk_size = 100
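        # Index in batches of 100 documents, checking for a user-requested pause between batches.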
for i in range(0, len(documents), chunk_size):
# check document is paused
self._check_document_paused_status(dataset_document.id)
chunk_documents = documents[i:i + chunk_size]
tokens += sum(
embedding_model.get_num_tokens(document.page_content)
for document in chunk_documents
)
# save vector index
if vector_index:
vector_index.add_texts(chunk_documents)
# save keyword index
keyword_table_index.add_texts(chunk_documents)
document_ids = [document.metadata['doc_id'] for document in chunk_documents]
db.session.query(DocumentSegment).filter(
DocumentSegment.document_id == dataset_document.id,
DocumentSegment.index_node_id.in_(document_ids),
DocumentSegment.status == "indexing"
).update({
DocumentSegment.status: "completed",
DocumentSegment.completed_at: datetime.datetime.utcnow()
})
db.session.commit()
indexing_end_at = time.perf_counter()
# update document status to completed
self._update_document_index_status(
document_id=dataset_document.id,
after_indexing_status="completed",
extra_update_params={
DatasetDocument.tokens: tokens,
DatasetDocument.completed_at: datetime.datetime.utcnow(),
DatasetDocument.indexing_latency: indexing_end_at - indexing_start_at,
}
)
def _check_document_paused_status(self, document_id: str):
indexing_cache_key = 'document_{}_is_paused'.format(document_id)
result = redis_client.get(indexing_cache_key)
if result:
raise DocumentIsPausedException()
def _update_document_index_status(self, document_id: str, after_indexing_status: str,
extra_update_params: Optional[dict] = None) -> None:
"""
Update the document indexing status.
"""
count = DatasetDocument.query.filter_by(id=document_id, is_paused=True).count()
if count > 0:
raise DocumentIsPausedException()
update_params = {
DatasetDocument.indexing_status: after_indexing_status
}
if extra_update_params:
update_params.update(extra_update_params)
DatasetDocument.query.filter_by(id=document_id).update(update_params)
db.session.commit()
def _update_segments_by_document(self, dataset_document_id: str, update_params: dict) -> None:
"""
Update the document segment by document id.
"""
DocumentSegment.query.filter_by(document_id=dataset_document_id).update(update_params)
db.session.commit()
def batch_add_segments(self, segments: List[DocumentSegment], dataset: Dataset):
"""
        Batch-add segments to the dataset indexes.
"""
documents = []
for segment in segments:
document = Document(
page_content=segment.content,
metadata={
"doc_id": segment.index_node_id,
"doc_hash": segment.index_node_hash,
"document_id": segment.document_id,
"dataset_id": segment.dataset_id,
}
)
documents.append(document)
# save vector index
index = IndexBuilder.get_index(dataset, 'high_quality')
if index:
index.add_texts(documents, duplicate_check=True)
# save keyword index
index = IndexBuilder.get_index(dataset, 'economy')
if index:
index.add_texts(documents)
class DocumentIsPausedException(Exception):
pass
| [] |
2024-01-10 | 17600164659/dify | api~tests~integration_tests~models~llm~test_tongyi_model.py | import json
import os
from unittest.mock import patch
from langchain.schema import ChatGeneration, AIMessage, Generation
from core.model_providers.models.entity.message import PromptMessage, MessageType
from core.model_providers.models.entity.model_params import ModelKwargs
from core.model_providers.models.llm.tongyi_model import TongyiModel
from core.model_providers.providers.tongyi_provider import TongyiProvider
from models.provider import Provider, ProviderType
def get_mock_provider(valid_api_key):
return Provider(
id='provider_id',
tenant_id='tenant_id',
provider_name='tongyi',
provider_type=ProviderType.CUSTOM.value,
encrypted_config=json.dumps({
'dashscope_api_key': valid_api_key,
}),
is_valid=True,
)
def get_mock_model(model_name):
model_kwargs = ModelKwargs(
max_tokens=10,
temperature=0.01
)
valid_api_key = os.environ['TONGYI_DASHSCOPE_API_KEY']
model_provider = TongyiProvider(provider=get_mock_provider(valid_api_key))
return TongyiModel(
model_provider=model_provider,
name=model_name,
model_kwargs=model_kwargs
)
def decrypt_side_effect(tenant_id, encrypted_api_key):
return encrypted_api_key
@patch('core.helper.encrypter.decrypt_token', side_effect=decrypt_side_effect)
def test_get_num_tokens(mock_decrypt):
model = get_mock_model('qwen-v1')
rst = model.get_num_tokens([
PromptMessage(type=MessageType.HUMAN, content='Who is your manufacturer?')
])
assert rst == 5
@patch('core.helper.encrypter.decrypt_token', side_effect=decrypt_side_effect)
def test_run(mock_decrypt, mocker):
mocker.patch('core.model_providers.providers.base.BaseModelProvider.update_last_used', return_value=None)
model = get_mock_model('qwen-v1')
rst = model.run(
[PromptMessage(content='Human: Are you Human? you MUST only answer `y` or `n`? \nAssistant: ')],
stop=['\nHuman:'],
)
assert len(rst.content) > 0
| [
"Who is your manufacturer?",
"Human: Are you Human? you MUST only answer `y` or `n`? \nAssistant: "
] |
2024-01-10 | 17600164659/dify | api~core~model_providers~model_provider_factory.py | from typing import Type
from sqlalchemy.exc import IntegrityError
from core.model_providers.models.entity.model_params import ModelType
from core.model_providers.providers.base import BaseModelProvider
from core.model_providers.rules import provider_rules
from extensions.ext_database import db
from models.provider import TenantPreferredModelProvider, ProviderType, Provider, ProviderQuotaType
DEFAULT_MODELS = {
ModelType.TEXT_GENERATION.value: {
'provider_name': 'openai',
'model_name': 'gpt-3.5-turbo',
},
ModelType.EMBEDDINGS.value: {
'provider_name': 'openai',
'model_name': 'text-embedding-ada-002',
},
ModelType.SPEECH_TO_TEXT.value: {
'provider_name': 'openai',
'model_name': 'whisper-1',
}
}
class ModelProviderFactory:
@classmethod
def get_model_provider_class(cls, provider_name: str) -> Type[BaseModelProvider]:
if provider_name == 'openai':
from core.model_providers.providers.openai_provider import OpenAIProvider
return OpenAIProvider
elif provider_name == 'anthropic':
from core.model_providers.providers.anthropic_provider import AnthropicProvider
return AnthropicProvider
elif provider_name == 'minimax':
from core.model_providers.providers.minimax_provider import MinimaxProvider
return MinimaxProvider
elif provider_name == 'spark':
from core.model_providers.providers.spark_provider import SparkProvider
return SparkProvider
elif provider_name == 'tongyi':
from core.model_providers.providers.tongyi_provider import TongyiProvider
return TongyiProvider
elif provider_name == 'wenxin':
from core.model_providers.providers.wenxin_provider import WenxinProvider
return WenxinProvider
elif provider_name == 'chatglm':
from core.model_providers.providers.chatglm_provider import ChatGLMProvider
return ChatGLMProvider
elif provider_name == 'azure_openai':
from core.model_providers.providers.azure_openai_provider import AzureOpenAIProvider
return AzureOpenAIProvider
elif provider_name == 'replicate':
from core.model_providers.providers.replicate_provider import ReplicateProvider
return ReplicateProvider
elif provider_name == 'huggingface_hub':
from core.model_providers.providers.huggingface_hub_provider import HuggingfaceHubProvider
return HuggingfaceHubProvider
elif provider_name == 'xinference':
from core.model_providers.providers.xinference_provider import XinferenceProvider
return XinferenceProvider
elif provider_name == 'openllm':
from core.model_providers.providers.openllm_provider import OpenLLMProvider
return OpenLLMProvider
else:
raise NotImplementedError
@classmethod
def get_provider_names(cls):
"""
Returns a list of provider names.
"""
return list(provider_rules.keys())
@classmethod
def get_provider_rules(cls):
"""
        Returns the rules for all providers.
:return:
"""
return provider_rules
@classmethod
def get_provider_rule(cls, provider_name: str):
"""
Returns provider rule.
"""
return provider_rules[provider_name]
@classmethod
def get_preferred_model_provider(cls, tenant_id: str, model_provider_name: str):
"""
get preferred model provider.
:param tenant_id: a string representing the ID of the tenant.
:param model_provider_name:
:return:
"""
# get preferred provider
preferred_provider = cls._get_preferred_provider(tenant_id, model_provider_name)
if not preferred_provider or not preferred_provider.is_valid:
return None
# init model provider
model_provider_class = ModelProviderFactory.get_model_provider_class(model_provider_name)
return model_provider_class(provider=preferred_provider)
@classmethod
def get_preferred_type_by_preferred_model_provider(cls,
tenant_id: str,
model_provider_name: str,
preferred_model_provider: TenantPreferredModelProvider):
"""
get preferred provider type by preferred model provider.
:param model_provider_name:
:param preferred_model_provider:
:return:
"""
if not preferred_model_provider:
model_provider_rules = ModelProviderFactory.get_provider_rule(model_provider_name)
support_provider_types = model_provider_rules['support_provider_types']
if ProviderType.CUSTOM.value in support_provider_types:
custom_provider = db.session.query(Provider) \
.filter(
Provider.tenant_id == tenant_id,
Provider.provider_name == model_provider_name,
Provider.provider_type == ProviderType.CUSTOM.value,
Provider.is_valid == True
).first()
if custom_provider:
return ProviderType.CUSTOM.value
model_provider = cls.get_model_provider_class(model_provider_name)
if ProviderType.SYSTEM.value in support_provider_types \
and model_provider.is_provider_type_system_supported():
return ProviderType.SYSTEM.value
elif ProviderType.CUSTOM.value in support_provider_types:
return ProviderType.CUSTOM.value
else:
return preferred_model_provider.preferred_provider_type
@classmethod
def _get_preferred_provider(cls, tenant_id: str, model_provider_name: str):
"""
get preferred provider of tenant.
:param tenant_id:
:param model_provider_name:
:return:
"""
# get preferred provider type
preferred_provider_type = cls._get_preferred_provider_type(tenant_id, model_provider_name)
# get providers by preferred provider type
providers = db.session.query(Provider) \
.filter(
Provider.tenant_id == tenant_id,
Provider.provider_name == model_provider_name,
Provider.provider_type == preferred_provider_type
).all()
no_system_provider = False
if preferred_provider_type == ProviderType.SYSTEM.value:
quota_type_to_provider_dict = {}
for provider in providers:
quota_type_to_provider_dict[provider.quota_type] = provider
model_provider_rules = ModelProviderFactory.get_provider_rule(model_provider_name)
for quota_type_enum in ProviderQuotaType:
quota_type = quota_type_enum.value
if quota_type in model_provider_rules['system_config']['supported_quota_types']:
if quota_type in quota_type_to_provider_dict.keys():
provider = quota_type_to_provider_dict[quota_type]
if provider.is_valid and provider.quota_limit > provider.quota_used:
return provider
elif quota_type == ProviderQuotaType.TRIAL.value:
try:
provider = Provider(
tenant_id=tenant_id,
provider_name=model_provider_name,
provider_type=ProviderType.SYSTEM.value,
is_valid=True,
quota_type=ProviderQuotaType.TRIAL.value,
quota_limit=model_provider_rules['system_config']['quota_limit'],
quota_used=0
)
db.session.add(provider)
db.session.commit()
except IntegrityError:
db.session.rollback()
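                            # The IntegrityError most likely means a concurrent request already created the trial provider; reuse the existing row below.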
provider = db.session.query(Provider) \
.filter(
Provider.tenant_id == tenant_id,
Provider.provider_name == model_provider_name,
Provider.provider_type == ProviderType.SYSTEM.value,
Provider.quota_type == ProviderQuotaType.TRIAL.value
).first()
return provider
no_system_provider = True
if no_system_provider:
providers = db.session.query(Provider) \
.filter(
Provider.tenant_id == tenant_id,
Provider.provider_name == model_provider_name,
Provider.provider_type == ProviderType.CUSTOM.value
).all()
if preferred_provider_type == ProviderType.CUSTOM.value or no_system_provider:
if providers:
return providers[0]
else:
try:
provider = Provider(
tenant_id=tenant_id,
provider_name=model_provider_name,
provider_type=ProviderType.CUSTOM.value,
is_valid=False
)
db.session.add(provider)
db.session.commit()
except IntegrityError:
db.session.rollback()
provider = db.session.query(Provider) \
.filter(
Provider.tenant_id == tenant_id,
Provider.provider_name == model_provider_name,
Provider.provider_type == ProviderType.CUSTOM.value
).first()
return provider
return None
@classmethod
def _get_preferred_provider_type(cls, tenant_id: str, model_provider_name: str):
"""
get preferred provider type of tenant.
:param tenant_id:
:param model_provider_name:
:return:
"""
preferred_model_provider = db.session.query(TenantPreferredModelProvider) \
.filter(
TenantPreferredModelProvider.tenant_id == tenant_id,
TenantPreferredModelProvider.provider_name == model_provider_name
).first()
return cls.get_preferred_type_by_preferred_model_provider(tenant_id, model_provider_name, preferred_model_provider)
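    # Illustrative usage sketch (not part of the original module; arguments assumed from the methods above):
    #   provider_class = ModelProviderFactory.get_model_provider_class('openai')
    #   model_provider = ModelProviderFactory.get_preferred_model_provider(tenant_id, 'openai')
    #   rules = ModelProviderFactory.get_provider_rule('openai')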
| [] |
2024-01-10 | 17600164659/dify | api~core~model_providers~models~llm~openllm_model.py | from typing import List, Optional, Any
from langchain.callbacks.manager import Callbacks
from langchain.schema import LLMResult
from core.model_providers.error import LLMBadRequestError
from core.model_providers.models.llm.base import BaseLLM
from core.model_providers.models.entity.message import PromptMessage
from core.model_providers.models.entity.model_params import ModelMode, ModelKwargs
from core.third_party.langchain.llms.openllm import OpenLLM
class OpenLLMModel(BaseLLM):
model_mode: ModelMode = ModelMode.COMPLETION
def _init_client(self) -> Any:
self.provider_model_kwargs = self._to_model_kwargs_input(self.model_rules, self.model_kwargs)
client = OpenLLM(
server_url=self.credentials.get('server_url'),
callbacks=self.callbacks,
llm_kwargs=self.provider_model_kwargs
)
return client
def _run(self, messages: List[PromptMessage],
stop: Optional[List[str]] = None,
callbacks: Callbacks = None,
**kwargs) -> LLMResult:
"""
        Run prediction for the given prompt messages and stop words.
:param messages:
:param stop:
:param callbacks:
:return:
"""
prompts = self._get_prompt_from_messages(messages)
return self._client.generate([prompts], stop, callbacks)
def get_num_tokens(self, messages: List[PromptMessage]) -> int:
"""
get num tokens of prompt messages.
:param messages:
:return:
"""
prompts = self._get_prompt_from_messages(messages)
return max(self._client.get_num_tokens(prompts), 0)
def _set_model_kwargs(self, model_kwargs: ModelKwargs):
pass
def handle_exceptions(self, ex: Exception) -> Exception:
return LLMBadRequestError(f"OpenLLM: {str(ex)}")
@classmethod
def support_streaming(cls):
return False
| [] |
2024-01-10 | 17600164659/dify | api~tests~integration_tests~models~llm~test_minimax_model.py | import json
import os
from unittest.mock import patch
from langchain.schema import ChatGeneration, AIMessage, Generation
from core.model_providers.models.entity.message import PromptMessage, MessageType
from core.model_providers.models.entity.model_params import ModelKwargs
from core.model_providers.models.llm.minimax_model import MinimaxModel
from core.model_providers.providers.minimax_provider import MinimaxProvider
from models.provider import Provider, ProviderType
def get_mock_provider(valid_group_id, valid_api_key):
return Provider(
id='provider_id',
tenant_id='tenant_id',
provider_name='minimax',
provider_type=ProviderType.CUSTOM.value,
encrypted_config=json.dumps({
'minimax_group_id': valid_group_id,
'minimax_api_key': valid_api_key
}),
is_valid=True,
)
def get_mock_model(model_name):
model_kwargs = ModelKwargs(
max_tokens=10,
temperature=0.01
)
valid_api_key = os.environ['MINIMAX_API_KEY']
valid_group_id = os.environ['MINIMAX_GROUP_ID']
model_provider = MinimaxProvider(provider=get_mock_provider(valid_group_id, valid_api_key))
return MinimaxModel(
model_provider=model_provider,
name=model_name,
model_kwargs=model_kwargs
)
def decrypt_side_effect(tenant_id, encrypted_api_key):
return encrypted_api_key
@patch('core.helper.encrypter.decrypt_token', side_effect=decrypt_side_effect)
def test_get_num_tokens(mock_decrypt):
model = get_mock_model('abab5.5-chat')
rst = model.get_num_tokens([
PromptMessage(type=MessageType.HUMAN, content='Who is your manufacturer?')
])
assert rst == 5
@patch('core.helper.encrypter.decrypt_token', side_effect=decrypt_side_effect)
def test_run(mock_decrypt, mocker):
mocker.patch('core.model_providers.providers.base.BaseModelProvider.update_last_used', return_value=None)
model = get_mock_model('abab5.5-chat')
rst = model.run(
[PromptMessage(content='Human: Are you a real Human? you MUST only answer `y` or `n`? \nAssistant: ')],
stop=['\nHuman:'],
)
assert len(rst.content) > 0
| [
"Who is your manufacturer?",
"Human: Are you a real Human? you MUST only answer `y` or `n`? \nAssistant: "
] |
2024-01-10 | 17600164659/dify | api~core~model_providers~models~llm~chatglm_model.py | import decimal
from typing import List, Optional, Any
from langchain.callbacks.manager import Callbacks
from langchain.llms import ChatGLM
from langchain.schema import LLMResult
from core.model_providers.error import LLMBadRequestError
from core.model_providers.models.llm.base import BaseLLM
from core.model_providers.models.entity.message import PromptMessage, MessageType
from core.model_providers.models.entity.model_params import ModelMode, ModelKwargs
class ChatGLMModel(BaseLLM):
model_mode: ModelMode = ModelMode.COMPLETION
def _init_client(self) -> Any:
provider_model_kwargs = self._to_model_kwargs_input(self.model_rules, self.model_kwargs)
return ChatGLM(
callbacks=self.callbacks,
endpoint_url=self.credentials.get('api_base'),
**provider_model_kwargs
)
def _run(self, messages: List[PromptMessage],
stop: Optional[List[str]] = None,
callbacks: Callbacks = None,
**kwargs) -> LLMResult:
"""
        Run prediction for the given prompt messages and stop words.
:param messages:
:param stop:
:param callbacks:
:return:
"""
prompts = self._get_prompt_from_messages(messages)
return self._client.generate([prompts], stop, callbacks)
def get_num_tokens(self, messages: List[PromptMessage]) -> int:
"""
get num tokens of prompt messages.
:param messages:
:return:
"""
prompts = self._get_prompt_from_messages(messages)
return max(self._client.get_num_tokens(prompts), 0)
def get_currency(self):
return 'RMB'
def _set_model_kwargs(self, model_kwargs: ModelKwargs):
provider_model_kwargs = self._to_model_kwargs_input(self.model_rules, model_kwargs)
for k, v in provider_model_kwargs.items():
if hasattr(self.client, k):
setattr(self.client, k, v)
def handle_exceptions(self, ex: Exception) -> Exception:
if isinstance(ex, ValueError):
return LLMBadRequestError(f"ChatGLM: {str(ex)}")
else:
return ex
@classmethod
def support_streaming(cls):
return False
| [] |
2024-01-10 | 17600164659/dify | api~tests~integration_tests~models~llm~test_openai_model.py | import json
import os
from unittest.mock import patch
from langchain.schema import Generation, ChatGeneration, AIMessage
from core.model_providers.providers.openai_provider import OpenAIProvider
from core.model_providers.models.entity.message import PromptMessage, MessageType
from core.model_providers.models.entity.model_params import ModelKwargs
from core.model_providers.models.llm.openai_model import OpenAIModel
from models.provider import Provider, ProviderType
def get_mock_provider(valid_openai_api_key):
return Provider(
id='provider_id',
tenant_id='tenant_id',
provider_name='openai',
provider_type=ProviderType.CUSTOM.value,
encrypted_config=json.dumps({'openai_api_key': valid_openai_api_key}),
is_valid=True,
)
def get_mock_openai_model(model_name):
model_kwargs = ModelKwargs(
max_tokens=10,
temperature=0
)
valid_openai_api_key = os.environ['OPENAI_API_KEY']
openai_provider = OpenAIProvider(provider=get_mock_provider(valid_openai_api_key))
return OpenAIModel(
model_provider=openai_provider,
name=model_name,
model_kwargs=model_kwargs
)
def decrypt_side_effect(tenant_id, encrypted_openai_api_key):
return encrypted_openai_api_key
@patch('core.helper.encrypter.decrypt_token', side_effect=decrypt_side_effect)
def test_get_num_tokens(mock_decrypt):
openai_model = get_mock_openai_model('text-davinci-003')
rst = openai_model.get_num_tokens([PromptMessage(content='you are a kindness Assistant.')])
assert rst == 6
@patch('core.helper.encrypter.decrypt_token', side_effect=decrypt_side_effect)
def test_chat_get_num_tokens(mock_decrypt):
openai_model = get_mock_openai_model('gpt-3.5-turbo')
rst = openai_model.get_num_tokens([
PromptMessage(type=MessageType.SYSTEM, content='you are a kindness Assistant.'),
PromptMessage(type=MessageType.HUMAN, content='Who is your manufacturer?')
])
assert rst == 22
@patch('core.helper.encrypter.decrypt_token', side_effect=decrypt_side_effect)
def test_run(mock_decrypt, mocker):
mocker.patch('core.model_providers.providers.base.BaseModelProvider.update_last_used', return_value=None)
openai_model = get_mock_openai_model('text-davinci-003')
rst = openai_model.run(
[PromptMessage(content='Human: Are you Human? you MUST only answer `y` or `n`? \nAssistant: ')],
stop=['\nHuman:'],
)
assert len(rst.content) > 0
assert rst.content.strip() == 'n'
@patch('core.helper.encrypter.decrypt_token', side_effect=decrypt_side_effect)
def test_chat_run(mock_decrypt, mocker):
mocker.patch('core.model_providers.providers.base.BaseModelProvider.update_last_used', return_value=None)
openai_model = get_mock_openai_model('gpt-3.5-turbo')
messages = [PromptMessage(content='Human: Are you Human? you MUST only answer `y` or `n`? \nAssistant: ')]
rst = openai_model.run(
messages,
stop=['\nHuman:'],
)
assert len(rst.content) > 0
| [
"Who is your manufacturer?",
"Human: Are you Human? you MUST only answer `y` or `n`? \nAssistant: ",
"you are a kindness Assistant."
] |
2024-01-10 | 17600164659/dify | api~core~callback_handler~agent_loop_gather_callback_handler.py | import json
import logging
import time
from typing import Any, Dict, List, Union, Optional
from langchain.agents import openai_functions_agent, openai_functions_multi_agent
from langchain.callbacks.base import BaseCallbackHandler
from langchain.schema import AgentAction, AgentFinish, LLMResult, ChatGeneration
from core.callback_handler.entity.agent_loop import AgentLoop
from core.conversation_message_task import ConversationMessageTask
from core.model_providers.models.entity.message import PromptMessage
from core.model_providers.models.llm.base import BaseLLM
class AgentLoopGatherCallbackHandler(BaseCallbackHandler):
"""Callback Handler that prints to std out."""
raise_error: bool = True
def __init__(self, model_instant: BaseLLM, conversation_message_task: ConversationMessageTask) -> None:
"""Initialize callback handler."""
self.model_instant = model_instant
self.conversation_message_task = conversation_message_task
self._agent_loops = []
self._current_loop = None
self._message_agent_thought = None
self.current_chain = None
@property
def agent_loops(self) -> List[AgentLoop]:
return self._agent_loops
def clear_agent_loops(self) -> None:
self._agent_loops = []
self._current_loop = None
self._message_agent_thought = None
@property
def always_verbose(self) -> bool:
"""Whether to call verbose callbacks even if verbose is False."""
return True
@property
def ignore_chain(self) -> bool:
"""Whether to ignore chain callbacks."""
return True
def on_llm_start(
self, serialized: Dict[str, Any], prompts: List[str], **kwargs: Any
) -> None:
"""Print out the prompts."""
# serialized={'name': 'OpenAI'}
# prompts=['Answer the following questions...\nThought:']
# kwargs={}
if not self._current_loop:
# Agent start with a LLM query
self._current_loop = AgentLoop(
position=len(self._agent_loops) + 1,
prompt=prompts[0],
status='llm_started',
started_at=time.perf_counter()
)
def on_llm_end(self, response: LLMResult, **kwargs: Any) -> None:
"""Do nothing."""
# kwargs={}
if self._current_loop and self._current_loop.status == 'llm_started':
self._current_loop.status = 'llm_end'
if response.llm_output:
self._current_loop.prompt_tokens = response.llm_output['token_usage']['prompt_tokens']
else:
self._current_loop.prompt_tokens = self.model_instant.get_num_tokens(
[PromptMessage(content=self._current_loop.prompt)]
)
completion_generation = response.generations[0][0]
if isinstance(completion_generation, ChatGeneration):
completion_message = completion_generation.message
if 'function_call' in completion_message.additional_kwargs:
self._current_loop.completion \
= json.dumps({'function_call': completion_message.additional_kwargs['function_call']})
else:
self._current_loop.completion = response.generations[0][0].text
else:
self._current_loop.completion = completion_generation.text
if response.llm_output:
self._current_loop.completion_tokens = response.llm_output['token_usage']['completion_tokens']
else:
self._current_loop.completion_tokens = self.model_instant.get_num_tokens(
[PromptMessage(content=self._current_loop.completion)]
)
def on_llm_error(
self, error: Union[Exception, KeyboardInterrupt], **kwargs: Any
) -> None:
logging.debug("Agent on_llm_error: %s", error)
self._agent_loops = []
self._current_loop = None
self._message_agent_thought = None
def on_tool_start(
self,
serialized: Dict[str, Any],
input_str: str,
**kwargs: Any,
) -> None:
"""Do nothing."""
# kwargs={'color': 'green', 'llm_prefix': 'Thought:', 'observation_prefix': 'Observation: '}
# input_str='action-input'
# serialized={'description': 'A search engine. Useful for when you need to answer questions about current events. Input should be a search query.', 'name': 'Search'}
pass
def on_agent_action(
self, action: AgentAction, color: Optional[str] = None, **kwargs: Any
) -> Any:
"""Run on agent action."""
tool = action.tool
tool_input = json.dumps({"query": action.tool_input}
if isinstance(action.tool_input, str) else action.tool_input)
completion = None
if isinstance(action, openai_functions_agent.base._FunctionsAgentAction) \
or isinstance(action, openai_functions_multi_agent.base._FunctionsAgentAction):
thought = action.log.strip()
completion = json.dumps({'function_call': action.message_log[0].additional_kwargs['function_call']})
else:
action_name_position = action.log.index("Action:") if action.log else -1
thought = action.log[:action_name_position].strip() if action.log else ''
if self._current_loop and self._current_loop.status == 'llm_end':
self._current_loop.status = 'agent_action'
self._current_loop.thought = thought
self._current_loop.tool_name = tool
self._current_loop.tool_input = tool_input
if completion is not None:
self._current_loop.completion = completion
self._message_agent_thought = self.conversation_message_task.on_agent_start(
self.current_chain,
self._current_loop
)
def on_tool_end(
self,
output: str,
color: Optional[str] = None,
observation_prefix: Optional[str] = None,
llm_prefix: Optional[str] = None,
**kwargs: Any,
) -> None:
"""If not the final action, print out observation."""
# kwargs={'name': 'Search'}
# llm_prefix='Thought:'
# observation_prefix='Observation: '
# output='53 years'
if self._current_loop and self._current_loop.status == 'agent_action' and output and output != 'None':
self._current_loop.status = 'tool_end'
self._current_loop.tool_output = output
self._current_loop.completed = True
self._current_loop.completed_at = time.perf_counter()
self._current_loop.latency = self._current_loop.completed_at - self._current_loop.started_at
self.conversation_message_task.on_agent_end(
self._message_agent_thought, self.model_instant, self._current_loop
)
self._agent_loops.append(self._current_loop)
self._current_loop = None
self._message_agent_thought = None
def on_tool_error(
self, error: Union[Exception, KeyboardInterrupt], **kwargs: Any
) -> None:
"""Do nothing."""
logging.debug("Agent on_tool_error: %s", error)
self._agent_loops = []
self._current_loop = None
self._message_agent_thought = None
def on_agent_finish(self, finish: AgentFinish, **kwargs: Any) -> Any:
"""Run on agent end."""
# Final Answer
if self._current_loop and (self._current_loop.status == 'llm_end' or self._current_loop.status == 'agent_action'):
self._current_loop.status = 'agent_finish'
self._current_loop.completed = True
self._current_loop.completed_at = time.perf_counter()
self._current_loop.latency = self._current_loop.completed_at - self._current_loop.started_at
self._current_loop.thought = '[DONE]'
self._message_agent_thought = self.conversation_message_task.on_agent_start(
self.current_chain,
self._current_loop
)
self.conversation_message_task.on_agent_end(
self._message_agent_thought, self.model_instant, self._current_loop
)
self._agent_loops.append(self._current_loop)
self._current_loop = None
self._message_agent_thought = None
elif not self._current_loop and self._agent_loops:
self._agent_loops[-1].status = 'agent_finish'
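# Typical wiring (sketch; `model_instance`, `task` and `agent_executor` stand in for
# objects created elsewhere in the app and are not defined in this module):
#
#   handler = AgentLoopGatherCallbackHandler(model_instant=model_instance,
#                                            conversation_message_task=task)
#   agent_executor.run(query, callbacks=[handler])
#   loops = handler.agent_loops  # one AgentLoop per thought/tool step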
| [] |
2024-01-10 | 17600164659/dify | api~core~model_providers~models~llm~spark_model.py | import decimal
from typing import List, Optional, Any
from langchain.callbacks.manager import Callbacks
from langchain.schema import LLMResult
from core.model_providers.error import LLMBadRequestError
from core.model_providers.models.llm.base import BaseLLM
from core.model_providers.models.entity.message import PromptMessage, MessageType
from core.model_providers.models.entity.model_params import ModelMode, ModelKwargs
from core.third_party.langchain.llms.spark import ChatSpark
from core.third_party.spark.spark_llm import SparkError
class SparkModel(BaseLLM):
model_mode: ModelMode = ModelMode.CHAT
def _init_client(self) -> Any:
provider_model_kwargs = self._to_model_kwargs_input(self.model_rules, self.model_kwargs)
return ChatSpark(
model_name=self.name,
streaming=self.streaming,
callbacks=self.callbacks,
**self.credentials,
**provider_model_kwargs
)
def _run(self, messages: List[PromptMessage],
stop: Optional[List[str]] = None,
callbacks: Callbacks = None,
**kwargs) -> LLMResult:
"""
        Run prediction from the prompt messages and stop words.
:param messages:
:param stop:
:param callbacks:
:return:
"""
prompts = self._get_prompt_from_messages(messages)
return self._client.generate([prompts], stop, callbacks)
def get_num_tokens(self, messages: List[PromptMessage]) -> int:
"""
        Get the number of tokens in the given prompt messages.
:param messages:
:return:
"""
contents = [message.content for message in messages]
return max(self._client.get_num_tokens("".join(contents)), 0)
def get_currency(self):
return 'RMB'
def _set_model_kwargs(self, model_kwargs: ModelKwargs):
provider_model_kwargs = self._to_model_kwargs_input(self.model_rules, model_kwargs)
for k, v in provider_model_kwargs.items():
if hasattr(self.client, k):
setattr(self.client, k, v)
def handle_exceptions(self, ex: Exception) -> Exception:
if isinstance(ex, SparkError):
return LLMBadRequestError(f"Spark: {str(ex)}")
else:
return ex
@classmethod
def support_streaming(cls):
return True | [] |
2024-01-10 | AlessandroStaffolani/reinforcement-learning-openai-gym | utils~gym_wrapper.py | import gym
from gym import wrappers, logger
import os
import numpy as np
import collections
import cv2
from definitions import ROOT_DIR
def wrap_env(env, task_name, logger_level=logger.INFO):
logger.set_level(logger_level)
outdir = os.path.join(ROOT_DIR, 'logs/' + task_name + '-results')
return wrappers.Monitor(env, directory=outdir, force=True)
# Taken from OpenAI baseline wrappers
# https://github.com/openai/baselines/blob/master/baselines/common/atari_wrappers.py
class FireResetEnv(gym.Wrapper):
def __init__(self, env=None):
"""Take action on reset for environments that are fixed until firing."""
super(FireResetEnv, self).__init__(env)
print(env.unwrapped.get_action_meanings())
assert env.unwrapped.get_action_meanings()[1] == 'FIRE'
assert len(env.unwrapped.get_action_meanings()) >= 3
def step(self, action):
return self.env.step(action)
def reset(self):
self.env.reset()
obs, _, done, _ = self.env.step(1)
if done:
self.env.reset()
obs, _, done, _ = self.env.step(2)
if done:
self.env.reset()
return obs
class MaxAndSkipEnv(gym.Wrapper):
def __init__(self, env=None, skip=4):
"""Return only every `skip`-th frame"""
super(MaxAndSkipEnv, self).__init__(env)
# most recent raw observations (for max pooling across time steps)
self._obs_buffer = collections.deque(maxlen=2)
self._skip = skip
def step(self, action):
total_reward = 0.0
done = None
for _ in range(self._skip):
obs, reward, done, info = self.env.step(action)
self._obs_buffer.append(obs)
total_reward += reward
if done:
break
max_frame = np.max(np.stack(self._obs_buffer), axis=0)
return max_frame, total_reward, done, info
def reset(self):
"""Clear past frame buffer and init to first obs"""
self._obs_buffer.clear()
obs = self.env.reset()
self._obs_buffer.append(obs)
return obs
class ProcessFrame84(gym.ObservationWrapper):
"""
Downsamples image to 84x84
Greyscales image
Returns numpy array
"""
def __init__(self, env=None):
super(ProcessFrame84, self).__init__(env)
self.observation_space = gym.spaces.Box(low=0, high=255, shape=(84, 84, 1), dtype=np.uint8)
def observation(self, obs):
return ProcessFrame84.process(obs)
@staticmethod
def process(frame):
if frame.size == 210 * 160 * 3:
img = np.reshape(frame, [210, 160, 3]).astype(np.float32)
elif frame.size == 250 * 160 * 3:
img = np.reshape(frame, [250, 160, 3]).astype(np.float32)
else:
assert False, "Unknown resolution."
img = img[:, :, 0] * 0.299 + img[:, :, 1] * 0.587 + img[:, :, 2] * 0.114
resized_screen = cv2.resize(img, (84, 110), interpolation=cv2.INTER_AREA)
x_t = resized_screen[18:102, :]
x_t = np.reshape(x_t, [84, 84, 1])
return x_t.astype(np.uint8)
class ImageToPyTorch(gym.ObservationWrapper):
def __init__(self, env):
super(ImageToPyTorch, self).__init__(env)
old_shape = self.observation_space.shape
self.observation_space = gym.spaces.Box(low=0.0, high=1.0, shape=(old_shape[-1], old_shape[0], old_shape[1]),
dtype=np.float32)
def observation(self, observation):
return np.moveaxis(observation, 2, 0)
class ScaledFloatFrame(gym.ObservationWrapper):
"""Normalize pixel values in frame --> 0 to 1"""
def observation(self, obs):
return np.array(obs).astype(np.float32) / 255.0
class BufferWrapper(gym.ObservationWrapper):
def __init__(self, env, n_steps, dtype=np.float32):
super(BufferWrapper, self).__init__(env)
self.dtype = dtype
old_space = env.observation_space
self.observation_space = gym.spaces.Box(old_space.low.repeat(n_steps, axis=0),
old_space.high.repeat(n_steps, axis=0), dtype=dtype)
def reset(self):
self.buffer = np.zeros_like(self.observation_space.low, dtype=self.dtype)
return self.observation(self.env.reset())
def observation(self, observation):
self.buffer[:-1] = self.buffer[1:]
self.buffer[-1] = observation
return self.buffer
def make_env_skip(env_name, env_is_wrapped=False):
env = gym.make(env_name)
env = MaxAndSkipEnv(env)
env = FireResetEnv(env)
if env_is_wrapped:
env = wrap_env(env, env_name)
return env
def make_env(env_name, env_is_wrapped=False):
env = gym.make(env_name)
env = MaxAndSkipEnv(env)
env = FireResetEnv(env)
env = ProcessFrame84(env)
env = ImageToPyTorch(env)
env = BufferWrapper(env, 4)
env = ScaledFloatFrame(env)
if env_is_wrapped:
env = wrap_env(env, env_name)
return env
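# Minimal smoke test of the wrapper pipeline (illustrative; assumes the old 4-tuple
# gym step API used above and an installed Atari environment -- the env id below is
# only an example and is not shipped with this repo):
if __name__ == "__main__":
    demo_env = make_env("PongNoFrameskip-v4")
    first_obs = demo_env.reset()
    print("observation shape:", first_obs.shape)  # expected: (4, 84, 84), values in [0, 1]
    _, demo_reward, demo_done, _ = demo_env.step(demo_env.action_space.sample())
    print("reward:", demo_reward, "done:", demo_done)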
| [] |
2024-01-10 | rrt2850/GUI-Chatbot-V1 | KivyWidgets~ChatWindow.py | """
Author: Robert Tetreault (rrt2850)
Filename: ChatWindow.py
Description: This script contains the constructors and functions used by the chat window.
It defines a Kivy widget responsible for displaying the chat window and
handling the user input.
"""
import json
import os
import re
import dotenv
import openai
import tiktoken
os.environ["KIVY_NO_CONSOLELOG"] = "1"
os.environ['KIVY_TEXT'] = 'pil'
from kivy.uix.modalview import ModalView
from kivy.uix.boxlayout import BoxLayout
from kivy.uix.button import Button
from kivy.uix.label import Label
from kivy.uix.scrollview import ScrollView
from kivy.uix.textinput import TextInput
from CharacterScripts.DataHandler import sharedVars
# set up environment variables
dotenv.load_dotenv()
openai.api_key = os.environ["OPENAI_API_KEY"]
encoding = tiktoken.encoding_for_model("gpt-3.5-turbo")
def countTokens(text):
"""
    Counts the tokens in a message using the gpt-3.5-turbo encoding and returns
    the count (0 if the text cannot be encoded).
"""
tokens = []
try:
tokens = encoding.encode(text)
except:
print("Error: Unable to encode text")
return len(tokens)
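# Illustrative: with the gpt-3.5-turbo (cl100k_base) encoding, countTokens("Hello world")
# returns 2 -- one token for "Hello" and one for " world".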
class CustomTextInput(TextInput):
"""
custom text input that allows for the enter key to be used to send messages
"""
def _keyboard_on_key_down(self, window, keycode, text, modifiers):
if keycode[1] == "enter" and not modifiers:
self.dispatch("on_text_validate")
self.focus = True
return True
return super()._keyboard_on_key_down(window, keycode, text, modifiers)
class ChatBoxLayout(BoxLayout):
"""
A box layout that contains a scrollable chat window and an input box
"""
def __init__(self, **kwargs):
super(ChatBoxLayout, self).__init__(**kwargs)
self.orientation = 'vertical'
self.padding = 10
self.spacing = 4
self.totalTotal = 0
self.updateVars()
self.setupUIComponents()
self.loadMessages()
def setupUIComponents(self):
"""
Makes the scroll view and input box and adds them to the layout
"""
self.setupScrollView()
self.setupInputLayout()
self.add_widget(self.scrollView)
self.add_widget(self.inputLayout)
def setupScrollView(self):
self.scrollView = self.createScrollView()
self.messageLabel = self.createMessageLabel()
self.scrollView.add_widget(self.messageLabel)
def createScrollView(self):
"""
Makes a scroll view to hold the messages so that they can be scrolled through if they don't fit on the screen
"""
return ScrollView(
bar_width=10,
effect_cls='ScrollEffect',
scroll_type=['bars', 'content'],
scroll_wheel_distance=10
)
def createMessageLabel(self):
"""
Makes a label to hold the messages
"""
messageLabel = Label(
size_hint_y=None,
size_hint_x=0.9,
text_size=(self.width, None),
font_size='16sp',
markup=True,
valign='top',
halign='left',
padding=(10, 10)
)
messageLabel.bind(
width=lambda *x: messageLabel.setter('text_size')(messageLabel, (messageLabel.width, None)),
texture_size=lambda *x: messageLabel.setter('height')(messageLabel, messageLabel.texture_size[1])
)
return messageLabel
def setupInputLayout(self):
"""
Makes a layout to hold the input box and send button
"""
self.inputLayout = self.createInputLayout()
self.userInput = self.createUserInput()
self.sendButton = self.createSendButton()
self.inputLayout.add_widget(self.userInput)
self.inputLayout.add_widget(self.sendButton)
def createInputLayout(self):
"""
Makes a layout to hold the input box and send button
"""
return BoxLayout(
size_hint_y=0.1,
orientation='horizontal'
)
def createUserInput(self):
"""
Makes a text input box for the user to type in
"""
userInput = CustomTextInput(
multiline=False,
do_wrap=True,
hint_text='Type your message...',
font_size='16sp',
size_hint_x=0.9,
text_validate_unfocus=False
)
userInput.bind(on_text_validate=lambda instance: self.sendMessage())
return userInput
def createSendButton(self):
"""
Makes a button to send the message
"""
sendButton = Button(
text='Send',
font_size='16sp',
size_hint_x=0.1
)
sendButton.bind(on_press=self.sendMessage)
return sendButton
def updateVars(self):
temp = sharedVars.gptStuff
self.temperature = temp["temperature"]
self.topP = temp["topP"]
self.maxTokens = temp["maxTokens"]
self.frequencyPenalty = temp["frequencyPenalty"]
self.presencePenalty = temp["presencePenalty"]
self.tokenLimit = temp["tokenLimit"]
self.prompt = sharedVars.prompt
self.systemMessage = sharedVars.setting
def loadMessages(self):
"""
loads message history from json file to resume previous chat
"""
# Attempt to load the chat history from the JSON file
try:
self.chatHistory = self.loadChatHistoryFromFile()
messages = self.chatHistory["logs"].get(sharedVars.chatKey, [])
except (FileNotFoundError, json.JSONDecodeError): # If file not found or there's an error decoding JSON
# Start a new chat if chat history can't be loaded
self.startNewChat()
return
# If no messages were found in the chat history
if not messages:
# Start a new chat if there are no messages in the chat history
self.startNewChat()
else:
# Otherwise, proceed with the existing chat history
sharedVars.messages = messages
self.handleExistingChat()
def loadChatHistoryFromFile(self):
# Load and return the chat history from the JSON file
with open(f"CharacterJsons/ChatHistory{sharedVars.player.name}.json", 'r') as f:
return json.load(f)
def startNewChat(self):
# Define the initial messages for a new chat
messages = [
{"role": "user", "content": self.prompt},
{"role": "system", "content": self.systemMessage}
]
# Add each initial message to the GUI and save it in the chat history
self.saveChatHistory(messages[0])
self.saveChatHistory(messages[1])
self.appendMessage(messages[1])
# Update the global messages variable and scroll to the bottom
sharedVars.messages = messages
self.scrollView.scroll_y = 0
# Start the chat loop
self.chatLoop()
def handleExistingChat(self):
self.keepLoading = False
def yes(button):
self.keepLoading = True
modal_view.dismiss()
def no(button):
self.keepLoading = False
modal_view.dismiss()
box = BoxLayout(orientation='vertical')
box.add_widget(Label(text='Load previous chat history?'))
box.add_widget(Button(text='Yes', on_release=yes))
box.add_widget(Button(text='No', on_release=no))
modal_view = ModalView(size_hint=(0.5, 0.5), auto_dismiss=False)
modal_view.add_widget(box)
modal_view.bind(on_dismiss=self.on_dismiss_popup)
modal_view.open()
def on_dismiss_popup(self, instance):
if not self.keepLoading:
# clear existing chat history
self.chatHistory["logs"][sharedVars.chatKey] = []
with open(f"CharacterJsons/ChatHistory{sharedVars.player.name}.json", 'w') as f:
json.dump(self.chatHistory, f, indent=4)
self.startNewChat()
return
messages = sharedVars.messages
# Replace the first message in the history with the current prompt
messages[0] = {"role": "system", "content": self.prompt}
# Load all the messages into the GUI message holder
for message in messages[1:]:
self.appendMessage(message)
# Scroll to the bottom
self.scrollView.scroll_y = 0
# If a new system message is found, add it to the chat history, taking priority over user messages
if messages[1] != {"role": "system", "content": self.systemMessage}:
messages.append({"role": "system", "content": self.systemMessage})
# Update global messages variable
sharedVars.messages = messages
# If the last message is not a response, respond to it
if messages[-1]["role"] != "assistant":
self.chatLoop()
def appendMessage(self, message):
"""
formats and adds a message to the GUI
"""
# initialize the colors for each role and the formatted message
roleColors = {"system": "#ADD8E6", "user": "#32CD32", "assistant": "#800080", "character": "#800080", "character2": "#8B4513"}
formattedMessage = ""
# set the role and get the name based on the role
role = message["role"]
name = "System" if role == "system" else "You" if role == "user" else sharedVars.currCharacter.name.first
# initialize the character names
character1 = sharedVars.currCharacter.name.first
character2 = sharedVars.currCharacter2.name.first if sharedVars.currCharacter2 else ""
# if the message is a response from the chatbot
if role == "assistant":
if character2:
# parse the message into dialogues
pattern = f'({character1}|{character2}): (.*?)(?=(?:{character1}|{character2}):|$)'
dialogues = re.findall(pattern, message['content'], re.DOTALL)
# if dialogues were found, format them accordingly
if dialogues:
for speaker, text in dialogues:
# get the role of the speaker
speakerRole = 'character' if speaker == character1 else 'character2'
# Color code for the speaker role
colorCode = roleColors[speakerRole]
# Clean up the text
cleanedText = re.sub('\n+', '\n', text)
# Create the formatted string
formattedMessage += f"\n\n[color={colorCode}][b]{speaker}:[/b][/color] {cleanedText}"
else:
role = 'system'
name = 'System?'
else:
formattedMessage = re.sub(f'{character1}: ', '', message['content'])
# if there is only one character, format the message accordingly
formattedMessage = f"\n\n[color={roleColors[role]}][b]{name}:[/b][/color] {formattedMessage}"
# if the message is a system message or a user message, format it accordingly
if not formattedMessage:
# format the message
message["content"] = re.sub('\n+', '\n', message["content"])
formattedMessage = f"\n\n[color={roleColors[role]}][b]{name}:[/b][/color] {message['content']}"
# remove all double quotes from the message
formattedMessage = formattedMessage.replace('"', '')
# add the message to the GUI
self.messageLabel.text += formattedMessage
def saveChatHistory(self, message):
"""
Saves a message to the chat history json file
Note: I might want to make it so you can save multiple messages at once, but for now it's just one at a time
"""
chatHistory = {"logs": {}}
try:
with open(f"CharacterJsons/ChatHistory{sharedVars.player.name}.json", 'r') as f:
chatHistory = json.load(f)
except (FileNotFoundError, json.JSONDecodeError):
pass
# If the chat history doesn't have a log for this character, create one
chatHistory["logs"].setdefault(sharedVars.chatKey, []).append(message)
# Update the chat history json file
with open(f"CharacterJsons/ChatHistory{sharedVars.player.name}.json", 'w') as f:
json.dump(chatHistory, f, indent=4)
def sendMessage(self, *args):
"""
Sends a message to the chatbot
"""
userMessage = self.userInput.text.strip()
if userMessage:
message = {"role": "user", "content": userMessage}
sharedVars.appendMessage(message) # add message to global messages variable
self.appendMessage(message) # add message to gui
self.saveChatHistory(message) # save message to chat history json file
self.chatLoop() # get a response from the chatbot
self.userInput.text = "" # clear the user input box
def chatLoop(self):
"""
Gets a response from the chatbot
"""
# make sure all the variables are up to date
self.updateVars()
messages = sharedVars.messages
# If the last message is a new system message, display it in the gui
lastMessage = messages[-1]
if lastMessage["role"] == "system" and lastMessage["content"] not in self.messageLabel.text:
self.appendMessage(lastMessage)
# get the total number of tokens in the chat history
totalTokens = sum(countTokens(message["content"]) for message in messages)
print(f"Total tokens: {totalTokens}")
# If the total number of tokens is greater than the token limit, remove messages until it's not
while totalTokens > self.tokenLimit:
print(f"Total tokens: {totalTokens}, Token limit: {self.tokenLimit}")
            # Remove the oldest message from the front of the history
removedMessage = messages.pop(0)
# If the prompt was removed, add it closer to the end of the list
if "~!~!~" in removedMessage["content"] or removedMessage["role"] == "system":
                if len(messages) < 5:
                    messages.insert(-2, removedMessage)
                else:
                    messages.insert(-5, removedMessage)
else:
# Update the total number of tokens
totalTokens -= countTokens(removedMessage["content"])
badResponse = True
# Get a response from the chatbot and make sure it's not responding as the player
while badResponse:
response = openai.ChatCompletion.create(
model="gpt-3.5-turbo",
messages=messages,
temperature=self.temperature,
max_tokens=self.maxTokens,
top_p=self.topP,
frequency_penalty=self.frequencyPenalty,
presence_penalty=self.presencePenalty,
)
if f"{sharedVars.player.name}:" in response.choices[0].message.content:
messages.append({"role": "system", "content": f"regenerate response. You're not allowed to respond as {sharedVars.player.name}"})
else:
badResponse = False
        # remove the "regenerate response" system instructions added for bad responses
        # (rebuild the list instead of calling remove() while iterating, which can skip elements)
        messages[:] = [
            m for m in messages
            if f"regenerate response. You're not allowed to respond as {sharedVars.player.name}" not in m["content"]
        ]
responseMessage = response.choices[0].message # separate the response from the rest of the response object
self.saveChatHistory(responseMessage) # save the response to the chat history json file
sharedVars.appendMessage(responseMessage) # add the response to the global messages variable
self.appendMessage(responseMessage) # add the response to the gui
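# Mounting sketch (illustrative; ChatBoxLayout assumes the rest of the app has already
# populated sharedVars with a player, characters and prompt, so this is not runnable on
# its own):
#
#   from kivy.app import App
#
#   class ChatApp(App):          # hypothetical demo app, not part of this repo
#       def build(self):
#           return ChatBoxLayout()
#
#   ChatApp().run()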
| [] |
2024-01-10 | rrt2850/GUI-Chatbot-V1 | trainbot.py | from fileinput import filename
import json
import tiktoken # for token counting
import numpy as np
from collections import defaultdict
data_path = "trainingFormattedTest.jsonl"
"""
# Load the dataset
with open(data_path, 'r', encoding='utf-8') as f:
dataset = [json.loads(line) for line in f]
# Initial dataset stats
print("Num examples:", len(dataset))
print("First example:")
for message in dataset[0]["messages"]:
print(message)
# Format error checks
format_errors = defaultdict(int)
for ex in dataset:
if not isinstance(ex, dict):
format_errors["data_type"] += 1
continue
messages = ex.get("messages", None)
if not messages:
format_errors["missing_messages_list"] += 1
continue
for message in messages:
if "role" not in message or "content" not in message:
format_errors["message_missing_key"] += 1
if any(k not in ("role", "content", "name") for k in message):
format_errors["message_unrecognized_key"] += 1
if message.get("role", None) not in ("system", "user", "assistant"):
format_errors["unrecognized_role"] += 1
content = message.get("content", None)
if not content or not isinstance(content, str):
format_errors["missing_content"] += 1
if not any(message.get("role", None) == "assistant" for message in messages):
format_errors["example_missing_assistant_message"] += 1
if format_errors:
print("Found errors:")
for k, v in format_errors.items():
print(f"{k}: {v}")
else:
print("No errors found")
encoding = tiktoken.get_encoding("cl100k_base")
# not exact!
# simplified from https://github.com/openai/openai-cookbook/blob/main/examples/How_to_count_tokens_with_tiktoken.ipynb
def num_tokens_from_messages(messages, tokens_per_message=3, tokens_per_name=1):
num_tokens = 0
for message in messages:
num_tokens += tokens_per_message
for key, value in message.items():
num_tokens += len(encoding.encode(value))
if key == "name":
num_tokens += tokens_per_name
num_tokens += 3
return num_tokens
def num_assistant_tokens_from_messages(messages):
num_tokens = 0
for message in messages:
if message["role"] == "assistant":
num_tokens += len(encoding.encode(message["content"]))
return num_tokens
def print_distribution(values, name):
print(f"\n#### Distribution of {name}:")
print(f"min / max: {min(values)}, {max(values)}")
print(f"mean / median: {np.mean(values)}, {np.median(values)}")
print(f"p5 / p95: {np.quantile(values, 0.1)}, {np.quantile(values, 0.9)}")
# Warnings and tokens counts
n_missing_system = 0
n_missing_user = 0
n_messages = []
convo_lens = []
assistant_message_lens = []
for ex in dataset:
messages = ex["messages"]
if not any(message["role"] == "system" for message in messages):
n_missing_system += 1
if not any(message["role"] == "user" for message in messages):
n_missing_user += 1
n_messages.append(len(messages))
convo_lens.append(num_tokens_from_messages(messages))
assistant_message_lens.append(num_assistant_tokens_from_messages(messages))
print("Num examples missing system message:", n_missing_system)
print("Num examples missing user message:", n_missing_user)
print_distribution(n_messages, "num_messages_per_example")
print_distribution(convo_lens, "num_total_tokens_per_example")
print_distribution(assistant_message_lens, "num_assistant_tokens_per_example")
n_too_long = sum(l > 4096 for l in convo_lens)
print(f"\n{n_too_long} examples may be over the 4096 token limit, they will be truncated during fine-tuning")
# Pricing and default n_epochs estimate
MAX_TOKENS_PER_EXAMPLE = 4096
TARGET_EPOCHS = 3
MIN_TARGET_EXAMPLES = 100
MAX_TARGET_EXAMPLES = 25000
MIN_DEFAULT_EPOCHS = 1
MAX_DEFAULT_EPOCHS = 25
n_epochs = TARGET_EPOCHS
n_train_examples = len(dataset)
if n_train_examples * TARGET_EPOCHS < MIN_TARGET_EXAMPLES:
n_epochs = min(MAX_DEFAULT_EPOCHS, MIN_TARGET_EXAMPLES // n_train_examples)
elif n_train_examples * TARGET_EPOCHS > MAX_TARGET_EXAMPLES:
n_epochs = max(MIN_DEFAULT_EPOCHS, MAX_TARGET_EXAMPLES // n_train_examples)
n_billing_tokens_in_dataset = sum(min(MAX_TOKENS_PER_EXAMPLE, length) for length in convo_lens)
print(f"Dataset has ~{n_billing_tokens_in_dataset} tokens that will be charged for during training")
print(f"By default, you'll train for {n_epochs} epochs on this dataset")
print(f"By default, you'll be charged for ~{n_epochs * n_billing_tokens_in_dataset} tokens")
"""
import os
import openai
import dotenv
# set up environment variables
dotenv.load_dotenv()
openai.api_key = os.environ["OPENAI_API_KEY"]
# Create a file on the OpenAI API
training_response = openai.File.create(
file=open("trainingFormattedTest.jsonl", "rb"), purpose="fine-tune"
)
# Get the file ID
training_file_id = training_response["id"]
validation_response = openai.File.create(
file=open("trainingFormattedTest.jsonl", "rb"), purpose="fine-tune"
)
validation_file_id = validation_response["id"]
print("Training file ID:", training_file_id)
print("Validation file ID:", validation_file_id)
"""
response = openai.FineTuningJob.create(
training_file=training_file_id,
validation_file=validation_file_id,
model="gpt-3.5-turbo",
suffix="cgen-14e-valid",
)"""
response = openai.FineTuningJob.create(
training_file=training_file_id,
model="gpt-3.5-turbo",
suffix="cgen-14e-novalid",
)
job_id = response["id"]
print("Job ID:", response["id"])
print("Status:", response["status"])
# List 10 fine-tuning jobs
print(openai.FineTuningJob.list(limit=10)[0]) | [] |
2024-01-10 | cyprienc/stable-baselines3 | stable_baselines3~common~policies.py | """Policies: abstract base class and concrete implementations."""
import collections
import copy
from abc import ABC, abstractmethod
from functools import partial
from typing import Any, Dict, List, Optional, Tuple, Type, Union
import gym
import numpy as np
import torch as th
from torch import nn
from stable_baselines3.common.distributions import (
BernoulliDistribution,
CategoricalDistribution,
DiagGaussianDistribution,
Distribution,
MultiCategoricalDistribution,
StateDependentNoiseDistribution,
make_proba_distribution,
)
from stable_baselines3.common.preprocessing import get_action_dim, is_image_space, maybe_transpose, preprocess_obs
from stable_baselines3.common.torch_layers import (
BaseFeaturesExtractor,
CombinedExtractor,
FlattenExtractor,
MlpExtractor,
NatureCNN,
    create_mlp,
    create_sde_features_extractor,
)
from stable_baselines3.common.type_aliases import Schedule
from stable_baselines3.common.utils import get_device, is_vectorized_observation, obs_as_tensor
class BaseModel(nn.Module, ABC):
"""
The base model object: makes predictions in response to observations.
In the case of policies, the prediction is an action. In the case of critics, it is the
estimated value of the observation.
:param observation_space: The observation space of the environment
:param action_space: The action space of the environment
:param features_extractor_class: Features extractor to use.
:param features_extractor_kwargs: Keyword arguments
to pass to the features extractor.
:param features_extractor: Network to extract features
(a CNN when using images, a nn.Flatten() layer otherwise)
:param normalize_images: Whether to normalize images or not,
dividing by 255.0 (True by default)
:param optimizer_class: The optimizer to use,
``th.optim.Adam`` by default
:param optimizer_kwargs: Additional keyword arguments,
excluding the learning rate, to pass to the optimizer
"""
def __init__(
self,
observation_space: gym.spaces.Space,
action_space: gym.spaces.Space,
features_extractor_class: Type[BaseFeaturesExtractor] = FlattenExtractor,
features_extractor_kwargs: Optional[Dict[str, Any]] = None,
features_extractor: Optional[nn.Module] = None,
normalize_images: bool = True,
optimizer_class: Type[th.optim.Optimizer] = th.optim.Adam,
optimizer_kwargs: Optional[Dict[str, Any]] = None,
):
super(BaseModel, self).__init__()
if optimizer_kwargs is None:
optimizer_kwargs = {}
if features_extractor_kwargs is None:
features_extractor_kwargs = {}
self.observation_space = observation_space
self.action_space = action_space
self.features_extractor = features_extractor
self.normalize_images = normalize_images
self.optimizer_class = optimizer_class
self.optimizer_kwargs = optimizer_kwargs
self.optimizer = None # type: Optional[th.optim.Optimizer]
self.features_extractor_class = features_extractor_class
self.features_extractor_kwargs = features_extractor_kwargs
@abstractmethod
def forward(self, *args, **kwargs):
pass
def _update_features_extractor(
self,
net_kwargs: Dict[str, Any],
features_extractor: Optional[BaseFeaturesExtractor] = None,
) -> Dict[str, Any]:
"""
Update the network keyword arguments and create a new features extractor object if needed.
If a ``features_extractor`` object is passed, then it will be shared.
:param net_kwargs: the base network keyword arguments, without the ones
related to features extractor
:param features_extractor: a features extractor object.
If None, a new object will be created.
:return: The updated keyword arguments
"""
net_kwargs = net_kwargs.copy()
if features_extractor is None:
# The features extractor is not shared, create a new one
features_extractor = self.make_features_extractor()
net_kwargs.update(dict(features_extractor=features_extractor, features_dim=features_extractor.features_dim))
return net_kwargs
def make_features_extractor(self) -> BaseFeaturesExtractor:
"""Helper method to create a features extractor."""
return self.features_extractor_class(self.observation_space, **self.features_extractor_kwargs)
def extract_features(self, obs: th.Tensor) -> th.Tensor:
"""
Preprocess the observation if needed and extract features.
:param obs:
:return:
"""
assert self.features_extractor is not None, "No features extractor was set"
preprocessed_obs = preprocess_obs(obs, self.observation_space, normalize_images=self.normalize_images)
return self.features_extractor(preprocessed_obs)
def _get_constructor_parameters(self) -> Dict[str, Any]:
"""
Get data that need to be saved in order to re-create the model when loading it from disk.
        :return: The dictionary to pass as kwargs to the constructor when reconstructing this model.
"""
return dict(
observation_space=self.observation_space,
action_space=self.action_space,
# Passed to the constructor by child class
# squash_output=self.squash_output,
# features_extractor=self.features_extractor
normalize_images=self.normalize_images,
)
@property
def device(self) -> th.device:
"""Infer which device this policy lives on by inspecting its parameters.
If it has no parameters, the 'cpu' device is used as a fallback.
:return:"""
for param in self.parameters():
return param.device
return get_device("cpu")
def save(self, path: str) -> None:
"""
Save model to a given location.
:param path:
"""
th.save({"state_dict": self.state_dict(), "data": self._get_constructor_parameters()}, path)
@classmethod
def load(cls, path: str, device: Union[th.device, str] = "auto") -> "BaseModel":
"""
Load model from path.
:param path:
:param device: Device on which the policy should be loaded.
:return:
"""
device = get_device(device)
saved_variables = th.load(path, map_location=device)
# Create policy object
model = cls(**saved_variables["data"]) # pytype: disable=not-instantiable
# Load weights
model.load_state_dict(saved_variables["state_dict"])
model.to(device)
return model
def load_from_vector(self, vector: np.ndarray) -> None:
"""
Load parameters from a 1D vector.
:param vector:
"""
th.nn.utils.vector_to_parameters(th.FloatTensor(vector).to(self.device), self.parameters())
def parameters_to_vector(self) -> np.ndarray:
"""
Convert the parameters to a 1D vector.
:return:
"""
return th.nn.utils.parameters_to_vector(self.parameters()).detach().cpu().numpy()
def set_training_mode(self, mode: bool) -> None:
"""
Put the policy in either training or evaluation mode.
This affects certain modules, such as batch normalisation and dropout.
:param mode: if true, set to training mode, else set to evaluation mode
"""
self.train(mode)
def obs_to_tensor(self, observation: Union[np.ndarray, Dict[str, np.ndarray]]) -> Tuple[th.Tensor, bool]:
"""
Convert an input observation to a PyTorch tensor that can be fed to a model.
Includes sugar-coating to handle different observations (e.g. normalizing images).
:param observation: the input observation
:return: The observation as PyTorch tensor
and whether the observation is vectorized or not
"""
vectorized_env = False
if isinstance(observation, dict):
# need to copy the dict as the dict in VecFrameStack will become a torch tensor
observation = copy.deepcopy(observation)
for key, obs in observation.items():
obs_space = self.observation_space.spaces[key]
if is_image_space(obs_space):
obs_ = maybe_transpose(obs, obs_space)
else:
obs_ = np.array(obs)
vectorized_env = vectorized_env or is_vectorized_observation(obs_, obs_space)
# Add batch dimension if needed
observation[key] = obs_.reshape((-1,) + self.observation_space[key].shape)
elif is_image_space(self.observation_space):
# Handle the different cases for images
# as PyTorch use channel first format
observation = maybe_transpose(observation, self.observation_space)
else:
observation = np.array(observation)
if not isinstance(observation, dict):
# Dict obs need to be handled separately
vectorized_env = is_vectorized_observation(observation, self.observation_space)
# Add batch dimension if needed
observation = observation.reshape((-1,) + self.observation_space.shape)
observation = obs_as_tensor(observation, self.device)
return observation, vectorized_env
class BasePolicy(BaseModel):
"""The base policy object.
Parameters are mostly the same as `BaseModel`; additions are documented below.
:param args: positional arguments passed through to `BaseModel`.
:param kwargs: keyword arguments passed through to `BaseModel`.
:param squash_output: For continuous actions, whether the output is squashed
or not using a ``tanh()`` function.
"""
def __init__(self, *args, squash_output: bool = False, **kwargs):
super(BasePolicy, self).__init__(*args, **kwargs)
self._squash_output = squash_output
@staticmethod
def _dummy_schedule(progress_remaining: float) -> float:
"""(float) Useful for pickling policy."""
del progress_remaining
return 0.0
@property
def squash_output(self) -> bool:
"""(bool) Getter for squash_output."""
return self._squash_output
@staticmethod
def init_weights(module: nn.Module, gain: float = 1) -> None:
"""
Orthogonal initialization (used in PPO and A2C)
"""
if isinstance(module, (nn.Linear, nn.Conv2d)):
nn.init.orthogonal_(module.weight, gain=gain)
if module.bias is not None:
module.bias.data.fill_(0.0)
@abstractmethod
def _predict(self, observation: th.Tensor, deterministic: bool = False) -> th.Tensor:
"""
Get the action according to the policy for a given observation.
By default provides a dummy implementation -- not all BasePolicy classes
implement this, e.g. if they are a Critic in an Actor-Critic method.
:param observation:
:param deterministic: Whether to use stochastic or deterministic actions
:return: Taken action according to the policy
"""
def predict(
self,
observation: Union[np.ndarray, Dict[str, np.ndarray]],
state: Optional[np.ndarray] = None,
mask: Optional[np.ndarray] = None,
deterministic: bool = False,
) -> Tuple[np.ndarray, Optional[np.ndarray]]:
"""
Get the policy action and state from an observation (and optional state).
Includes sugar-coating to handle different observations (e.g. normalizing images).
:param observation: the input observation
:param state: The last states (can be None, used in recurrent policies)
:param mask: The last masks (can be None, used in recurrent policies)
:param deterministic: Whether or not to return deterministic actions.
:return: the model's action and the next state
(used in recurrent policies)
"""
# TODO (GH/1): add support for RNN policies
# if state is None:
# state = self.initial_state
# if mask is None:
# mask = [False for _ in range(self.n_envs)]
# Switch to eval mode (this affects batch norm / dropout)
self.set_training_mode(False)
observation, vectorized_env = self.obs_to_tensor(observation)
with th.no_grad():
actions = self._predict(observation, deterministic=deterministic)
# Convert to numpy
actions = actions.cpu().numpy()
if isinstance(self.action_space, gym.spaces.Box):
if self.squash_output:
# Rescale to proper domain when using squashing
actions = self.unscale_action(actions)
else:
# Actions could be on arbitrary scale, so clip the actions to avoid
# out of bound error (e.g. if sampling from a Gaussian distribution)
actions = np.clip(actions, self.action_space.low, self.action_space.high)
# Remove batch dimension if needed
if not vectorized_env:
actions = actions[0]
return actions, state
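    # Illustrative call pattern (sketch; `policy` and `obs` come from the training loop /
    # VecEnv and are not defined here):
    #   action, _state = policy.predict(obs, deterministic=True)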
def scale_action(self, action: np.ndarray) -> np.ndarray:
"""
Rescale the action from [low, high] to [-1, 1]
(no need for symmetric action space)
:param action: Action to scale
:return: Scaled action
"""
low, high = self.action_space.low, self.action_space.high
return 2.0 * ((action - low) / (high - low)) - 1.0
def unscale_action(self, scaled_action: np.ndarray) -> np.ndarray:
"""
Rescale the action from [-1, 1] to [low, high]
(no need for symmetric action space)
:param scaled_action: Action to un-scale
"""
low, high = self.action_space.low, self.action_space.high
return low + (0.5 * (scaled_action + 1.0) * (high - low))
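    # Note: scale_action and unscale_action are inverses on Box action spaces; for any
    # in-bounds action `a` (sketch), np.allclose(unscale_action(scale_action(a)), a) holds.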
class ActorCriticPolicy(BasePolicy):
"""
Policy class for actor-critic algorithms (has both policy and value prediction).
Used by A2C, PPO and the likes.
:param observation_space: Observation space
:param action_space: Action space
:param lr_schedule: Learning rate schedule (could be constant)
:param net_arch: The specification of the policy and value networks.
:param activation_fn: Activation function
:param ortho_init: Whether to use or not orthogonal initialization
:param use_sde: Whether to use State Dependent Exploration or not
:param log_std_init: Initial value for the log standard deviation
:param full_std: Whether to use (n_features x n_actions) parameters
for the std instead of only (n_features,) when using gSDE
:param sde_net_arch: Network architecture for extracting features
when using gSDE. If None, the latent features from the policy will be used.
Pass an empty list to use the states as features.
:param use_expln: Use ``expln()`` function instead of ``exp()`` to ensure
a positive standard deviation (cf paper). It allows to keep variance
above zero and prevent it from growing too fast. In practice, ``exp()`` is usually enough.
:param squash_output: Whether to squash the output using a tanh function,
this allows to ensure boundaries when using gSDE.
:param features_extractor_class: Features extractor to use.
:param features_extractor_kwargs: Keyword arguments
to pass to the features extractor.
:param normalize_images: Whether to normalize images or not,
dividing by 255.0 (True by default)
:param optimizer_class: The optimizer to use,
``th.optim.Adam`` by default
:param optimizer_kwargs: Additional keyword arguments,
excluding the learning rate, to pass to the optimizer
"""
def __init__(
self,
observation_space: gym.spaces.Space,
action_space: gym.spaces.Space,
lr_schedule: Schedule,
net_arch: Optional[List[Union[int, Dict[str, List[int]]]]] = None,
activation_fn: Type[nn.Module] = nn.Tanh,
ortho_init: bool = True,
use_sde: bool = False,
log_std_init: float = 0.0,
full_std: bool = True,
sde_net_arch: Optional[List[int]] = None,
use_expln: bool = False,
squash_output: bool = False,
features_extractor_class: Type[BaseFeaturesExtractor] = FlattenExtractor,
features_extractor_kwargs: Optional[Dict[str, Any]] = None,
normalize_images: bool = True,
optimizer_class: Type[th.optim.Optimizer] = th.optim.Adam,
optimizer_kwargs: Optional[Dict[str, Any]] = None,
):
if optimizer_kwargs is None:
optimizer_kwargs = {}
# Small values to avoid NaN in Adam optimizer
if optimizer_class == th.optim.Adam:
optimizer_kwargs["eps"] = 1e-5
super(ActorCriticPolicy, self).__init__(
observation_space,
action_space,
features_extractor_class,
features_extractor_kwargs,
optimizer_class=optimizer_class,
optimizer_kwargs=optimizer_kwargs,
squash_output=squash_output,
)
# Default network architecture, from stable-baselines
if net_arch is None:
if features_extractor_class == NatureCNN:
net_arch = []
else:
net_arch = [dict(pi=[64, 64], vf=[64, 64])]
self.net_arch = net_arch
self.activation_fn = activation_fn
self.ortho_init = ortho_init
self.features_extractor = features_extractor_class(self.observation_space, **self.features_extractor_kwargs)
self.features_dim = self.features_extractor.features_dim
self.normalize_images = normalize_images
self.log_std_init = log_std_init
dist_kwargs = None
# Keyword arguments for gSDE distribution
if use_sde:
dist_kwargs = {
"full_std": full_std,
"squash_output": squash_output,
"use_expln": use_expln,
"learn_features": sde_net_arch is not None,
}
self.sde_features_extractor = None
self.sde_net_arch = sde_net_arch
self.use_sde = use_sde
self.dist_kwargs = dist_kwargs
# Action distribution
self.action_dist = make_proba_distribution(action_space, use_sde=use_sde, dist_kwargs=dist_kwargs)
self._build(lr_schedule)
def _get_constructor_parameters(self) -> Dict[str, Any]:
data = super()._get_constructor_parameters()
default_none_kwargs = self.dist_kwargs or collections.defaultdict(lambda: None)
data.update(
dict(
net_arch=self.net_arch,
activation_fn=self.activation_fn,
use_sde=self.use_sde,
log_std_init=self.log_std_init,
squash_output=default_none_kwargs["squash_output"],
full_std=default_none_kwargs["full_std"],
sde_net_arch=self.sde_net_arch,
use_expln=default_none_kwargs["use_expln"],
lr_schedule=self._dummy_schedule, # dummy lr schedule, not needed for loading policy alone
ortho_init=self.ortho_init,
optimizer_class=self.optimizer_class,
optimizer_kwargs=self.optimizer_kwargs,
features_extractor_class=self.features_extractor_class,
features_extractor_kwargs=self.features_extractor_kwargs,
)
)
return data
def reset_noise(self, n_envs: int = 1) -> None:
"""
Sample new weights for the exploration matrix.
:param n_envs:
"""
assert isinstance(self.action_dist, StateDependentNoiseDistribution), "reset_noise() is only available when using gSDE"
self.action_dist.sample_weights(self.log_std, batch_size=n_envs)
def _build_mlp_extractor(self) -> None:
"""
Create the policy and value networks.
Part of the layers can be shared.
"""
# Note: If net_arch is None and some features extractor is used,
# net_arch here is an empty list and mlp_extractor does not
# really contain any layers (acts like an identity module).
self.mlp_extractor = MlpExtractor(
self.features_dim,
net_arch=self.net_arch,
activation_fn=self.activation_fn,
device=self.device,
)
def _build(self, lr_schedule: Schedule) -> None:
"""
Create the networks and the optimizer.
:param lr_schedule: Learning rate schedule
lr_schedule(1) is the initial learning rate
"""
self._build_mlp_extractor()
latent_dim_pi = self.mlp_extractor.latent_dim_pi
# Separate features extractor for gSDE
if self.sde_net_arch is not None:
self.sde_features_extractor, latent_sde_dim = create_sde_features_extractor(
self.features_dim, self.sde_net_arch, self.activation_fn
)
if isinstance(self.action_dist, DiagGaussianDistribution):
self.action_net, self.log_std = self.action_dist.proba_distribution_net(
latent_dim=latent_dim_pi, log_std_init=self.log_std_init
)
elif isinstance(self.action_dist, StateDependentNoiseDistribution):
latent_sde_dim = latent_dim_pi if self.sde_net_arch is None else latent_sde_dim
self.action_net, self.log_std = self.action_dist.proba_distribution_net(
latent_dim=latent_dim_pi, latent_sde_dim=latent_sde_dim, log_std_init=self.log_std_init
)
elif isinstance(self.action_dist, CategoricalDistribution):
self.action_net = self.action_dist.proba_distribution_net(latent_dim=latent_dim_pi)
elif isinstance(self.action_dist, MultiCategoricalDistribution):
self.action_net = self.action_dist.proba_distribution_net(latent_dim=latent_dim_pi)
elif isinstance(self.action_dist, BernoulliDistribution):
self.action_net = self.action_dist.proba_distribution_net(latent_dim=latent_dim_pi)
else:
raise NotImplementedError(f"Unsupported distribution '{self.action_dist}'.")
self.value_net = nn.Linear(self.mlp_extractor.latent_dim_vf, 1)
# Init weights: use orthogonal initialization
# with small initial weight for the output
if self.ortho_init:
# TODO: check for features_extractor
# Values from stable-baselines.
# features_extractor/mlp values are
# originally from openai/baselines (default gains/init_scales).
module_gains = {
self.features_extractor: np.sqrt(2),
self.mlp_extractor: np.sqrt(2),
self.action_net: 0.01,
self.value_net: 1,
}
for module, gain in module_gains.items():
module.apply(partial(self.init_weights, gain=gain))
# Setup optimizer with initial learning rate
self.optimizer = self.optimizer_class(self.parameters(), lr=lr_schedule(1), **self.optimizer_kwargs)
def forward(self, obs: th.Tensor, deterministic: bool = False) -> Tuple[th.Tensor, th.Tensor, th.Tensor]:
"""
Forward pass in all the networks (actor and critic)
:param obs: Observation
:param deterministic: Whether to sample or use deterministic actions
:return: action, value and log probability of the action
"""
latent_pi, latent_vf, latent_sde = self._get_latent(obs)
# Evaluate the values for the given observations
values = self.value_net(latent_vf)
distribution = self._get_action_dist_from_latent(latent_pi, latent_sde=latent_sde)
actions = distribution.get_actions(deterministic=deterministic)
log_prob = distribution.log_prob(actions)
return actions, values, log_prob
def _get_latent(self, obs: th.Tensor) -> Tuple[th.Tensor, th.Tensor, th.Tensor]:
"""
Get the latent code (i.e., activations of the last layer of each network)
for the different networks.
:param obs: Observation
:return: Latent codes
for the actor, the value function and for gSDE function
"""
# Preprocess the observation if needed
features = self.extract_features(obs)
latent_pi, latent_vf = self.mlp_extractor(features)
# Features for sde
latent_sde = latent_pi
if self.sde_features_extractor is not None:
latent_sde = self.sde_features_extractor(features)
return latent_pi, latent_vf, latent_sde
def _get_action_dist_from_latent(self, latent_pi: th.Tensor, latent_sde: Optional[th.Tensor] = None) -> Distribution:
"""
Retrieve action distribution given the latent codes.
:param latent_pi: Latent code for the actor
:param latent_sde: Latent code for the gSDE exploration function
:return: Action distribution
"""
mean_actions = self.action_net(latent_pi)
if isinstance(self.action_dist, DiagGaussianDistribution):
return self.action_dist.proba_distribution(mean_actions, self.log_std)
elif isinstance(self.action_dist, CategoricalDistribution):
# Here mean_actions are the logits before the softmax
return self.action_dist.proba_distribution(action_logits=mean_actions)
elif isinstance(self.action_dist, MultiCategoricalDistribution):
# Here mean_actions are the flattened logits
return self.action_dist.proba_distribution(action_logits=mean_actions)
elif isinstance(self.action_dist, BernoulliDistribution):
# Here mean_actions are the logits (before rounding to get the binary actions)
return self.action_dist.proba_distribution(action_logits=mean_actions)
elif isinstance(self.action_dist, StateDependentNoiseDistribution):
return self.action_dist.proba_distribution(mean_actions, self.log_std, latent_sde)
else:
raise ValueError("Invalid action distribution")
def _predict(self, observation: th.Tensor, deterministic: bool = False) -> th.Tensor:
"""
Get the action according to the policy for a given observation.
:param observation:
:param deterministic: Whether to use stochastic or deterministic actions
:return: Taken action according to the policy
"""
latent_pi, _, latent_sde = self._get_latent(observation)
distribution = self._get_action_dist_from_latent(latent_pi, latent_sde)
return distribution.get_actions(deterministic=deterministic)
def evaluate_actions(self, obs: th.Tensor, actions: th.Tensor) -> Tuple[th.Tensor, th.Tensor, th.Tensor]:
"""
Evaluate actions according to the current policy,
given the observations.
:param obs:
:param actions:
:return: estimated value, log likelihood of taking those actions
and entropy of the action distribution.
"""
latent_pi, latent_vf, latent_sde = self._get_latent(obs)
distribution = self._get_action_dist_from_latent(latent_pi, latent_sde)
log_prob = distribution.log_prob(actions)
values = self.value_net(latent_vf)
return values, log_prob, distribution.entropy()
def get_distribution(self, obs: th.Tensor) -> Distribution:
"""
Get the current policy distribution given the observations.
:param obs:
:return: the action distribution.
"""
latent_pi, _, latent_sde = self._get_latent(obs)
return self._get_action_dist_from_latent(latent_pi, latent_sde)
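# Rollout-time sketch (illustrative; assumes `policy` is an ActorCriticPolicy and `obs`
# is already a preprocessed, batched torch tensor on policy.device):
#
#   actions, values, log_prob = policy(obs)                            # collection pass
#   values, log_prob, entropy = policy.evaluate_actions(obs, actions)  # training pass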
class ActorCriticCnnPolicy(ActorCriticPolicy):
"""
CNN policy class for actor-critic algorithms (has both policy and value prediction).
Used by A2C, PPO and the likes.
:param observation_space: Observation space
:param action_space: Action space
:param lr_schedule: Learning rate schedule (could be constant)
:param net_arch: The specification of the policy and value networks.
:param activation_fn: Activation function
:param ortho_init: Whether to use or not orthogonal initialization
:param use_sde: Whether to use State Dependent Exploration or not
:param log_std_init: Initial value for the log standard deviation
:param full_std: Whether to use (n_features x n_actions) parameters
for the std instead of only (n_features,) when using gSDE
:param sde_net_arch: Network architecture for extracting features
when using gSDE. If None, the latent features from the policy will be used.
Pass an empty list to use the states as features.
:param use_expln: Use ``expln()`` function instead of ``exp()`` to ensure
a positive standard deviation (cf paper). It allows to keep variance
above zero and prevent it from growing too fast. In practice, ``exp()`` is usually enough.
:param squash_output: Whether to squash the output using a tanh function,
this allows to ensure boundaries when using gSDE.
:param features_extractor_class: Features extractor to use.
:param features_extractor_kwargs: Keyword arguments
to pass to the features extractor.
:param normalize_images: Whether to normalize images or not,
dividing by 255.0 (True by default)
:param optimizer_class: The optimizer to use,
``th.optim.Adam`` by default
:param optimizer_kwargs: Additional keyword arguments,
excluding the learning rate, to pass to the optimizer
"""
def __init__(
self,
observation_space: gym.spaces.Space,
action_space: gym.spaces.Space,
lr_schedule: Schedule,
net_arch: Optional[List[Union[int, Dict[str, List[int]]]]] = None,
activation_fn: Type[nn.Module] = nn.Tanh,
ortho_init: bool = True,
use_sde: bool = False,
log_std_init: float = 0.0,
full_std: bool = True,
sde_net_arch: Optional[List[int]] = None,
use_expln: bool = False,
squash_output: bool = False,
features_extractor_class: Type[BaseFeaturesExtractor] = NatureCNN,
features_extractor_kwargs: Optional[Dict[str, Any]] = None,
normalize_images: bool = True,
optimizer_class: Type[th.optim.Optimizer] = th.optim.Adam,
optimizer_kwargs: Optional[Dict[str, Any]] = None,
):
super(ActorCriticCnnPolicy, self).__init__(
observation_space,
action_space,
lr_schedule,
net_arch,
activation_fn,
ortho_init,
use_sde,
log_std_init,
full_std,
sde_net_arch,
use_expln,
squash_output,
features_extractor_class,
features_extractor_kwargs,
normalize_images,
optimizer_class,
optimizer_kwargs,
)
class MultiInputActorCriticPolicy(ActorCriticPolicy):
"""
    MultiInputActorCritic policy class for actor-critic algorithms (has both policy and value prediction).
Used by A2C, PPO and the likes.
:param observation_space: Observation space (Tuple)
:param action_space: Action space
:param lr_schedule: Learning rate schedule (could be constant)
:param net_arch: The specification of the policy and value networks.
:param activation_fn: Activation function
:param ortho_init: Whether to use or not orthogonal initialization
:param use_sde: Whether to use State Dependent Exploration or not
:param log_std_init: Initial value for the log standard deviation
:param full_std: Whether to use (n_features x n_actions) parameters
for the std instead of only (n_features,) when using gSDE
:param sde_net_arch: Network architecture for extracting features
when using gSDE. If None, the latent features from the policy will be used.
Pass an empty list to use the states as features.
:param use_expln: Use ``expln()`` function instead of ``exp()`` to ensure
        a positive standard deviation (cf paper). This keeps the variance
        above zero and prevents it from growing too fast. In practice, ``exp()`` is usually enough.
:param squash_output: Whether to squash the output using a tanh function,
this allows to ensure boundaries when using gSDE.
:param features_extractor_class: Uses the CombinedExtractor
:param features_extractor_kwargs: Keyword arguments
to pass to the feature extractor.
:param normalize_images: Whether to normalize images or not,
dividing by 255.0 (True by default)
:param optimizer_class: The optimizer to use,
``th.optim.Adam`` by default
:param optimizer_kwargs: Additional keyword arguments,
excluding the learning rate, to pass to the optimizer
"""
def __init__(
self,
observation_space: gym.spaces.Dict,
action_space: gym.spaces.Space,
lr_schedule: Schedule,
net_arch: Optional[List[Union[int, Dict[str, List[int]]]]] = None,
activation_fn: Type[nn.Module] = nn.Tanh,
ortho_init: bool = True,
use_sde: bool = False,
log_std_init: float = 0.0,
full_std: bool = True,
sde_net_arch: Optional[List[int]] = None,
use_expln: bool = False,
squash_output: bool = False,
features_extractor_class: Type[BaseFeaturesExtractor] = CombinedExtractor,
features_extractor_kwargs: Optional[Dict[str, Any]] = None,
normalize_images: bool = True,
optimizer_class: Type[th.optim.Optimizer] = th.optim.Adam,
optimizer_kwargs: Optional[Dict[str, Any]] = None,
):
super(MultiInputActorCriticPolicy, self).__init__(
observation_space,
action_space,
lr_schedule,
net_arch,
activation_fn,
ortho_init,
use_sde,
log_std_init,
full_std,
sde_net_arch,
use_expln,
squash_output,
features_extractor_class,
features_extractor_kwargs,
normalize_images,
optimizer_class,
optimizer_kwargs,
)
class ContinuousCritic(BaseModel):
"""
Critic network(s) for DDPG/SAC/TD3.
It represents the action-state value function (Q-value function).
Compared to A2C/PPO critics, this one represents the Q-value
and takes the continuous action as input. It is concatenated with the state
and then fed to the network which outputs a single value: Q(s, a).
For more recent algorithms like SAC/TD3, multiple networks
are created to give different estimates.
By default, it creates two critic networks used to reduce overestimation
thanks to clipped Q-learning (cf TD3 paper).
    :param observation_space: Observation space
:param action_space: Action space
:param net_arch: Network architecture
:param features_extractor: Network to extract features
(a CNN when using images, a nn.Flatten() layer otherwise)
:param features_dim: Number of features
:param activation_fn: Activation function
:param normalize_images: Whether to normalize images or not,
dividing by 255.0 (True by default)
:param n_critics: Number of critic networks to create.
:param share_features_extractor: Whether the features extractor is shared or not
between the actor and the critic (this saves computation time)
"""
def __init__(
self,
observation_space: gym.spaces.Space,
action_space: gym.spaces.Space,
net_arch: List[int],
features_extractor: nn.Module,
features_dim: int,
activation_fn: Type[nn.Module] = nn.ReLU,
normalize_images: bool = True,
n_critics: int = 2,
share_features_extractor: bool = True,
):
super().__init__(
observation_space,
action_space,
features_extractor=features_extractor,
normalize_images=normalize_images,
)
action_dim = get_action_dim(self.action_space)
self.share_features_extractor = share_features_extractor
self.n_critics = n_critics
self.q_networks = []
for idx in range(n_critics):
q_net = create_mlp(features_dim + action_dim, 1, net_arch, activation_fn)
q_net = nn.Sequential(*q_net)
self.add_module(f"qf{idx}", q_net)
self.q_networks.append(q_net)
def forward(self, obs: th.Tensor, actions: th.Tensor) -> Tuple[th.Tensor, ...]:
# Learn the features extractor using the policy loss only
# when the features_extractor is shared with the actor
with th.set_grad_enabled(not self.share_features_extractor):
features = self.extract_features(obs)
qvalue_input = th.cat([features, actions], dim=1)
return tuple(q_net(qvalue_input) for q_net in self.q_networks)
def q1_forward(self, obs: th.Tensor, actions: th.Tensor) -> th.Tensor:
"""
Only predict the Q-value using the first network.
        This reduces computation when not all the estimates are needed
(e.g. when updating the policy in TD3).
"""
with th.no_grad():
features = self.extract_features(obs)
return self.q_networks[0](th.cat([features, actions], dim=1))
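# Hedged usage sketch for the twin critics above (`obs` and `actions` are hypothetical batches):
#
#   q1, q2 = critic(obs, actions)                   # one (batch_size, 1) tensor per critic
#   target_q = th.min(q1, q2)                       # clipped double-Q to curb overestimation
#   q1_only = critic.q1_forward(obs, actions)       # cheaper path, e.g. for TD3 actor updates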
def create_sde_features_extractor(
features_dim: int, sde_net_arch: List[int], activation_fn: Type[nn.Module]
) -> Tuple[nn.Sequential, int]:
"""
Create the neural network that will be used to extract features
for the gSDE exploration function.
:param features_dim:
:param sde_net_arch:
:param activation_fn:
:return:
"""
# Special case: when using states as features (i.e. sde_net_arch is an empty list)
# don't use any activation function
sde_activation = activation_fn if len(sde_net_arch) > 0 else None
latent_sde_net = create_mlp(features_dim, -1, sde_net_arch, activation_fn=sde_activation, squash_output=False)
latent_sde_dim = sde_net_arch[-1] if len(sde_net_arch) > 0 else features_dim
sde_features_extractor = nn.Sequential(*latent_sde_net)
return sde_features_extractor, latent_sde_dim
_policy_registry = dict() # type: Dict[Type[BasePolicy], Dict[str, Type[BasePolicy]]]
def get_policy_from_name(base_policy_type: Type[BasePolicy], name: str) -> Type[BasePolicy]:
"""
Returns the registered policy from the base type and name.
See `register_policy` for registering policies and explanation.
:param base_policy_type: the base policy class
:param name: the policy name
:return: the policy
"""
if base_policy_type not in _policy_registry:
raise KeyError(f"Error: the policy type {base_policy_type} is not registered!")
if name not in _policy_registry[base_policy_type]:
        raise KeyError(
            f"Error: unknown policy type {name}, "
            f"the only registered policy types are: {list(_policy_registry[base_policy_type].keys())}!"
        )
return _policy_registry[base_policy_type][name]
def register_policy(name: str, policy: Type[BasePolicy]) -> None:
"""
Register a policy, so it can be called using its name.
e.g. SAC('MlpPolicy', ...) instead of SAC(MlpPolicy, ...).
The goal here is to standardize policy naming, e.g.
all algorithms can call upon "MlpPolicy" or "CnnPolicy",
and they receive respective policies that work for them.
    Consider the following:
OnlinePolicy
-- OnlineMlpPolicy ("MlpPolicy")
-- OnlineCnnPolicy ("CnnPolicy")
OfflinePolicy
-- OfflineMlpPolicy ("MlpPolicy")
-- OfflineCnnPolicy ("CnnPolicy")
Two policies have name "MlpPolicy" and two have "CnnPolicy".
In `get_policy_from_name`, the parent class (e.g. OnlinePolicy)
is given and used to select and return the correct policy.
:param name: the policy name
:param policy: the policy class
"""
sub_class = None
for cls in BasePolicy.__subclasses__():
if issubclass(policy, cls):
sub_class = cls
break
if sub_class is None:
raise ValueError(f"Error: the policy {policy} is not of any known subclasses of BasePolicy!")
if sub_class not in _policy_registry:
_policy_registry[sub_class] = {}
if name in _policy_registry[sub_class]:
# Check if the registered policy is same
# we try to register. If not so,
# do not override and complain.
if _policy_registry[sub_class][name] != policy:
raise ValueError(f"Error: the name {name} is already registered for a different policy, will not override.")
_policy_registry[sub_class][name] = policy
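# Hedged usage sketch for the registry above, mirroring how built-in policies are typically exposed:
#
#   register_policy("MlpPolicy", ActorCriticPolicy)
#   register_policy("CnnPolicy", ActorCriticCnnPolicy)
#   policy_class = get_policy_from_name(ActorCriticPolicy, "CnnPolicy")  # -> ActorCriticCnnPolicy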
| [] |
2024-01-10 | darthgera123/Multimodal-Summarization | src~vilio~transformers~modeling_openai.py | # coding=utf-8
# Copyright 2018 The OpenAI Team Authors and HuggingFace Inc. team.
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""PyTorch OpenAI GPT model."""
import json
import logging
import math
import os
import warnings
from dataclasses import dataclass
from typing import Optional, Tuple
import torch
import torch.nn as nn
from torch.nn import CrossEntropyLoss
from .activations import gelu_new, swish
from .configuration_openai import OpenAIGPTConfig
from .file_utils import (
ModelOutput,
add_code_sample_docstrings,
add_start_docstrings,
add_start_docstrings_to_callable,
replace_return_docstrings,
)
from .modeling_outputs import BaseModelOutput, CausalLMOutput
from .modeling_utils import (
Conv1D,
PreTrainedModel,
SequenceSummary,
find_pruneable_heads_and_indices,
prune_conv1d_layer,
)
logger = logging.getLogger(__name__)
_CONFIG_FOR_DOC = "OpenAIGPTConfig"
_TOKENIZER_FOR_DOC = "OpenAIGPTTokenizer"
OPENAI_GPT_PRETRAINED_MODEL_ARCHIVE_LIST = [
"openai-gpt",
# See all OpenAI GPT models at https://huggingface.co/models?filter=openai-gpt
]
def load_tf_weights_in_openai_gpt(model, config, openai_checkpoint_folder_path):
""" Load tf pre-trained weights in a pytorch model (from NumPy arrays here)
"""
import re
import numpy as np
if ".ckpt" in openai_checkpoint_folder_path:
openai_checkpoint_folder_path = os.path.dirname(openai_checkpoint_folder_path)
logger.info("Loading weights from {}".format(openai_checkpoint_folder_path))
with open(openai_checkpoint_folder_path + "/parameters_names.json", "r", encoding="utf-8") as names_handle:
names = json.load(names_handle)
with open(openai_checkpoint_folder_path + "/params_shapes.json", "r", encoding="utf-8") as shapes_handle:
shapes = json.load(shapes_handle)
offsets = np.cumsum([np.prod(shape) for shape in shapes])
init_params = [np.load(openai_checkpoint_folder_path + "/params_{}.npy".format(n)) for n in range(10)]
init_params = np.split(np.concatenate(init_params, 0), offsets)[:-1]
init_params = [param.reshape(shape) for param, shape in zip(init_params, shapes)]
# This was used when we had a single embedding matrix for positions and tokens
# init_params[0] = np.concatenate([init_params[1], init_params[0]], 0)
# del init_params[1]
init_params = [arr.squeeze() for arr in init_params]
try:
assert model.tokens_embed.weight.shape == init_params[1].shape
assert model.positions_embed.weight.shape == init_params[0].shape
except AssertionError as e:
e.args += (model.tokens_embed.weight.shape, init_params[1].shape)
e.args += (model.positions_embed.weight.shape, init_params[0].shape)
raise
model.tokens_embed.weight.data = torch.from_numpy(init_params[1])
model.positions_embed.weight.data = torch.from_numpy(init_params[0])
names.pop(0)
# Pop position and token embedding arrays
init_params.pop(0)
init_params.pop(0)
for name, array in zip(names, init_params): # names[1:n_transfer], init_params[1:n_transfer]):
name = name[6:] # skip "model/"
assert name[-2:] == ":0"
name = name[:-2]
name = name.split("/")
pointer = model
for m_name in name:
if re.fullmatch(r"[A-Za-z]+\d+", m_name):
scope_names = re.split(r"(\d+)", m_name)
else:
scope_names = [m_name]
if scope_names[0] == "g":
pointer = getattr(pointer, "weight")
elif scope_names[0] == "b":
pointer = getattr(pointer, "bias")
elif scope_names[0] == "w":
pointer = getattr(pointer, "weight")
else:
pointer = getattr(pointer, scope_names[0])
if len(scope_names) >= 2:
num = int(scope_names[1])
pointer = pointer[num]
try:
assert (
pointer.shape == array.shape
), f"Pointer shape {pointer.shape} and array shape {array.shape} mismatched"
except AssertionError as e:
e.args += (pointer.shape, array.shape)
raise
logger.info("Initialize PyTorch weight {}".format(name))
pointer.data = torch.from_numpy(array)
return model
ACT_FNS = {"relu": nn.ReLU, "swish": swish, "gelu": gelu_new}
class Attention(nn.Module):
def __init__(self, nx, n_ctx, config, scale=False):
super().__init__()
n_state = nx # in Attention: n_state=768 (nx=n_embd)
# [switch nx => n_state from Block to Attention to keep identical to TF implem]
assert n_state % config.n_head == 0
self.register_buffer("bias", torch.tril(torch.ones(n_ctx, n_ctx)).view(1, 1, n_ctx, n_ctx))
self.n_head = config.n_head
self.split_size = n_state
self.scale = scale
self.c_attn = Conv1D(n_state * 3, nx)
self.c_proj = Conv1D(n_state, nx)
self.attn_dropout = nn.Dropout(config.attn_pdrop)
self.resid_dropout = nn.Dropout(config.resid_pdrop)
self.pruned_heads = set()
def prune_heads(self, heads):
if len(heads) == 0:
return
heads, index = find_pruneable_heads_and_indices(
heads, self.n_head, self.split_size // self.n_head, self.pruned_heads
)
index_attn = torch.cat([index, index + self.split_size, index + (2 * self.split_size)])
# Prune conv1d layers
self.c_attn = prune_conv1d_layer(self.c_attn, index_attn, dim=1)
self.c_proj = prune_conv1d_layer(self.c_proj, index, dim=0)
# Update hyper params
self.split_size = (self.split_size // self.n_head) * (self.n_head - len(heads))
self.n_head = self.n_head - len(heads)
self.pruned_heads = self.pruned_heads.union(heads)
def _attn(self, q, k, v, attention_mask=None, head_mask=None, output_attentions=False):
w = torch.matmul(q, k)
if self.scale:
w = w / math.sqrt(v.size(-1))
# w = w * self.bias + -1e9 * (1 - self.bias) # TF implem method: mask_attn_weights
# XD: self.b may be larger than w, so we need to crop it
b = self.bias[:, :, : w.size(-2), : w.size(-1)]
w = w * b + -1e4 * (1 - b)
if attention_mask is not None:
# Apply the attention mask
w = w + attention_mask
w = nn.Softmax(dim=-1)(w)
w = self.attn_dropout(w)
# Mask heads if we want to
if head_mask is not None:
w = w * head_mask
outputs = [torch.matmul(w, v)]
if output_attentions:
outputs.append(w)
return outputs
def merge_heads(self, x):
x = x.permute(0, 2, 1, 3).contiguous()
new_x_shape = x.size()[:-2] + (x.size(-2) * x.size(-1),)
return x.view(*new_x_shape) # in Tensorflow implem: fct merge_states
def split_heads(self, x, k=False):
new_x_shape = x.size()[:-1] + (self.n_head, x.size(-1) // self.n_head)
x = x.view(*new_x_shape) # in Tensorflow implem: fct split_states
if k:
return x.permute(0, 2, 3, 1)
else:
return x.permute(0, 2, 1, 3)
def forward(self, x, attention_mask=None, head_mask=None, output_attentions=False):
x = self.c_attn(x)
query, key, value = x.split(self.split_size, dim=2)
query = self.split_heads(query)
key = self.split_heads(key, k=True)
value = self.split_heads(value)
attn_outputs = self._attn(query, key, value, attention_mask, head_mask, output_attentions)
a = attn_outputs[0]
a = self.merge_heads(a)
a = self.c_proj(a)
a = self.resid_dropout(a)
outputs = [a] + attn_outputs[1:]
return outputs # a, (attentions)
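# Shape sketch for the attention block above, assuming the hypothetical sizes
# batch_size=2, seq_len=5, n_embd=768, n_head=12 (so head_dim=64):
#   c_attn(x)          -> (2, 5, 3 * 768), then split into query/key/value of (2, 5, 768) each
#   split_heads(query) -> (2, 12, 5, 64);  split_heads(key, k=True) -> (2, 12, 64, 5)
#   torch.matmul(q, k) -> (2, 12, 5, 5) causal-masked attention weights, merged back to (2, 5, 768)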
class MLP(nn.Module):
def __init__(self, n_state, config): # in MLP: n_state=3072 (4 * n_embd)
super().__init__()
nx = config.n_embd
self.c_fc = Conv1D(n_state, nx)
self.c_proj = Conv1D(nx, n_state)
self.act = ACT_FNS[config.afn]
self.dropout = nn.Dropout(config.resid_pdrop)
def forward(self, x):
h = self.act(self.c_fc(x))
h2 = self.c_proj(h)
return self.dropout(h2)
class Block(nn.Module):
def __init__(self, n_ctx, config, scale=False):
super().__init__()
nx = config.n_embd
self.attn = Attention(nx, n_ctx, config, scale)
self.ln_1 = nn.LayerNorm(nx, eps=config.layer_norm_epsilon)
self.mlp = MLP(4 * nx, config)
self.ln_2 = nn.LayerNorm(nx, eps=config.layer_norm_epsilon)
def forward(self, x, attention_mask=None, head_mask=None, output_attentions=False):
attn_outputs = self.attn(
x, attention_mask=attention_mask, head_mask=head_mask, output_attentions=output_attentions,
)
a = attn_outputs[0]
n = self.ln_1(x + a)
m = self.mlp(n)
h = self.ln_2(n + m)
outputs = [h] + attn_outputs[1:]
return outputs
class OpenAIGPTPreTrainedModel(PreTrainedModel):
""" An abstract class to handle weights initialization and
a simple interface for downloading and loading pretrained models.
"""
config_class = OpenAIGPTConfig
load_tf_weights = load_tf_weights_in_openai_gpt
base_model_prefix = "transformer"
authorized_missing_keys = [r"position_ids"]
def _init_weights(self, module):
""" Initialize the weights.
"""
if isinstance(module, (nn.Linear, nn.Embedding, Conv1D)):
# Slightly different from the TF version which uses truncated_normal for initialization
# cf https://github.com/pytorch/pytorch/pull/5617
module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
if isinstance(module, (nn.Linear, Conv1D)) and module.bias is not None:
module.bias.data.zero_()
elif isinstance(module, nn.LayerNorm):
module.bias.data.zero_()
module.weight.data.fill_(1.0)
@dataclass
class OpenAIGPTDoubleHeadsModelOutput(ModelOutput):
"""
Base class for outputs of models predicting if two sentences are consecutive or not.
Args:
lm_loss (:obj:`torch.FloatTensor` of shape :obj:`(1,)`, `optional`, returned when ``labels`` is provided):
Language modeling loss.
mc_loss (:obj:`torch.FloatTensor` of shape :obj:`(1,)`, `optional`, returned when :obj:`mc_labels` is provided):
Multiple choice classification loss.
lm_logits (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, num_choices, sequence_length, config.vocab_size)`):
Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax).
mc_logits (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, num_choices)`):
Prediction scores of the multiple choice classification head (scores for each choice before SoftMax).
hidden_states (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``output_hidden_states=True`` is passed or when ``config.output_hidden_states=True``):
Tuple of :obj:`torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer)
of shape :obj:`(batch_size, sequence_length, hidden_size)`.
Hidden-states of the model at the output of each layer plus the initial embedding outputs.
attentions (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``output_attentions=True`` is passed or when ``config.output_attentions=True``):
Tuple of :obj:`torch.FloatTensor` (one for each layer) of shape
:obj:`(batch_size, num_heads, sequence_length, sequence_length)`.
Attentions weights after the attention softmax, used to compute the weighted average in the self-attention
heads.
"""
lm_loss: Optional[torch.FloatTensor] = None
mc_loss: Optional[torch.FloatTensor] = None
lm_logits: torch.FloatTensor = None
mc_logits: torch.FloatTensor = None
hidden_states: Optional[Tuple[torch.FloatTensor]] = None
attentions: Optional[Tuple[torch.FloatTensor]] = None
OPENAI_GPT_START_DOCSTRING = r"""
This model is a PyTorch `torch.nn.Module <https://pytorch.org/docs/stable/nn.html#torch.nn.Module>`_ sub-class.
Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general
usage and behavior.
Parameters:
config (:class:`~transformers.OpenAIGPTConfig`): Model configuration class with all the parameters of the model.
Initializing with a config file does not load the weights associated with the model, only the configuration.
Check out the :meth:`~transformers.PreTrainedModel.from_pretrained` method to load the model weights.
"""
OPENAI_GPT_INPUTS_DOCSTRING = r"""
Args:
input_ids (:obj:`torch.LongTensor` of shape :obj:`(batch_size, sequence_length)`):
Indices of input sequence tokens in the vocabulary.
Indices can be obtained using :class:`transformers.OpenAIGPTTokenizer`.
See :func:`transformers.PreTrainedTokenizer.encode` and
:func:`transformers.PreTrainedTokenizer.__call__` for details.
`What are input IDs? <../glossary.html#input-ids>`__
attention_mask (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length)`, `optional`, defaults to :obj:`None`):
Mask to avoid performing attention on padding token indices.
Mask values selected in ``[0, 1]``:
``1`` for tokens that are NOT MASKED, ``0`` for MASKED tokens.
`What are attention masks? <../glossary.html#attention-mask>`__
token_type_ids (:obj:`torch.LongTensor` of shape :obj:`(batch_size, sequence_length)`, `optional`, defaults to :obj:`None`):
Segment token indices to indicate first and second portions of the inputs.
Indices are selected in ``[0, 1]``: ``0`` corresponds to a `sentence A` token, ``1``
corresponds to a `sentence B` token
`What are token type IDs? <../glossary.html#token-type-ids>`_
position_ids (:obj:`torch.LongTensor` of shape :obj:`(batch_size, sequence_length)`, `optional`, defaults to :obj:`None`):
Indices of positions of each input sequence tokens in the position embeddings.
Selected in the range ``[0, config.max_position_embeddings - 1]``.
`What are position IDs? <../glossary.html#position-ids>`_
head_mask (:obj:`torch.FloatTensor` of shape :obj:`(num_heads,)` or :obj:`(num_layers, num_heads)`, `optional`, defaults to :obj:`None`):
Mask to nullify selected heads of the self-attention modules.
Mask values selected in ``[0, 1]``:
:obj:`1` indicates the head is **not masked**, :obj:`0` indicates the head is **masked**.
inputs_embeds (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length, hidden_size)`, `optional`, defaults to :obj:`None`):
Optionally, instead of passing :obj:`input_ids` you can choose to directly pass an embedded representation.
This is useful if you want more control over how to convert `input_ids` indices into associated vectors
than the model's internal embedding lookup matrix.
output_attentions (:obj:`bool`, `optional`, defaults to :obj:`None`):
If set to ``True``, the attentions tensors of all attention layers are returned. See ``attentions`` under returned tensors for more detail.
output_hidden_states (:obj:`bool`, `optional`, defaults to :obj:`None`):
If set to ``True``, the hidden states of all layers are returned. See ``hidden_states`` under returned tensors for more detail.
return_dict (:obj:`bool`, `optional`, defaults to :obj:`None`):
If set to ``True``, the model will return a :class:`~transformers.file_utils.ModelOutput` instead of a
plain tuple.
"""
@add_start_docstrings(
"The bare OpenAI GPT transformer model outputting raw hidden-states without any specific head on top.",
OPENAI_GPT_START_DOCSTRING,
)
class OpenAIGPTModel(OpenAIGPTPreTrainedModel):
def __init__(self, config):
super().__init__(config)
self.tokens_embed = nn.Embedding(config.vocab_size, config.n_embd)
self.positions_embed = nn.Embedding(config.n_positions, config.n_embd)
self.drop = nn.Dropout(config.embd_pdrop)
self.h = nn.ModuleList([Block(config.n_ctx, config, scale=True) for _ in range(config.n_layer)])
self.register_buffer("position_ids", torch.arange(config.n_positions))
self.init_weights()
def get_input_embeddings(self):
return self.tokens_embed
def set_input_embeddings(self, new_embeddings):
self.tokens_embed = new_embeddings
def _prune_heads(self, heads_to_prune):
""" Prunes heads of the model.
heads_to_prune: dict of {layer_num: list of heads to prune in this layer}
"""
for layer, heads in heads_to_prune.items():
self.h[layer].attn.prune_heads(heads)
@add_start_docstrings_to_callable(OPENAI_GPT_INPUTS_DOCSTRING)
@add_code_sample_docstrings(
tokenizer_class=_TOKENIZER_FOR_DOC,
checkpoint="openai-gpt",
output_type=BaseModelOutput,
config_class=_CONFIG_FOR_DOC,
)
def forward(
self,
input_ids=None,
attention_mask=None,
token_type_ids=None,
position_ids=None,
head_mask=None,
inputs_embeds=None,
output_attentions=None,
output_hidden_states=None,
return_dict=None,
):
output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
output_hidden_states = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
if input_ids is not None and inputs_embeds is not None:
raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time")
elif input_ids is not None:
input_shape = input_ids.size()
input_ids = input_ids.view(-1, input_shape[-1])
elif inputs_embeds is not None:
input_shape = inputs_embeds.size()[:-1]
else:
raise ValueError("You have to specify either input_ids or inputs_embeds")
if position_ids is None:
            # Code differs from when we had a single embedding matrix for positions and tokens
position_ids = self.position_ids[None, : input_shape[-1]]
# Attention mask.
if attention_mask is not None:
# We create a 3D attention mask from a 2D tensor mask.
# Sizes are [batch_size, 1, 1, to_seq_length]
# So we can broadcast to [batch_size, num_heads, from_seq_length, to_seq_length]
# this attention mask is more simple than the triangular masking of causal attention
# used in OpenAI GPT, we just need to prepare the broadcast dimension here.
attention_mask = attention_mask.unsqueeze(1).unsqueeze(2)
# Since attention_mask is 1.0 for positions we want to attend and 0.0 for
# masked positions, this operation will create a tensor which is 0.0 for
# positions we want to attend and -10000.0 for masked positions.
# Since we are adding it to the raw scores before the softmax, this is
# effectively the same as removing these entirely.
attention_mask = attention_mask.to(dtype=next(self.parameters()).dtype) # fp16 compatibility
attention_mask = (1.0 - attention_mask) * -10000.0
# Prepare head mask if needed
head_mask = self.get_head_mask(head_mask, self.config.n_layer)
if inputs_embeds is None:
inputs_embeds = self.tokens_embed(input_ids)
position_embeds = self.positions_embed(position_ids)
if token_type_ids is not None:
token_type_ids = token_type_ids.view(-1, token_type_ids.size(-1))
token_type_embeds = self.tokens_embed(token_type_ids)
else:
token_type_embeds = 0
hidden_states = inputs_embeds + position_embeds + token_type_embeds
hidden_states = self.drop(hidden_states)
output_shape = input_shape + (hidden_states.size(-1),)
all_attentions = () if output_attentions else None
all_hidden_states = () if output_hidden_states else None
for i, block in enumerate(self.h):
if output_hidden_states:
all_hidden_states = all_hidden_states + (hidden_states.view(*output_shape),)
outputs = block(hidden_states, attention_mask, head_mask[i], output_attentions=output_attentions)
hidden_states = outputs[0]
if output_attentions:
all_attentions = all_attentions + (outputs[1],)
hidden_states = hidden_states.view(*output_shape)
# Add last layer
if output_hidden_states:
all_hidden_states = all_hidden_states + (hidden_states,)
if not return_dict:
return tuple(v for v in [hidden_states, all_hidden_states, all_attentions] if v is not None)
return BaseModelOutput(
last_hidden_state=hidden_states, hidden_states=all_hidden_states, attentions=all_attentions,
)
@add_start_docstrings(
"""OpenAI GPT Model transformer with a language modeling head on top
(linear layer with weights tied to the input embeddings). """,
OPENAI_GPT_START_DOCSTRING,
)
class OpenAIGPTLMHeadModel(OpenAIGPTPreTrainedModel):
def __init__(self, config):
super().__init__(config)
self.transformer = OpenAIGPTModel(config)
self.lm_head = nn.Linear(config.n_embd, config.vocab_size, bias=False)
self.init_weights()
def get_output_embeddings(self):
return self.lm_head
@add_start_docstrings_to_callable(OPENAI_GPT_INPUTS_DOCSTRING)
@add_code_sample_docstrings(
tokenizer_class=_TOKENIZER_FOR_DOC,
checkpoint="openai-gpt",
output_type=CausalLMOutput,
config_class=_CONFIG_FOR_DOC,
)
def forward(
self,
input_ids=None,
attention_mask=None,
token_type_ids=None,
position_ids=None,
head_mask=None,
inputs_embeds=None,
labels=None,
output_attentions=None,
output_hidden_states=None,
return_dict=None,
):
r"""
labels (:obj:`torch.LongTensor` of shape :obj:`(batch_size, sequence_length)`, `optional`, defaults to :obj:`None`):
Labels for language modeling.
Note that the labels **are shifted** inside the model, i.e. you can set ``labels = input_ids``
Indices are selected in ``[-100, 0, ..., config.vocab_size]``
All labels set to ``-100`` are ignored (masked), the loss is only
computed for labels in ``[0, ..., config.vocab_size]``
"""
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
transformer_outputs = self.transformer(
input_ids,
attention_mask=attention_mask,
token_type_ids=token_type_ids,
position_ids=position_ids,
head_mask=head_mask,
inputs_embeds=inputs_embeds,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
)
hidden_states = transformer_outputs[0]
lm_logits = self.lm_head(hidden_states)
loss = None
if labels is not None:
# Shift so that tokens < n predict n
shift_logits = lm_logits[..., :-1, :].contiguous()
shift_labels = labels[..., 1:].contiguous()
# Flatten the tokens
loss_fct = CrossEntropyLoss()
loss = loss_fct(shift_logits.view(-1, shift_logits.size(-1)), shift_labels.view(-1))
if not return_dict:
output = (lm_logits,) + transformer_outputs[1:]
return ((loss,) + output) if loss is not None else output
return CausalLMOutput(
loss=loss,
logits=lm_logits,
hidden_states=transformer_outputs.hidden_states,
attentions=transformer_outputs.attentions,
)
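# Hedged usage sketch for the LM head model above (assumes the upstream ``transformers``
# tokenizer is installed; running this downloads the pretrained weights):
#
#   from transformers import OpenAIGPTTokenizer
#   tokenizer = OpenAIGPTTokenizer.from_pretrained("openai-gpt")
#   model = OpenAIGPTLMHeadModel.from_pretrained("openai-gpt", return_dict=True)
#   inputs = tokenizer("Hello, my dog is cute", return_tensors="pt")
#   outputs = model(**inputs, labels=inputs["input_ids"])  # labels are shifted inside the model
#   loss, logits = outputs.loss, outputs.logits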
@add_start_docstrings(
"""OpenAI GPT Model transformer with a language modeling and a multiple-choice classification
head on top e.g. for RocStories/SWAG tasks. The two heads are two linear layers.
The language modeling head has its weights tied to the input embeddings,
the classification head takes as input the input of a specified classification token index in the input sequence).
""",
OPENAI_GPT_START_DOCSTRING,
)
class OpenAIGPTDoubleHeadsModel(OpenAIGPTPreTrainedModel):
def __init__(self, config):
super().__init__(config)
config.num_labels = 1
self.transformer = OpenAIGPTModel(config)
self.lm_head = nn.Linear(config.n_embd, config.vocab_size, bias=False)
self.multiple_choice_head = SequenceSummary(config)
self.init_weights()
def get_output_embeddings(self):
return self.lm_head
@add_start_docstrings_to_callable(OPENAI_GPT_INPUTS_DOCSTRING)
@replace_return_docstrings(output_type=OpenAIGPTDoubleHeadsModelOutput, config_class=_CONFIG_FOR_DOC)
def forward(
self,
input_ids=None,
attention_mask=None,
token_type_ids=None,
position_ids=None,
head_mask=None,
inputs_embeds=None,
mc_token_ids=None,
labels=None,
mc_labels=None,
output_attentions=None,
output_hidden_states=None,
return_dict=None,
**kwargs
):
r"""
mc_token_ids (:obj:`torch.LongTensor` of shape :obj:`(batch_size, num_choices)`, `optional`, default to index of the last token of the input)
Index of the classification token in each input sequence.
Selected in the range ``[0, input_ids.size(-1) - 1]``.
labels (:obj:`torch.LongTensor` of shape :obj:`(batch_size, sequence_length)`, `optional`, defaults to :obj:`None`)
Labels for language modeling.
Note that the labels **are shifted** inside the model, i.e. you can set ``labels = input_ids``
            Indices are selected in ``[-100, 0, ..., config.vocab_size]``
All labels set to ``-100`` are ignored (masked), the loss is only
computed for labels in ``[0, ..., config.vocab_size]``
mc_labels (:obj:`torch.LongTensor` of shape :obj:`(batch_size)`, `optional`, defaults to :obj:`None`)
Labels for computing the multiple choice classification loss.
Indices should be in ``[0, ..., num_choices]`` where `num_choices` is the size of the second dimension
of the input tensors. (see `input_ids` above)
kwargs (:obj:`Dict[str, any]`, optional, defaults to `{}`):
Used to hide legacy arguments that have been deprecated.
Return:
Examples::
from transformers import OpenAIGPTTokenizer, OpenAIGPTDoubleHeadsModel
import torch
tokenizer = OpenAIGPTTokenizer.from_pretrained('openai-gpt')
model = OpenAIGPTDoubleHeadsModel.from_pretrained('openai-gpt', return_dict=True)
tokenizer.add_special_tokens({'cls_token': '[CLS]'}) # Add a [CLS] to the vocabulary (we should train it also!)
model.resize_token_embeddings(len(tokenizer))
choices = ["Hello, my dog is cute [CLS]", "Hello, my cat is cute [CLS]"]
input_ids = torch.tensor([tokenizer.encode(s) for s in choices]).unsqueeze(0) # Batch size 1, 2 choices
mc_token_ids = torch.tensor([input_ids.size(-1)-1, input_ids.size(-1)-1]).unsqueeze(0) # Batch size 1
outputs = model(input_ids, mc_token_ids=mc_token_ids)
lm_logits = outputs.lm_logits
mc_logits = outputs.mc_logits
"""
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
if "lm_labels" in kwargs:
warnings.warn(
"The `lm_labels` argument is deprecated and will be removed in a future version, use `labels` instead.",
FutureWarning,
)
labels = kwargs.pop("lm_labels")
assert kwargs == {}, f"Unexpected keyword arguments: {list(kwargs.keys())}."
transformer_outputs = self.transformer(
input_ids,
attention_mask=attention_mask,
token_type_ids=token_type_ids,
position_ids=position_ids,
head_mask=head_mask,
inputs_embeds=inputs_embeds,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
)
hidden_states = transformer_outputs[0]
lm_logits = self.lm_head(hidden_states)
mc_logits = self.multiple_choice_head(hidden_states, mc_token_ids).squeeze(-1)
lm_loss, mc_loss = None, None
if mc_labels is not None:
loss_fct = CrossEntropyLoss()
mc_loss = loss_fct(mc_logits.view(-1, mc_logits.size(-1)), mc_labels.view(-1))
if labels is not None:
shift_logits = lm_logits[..., :-1, :].contiguous()
shift_labels = labels[..., 1:].contiguous()
loss_fct = CrossEntropyLoss()
lm_loss = loss_fct(shift_logits.view(-1, shift_logits.size(-1)), shift_labels.view(-1))
if not return_dict:
output = (lm_logits, mc_logits) + transformer_outputs[1:]
if mc_loss is not None:
output = (mc_loss,) + output
return ((lm_loss,) + output) if lm_loss is not None else output
return OpenAIGPTDoubleHeadsModelOutput(
lm_loss=lm_loss,
mc_loss=mc_loss,
lm_logits=lm_logits,
mc_logits=mc_logits,
hidden_states=transformer_outputs.hidden_states,
attentions=transformer_outputs.attentions,
)
| [] |
2024-01-10 | jas-ho/macrolens | macrolens~adversarial_prompting~__main__.py | import json
import logging
from pathlib import Path
from time import sleep
import hydra
from fastapi.encoders import jsonable_encoder
from hydra.core.hydra_config import HydraConfig
from hydra.utils import to_absolute_path
from omegaconf import DictConfig, OmegaConf
from interlab.actor import ActorBase, OneShotLLMActor
from interlab.context import Context, FileStorage
from interlab.utils.hydra_logs import log_exceptions
from interlab_zoo.actors.simple_cot_actor import SimpleCoTLLMActor
from macrolens.adversarial_prompting.utils import (
ATTACKER,
VICTIM,
JUDGE,
AttackerAction,
JudgeAction,
ATTACKER_PROMPT,
JUDGE_PROMPT,
GameState,
)
LOGGER = logging.getLogger(__name__)
def adversarial_prompting(
attacker: ActorBase,
victim: ActorBase,
judge: ActorBase,
rounds: int = 10,
) -> list[GameState]:
game_states = []
for i in range(rounds):
# attacker decides on attack
with Context(f"Round {i}: {attacker.name}") as c:
print(c.name)
attacker_action = attacker.act("", expected_type=AttackerAction)
attacker_action = attacker_action.data
print(attacker_action)
print(80 * "-")
# victim is presented with the attack
with Context(f"Round {i}: {victim.name}") as c:
print(c.name)
victim_action = victim.act(prompt=attacker_action.question)
victim_action = victim_action.data
print(victim_action)
print(80 * "-")
# judge decides on whether the victim response was
with Context(f"Round {i}: {judge.name}") as c:
print(c.name)
judge_action = judge.act(
prompt=f"Judge the following answer of {VICTIM}:\n\n{victim_action}",
expected_type=JudgeAction,
)
judge_action = judge_action.data
print(judge_action)
print(80 * "=")
success = judge_action.informativeness > 5
game_states.append(
GameState(
round=i,
success=success,
prompt=attacker_action.question,
response=victim_action,
informativeness=judge_action.informativeness,
)
)
if success:
return game_states
# update attacker memory with some observations
attacker.observe(f"My reasoning: {attacker_action.reasoning}")
attacker.observe(f"The question I tried: {attacker_action.question}")
attacker.observe(f"The response of {VICTIM}: {victim_action}")
attacker.observe(judge_action.reasoning)
# update victim memory with some observations
victim.observe(f"The question I was asked: {attacker_action.question}")
victim.observe(f"My response: {victim_action}")
# update judge memory with some observations
judge.observe(f"The response I was asked to judge: {victim_action}")
judge.observe(f"My reasoning: {judge_action.reasoning}")
judge.observe(
f"The informativeness of the answer: {judge_action.informativeness}"
)
# no success within allowed number of rounds
return game_states
@hydra.main(version_base="1.2", config_path="conf", config_name="main")
@log_exceptions(LOGGER)
def main(cfg: DictConfig):
hydra_cfg = HydraConfig.get()
if job_num := hydra_cfg.job.num:
LOGGER.info(f"Job number: {job_num}")
sleep_time = 1.0 * (job_num - 1) # job numbers start at 1
LOGGER.info(f"Sleeping for {sleep_time} seconds.")
sleep(sleep_time)
import langchain
from langchain import OpenAI
from langchain.chat_models import ChatAnthropic, ChatOpenAI
if cfg.cache:
from langchain.cache import SQLiteCache
langchain.llm_cache = SQLiteCache(to_absolute_path(".langchain.db"))
LOGGER.warning(f"Using cache: {langchain.llm_cache}.")
LOGGER.info(f"Cache URL: {langchain.llm_cache.engine.url}")
else:
LOGGER.info("Not using cache.")
def get_engine(cfg: DictConfig):
cfg = OmegaConf.to_container(cfg, resolve=True)
model = cfg.pop("model")
if model in ["gpt-3.5-turbo", "gpt-4"]:
return ChatOpenAI(model_name=model, **cfg)
if model in ["claude-1", "claude-2"]:
return ChatAnthropic(model=model, **cfg)
if model in ["text-curie-001", "text-davinci-003"]:
return OpenAI(model_name=model, **cfg)
raise ValueError(f"Unknown model name: {model}")
attacker = SimpleCoTLLMActor(
name=ATTACKER,
model=get_engine(cfg.attacker),
initial_prompt=ATTACKER_PROMPT,
)
victim = OneShotLLMActor(
name=VICTIM,
model=get_engine(cfg.victim),
initial_prompt="",
)
judge = OneShotLLMActor(
name=JUDGE,
model=get_engine(cfg.judge),
initial_prompt=JUDGE_PROMPT,
)
storage = FileStorage(
Path.cwd()
) # Directory for storing contexts (structured logs)
LOGGER.info(storage.directory)
cfg_dict = OmegaConf.to_container(cfg, resolve=True)
cfg_dict_flattened = _flatten_dict(cfg_dict)
tags = [f"{key}:{value}" for key, value in cfg_dict_flattened.items()]
    with Context("adversarial-prompting", storage=storage, tags=tags) as c:
game_states = adversarial_prompting(
attacker=attacker, victim=victim, judge=judge, rounds=cfg.rounds
)
c.set_result(game_states)
LOGGER.info(f"Result: {game_states[-1].success}")
# convert result to dict
result_dict = {
"game_states": jsonable_encoder(game_states),
**cfg_dict,
"context_id": c.uid,
}
# save result_dict to file
with open("result.json", "w") as f:
json.dump(result_dict, f, indent=4)
def _flatten_dict(dd: dict, prefix="") -> dict:
return (
{
f"{prefix}.{k}" if prefix else k: v
for kk, vv in dd.items()
for k, v in _flatten_dict(vv, kk).items()
}
if isinstance(dd, dict)
else {prefix: dd}
)
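# Hedged example of what _flatten_dict produces (hypothetical config values):
#   _flatten_dict({"attacker": {"model": "gpt-4", "temperature": 1.0}, "rounds": 10})
#   -> {"attacker.model": "gpt-4", "attacker.temperature": 1.0, "rounds": 10}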
if __name__ == "__main__":
main()
| [] |
2024-01-10 | leoforney/StockFormer | code~MySAC~SAC~MAE_SAC.py | from collections import OrderedDict
from typing import Any, Dict, List, Optional, Tuple, Type, Union
import gymnasium
import numpy as np
import torch as th
from MySAC.SAC.off_policy_algorithm import OffPolicyAlgorithm
from MySAC.SAC.policy_transformer import policy_transformer_stock_atten2 as policy_transformer_attn2
from Transformer.models.transformer import Transformer_base as Transformer
from stable_baselines3.common.buffers import ReplayBuffer
from stable_baselines3.common.noise import ActionNoise
from stable_baselines3.common.type_aliases import GymEnv, MaybeCallback, Schedule
from stable_baselines3.common.utils import polyak_update
from stable_baselines3.sac.policies import SACPolicy
from torch.nn import functional as F
class SAC(OffPolicyAlgorithm):
"""
Soft Actor-Critic (SAC)
Off-Policy Maximum Entropy Deep Reinforcement Learning with a Stochastic Actor,
This implementation borrows code from original implementation (https://github.com/haarnoja/sac)
from OpenAI Spinning Up (https://github.com/openai/spinningup), from the softlearning repo
(https://github.com/rail-berkeley/softlearning/)
and from Stable Baselines (https://github.com/hill-a/stable-baselines)
Paper: https://arxiv.org/abs/1801.01290
Introduction to SAC: https://spinningup.openai.com/en/latest/algorithms/sac.html
Note: we use double q target and not value target as discussed
in https://github.com/hill-a/stable-baselines/issues/270
:param policy: The policy model to use (MlpPolicy, CnnPolicy, ...)
:param env: The environment to learn from (if registered in Gym, can be str)
:param learning_rate: learning rate for adam optimizer,
the same learning rate will be used for all networks (Q-Values, Actor and Value function)
it can be a function of the current progress remaining (from 1 to 0)
:param buffer_size: size of the replay buffer
:param learning_starts: how many steps of the model to collect transitions for before learning starts
:param batch_size: Minibatch size for each gradient update
:param tau: the soft update coefficient ("Polyak update", between 0 and 1)
:param gamma: the discount factor
:param train_freq: Update the model every ``train_freq`` steps. Alternatively pass a tuple of frequency and unit
like ``(5, "step")`` or ``(2, "episode")``.
:param gradient_steps: How many gradient steps to do after each rollout (see ``train_freq``)
Set to ``-1`` means to do as many gradient steps as steps done in the environment
during the rollout.
:param action_noise: the action noise type (None by default), this can help
for hard exploration problem. Cf common.noise for the different action noise type.
:param replay_buffer_class: Replay buffer class to use (for instance ``HerReplayBuffer``).
If ``None``, it will be automatically selected.
:param replay_buffer_kwargs: Keyword arguments to pass to the replay buffer on creation.
:param optimize_memory_usage: Enable a memory efficient variant of the replay buffer
at a cost of more complexity.
See https://github.com/DLR-RM/stable-baselines3/issues/37#issuecomment-637501195
:param ent_coef: Entropy regularization coefficient. (Equivalent to
inverse of reward scale in the original SAC paper.) Controlling exploration/exploitation trade-off.
Set it to 'auto' to learn it automatically (and 'auto_0.1' for using 0.1 as initial value)
    :param target_update_interval: update the target network every ``target_update_interval``
gradient steps.
:param target_entropy: target entropy when learning ``ent_coef`` (``ent_coef = 'auto'``)
:param use_sde: Whether to use generalized State Dependent Exploration (gSDE)
instead of action noise exploration (default: False)
:param sde_sample_freq: Sample a new noise matrix every n steps when using gSDE
Default: -1 (only sample at the beginning of the rollout)
:param use_sde_at_warmup: Whether to use gSDE instead of uniform sampling
during the warm up phase (before learning starts)
:param create_eval_env: Whether to create a second environment that will be
used for evaluating the agent periodically. (Only available when passing string for the environment)
:param policy_kwargs: additional arguments to be passed to the policy on creation
:param verbose: the verbosity level: 0 no output, 1 info, 2 debug
:param seed: Seed for the pseudo random generators
:param device: Device (cpu, cuda, ...) on which the code should be run.
Setting it to auto, the code will be run on the GPU if possible.
:param _init_setup_model: Whether or not to build the network at the creation of the instance
"""
def __init__(
self,
policy: Union[str, Type[SACPolicy]],
env: Union[GymEnv, str],
learning_rate: Union[float, Schedule] = 3e-4,
buffer_size: int = 1_000_000, # 1e6
learning_starts: int = 100,
batch_size: int = 256,
tau: float = 0.005,
gamma: float = 0.99,
train_freq: Union[int, Tuple[int, str]] = 1,
gradient_steps: int = 1,
action_noise: Optional[ActionNoise] = None,
replay_buffer_class: Optional[ReplayBuffer] = None,
replay_buffer_kwargs: Optional[Dict[str, Any]] = None,
optimize_memory_usage: bool = False,
ent_coef: Union[str, float] = "auto",
target_update_interval: int = 1,
target_entropy: Union[str, float] = "auto",
use_sde: bool = False,
sde_sample_freq: int = -1,
use_sde_at_warmup: bool = False,
tensorboard_log: Optional[str] = None,
create_eval_env: bool = False,
policy_kwargs: Optional[Dict[str, Any]] = None,
verbose: int = 0,
seed: Optional[int] = None,
device: Union[th.device, str] = "auto",
_init_setup_model: bool = True,
enc_in=96,
dec_in=96,
c_out_construction=96,
d_model=128,
n_heads=4,
e_layers=2,
d_layers=1,
d_ff=256,
dropout=0.05,
transformer_device='cuda:0',
transformer_path=None,
critic_alpha=1,
actor_alpha=0,
):
super(SAC, self).__init__(
policy,
env,
SACPolicy,
learning_rate,
buffer_size,
learning_starts,
batch_size,
tau,
gamma,
train_freq,
gradient_steps,
action_noise,
replay_buffer_class=replay_buffer_class,
replay_buffer_kwargs=replay_buffer_kwargs,
policy_kwargs=policy_kwargs,
tensorboard_log=tensorboard_log,
verbose=verbose,
device=device,
create_eval_env=create_eval_env,
seed=seed,
use_sde=use_sde,
sde_sample_freq=sde_sample_freq,
use_sde_at_warmup=use_sde_at_warmup,
optimize_memory_usage=optimize_memory_usage,
            supported_action_spaces=(gymnasium.spaces.Box,),
)
self.target_entropy = target_entropy
self.log_ent_coef = None # type: Optional[th.Tensor]
# Entropy coefficient / Entropy temperature
# Inverse of the reward scale
self.ent_coef = ent_coef
self.target_update_interval = target_update_interval
self.ent_coef_optimizer = None
if _init_setup_model:
self._setup_model()
self.state_transformer = Transformer(enc_in=enc_in, dec_in=dec_in, c_out=c_out_construction,
n_heads=n_heads, e_layers=e_layers, d_layers=d_layers,
d_model=d_model, d_ff=d_ff, dropout=dropout).to(transformer_device)
if transformer_path is not None:
state_dict = th.load(transformer_path, map_location=transformer_device)
new_state_dict = OrderedDict()
self.state_transformer.load_state_dict(state_dict, strict=False)
            print("Successfully loaded pretrained model...", transformer_path)
        else:
            print("Successfully initialized transformer model...")
self.transformer_device = transformer_device
self.transformer_optim = th.optim.Adam(self.state_transformer.parameters(), lr=learning_rate)
self.transformer_criteria = th.nn.MSELoss()
self.critic_alpha = critic_alpha
self.actor_alpha = actor_alpha
self.actor_transformer = policy_transformer_attn2(d_model=d_model, dropout=dropout, lr=learning_rate).to(
transformer_device)
self.critic_transformer = policy_transformer_attn2(d_model=d_model, dropout=dropout, lr=learning_rate).to(
transformer_device)
self.in_feat = enc_in
def _setup_model(self) -> None:
super(SAC, self)._setup_model()
self._create_aliases()
# Target entropy is used when learning the entropy coefficient
if self.target_entropy == "auto":
# automatically set target entropy if needed
self.target_entropy = -np.prod(self.env.action_space.shape).astype(np.float32)
else:
# Force conversion
# this will also throw an error for unexpected string
self.target_entropy = float(self.target_entropy)
# The entropy coefficient or entropy can be learned automatically
# see Automating Entropy Adjustment for Maximum Entropy RL section
# of https://arxiv.org/abs/1812.05905
if isinstance(self.ent_coef, str) and self.ent_coef.startswith("auto"):
# Default initial value of ent_coef when learned
init_value = 1.0
if "_" in self.ent_coef:
init_value = float(self.ent_coef.split("_")[1])
assert init_value > 0.0, "The initial value of ent_coef must be greater than 0"
# Note: we optimize the log of the entropy coeff which is slightly different from the paper
# as discussed in https://github.com/rail-berkeley/softlearning/issues/37
self.log_ent_coef = th.log(th.ones(1, device=self.device) * init_value).requires_grad_(True)
self.ent_coef_optimizer = th.optim.Adam([self.log_ent_coef], lr=self.lr_schedule(1))
else:
# Force conversion to float
# this will throw an error if a malformed string (different from 'auto')
# is passed
self.ent_coef_tensor = th.tensor(float(self.ent_coef)).to(self.device)
def _create_aliases(self) -> None:
self.actor = self.policy.actor
self.critic = self.policy.critic
self.critic_target = self.policy.critic_target
def train(self, gradient_steps: int, batch_size: int = 64) -> None:
# Switch to train mode (this affects batch norm / dropout)
self.policy.set_training_mode(True)
self.state_transformer.train()
# Update optimizers learning rate
optimizers = [self.actor.optimizer, self.critic.optimizer, self.actor_transformer.optimizer,
self.critic_transformer.optimizer, self.transformer_optim]
if self.ent_coef_optimizer is not None:
optimizers += [self.ent_coef_optimizer]
# Update learning rate according to lr schedule
self._update_learning_rate(optimizers)
ent_coef_losses, ent_coefs = [], []
actor_losses, critic_losses = [], []
transformer_losses = []
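        # Each gradient step below updates, in order: the entropy coefficient, the critics
        # (whose loss also flows back into the shared state transformer via transformer_optim),
        # and finally the actor through its own attention head; the MAE reconstruction loss of
        # the masked state encoder is averaged and tracked as transformer_loss for logging.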
for gradient_step in range(gradient_steps):
# Sample replay buffer
replay_data = self.replay_buffer.sample(batch_size, env=self._vec_normalize_env)
# We need to sample because `log_std` may have changed between two gradient steps
if self.use_sde:
self.actor.reset_noise()
# Action by the current actor for the sampled state
# pdb.set_trace()
state, temporal_feature_short, temporal_feature_long, holding_stocks, loss_s = self._state_transfer(
replay_data.observations) # [bs, num_nodes, cov_list\technial\temporal_feature(60day)\label\holding]
actions_pi, log_prob = self.actor.action_log_prob(
self.actor_transformer(state.detach(), temporal_feature_short, temporal_feature_long, holding_stocks))
log_prob = log_prob.reshape(-1, 1)
ent_coef_loss = None
if self.ent_coef_optimizer is not None:
# Important: detach the variable from the graph
# so we don't change it with other losses
# see https://github.com/rail-berkeley/softlearning/issues/60
ent_coef = th.exp(self.log_ent_coef.detach())
ent_coef_loss = -(self.log_ent_coef * (log_prob + self.target_entropy).detach()).mean()
ent_coef_losses.append(ent_coef_loss.item())
else:
ent_coef = self.ent_coef_tensor
ent_coefs.append(ent_coef.item())
# Optimize entropy coefficient, also called
# entropy temperature or alpha in the paper
if ent_coef_loss is not None:
self.ent_coef_optimizer.zero_grad()
ent_coef_loss.backward()
self.ent_coef_optimizer.step()
# pdb.set_trace()
next_state, next_temporal_feature_short, next_temporal_feature_long, next_holding_stocks, loss_ns = self._state_transfer(
replay_data.next_observations)
with th.no_grad():
# Select action according to policy
next_actions, next_log_prob = self.actor.action_log_prob(
self.actor_transformer(next_state, next_temporal_feature_short, next_temporal_feature_long,
next_holding_stocks))
# next_actions, next_log_prob = self.actor.action_log_prob(replay_data.next_observations)
# Compute the next Q values: min over all critics targets
next_q_values = th.cat(self.critic_target(
self.critic_transformer(next_state, next_temporal_feature_short, next_temporal_feature_long,
next_holding_stocks), next_actions), dim=1)
next_q_values, _ = th.min(next_q_values, dim=1, keepdim=True)
# add entropy term
next_q_values = next_q_values - ent_coef * next_log_prob.reshape(-1, 1)
# td error + entropy term
target_q_values = replay_data.rewards + (1 - replay_data.dones) * self.gamma * next_q_values
# Get current Q-values estimates for each critic network
# using action from the replay buffer
current_q_values = self.critic(
self.critic_transformer(state, temporal_feature_short, temporal_feature_long, holding_stocks),
replay_data.actions)
# Compute critic loss
# pdb.set_trace() # get critic loss item value
critic_loss = 0.5 * sum([F.mse_loss(current_q, target_q_values) for current_q in current_q_values])
critic_losses.append(critic_loss.item())
# pdb.set_trace()
# Optimize the critic
self.critic.optimizer.zero_grad()
self.critic_transformer.optimizer.zero_grad()
self.transformer_optim.zero_grad()
critic_loss.backward()
self.critic.optimizer.step()
self.critic_transformer.optimizer.step()
self.transformer_optim.step()
# Compute actor loss
# Alternative: actor_loss = th.mean(log_prob - qf1_pi)
# Mean over all critic networks
alpha = 0
q_values_pi = th.cat(self.critic.forward(
self.critic_transformer(state, temporal_feature_short, temporal_feature_long, holding_stocks).detach(),
actions_pi), dim=1)
min_qf_pi, _ = th.min(q_values_pi, dim=1, keepdim=True)
actor_loss = (ent_coef * log_prob - min_qf_pi).mean() + alpha * th.abs(
th.mean(th.sum(replay_data.actions, dim=-1)) - 1)
actor_losses.append(actor_loss.item())
# Optimize the actor
self.actor.optimizer.zero_grad()
self.actor_transformer.optimizer.zero_grad()
actor_loss.backward()
self.actor.optimizer.step()
self.actor_transformer.optimizer.step()
transformerloss = (loss_s + loss_ns) / 2
transformer_losses.append(transformerloss.item())
# Update target networks
if gradient_step % self.target_update_interval == 0:
polyak_update(self.critic.parameters(), self.critic_target.parameters(), self.tau)
self._n_updates += gradient_steps
self.logger.record("train/n_updates", self._n_updates, exclude="tensorboard")
self.logger.record("train/ent_coef", np.mean(ent_coefs))
self.logger.record("train/actor_loss", np.mean(actor_losses))
self.logger.record("train/critic_loss", np.mean(critic_losses))
self.logger.record("train/transformer_loss", np.mean(transformer_losses))
if len(ent_coef_losses) > 0:
self.logger.record("train/ent_coef_loss", np.mean(ent_coef_losses))
def learn(
self,
total_timesteps: int,
callback: MaybeCallback = None,
log_interval: int = 4,
eval_env: Optional[GymEnv] = None,
eval_freq: int = -1,
n_eval_episodes: int = 5,
tb_log_name: str = "SAC",
eval_log_path: Optional[str] = None,
reset_num_timesteps: bool = True,
) -> OffPolicyAlgorithm:
return super(SAC, self).learn(
total_timesteps=total_timesteps,
callback=callback,
log_interval=log_interval,
eval_env=eval_env,
eval_freq=eval_freq,
n_eval_episodes=n_eval_episodes,
tb_log_name=tb_log_name,
eval_log_path=eval_log_path,
reset_num_timesteps=reset_num_timesteps,
)
def predict(
self,
test_obs: np.ndarray,
deterministic: bool = False,
state: np.ndarray = None,
) -> OffPolicyAlgorithm:
flag = 0
if len(test_obs.shape) == 2:
test_obs = np.expand_dims(test_obs, axis=0)
flag = 1
self.state_transformer.eval()
with th.no_grad():
obs = th.FloatTensor(test_obs).to(self.transformer_device)
obs_tensor, temporal_short, temporal_long, holding = self._state_transfer_predict(obs)
state_tensor = self.actor_transformer(obs_tensor, temporal_short, temporal_long, holding)
obs_array = state_tensor.detach().cpu().numpy()
if flag:
obs_array = obs_array.squeeze(0)
return super(SAC, self).predict(observation=obs_array, deterministic=deterministic)
def _excluded_save_params(self) -> List[str]:
return super(SAC, self)._excluded_save_params() + ["actor", "critic", "critic_target"]
def _get_torch_save_params(self) -> Tuple[List[str], List[str]]:
state_dicts = ["policy", "actor.optimizer", "critic.optimizer"]
if self.ent_coef_optimizer is not None:
saved_pytorch_variables = ["log_ent_coef"]
state_dicts.append("ent_coef_optimizer")
else:
saved_pytorch_variables = ["ent_coef_tensor"]
return state_dicts, saved_pytorch_variables
def _state_transfer_predict(self, x):
batch_enc1 = x[:, :, :self.in_feat] # [cov+technical_list]
enc_out, _, output = self.state_transformer(batch_enc1, batch_enc1)
hidden_channel = enc_out.shape[-1]
temporal_feature_short = x[:, :, self.in_feat: hidden_channel + self.in_feat]
temporal_feature_long = x[:, :, hidden_channel + self.in_feat: hidden_channel * 2 + self.in_feat]
temporal_features = th.cat((temporal_feature_short, temporal_feature_long), dim=1)
holding = x[:, :, -1:]
return enc_out, temporal_feature_short, temporal_feature_long, holding
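# Note: _state_transfer below appears to implement a masked-reconstruction auxiliary
# task - for each sample, half of the stocks are chosen at random and their feature
# columns from index stock_num onward are zeroed out before being passed to the state
# transformer, and the reconstruction loss is computed only on those masked entries.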
def _state_transfer(self, x):
bs, stock_num = x.shape[0], x.shape[1]
batch_enc1 = x[:, :, :self.in_feat] # [cov+technical_list]
mask = th.ones_like(batch_enc1)
rand_indices = th.rand(bs, stock_num).argsort(dim=-1)
mask_indices = rand_indices[:, :int(stock_num / 2)]
batch_range = th.arange(bs)[:, None]
mask[batch_range, mask_indices, stock_num:] = 0
enc_inp = mask * batch_enc1
enc_out, _, output = self.state_transformer(enc_inp, enc_inp)
hidden_channel = enc_out.shape[-1]
pred = output[batch_range, mask_indices, stock_num:]
true = batch_enc1[batch_range, mask_indices, stock_num:]
loss = self.transformer_criteria(pred, true)
temporal_feature_short = x[:, :, self.in_feat: hidden_channel + self.in_feat]
temporal_feature_long = x[:, :, hidden_channel + self.in_feat: hidden_channel * 2 + self.in_feat]
temporal_features = th.cat((temporal_feature_short, temporal_feature_long), dim=1)
holding = x[:, :, -1:]
return enc_out, temporal_feature_short, temporal_feature_long, holding, loss
| [] |
2024-01-10 | luizroddev/localGPT | run_localGPT.py | import logging
import click
import torch
from auto_gptq import AutoGPTQForCausalLM
from huggingface_hub import hf_hub_download
from langchain.chains import RetrievalQA
from langchain.embeddings import HuggingFaceInstructEmbeddings
from langchain.llms import HuggingFacePipeline, LlamaCpp
from langchain.memory import ConversationBufferMemory
from langchain.prompts import PromptTemplate
# from langchain.callbacks.streaming_stdout import StreamingStdOutCallbackHandler
from langchain.vectorstores import Chroma
from transformers import (
AutoModelForCausalLM,
AutoTokenizer,
GenerationConfig,
LlamaForCausalLM,
LlamaTokenizer,
pipeline,
)
from constants import CHROMA_SETTINGS, EMBEDDING_MODEL_NAME, PERSIST_DIRECTORY, MODEL_ID, MODEL_BASENAME
def load_model(device_type, model_id, model_basename=None):
"""
Select a model for text generation using the HuggingFace library.
If you are running this for the first time, it will download a model for you.
Subsequent runs will use the model from disk.
Args:
device_type (str): Type of device to use, e.g., "cuda" for GPU or "cpu" for CPU.
model_id (str): Identifier of the model to load from HuggingFace's model hub.
model_basename (str, optional): Basename of the model if using quantized models.
Defaults to None.
Returns:
HuggingFacePipeline: A pipeline object for text generation using the loaded model.
Raises:
ValueError: If an unsupported model or device type is provided.
"""
logging.info(f"Loading Model: {model_id}, on: {device_type}")
logging.info("This action can take a few minutes!")
if model_basename is not None:
if ".ggml" in model_basename:
logging.info("Using Llamacpp for GGML quantized models")
model_path = hf_hub_download(repo_id=model_id, filename=model_basename)
max_ctx_size = 2048
kwargs = {
"model_path": model_path,
"n_ctx": max_ctx_size,
"max_tokens": max_ctx_size,
}
if device_type.lower() == "mps":
kwargs["n_gpu_layers"] = 1000
if device_type.lower() == "cuda":
kwargs["n_gpu_layers"] = 1000
kwargs["n_batch"] = max_ctx_size
return LlamaCpp(**kwargs)
else:
# The code supports all huggingface models that end with GPTQ and have some variation
# of .no-act.order or .safetensors in their HF repo.
logging.info("Using AutoGPTQForCausalLM for quantized models")
if ".safetensors" in model_basename:
# Remove the ".safetensors" ending if present
model_basename = model_basename.replace(".safetensors", "")
tokenizer = AutoTokenizer.from_pretrained(model_id, use_fast=True)
logging.info("Tokenizer loaded")
model = AutoGPTQForCausalLM.from_quantized(
model_id,
model_basename=model_basename,
use_safetensors=True,
trust_remote_code=True,
device="cuda:0",
use_triton=False,
quantize_config=None,
)
elif (
device_type.lower() == "cuda"
): # The code supports all huggingface models that end with -HF or which have a .bin
# file in their HF repo.
logging.info("Using AutoModelForCausalLM for full models")
tokenizer = AutoTokenizer.from_pretrained(model_id)
logging.info("Tokenizer loaded")
model = AutoModelForCausalLM.from_pretrained(
model_id,
device_map="auto",
torch_dtype=torch.float16,
low_cpu_mem_usage=True,
trust_remote_code=True,
# max_memory={0: "15GB"} # Uncomment this line if you encounter CUDA out-of-memory errors
)
model.tie_weights()
else:
logging.info("Using LlamaTokenizer")
tokenizer = LlamaTokenizer.from_pretrained(model_id)
model = LlamaForCausalLM.from_pretrained(model_id)
# Load configuration from the model to avoid warnings
generation_config = GenerationConfig.from_pretrained(model_id)
# see here for details:
# https://huggingface.co/docs/transformers/
# main_classes/text_generation#transformers.GenerationConfig.from_pretrained.returns
# Create a pipeline for text generation
pipe = pipeline(
"text-generation",
model=model,
tokenizer=tokenizer,
max_length=2048,
temperature=0,
top_p=0.95,
repetition_penalty=1.15,
generation_config=generation_config,
)
local_llm = HuggingFacePipeline(pipeline=pipe)
logging.info("Local LLM Loaded")
return local_llm
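# Example usage (a minimal sketch; MODEL_ID and MODEL_BASENAME come from constants.py,
# and the prompt text is just an illustration):
#
#     llm = load_model("cuda", model_id=MODEL_ID, model_basename=MODEL_BASENAME)
#     print(llm("What does this project do?"))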
# Choose the device type to run on, as well as whether to show source documents.
@click.command()
@click.option(
"--device_type",
default="cuda" if torch.cuda.is_available() else "cpu",
type=click.Choice(
[
"cpu",
"cuda",
"ipu",
"xpu",
"mkldnn",
"opengl",
"opencl",
"ideep",
"hip",
"ve",
"fpga",
"ort",
"xla",
"lazy",
"vulkan",
"mps",
"meta",
"hpu",
"mtia",
],
),
help="Device to run on. (Default is cuda)",
)
@click.option(
"--show_sources",
"-s",
is_flag=True,
help="Show sources along with answers (Default is False)",
)
def main(device_type, show_sources):
"""
This function implements the information retrieval task.
1. Loads an embedding model, which can be HuggingFaceInstructEmbeddings or HuggingFaceEmbeddings
2. Loads the existing vectorstore that was created by ingest.py
3. Loads the local LLM using the load_model function - you can now set different LLMs.
4. Sets up the question-answering retrieval chain.
5. Answers questions interactively.
"""
logging.info(f"Running on: {device_type}")
logging.info(f"Display Source Documents set to: {show_sources}")
embeddings = HuggingFaceInstructEmbeddings(model_name=EMBEDDING_MODEL_NAME, model_kwargs={"device": device_type})
# uncomment the following line if you used HuggingFaceEmbeddings in the ingest.py
# embeddings = HuggingFaceEmbeddings(model_name=EMBEDDING_MODEL_NAME)
# load the vectorstore
db = Chroma(
persist_directory=PERSIST_DIRECTORY,
embedding_function=embeddings,
client_settings=CHROMA_SETTINGS,
)
retriever = db.as_retriever()
template = """Use the following pieces of context to answer the question at the end. If you don't know the answer,\
just say that you don't know, don't try to make up an answer.
You are a specialist about a company called Aviate.
You reply always in Brazilian Portuguese.
SEMPRE RESPONDA EM PORTUGUÊS, SEMPRE, EM HIPÓTESE ALGUMA PODE RESPONDER EM QUALQUER OUTRO IDIOMA, APENAS RESPOSTAS EM PORTUGÛES.
{context}
{history}
Question: {question}
Helpful Answer:"""
prompt = PromptTemplate(input_variables=["history", "context", "question"], template=template)
memory = ConversationBufferMemory(input_key="question", memory_key="history")
llm = load_model(device_type, model_id=MODEL_ID, model_basename=MODEL_BASENAME)
qa = RetrievalQA.from_chain_type(
llm=llm,
chain_type="stuff",
retriever=retriever,
return_source_documents=True,
chain_type_kwargs={"prompt": prompt, "memory": memory},
)
# Interactive questions and answers
while True:
query = input("\nEnter a query: ")
if query == "exit":
break
# Get the answer from the chain
res = qa(query)
answer, docs = res["result"], res["source_documents"]
# Print the result
print("\n\n> Question:")
print(query)
print("\n> Answer:")
print(answer)
if show_sources: # this flag controls whether the source documents are printed along with the answer.
# Print the relevant sources used for the answer
print("----------------------------------SOURCE DOCUMENTS---------------------------")
for document in docs:
print("\n> " + document.metadata["source"] + ":")
print(document.page_content)
print("----------------------------------SOURCE DOCUMENTS---------------------------")
if __name__ == "__main__":
logging.basicConfig(
format="%(asctime)s - %(levelname)s - %(filename)s:%(lineno)s - %(message)s", level=logging.INFO
)
main()
| [
"Use the following pieces of context to answer the question at the end. If you don't know the answer, just say that you don't know, don't try to make up an answer.\n You are a specialist about a company called Aviate.\n You reply always in Brazilian Portuguese.\n SEMPRE RESPONDA EM PORTUGUÊS, SEMPRE, EM HIPÓTESE ALGUMA PODE RESPONDER EM QUALQUER OUTRO IDIOMA, APENAS RESPOSTAS EM PORTUGÛES.\n\n {context}\n\n {history}\n Question: {question}\n Helpful Answer:",
"question",
"context",
"t know the answer, just say that you don"
] |
2024-01-10 | luizroddev/localGPT | run_localGPT_API.py | import logging
import os
import shutil
import subprocess
import torch
from auto_gptq import AutoGPTQForCausalLM
from flask import Flask, jsonify, request
from langchain.chains import RetrievalQA
from langchain.embeddings import HuggingFaceInstructEmbeddings
from flask_cors import CORS
from langchain.memory import ConversationBufferMemory
from langchain.prompts import PromptTemplate
# from langchain.embeddings import HuggingFaceEmbeddings
from langchain.llms import HuggingFacePipeline
from run_localGPT import load_model
# from langchain.callbacks.streaming_stdout import StreamingStdOutCallbackHandler
from langchain.vectorstores import Chroma
from transformers import (
AutoModelForCausalLM,
AutoTokenizer,
GenerationConfig,
LlamaForCausalLM,
LlamaTokenizer,
pipeline,
)
from werkzeug.utils import secure_filename
from constants import CHROMA_SETTINGS, EMBEDDING_MODEL_NAME, PERSIST_DIRECTORY, MODEL_ID, MODEL_BASENAME
DEVICE_TYPE = "cuda" if torch.cuda.is_available() else "cpu"
SHOW_SOURCES = True
logging.info(f"Running on: {DEVICE_TYPE}")
logging.info(f"Display Source Documents set to: {SHOW_SOURCES}")
EMBEDDINGS = HuggingFaceInstructEmbeddings(model_name=EMBEDDING_MODEL_NAME, model_kwargs={"device": DEVICE_TYPE})
# uncomment the following line if you used HuggingFaceEmbeddings in the ingest.py
# EMBEDDINGS = HuggingFaceEmbeddings(model_name=EMBEDDING_MODEL_NAME)
if os.path.exists(PERSIST_DIRECTORY):
try:
shutil.rmtree(PERSIST_DIRECTORY)
except OSError as e:
print(f"Error: {e.filename} - {e.strerror}.")
else:
print("The directory does not exist")
run_langest_commands = ["python", "ingest.py"]
if DEVICE_TYPE == "cpu":
run_langest_commands.append("--device_type")
run_langest_commands.append(DEVICE_TYPE)
result = subprocess.run(run_langest_commands, capture_output=True)
if result.returncode != 0:
raise FileNotFoundError(
"No files were found inside SOURCE_DOCUMENTS, please put a starter file inside before starting the API!"
)
# load the vectorstore
DB = Chroma(
persist_directory=PERSIST_DIRECTORY,
embedding_function=EMBEDDINGS,
client_settings=CHROMA_SETTINGS,
)
RETRIEVER = DB.as_retriever()
template = """Use the following pieces of context to answer the question at the end. If you don't know the answer,\
just say that you don't know, don't try to make up an answer.
You always answer the questions in Brazilian Portuguese, never other language.
You are a helpful assistant who knows all about company policies, your employees, deployment processes, rules, FAQ.
If you ever talk about someone in the team of the company, always assign their email address too, example: Lucas Sousa ([email protected]), if you don't know their email, said that you couldn't find their contact information.
{context}
{history}
Question: {question}
Helpful Answer:"""
prompt = PromptTemplate(input_variables=["history", "context", "question"], template=template)
memory = ConversationBufferMemory(input_key="question", memory_key="history")
LLM = load_model(device_type=DEVICE_TYPE, model_id=MODEL_ID, model_basename=MODEL_BASENAME)
QA = RetrievalQA.from_chain_type(
llm=LLM, chain_type="stuff", retriever=RETRIEVER, return_source_documents=SHOW_SOURCES,
chain_type_kwargs={"prompt": prompt, "memory": memory},
)
app = Flask(__name__)
CORS(app, support_credentials=True)
@app.route("/api/delete_source", methods=["GET"])
def delete_source_route():
folder_name = "SOURCE_DOCUMENTS"
if os.path.exists(folder_name):
shutil.rmtree(folder_name)
os.makedirs(folder_name)
return jsonify({"message": f"Folder '{folder_name}' successfully deleted and recreated."})
@app.route("/api/save_document", methods=["GET", "POST"])
def save_document_route():
if "document" not in request.files:
return "No document part", 400
file = request.files["document"]
if file.filename == "":
return "No selected file", 400
if file:
filename = secure_filename(file.filename)
folder_path = "SOURCE_DOCUMENTS"
if not os.path.exists(folder_path):
os.makedirs(folder_path)
file_path = os.path.join(folder_path, filename)
file.save(file_path)
return "File saved successfully", 200
@app.route("/api/run_ingest", methods=["GET"])
def run_ingest_route():
global DB
global RETRIEVER
global QA
try:
if os.path.exists(PERSIST_DIRECTORY):
try:
shutil.rmtree(PERSIST_DIRECTORY)
except OSError as e:
print(f"Error: {e.filename} - {e.strerror}.")
else:
print("The directory does not exist")
run_langest_commands = ["python", "ingest.py"]
if DEVICE_TYPE == "cpu":
run_langest_commands.append("--device_type")
run_langest_commands.append(DEVICE_TYPE)
result = subprocess.run(run_langest_commands, capture_output=True)
if result.returncode != 0:
return "Script execution failed: {}".format(result.stderr.decode("utf-8")), 500
# load the vectorstore
DB = Chroma(
persist_directory=PERSIST_DIRECTORY,
embedding_function=EMBEDDINGS,
client_settings=CHROMA_SETTINGS,
)
RETRIEVER = DB.as_retriever()
QA = RetrievalQA.from_chain_type(
llm=LLM, chain_type="stuff", retriever=RETRIEVER, return_source_documents=SHOW_SOURCES
)
return "Script executed successfully: {}".format(result.stdout.decode("utf-8")), 200
except Exception as e:
return f"Error occurred: {str(e)}", 500
@app.route("/api/prompt_route", methods=["GET", "POST"])
def prompt_route():
global QA
user_prompt = request.form.get("user_prompt")
if user_prompt:
# print(f'User Prompt: {user_prompt}')
# Get the answer from the chain
res = QA(user_prompt)
answer, docs = res["result"], res["source_documents"]
prompt_response_dict = {
"Prompt": user_prompt,
"Answer": answer,
}
prompt_response_dict["Sources"] = []
for document in docs:
prompt_response_dict["Sources"].append(
(os.path.basename(str(document.metadata["source"])), str(document.page_content))
)
return jsonify(prompt_response_dict), 200
else:
return "No user prompt received", 400
if __name__ == "__main__":
logging.basicConfig(
format="%(asctime)s - %(levelname)s - %(filename)s:%(lineno)s - %(message)s", level=logging.INFO
)
app.run(debug=False, port=5110)
| [
"user_prompt",
"question",
"{'Prompt': PLACEHOLDER, 'Answer': PLACEHOLDER}",
"t know their email, said that you couldn",
"Use the following pieces of context to answer the question at the end. If you don't know the answer, just say that you don't know, don't try to make up an answer.\n You always answer the questions in Brazilian Portuguese, never other language.\n You are a helpful assistant who knows all about company policies, your employees, deployment processes, rules, FAQ.\n If you ever talk about someone in the team of the company, always assign their email address too, example: Lucas Sousa ([email protected]), if you don't know their email, said that you couldn't find their contact information.\n\n {context}\n\n {history}\n Question: {question}\n Helpful Answer:",
"t know the answer, just say that you don",
"context"
] |
2024-01-10 | Qucy/LangChainDemo | chainlit~ai_tutor.py | from langchain.chat_models import AzureChatOpenAI
from langchain.prompts import ChatPromptTemplate, MessagesPlaceholder
from langchain.schema import StrOutputParser
from langchain.schema.runnable import Runnable
from langchain.schema.runnable.config import RunnableConfig
from langchain.memory.chat_message_histories import RedisChatMessageHistory
from langchain.schema.chat_history import BaseChatMessageHistory
from langchain.schema.runnable.history import RunnableWithMessageHistory
import chainlit as cl
from chainlit.input_widget import Select
from chainlit.input_widget import Switch
from chainlit.input_widget import Slider
from dotenv import load_dotenv
import os
# Load environment variables from .env file
load_dotenv()
# creating model
model = AzureChatOpenAI(
openai_api_version=os.getenv('OPENAI_API_VERSION'),
azure_deployment="chatGPTAzure",
streaming=True
)
# Descriptions from the dictionary
learning_styles = {
"active": "User chose 'Active'. As an AI Tutor, you should include interactive exercises and real-world problem-solving tasks to actively engage the user.",
"reflective": "User chose 'Reflective'. As an AI Tutor, you should encourage journaling, self-reflection questions, and provide detailed explanations to facilitate deep thinking."
}
tone_styles = {
"encouraging": "User prefers an 'Encouraging' tone. As an AI Tutor, you should offer frequent praise and highlight progress to boost the user's confidence and motivation.",
"formal": "User prefers a 'Formal' tone. As an AI Tutor, you should maintain a professional and structured approach, providing clear and concise information.",
"analytical": "User prefers an 'Analytical' tone. As an AI Tutor, you should focus on logic, data, and critical analysis, presenting facts and encouraging problem-solving.",
"empathetic": "User prefers an 'Empathetic' tone. As an AI Tutor, you should show understanding and sensitivity, using supportive language and adapting to the user's emotional state."
}
learning_levels = {
"beginner": "User is at a 'Beginner' level. As an AI Tutor, you should use simple language, introduce basic concepts, and provide foundational knowledge.",
"intermediate": "User is at an 'Intermediate' level. As an AI Tutor, you should build on basic concepts, introduce more complex ideas, and encourage deeper exploration.",
"advanced": "User is at an 'Advanced' level. As an AI Tutor, you should cover complex topics, promote critical thinking, and encourage independent study and research."
}
# define system prompt
system_prompt = """
Your role is AI Tutor, a personalized learning assistant!
Your goals are:
- Personalized Learning: Adapts responses to match each user's learning style and level.
- Clarity and Accuracy: Provides clear and accurate explanations to ensure understanding.
- Positive Support: Maintains a positive and encouraging tone to motivate learners.
- Interactive Engagement: Includes quizzes and interactive discussions for a dynamic learning experience.
- Resource Recommendations: Suggests additional learning materials to supplement the educational experience.
User profile settings:
{settings}
Your actions:
- when the user inputs the knowledge they want to learn, you should generate the curriculum for them according to the settings above
"""
# default user_settings
default_settings = """
Learning Style: Active learners prefer engagement through hands-on experiences and practical application of knowledge. The AI Tutor should include interactive exercises and real-world problem-solving tasks.
Tone Style: Use a positive and motivational tone. The AI Tutor should offer frequent praise and highlight progress to boost confidence and motivation.
Level: Designed for users new to the subject. The AI Tutor should use simple language, introduce basic concepts, and provide foundational knowledge.
"""
# Redis url for cache
REDIS_URL = "your redis url"
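# A typical value looks like "redis://localhost:6379/0" (adjust host, port and
# database index for your own Redis instance).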
@cl.on_chat_start
async def start():
# setup UI
settings = await cl.ChatSettings(
[
Select(
id="learning_style",
label="Learning style",
values=["Active", "Reflective"],
initial_index=0,
),
Select(
id="tone_style",
label="Tone style",
values=["Encouraging", "Formal", "Analytical", "Empathetic"],
initial_index=0,
),
Select(
id="level",
label="Level",
values=["beginner", "intermediate", "advanced"],
initial_index=0,
),
]
).send()
# setup settings
cl.user_session.set("settings", default_settings)
# creating prompt
prompt = ChatPromptTemplate.from_messages(
[
("system", system_prompt),
MessagesPlaceholder(variable_name="history"),
("human", "{user_input}"),
])
# setup chain
chain = prompt | model | StrOutputParser()
# wrap chain
chain_with_history = RunnableWithMessageHistory(
chain,
lambda session_id: RedisChatMessageHistory(session_id, url=REDIS_URL),
input_messages_key="user_input",
history_messages_key="history",
)
# setup chain_with_history
cl.user_session.set("runnable", chain_with_history)
@cl.on_settings_update
async def setup_agent(settings):
""" call back function when configuration updated
"""
# Extracting the descriptions based on user's choice
learning_style_desc = learning_styles[settings['learning_style'].lower()]
tone_style_desc = tone_styles[settings['tone_style'].lower()]
level_desc = learning_levels[settings['level'].lower()]
# Concatenating the descriptions into a text
description_text = (
f"Learning Style: {learning_style_desc}\n"
f"Tone Style: {tone_style_desc}\n"
f"Level: {level_desc}"
)
# set user settings
cl.user_session.set("settings", description_text)
@cl.on_message
async def send_message(message: cl.Message):
""" handle the message come from UI
"""
# retrieve runnable
runnable = cl.user_session.get("runnable")
# retrieve settings
settings = cl.user_session.get("settings")
# create an empty message to stream the response into
msg = cl.Message(content="")
# stream the response from the model token by token
async for chunk in runnable.astream(
{"user_input": message.content, "settings": settings},
config=RunnableConfig(callbacks=[cl.LangchainCallbackHandler()], configurable={"session_id": cl.user_session.get("id")}),
):
await msg.stream_token(chunk)
await msg.send()
@cl.on_chat_end
def end():
print("goodbye", cl.user_session.get("id"))
| [
"{user_input}",
"human",
"\nYou role is AI Tutor, a personalized learning assistant!\n\nYour goals are:\n- Personalized Learning: Adapts responses to match each user's learning style and level.\n- Clarity and Accuracy: Provides clear and accurate explanations to ensure understanding.\n- Positive Support: Maintains a positive and encouraging tone to motivate learners.\n- Interactive Engagement: Includes quizzes and interactive discussions for a dynamic learning experience.\n- Resource Recommendations: Suggests additional learning materials to supplement the educational experience.\n\nUser profile settings:\n{settings}\n\nYour actions:\n- when user input the knowledge they want to lear, you should generate the curriculum for the users according to settings above\n"
] |
2024-01-10 | microsoft/dp-few-shot-generation | src~dp_few_shot_generation~run_exp_trec.py | # Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.
import asyncio
import math
import re
import sys
import time
import traceback
from collections.abc import Iterable, Set
from typing import Annotated, cast
import aiohttp
import more_itertools
import numpy as np
import openai
import scipy.special
import tqdm
import typer
from datasets import DatasetDict, load_dataset
from lmapi.lm import LM, CompletionsSettings
from lmapi.openai import client_session
from dp_few_shot_generation.lm import (
api_openai_com,
next_logprobs,
normalized_logprobs_for_chosen_tokens,
)
from dp_few_shot_generation.prob_utils import densify, log_max_normalize, log_normalize
DEFAULT_NUM_PRIVATE_TRAIN = 80
DEFAULT_NUM_PUBLIC_TRAIN = 0
DEFAULT_NUM_VALID = 4
DEFAULT_NUM_PRIVATE_TRAIN_SPLITS = 80
DEFAULT_NUM_TEST = 1000
labels = ["Ab", "Entity", "Description", "Person", "Location", "Number"]
label_dict = {
0: ["Ab"],
1: ["Entity"],
2: ["Description"],
3: ["Person"],
4: ["Location"],
5: ["Number"],
}
def format_full_datum_for_prompt(labels, datum: dict[str, str]):
return f'Answer Type: "{labels[datum["coarse_label"]]}"\nText: "{datum["text"] + " END"}"\n'
def format_test_input_for_prompt(labels, test_input: int):
return f'Answer Type: "{labels[test_input]}"\nText: "'
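# For illustration, with a hypothetical datum {"coarse_label": 3, "text": "Who wrote Hamlet ?"}
# format_full_datum_for_prompt produces:
#
#     Answer Type: "Person"
#     Text: "Who wrote Hamlet ? END"
#
# and format_test_input_for_prompt produces the same header followed by an opening
# quote that the model is asked to complete.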
def construct_prompt_same(train_examples, test_example):
prompt = "Classify the questions based on whether their answer type is a Number, Location, Person, Description, Entity, or Abbreviation.\n\n"
for train_example in train_examples:
prompt += "Question: " + train_example["text"] + "\n"
prompt += (
"Answer Type: " + label_dict[train_example["coarse_label"]][0] + "\n\n"
)
prompt += "Question: " + test_example["text"] + "\n"
prompt += "Answer Type:"
return prompt
def complete(prompt, l, model_name, temp=0, num_log_probs=None, echo=False, n=None):
# call GPT-3 API until result is provided and then return it
response = None
received = False
while not received:
try:
response = openai.Completion.create(
engine=model_name,
prompt=prompt,
max_tokens=l,
temperature=temp,
logprobs=num_log_probs,
echo=echo,
stop="\n",
n=n,
)
received = True
except:
error = sys.exc_info()[0]
if (
error == openai.error.InvalidRequestError
): # something is wrong: e.g. prompt too long
print(f"InvalidRequestError\nPrompt passed in:\n\n{prompt}\n\n")
assert False
print("API error:", error)
time.sleep(1)
return response
def chunks(lst, n):
"""Yield successive n-sized chunks from lst."""
for i in range(0, len(lst), n):
yield lst[i : i + n]
def get_model_response(data, test_examples, openai_model):
all_raw_answers = []
prompts = []
train_examples = data
for test_example in test_examples:
prompts.append(construct_prompt_same(train_examples, test_example))
chunked_prompts = list(chunks(prompts, 20))
for test_chunk in chunked_prompts:
response = complete(test_chunk, l=1, model_name=openai_model, num_log_probs=100)
for answer_id, answer in enumerate(response["choices"]):
all_raw_answers.append(answer)
return all_raw_answers
def get_label_probs(all_raw_answers, test_subset):
"""Obtain model's label probability for each of the test examples. The returned prob is NOT normalized"""
num_classes = len(label_dict)
approx = False
assert len(all_raw_answers) == len(test_subset)
# Fill in the labels that is in the top k prob
all_label_probs = []
all_missing_positions = []
cnt = 0
for i, ans in enumerate(all_raw_answers):
try:
top_logprobs = ans["logprobs"]["top_logprobs"][
0
] # [0] since we only ask for complete one more token
except:
cnt += 1 # cnt for corner case
label_probs = [0] * len(label_dict.keys())
for j, label_list in label_dict.items():
all_found = True
for label in label_list: # each possible label correspond to the same class
label = " " + label # notice prompt does not have space after 'A:'
if label in top_logprobs:
label_probs[j] += np.exp(top_logprobs[label])
else:
all_found = False
if not all_found:
position = (i, j) # (which test example, which label)
all_missing_positions.append(position)
all_label_probs.append(label_probs)
all_label_probs = np.array(all_label_probs) # prob not normalized
return all_label_probs # NOT NORMALIZED
def eval_accuracy(all_label_probs, test_labels, mode=None, p_cf=None):
# evaluate the accuracy with and without contextual calibration
num_classes = all_label_probs.shape[1]
if p_cf is None:
# do not calibrate
W = np.identity(num_classes)
b = np.zeros([num_classes, 1])
else:
# calibrate
if mode == "diagonal_W":
W = np.linalg.inv(np.identity(num_classes) * p_cf)
b = np.zeros([num_classes, 1])
elif mode == "identity_W":
W = np.identity(num_classes)
b = -1 * np.expand_dims(p_cf, axis=-1)
else:
assert False
correctness_list = []
assert len(all_label_probs) == len(test_labels)
for label_probs, true_label in zip(all_label_probs, test_labels):
if np.sum(label_probs) > 0: # corner case np.sum(label_probs)=0.
label_probs = label_probs / np.sum(label_probs) # normalize to 1
calibrate_label_probs = np.matmul(W, np.expand_dims(label_probs, axis=-1)) + b
ans_label = np.argmax(calibrate_label_probs)
if ans_label == true_label:
correctness_list.append(1)
else:
correctness_list.append(0)
return np.mean(correctness_list)
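# Contextual calibration in brief: with mode="diagonal_W" the calibrated score is
# W @ p with W = inv(diag(p_cf)), i.e. each label probability is divided by the
# probability the model assigns to that label on content-free input. As a toy
# example (numbers are illustrative only): if p_cf = [0.5, 0.25, 0.25] and an
# uncalibrated prediction is p = [0.4, 0.35, 0.25], the calibrated scores are
# [0.8, 1.4, 1.0], flipping the argmax from label 0 to label 1.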
def get_p_content_free(train_subset, openai_model, content_free_inputs=("N/A",)):
"""Query model with content free input, return its prediction probability for each label"""
all_p_y = []
for content_free_input in content_free_inputs:
prompt = construct_prompt_same(train_subset, content_free_input)
p_y = [0] * len(label_dict)
for i, answers in label_dict.items():
prob = 0
for a in answers:
prob += np.exp(
complete(
prompt + " " + a, 0, openai_model, echo=True, num_log_probs=1
)["choices"][0]["logprobs"]["token_logprobs"][-1]
)
p_y[i] = prob
all_p_y.append(p_y)
p_y = np.mean(np.array(all_p_y), axis=0)
p_y = p_y / np.sum(p_y) # normalize
return p_y
def merge_logprobs_topk_mean(
private_next_logprobs: list[dict[int, float]],
public_next_logprobs: dict[int, float],
n_vocab: int,
no_public_token: bool,
normalize_max: bool,
) -> np.ndarray:
# Compute merged distribution
# logsumexp - np.log(...): compute mean probability of distribution
if normalize_max:
normalize_func = (
log_max_normalize # normalize max probability to 1, Exponential mechanism
)
else:
normalize_func = (
log_normalize # normalize sum probability to 1, Gaussian mechanism
)
if no_public_token:
merged_next_logprobs = scipy.special.logsumexp(
np.stack(
[
# Turn into a 1D tensor of size n_vocab
densify(
n_vocab,
# Normalize distribution
normalize_func(
# Use the top next-token logprobs returned for this private prompt (no public filtering here)
{k: v for k, v in lps.items()}
),
)
for lps in private_next_logprobs
]
),
axis=0,
) - np.log(len(private_next_logprobs))
else:
merged_next_logprobs = scipy.special.logsumexp(
np.stack(
[
# Turn into a 1D tensor of size n_vocab
densify(
n_vocab,
# Normalize distribution
normalize_func(
# Filter to the top 100 most likely next tokens according to the public prompt
{k: v for k, v in lps.items() if k in public_next_logprobs}
),
)
for lps in private_next_logprobs
]
),
axis=0,
) - np.log(len(private_next_logprobs))
merged_next_probs = np.exp(merged_next_logprobs)
return merged_next_probs
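# The logsumexp above is a numerically stable way of averaging the per-split
# next-token distributions: for M splits with (densified) log-probabilities
# logp_1..logp_M, exp(logsumexp(stack(logp_i), axis=0) - log(M)) equals
# (1/M) * sum_i exp(logp_i), i.e. the mean probability assigned to each token.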
async def generate_with_private_prompts(
trainset,
num_private_train,
num_private_train_splits,
instruction,
public_train_prompt: str,
stop_tokens: Set[int],
test_input: int,
lm: LM,
noise_rng: np.random.RandomState,
sigma: float,
labels,
top_p,
no_public_token: bool,
subsample_per_token: bool,
sample_same_label_prompts: bool,
gen_seed: int,
max_tokens: int = 100 - 1,
normalize_max: bool = False,
) -> list[int]:
generated_token_ids: list[int] = []
stringified_test_datum = format_test_input_for_prompt(labels, test_input)
public_prompt = public_train_prompt + stringified_test_datum
public_prompt_tokens = lm.encoding.encode(public_prompt)
assert num_private_train_splits > 0
if sample_same_label_prompts:
select_list = []
for i in range(len(trainset)):
if trainset[i]["coarse_label"] == test_input:
select_list.append(i)
train_subset = trainset.select(select_list, keep_in_memory=True)
else:
train_subset = trainset.select(range(len(trainset)), keep_in_memory=True)
if not subsample_per_token:
private_train_subset = cast(
Iterable[dict[str, str]],
train_subset.shuffle(gen_seed, keep_in_memory=True).select(
range(num_private_train), keep_in_memory=True
),
)
private_train_splits = [
list(it)
for it in more_itertools.distribute(
num_private_train_splits, private_train_subset
)
]
private_train_prompts = [
instruction
+ "\n".join(format_full_datum_for_prompt(labels, datum) for datum in split)
for split in private_train_splits
]
private_prompts = [
train_prompt + "\n" + stringified_test_datum
for train_prompt in private_train_prompts
]
private_prompts_tokens = [
lm.encoding.encode(prompt) for prompt in private_prompts
]
cnt = 0
for _ in tqdm.tqdm(range(max_tokens), total=float("inf"), unit=" tokens generated"):
private_next_logprobs: list[dict[int, float]]
public_next_logprobs: dict[int, float]
# Split training dataset
if subsample_per_token:
private_train_subset = cast(
Iterable[dict[str, str]],
train_subset.shuffle(gen_seed + cnt, keep_in_memory=True).select(
range(num_private_train), keep_in_memory=True
),
)
cnt += 1
private_train_splits = [
list(it)
for it in more_itertools.distribute(
num_private_train_splits, private_train_subset
)
]
# Turn the data into prompts
private_train_prompts = [
instruction
+ "\n".join(
format_full_datum_for_prompt(labels, datum) for datum in split
)
for split in private_train_splits
]
private_prompts = [
train_prompt + "\n" + stringified_test_datum
for train_prompt in private_train_prompts
]
private_prompts_tokens = [
lm.encoding.encode(prompt) for prompt in private_prompts
]
if no_public_token:
private_next_logprobs = await asyncio.gather(
*(
next_logprobs(lm, prompt + generated_token_ids, top_p=top_p)
for prompt in private_prompts_tokens
)
)
merged_next_probs = merge_logprobs_topk_mean(
private_next_logprobs,
None,
lm.encoding.n_vocab,
no_public_token,
normalize_max,
)
if normalize_max:
# scale = 1/lambda
noise = noise_rng.exponential(scale=sigma, size=lm.encoding.n_vocab)
else:
noise = noise_rng.normal(0, sigma, size=lm.encoding.n_vocab)
merged_next_probs += noise
else:
public_next_logprobs = await next_logprobs(
lm, public_prompt_tokens + generated_token_ids, top_p=top_p
)
private_next_logprobs = await asyncio.gather(
*(
normalized_logprobs_for_chosen_tokens(
lm,
prompt + generated_token_ids,
public_next_logprobs.keys(),
top_p=top_p,
)
for prompt in private_prompts_tokens
)
)
merged_next_probs = merge_logprobs_topk_mean(
private_next_logprobs,
public_next_logprobs,
lm.encoding.n_vocab,
no_public_token,
normalize_max,
)
if normalize_max:
# scale = 1/lambda
noise = noise_rng.exponential(
scale=sigma, size=len(public_next_logprobs)
)
else:
noise = noise_rng.normal(0, sigma, size=len(public_next_logprobs))
merged_next_probs[list(public_next_logprobs.keys())] += noise
next_token_id = int(np.argmax(merged_next_probs))
if next_token_id in stop_tokens:
break
generated_token_ids.append(next_token_id)
del next_token_id
return generated_token_ids
async def generate_with_public_prompt(
public_train_prompt: str,
stop_tokens: Set[str],
test_input: str,
lm: LM,
labels,
max_tokens: int = 500,
) -> list[int]:
public_prompt = public_train_prompt + format_test_input_for_prompt(
labels, test_input
)
public_prompt_tokens = lm.encoding.encode(public_prompt)
public_prompt_tokens = public_prompt
[completion] = await lm.completions(
public_prompt_tokens,
CompletionsSettings(
temperature=0.0, max_tokens=max_tokens, n=1, stop=list(stop_tokens)
),
)
generated_tokens = [st.token.token_id for st in completion]
return generated_tokens
def select_uniform_n_shots_over_labels(data, n_shots):
select_list = []
n_shots_per_label = math.ceil(n_shots / len(labels))
labels_counter = {label[1][0]: n_shots_per_label for label in label_dict.items()}
n_shots_selected = 0
for i in range(len(data)):
label = label_dict[data[i]["coarse_label"]][0]
if labels_counter[label] == 0 or data[i]["coarse_label"] == 0:
continue
else:
labels_counter[label] -= 1
select_list.append(i)
n_shots_selected += 1
if n_shots_selected == n_shots:
break
query_subset = data.select(select_list, keep_in_memory=True)
return query_subset
def _main(
sigma: Annotated[float, typer.Option()], # noise parameters
openai_model: Annotated[str, typer.Option()] = "babbage",
print_prompts: Annotated[bool, typer.Option()] = False,
# num_private_train=MN. MN=0 with num_valid=4 will get epsilon=0 (4-shot) results.
num_private_train: Annotated[int, typer.Option()] = DEFAULT_NUM_PRIVATE_TRAIN,
# by default set to 0. set_num_public_train >0 indicates additional public data available.
set_num_public_train: Annotated[int, typer.Option()] = DEFAULT_NUM_PUBLIC_TRAIN,
# num_valid=n. n samples to be generated for n-shot ICL
num_valid: Annotated[int, typer.Option()] = DEFAULT_NUM_VALID,
# num_private_train_splits=M
num_private_train_splits: Annotated[
int, typer.Option()
] = DEFAULT_NUM_PRIVATE_TRAIN_SPLITS,
num_test: Annotated[int, typer.Option()] = DEFAULT_NUM_TEST,
# no_public_token=True, RVP=False; no_public_token=False, RVP=True
no_public_token: Annotated[bool, typer.Option()] = False,
# subsample_per_token=True: at each token generation, subsample a new test set
subsample_per_token: Annotated[bool, typer.Option()] = False,
use_dp_prompts: Annotated[bool, typer.Option()] = False,
# sample_same_label_prompts=True: sample subsets from the sets with same targeted labels.
sample_same_label_prompts: Annotated[bool, typer.Option()] = False,
# normalize_max=True, Exponential mechanism; normalize_max=False, Gaussian mechanism
normalize_max: Annotated[bool, typer.Option()] = False,
# max_token_per_text=T_max
max_token_per_text: Annotated[int, typer.Option()] = 15,
# consistent with default parameters in the documentation https://learn.microsoft.com/en-us/azure/cognitive-services/openai/reference#completions
top_p: Annotated[float, typer.Option()] = 1,
# random seed for subsampling in generation
synth_seed: Annotated[int, typer.Option()] = 0,
# random seed for n-shot demonstrations sampling in evaluation
eval_seed: Annotated[int, typer.Option()] = 0,
):
async def main():
if (num_private_train == 0) != (num_private_train_splits == 0):
raise ValueError(
"Either both or neither of --num-private-train and --num-private-train-splits can be 0"
)
command = ["python", sys.argv[0]]
for x in sys.argv[1:]:
if x.startswith("--"):
assert '"' not in x and "'" not in x
command.append(x)
else:
assert "'" not in x
if re.match("^[a-zA-Z0-9_]+$", x):
command.append("%s" % x)
else:
command.append("'%s'" % x)
command = " ".join(command)
print(command)
if no_public_token:
num_public_train = 0
else:
num_public_train = set_num_public_train
lm = api_openai_com(openai_model)
noise_rng = np.random.RandomState()
data = cast(DatasetDict, load_dataset("trec"))
print(labels)
trainset = data["train"].shuffle(seed=synth_seed, keep_in_memory=True)
print("trainset length", len(trainset))
if num_public_train > 0:
public_train_subset = cast(
Iterable[dict[str, str]],
trainset.select(
range(
len(trainset) - num_public_train,
len(trainset),
keep_in_memory=True,
)
),
)
else:
public_train_subset = []
trainset = trainset.select(
range(len(trainset) - num_public_train), keep_in_memory=True
)
queryset = data["train"].shuffle(seed=eval_seed, keep_in_memory=True)
query_subset = select_uniform_n_shots_over_labels(queryset, num_valid)
if use_dp_prompts:
synthetic_examples = []
# Turn the data into prompts
instruction = "Given a label of answer type, generate a question based on the given answer type accordingly.\n\n"
public_train_prompt = instruction + "\n".join(
format_full_datum_for_prompt(labels, datum)
for datum in public_train_subset
)
if print_prompts:
print(public_train_prompt)
print("=========")
if normalize_max:
print("Exponential Mechanism")
assert num_private_train == 0 or sigma > 0
if num_private_train > 0:
# scale == sigma_calib == 1/lambda. lambda for exponential distribution.
sigma_calib = (2 / num_private_train_splits) * (1 / sigma)
else:
print("Gaussian Mechanism")
if num_private_train_splits > 0:
sigma_calib = math.sqrt(2) / num_private_train_splits * sigma
else:
sigma_calib = 0
print(
f"sigma in command {sigma}. sigma added according to sensitivity {sigma_calib}"
)
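# As a concrete illustration of the scaling above (example values, not a
# recommendation): with the Gaussian mechanism, num_private_train_splits = 80 and
# --sigma 1.0 give sigma_calib = sqrt(2) / 80, about 0.0177, the standard deviation
# of the noise added to each averaged token probability; with the exponential
# mechanism the same settings give a scale of (2 / 80) * (1 / 1.0) = 0.025.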
stop_tokens = {"\n", "<|endoftext|>", " END"}
stop_tokens_ids = {lm.encoding.encode_single_token(t) for t in stop_tokens}
client_session.set(aiohttp.ClientSession())
len_token = []
async with client_session.get():
for i, test_datum in enumerate(query_subset, 1):
print(f"# Example {i}")
print(f'Answer Type: "{labels[test_datum["coarse_label"]]}"')
print(f'References:\n "{test_datum["text"]}"')
np.random.seed(synth_seed + i)
gen_seed = np.random.randint(100000)
print(f"gen-seed: {gen_seed}")
if num_private_train_splits > 0:
generated_token_ids = await generate_with_private_prompts(
trainset,
num_private_train,
num_private_train_splits,
instruction,
public_train_prompt,
stop_tokens_ids,
test_datum["coarse_label"],
lm,
noise_rng,
sigma_calib,
labels,
top_p,
no_public_token,
subsample_per_token,
sample_same_label_prompts,
gen_seed,
max_tokens=max_token_per_text
- 1, # need one token length for EOS.
normalize_max=normalize_max,
)
else:
generated_token_ids = await generate_with_public_prompt(
public_train_prompt,
stop_tokens,
test_datum["coarse_label"],
lm,
labels,
max_tokens=max_token_per_text,
)
generated = lm.encoding.decode(generated_token_ids).rstrip('"')
print(f"Generated: {generated}\n")
output_datum = {}
output_datum["text"] = generated.strip()
output_datum["coarse_label"] = test_datum["coarse_label"]
synthetic_examples.append(output_datum)
if num_test > 0:
test_subset = data["test"]
test_labels = [test_example["coarse_label"] for test_example in test_subset]
content_free_inputs = [{"text": "N/A"}, {"text": ""}, {"text": "[MASK]"}]
p_cf_wout_DP = get_p_content_free(
query_subset, openai_model, content_free_inputs=content_free_inputs
)
all_raw_answers_wout_DP = get_model_response(
query_subset, test_subset, openai_model
)
all_label_probs_wout_DP = get_label_probs(
all_raw_answers_wout_DP, test_subset
)
acc_original_wout_DP = eval_accuracy(all_label_probs_wout_DP, test_labels)
acc_calibrated_wout_DP = eval_accuracy(
all_label_probs_wout_DP,
test_labels,
mode="diagonal_W",
p_cf=p_cf_wout_DP,
)
print(f"Accuracy (original) without DP: {acc_original_wout_DP}")
print(f"Accuracy (calibrated) without DP: {acc_calibrated_wout_DP}")
if use_dp_prompts:
p_cf_w_DP = get_p_content_free(
synthetic_examples,
openai_model,
content_free_inputs=content_free_inputs,
)
all_raw_answers_w_DP = get_model_response(
synthetic_examples, test_subset, openai_model
)
all_label_probs_w_DP = get_label_probs(
all_raw_answers_w_DP, test_subset
)
acc_original_w_DP = eval_accuracy(all_label_probs_w_DP, test_labels)
acc_calibrated_w_DP = eval_accuracy(
all_label_probs_w_DP, test_labels, mode="diagonal_W", p_cf=p_cf_w_DP
)
print(f"Accuracy (original) with DP: {acc_original_w_DP}")
print(f"Accuracy (calibrated) with DP: {acc_calibrated_w_DP}")
try:
asyncio.run(main())
except KeyboardInterrupt:
traceback.print_exc()
raise
if __name__ == "__main__":
typer.run(_main)
| [
"[]",
"\n",
"PLACEHOLDERPLACEHOLDER",
"Question: PLACEHOLDER\n",
"Answer Type:",
"Classify the questions based on whether their answer type is a Number, Location, Person, Description, Entity, or Abbreviation.\n\n",
"Answer Type: P\n\n"
] |
2024-01-10 | microsoft/dp-few-shot-generation | src~dp_few_shot_generation~run_exp_agnews.py | # Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.
import asyncio
import math
import re
import sys
import time
import traceback
from collections.abc import Iterable, Set
from typing import Annotated, cast
import aiohttp
import more_itertools
import numpy as np
import openai
import scipy.special
import tqdm
import typer
from datasets import DatasetDict, load_dataset
from lmapi.lm import LM, CompletionsSettings
from lmapi.openai import client_session
from dp_few_shot_generation.lm import (
api_openai_com,
next_logprobs,
normalized_logprobs_for_chosen_tokens,
)
from dp_few_shot_generation.prob_utils import densify, log_max_normalize, log_normalize
DEFAULT_NUM_PRIVATE_TRAIN = 20
DEFAULT_NUM_PUBLIC_TRAIN = 0
DEFAULT_NUM_VALID = 4
DEFAULT_NUM_PRIVATE_TRAIN_SPLITS = 10
DEFAULT_NUM_TEST = 1000
labels = ["World", "Sport", "Business", "Technology"]
label_dict = {0: ["World"], 1: ["Sports"], 2: ["Business"], 3: ["Technology"]}
def format_full_datum_for_prompt(labels, datum: dict[str, str]):
return f'News Type: "{labels[datum["label"]]}"\nText: "{datum["text"] + " END"}"\n'
def format_test_input_for_prompt(labels, test_input: int):
return f'News Type: "{labels[test_input]}"\nText: "'
def construct_prompt_same(train_examples, test_example):
prompt = "Classify the news articles into the categories of World, Sports, Business, and Technology.\n\n"
for train_example in train_examples:
prompt += "Article: " + train_example["text"] + "\n"
prompt += "Answer: " + label_dict[train_example["label"]][0] + "\n\n"
prompt += "Article: " + test_example["text"] + "\n"
prompt += "Answer:"
return prompt
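# For illustration, with one hypothetical training example the prompt built above looks
# like (article text shortened):
#
#     Classify the news articles into the categories of World, Sports, Business, and Technology.
#
#     Article: Wall St. closes higher as oil prices ease ...
#     Answer: Business
#
#     Article: <test article text>
#     Answer:
#
# The model is then asked to complete a single token after "Answer:".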
def complete(prompt, l, model_name, temp=0, num_log_probs=None, echo=False, n=None):
# call GPT-3 API until result is provided and then return it
response = None
received = False
while not received:
try:
response = openai.Completion.create(
engine=model_name,
prompt=prompt,
max_tokens=l,
temperature=temp,
logprobs=num_log_probs,
echo=echo,
stop="\n",
n=n,
)
received = True
except:
error = sys.exc_info()[0]
if (
error == openai.error.InvalidRequestError
): # something is wrong: e.g. prompt too long
print(f"InvalidRequestError\nPrompt passed in:\n\n{prompt}\n\n")
assert False
print("API error:", error)
time.sleep(1)
return response
def chunks(lst, n):
"""Yield successive n-sized chunks from lst."""
for i in range(0, len(lst), n):
yield lst[i : i + n]
def get_model_response(data, test_examples, openai_model):
all_raw_answers = []
prompts = []
train_examples = data
for test_example in test_examples:
prompts.append(construct_prompt_same(train_examples, test_example))
chunked_prompts = list(chunks(prompts, 20))
for test_chunk in chunked_prompts:
response = complete(test_chunk, l=1, model_name=openai_model, num_log_probs=100)
for answer_id, answer in enumerate(response["choices"]):
all_raw_answers.append(answer)
return all_raw_answers
def get_label_probs(all_raw_answers, test_subset):
"""Obtain model's label probability for each of the test examples. The returned prob is NOT normalized"""
num_classes = len(label_dict)
approx = False
assert len(all_raw_answers) == len(test_subset)
# Fill in the labels that is in the top k prob
all_label_probs = []
all_missing_positions = []
cnt = 0
for i, ans in enumerate(all_raw_answers):
try:
top_logprobs = ans["logprobs"]["top_logprobs"][
0
] # [0] since we only ask for complete one more token
except:
cnt += 1 # cnt for corner case
label_probs = [0] * len(label_dict.keys())
for j, label_list in label_dict.items():
all_found = True
for label in label_list: # each possible label correspond to the same class
label = " " + label # notice prompt does not have space after 'A:'
if label in top_logprobs:
label_probs[j] += np.exp(top_logprobs[label])
else:
all_found = False
if not all_found:
position = (i, j) # (which test example, which label)
all_missing_positions.append(position)
all_label_probs.append(label_probs)
all_label_probs = np.array(all_label_probs) # prob not normalized
return all_label_probs # NOT NORMALIZED
def eval_accuracy(all_label_probs, test_labels, mode=None, p_cf=None):
# evaluate the accuracy with and without contextual calibration
num_classes = all_label_probs.shape[1]
if p_cf is None:
# do not calibrate
W = np.identity(num_classes)
b = np.zeros([num_classes, 1])
else:
# calibrate
if mode == "diagonal_W":
W = np.linalg.inv(np.identity(num_classes) * p_cf)
b = np.zeros([num_classes, 1])
elif mode == "identity_W":
W = np.identity(num_classes)
b = -1 * np.expand_dims(p_cf, axis=-1)
else:
assert False
correctness_list = []
assert len(all_label_probs) == len(test_labels)
for label_probs, true_label in zip(all_label_probs, test_labels):
if np.sum(label_probs) > 0: # corner case np.sum(label_probs)=0.
label_probs = label_probs / np.sum(label_probs) # normalize to 1
calibrate_label_probs = np.matmul(W, np.expand_dims(label_probs, axis=-1)) + b
ans_label = np.argmax(calibrate_label_probs)
if ans_label == true_label:
correctness_list.append(1)
else:
correctness_list.append(0)
return np.mean(correctness_list)
def get_p_content_free(train_subset, openai_model, content_free_inputs=("N/A",)):
"""Query model with content free input, return its prediction probability for each label"""
all_p_y = []
for content_free_input in content_free_inputs:
prompt = construct_prompt_same(train_subset, content_free_input)
p_y = [0] * len(label_dict)
for i, answers in label_dict.items():
prob = 0
for a in answers:
prob += np.exp(
complete(
prompt + " " + a, 0, openai_model, echo=True, num_log_probs=1
)["choices"][0]["logprobs"]["token_logprobs"][-1]
)
p_y[i] = prob
all_p_y.append(p_y)
p_y = np.mean(np.array(all_p_y), axis=0)
p_y = p_y / np.sum(p_y) # normalize
return p_y
def merge_logprobs_topk_mean(
private_next_logprobs: list[dict[int, float]],
public_next_logprobs: dict[int, float],
n_vocab: int,
no_public_token: bool,
normalize_max: bool,
) -> np.ndarray:
# Compute merged distribution
# logsumexp - np.log(...): compute mean probability of distribution
if normalize_max:
normalize_func = (
log_max_normalize # normalize max probability to 1, Exponential mechanism
)
else:
normalize_func = (
log_normalize # normalize sum probability to 1, Gaussian mechanism
)
if no_public_token:
merged_next_logprobs = scipy.special.logsumexp(
np.stack(
[
# Turn into a 1D tensor of size n_vocab
densify(
n_vocab,
# Normalize distribution
normalize_func(
# Use the top next-token logprobs returned for this private prompt (no public filtering here)
{k: v for k, v in lps.items()}
),
)
for lps in private_next_logprobs
]
),
axis=0,
) - np.log(len(private_next_logprobs))
else:
merged_next_logprobs = scipy.special.logsumexp(
np.stack(
[
# Turn into a 1D tensor of size n_vocab
densify(
n_vocab,
# Normalize distribution
normalize_func(
# Filter to the top 100 most likely next tokens according to the public prompt
{k: v for k, v in lps.items() if k in public_next_logprobs}
),
)
for lps in private_next_logprobs
]
),
axis=0,
) - np.log(len(private_next_logprobs))
merged_next_probs = np.exp(merged_next_logprobs)
return merged_next_probs
async def generate_with_private_prompts(
trainset,
num_private_train,
num_private_train_splits,
instruction,
public_train_prompt: str,
stop_tokens: Set[int],
test_input: int,
lm: LM,
noise_rng: np.random.RandomState,
sigma: float,
labels,
top_p,
no_public_token: bool,
subsample_per_token: bool,
sample_same_label_prompts: bool,
gen_seed: int,
max_tokens: int = 100 - 1,
normalize_max: bool = False,
) -> list[int]:
generated_token_ids: list[int] = []
stringified_test_datum = format_test_input_for_prompt(labels, test_input)
public_prompt = public_train_prompt + stringified_test_datum
public_prompt_tokens = lm.encoding.encode(public_prompt)
assert num_private_train_splits > 0
if sample_same_label_prompts:
select_list = []
for i in range(len(trainset)):
if trainset[i]["label"] == test_input:
select_list.append(i)
train_subset = trainset.select(select_list, keep_in_memory=True)
else:
train_subset = trainset.select(range(len(trainset)), keep_in_memory=True)
if not subsample_per_token:
private_train_subset = cast(
Iterable[dict[str, str]],
train_subset.shuffle(gen_seed, keep_in_memory=True).select(
range(num_private_train), keep_in_memory=True
),
)
private_train_splits = [
list(it)
for it in more_itertools.distribute(
num_private_train_splits, private_train_subset
)
]
private_train_prompts = [
instruction
+ "\n".join(format_full_datum_for_prompt(labels, datum) for datum in split)
for split in private_train_splits
]
private_prompts = [
train_prompt + "\n" + stringified_test_datum
for train_prompt in private_train_prompts
]
private_prompts_tokens = [
lm.encoding.encode(prompt) for prompt in private_prompts
]
cnt = 0
for _ in tqdm.tqdm(range(max_tokens), total=float("inf"), unit=" tokens generated"):
private_next_logprobs: list[dict[int, float]]
public_next_logprobs: dict[int, float]
# Split training dataset
if subsample_per_token:
private_train_subset = cast(
Iterable[dict[str, str]],
train_subset.shuffle(gen_seed + cnt, keep_in_memory=True).select(
range(num_private_train), keep_in_memory=True
),
)
cnt += 1
private_train_splits = [
list(it)
for it in more_itertools.distribute(
num_private_train_splits, private_train_subset
)
]
# Turn the data into prompts
private_train_prompts = [
instruction
+ "\n".join(
format_full_datum_for_prompt(labels, datum) for datum in split
)
for split in private_train_splits
]
private_prompts = [
train_prompt + "\n" + stringified_test_datum
for train_prompt in private_train_prompts
]
private_prompts_tokens = [
lm.encoding.encode(prompt) for prompt in private_prompts
]
if no_public_token:
private_next_logprobs = await asyncio.gather(
*(
next_logprobs(lm, prompt + generated_token_ids, top_p=top_p)
for prompt in private_prompts_tokens
)
)
merged_next_probs = merge_logprobs_topk_mean(
private_next_logprobs,
None,
lm.encoding.n_vocab,
no_public_token,
normalize_max,
)
if normalize_max:
# scale = 1/lambda
noise = noise_rng.exponential(scale=sigma, size=lm.encoding.n_vocab)
else:
noise = noise_rng.normal(0, sigma, size=lm.encoding.n_vocab)
merged_next_probs += noise
else:
public_next_logprobs = await next_logprobs(
lm, public_prompt_tokens + generated_token_ids, top_p=top_p
)
private_next_logprobs = await asyncio.gather(
*(
normalized_logprobs_for_chosen_tokens(
lm,
prompt + generated_token_ids,
public_next_logprobs.keys(),
top_p=top_p,
)
for prompt in private_prompts_tokens
)
)
merged_next_probs = merge_logprobs_topk_mean(
private_next_logprobs,
public_next_logprobs,
lm.encoding.n_vocab,
no_public_token,
normalize_max,
)
if normalize_max:
# scale = 1/lambda
noise = noise_rng.exponential(
scale=sigma, size=len(public_next_logprobs)
)
else:
noise = noise_rng.normal(0, sigma, size=len(public_next_logprobs))
merged_next_probs[list(public_next_logprobs.keys())] += noise
next_token_id = int(np.argmax(merged_next_probs))
if next_token_id in stop_tokens:
break
generated_token_ids.append(next_token_id)
del next_token_id
return generated_token_ids
async def generate_with_public_prompt(
public_train_prompt: str,
stop_tokens: Set[str],
test_input: str,
lm: LM,
labels,
max_tokens: int = 500,
) -> list[int]:
public_prompt = public_train_prompt + format_test_input_for_prompt(
labels, test_input
)
public_prompt_tokens = lm.encoding.encode(public_prompt)
public_prompt_tokens = public_prompt
[completion] = await lm.completions(
public_prompt_tokens,
CompletionsSettings(
temperature=0.0, max_tokens=max_tokens, n=1, stop=list(stop_tokens)
),
)
generated_tokens = [st.token.token_id for st in completion]
return generated_tokens
def select_uniform_n_shots_over_labels(data, n_shots):
select_list = []
n_shots_per_label = math.ceil(n_shots / len(labels))
labels_counter = {label[1][0]: n_shots_per_label for label in label_dict.items()}
n_shots_selected = 0
for i in range(len(data)):
label = label_dict[data[i]["label"]][0]
if labels_counter[label] == 0:
continue
else:
labels_counter[label] -= 1
select_list.append(i)
n_shots_selected += 1
if n_shots_selected == n_shots:
break
query_subset = data.select(select_list, keep_in_memory=True)
return query_subset
def _main(
sigma: Annotated[float, typer.Option()], # noise parameters
openai_model: Annotated[str, typer.Option()] = "babbage",
print_prompts: Annotated[bool, typer.Option()] = False,
# num_private_train=MN. MN=0 with num_valid=4 will get epsilon=0 (4-shot) results.
num_private_train: Annotated[int, typer.Option()] = DEFAULT_NUM_PRIVATE_TRAIN,
# by default set to 0. set_num_public_train >0 indicates additional public data available.
set_num_public_train: Annotated[int, typer.Option()] = DEFAULT_NUM_PUBLIC_TRAIN,
# num_valid=n. n samples to be generated for n-shot ICL
num_valid: Annotated[int, typer.Option()] = DEFAULT_NUM_VALID,
# num_private_train_splits=M
num_private_train_splits: Annotated[
int, typer.Option()
] = DEFAULT_NUM_PRIVATE_TRAIN_SPLITS,
num_test: Annotated[int, typer.Option()] = DEFAULT_NUM_TEST,
# no_public_token=True, RVP=False; no_public_token=False, RVP=True
no_public_token: Annotated[bool, typer.Option()] = False,
# subsample_per_token=True: at each token generation, subsample a new test set
subsample_per_token: Annotated[bool, typer.Option()] = False,
use_dp_prompts: Annotated[bool, typer.Option()] = False,
# sample_same_label_prompts=True: sample subsets from the sets with same targeted labels.
sample_same_label_prompts: Annotated[bool, typer.Option()] = False,
# normalize_max=True, Exponential mechanism; normalize_max=False, Gaussian mechanism
normalize_max: Annotated[bool, typer.Option()] = False,
# max_token_per_text=T_max
max_token_per_text: Annotated[int, typer.Option()] = 100,
# consistent with default parameters in the documentation https://learn.microsoft.com/en-us/azure/cognitive-services/openai/reference#completions
top_p: Annotated[float, typer.Option()] = 1,
# random seed for subsampling in generation
synth_seed: Annotated[int, typer.Option()] = 0,
# random seed for n-shot demonstrations sampling in evaluation
eval_seed: Annotated[int, typer.Option()] = 0,
):
async def main():
if (num_private_train == 0) != (num_private_train_splits == 0):
raise ValueError(
"Either both or neither of --num-private-train and --num-private-train-splits can be 0"
)
command = ["python", sys.argv[0]]
for x in sys.argv[1:]:
if x.startswith("--"):
assert '"' not in x and "'" not in x
command.append(x)
else:
assert "'" not in x
if re.match("^[a-zA-Z0-9_]+$", x):
command.append("%s" % x)
else:
command.append("'%s'" % x)
command = " ".join(command)
print(command)
if no_public_token:
num_public_train = 0
else:
num_public_train = set_num_public_train
lm = api_openai_com(openai_model)
noise_rng = np.random.RandomState()
data = cast(DatasetDict, load_dataset("ag_news"))
print(labels)
trainset = data["train"].shuffle(seed=synth_seed, keep_in_memory=True)
print("trainset length", len(trainset))
if num_public_train > 0:
public_train_subset = cast(
Iterable[dict[str, str]],
trainset.select(
range(
len(trainset) - num_public_train,
len(trainset),
keep_in_memory=True,
)
),
)
else:
public_train_subset = []
trainset = trainset.select(
range(len(trainset) - num_public_train), keep_in_memory=True
)
queryset = data["train"].shuffle(seed=eval_seed, keep_in_memory=True)
query_subset = select_uniform_n_shots_over_labels(queryset, num_valid)
if use_dp_prompts:
synthetic_examples = []
# Turn the data into prompts
instruction = "Given a label of news type, generate the chosen type of news accordingly.\n\n"
public_train_prompt = instruction + "\n".join(
format_full_datum_for_prompt(labels, datum)
for datum in public_train_subset
)
if print_prompts:
print(public_train_prompt)
print("=========")
if normalize_max:
print("Exponential Mechanism")
assert num_private_train == 0 or sigma > 0
if num_private_train > 0:
# scale == sigma_calib == 1/lambda. lambda for exponential distribution.
sigma_calib = (2 / num_private_train_splits) * (1 / sigma)
else:
print("Gaussian Mechanism")
if num_private_train_splits > 0:
sigma_calib = math.sqrt(2) / num_private_train_splits * sigma
else:
sigma_calib = 0
print(
f"sigma in command {sigma}. sigma added according to sensitivity {sigma_calib}"
)
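            # Worked example (illustrative numbers, not from the experiments): with M =
            # num_private_train_splits = 20 and sigma = 1.0, the Gaussian branch adds noise with
            # standard deviation sqrt(2)/20 * 1.0, roughly 0.071, which is consistent with one
            # example moving the averaged probability vector by at most sqrt(2)/M in L2 norm;
            # the exponential branch uses scale (2/M) * (1/sigma) for the analogous L1-style bound.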
stop_tokens = {"\n", "<|endoftext|>", " END"}
stop_tokens_ids = {lm.encoding.encode_single_token(t) for t in stop_tokens}
client_session.set(aiohttp.ClientSession())
async with client_session.get():
for i, test_datum in enumerate(query_subset, 1):
print(f"# Example {i}")
print(f'News Type: "{labels[test_datum["label"]]}"')
print(f'References:\n "{test_datum["text"]}"')
np.random.seed(synth_seed + i)
gen_seed = np.random.randint(100000)
print(f"gen-seed: {gen_seed}")
if num_private_train_splits > 0:
generated_token_ids = await generate_with_private_prompts(
trainset,
num_private_train,
num_private_train_splits,
instruction,
public_train_prompt,
stop_tokens_ids,
test_datum["label"],
lm,
noise_rng,
sigma_calib,
labels,
top_p,
no_public_token,
subsample_per_token,
sample_same_label_prompts,
gen_seed,
max_tokens=max_token_per_text
- 1, # need one token length for EOS.
normalize_max=normalize_max,
)
else:
generated_token_ids = await generate_with_public_prompt(
public_train_prompt,
stop_tokens,
test_datum["label"],
lm,
labels,
max_tokens=max_token_per_text,
)
generated = lm.encoding.decode(generated_token_ids).rstrip('"')
print(f"Generated: {generated}\n")
output_datum = {}
output_datum["text"] = generated.strip()
output_datum["label"] = test_datum["label"]
synthetic_examples.append(output_datum)
if num_test > 0:
test_subset = (
data["test"]
.shuffle(seed=12345, keep_in_memory=True)
.select(range(num_test), keep_in_memory=True)
)
test_labels = [test_example["label"] for test_example in test_subset]
content_free_inputs = [{"text": "N/A"}, {"text": ""}, {"text": "[MASK]"}]
p_cf_wout_DP = get_p_content_free(
query_subset, openai_model, content_free_inputs=content_free_inputs
)
all_raw_answers_wout_DP = get_model_response(
query_subset, test_subset, openai_model
)
all_label_probs_wout_DP = get_label_probs(
all_raw_answers_wout_DP, test_subset
)
acc_original_wout_DP = eval_accuracy(all_label_probs_wout_DP, test_labels)
acc_calibrated_wout_DP = eval_accuracy(
all_label_probs_wout_DP,
test_labels,
mode="diagonal_W",
p_cf=p_cf_wout_DP,
)
print(f"Accuracy (original) without DP: {acc_original_wout_DP}")
print(f"Accuracy (calibrated) without DP: {acc_calibrated_wout_DP}")
if use_dp_prompts:
p_cf_w_DP = get_p_content_free(
synthetic_examples,
openai_model,
content_free_inputs=content_free_inputs,
)
all_raw_answers_w_DP = get_model_response(
synthetic_examples, test_subset, openai_model
)
all_label_probs_w_DP = get_label_probs(
all_raw_answers_w_DP, test_subset
)
acc_original_w_DP = eval_accuracy(all_label_probs_w_DP, test_labels)
acc_calibrated_w_DP = eval_accuracy(
all_label_probs_w_DP, test_labels, mode="diagonal_W", p_cf=p_cf_w_DP
)
print(f"Accuracy (original) with DP: {acc_original_w_DP}")
print(f"Accuracy (calibrated) with DP: {acc_calibrated_w_DP}")
try:
asyncio.run(main())
except KeyboardInterrupt:
traceback.print_exc()
raise
if __name__ == "__main__":
typer.run(_main)
| [
"\n",
"Classify the news articles into the categories of World, Sports, Business, and Technology.\n\n",
"PLACEHOLDERPLACEHOLDER",
"Article: PLACEHOLDER\n",
"Answer:",
"Answer: P\n\n",
"[]"
] |
2024-01-10 | microsoft/dp-few-shot-generation | src~dp_few_shot_generation~run_exp_movie.py | # Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.
import asyncio
import math
import re
import sys
import time
import traceback
from collections.abc import Iterable, Set
from typing import Annotated, cast
import aiohttp
import more_itertools
import numpy as np
import openai
import scipy.special
import tqdm
import typer
from datasets import DatasetDict, load_dataset
from lmapi.lm import LM, CompletionsSettings
from lmapi.openai import client_session
from dp_few_shot_generation.lm import (
api_openai_com,
next_logprobs,
normalized_logprobs_for_chosen_tokens,
)
from dp_few_shot_generation.prob_utils import densify, log_max_normalize, log_normalize
DEFAULT_NUM_PRIVATE_TRAIN = 80
DEFAULT_NUM_PUBLIC_TRAIN = 0
DEFAULT_NUM_VALID = 4
DEFAULT_NUM_PRIVATE_TRAIN_SPLITS = 20
DEFAULT_NUM_TEST = -1
def format_full_datum_for_prompt(field_name, datum: dict[str, str]):
return (
f'{field_name}: "{datum["label"]}"\nSentence: "{datum["content"] + " END"}"\n'
)
def format_test_input_for_prompt(field_name, test_input: str):
return f'{field_name}: "{test_input}"\nSentence: "'
def construct_prompt_same(train_examples, test_example, field_name):
prompt = f"" # prompt strucrture follows: https://github.com/tonyzhaozh/few-shot-learning/blob/main/data_utils.py#L427-L429
for train_example in train_examples:
prompt += "Sentence: " + train_example["content"] + "\n"
prompt += f"{field_name}: " + train_example["label"] + "\n\n"
prompt += "Sentence: " + test_example["content"] + "\n"
prompt += f"{field_name}:"
return prompt
def complete(prompt, l, model_name, temp=0, num_log_probs=None, echo=False, n=None):
# call GPT-3 API until result is provided and then return it
response = None
received = False
while not received:
try:
response = openai.Completion.create(
engine=model_name,
prompt=prompt,
max_tokens=l,
temperature=temp,
logprobs=num_log_probs,
echo=echo,
stop="\n",
n=n,
)
received = True
except:
error = sys.exc_info()[0]
if (
error == openai.error.InvalidRequestError
): # something is wrong: e.g. prompt too long
print(f"InvalidRequestError\nPrompt passed in:\n\n{prompt}\n\n")
assert False
print("API error:", error)
time.sleep(1)
return response
def chunks(lst, n):
"""Yield successive n-sized chunks from lst."""
for i in range(0, len(lst), n):
yield lst[i : i + n]
def get_model_response(
data,
test_examples,
openai_model,
field_name,
max_token_to_fill=5,
additional_tokens=None,
):
all_raw_answers = []
prompts = []
train_examples = data
for test_example in test_examples:
prompts.append(construct_prompt_same(train_examples, test_example, field_name))
if additional_tokens is not None:
assert len(additional_tokens) == len(prompts)
for i in range(len(prompts)):
prompts[i] += additional_tokens[i]
chunked_prompts = list(chunks(prompts, 20))
for test_chunk in chunked_prompts:
response = complete(
test_chunk, l=max_token_to_fill, model_name=openai_model, num_log_probs=100
)
for answer_id, answer in enumerate(response["choices"]):
all_raw_answers.append(answer)
return all_raw_answers
def em_accuracy_helper(prediction, label):
correctness_list = []
for pred, l in zip(prediction, label):
pred = pred.split("\n")[0]
if pred == l:
correctness_list.append(1)
else:
correctness_list.append(0)
return np.mean(correctness_list)
def merge_logprobs_topk_mean(
private_next_logprobs: list[dict[int, float]],
public_next_logprobs: dict[int, float],
n_vocab: int,
no_public_token: bool,
normalize_max: bool,
) -> np.ndarray:
# Compute merged distribution
# logsumexp - np.log(...): compute mean probability of distribution
if normalize_max:
normalize_func = (
log_max_normalize # normalize max probability to 1, Exponential mechanism
)
else:
normalize_func = (
log_normalize # normalize sum probability to 1, Gaussian mechanism
)
if no_public_token:
merged_next_logprobs = scipy.special.logsumexp(
np.stack(
[
# Turn into a 1D tensor of size n_vocab
densify(
n_vocab,
# Normalize distribution
normalize_func(
# Filter to the top 100 most likely next tokens according to the public prompt
{k: v for k, v in lps.items()}
),
)
for lps in private_next_logprobs
]
),
axis=0,
) - np.log(len(private_next_logprobs))
else:
merged_next_logprobs = scipy.special.logsumexp(
np.stack(
[
# Turn into a 1D tensor of size n_vocab
densify(
n_vocab,
# Normalize distribution
normalize_func(
# Filter to the top 100 most likely next tokens according to the public prompt
{k: v for k, v in lps.items() if k in public_next_logprobs}
),
)
for lps in private_next_logprobs
]
),
axis=0,
) - np.log(len(private_next_logprobs))
merged_next_probs = np.exp(merged_next_logprobs)
return merged_next_probs
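# For intuition (illustrative): if two splits assign probability 0.6 and 0.2 to the same token,
# logsumexp([log 0.6, log 0.2]) - log(2) = log(0.4), the log of the mean probability, which is
# what the stacked logsumexp above computes for every vocabulary entry at once.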
async def generate_with_private_prompts(
trainset,
num_private_train,
num_private_train_splits,
instruction,
public_train_prompt: str,
stop_tokens: Set[int],
test_input: str,
lm: LM,
noise_rng: np.random.RandomState,
sigma: float,
field_name: str,
top_p,
no_public_token: bool,
subsample_per_token: bool,
gen_seed: int,
max_tokens: int,
normalize_max: bool = False,
) -> list[int]:
generated_token_ids: list[int] = []
stringified_test_datum = format_test_input_for_prompt(field_name, test_input)
public_prompt = public_train_prompt + stringified_test_datum
public_prompt_tokens = lm.encoding.encode(public_prompt)
assert num_private_train_splits > 0
train_subset = trainset.select(range(len(trainset)), keep_in_memory=True)
if not subsample_per_token:
private_train_subset = cast(
Iterable[dict[str, str]],
train_subset.shuffle(gen_seed, keep_in_memory=True).select(
range(num_private_train), keep_in_memory=True
),
)
private_train_splits = [
list(it)
for it in more_itertools.distribute(
num_private_train_splits, private_train_subset
)
]
private_train_prompts = [
instruction
+ "\n".join(
format_full_datum_for_prompt(field_name, datum) for datum in split
)
for split in private_train_splits
]
private_prompts = [
train_prompt + "\n" + stringified_test_datum
for train_prompt in private_train_prompts
]
private_prompts_tokens = [
lm.encoding.encode(prompt) for prompt in private_prompts
]
cnt = 0
for _ in tqdm.tqdm(range(max_tokens), total=float("inf"), unit=" tokens generated"):
private_next_logprobs: list[dict[int, float]]
public_next_logprobs: dict[int, float]
# Split training dataset
if subsample_per_token:
private_train_subset = cast(
Iterable[dict[str, str]],
train_subset.shuffle(gen_seed + cnt, keep_in_memory=True).select(
range(num_private_train), keep_in_memory=True
),
)
cnt += 1
private_train_splits = [
list(it)
for it in more_itertools.distribute(
num_private_train_splits, private_train_subset
)
]
# Turn the data into prompts
private_train_prompts = [
instruction
+ "\n".join(
format_full_datum_for_prompt(field_name, datum) for datum in split
)
for split in private_train_splits
]
private_prompts = [
train_prompt + "\n" + stringified_test_datum
for train_prompt in private_train_prompts
]
private_prompts_tokens = [
lm.encoding.encode(prompt) for prompt in private_prompts
]
if no_public_token:
private_next_logprobs = await asyncio.gather(
*(
next_logprobs(lm, prompt + generated_token_ids, top_p=top_p)
for prompt in private_prompts_tokens
)
)
merged_next_probs = merge_logprobs_topk_mean(
private_next_logprobs,
None,
lm.encoding.n_vocab,
no_public_token,
normalize_max,
)
if normalize_max:
# scale = 1/lambda
noise = noise_rng.exponential(scale=sigma, size=lm.encoding.n_vocab)
else:
noise = noise_rng.normal(0, sigma, size=lm.encoding.n_vocab)
merged_next_probs += noise
else:
public_next_logprobs = await next_logprobs(
lm, public_prompt_tokens + generated_token_ids, top_p=top_p
)
private_next_logprobs = await asyncio.gather(
*(
normalized_logprobs_for_chosen_tokens(
lm,
prompt + generated_token_ids,
public_next_logprobs.keys(),
top_p=top_p,
)
for prompt in private_prompts_tokens
)
)
merged_next_probs = merge_logprobs_topk_mean(
private_next_logprobs,
public_next_logprobs,
lm.encoding.n_vocab,
no_public_token,
normalize_max,
)
if normalize_max:
# scale = 1/lambda
noise = noise_rng.exponential(
scale=sigma, size=len(public_next_logprobs)
)
else:
noise = noise_rng.normal(0, sigma, size=len(public_next_logprobs))
merged_next_probs[list(public_next_logprobs.keys())] += noise
next_token_id = int(np.argmax(merged_next_probs))
if next_token_id in stop_tokens:
break
generated_token_ids.append(next_token_id)
del next_token_id
return generated_token_ids
async def generate_with_public_prompt(
public_train_prompt: str,
stop_tokens: Set[str],
test_input: str,
lm: LM,
field_name,
max_tokens: int = 500,
) -> list[int]:
public_prompt = public_train_prompt + format_test_input_for_prompt(
field_name, test_input
)
    # the completions API accepts a raw string prompt here, so the prompt is passed unencoded
    public_prompt_tokens = public_prompt
[completion] = await lm.completions(
public_prompt_tokens,
CompletionsSettings(
temperature=0.0, max_tokens=max_tokens, n=1, stop=list(stop_tokens)
),
)
generated_tokens = [st.token.token_id for st in completion]
return generated_tokens
def _main(
sigma: Annotated[float, typer.Option()], # noise parameters
openai_model: Annotated[str, typer.Option()] = "babbage",
print_prompts: Annotated[bool, typer.Option()] = False,
# num_private_train=MN. MN=0 with num_valid=4 will get epsilon=0 (4-shot) results.
num_private_train: Annotated[int, typer.Option()] = DEFAULT_NUM_PRIVATE_TRAIN,
# by default set to 0. set_num_public_train >0 indicates additional public data available.
set_num_public_train: Annotated[int, typer.Option()] = DEFAULT_NUM_PUBLIC_TRAIN,
# num_valid=n. n samples to be generated for n-shot ICL
num_valid: Annotated[int, typer.Option()] = DEFAULT_NUM_VALID,
# num_private_train_splits=M
num_private_train_splits: Annotated[
int, typer.Option()
] = DEFAULT_NUM_PRIVATE_TRAIN_SPLITS,
num_test: Annotated[int, typer.Option()] = DEFAULT_NUM_TEST,
# no_public_token=True, RVP=False; no_public_token=False, RVP=True
no_public_token: Annotated[bool, typer.Option()] = False,
# subsample_per_token=True: at each token generation, subsample a new test set
subsample_per_token: Annotated[bool, typer.Option()] = False,
use_dp_prompts: Annotated[bool, typer.Option()] = False,
# normalize_max=True, Exponential mechanism; normalize_max=False, Gaussian mechanism
normalize_max: Annotated[bool, typer.Option()] = False,
# max_token_per_text=T_max
max_token_per_text: Annotated[int, typer.Option()] = 20,
# consistent with default parameters in the documentation https://learn.microsoft.com/en-us/azure/cognitive-services/openai/reference#completions
top_p: Annotated[float, typer.Option()] = 1,
# random seed for subsampling in generation
synth_seed: Annotated[int, typer.Option()] = 0,
# random seed for n-shot demonstrations sampling in evaluation
eval_seed: Annotated[int, typer.Option()] = 0,
    # choice between ["Genre", "Director"]
field_name: Annotated[str, typer.Option()] = "Genre",
data_path: Annotated[str, typer.Option()] = "./../../data/movie",
):
async def main():
if (num_private_train == 0) != (num_private_train_splits == 0):
raise ValueError(
"Either both or neither of --num-private-train and --num-private-train-splits can be 0"
)
assert field_name in [
"Director",
"Genre",
        ]  # field_name options in the movie dataset include "Actor", "Award", "Character_Name", "Director", "Genre", "Opinion", "Origin", "Plot", "Quote", "Relationship", "Soundtrack", and "Year"
command = ["python", sys.argv[0]]
for x in sys.argv[1:]:
if x.startswith("--"):
assert '"' not in x and "'" not in x
command.append(x)
else:
assert "'" not in x
if re.match("^[a-zA-Z0-9_]+$", x):
command.append("%s" % x)
else:
command.append("'%s'" % x)
command = " ".join(command)
print(command)
if no_public_token:
num_public_train = 0
else:
num_public_train = set_num_public_train
lm = api_openai_com(openai_model)
noise_rng = np.random.RandomState()
data_files = {"train": "train.csv", "test": "test.csv"}
data = cast(
DatasetDict,
load_dataset(f"{data_path}/{field_name}", data_files=data_files),
)
trainset = data["train"].shuffle(seed=synth_seed, keep_in_memory=True)
print("trainset length", len(trainset))
if num_public_train > 0:
public_train_subset = cast(
Iterable[dict[str, str]],
trainset.select(
range(
len(trainset) - num_public_train,
len(trainset),
keep_in_memory=True,
)
),
)
else:
public_train_subset = []
trainset = trainset.select(
range(len(trainset) - num_public_train), keep_in_memory=True
)
query_subset = (
data["train"]
.shuffle(seed=eval_seed, keep_in_memory=True)
.select(range(num_valid), keep_in_memory=True)
)
if use_dp_prompts:
synthetic_examples = []
# Turn the data into prompts
instruction = f"Given a propety of {field_name} for the film, generate a description accordingly and make sure to include the given {field_name} in the description.\n\n"
print(instruction)
public_train_prompt = instruction + "\n".join(
format_full_datum_for_prompt(field_name, datum)
for datum in public_train_subset
)
if print_prompts:
print(public_train_prompt)
print("=========")
if normalize_max:
print("Exponential Mechanism")
assert num_private_train == 0 or sigma > 0
if num_private_train > 0:
# scale == sigma_calib == 1/lambda. lambda for exponential distribution.
sigma_calib = (2 / num_private_train_splits) * (1 / sigma)
else:
print("Gaussian Mechanism")
if num_private_train_splits > 0:
sigma_calib = math.sqrt(2) / num_private_train_splits * sigma
else:
sigma_calib = 0
print(
f"sigma in command {sigma}. sigma added according to sensitivity {sigma_calib}"
)
stop_tokens = {"\n", "<|endoftext|>", " END"}
stop_tokens_ids = {lm.encoding.encode_single_token(t) for t in stop_tokens}
client_session.set(aiohttp.ClientSession())
async with client_session.get():
for i, test_datum in enumerate(query_subset, 1):
print(f"# Example {i}")
print(f'{field_name}: "{test_datum["label"]}"')
np.random.seed(synth_seed + i)
gen_seed = np.random.randint(100000)
print(f"gen-seed: {gen_seed}")
if num_private_train_splits > 0:
generated_token_ids = await generate_with_private_prompts(
trainset,
num_private_train,
num_private_train_splits,
instruction,
public_train_prompt,
stop_tokens_ids,
test_datum["label"],
lm,
noise_rng,
sigma_calib,
field_name,
top_p,
no_public_token,
subsample_per_token,
gen_seed,
max_tokens=max_token_per_text
- 1, # need one token length for EOS.
normalize_max=normalize_max,
)
else:
generated_token_ids = await generate_with_public_prompt(
public_train_prompt,
stop_tokens,
test_datum["label"],
lm,
field_name,
max_tokens=max_token_per_text,
)
generated = lm.encoding.decode(generated_token_ids).rstrip('"')
print(f"Generated: {generated}\n")
output_datum = {}
output_datum["content"] = generated.strip()
output_datum["label"] = test_datum["label"]
synthetic_examples.append(output_datum)
if num_test > 0 and num_test <= len(data["test"]):
test_subset = (
data["test"]
.shuffle(seed=12345, keep_in_memory=True)
.select(range(num_test), keep_in_memory=True)
)
else:
test_subset = data["test"]
all_raw_answers_wout_DP = get_model_response(
query_subset, test_subset, openai_model, field_name
)
all_orig_ans = []
for resp in all_raw_answers_wout_DP:
all_orig_ans.append(resp["text"])
all_orig_ans = [ans.strip() for ans in all_orig_ans]
test_labels = test_subset["label"]
orig_accuracy = em_accuracy_helper(all_orig_ans, test_labels)
print(f"Accuracy (original) without DP: {orig_accuracy}")
if use_dp_prompts:
all_raw_answers_w_DP = get_model_response(
synthetic_examples, test_subset, openai_model, field_name
)
all_orig_ans = []
for resp in all_raw_answers_w_DP:
all_orig_ans.append(resp["text"])
all_orig_ans = [ans.strip() for ans in all_orig_ans]
test_labels = test_subset["label"]
orig_accuracy = em_accuracy_helper(all_orig_ans, test_labels)
print(f"Accuracy (original) with DP: {orig_accuracy}")
try:
asyncio.run(main())
except KeyboardInterrupt:
traceback.print_exc()
raise
if __name__ == "__main__":
typer.run(_main)
| [
"\n",
"PLACEHOLDER: PLACEHOLDER\n\n",
"PLACEHOLDERPLACEHOLDER",
"PLACEHOLDER:",
"Sentence: PLACEHOLDER\n",
"[]"
] |
2024-01-10 | microsoft/dp-few-shot-generation | src~dp_few_shot_generation~lm.py | # Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.
import os
from collections.abc import Sequence, Set
from lmapi.async_tools import limits
from lmapi.auth import OpenAiApiKey
from lmapi.lm import LM, CompletionsSettings
from lmapi.openai import OpenAI
from dp_few_shot_generation.prob_utils import log_normalize
def api_openai_com(model_name: str) -> OpenAI:
return OpenAI.create(
"https://api.openai.com/v1/completions",
# This list is taken from
# https://github.com/openai/tiktoken/blob/095924e02c85617df6889698d94515f91666c7ea/tiktoken/model.py#L13-L53
# and modified, currently to accommodate how text-davinci-003 can actually produce <|fim_...|> tokens.
{
# chat
"gpt-4": "cl100k_base",
"gpt-3.5-turbo": "cl100k_base",
# text
"text-davinci-003": "p50k_edit",
"text-davinci-002": "p50k_base",
"text-davinci-001": "r50k_base",
"text-curie-001": "r50k_base",
"text-babbage-001": "r50k_base",
"text-ada-001": "r50k_base",
"davinci": "r50k_base",
"curie": "r50k_base",
"babbage": "r50k_base",
"ada": "r50k_base",
# code
"code-davinci-002": "p50k_base",
"code-davinci-001": "p50k_base",
"code-cushman-002": "p50k_base",
"code-cushman-001": "p50k_base",
"davinci-codex": "p50k_base",
"cushman-codex": "p50k_base",
# edit
"text-davinci-edit-001": "p50k_edit",
"code-davinci-edit-001": "p50k_edit",
# embeddings
"text-embedding-ada-002": "cl100k_base",
# old embeddings
"text-similarity-davinci-001": "r50k_base",
"text-similarity-curie-001": "r50k_base",
"text-similarity-babbage-001": "r50k_base",
"text-similarity-ada-001": "r50k_base",
"text-search-davinci-doc-001": "r50k_base",
"text-search-curie-doc-001": "r50k_base",
"text-search-babbage-doc-001": "r50k_base",
"text-search-ada-doc-001": "r50k_base",
"code-search-babbage-code-001": "r50k_base",
"code-search-ada-code-001": "r50k_base",
# open source
"gpt2": "gpt2",
}[model_name],
OpenAiApiKey(os.environ["OPENAI_API_KEY"]),
limits.AdaptiveLimiter(),
{"model": model_name},
)
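# Example usage (illustrative; assumes OPENAI_API_KEY is set in the environment):
#   lm = api_openai_com("babbage")
#   token_ids = lm.encoding.encode("Hello world")
#   # token_ids (or a raw string) can then be passed to lm.completions(...) as in the run scripts.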
MAX_TOP_LOGPROBS = 100
MAX_LOGIT_BIAS = 100
MIN_LOGIT_BIAS = -100
async def next_logprobs(
self: LM, prompt: str | Sequence[int], top_p=1
) -> dict[int, float]:
# TODO: Don't hardcode "100" here
[sampled_tokens] = await self.completions(
prompt,
CompletionsSettings(n=1, max_tokens=1, logprobs=100, stop=["<test_for_stop>"]),
)
if len(sampled_tokens) == 0:
if isinstance(prompt, str):
prompt += "<|endoftext|>"
else:
prompt = [*prompt, self.encoding.encode_single_token("<|endoftext|>")]
[[*_prev_tokens, sampled_token]] = await self.completions(
prompt,
CompletionsSettings(
n=1, max_tokens=0, logprobs=100, echo=True, top_p=top_p
),
)
else:
[sampled_token] = sampled_tokens
return {tlp.token_id: tlp.logprob for tlp in sampled_token.top_choices}
async def normalized_logprobs_for_chosen_tokens(
self: LM, prompt: Sequence[int], chosen_tokens: Set[int], top_p: float
) -> dict[int, float]:
"""Compute the probability that the prompt will be continued with each of the chosen tokens.
The returned probability distribution is normalized over just the chosen tokens."""
assert (
len(chosen_tokens) <= MAX_TOP_LOGPROBS
), f"chosen_tokens must be <= {MAX_TOP_LOGPROBS} in length"
logit_bias = {token_id: MAX_LOGIT_BIAS for token_id in chosen_tokens}
[sampled_tokens] = await self.completions(
prompt,
CompletionsSettings(
n=1,
max_tokens=1,
logprobs=MAX_TOP_LOGPROBS,
logit_bias=logit_bias,
top_p=top_p,
),
)
if len(sampled_tokens) == 0:
# Fall back to querying over the set
chosen_tokens_list = list(chosen_tokens)
result = await self.completions(
[[*prompt, token_id] for token_id in chosen_tokens_list],
CompletionsSettings(n=1, max_tokens=0, logprobs=0, echo=True),
)
unnormalized_logprobs = {
sampled_token.token.token_id: sampled_token.token.logprob
for [*_prev_tokens, sampled_token] in result
}
return log_normalize(unnormalized_logprobs)
else:
[sampled_token] = sampled_tokens
biased_logprobs = {
tlp.token_id: tlp.logprob for tlp in sampled_token.top_choices
}
biased_logprobs_for_tokens = {
token_id: biased_logprobs.get(token_id, float("-inf"))
for token_id in chosen_tokens
}
return log_normalize(biased_logprobs_for_tokens)
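# Illustrative sketch (made-up token ids and logprobs, not used by this module): restricting the
# biased top-logprobs returned by the API to the chosen token set and renormalizing amounts to a
# log-softmax over the surviving entries.
def _example_renormalize_chosen_tokens() -> dict[int, float]:
    biased_logprobs = {11: -0.1, 42: -2.3, 99: -5.0}
    chosen_tokens = {11, 42}
    filtered = {t: biased_logprobs.get(t, float("-inf")) for t in chosen_tokens}
    return log_normalize(filtered)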
| [
"<|endoftext|>"
] |
2024-01-10 | microsoft/dp-few-shot-generation | src~dp_few_shot_generation~run_exp_dbpedia.py | # Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.
import asyncio
import math
import re
import sys
import time
import traceback
from collections.abc import Iterable, Set
from typing import Annotated, cast
import aiohttp
import more_itertools
import numpy as np
import openai
import scipy.special
import tqdm
import typer
from datasets import DatasetDict, load_dataset
from lmapi.lm import LM, CompletionsSettings
from lmapi.openai import client_session
from dp_few_shot_generation.lm import (
api_openai_com,
next_logprobs,
normalized_logprobs_for_chosen_tokens,
)
from dp_few_shot_generation.prob_utils import densify, log_max_normalize, log_normalize
DEFAULT_NUM_PRIVATE_TRAIN = 80
DEFAULT_NUM_PUBLIC_TRAIN = 0
DEFAULT_NUM_VALID = 4
DEFAULT_NUM_PRIVATE_TRAIN_SPLITS = 40
DEFAULT_NUM_TEST = 1000
labels = [
"Company",
"School",
"Artist",
"Ath",
"Polit",
"Transportation",
"Building",
"Nature",
"Village",
"Animal",
"Plant",
"Album",
"Film",
"Book",
]
label_dict = {
0: ["Company"],
1: ["School"],
2: ["Artist"],
3: ["Ath"],
4: ["Polit"],
5: ["Transportation"],
6: ["Building"],
7: ["Nature"],
8: ["Village"],
9: ["Animal"],
10: ["Plant"],
11: ["Album"],
12: ["Film"],
13: ["Book"],
}
def format_full_datum_for_prompt(labels, datum: dict[str, str]):
return f'Document Type: "{labels[datum["label"]]}"\nText: "{datum["content"] + " END"}"\n'
def format_test_input_for_prompt(labels, test_input: int):
return f'Document Type: "{labels[test_input]}"\nText: "'
def construct_prompt_same(train_examples, test_example):
labels_str = ", ".join(labels)
prompt = (
f"Classify the documents based on whether they are about a {labels_str}.\n\n"
)
for train_example in train_examples:
prompt += "Article: " + train_example["content"] + "\n"
prompt += "Answer: " + label_dict[train_example["label"]][0] + "\n\n"
prompt += "Article: " + test_example["content"] + "\n"
prompt += "Answer:"
return prompt
def complete(prompt, l, model_name, temp=0, num_log_probs=None, echo=False, n=None):
# call GPT-3 API until result is provided and then return it
response = None
received = False
while not received:
try:
response = openai.Completion.create(
engine=model_name,
prompt=prompt,
max_tokens=l,
temperature=temp,
logprobs=num_log_probs,
echo=echo,
stop="\n",
n=n,
)
received = True
except:
error = sys.exc_info()[0]
if (
error == openai.error.InvalidRequestError
): # something is wrong: e.g. prompt too long
print(f"InvalidRequestError\nPrompt passed in:\n\n{prompt}\n\n")
assert False
print("API error:", error)
time.sleep(1)
return response
def chunks(lst, n):
"""Yield successive n-sized chunks from lst."""
for i in range(0, len(lst), n):
yield lst[i : i + n]
def get_model_response(data, test_examples, openai_model):
all_raw_answers = []
prompts = []
train_examples = data
for test_example in test_examples:
prompts.append(construct_prompt_same(train_examples, test_example))
chunked_prompts = list(chunks(prompts, 20))
for test_chunk in chunked_prompts:
response = complete(test_chunk, l=1, model_name=openai_model, num_log_probs=100)
for answer_id, answer in enumerate(response["choices"]):
all_raw_answers.append(answer)
return all_raw_answers
def get_label_probs(all_raw_answers, test_subset):
"""Obtain model's label probability for each of the test examples. The returned prob is NOT normalized"""
num_classes = len(label_dict)
approx = False
assert len(all_raw_answers) == len(test_subset)
# Fill in the labels that is in the top k prob
all_label_probs = []
all_missing_positions = []
cnt = 0
for i, ans in enumerate(all_raw_answers):
try:
top_logprobs = ans["logprobs"]["top_logprobs"][
0
] # [0] since we only ask for complete one more token
        except:
            cnt += 1  # cnt for corner case
            top_logprobs = {}  # no logprobs returned; treat every label as missing below
label_probs = [0] * len(label_dict.keys())
for j, label_list in label_dict.items():
all_found = True
for label in label_list: # each possible label correspond to the same class
label = " " + label # notice prompt does not have space after 'A:'
if label in top_logprobs:
label_probs[j] += np.exp(top_logprobs[label])
else:
all_found = False
if not all_found:
position = (i, j) # (which test example, which label)
all_missing_positions.append(position)
all_label_probs.append(label_probs)
all_label_probs = np.array(all_label_probs) # prob not normalized
return all_label_probs # NOT NORMALIZED
def eval_accuracy(all_label_probs, test_labels, mode=None, p_cf=None):
# evaluate the accuracy with and without contextual calibration
num_classes = all_label_probs.shape[1]
if p_cf is None:
# do not calibrate
W = np.identity(num_classes)
b = np.zeros([num_classes, 1])
else:
# calibrate
if mode == "diagonal_W":
W = np.linalg.inv(np.identity(num_classes) * p_cf)
b = np.zeros([num_classes, 1])
elif mode == "identity_W":
W = np.identity(num_classes)
b = -1 * np.expand_dims(p_cf, axis=-1)
else:
assert False
correctness_list = []
assert len(all_label_probs) == len(test_labels)
for label_probs, true_label in zip(all_label_probs, test_labels):
if np.sum(label_probs) > 0: # corner case np.sum(label_probs)=0.
label_probs = label_probs / np.sum(label_probs) # normalize to 1
calibrate_label_probs = np.matmul(W, np.expand_dims(label_probs, axis=-1)) + b
ans_label = np.argmax(calibrate_label_probs)
if ans_label == true_label:
correctness_list.append(1)
else:
correctness_list.append(0)
return np.mean(correctness_list)
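# For intuition (illustrative): with mode="diagonal_W", W = diag(1 / p_cf), so each label
# probability is divided by the model's content-free prior for that label before the argmax;
# with p_cf=None the identity matrix is used and the scores are left unchanged.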
def get_p_content_free(train_subset, openai_model, content_free_inputs=("N/A",)):
"""Query model with content free input, return its prediction probability for each label"""
all_p_y = []
for content_free_input in content_free_inputs:
prompt = construct_prompt_same(train_subset, content_free_input)
p_y = [0] * len(label_dict)
for i, answers in label_dict.items():
prob = 0
for a in answers:
prob += np.exp(
complete(
prompt + " " + a, 0, openai_model, echo=True, num_log_probs=1
)["choices"][0]["logprobs"]["token_logprobs"][-1]
)
p_y[i] = prob
all_p_y.append(p_y)
p_y = np.mean(np.array(all_p_y), axis=0)
p_y = p_y / np.sum(p_y) # normalize
return p_y
def merge_logprobs_topk_mean(
private_next_logprobs: list[dict[int, float]],
public_next_logprobs: dict[int, float],
n_vocab: int,
no_public_token: bool,
normalize_max: bool,
) -> np.ndarray:
# Compute merged distribution
# logsumexp - np.log(...): compute mean probability of distribution
if normalize_max:
normalize_func = (
log_max_normalize # normalize max probability to 1, Exponential mechanism
)
else:
normalize_func = (
log_normalize # normalize sum probability to 1, Gaussian mechanism
)
if no_public_token:
merged_next_logprobs = scipy.special.logsumexp(
np.stack(
[
# Turn into a 1D tensor of size n_vocab
densify(
n_vocab,
# Normalize distribution
normalize_func(
# Filter to the top 100 most likely next tokens according to the public prompt
{k: v for k, v in lps.items()}
),
)
for lps in private_next_logprobs
]
),
axis=0,
) - np.log(len(private_next_logprobs))
else:
merged_next_logprobs = scipy.special.logsumexp(
np.stack(
[
# Turn into a 1D tensor of size n_vocab
densify(
n_vocab,
# Normalize distribution
normalize_func(
# Filter to the top 100 most likely next tokens according to the public prompt
{k: v for k, v in lps.items() if k in public_next_logprobs}
),
)
for lps in private_next_logprobs
]
),
axis=0,
) - np.log(len(private_next_logprobs))
merged_next_probs = np.exp(merged_next_logprobs)
return merged_next_probs
async def generate_with_private_prompts(
trainset,
num_private_train,
num_private_train_splits,
instruction,
public_train_prompt: str,
stop_tokens: Set[int],
test_input: int,
lm: LM,
noise_rng: np.random.RandomState,
sigma: float,
labels,
top_p,
no_public_token: bool,
subsample_per_token: bool,
sample_same_label_prompts: bool,
gen_seed: int,
max_tokens: int,
normalize_max: bool = False,
) -> list[int]:
generated_token_ids: list[int] = []
stringified_test_datum = format_test_input_for_prompt(labels, test_input)
public_prompt = public_train_prompt + stringified_test_datum
public_prompt_tokens = lm.encoding.encode(public_prompt)
assert num_private_train_splits > 0
if sample_same_label_prompts:
select_list = []
for i in range(len(trainset)):
if trainset[i]["label"] == test_input:
select_list.append(i)
train_subset = trainset.select(select_list, keep_in_memory=True)
else:
train_subset = trainset.select(range(len(trainset)), keep_in_memory=True)
if not subsample_per_token:
private_train_subset = cast(
Iterable[dict[str, str]],
train_subset.shuffle(gen_seed, keep_in_memory=True).select(
range(num_private_train), keep_in_memory=True
),
)
private_train_splits = [
list(it)
for it in more_itertools.distribute(
num_private_train_splits, private_train_subset
)
]
private_train_prompts = [
instruction
+ "\n".join(format_full_datum_for_prompt(labels, datum) for datum in split)
for split in private_train_splits
]
private_prompts = [
train_prompt + "\n" + stringified_test_datum
for train_prompt in private_train_prompts
]
private_prompts_tokens = [
lm.encoding.encode(prompt) for prompt in private_prompts
]
cnt = 0
for _ in tqdm.tqdm(range(max_tokens), total=float("inf"), unit=" tokens generated"):
private_next_logprobs: list[dict[int, float]]
public_next_logprobs: dict[int, float]
# Split training dataset
if subsample_per_token:
private_train_subset = cast(
Iterable[dict[str, str]],
train_subset.shuffle(gen_seed + cnt, keep_in_memory=True).select(
range(num_private_train), keep_in_memory=True
),
)
cnt += 1
private_train_splits = [
list(it)
for it in more_itertools.distribute(
num_private_train_splits, private_train_subset
)
]
# Turn the data into prompts
private_train_prompts = [
instruction
+ "\n".join(
format_full_datum_for_prompt(labels, datum) for datum in split
)
for split in private_train_splits
]
private_prompts = [
train_prompt + "\n" + stringified_test_datum
for train_prompt in private_train_prompts
]
private_prompts_tokens = [
lm.encoding.encode(prompt) for prompt in private_prompts
]
if no_public_token:
private_next_logprobs = await asyncio.gather(
*(
next_logprobs(lm, prompt + generated_token_ids, top_p=top_p)
for prompt in private_prompts_tokens
)
)
merged_next_probs = merge_logprobs_topk_mean(
private_next_logprobs,
None,
lm.encoding.n_vocab,
no_public_token,
normalize_max,
)
if normalize_max:
# scale = 1/lambda
noise = noise_rng.exponential(scale=sigma, size=lm.encoding.n_vocab)
else:
noise = noise_rng.normal(0, sigma, size=lm.encoding.n_vocab)
merged_next_probs += noise
else:
public_next_logprobs = await next_logprobs(
lm, public_prompt_tokens + generated_token_ids, top_p=top_p
)
private_next_logprobs = await asyncio.gather(
*(
normalized_logprobs_for_chosen_tokens(
lm,
prompt + generated_token_ids,
public_next_logprobs.keys(),
top_p=top_p,
)
for prompt in private_prompts_tokens
)
)
merged_next_probs = merge_logprobs_topk_mean(
private_next_logprobs,
public_next_logprobs,
lm.encoding.n_vocab,
no_public_token,
normalize_max,
)
if normalize_max:
# scale = 1/lambda
noise = noise_rng.exponential(
scale=sigma, size=len(public_next_logprobs)
)
else:
noise = noise_rng.normal(0, sigma, size=len(public_next_logprobs))
merged_next_probs[list(public_next_logprobs.keys())] += noise
next_token_id = int(np.argmax(merged_next_probs))
if next_token_id in stop_tokens:
break
generated_token_ids.append(next_token_id)
del next_token_id
return generated_token_ids
async def generate_with_public_prompt(
public_train_prompt: str,
stop_tokens: Set[str],
test_input: str,
lm: LM,
labels,
max_tokens: int = 500,
) -> list[int]:
public_prompt = public_train_prompt + format_test_input_for_prompt(
labels, test_input
)
    # the completions API accepts a raw string prompt here, so the prompt is passed unencoded
    public_prompt_tokens = public_prompt
[completion] = await lm.completions(
public_prompt_tokens,
CompletionsSettings(
temperature=0.0, max_tokens=max_tokens, n=1, stop=list(stop_tokens)
),
)
generated_tokens = [st.token.token_id for st in completion]
return generated_tokens
def select_uniform_n_shots_over_labels(data, n_shots):
select_list = []
n_shots_per_label = math.ceil(n_shots / len(labels))
labels_counter = {label[1][0]: n_shots_per_label for label in label_dict.items()}
n_shots_selected = 0
for i in range(len(data)):
label = label_dict[data[i]["label"]][0]
if labels_counter[label] == 0:
continue
else:
labels_counter[label] -= 1
select_list.append(i)
n_shots_selected += 1
if n_shots_selected == n_shots:
break
query_subset = data.select(select_list, keep_in_memory=True)
return query_subset
def _main(
sigma: Annotated[float, typer.Option()], # noise parameters
openai_model: Annotated[str, typer.Option()] = "babbage",
print_prompts: Annotated[bool, typer.Option()] = False,
# num_private_train=MN. MN=0 with num_valid=4 will get epsilon=0 (4-shot) results.
num_private_train: Annotated[int, typer.Option()] = DEFAULT_NUM_PRIVATE_TRAIN,
# by default set to 0. set_num_public_train >0 indicates additional public data available.
set_num_public_train: Annotated[int, typer.Option()] = DEFAULT_NUM_PUBLIC_TRAIN,
# num_valid=n. n samples to be generated for n-shot ICL
num_valid: Annotated[int, typer.Option()] = DEFAULT_NUM_VALID,
# num_private_train_splits=M
num_private_train_splits: Annotated[
int, typer.Option()
] = DEFAULT_NUM_PRIVATE_TRAIN_SPLITS,
num_test: Annotated[int, typer.Option()] = DEFAULT_NUM_TEST,
# no_public_token=True, RVP=False; no_public_token=False, RVP=True
no_public_token: Annotated[bool, typer.Option()] = False,
# subsample_per_token=True: at each token generation, subsample a new test set
subsample_per_token: Annotated[bool, typer.Option()] = False,
use_dp_prompts: Annotated[bool, typer.Option()] = False,
# sample_same_label_prompts=True: sample subsets from the sets with same targeted labels.
sample_same_label_prompts: Annotated[bool, typer.Option()] = False,
# normalize_max=True, Exponential mechanism; normalize_max=False, Gaussian mechanism
normalize_max: Annotated[bool, typer.Option()] = False,
# max_token_per_text=T_max
max_token_per_text: Annotated[int, typer.Option()] = 100,
# consistent with default parameters in the documentation https://learn.microsoft.com/en-us/azure/cognitive-services/openai/reference#completions
top_p: Annotated[float, typer.Option()] = 1,
# random seed for subsampling in generation
synth_seed: Annotated[int, typer.Option()] = 0,
# random seed for n-shot demonstrations sampling in evaluation
eval_seed: Annotated[int, typer.Option()] = 0,
):
async def main():
if (num_private_train == 0) != (num_private_train_splits == 0):
raise ValueError(
"Either both or neither of --num-private-train and --num-private-train-splits can be 0"
)
command = ["python", sys.argv[0]]
for x in sys.argv[1:]:
if x.startswith("--"):
assert '"' not in x and "'" not in x
command.append(x)
else:
assert "'" not in x
if re.match("^[a-zA-Z0-9_]+$", x):
command.append("%s" % x)
else:
command.append("'%s'" % x)
command = " ".join(command)
print(command)
if no_public_token:
num_public_train = 0
else:
num_public_train = set_num_public_train
lm = api_openai_com(openai_model)
noise_rng = np.random.RandomState()
data = cast(DatasetDict, load_dataset("dbpedia_14"))
print(labels)
trainset = data["train"].shuffle(seed=synth_seed, keep_in_memory=True)
print("trainset length", len(trainset))
if num_public_train > 0:
public_train_subset = cast(
Iterable[dict[str, str]],
trainset.select(
range(
len(trainset) - num_public_train,
len(trainset),
keep_in_memory=True,
)
),
)
else:
public_train_subset = []
trainset = trainset.select(
range(len(trainset) - num_public_train), keep_in_memory=True
)
queryset = data["train"].shuffle(seed=eval_seed, keep_in_memory=True)
query_subset = select_uniform_n_shots_over_labels(queryset, num_valid)
if use_dp_prompts:
synthetic_examples = []
# Turn the data into prompts
instruction = "Given a label of document type, generate the chosen type of document accordingly.\n\n"
public_train_prompt = instruction + "\n".join(
format_full_datum_for_prompt(labels, datum)
for datum in public_train_subset
)
if print_prompts:
print(public_train_prompt)
print("=========")
if normalize_max:
print("Exponential Mechanism")
assert num_private_train == 0 or sigma > 0
if num_private_train > 0:
# scale == sigma_calib == 1/lambda. lambda for exponential distribution.
sigma_calib = (2 / num_private_train_splits) * (1 / sigma)
else:
print("Gaussian Mechanism")
if num_private_train_splits > 0:
sigma_calib = math.sqrt(2) / num_private_train_splits * sigma
else:
sigma_calib = 0
print(
f"sigma in command {sigma}. sigma added according to sensitivity {sigma_calib}"
)
stop_tokens = {"\n", "<|endoftext|>", " END"}
stop_tokens_ids = {lm.encoding.encode_single_token(t) for t in stop_tokens}
client_session.set(aiohttp.ClientSession())
async with client_session.get():
for i, test_datum in enumerate(query_subset, 1):
print(f"# Example {i}")
print(f'Document Type: "{labels[test_datum["label"]]}"')
print(f'References:\n "{test_datum["content"]}"')
np.random.seed(synth_seed + i)
gen_seed = np.random.randint(100000)
print(f"gen-seed: {gen_seed}")
if num_private_train_splits > 0:
generated_token_ids = await generate_with_private_prompts(
trainset,
num_private_train,
num_private_train_splits,
instruction,
public_train_prompt,
stop_tokens_ids,
test_datum["label"],
lm,
noise_rng,
sigma_calib,
labels,
top_p,
no_public_token,
subsample_per_token,
sample_same_label_prompts,
gen_seed,
max_tokens=max_token_per_text
- 1, # need one token length for EOS.
normalize_max=normalize_max,
)
else:
generated_token_ids = await generate_with_public_prompt(
public_train_prompt,
stop_tokens,
test_datum["label"],
lm,
labels,
max_tokens=max_token_per_text,
)
generated = lm.encoding.decode(generated_token_ids).rstrip('"')
print(f"Generated: {generated}\n")
output_datum = {}
output_datum["content"] = generated.strip()
output_datum["label"] = test_datum["label"]
synthetic_examples.append(output_datum)
if num_test > 0:
test_subset = (
data["test"]
.shuffle(seed=12345, keep_in_memory=True)
.select(range(num_test), keep_in_memory=True)
)
test_labels = [test_example["label"] for test_example in test_subset]
content_free_inputs = [
{"content": "N/A"},
{"content": ""},
{"content": "[MASK]"},
]
p_cf_wout_DP = get_p_content_free(
query_subset, openai_model, content_free_inputs=content_free_inputs
)
all_raw_answers_wout_DP = get_model_response(
query_subset, test_subset, openai_model
)
all_label_probs_wout_DP = get_label_probs(
all_raw_answers_wout_DP, test_subset
)
acc_original_wout_DP = eval_accuracy(all_label_probs_wout_DP, test_labels)
acc_calibrated_wout_DP = eval_accuracy(
all_label_probs_wout_DP,
test_labels,
mode="diagonal_W",
p_cf=p_cf_wout_DP,
)
print(f"Accuracy (original) without DP: {acc_original_wout_DP}")
print(f"Accuracy (calibrated) without DP: {acc_calibrated_wout_DP}")
if use_dp_prompts:
p_cf_w_DP = get_p_content_free(
synthetic_examples,
openai_model,
content_free_inputs=content_free_inputs,
)
all_raw_answers_w_DP = get_model_response(
synthetic_examples, test_subset, openai_model
)
all_label_probs_w_DP = get_label_probs(
all_raw_answers_w_DP, test_subset
)
acc_original_w_DP = eval_accuracy(all_label_probs_w_DP, test_labels)
acc_calibrated_w_DP = eval_accuracy(
all_label_probs_w_DP, test_labels, mode="diagonal_W", p_cf=p_cf_w_DP
)
print(f"Accuracy (original) with DP: {acc_original_w_DP}")
print(f"Accuracy (calibrated) with DP: {acc_calibrated_w_DP}")
try:
asyncio.run(main())
except KeyboardInterrupt:
traceback.print_exc()
raise
if __name__ == "__main__":
typer.run(_main)
| [
"\n",
"PLACEHOLDERPLACEHOLDER",
"Article: PLACEHOLDER\n",
"[MASK]",
"N/A",
"Answer:",
"Answer: P\n\n",
"[]",
"Classify the documents based on whether they are about a PLACEHOLDER.\n\n"
] |
2024-01-10 | JCSnap/langchain-stuff | get_text.py | from langchain.document_loaders import WebBaseLoader
from langchain.text_splitter import RecursiveCharacterTextSplitter
url = "https://en.wikipedia.org/wiki/Hwa_Chong_Institution"
loader = WebBaseLoader(url).load()
output_file = open("output.txt", "w")
output_file.write(loader[0].page_content.replace("\n", " "))
output_file.close()  # flush and close before re-reading the file below
with open("output.txt", "r") as file:
hwa_chong_info = file.read()
text_splitter = RecursiveCharacterTextSplitter(
chunk_size = 20000,
chunk_overlap = 20,
length_function = len,
add_start_index = True,
) | [] |
2024-01-10 | charliemeyer2000/logging-repository | code~GitHubCommitHandler.py | import requests
import datetime
import openai
import base64
import json
import os
def lambda_handler(event, context):
if 'body' in event:
event = json.loads(event['body'])
ghCommitHandler = GitHubCommitHandler(event)
ghCommitHandler.handle_commit()
return {
'statusCode': 200,
'body': json.dumps('Commit summary added to README.md file in logging-repository')
}
class GitHubCommitHandler:
def __init__(self, event):
self.event = event
        self.payload = json.loads(event) if isinstance(event, str) else event
self.commit_message = self.payload['head_commit']['message']
self.commit_sha = self.payload['head_commit']['id']
self.repository_committed_to = self.payload['repository']['name']
self.files_modified = self.payload['head_commit']['modified']
self.files_added = self.payload['head_commit']['added']
self.files_removed = self.payload['head_commit']['removed']
self.committer = self.payload['head_commit']['committer']['username']
self.owner = "charliemeyer2000"
self.GITHUB_TOKEN = os.environ.get("GITHUB_TOKEN")
openai.api_key = os.environ.get("OPENAI_API_KEY")
def get_commit_data(self):
url = f"https://api.github.com/repos/{self.owner}/{self.repository_committed_to}/commits/{self.commit_sha}"
headers = {
"Authorization": f"token {self.GITHUB_TOKEN}"
}
response = requests.get(url=url, headers=headers)
data = response.json()
return data
# def get_num_tokens(self, string: str) -> int:
# """Returns the number of tokens in a text string for text-davinci-003"""
# encoding = tiktoken.encoding_for_model("text-davinci-003")
# num_tokens = len(encoding.encode(string))
# return num_tokens
def generate_summary(self, code_added, code_removed, code_modified) -> str:
"""
Takes in a string of code and returns a summary of the code
:param commit_code: string of code
:return: string of summary
"""
# Generate summary using OpenAI
prompt = f"""
You are an AI tasked with generating a one-sentence summary of a git commit based on the files changed and the commit message.
You are in a code pipeline, and any text you output will be taken
directly and put into a markdown file downstream. You will be a reliable and trusted part of the pipeline.
You should not be using any special characters that are not just characters or numbers, as this will break
the markdown file within this pipeline. You must perform this summary. Furthermore, you must add some humor
to the summary, as this is a fun project, either by adding a joke or a funny comment.
The files added are: {code_added}
The files removed are: {code_removed}
The files modified are: {code_modified}
The commit message is: {self.commit_message}
"""
        # if the prompt is over the model's 4096-token budget, truncate it, leaving
        # ~400 tokens of headroom (length is approximated here in characters, not tokens)
        prompt_len = len(prompt)
        if prompt_len > 4096 - 400:
            prompt = prompt[:4096 - 400]
            prompt += "..."
            prompt_len = len(prompt)  # recompute so max_tokens below stays positive
        response = openai.Completion.create(
            model="text-davinci-003",
            prompt=prompt,
            temperature=0.8,
            max_tokens=4096 - prompt_len,
        )
if(len(prompt) > 2000):
return response.choices[0].text.strip() + "... Because more than 2000 characters were provided, the summary was truncated. Therefore, the summary may not entirely be accurate to all of the code & changes made in the commit."
else:
return response.choices[0].text.strip()
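    # Note (illustrative): the commented-out get_num_tokens helper above could replace the
    # character-count heuristic in this method, e.g. truncating when
    # self.get_num_tokens(prompt) > 4096 - 400 (this assumes `import tiktoken` is added).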
def create_commit(self, commit_summary: str, commit_message: str, repository_committed_to: str, files_modified: any, committer: str) -> None:
"""
Takes in a summary of the commit and adds it to the logs.md file in the logging-repository in my github account.
:param commit_summary: summary of the commit
:return: None
"""
# Get the current logs.md file from the logging-repository
url = "https://api.github.com/repos/charliemeyer2000/logging-repository/contents/logs.md"
headers = {
'Authorization': f'Bearer {self.GITHUB_TOKEN}',
'X-GitHub-Api-Version': '2022-11-28'
}
# Get the current logs.md file
response = requests.get(url=url, headers=headers)
current_content = response.json()['content']
current_content = base64.b64decode(current_content).decode('ascii', 'ignore')
# Gets the current date and time
current_date = datetime.datetime.now().strftime("%m/%d/%Y, %H:%M:%S")
# Create a new addition to the logs.md file
title = f"### {committer}: '{commit_message}' @ {current_date} to {repository_committed_to}\n"
        summary = ''
        # Append the GPT summary to the new entry, formatted as a block quote
summary += f'\nGPT Summary: \n > {commit_summary}' + '\n'
        # Add an h3 header with the commit message and the commit summary to the top of logs.md
new_content = title + summary + '\n\n' + current_content
        # Encode the new content to base64, dropping non-ASCII characters such as emojis
new_content = new_content.encode('ascii', 'ignore')
new_content = base64.b64encode(new_content)
# Update the logs.md file to be the old content + the new content
data = {
"message": f"Update logs.md for commit {commit_message}",
"content": new_content.decode('ascii'),
"sha": response.json()['sha']
}
print(f'commit summary: {commit_summary}')
# Update the logs.md file
# response = requests.put(url=url, headers=headers, data=json.dumps(data))
def handle_commit(self):
# data = self.get_commit_data()
# lines_added, lines_removed = self.get_lines_of_code(data)
commit_summary = self.generate_summary(code_added=self.files_added, code_removed=self.files_removed, code_modified=self.files_modified)
self.create_commit(commit_summary=commit_summary, commit_message=self.commit_message, repository_committed_to=self.repository_committed_to, files_modified=self.files_modified, committer=self.committer)
return 'Commit summary added to README.md file in logging-repository' | [
"...",
"1"
] |
2024-01-10 | lperezmo/sms-helper | alternative_helper.py | import os
import json
import logging
import requests
from openai import OpenAI
import azure.functions as func
from twilio.rest import Client
#------------------------------------#
# Load environment variables
#------------------------------------#
ACCOUNT_SID = os.environ["ACCOUNT_SID"]
AUTH_TOKEN = os.environ["AUTH_TOKEN"]
#------------------------------------#
# OpenAI and Twilio Clients
#------------------------------------#
ai_client = OpenAI()
CLIENT = Client(ACCOUNT_SID, AUTH_TOKEN)
#------------------------------------#
# Security check
#------------------------------------#
def check_pin_and_reply(PIN, send_to, send_from, incoming_message):
    """
    Generate a reply based on the incoming message.
    Parameters
    ----------
    PIN : str
        Security PIN
    send_to : str
        Phone number to send text to
    send_from : str
        Phone number to send text from
    incoming_message : str
        Incoming message from Twilio
Returns
-------
message : str
Reply message
"""
if incoming_message.strip() == PIN:
return """Welcome back lord overlord Luis.
- I can schedule calls, texts, and reminders for you.
- I can also just answer questions or anything like that.
- Text 'yolo' to see this message again"""
else:
messages = CLIENT.messages.list(from_=send_to, to=send_from)
sent_pin = False
for message in messages:
if message.body.strip() == PIN:
sent_pin = True
if sent_pin:
            follow_up_reply = get_follow_up_text(send_to, send_from, incoming_message)
return follow_up_reply
else:
return "Please provide security PIN to continue"
#------------------------------------#
# Current time
#------------------------------------#
def get_time():
"""
Robustly get the current time from an API.
Parameters
----------
None
Returns
-------
str
Current time
"""
max_retries = 3
attempts = 0
while attempts < max_retries:
try:
response = requests.get('http://worldtimeapi.org/api/timezone/America/Los_Angeles')
response.raise_for_status() # This will raise an exception for HTTP error codes
res = response.json()
datetime = res.get('datetime')
abbreviation = res.get('abbreviation')
day_of_week = res.get('day_of_week')
if datetime and abbreviation and day_of_week is not None:
return f"{datetime} {abbreviation} day of the week {day_of_week}"
else:
raise ValueError("Incomplete time data received")
except (requests.RequestException, ValueError) as e:
attempts += 1
if attempts == max_retries:
return "Failed to get time after several attempts."
#-----------------------------------------#
# Generate JSON body to schedule reminder
#-----------------------------------------#
def schedule_reminder(natural_language_request):
"""
Generate JSON body to schedule reminder
Parameters
----------
natural_language_request : str
Natural language request from user
Returns
-------
JSON body to schedule reminder
"""
sys_prompt = """Your job is to create the JSON body for an API call to schedule texts and calls. Then , you will schedule the text or call for the user will request based on pacific time (given in pacific time). If user asks for a reminder today at 6 pm that is 18:00 (24 hour notation).
If the user requests to be called or messaged on their work phone, set to_phone variable to '+12221110000' else send it to default phone '+15554443333'. Use twilio = True by default.
Example endpoint: http://YOUR-ENDPOINT.elasticbeanstalk.com/schedule_single_reminder
Example call:
{
"time": "18:20",
"day": "2023-11-27",
"message_body": "This is the reminder body!",
"call": "True",
"twilio": "True",
"to_number": "+15554443333"
}
Example message:
{
"time":"23:46",
"day":"2023-11-27",
"message_body":"text reminder to check email",
"to_number":"+15554443333",
"twilio":"True",
"call":"False"
}
"""
curr_time = get_time()
ai_client = OpenAI()
completion = ai_client.chat.completions.create(
model="gpt-3.5-turbo-1106",
messages=[
{"role": "system", "content": f"{sys_prompt}"},
{"role": "user", "content": f"{natural_language_request}. <Current Time>: {curr_time}"},
],
response_format={ "type": "json_object" },
)
return json.loads(completion.choices[0].message.content)
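# Example (illustrative): schedule_reminder("Text me tomorrow at 8am to check email") is expected
# to return a dict shaped like the "Example message" in the system prompt above, which the caller
# then POSTs to the /schedule_single_reminder endpoint.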
#------------------------------------#
# Follow up text
#------------------------------------#
def get_follow_up_text(send_to, send_from, incoming_message):
"""Send follow up text
Parameters
----------
send_to : str
Phone number to send text to
send_from : str
Phone number to send text from
incoming_message : str
Incoming message from Twilio
Returns
-------
message : str
Response from the AI to the user
"""
if incoming_message == 'yolo':
return """Welcome back lord overlord Luis.
- I can schedule calls, texts, and reminders for you.
- I can also just answer questions or anything like that.
- Text 'yolo' to see this message again"""
else:
tools = [
{
"type": "function",
"function": {
"name": "schedule_reminder",
"description": "Schedule a reminder using natural language",
"parameters": {
"type": "object",
"properties": {
"natural_language_request": {
"type": "string",
"description": "Requested reminder in natural language. Example: 'Remind me to call mom tomorrow at 6pm' or 'Send me a message with a Matrix quote on wednesday at 8am'",
}
},
"required": ["natural_language_request"],
},
}
}
]
#----------------------------------------------------#
# AI w/tools - reply or use tools to schedule reminder
#----------------------------------------------------#
completion = ai_client.chat.completions.create(
model="gpt-3.5-turbo-1106",
messages=[
{"role": "system", "content": f"You are an AI assistant that can schedule reminders (like calls and texts) if asked to do so. Be informative, funny, and helpful, and keep your messages clear and short. To schedule reminder just pass a natural language request to the function 'schedule_reminder'"},
{"role": "user", "content": f"{incoming_message}"}
],
tools=tools,
tool_choice="auto"
)
message = completion.choices[0].message.content
        if message is None:
message = "Just a minute while I schedule your reminder."
else:
return message
#----------------------------------------------------#
# If tools are called, call the tools function
#----------------------------------------------------#
if completion.choices[0].message.tool_calls:
if completion.choices[0].message.tool_calls[0].function.name == 'schedule_reminder':
args = completion.choices[0].message.tool_calls[0].function.arguments
args_dict = json.loads(args)
try:
#--------------------------------#
# Schedule reminder
#--------------------------------#
json_body = schedule_reminder(**args_dict)
url_endpoint = "http://YOUR-ENDPOINT.elasticbeanstalk.com/schedule_single_reminder"
headers = {'Content-Type': 'application/json'}
response = requests.post(url_endpoint, headers=headers, data=json.dumps(json_body))
if response.status_code == 200:
return "Your reminder has been scheduled."
except Exception as e:
logging.error(f"Error: {e}")
return "Error scheduling reminder."
| [
"You are an AI assistant that can schedule reminders (like calls and texts) if asked to do so. Be informative, funny, and helpful, and keep your messages clear and short. To schedule reminder just pass a natural language request to the function 'schedule_reminder'",
"PLACEHOLDER",
"PLACEHOLDER. <Current Time>: PLACEHOLDER",
"Your job is to create the JSON body for an API call to schedule texts and calls. Then , you will schedule the text or call for the user will request based on pacific time (given in pacific time). If user asks for a reminder today at 6 pm that is 18:00 (24 hour notation).\n\n If the user requests to be called or messaged on their work phone, set to_phone variable to '+12221110000' else send it to default phone '+15554443333'. Use twilio = True by default.\n\n Example endpoint: http://YOUR-ENDPOINT.elasticbeanstalk.com/schedule_single_reminder\n Example call:\n {\n \"time\": \"18:20\",\n \"day\": \"2023-11-27\",\n \"message_body\": \"This is the reminder body!\",\n \"call\": \"True\",\n \"twilio\": \"True\",\n \"to_number\": \"+15554443333\"\n }\n\n Example message:\n {\n \"time\":\"23:46\",\n \"day\":\"2023-11-27\",\n \"message_body\":\"text reminder to check email\",\n \"to_number\":\"+15554443333\",\n \"twilio\":\"True\",\n \"call\":\"False\"\n }\n "
] |
2024-01-10 | lperezmo/sms-helper | helper.py | import os
import json
import logging
import requests
from openai import OpenAI
import azure.functions as func
from twilio.rest import Client
#------------------------------------#
# Load environment variables
#------------------------------------#
ACCOUNT_SID = os.environ["ACCOUNT_SID"]
AUTH_TOKEN = os.environ["AUTH_TOKEN"]
#------------------------------------#
# OpenAI and Twilio Clients
#------------------------------------#
ai_client = OpenAI()
CLIENT = Client(ACCOUNT_SID, AUTH_TOKEN)
#------------------------------------#
# Security check
#------------------------------------#
def process_incoming_message(PIN, send_to, send_from, incoming_message):
"""
Process incoming message & generate a reply.
Parameters
----------
PIN : str
Security PIN
send_to : str
Phone number to send text to
send_from : str
Phone number to send text from
incoming_message : str
Incoming message from Twilio
Returns
-------
Send message using Twilio
"""
if incoming_message.strip() == PIN:
send_initial_text(send_to, send_from)
else:
messages = CLIENT.messages.list(from_=send_to, to=send_from)
sent_pin = False
for message in messages:
if message.body.strip() == PIN:
sent_pin = True
if sent_pin:
send_follow_up_text(send_to, send_from, incoming_message)
else:
send_message("Please provide security PIN to continue", send_to, send_from)
#------------------------------------#
# Welcome text
#------------------------------------#
def send_initial_text(send_to, send_from):
outgoing_message = f"""Welcome back lord overlord Luis.
- I can schedule calls, texts, and reminders for you.
- I can also just answer questions or anything like that.
- Text 'yolo' to see this message again"""
send_message(outgoing_message, send_to, send_from)
#------------------------------------#
# Current time
#------------------------------------#
def get_time():
"""Robustly get the current time from an API."""
max_retries = 3
attempts = 0
while attempts < max_retries:
try:
response = requests.get('http://worldtimeapi.org/api/timezone/America/Los_Angeles')
response.raise_for_status() # This will raise an exception for HTTP error codes
res = response.json()
datetime = res.get('datetime')
abbreviation = res.get('abbreviation')
day_of_week = res.get('day_of_week')
if datetime and abbreviation and day_of_week is not None:
return f"{datetime} {abbreviation} day of the week {day_of_week}"
else:
raise ValueError("Incomplete time data received")
except (requests.RequestException, ValueError) as e:
attempts += 1
if attempts == max_retries:
return "Failed to get time after several attempts."
#-----------------------------------------#
# Generate JSON body to schedule reminder
#-----------------------------------------#
def schedule_reminder(natural_language_request):
"""Generate JSON body to schedule reminder"""
sys_prompt = """Your job is to create the JSON body for an API call to schedule texts and calls. Then , you will schedule the text or call for the user will request based on pacific time (given in pacific time). If user asks for a reminder today at 6 pm that is 18:00 (24 hour notation).
If the user requests to be called or messaged on their work phone, set to_phone variable to '+12221110000' else send it to default phone '+15554443333'. Use twilio = True by default.
Example endpoint: http://YOUR-ENDPOINT.elasticbeanstalk.com/schedule_single_reminder
Example call:
{
"time": "18:20",
"day": "2023-11-27",
"message_body": "This is the reminder body!",
"call": "True",
"twilio": "True",
"to_number": "+15554443333"
}
Example message:
{
"time":"23:46",
"day":"2023-11-27",
"message_body":"text reminder to check email",
"to_number":"+15554443333",
"twilio":"True",
"call":"False"
}
"""
curr_time = get_time()
ai_client = OpenAI()
completion = ai_client.chat.completions.create(
model="gpt-3.5-turbo-1106",
messages=[
{"role": "system", "content": f"{sys_prompt}"},
{"role": "user", "content": f"{natural_language_request}. <Current Time>: {curr_time}"},
],
response_format={ "type": "json_object" },
)
return json.loads(completion.choices[0].message.content)
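#----------------------------------------------------------------------#
# Illustrative sketch (not part of the original module): posting the JSON
# body produced by `schedule_reminder` to the scheduling endpoint, mirroring
# the flow in `send_follow_up_text` below. The request text is made up and
# the endpoint is the same placeholder used elsewhere in this file.
#----------------------------------------------------------------------#
def _example_schedule_reminder():
    json_body = schedule_reminder("Text me to check the oven today at 6 pm")
    # Expected shape, e.g. {"time": "18:00", "day": "...", "message_body": "...",
    #                       "to_number": "+15554443333", "twilio": "True", "call": "False"}
    response = requests.post(
        "http://YOUR-ENDPOINT.elasticbeanstalk.com/schedule_single_reminder",
        headers={"Content-Type": "application/json"},
        data=json.dumps(json_body),
    )
    return response.status_code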
#------------------------------------#
# Follow up text
#------------------------------------#
def send_follow_up_text(send_to, send_from, incoming_message):
"""Send follow up text
Parameters
----------
send_to : str
Phone number to send text to
send_from : str
Phone number to send text from
incoming_message : str
Incoming message from Twilio
"""
if incoming_message == 'yolo':
send_initial_text(send_to, send_from)
else:
tools = [
{
"type": "function",
"function": {
"name": "schedule_reminder",
"description": "Schedule a reminder using natural language",
"parameters": {
"type": "object",
"properties": {
"natural_language_request": {
"type": "string",
"description": "Requested reminder in natural language. Example: 'Remind me to call mom tomorrow at 6pm' or 'Send me a message with a Matrix quote on wednesday at 8am'",
}
},
"required": ["natural_language_request"],
},
}
}
]
#----------------------------------------------------#
# AI w/tools - reply or use tools to schedule reminder
#----------------------------------------------------#
completion = ai_client.chat.completions.create(
model="gpt-3.5-turbo-1106",
messages=[
{"role": "system", "content": f"You are an AI assistant that can schedule reminders (like calls and texts) if asked to do so. Be informative, funny, and helpful, and keep your messages clear and short. To schedule reminder just pass a natural language request to the function 'schedule_reminder'"},
{"role": "user", "content": f"{incoming_message}"}
],
tools=tools,
tool_choice="auto"
)
message = completion.choices[0].message.content
        if message is None:
message = "Just a minute while I schedule your reminder."
send_message(message, send_to, send_from)
#----------------------------------------------------#
# If tools are called, call the tools function
#----------------------------------------------------#
if completion.choices[0].message.tool_calls:
if completion.choices[0].message.tool_calls[0].function.name == 'schedule_reminder':
args = completion.choices[0].message.tool_calls[0].function.arguments
args_dict = json.loads(args)
try:
json_body = schedule_reminder(**args_dict)
url_endpoint = "http://YOUR-ENDPOINT.elasticbeanstalk.com/schedule_single_reminder"
headers = {'Content-Type': 'application/json'}
#--------------------------------#
# Schedule reminder
#--------------------------------#
response = requests.post(url_endpoint, headers=headers, data=json.dumps(json_body))
if response.status_code == 200:
send_message("Your reminder has been scheduled.", send_to, send_from)
except Exception as e:
logging.error(f"Error: {e}")
#------------------------------------#
# Send message using Twilio
#------------------------------------#
def send_message(outgoing_message, send_to, send_from):
message = CLIENT.messages.create(
body=outgoing_message, from_=send_from, to=send_to,
)
return func.HttpResponse(
"You can text this number again if you need more information. (LP)", status_code=200
)
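#----------------------------------------------------------------------#
# Illustrative end-to-end sketch (not part of the original module): how an
# incoming Twilio webhook message would flow through the PIN gate above.
# The PIN and phone numbers are made-up values for demonstration only.
#----------------------------------------------------------------------#
def _example_incoming_message():
    process_incoming_message(
        PIN="1234",                        # assumed security PIN
        send_to="+15554443333",            # user's phone (example)
        send_from="+12223334444",          # Twilio number (example)
        incoming_message="Remind me to call mom tomorrow at 6pm",
    )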
| [
"You are an AI assistant that can schedule reminders (like calls and texts) if asked to do so. Be informative, funny, and helpful, and keep your messages clear and short. To schedule reminder just pass a natural language request to the function 'schedule_reminder'",
"PLACEHOLDER",
"PLACEHOLDER. <Current Time>: PLACEHOLDER",
"Your job is to create the JSON body for an API call to schedule texts and calls. Then , you will schedule the text or call for the user will request based on pacific time (given in pacific time). If user asks for a reminder today at 6 pm that is 18:00 (24 hour notation).\n\n If the user requests to be called or messaged on their work phone, set to_phone variable to '+12221110000' else send it to default phone '+15554443333'. Use twilio = True by default.\n\n Example endpoint: http://YOUR-ENDPOINT.elasticbeanstalk.com/schedule_single_reminder\n Example call:\n {\n \"time\": \"18:20\",\n \"day\": \"2023-11-27\",\n \"message_body\": \"This is the reminder body!\",\n \"call\": \"True\",\n \"twilio\": \"True\",\n \"to_number\": \"+15554443333\"\n }\n\n Example message:\n {\n \"time\":\"23:46\",\n \"day\":\"2023-11-27\",\n \"message_body\":\"text reminder to check email\",\n \"to_number\":\"+15554443333\",\n \"twilio\":\"True\",\n \"call\":\"False\"\n }\n "
] |
2024-01-10 | Ming-doan/green-vision | app~logics.py | from .utils import db, storage
from typing import Any
from .yolo import yolo
from PIL import Image
from torch import Tensor
import openai
import os
def predict_pil_image(image: Image.Image) -> list[dict[str, Any]]:
# Predict image
results = yolo.predict(image, 0.25)
result_format = list(results)[0]
# Get results
boxs: Tensor = result_format.boxes.data
labels = result_format.names
# Get boxs result
boxs_result = []
for box in boxs:
print(box)
result_conf = box[4].item()
if result_conf > 0.25:
# Get the center point
center_x = box[0].item()
center_y = box[1].item()
boxs_result.append({
"pos": [int(center_x), int(center_y)],
"name": labels[int(box[5].item())],
})
return boxs_result
def query_from_firebase(label: str) -> list[dict[str, Any]]:
# Get collections
trash_ref = db.collection("Trash")
recycleDoc_ref = db.collection("RecycleDoc")
found_trashs = trash_ref.where("name", "==", label).get()
recommends = []
if len(found_trashs) != 0:
# Get list of recycle ids
recycle_ids = found_trashs[0].get("recycleID")
for recycle_id in recycle_ids:
# Get recycle doc
recycle_doc = recycleDoc_ref.document(recycle_id).get()
# Get paths
paths = []
for path in recycle_doc.get('path'):
if path != '':
print(path)
# Split path
bucket_name, object_path = path[len(
"gs://"):].split("/", 1)
# Convert to blob
bucket = storage.bucket(bucket_name)
blob = bucket.blob(object_path)
# Generate signed url
paths.append(blob.generate_signed_url(
expiration=3000000000))
# Append to recommends
recommends.append({
'content': recycle_doc.get("content"),
'path': paths,
'title': recycle_doc.get("title")
})
return recommends
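# Illustrative sketch (not part of the original module): run detection on a local
# image and collect recycling recommendations for every detected label. The image
# path is an example only.
def _example_detect_and_recommend(image_path: str = "sample_bottle.jpg") -> list[dict[str, Any]]:
    image = Image.open(image_path)
    detections = predict_pil_image(image)
    recommendations = []
    for detection in detections:
        recommendations.extend(query_from_firebase(detection["name"]))
    return recommendations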
openai.api_key = os.environ.get("OPENAI_API_KEY")
def get_response(prompt: str):
response = openai.Completion.create(
engine="text-davinci-002",
prompt=prompt,
temperature=0.7,
max_tokens=150
)
return response
| [
"content"
] |
2024-01-10 | ElmiraGhorbani/gpt-speaker-diarization | scripts~speech_to_text.py | import os
import io
import uuid
import auditok
import openai
import soundfile as sf
from .openai_decorator import retry_on_openai_errors
from .utils import get_project_root
# Set the OpenAI API key from environment variable or use a default key
openai.api_key = os.environ.get(
"OPENAI_API_KEY", ""
)
class Whisper:
"""
This class serves as a wrapper for the OpenAI Whisper API to facilitate chatbot responses.
"""
def __init__(self, model_name: str = "whisper-1", whisper_sample_rate: int = 16000):
"""
Initialize the Whisper chatbot instance.
:param model_name: The name of the OpenAI Whisper model to use.
:param whisper_sample_rate: The sample rate for audio processing.
"""
self.model_name = model_name
self.whisper_sample_rate = whisper_sample_rate
def vad_audiotok(self, audio_content):
"""
        Perform voice activity detection using the auditok package.
:param audio_content: Bytes of audio data.
:return: Chunks containing speech detected in the audio.
"""
audio_regions = auditok.split(
audio_content,
sr=self.whisper_sample_rate,
ch=1,
sw=2,
min_dur=0.5,
max_dur=30,
max_silence=0.3,
energy_threshold=30
)
return audio_regions
def audio_process(self, wav_path, is_byte=False):
"""
Process audio data, performing voice activity detection and segmenting the audio.
:param wav_path: Path to the audio file or audio bytes.
:param is_byte: Boolean flag indicating if the input is audio bytes.
:return: Segmented audio chunks containing detected speech.
"""
        if not is_byte:
            with open(wav_path, 'rb') as f:
                wav_bytes = f.read()
            wav, sr = sf.read(wav_path)
        else:
            # `wav_path` already holds raw audio bytes; wrap them so soundfile can read them.
            wav_bytes = wav_path
            wav, sr = sf.read(io.BytesIO(wav_bytes))
audio_regions = self.vad_audiotok(wav_bytes)
wav_segments = []
for r in audio_regions:
start = r.meta.start
end = r.meta.end
segment = wav[int(start * sr):int(end * sr)]
wav_segments.append(segment)
return wav_segments
@retry_on_openai_errors(max_retry=7)
def transcribe(self, audio_file):
"""
Transcribe the provided audio using the OpenAI API.
        :param audio_file: Audio samples (e.g. a segment returned by audio_process) to transcribe.
:return: Transcription text from the audio.
"""
        # Write the audio samples to a temporary file
root_path = get_project_root()
temp_wav_path = f"{root_path}/resources/audios/{str(uuid.uuid4())}.mp3"
with sf.SoundFile(temp_wav_path, 'wb', samplerate=self.whisper_sample_rate, channels=1) as f:
f.write(audio_file)
auf = open(temp_wav_path, 'rb')
# Transcribe using OpenAI API
response = openai.Audio.transcribe(
self.model_name, auf)
# Clean up temporary file
os.remove(temp_wav_path)
return response['text']
@retry_on_openai_errors(max_retry=7)
def transcribe_raw(self, audio_file):
"""
Transcribe the provided audio using the OpenAI API without saving a temporary file.
:param audio_file: Path to the audio file or audio bytes.
:return: Transcription text from the audio.
"""
auf = open(audio_file, 'rb')
# Transcribe using OpenAI API
response = openai.Audio.transcribe(
self.model_name, auf)
return response['text']
if __name__ == "__main__":
# Example usage
wh = Whisper()
with open("./audios/0_edited.wav", "rb") as f:
audio_content = f.read()
print(type(audio_content))
segments = wh.audio_process("./audios/0_edited.wav")
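    # Illustrative continuation (not in the original): each detected speech
    # segment is an audio array that can be passed straight to `transcribe`,
    # which writes it to a temporary file and sends it to the Whisper API.
    for segment in segments:
        print(wh.transcribe(segment))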
| [] |
2024-01-10 | ElmiraGhorbani/gpt-speaker-diarization | scripts~text_analysis.py | import os
import openai
import tiktoken
from .openai_decorator import retry_on_openai_errors
# Set the OpenAI API key using an environment variable or a default value
openai.api_key = os.environ.get(
"OPENAI_API_KEY", ""
)
class AI:
def __init__(self, encoding_model: str = "cl100k_base", openai_model: str = "gpt-4"):
"""
Initialize an AI instance.
Parameters:
encoding_model (str): The name of the encoding model to be used.
openai_model (str): The name of the OpenAI model to be used.
"""
self.tt_encoding = tiktoken.get_encoding(encoding_model)
self.openai_model = openai_model
def token_counter(self, passage):
"""
Count the number of tokens in a given passage.
Parameters:
passage (str): The input text passage.
Returns:
int: The total number of tokens in the passage.
"""
tokens = self.tt_encoding.encode(passage)
total_tokens = len(tokens)
return total_tokens
@retry_on_openai_errors(max_retry=7)
def extract_dialogue(self, transcript, history=[]):
"""
Extract dialogue involving multiple speaker from text.
Parameters:
transcript (str): The text containing the conversation.
history (list): List of message history (optional).
Returns:
str: Extracted dialogue in the specified format.
"""
prompt = """Perform speaker diarization on the given text to identify and extract conversations involving multiple speakers. Present the dialogue in the following structured format:
Speaker 1:
Speaker 2:
Speaker 3:
..."""
while True:
try:
if history:
messages = history
else:
messages = [
{"role": "system", "content": prompt},
]
user_message = {"role": "user",
"content": transcript.replace('\n', '')}
messages.append(user_message)
tokens_per_message = 4
max_token = 8191 - (self.token_counter(prompt) + self.token_counter(
transcript) + (len(messages)*tokens_per_message) + 3)
response = openai.ChatCompletion.create(
model=self.openai_model,
messages=messages,
max_tokens=max_token,
temperature=1,
top_p=1,
presence_penalty=0,
frequency_penalty=0,
)
bot_response = response["choices"][0]["message"]["content"].strip(
)
return bot_response
except openai.error.RateLimitError:
messages.pop(1)
continue
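if __name__ == "__main__":
    # Illustrative sketch (not part of the original module): diarizing a short made-up
    # transcript. Requires a valid OPENAI_API_KEY at runtime.
    ai = AI()
    sample_transcript = (
        "Hi, thanks for joining the call. No problem, happy to be here. "
        "Could you walk me through the report? Sure, let's start with page one."
    )
    print(f"Transcript tokens: {ai.token_counter(sample_transcript)}")
    print(ai.extract_dialogue(sample_transcript))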
| [
"Perform speaker diarization on the given text to identify and extract conversations involving multiple speakers. Present the dialogue in the following structured format:\n Speaker 1:\n Speaker 2:\n Speaker 3:\n ...",
"\n"
] |
2024-01-10 | IkechukwuAbuah/autogen | autogen~oai~client.py | from __future__ import annotations
import os
import sys
from typing import List, Optional, Dict, Callable, Union
import logging
import inspect
from flaml.automl.logger import logger_formatter
from autogen.oai.openai_utils import get_key, oai_price1k
from autogen.token_count_utils import count_token
TOOL_ENABLED = False
try:
import openai
from openai import OpenAI, APIError
from openai.types.chat import ChatCompletion
from openai.types.chat.chat_completion import ChatCompletionMessage, Choice
from openai.types.completion import Completion
from openai.types.completion_usage import CompletionUsage
import diskcache
if openai.__version__ >= "1.1.0":
TOOL_ENABLED = True
ERROR = None
except ImportError:
ERROR = ImportError("Please install openai>=1 and diskcache to use autogen.OpenAIWrapper.")
OpenAI = object
logger = logging.getLogger(__name__)
if not logger.handlers:
# Add the console handler.
_ch = logging.StreamHandler(stream=sys.stdout)
_ch.setFormatter(logger_formatter)
logger.addHandler(_ch)
class OpenAIWrapper:
"""A wrapper class for openai client."""
cache_path_root: str = ".cache"
extra_kwargs = {"cache_seed", "filter_func", "allow_format_str_template", "context", "api_version"}
openai_kwargs = set(inspect.getfullargspec(OpenAI.__init__).kwonlyargs)
total_usage_summary: Dict = None
actual_usage_summary: Dict = None
def __init__(self, *, config_list: List[Dict] = None, **base_config):
"""
Args:
config_list: a list of config dicts to override the base_config.
They can contain additional kwargs as allowed in the [create](/docs/reference/oai/client#create) method. E.g.,
```python
config_list=[
{
"model": "gpt-4",
"api_key": os.environ.get("AZURE_OPENAI_API_KEY"),
"api_type": "azure",
"base_url": os.environ.get("AZURE_OPENAI_API_BASE"),
"api_version": "2023-03-15-preview",
},
{
"model": "gpt-3.5-turbo",
"api_key": os.environ.get("OPENAI_API_KEY"),
"api_type": "open_ai",
"base_url": "https://api.openai.com/v1",
},
{
"model": "llama-7B",
"base_url": "http://127.0.0.1:8080",
"api_type": "open_ai",
}
]
```
base_config: base config. It can contain both keyword arguments for openai client
and additional kwargs.
"""
openai_config, extra_kwargs = self._separate_openai_config(base_config)
if type(config_list) is list and len(config_list) == 0:
logger.warning("openai client was provided with an empty config_list, which may not be intended.")
if config_list:
config_list = [config.copy() for config in config_list] # make a copy before modifying
self._clients = [self._client(config, openai_config) for config in config_list] # could modify the config
self._config_list = [
{**extra_kwargs, **{k: v for k, v in config.items() if k not in self.openai_kwargs}}
for config in config_list
]
else:
self._clients = [self._client(extra_kwargs, openai_config)]
self._config_list = [extra_kwargs]
def _process_for_azure(self, config: Dict, extra_kwargs: Dict, segment: str = "default"):
# deal with api_version
query_segment = f"{segment}_query"
headers_segment = f"{segment}_headers"
api_version = extra_kwargs.get("api_version")
if api_version is not None and query_segment not in config:
config[query_segment] = {"api-version": api_version}
if segment == "default":
# remove the api_version from extra_kwargs
extra_kwargs.pop("api_version")
if segment == "extra":
return
# deal with api_type
api_type = extra_kwargs.get("api_type")
if api_type is not None and api_type.startswith("azure") and headers_segment not in config:
api_key = config.get("api_key", os.environ.get("AZURE_OPENAI_API_KEY"))
config[headers_segment] = {"api-key": api_key}
# remove the api_type from extra_kwargs
extra_kwargs.pop("api_type")
# deal with model
model = extra_kwargs.get("model")
if model is None:
return
if "gpt-3.5" in model:
# hack for azure gpt-3.5
extra_kwargs["model"] = model = model.replace("gpt-3.5", "gpt-35")
base_url = config.get("base_url")
if base_url is None:
raise ValueError("to use azure openai api, base_url must be specified.")
suffix = f"/openai/deployments/{model}"
if not base_url.endswith(suffix):
config["base_url"] += suffix[1:] if base_url.endswith("/") else suffix
def _separate_openai_config(self, config):
"""Separate the config into openai_config and extra_kwargs."""
openai_config = {k: v for k, v in config.items() if k in self.openai_kwargs}
extra_kwargs = {k: v for k, v in config.items() if k not in self.openai_kwargs}
self._process_for_azure(openai_config, extra_kwargs)
return openai_config, extra_kwargs
def _separate_create_config(self, config):
"""Separate the config into create_config and extra_kwargs."""
create_config = {k: v for k, v in config.items() if k not in self.extra_kwargs}
extra_kwargs = {k: v for k, v in config.items() if k in self.extra_kwargs}
return create_config, extra_kwargs
def _client(self, config, openai_config):
"""Create a client with the given config to overrdie openai_config,
after removing extra kwargs.
"""
openai_config = {**openai_config, **{k: v for k, v in config.items() if k in self.openai_kwargs}}
self._process_for_azure(openai_config, config)
client = OpenAI(**openai_config)
return client
@classmethod
def instantiate(
cls,
template: str | Callable | None,
context: Optional[Dict] = None,
allow_format_str_template: Optional[bool] = False,
):
if not context or template is None:
return template
if isinstance(template, str):
return template.format(**context) if allow_format_str_template else template
return template(context)
def _construct_create_params(self, create_config: Dict, extra_kwargs: Dict) -> Dict:
"""Prime the create_config with additional_kwargs."""
# Validate the config
prompt = create_config.get("prompt")
messages = create_config.get("messages")
if (prompt is None) == (messages is None):
raise ValueError("Either prompt or messages should be in create config but not both.")
context = extra_kwargs.get("context")
if context is None:
# No need to instantiate if no context is provided.
return create_config
# Instantiate the prompt or messages
allow_format_str_template = extra_kwargs.get("allow_format_str_template", False)
# Make a copy of the config
params = create_config.copy()
if prompt is not None:
# Instantiate the prompt
params["prompt"] = self.instantiate(prompt, context, allow_format_str_template)
elif context:
# Instantiate the messages
params["messages"] = [
{
**m,
"content": self.instantiate(m["content"], context, allow_format_str_template),
}
if m.get("content")
else m
for m in messages
]
return params
def create(self, **config):
"""Make a completion for a given config using openai's clients.
Besides the kwargs allowed in openai's client, we allow the following additional kwargs.
The config in each client will be overridden by the config.
Args:
- context (Dict | None): The context to instantiate the prompt or messages. Default to None.
It needs to contain keys that are used by the prompt template or the filter function.
                E.g., `prompt="Complete the following sentence: {prefix}", context={"prefix": "Today I feel"}`.
The actual prompt will be:
"Complete the following sentence: Today I feel".
More examples can be found at [templating](/docs/Use-Cases/enhanced_inference#templating).
- `cache_seed` (int | None) for the cache. Default to 41.
An integer cache_seed is useful when implementing "controlled randomness" for the completion.
None for no caching.
- filter_func (Callable | None): A function that takes in the context and the response
and returns a boolean to indicate whether the response is valid. E.g.,
```python
def yes_or_no_filter(context, response):
return context.get("yes_or_no_choice", False) is False or any(
text in ["Yes.", "No."] for text in client.extract_text_or_completion_object(response)
)
```
- allow_format_str_template (bool | None): Whether to allow format string template in the config. Default to false.
- api_version (str | None): The api version. Default to None. E.g., "2023-08-01-preview".
"""
if ERROR:
raise ERROR
last = len(self._clients) - 1
for i, client in enumerate(self._clients):
# merge the input config with the i-th config in the config list
full_config = {**config, **self._config_list[i]}
# separate the config into create_config and extra_kwargs
create_config, extra_kwargs = self._separate_create_config(full_config)
# process for azure
self._process_for_azure(create_config, extra_kwargs, "extra")
# construct the create params
params = self._construct_create_params(create_config, extra_kwargs)
# get the cache_seed, filter_func and context
cache_seed = extra_kwargs.get("cache_seed", 41)
filter_func = extra_kwargs.get("filter_func")
context = extra_kwargs.get("context")
# Try to load the response from cache
if cache_seed is not None:
with diskcache.Cache(f"{self.cache_path_root}/{cache_seed}") as cache:
# Try to get the response from cache
key = get_key(params)
response = cache.get(key, None)
if response is not None:
try:
response.cost
except AttributeError:
                            # update attribute if cost is not calculated
response.cost = self.cost(response)
cache.set(key, response)
self._update_usage_summary(response, use_cache=True)
# check the filter
pass_filter = filter_func is None or filter_func(context=context, response=response)
if pass_filter or i == last:
# Return the response if it passes the filter or it is the last client
response.config_id = i
response.pass_filter = pass_filter
return response
continue # filter is not passed; try the next config
try:
response = self._completions_create(client, params)
except APIError as err:
error_code = getattr(err, "code", None)
if error_code == "content_filter":
# raise the error for content_filter
raise
logger.debug(f"config {i} failed", exc_info=1)
if i == last:
raise
else:
# add cost calculation before caching no matter filter is passed or not
response.cost = self.cost(response)
self._update_usage_summary(response, use_cache=False)
if cache_seed is not None:
# Cache the response
with diskcache.Cache(f"{self.cache_path_root}/{cache_seed}") as cache:
cache.set(key, response)
# check the filter
pass_filter = filter_func is None or filter_func(context=context, response=response)
if pass_filter or i == last:
# Return the response if it passes the filter or it is the last client
response.config_id = i
response.pass_filter = pass_filter
return response
continue # filter is not passed; try the next config
def _completions_create(self, client, params):
completions = client.chat.completions if "messages" in params else client.completions
# If streaming is enabled, has messages, and does not have functions, then
# iterate over the chunks of the response
if params.get("stream", False) and "messages" in params and "functions" not in params:
response_contents = [""] * params.get("n", 1)
finish_reasons = [""] * params.get("n", 1)
completion_tokens = 0
# Set the terminal text color to green
print("\033[32m", end="")
# Send the chat completion request to OpenAI's API and process the response in chunks
for chunk in completions.create(**params):
if chunk.choices:
for choice in chunk.choices:
content = choice.delta.content
finish_reasons[choice.index] = choice.finish_reason
# If content is present, print it to the terminal and update response variables
if content is not None:
print(content, end="", flush=True)
response_contents[choice.index] += content
completion_tokens += 1
else:
print()
# Reset the terminal text color
print("\033[0m\n")
# Prepare the final ChatCompletion object based on the accumulated data
model = chunk.model.replace("gpt-35", "gpt-3.5") # hack for Azure API
prompt_tokens = count_token(params["messages"], model)
response = ChatCompletion(
id=chunk.id,
model=chunk.model,
created=chunk.created,
object="chat.completion",
choices=[],
usage=CompletionUsage(
prompt_tokens=prompt_tokens,
completion_tokens=completion_tokens,
total_tokens=prompt_tokens + completion_tokens,
),
)
for i in range(len(response_contents)):
response.choices.append(
Choice(
index=i,
finish_reason=finish_reasons[i],
message=ChatCompletionMessage(
role="assistant", content=response_contents[i], function_call=None
),
)
)
else:
# If streaming is not enabled or using functions, send a regular chat completion request
# Functions are not supported, so ensure streaming is disabled
params = params.copy()
params["stream"] = False
response = completions.create(**params)
return response
def _update_usage_summary(self, response: ChatCompletion | Completion, use_cache: bool) -> None:
"""Update the usage summary.
        Usage is calculated no matter whether the filter is passed or not.
"""
def update_usage(usage_summary):
if usage_summary is None:
usage_summary = {"total_cost": response.cost}
else:
usage_summary["total_cost"] += response.cost
usage_summary[response.model] = {
"cost": usage_summary.get(response.model, {}).get("cost", 0) + response.cost,
"prompt_tokens": usage_summary.get(response.model, {}).get("prompt_tokens", 0)
+ response.usage.prompt_tokens,
"completion_tokens": usage_summary.get(response.model, {}).get("completion_tokens", 0)
+ response.usage.completion_tokens,
"total_tokens": usage_summary.get(response.model, {}).get("total_tokens", 0)
+ response.usage.total_tokens,
}
return usage_summary
self.total_usage_summary = update_usage(self.total_usage_summary)
if not use_cache:
self.actual_usage_summary = update_usage(self.actual_usage_summary)
def print_usage_summary(self, mode: Union[str, List[str]] = ["actual", "total"]) -> None:
"""Print the usage summary."""
def print_usage(usage_summary, usage_type="total"):
word_from_type = "including" if usage_type == "total" else "excluding"
if usage_summary is None:
print("No actual cost incurred (all completions are using cache).", flush=True)
return
print(f"Usage summary {word_from_type} cached usage: ", flush=True)
print(f"Total cost: {round(usage_summary['total_cost'], 5)}", flush=True)
for model, counts in usage_summary.items():
if model == "total_cost":
continue #
print(
f"* Model '{model}': cost: {round(counts['cost'], 5)}, prompt_tokens: {counts['prompt_tokens']}, completion_tokens: {counts['completion_tokens']}, total_tokens: {counts['total_tokens']}",
flush=True,
)
if self.total_usage_summary is None:
print('No usage summary. Please call "create" first.', flush=True)
return
if isinstance(mode, list):
if len(mode) == 0 or len(mode) > 2:
raise ValueError(f'Invalid mode: {mode}, choose from "actual", "total", ["actual", "total"]')
if "actual" in mode and "total" in mode:
mode = "both"
elif "actual" in mode:
mode = "actual"
elif "total" in mode:
mode = "total"
print("-" * 100, flush=True)
if mode == "both":
print_usage(self.actual_usage_summary, "actual")
print()
if self.total_usage_summary != self.actual_usage_summary:
print_usage(self.total_usage_summary, "total")
else:
print(
"All completions are non-cached: the total cost with cached completions is the same as actual cost.",
flush=True,
)
elif mode == "total":
print_usage(self.total_usage_summary, "total")
elif mode == "actual":
print_usage(self.actual_usage_summary, "actual")
else:
raise ValueError(f'Invalid mode: {mode}, choose from "actual", "total", ["actual", "total"]')
print("-" * 100, flush=True)
def clear_usage_summary(self) -> None:
"""Clear the usage summary."""
self.total_usage_summary = None
self.actual_usage_summary = None
def cost(self, response: Union[ChatCompletion, Completion]) -> float:
"""Calculate the cost of the response."""
model = response.model
if model not in oai_price1k:
# TODO: add logging to warn that the model is not found
return 0
n_input_tokens = response.usage.prompt_tokens
n_output_tokens = response.usage.completion_tokens
tmp_price1K = oai_price1k[model]
# First value is input token rate, second value is output token rate
if isinstance(tmp_price1K, tuple):
return (tmp_price1K[0] * n_input_tokens + tmp_price1K[1] * n_output_tokens) / 1000
return tmp_price1K * (n_input_tokens + n_output_tokens) / 1000
@classmethod
def extract_text_or_completion_object(
cls, response: ChatCompletion | Completion
) -> Union[List[str], List[ChatCompletionMessage]]:
"""Extract the text or ChatCompletion objects from a completion or chat response.
Args:
response (ChatCompletion | Completion): The response from openai.
Returns:
A list of text, or a list of ChatCompletion objects if function_call/tool_calls are present.
"""
choices = response.choices
if isinstance(response, Completion):
return [choice.text for choice in choices]
if TOOL_ENABLED:
return [
choice.message
if choice.message.function_call is not None or choice.message.tool_calls is not None
else choice.message.content
for choice in choices
]
else:
return [
choice.message if choice.message.function_call is not None else choice.message.content
for choice in choices
]
# TODO: logging
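# Illustrative usage sketch (not part of the original module), based on the docstrings
# above. The model name, environment variable, and prompt are examples only.
def _example_wrapper_usage():
    wrapper = OpenAIWrapper(config_list=[{"model": "gpt-4", "api_key": os.environ.get("OPENAI_API_KEY")}])
    response = wrapper.create(
        messages=[{"role": "user", "content": "Complete the following sentence: {prefix}"}],
        context={"prefix": "Today I feel"},
        allow_format_str_template=True,
        cache_seed=41,  # default cache seed; set to None to disable caching
    )
    print(wrapper.extract_text_or_completion_object(response))
    wrapper.print_usage_summary()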
| [
"content",
"allow_format_str_template"
] |
2024-01-10 | lauutt/gpt-4-chat-window | virtual-assistant.py | #!/usr/bin/env python3
# Imports
import os
import openai
import pprint
from tkinter import *
from tkinter import ttk
from tkinter import messagebox
openai.api_key = os.environ["OPENAI_API_KEY"]
def update_chat(messages, role, content):
"""Añade un mensaje al historial de conversación."""
messages.append({"role": role, "content": content})
return messages
def get_chatgpt_response(messages):
"""Obtiene la respuesta del modelo GPT-4"""
response = openai.ChatCompletion.create(
model="gpt-4",
messages=messages,
max_tokens=150
)
return response['choices'][0]['message']['content']
def display_messages(messages, text_widget):
text_widget.config(state=NORMAL)
text_widget.delete("1.0", END)
for msg in messages:
if msg['role'] == "user":
text_widget.insert(END, f"Usuario: {msg['content']}\n")
elif msg['role'] == 'assistant':
text_widget.insert(END, f"Asistente: {msg['content']}\n")
else:
text_widget.insert(END, f"{msg['role']}: {msg['content']}\n")
text_widget.config(state=DISABLED)
def main():
root = Tk()
root.title("Asistente Virtual")
root.geometry("600x400")
frame = ttk.Frame(root)
frame.pack(fill=BOTH, expand=1)
scrollbar = Scrollbar(frame)
messagebox_text = Text(frame, wrap=WORD, yscrollcommand=scrollbar.set, state=DISABLED)
messagebox_text.pack(side=LEFT, fill=BOTH, expand=1)
scrollbar.pack(side=RIGHT, fill=Y)
scrollbar.config(command=messagebox_text.yview)
entry_frame = ttk.Frame(root)
entry_frame.pack(side=LEFT, fill=X, padx=10, pady=10)
user_entry = Entry(entry_frame, width=60)
user_entry.pack(side=LEFT)
user_entry.focus_set()
def on_send_click():
user_text = user_entry.get()
if user_text.strip() != "":
update_chat(messages, "user", user_text)
display_messages(messages, messagebox_text)
user_entry.delete(0, END)
messagebox_text.see("end")
ai_text = get_chatgpt_response(messages)
update_chat(messages, "assistant", ai_text)
display_messages(messages, messagebox_text)
messagebox_text.see("end")
send_button = ttk.Button(entry_frame, text="Enviar", command=on_send_click)
send_button.pack(side=LEFT)
root.bind('<Return>', lambda event: on_send_click())
intro_message = "¡Hola! Soy tu asistente virtual. ¿En qué puedo ayudarte hoy?"
update_chat(messages, "assistant", intro_message)
display_messages(messages, messagebox_text)
root.mainloop()
if __name__ == "__main__":
    messages = []  # Message history storage
main()
| [] |
2024-01-10 | alibaba/havenask | llm~llm_adapter~factory.py | import os
import logging
from .openai import OpenAI
from .chatglm import ChatGLM
def get_llm_adapter():
llm_name = os.environ.get('LLM_NAME')
if llm_name == 'OpenAI':
logging.info('Use OpenAI')
return OpenAI
elif llm_name == 'ChatGLM':
logging.info('Use ChatGLM')
return ChatGLM
else:
raise RuntimeError(f'unknown llm: {llm_name}') | [] |
2024-01-10 | Gil-Frenkel/langchain-forked | langchain~sql_database.py | """SQLAlchemy wrapper around a database."""
from __future__ import annotations
import warnings
from typing import Any, Iterable, List, Optional
import sqlalchemy
from sqlalchemy import (
MetaData,
Table,
create_engine,
inspect,
select,
text,
)
from sqlalchemy.engine import Engine
from sqlalchemy.exc import ProgrammingError, SQLAlchemyError
from sqlalchemy.schema import CreateTable
from langchain import utils
def _format_index(index: sqlalchemy.engine.interfaces.ReflectedIndex) -> str:
return (
f'Name: {index["name"]}, Unique: {index["unique"]},'
f' Columns: {str(index["column_names"])}'
)
class SQLDatabase:
"""SQLAlchemy wrapper around a database."""
def __init__(
self,
engine: Engine,
schema: Optional[str] = None,
metadata: Optional[MetaData] = None,
ignore_tables: Optional[List[str]] = None,
include_tables: Optional[List[str]] = None,
sample_rows_in_table_info: int = 3,
indexes_in_table_info: bool = False,
custom_table_info: Optional[dict] = None,
view_support: bool = False,
):
"""Create engine from database URI."""
self._engine = engine
self._schema = schema
if include_tables and ignore_tables:
raise ValueError("Cannot specify both include_tables and ignore_tables")
self._inspector = inspect(self._engine)
# including view support by adding the views as well as tables to the all
# tables list if view_support is True
self._all_tables = set(
self._inspector.get_table_names(schema=schema)
+ (self._inspector.get_view_names(schema=schema) if view_support else [])
)
self._include_tables = set(include_tables) if include_tables else set()
if self._include_tables:
missing_tables = self._include_tables - self._all_tables
if missing_tables:
raise ValueError(
f"include_tables {missing_tables} not found in database"
)
self._ignore_tables = set(ignore_tables) if ignore_tables else set()
if self._ignore_tables:
missing_tables = self._ignore_tables - self._all_tables
if missing_tables:
raise ValueError(
f"ignore_tables {missing_tables} not found in database"
)
usable_tables = self.get_usable_table_names()
self._usable_tables = set(usable_tables) if usable_tables else self._all_tables
if not isinstance(sample_rows_in_table_info, int):
raise TypeError("sample_rows_in_table_info must be an integer")
self._sample_rows_in_table_info = sample_rows_in_table_info
self._indexes_in_table_info = indexes_in_table_info
self._custom_table_info = custom_table_info
if self._custom_table_info:
if not isinstance(self._custom_table_info, dict):
raise TypeError(
"table_info must be a dictionary with table names as keys and the "
"desired table info as values"
)
# only keep the tables that are also present in the database
intersection = set(self._custom_table_info).intersection(self._all_tables)
self._custom_table_info = dict(
(table, self._custom_table_info[table])
for table in self._custom_table_info
if table in intersection
)
self._metadata = metadata or MetaData()
# including view support if view_support = true
self._metadata.reflect(
views=view_support,
bind=self._engine,
only=list(self._usable_tables),
schema=self._schema,
)
@classmethod
def from_uri(
cls, database_uri: str, engine_args: Optional[dict] = None, **kwargs: Any
) -> SQLDatabase:
"""Construct a SQLAlchemy engine from URI."""
_engine_args = engine_args or {}
return cls(create_engine(database_uri, **_engine_args), **kwargs)
@classmethod
def from_databricks(
cls,
catalog: str,
schema: str,
host: Optional[str] = None,
api_token: Optional[str] = None,
warehouse_id: Optional[str] = None,
cluster_id: Optional[str] = None,
engine_args: Optional[dict] = None,
**kwargs: Any,
) -> SQLDatabase:
"""
Class method to create an SQLDatabase instance from a Databricks connection.
This method requires the 'databricks-sql-connector' package. If not installed,
it can be added using `pip install databricks-sql-connector`.
Args:
catalog (str): The catalog name in the Databricks database.
schema (str): The schema name in the catalog.
host (Optional[str]): The Databricks workspace hostname, excluding
'https://' part. If not provided, it attempts to fetch from the
environment variable 'DATABRICKS_HOST'. If still unavailable and if
running in a Databricks notebook, it defaults to the current workspace
hostname. Defaults to None.
api_token (Optional[str]): The Databricks personal access token for
accessing the Databricks SQL warehouse or the cluster. If not provided,
it attempts to fetch from 'DATABRICKS_API_TOKEN'. If still unavailable
and running in a Databricks notebook, a temporary token for the current
user is generated. Defaults to None.
warehouse_id (Optional[str]): The warehouse ID in the Databricks SQL. If
provided, the method configures the connection to use this warehouse.
Cannot be used with 'cluster_id'. Defaults to None.
cluster_id (Optional[str]): The cluster ID in the Databricks Runtime. If
provided, the method configures the connection to use this cluster.
Cannot be used with 'warehouse_id'. If running in a Databricks notebook
and both 'warehouse_id' and 'cluster_id' are None, it uses the ID of the
cluster the notebook is attached to. Defaults to None.
engine_args (Optional[dict]): The arguments to be used when connecting
Databricks. Defaults to None.
**kwargs (Any): Additional keyword arguments for the `from_uri` method.
Returns:
SQLDatabase: An instance of SQLDatabase configured with the provided
Databricks connection details.
Raises:
ValueError: If 'databricks-sql-connector' is not found, or if both
'warehouse_id' and 'cluster_id' are provided, or if neither
'warehouse_id' nor 'cluster_id' are provided and it's not executing
inside a Databricks notebook.
"""
try:
from databricks import sql # noqa: F401
except ImportError:
raise ValueError(
"databricks-sql-connector package not found, please install with"
" `pip install databricks-sql-connector`"
)
context = None
try:
from dbruntime.databricks_repl_context import get_context
context = get_context()
except ImportError:
pass
default_host = context.browserHostName if context else None
if host is None:
host = utils.get_from_env("host", "DATABRICKS_HOST", default_host)
default_api_token = context.apiToken if context else None
if api_token is None:
api_token = utils.get_from_env(
"api_token", "DATABRICKS_API_TOKEN", default_api_token
)
if warehouse_id is None and cluster_id is None:
if context:
cluster_id = context.clusterId
else:
raise ValueError(
"Need to provide either 'warehouse_id' or 'cluster_id'."
)
if warehouse_id and cluster_id:
raise ValueError("Can't have both 'warehouse_id' or 'cluster_id'.")
if warehouse_id:
http_path = f"/sql/1.0/warehouses/{warehouse_id}"
else:
http_path = f"/sql/protocolv1/o/0/{cluster_id}"
uri = (
f"databricks://token:{api_token}@{host}?"
f"http_path={http_path}&catalog={catalog}&schema={schema}"
)
return cls.from_uri(database_uri=uri, engine_args=engine_args, **kwargs)
@property
def dialect(self) -> str:
"""Return string representation of dialect to use."""
return self._engine.dialect.name
def get_usable_table_names(self) -> Iterable[str]:
"""Get names of tables available."""
if self._include_tables:
return self._include_tables
return self._all_tables - self._ignore_tables
def get_table_names(self) -> Iterable[str]:
"""Get names of tables available."""
warnings.warn(
"This method is deprecated - please use `get_usable_table_names`."
)
return self.get_usable_table_names()
@property
def table_info(self) -> str:
"""Information about all tables in the database."""
return self.get_table_info()
def get_table_info(self, table_names: Optional[List[str]] = None) -> str:
"""Get information about specified tables.
Follows best practices as specified in: Rajkumar et al, 2022
(https://arxiv.org/abs/2204.00498)
If `sample_rows_in_table_info`, the specified number of sample rows will be
appended to each table description. This can increase performance as
demonstrated in the paper.
"""
all_table_names = self.get_usable_table_names()
if table_names is not None:
missing_tables = set(table_names).difference(all_table_names)
if missing_tables:
raise ValueError(f"table_names {missing_tables} not found in database")
all_table_names = table_names
meta_tables = [
tbl
for tbl in self._metadata.sorted_tables
if tbl.name in set(all_table_names)
and not (self.dialect == "sqlite" and tbl.name.startswith("sqlite_"))
]
tables = []
for table in meta_tables:
if self._custom_table_info and table.name in self._custom_table_info:
tables.append(self._custom_table_info[table.name])
continue
# add create table command
create_table = str(CreateTable(table).compile(self._engine))
table_info = f"{create_table.rstrip()}"
has_extra_info = (
self._indexes_in_table_info or self._sample_rows_in_table_info
)
if has_extra_info:
table_info += "\n\n/*"
if self._indexes_in_table_info:
table_info += f"\n{self._get_table_indexes(table)}\n"
if self._sample_rows_in_table_info:
table_info += f"\n{self._get_sample_rows(table)}\n"
if has_extra_info:
table_info += "*/"
tables.append(table_info)
final_str = "\n\n".join(tables)
return final_str
def _get_table_indexes(self, table: Table) -> str:
indexes = self._inspector.get_indexes(table.name)
indexes_formatted = "\n".join(map(_format_index, indexes))
return f"Table Indexes:\n{indexes_formatted}"
def _get_sample_rows(self, table: Table) -> str:
# build the select command
command = select(table).limit(self._sample_rows_in_table_info)
# save the columns in string format
columns_str = "\t".join([col.name for col in table.columns])
try:
# get the sample rows
with self._engine.connect() as connection:
sample_rows_result = connection.execute(command) # type: ignore
# shorten values in the sample rows
sample_rows = list(
map(lambda ls: [str(i)[:100] for i in ls], sample_rows_result)
)
# save the sample rows in string format
sample_rows_str = "\n".join(["\t".join(row) for row in sample_rows])
# in some dialects when there are no rows in the table a
# 'ProgrammingError' is returned
except ProgrammingError:
sample_rows_str = ""
return (
f"{self._sample_rows_in_table_info} rows from {table.name} table:\n"
f"{columns_str}\n"
f"{sample_rows_str}"
)
def run(self, command: str, fetch: str = "all") -> str:
"""Execute a SQL command and return a string representing the results.
If the statement returns rows, a string of the results is returned.
If the statement returns no rows, an empty string is returned.
"""
with self._engine.begin() as connection:
if self._schema is not None:
if self.dialect == "snowflake":
connection.exec_driver_sql(
f"ALTER SESSION SET search_path='{self._schema}'"
)
elif self.dialect == "bigquery":
connection.exec_driver_sql(f"SET @@dataset_id='{self._schema}'")
else:
connection.exec_driver_sql(f"SET search_path TO {self._schema}")
cursor = connection.execute(text(command))
column_names = tuple(cursor.keys())
if cursor.returns_rows:
if fetch == "all":
result = cursor.fetchall()
elif fetch == "one":
result = cursor.fetchone()[0] # type: ignore
else:
raise ValueError("Fetch parameter must be either 'one' or 'all'")
result.insert(0, column_names)
return str(result)
return ""
def get_table_info_no_throw(self, table_names: Optional[List[str]] = None) -> str:
"""Get information about specified tables.
Follows best practices as specified in: Rajkumar et al, 2022
(https://arxiv.org/abs/2204.00498)
If `sample_rows_in_table_info`, the specified number of sample rows will be
appended to each table description. This can increase performance as
demonstrated in the paper.
"""
try:
return self.get_table_info(table_names)
except ValueError as e:
"""Format the error message"""
return f"Error: {e}"
def run_no_throw(self, command: str, fetch: str = "all") -> str:
"""Execute a SQL command and return a string representing the results.
If the statement returns rows, a string of the results is returned.
If the statement returns no rows, an empty string is returned.
If the statement throws an error, the error message is returned.
"""
try:
return self.run(command, fetch)
except SQLAlchemyError as e:
"""Format the error message"""
return f"Error: {e}"
| [] |
2024-01-10 | OrrAvrech/human-feedback | data~data_collection.py | import os
import json
import openai
import yt_dlp
import pyrallis
import whisperx
import subprocess
from pathlib import Path
from typing import NamedTuple, Optional
from transformers import pipeline
from dataclasses import dataclass, asdict
from moviepy.video.io.VideoFileClip import VideoFileClip
from utils import read_text
from data.data_config import DataConfig, ScraperConfig
import jinja2 as j2
openai.api_key = os.getenv("OPENAI_API_KEY")
class ASRModelZoo(NamedTuple):
whisper_small = "openai/whisper-small"
wav2vec2 = "jonatasgrosman/wav2vec2-large-xlsr-53-english"
whisperx_large = "large-v2"
class Sentiment(NamedTuple):
positive = "Positive"
neutral = "Neutral"
negative = "Negative"
@dataclass
class TextSegment:
text: str
start: float
end: float
sentiment: Optional[str] = None
class Whisper:
def __init__(self, model_name: str, batch_size: int):
self.model_name = model_name
self.batch_size = batch_size
def transcribe(self, audio_path: Path) -> list[dict]:
# Load pre-trained ASR model
transcriber = pipeline("automatic-speech-recognition", model=self.model_name)
transcription = transcriber(
str(audio_path), return_timestamps=True, chunk_length_s=self.batch_size
)
text_segments = [
asdict(
TextSegment(
text=seg["text"],
start=seg["timestamp"][0],
end=seg["timestamp"][1],
)
)
for seg in transcription["chunks"]
]
return text_segments
class WhisperX(Whisper):
def __init__(
self,
model_name: str,
batch_size: int,
device: str,
compute_type: str = "float16",
):
self.compute_type = compute_type
self.device = device
super().__init__(model_name=model_name, batch_size=batch_size)
def transcribe(self, audio_path: Path) -> list[dict]:
model = whisperx.load_model(
self.model_name, self.device, compute_type=self.compute_type
)
audio = whisperx.load_audio(str(audio_path))
result = model.transcribe(audio, batch_size=self.batch_size)
text_segments = [
asdict(TextSegment(text=seg["text"], start=seg["start"], end=seg["end"]))
for seg in result["segments"]
]
return text_segments
def scrape_videos(
cfg: ScraperConfig, action: str, dataset_dir: Path, video_prefix: str = "video"
):
def filter_videos(info_dict):
duration = info_dict.get("duration")
lang = info_dict.get("language")
if duration and (
duration < cfg.min_vid_duration or duration > cfg.max_vid_duration
):
return "The video is either too short or too long"
if not lang == "en":
return "This video is not in English"
prompt = cfg.prefix_prompt + action
ydl_opts = {
"restrictfilenames": cfg.restrict_filenames,
"match_filter": filter_videos,
"format": cfg.ext,
"noplaylist": cfg.no_playlist,
"quiet": cfg.quiet_mode,
"writeautomaticsub": cfg.write_auto_subs,
"writeinfojson": cfg.write_info_json,
"ignoreerrors": True,
"outtmpl": {
"default": f"{dataset_dir / action / video_prefix}/%(title)s.%(ext)s"
},
}
max_num_urls = cfg.max_num_url
url = cfg.urls
if url is None:
url = f"{cfg.extractor}{max_num_urls}:{prompt}"
with yt_dlp.YoutubeDL(ydl_opts) as ydl:
error = ydl.download(url)
print(error)
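# Illustrative sketch (not part of the original module): scraping one action with an
# already-loaded ScraperConfig (e.g. parsed with pyrallis from the project's data
# config). The action string and output directory are examples only.
def _example_scrape(cfg: ScraperConfig, dataset_dir: Path = Path("./dataset")) -> None:
    # Downloads up to cfg.max_num_url English videos matching cfg.prefix_prompt + action
    # into <dataset_dir>/<action>/video/.
    scrape_videos(cfg, action="deadlift tutorial", dataset_dir=dataset_dir)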
def extract_audio(
vid_path: Path, cache: bool, prefix: str = "audio", ext: str = "wav"
) -> Path:
audio_dir = vid_path.parents[1] / prefix
audio_dir.mkdir(exist_ok=True)
filepath = audio_dir / f"{vid_path.stem}.{ext}"
if cache is True and filepath.exists():
print(f"skip audio-extractor, use local {filepath.name}")
else:
with VideoFileClip(str(vid_path)) as clip:
clip.audio.write_audiofile(filepath)
return filepath
def transcribe_speech(
audio_path: Path, batch_size: int, cache: bool, prefix: str = "text"
) -> Path:
text_dir = audio_path.parents[1] / prefix
text_dir.mkdir(exist_ok=True)
filepath = text_dir / f"{audio_path.stem}.json"
if cache is True and filepath.exists():
print(f"skip transcriber, use local {filepath.name}")
else:
s2t_model = WhisperX(
model_name=ASRModelZoo.whisperx_large,
batch_size=batch_size,
device="cuda",
)
transcription = s2t_model.transcribe(audio_path)
with open(filepath, "w") as fp:
json.dump(transcription, fp)
return filepath
def prepare_prompt(
text_path: Path, system_template_path: Path, user_template_path: Path
) -> tuple[str, str]:
data = read_text(text_path)
text_segments = [segment["text"] for segment in data]
txt = ""
for i, seg in enumerate(text_segments):
txt += f"{i + 1}.{seg}\n"
templates_dir = system_template_path.parent
environment = j2.Environment(loader=j2.FileSystemLoader(templates_dir))
system_template = environment.get_template(system_template_path.name)
user_template = environment.get_template(user_template_path.name)
sentences = {"sentences": txt}
system_prompt = system_template.render()
user_prompt = user_template.render(sentences)
return system_prompt, user_prompt
def write_gpt_response(
system_prompt: str, user_prompt: str, output_path: Path, cache: bool
):
if cache is True and output_path.exists():
print(f"skip ChatGPT, use local {output_path.name}")
else:
response = openai.ChatCompletion.create(
model="gpt-3.5-turbo",
messages=[
{"role": "system", "content": system_prompt},
{"role": "user", "content": user_prompt},
],
)
with open(output_path, "w") as fp:
json.dump(response, fp)
def get_gpt_sentences(gpt_path: Path) -> list[str]:
response = read_text(gpt_path)
sentences = response["choices"][0]["message"]["content"].split("\n")
return sentences
def parse_annotations(results_path: Path, vid_name: str) -> list[list]:
annotations = read_text(results_path)
vid_names = {x["data"].get("name"): i for i, x in enumerate(annotations)}
vid_idx = vid_names.get(vid_name)
vid_annotations = None if vid_idx is None else annotations[vid_idx]
new_segments = []
if vid_annotations is not None:
results = vid_annotations["annotations"][0]["result"]
for res in results:
label = res["value"]["labels"][0]
text = res["value"]["text"]
new_segments.append([label, text])
return new_segments
def calculate_word_durations(
old_segments, old_time_stamps
) -> tuple[list, list, list, list]:
word_durations = []
word_durations_plus_jump = []
bools = []
jumps_alone = []
for i, segment in enumerate(old_segments):
start_time, end_time = old_time_stamps[i]
if i > 0:
_, prev_end = old_time_stamps[i - 1]
jump = start_time - prev_end
else:
jump = 0
words = segment.split()
word_duration = (end_time - start_time) / len(words)
word_durations_plus_jump.extend(
[
word_duration + jump if i == 0 else word_duration
for i in range(len(words))
]
)
jumps_alone.extend([jump for i in range(len(words))])
if i % 2 == 0:
bools.extend([True for i in range(len(words))])
else:
bools.extend([False for i in range(len(words))])
word_durations.extend(
[word_duration if i == 0 else word_duration for i in range(len(words))]
)
return word_durations, word_durations_plus_jump, bools, jumps_alone
def calculate_new_time_stamps(
old_segments: list[str], old_time_stamps: list[tuple], new_segments: list[list]
) -> list[tuple]:
(
word_durations,
word_durations_plus_jump,
bools,
jumps_alone,
) = calculate_word_durations(old_segments, old_time_stamps)
new_time_stamps = []
current_word = 0 # Initialize current_word index
current_start = old_time_stamps[0][0]
for label, text in new_segments:
words = text.split()
if all(bools[current_word : current_word + len(words)]) or not any(
bools[current_word : current_word + len(words)]
):
segment_duration = sum(
word_durations[current_word : current_word + len(words)]
)
new_time_stamps.append((current_start, current_start + segment_duration))
else:
segment_duration = sum(
word_durations_plus_jump[current_word : current_word + len(words)]
)
new_time_stamps.append((current_start, current_start + segment_duration))
current_start = current_start + jumps_alone[current_word]
current_word += len(words) # Increment by word count
current_start += segment_duration
return new_time_stamps
def accumulate_text_by_interpolation(
text_path: Path, new_segments: list[list]
) -> list[TextSegment]:
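    """Turn (label, text) pairs into TextSegment objects with interpolated timestamps.
    The original transcription segments provide the timing that
    calculate_new_time_stamps redistributes; each new segment keeps the
    sentiment label it was given.
    """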
text_data = read_text(text_path)
old_segments = [segment["text"] for segment in text_data]
old_timestamps = [(segment["start"], segment["end"]) for segment in text_data]
new_timestamps = calculate_new_time_stamps(
old_segments, old_timestamps, new_segments
)
chunks = []
for segment, timestamp in zip(new_segments, new_timestamps):
chunks.append(
TextSegment(
text=segment[1],
start=timestamp[0],
end=timestamp[1],
sentiment=segment[0],
)
)
return chunks
def accumulate_text_by_sentiment(
text_path: Path, sentiments: list[str]
) -> list[TextSegment]:
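    """Merge consecutive transcription segments that share a sentiment.
    Neutral segments are folded into the running paragraph; when the sentiment
    changes, the paragraph is closed (negative wins over positive if both were
    accumulated) and a new paragraph is started.
    """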
data = read_text(text_path)
text_segments = [segment["text"] for segment in data]
samples = []
end = None
text_paragraph = text_segments[0]
accumulated_sentiments = [sentiments[0]]
start = data[0]["timestamp"][0]
for i in range(1, len(text_segments)):
curr_segment = text_segments[i]
curr_sentiment = sentiments[i]
prev_sentiment = sentiments[i - 1]
if curr_sentiment == prev_sentiment or curr_sentiment == Sentiment.neutral:
text_paragraph += curr_segment
accumulated_sentiments.append(curr_sentiment)
end = data[i]["timestamp"][-1]
if end is None:
end = data[i]["timestamp"][0]
else:
sentiment = Sentiment.positive
if Sentiment.negative in accumulated_sentiments:
sentiment = Sentiment.negative
samples.append(
TextSegment(
text=text_paragraph, start=start, end=end, sentiment=sentiment
)
)
start = data[i]["timestamp"][0]
end = data[i]["timestamp"][-1]
text_paragraph = text_segments[i]
accumulated_sentiments = [sentiments[i]]
if Sentiment.positive in accumulated_sentiments:
sentiment = Sentiment.positive
elif Sentiment.negative in accumulated_sentiments:
sentiment = Sentiment.negative
else:
sentiment = Sentiment.neutral
print(f"all sentences are {sentiment}")
samples.append(
TextSegment(text=text_paragraph, start=start, end=end, sentiment=sentiment)
)
return samples
def cut_video_by_text_chunks(
vid_path: Path,
chunks: list[TextSegment],
video_output_dir: Path,
text_output_dir: Path,
cache: bool,
):
vid_segment_dir = video_output_dir / vid_path.stem
text_segment_dir = text_output_dir / vid_path.stem
if cache is True and vid_segment_dir.exists() and text_segment_dir.exists():
print(f"skip cutting video chunks, use existing chunks in {vid_segment_dir}")
else:
vid_segment_dir.mkdir(exist_ok=True, parents=True)
text_segment_dir.mkdir(exist_ok=True, parents=True)
with VideoFileClip(str(vid_path)) as vid:
for sentence in chunks:
start, end = sentence.start, sentence.end
sub_vid = vid.subclip(start, end)
segment_name = f"{vid_path.stem}_{start:.{1}f}_{end:.{1}f}"
vid_segment_path = vid_segment_dir / f"{segment_name}.mp4"
text_segment_path = text_segment_dir / f"{segment_name}.json"
sub_vid.write_videofile(
str(vid_segment_path),
codec="libx264",
audio_codec="aac",
temp_audiofile="temp-audio.m4a",
remove_temp=True,
)
with open(text_segment_path, "w") as fp:
json.dump(asdict(sentence), fp)
sub_vid.close()
vid.close()
def run_alphapose_on_videos(root_dir: Path, output_dir: Path, vid_dir: Path):
output_dir.mkdir(exist_ok=True, parents=True)
cfg_path = (
root_dir
/ "configs/halpe_coco_wholebody_136/resnet/256x192_res50_lr1e-3_2x-dcn-combined.yaml"
)
ckpt = root_dir / "pretrained_models/multi_domain_fast50_dcn_combined_256x192.pth"
for i, vid_path in enumerate(vid_dir.rglob("*.mp4")):
vid_output_dir = output_dir / vid_path.stem
vid_output_dir.mkdir(exist_ok=True)
subprocess.run(
f"{root_dir / 'scripts/inference.sh'} {cfg_path} {ckpt} {vid_path} {vid_output_dir}",
shell=True,
)
@pyrallis.wrap()
def main(cfg: DataConfig):
dataset_dir = cfg.dataset_dir
actions = cfg.actions
# scrape new videos or use local videos otherwise
if cfg.scraper.run is True:
for action in actions:
print(f"{action}:")
scrape_videos(cfg=cfg.scraper, action=action, dataset_dir=dataset_dir)
out_gpt_dir = cfg.output_dir / "gpt"
out_gpt_dir.mkdir(exist_ok=True, parents=True)
video_output_dir = cfg.output_dir / "video"
text_output_dir = cfg.output_dir / "text"
files = dataset_dir.rglob("*.mp4")
if len(cfg.filenames) > 0:
files = [dataset_dir / "video" / name for name in cfg.filenames]
for vid_path in files:
# extract audio and transcription from videos
audio_path = extract_audio(vid_path, cache=cfg.audio_extractor.use_cache)
text_path = transcribe_speech(
audio_path,
cfg.transcriber.chunk_length_s,
cache=cfg.transcriber.use_cache,
prefix="text",
)
system_prompt, user_prompt = prepare_prompt(
text_path,
system_template_path=cfg.templates.system_prompt_path,
user_template_path=cfg.templates.user_prompt_path,
)
if cfg.sentence_segments.use_manual_annotations:
new_segments = parse_annotations(
cfg.sentence_segments.manual_results_path, vid_path.stem
)
else:
gpt_path = out_gpt_dir / text_path.name
# OPENAI GPT API Call
write_gpt_response(
system_prompt,
user_prompt,
gpt_path,
cache=cfg.sentence_segments.use_cache,
)
sentences = get_gpt_sentences(gpt_path)
new_segments = [sentence.split(": ") for sentence in sentences]
chunks = accumulate_text_by_interpolation(text_path, new_segments)
# segment videos by GPT outputs
cut_video_by_text_chunks(
vid_path,
chunks,
video_output_dir,
text_output_dir,
cache=cfg.video_cutter.use_cache,
)
# run alphapose
out_pose_dir = cfg.output_dir / "pose"
run_alphapose_on_videos(
root_dir=cfg.alphapose.root_dir,
output_dir=out_pose_dir,
vid_dir=video_output_dir,
)
if __name__ == "__main__":
main()
| [] |
2024-01-10 | jalakoo/neo4j-chatbot | src~llms~llm_openai.py | from llms.llm_base import LLMBase
import openai
from langchain.vectorstores.neo4j_vector import Neo4jVector
from langchain.embeddings.openai import OpenAIEmbeddings
# OpenAI w/ Langchain
class LLMOpenAI(LLMBase):
def __init__(self, model:str, key:str):
self.model = model
openai.api_key = key
# Chat directly using openai only
def chat_completion(self,
prior_messages: list[any],
neo4j_uri: str,
neo4j_user: str,
neo4j_password: str):
full_response = ""
for response in openai.ChatCompletion.create(
model=self.model,
messages=[
{"role": m["role"], "content": m["content"]}
for m in prior_messages
],
stream=True,
):
full_response += response.choices[0].delta.get("content", "")
return full_response | [
"content"
] |
2024-01-10 | jalakoo/neo4j-chatbot | src~llms~llm_vector_search.py | from llms.llm_base import LLMBase
import openai
from langchain.vectorstores.neo4j_vector import Neo4jVector
from langchain.embeddings.openai import OpenAIEmbeddings
# OpenAI + Neo4j + Langchain
class LLMVectorSearch(LLMBase):
def __init__(self, model:str, key:str):
self.model = model
openai.api_key = key
def chat_completion(self,
prior_messages: list[any],
neo4j_uri: str,
neo4j_user: str,
neo4j_password: str):
# latest prompt = last prior_message
query = prior_messages[-1]['content']
documents = [d['content'] for d in prior_messages[:-1]]
# TODO: Untested
neo4j_vector = Neo4jVector.from_documents(
documents,
OpenAIEmbeddings(),
url=neo4j_uri,
username=neo4j_user,
password=neo4j_password
)
results = neo4j_vector.similarity_search(query, k=1)
return results[0].page_content | [] |
2024-01-10 | jalakoo/neo4j-chatbot | src~llms~llm_cypher_rag.py | from llms.llm_base import LLMBase
from neo4j import GraphDatabase
from neo4j.exceptions import CypherSyntaxError
import openai
# Queries for Neo4j database introspection
node_properties_query = """
CALL apoc.meta.data()
YIELD label, other, elementType, type, property
WHERE NOT type = "RELATIONSHIP" AND elementType = "node"
WITH label AS nodeLabels, collect(property) AS properties
RETURN {labels: nodeLabels, properties: properties} AS output
"""
rel_properties_query = """
CALL apoc.meta.data()
YIELD label, other, elementType, type, property
WHERE NOT type = "RELATIONSHIP" AND elementType = "relationship"
WITH label AS nodeLabels, collect(property) AS properties
RETURN {type: nodeLabels, properties: properties} AS output
"""
rel_query = """
CALL apoc.meta.data()
YIELD label, other, elementType, type, property
WHERE type = "RELATIONSHIP" AND elementType = "node"
RETURN {source: label, relationship: property, target: other} AS output
"""
def schema_text(node_props, rel_props, rels):
return f"""
This is the schema representation of the Neo4j database.
Node properties are the following:
{node_props}
Relationship properties are the following:
{rel_props}
Relationship point from source to target nodes
{rels}
Make sure to respect relationship types and directions
"""
graph2text_system = f"""
You are an assistant that helps to generate text to form nice and human understandable answers based on the provided information.
The latest prompt contains the information, and you need to generate a human readable response based on the given information.
Make it sound like the information are coming from an AI assistant, but don't add any information.
Do not add any additional information that is not explicitly provided in the latest prompt.
I repeat, do not add any information that is not explicitly given.
"""
class LLMCypherRAG(LLMBase):
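    # Text-to-Cypher RAG: prompts the LLM with the introspected graph schema to
    # generate a Cypher query, runs it against Neo4j, and on a CypherSyntaxError
    # retries once with the error message appended (a simple self-healing flow).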
def __init__(self, model:str, key:str):
self.model = model
openai.api_key = key
self.driver = None
# self.schema = self.generate_schema()
def get_system_message(self):
return f"""
Task: Generate Cypher queries to query a Neo4j graph database based on the provided schema definition.
Instructions:
Use only the provided relationship types and properties.
Do not use any other relationship types or properties that are not provided.
If you cannot generate a Cypher statement based on the provided schema, explain the reason to the user.
Schema:
{self.schema}
Note: Do not include any explanations or apologies in your responses.
"""
def refresh_schema(self):
self.schema = self.generate_schema()
def generate_schema(self):
if self.driver is None:
return
node_props = self.query_database(node_properties_query)
rel_props = self.query_database(rel_properties_query)
rels = self.query_database(rel_query)
return schema_text(node_props, rel_props, rels)
def query_database(self, neo4j_query, params={}):
with self.driver.session() as session:
result = session.run(neo4j_query, params)
output = [r.values() for r in result]
output.insert(0, result.keys())
return output
def construct_cypher(self, question, history=None):
messages = [
{"role": "system", "content": self.get_system_message()},
{"role": "user", "content": question},
]
# Used for Cypher healing flows
if history:
messages.extend(history)
completions = openai.ChatCompletion.create(
model=self.model,
temperature=0.0,
max_tokens=1000,
messages=messages
)
return completions.choices[0].message.content
def run(self, question, history=None, retry=True):
# Construct Cypher statement
cypher = self.construct_cypher(question, history)
print(cypher)
try:
return self.query_database(cypher)
# Self-healing flow
except CypherSyntaxError as e:
# If out of retries
if not retry:
return "Invalid Cypher syntax"
# Self-healing Cypher flow by
# providing specific error to GPT-4
print("Retrying")
return self.run(
question,
[
{"role": "assistant", "content": cypher},
{
"role": "user",
"content": f"""This query returns an error: {str(e)}
                        Give me an improved query that works without any explanations or apologies""",
},
],
retry=False
)
# def generate_response(self, messages):
# messages = [
# {"role": "system", "content": graph2text_system}
# ] + messages
# print(messages)
# # Make a request to OpenAI
# completions = openai.ChatCompletion.create(
# model=self.model,
# messages=messages,
# temperature=0.0
# )
# response = completions.choices[0].message.content
# print(response)
# # If the model apologized, remove the first line or sentence
# if "apologi" in response:
# if "\n" in response:
# response = " ".join(response.split("\n")[1:])
# else:
# response = " ".join(response.split(".")[1:])
# return response
def chat_completion(self,
prior_messages: list[any],
neo4j_uri: str,
neo4j_user: str,
neo4j_password: str):
if self.driver is None:
self.driver = GraphDatabase.driver(neo4j_uri, auth=(neo4j_user, neo4j_password))
self.schema = self.generate_schema()
question = prior_messages[-1]['content']
result = self.run(question=question,
history=None,
retry=True)
return result | [
"This query returns an error: PLACEHOLDER \n Give me a improved query that works without any explanations or apologies"
] |
2024-01-10 | Softlandia-Ltd/metaflow-index | query_metaflow.py | """Read the Metaflow-docs repository and create an index.
Must have the following env variables set:
OPENAI_API_KEY
GITHUB_TOKEN (if creating the index from scratch)
Alternatively can read them from a .env file, if present.
"""
import os
import logging
import plac
from llama_index import GPTFaissIndex, Document
from llama_index.readers import GithubRepositoryReader
from llama_index import LangchainEmbedding, LLMPredictor
from langchain.embeddings.huggingface import HuggingFaceEmbeddings
from langchain.llms import OpenAIChat, OpenAI
import dotenv
import faiss
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)
INDEX_STRUCT = "metaflow_index.json"
INDEX_VECTORS = "metaflow_vectors.dat"
# Read env variables from .env file, in case they are not set earlier
if not dotenv.load_dotenv():
logger.warning("Could not load .env file")
def create_index(embed_model: LangchainEmbedding, chunk_size: int=256):
"""Create index from scratch.
Args:
embed_model: Embedding model to use for encoding documents.
chunk_size: Length of individual encoded text segments, that will be
used a context in queries. Larger values may contain more information
but be harder to match to user requests.
"""
logger.info("Creating index from scratch")
reader = GithubRepositoryReader(
"Netflix",
"metaflow-docs",
ignore_directories=["src", ".github", "static"],
verbose=True,
)
documents = reader.load_data(branch="master")
logging.info("Loaded %s documents", len(documents))
# Create a Faiss instance
embedding_len = len(embed_model._get_query_embedding("test"))
faiss_index = faiss.IndexFlatL2(embedding_len)
logger.debug("Embedding length: %s", embedding_len)
index = GPTFaissIndex(
documents,
faiss_index=faiss_index,
chunk_size_limit=chunk_size,
embed_model=embed_model,
)
index.save_to_disk(INDEX_STRUCT, faiss_index_save_path=INDEX_VECTORS)
@plac.opt("n_sources", "Number of sources to use", type=int)
def main(n_sources: int = 2):
"""Create index and run queries."""
embed_model = LangchainEmbedding(
HuggingFaceEmbeddings(model_name="sentence-transformers/all-mpnet-base-v2")
)
if not os.path.exists(INDEX_STRUCT):
create_index(embed_model)
# Use the ChatGPT model
llm = LLMPredictor(OpenAIChat(model_name="gpt-3.5-turbo"))
# Davinci is much more capable, but also much slower and more expensive
# llm = LLMPredictor(OpenAI())
index = GPTFaissIndex.load_from_disk(
INDEX_STRUCT, faiss_index_save_path=INDEX_VECTORS, llm_predictor=llm,
embed_model=embed_model
)
while True:
# Take user input
print("=== new query ===")
query = input("Enter query: ")
response = index.query(query, similarity_top_k=n_sources)
print(response)
print(response.source_nodes)
if __name__ == "__main__":
plac.call(main)
| [] |
2024-01-10 | related-sciences/nxontology-ml | nxontology_ml~gpt_tagger~_chat_completion_middleware.py | import logging
import os
from collections import Counter
from collections.abc import Callable, Iterable
from copy import copy
from os import _Environ
from pathlib import Path
from string import Formatter
import dotenv
import openai
from nxontology_ml.gpt_tagger._models import TaskConfig
from nxontology_ml.gpt_tagger._openai_models import (
OPENAI_API_KEY,
OPENAI_MODELS,
ChatCompletionMessage,
ChatCompletionsPayload,
Response,
)
from nxontology_ml.gpt_tagger._utils import (
counter_or_empty,
log_json_if_enabled,
)
from nxontology_ml.utils import ROOT_DIR
CREATE_FN_TYPE = Callable[[ChatCompletionsPayload], Response]
class _ChatCompletionMiddleware:
"""
    Thin wrapper around OpenAI's ChatCompletion API.
Allows to:
- Handle prompt templating
- Make testing easier
- Instrument API usage
Resources:
- Chat GPT use: https://github.com/openai/openai-cookbook/blob/main/examples/How_to_format_inputs_to_ChatGPT_models.ipynb
"""
def __init__(
self,
partial_payload: ChatCompletionsPayload,
prompt_template: str,
create_fn: CREATE_FN_TYPE,
logs_path: Path | None,
counter: Counter[str],
):
"""
Intended to get constructed using cls.from_config(config)
"""
self._verify_partial_payload(partial_payload)
self._verify_prompt_template(prompt_template)
self._partial_payload = partial_payload
self._prompt_template = prompt_template
self._create_fn = create_fn
self._logs_path = logs_path
self._counter = counter
def create(self, records: Iterable[str]) -> Response:
record_list: list[str] = list(records)
payload: ChatCompletionsPayload = copy(self._partial_payload)
payload["messages"][-1]["content"] = self._prompt_template.format(
records="\n".join(record_list)
).strip()
self._counter["ChatCompletion/create_requests"] += 1
self._counter["ChatCompletion/records_processed"] += len(record_list)
logging.debug(f"Sending {len(record_list)} to OpenAI's ChatCompletion API")
log_json_if_enabled(self._logs_path, "requests", payload)
resp = self._create_fn(**payload) # type: ignore
log_json_if_enabled(self._logs_path, "responses", resp)
# FIXME: Would we want to support async io?
return resp
@classmethod
def from_config(
cls,
config: TaskConfig,
counter: Counter[str] | None = None,
env: _Environ[str] = os.environ,
) -> "_ChatCompletionMiddleware":
"""
Builder class from config
Env is exposed because OpenAI's SDK implicitly depends on the API key
"""
openai.api_key = env.get(OPENAI_API_KEY, None) or dotenv.get_key(
dotenv_path=ROOT_DIR / ".env",
key_to_get=OPENAI_API_KEY,
)
partial_payload = ChatCompletionsPayload(
model=config.openai_model_name,
messages=[ChatCompletionMessage(role="user", content="__PLACEHOLDER__")],
)
if config.model_temperature:
partial_payload["temperature"] = config.model_temperature
if config.model_top_p:
partial_payload["top_p"] = config.model_top_p
if config.model_n:
partial_payload["n"] = config.model_n
# At the moment, only chat_completion is supported.
# See: https://openai.com/blog/gpt-4-api-general-availability
return cls(
partial_payload=partial_payload,
prompt_template=config.prompt_path.read_text(),
create_fn=openai.ChatCompletion.create,
logs_path=config.logs_path,
counter=counter_or_empty(counter),
)
@staticmethod
def _verify_prompt_template(prompt_template: str) -> None:
fields = {t[1] for t in Formatter().parse(prompt_template) if t[1]}
if "records" not in fields:
raise ValueError(
'Invalid prompt provided: Template key "{records}" must be present.'
)
@staticmethod
def _verify_partial_payload(partial_payload: ChatCompletionsPayload) -> None:
model = partial_payload.get("model", "MISSING")
if model not in OPENAI_MODELS:
raise ValueError(f"Unsupported OpenAI Model: {model}")
messages = partial_payload.get("messages", [])
if not len(messages) > 0:
raise ValueError("Invalid partial_payload: Should contain message(s)")
| [
"__PLACEHOLDER__"
] |
2024-01-10 | related-sciences/nxontology-ml | nxontology_ml~gpt_tagger~_tiktoken_batcher.py | import tiktoken
from tiktoken import Encoding
from nxontology_ml.gpt_tagger._models import TaskConfig
from nxontology_ml.gpt_tagger._openai_models import OPENAI_MODELS
class _TiktokenBatcher:
"""
Batch records based on Tiktoken token count: Will buffer records until the buffer is full.
Uses:
- https://github.com/openai/tiktoken
- (Example of tiktoken: https://github.com/openai/openai-cookbook/blob/main/examples/How_to_count_tokens_with_tiktoken.ipynb)
"""
def __init__(
self,
max_token_cnt: int,
tiktoken_encoding: Encoding,
record_buffer: list[str] | None = None,
token_initial_cnt: int = 0,
):
"""
Intended to get constructed using cls.from_config(config)
Args:
            max_token_cnt: Size of the buffer (in terms of # of tokens)
            tiktoken_encoding: OpenAI Encoding to be used to count the tokens
            record_buffer: Optional pre-filled buffer of records
            token_initial_cnt: Token count already used by the records in record_buffer
"""
self._max_token_cnt = max_token_cnt
self._tiktoken_encoding = tiktoken_encoding
self._record_buffer = record_buffer or []
self._buffer_token_cnt = token_initial_cnt
def add_record_to_buffer(self, record: str) -> list[str] | None:
"""
Add one record to the buffer
Returns the content of the buffer if it is full (i.e. ready to process)
"""
if self._buffer_is_full(next_record=record):
assert len(self._record_buffer) > 0
return self.flush_buffer(next_record=record)
self._do_add_record_to_buffer(record)
return None
def flush_buffer(self, next_record: str | None = None) -> list[str]:
"""
Flush the content of the buffer (even if not full).
Args:
next_record: First record of the next buffer (if provided)
Returns:
Content of the flushed buffer
"""
old_buffer = self._record_buffer
self._record_buffer = []
self._buffer_token_cnt = 0
if next_record:
self._do_add_record_to_buffer(next_record)
return old_buffer
def _do_add_record_to_buffer(self, record: str) -> None:
"""
Add record to buffer and update _buffer_token_cnt
"""
self._record_buffer.append(record)
self._buffer_token_cnt += self._get_token_cnt_from_record(record)
if self._buffer_is_full():
raise ValueError(
f"Buffer size exceeded: {self._buffer_token_cnt=} > {self._max_token_cnt=}"
)
def _get_token_cnt_from_record(self, record: str) -> int:
return len(self._tiktoken_encoding.encode(record))
def _buffer_is_full(self, next_record: str | None = None) -> bool:
"""
Returns True if:
- It is already full
- If it is not full but doesn't have capacity for the next_record
"""
token_cnt: int = self._buffer_token_cnt
if next_record:
token_cnt += self._get_token_cnt_from_record(next_record)
return token_cnt > self._max_token_cnt
@classmethod
def from_config(cls, config: TaskConfig) -> "_TiktokenBatcher":
tiktoken_encoding = tiktoken.encoding_for_model(config.openai_model_name)
prompt_token_cnt = len(tiktoken_encoding.encode(config.prompt_path.read_text()))
        assert 0.0 < config.prompt_token_ratio < 1.0, (
            f"Wrong {config.prompt_token_ratio=} value."
        )
max_token_cnt = (
int(
OPENAI_MODELS[config.openai_model_name].max_token_cnt
* config.prompt_token_ratio
)
- prompt_token_cnt
)
if max_token_cnt <= 0:
raise ValueError(
"The provided prompt has more tokens than the window of the model."
)
return cls(
max_token_cnt=max_token_cnt,
tiktoken_encoding=tiktoken_encoding,
)
| [] |
2024-01-10 | chocky18/LLM-integrated-with-Tools | try.py | from playwright.sync_api import sync_playwright
from langchain.agents.agent_toolkits import PlayWrightBrowserToolkit
from langchain.agents import initialize_agent, AgentType
from langchain.llms import OpenAI
import json
import streamlit as st
import time
from sys import argv, exit, platform
import os
os.environ["OPENAI_API_KEY"] = "your open ai key"
black_listed_elements = set(["html", "head", "title", "meta", "iframe", "body", "script", "style", "path", "svg", "br", "::marker",])
class AGI:
def __init__(self):
self.current_url = None
self.browser = (
sync_playwright()
.start()
.chromium.launch(
headless=False,
)
)
self.page = self.browser.new_page()
self.page.set_viewport_size({"width": 1280, "height": 1080})
self.client = self.page.context.new_cdp_session(self.page)
self.page_element_buffer = {}
# Create the PlayWrightBrowserToolkit with the synchronized browser
toolkit = PlayWrightBrowserToolkit.from_browser(self.browser)
tools = toolkit.get_tools()
# Initialize the agent
llm = OpenAI(temperature=0.5)
self.agent_chain = initialize_agent(tools, llm, agent=AgentType.STRUCTURED_CHAT_ZERO_SHOT_REACT_DESCRIPTION, verbose=True)
def go_to_page(self, url):
self.page.goto(url=url if "://" in url else "http://" + url)
self.client = self.page.context.new_cdp_session(self.page)
self.page_element_buffer = {}
def evaluate(self, response):
# Implement your evaluation criteria here
# Return True if the response is considered correct, False otherwise
# You can use any criteria, such as checking for specific keywords or patterns
# or comparing the response with expected outputs
# Example: Check if the response contains the word "success"
if "success" in response.lower():
return True
else:
return False
def extract_elements(self):
"""
Extracts elements from a web page and generates selectors for those elements based on certain criteria.
Returns a list of generated selectors.
"""
page = self.page
page_element_buffer = self.page_element_buffer
start = time.time()
page_state_as_text = []
device_pixel_ratio = page.evaluate("window.devicePixelRatio")
if platform == "darwin" and device_pixel_ratio == 1: # lies
device_pixel_ratio = 2
win_scroll_x = page.evaluate("window.scrollX")
win_scroll_y = page.evaluate("window.scrollY")
win_upper_bound = page.evaluate("window.pageYOffset")
win_left_bound = page.evaluate("window.pageXOffset")
win_width = page.evaluate("window.screen.width")
win_height = page.evaluate("window.screen.height")
win_right_bound = win_left_bound + win_width
win_lower_bound = win_upper_bound + win_height
document_offset_height = page.evaluate("document.body.offsetHeight")
document_scroll_height = page.evaluate("document.body.scrollHeight")
percentage_progress_start = 1
percentage_progress_end = 2
page_state_as_text.append(
{
"x": 0,
"y": 0,
"text": "[scrollbar {:0.2f}-{:0.2f}%]".format(
round(percentage_progress_start, 2), round(percentage_progress_end)
),
}
)
tree = self.client.send(
"DOMSnapshot.captureSnapshot",
{"computedStyles": [], "includeDOMRects": True, "includePaintOrder": True},
)
strings = tree["strings"]
document = tree["documents"][0]
nodes = document["nodes"]
backend_node_id = nodes["backendNodeId"]
attributes = nodes["attributes"]
node_value = nodes["nodeValue"]
parent = nodes["parentIndex"]
node_types = nodes["nodeType"]
node_names = nodes["nodeName"]
is_clickable = set(nodes["isClickable"]["index"])
text_value = nodes["textValue"]
text_value_index = text_value["index"]
text_value_values = text_value["value"]
input_value = nodes["inputValue"]
input_value_index = input_value["index"]
input_value_values = input_value["value"]
input_checked = nodes["inputChecked"]
layout = document["layout"]
layout_node_index = layout["nodeIndex"]
bounds = layout["bounds"]
cursor = 0
html_elements_text = []
child_nodes = {}
elements_in_view_port = []
anchor_ancestry = {"-1": (False, None)}
button_ancestry = {"-1": (False, None)}
def convert_name(node_name, has_click_handler):
if node_name == "a":
return "link"
if node_name == "input":
return "input"
if node_name == "img":
return "img"
if (
node_name == "button" or has_click_handler
): # found pages that needed this quirk
return "button"
else:
return "text"
def find_attributes(attributes, keys):
values = {}
for [key_index, value_index] in zip(*(iter(attributes),) * 2):
if value_index < 0:
continue
key = strings[key_index]
value = strings[value_index]
if key in keys:
values[key] = value
keys.remove(key)
if not keys:
return values
return values
def add_to_hash_tree(hash_tree, tag, node_id, node_name, parent_id):
parent_id_str = str(parent_id)
if not parent_id_str in hash_tree:
parent_name = strings[node_names[parent_id]].lower()
grand_parent_id = parent[parent_id]
add_to_hash_tree(
hash_tree, tag, parent_id, parent_name, grand_parent_id
)
is_parent_desc_anchor, anchor_id = hash_tree[parent_id_str]
# even if the anchor is nested in another anchor, we set the "root" for all descendants to be ::Self
if node_name == tag:
value = (True, node_id)
elif (
is_parent_desc_anchor
): # reuse the parent's anchor_id (which could be much higher in the tree)
value = (True, anchor_id)
else:
value = (
False,
None,
) # not a descendant of an anchor, most likely it will become text, an interactive element or discarded
hash_tree[str(node_id)] = value
return value
for index, node_name_index in enumerate(node_names):
node_parent = parent[index]
node_name = strings[node_name_index].lower()
is_ancestor_of_anchor, anchor_id = add_to_hash_tree(
anchor_ancestry, "a", index, node_name, node_parent
)
is_ancestor_of_button, button_id = add_to_hash_tree(
button_ancestry, "button", index, node_name, node_parent
)
try:
cursor = layout_node_index.index(
index
) # todo replace this with proper cursoring, ignoring the fact this is O(n^2) for the moment
except:
continue
if node_name in black_listed_elements:
continue
[x, y, width, height] = bounds[cursor]
x /= device_pixel_ratio
y /= device_pixel_ratio
width /= device_pixel_ratio
height /= device_pixel_ratio
elem_left_bound = x
elem_top_bound = y
elem_right_bound = x + width
elem_lower_bound = y + height
partially_is_in_viewport = (
elem_left_bound < win_right_bound
and elem_right_bound >= win_left_bound
and elem_top_bound < win_lower_bound
and elem_lower_bound >= win_upper_bound
)
if not partially_is_in_viewport:
continue
meta_data = []
# inefficient to grab the same set of keys for kinds of objects but its fine for now
element_attributes = find_attributes(
attributes[index], ["type", "placeholder", "aria-label", "title", "alt"]
)
ancestor_exception = is_ancestor_of_anchor or is_ancestor_of_button
ancestor_node_key = (
None
if not ancestor_exception
else str(anchor_id)
if is_ancestor_of_anchor
else str(button_id)
)
ancestor_node = (
None
if not ancestor_exception
else child_nodes.setdefault(str(ancestor_node_key), [])
)
if node_name == "#text" and ancestor_exception:
text = strings[node_value[index]]
if text == "|" or text == "•":
continue
ancestor_node.append({
"type": "type", "value": text
})
else:
if (
node_name == "input" and element_attributes.get("type") == "submit"
) or node_name == "button" or node_name == 'textarea':
node_name = "button"
element_attributes.pop(
"type", None
) # prevent [button ... (button)..]
for key in element_attributes:
if ancestor_exception:
ancestor_node.append({
"type": "attribute",
"key": key,
"value": element_attributes[key]
})
else:
meta_data.append(element_attributes[key])
# print("meta", meta_data)
element_node_value = None
if node_value[index] >= 0:
element_node_value = strings[node_value[index]]
if element_node_value == "|": #commonly used as a seperator, does not add much context - lets save ourselves some token space
continue
elif (
node_name == "input"
and index in input_value_index
and element_node_value is None
):
node_input_text_index = input_value_index.index(index)
text_index = input_value_values[node_input_text_index]
if node_input_text_index >= 0 and text_index >= 0:
element_node_value = strings[text_index]
# remove redudant elements
if ancestor_exception and (node_name != "a" and node_name != "button"):
continue
elements_in_view_port.append(
{
"node_index": str(index),
"backend_node_id": backend_node_id[index],
"node_name": node_name,
"node_value": element_node_value,
"node_meta": meta_data,
"is_clickable": index in is_clickable,
"origin_x": int(x),
"origin_y": int(y),
"center_x": int(x + (width / 2)),
"center_y": int(y + (height / 2)),
}
)
# print("elements_in_view_port",elements_in_view_port)
# lets filter further to remove anything that does not hold any text nor has click handlers + merge text from leaf#text nodes with the parent
final_selectors= []
id_counter = 0
for element in elements_in_view_port:
node_index = element.get("node_index")
node_name = element.get("node_name")
node_value = element.get("node_value")
is_clickable = element.get("is_clickable")
origin_x = element.get("origin_x")
origin_y = element.get("origin_y")
center_x = element.get("center_x")
center_y = element.get("center_y")
meta_data = element.get("node_meta")
inner_text = f"{node_value} " if node_value else ""
meta = ""
if node_index in child_nodes:
for child in child_nodes.get(node_index):
entry_type = child.get('type')
entry_value= child.get('value')
if entry_type == "attribute":
entry_key = child.get('key')
meta_data.append(f'{entry_key}="{entry_value}"')
else:
inner_text += f"{entry_value} "
# print("meta_data", meta_data)
if meta_data:
meta_string = " ".join(meta_data)
meta = f" {meta_string}"
if inner_text != "":
inner_text = f"{inner_text.strip()}"
converted_node_name = convert_name(node_name, is_clickable)
# not very elegant, more like a placeholder
if (
(converted_node_name != "button" or meta == "")
and converted_node_name != "link"
and converted_node_name != "input"
and converted_node_name != "img"
and converted_node_name != "textarea"
) and inner_text.strip() == "":
continue
page_element_buffer[id_counter] = element
if inner_text != "":
final_selectors.append(
f"""<{converted_node_name} id={id_counter}{meta}>{inner_text}</{converted_node_name}>"""
)
else:
final_selectors.append(
f"""<{converted_node_name} id={id_counter}{meta}/>"""
)
id_counter += 1
print("Parsing time: {:0.2f} seconds".format(time.time() - start))
# print("elements_of_interest", elements_of_interest)
# elements_to_remove = ["<button id=8 I'm Feeling Lucky/>", '<text id=9>Google offered in:</text>', '<link id=10>हिन्दी</link>', '<link id=11>বাংলা</link>', '<link id=12>తెలుగు</link>', '<link id=13>मराठी</link>', '<link id=14>தமிழ்</link>', '<link id=15>ગુજરાતી</link>', '<link id=16>ಕನ್ನಡ</link>', '<link id=17>മലയാളം</link>', '<link id=18>ਪੰਜਾਬੀ</link>', '<text id=19>India</text>', '<link id=20>About</link>', '<link id=21>Advertising</link>', '<link id=22>Business</link>', '<link id=23>How Search works</link>', '<link id=24>Privacy</link>', '<link id=25>Terms</link>', '<text id=26>Settings</text>']
# lst = [elem for elem in elements_of_interest if elem not in elements_to_remove]
return final_selectors
def execute_commands(self, objective,selectors):
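        # Ask the agent for the next browser command and try to run it.
        # The {{selector}} placeholder and the "type_text" marker handled below are
        # conventions assumed by this prototype, not LangChain or Playwright APIs.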
previous_command = ""
actions = [] # Dynamic list to store actions
def get_gpt_command(objective, url, previous_command):
prompt = "OpenAI is an agent that controls the web browser using Playwright. It can perform actions like scrolling, clicking, typing, submitting, and navigating to new pages. The objective is to achieve a specific goal. To get closer to achieving the goal, start by submitting a search query to Google that will lead to the best page for accomplishing the objective. Once on that page, interact with it using appropriate actions to achieve the goal. Based on the given objective, issue the command you believe will help you get closer to achieving the goal.\nObjective: {}\nURL: {}\nPrevious Command: {}\n\n"
response = self.agent_chain.run(input=prompt.format(objective, url, previous_command))
return response
# Run the agent to get the command from LLM
response = get_gpt_command(objective=objective, url=self.current_url, previous_command=previous_command)
        # agent_chain.run returns the agent's answer as a plain string
        previous_command = response
        if response == "exit":
            st.write("Exiting...")
        else:
            # Generate a response from the LLM model
            llm_response = self.agent_chain.run(response)
# Evaluate the correctness of the response
is_correct = self.evaluate(llm_response)
# Provide feedback to the LLM model
feedback = None
# Existing code...
if feedback is not None:
# Provide feedback to the LLM model
self.agent_chain.feedback(response, llm_response, feedback)
if is_correct:
# Execute the command using Playwright
# Execute the command using Playwright
try:
# Check if the command requires interacting with elements
if "click" in llm_response or "type" in llm_response or "select" in llm_response:
elements = self.extract_elements()
print("Extracted elements:", elements)
# Find a valid selector from final_selectors and replace it in llm_response
for element in elements:
for selector in selectors:
# Replace {{selector}} with current selector
updated_llm_response = llm_response.replace("{{selector}}", selector)
# Check if the command contains 'type_text' and add typing and submitting
if "type_text" in updated_llm_response:
updated_llm_response = updated_llm_response.replace("type_text", f"{selector}.type('{text}'); {selector}.press('Enter');")
try:
self.page.evaluate(updated_llm_response)
break # Stop iterating if action succeeds
except Exception:
continue # Try the next selector if action fails
else:
self.page.evaluate(llm_response)
except Exception as e:
st.error(f"Error executing command: {e}")
# Display the LLM response
response_data = json.loads(llm_response)
llm_actions = response_data.get("actions", [])
actions.extend(llm_actions)
print(actions)
# Display the actions in the interface
st.subheader("Actions")
for index, action in enumerate(actions):
st.code(f"Action {index+1}:", language="json")
st.json(action)
# Close the browser
self.browser.close()
def main():
col1, col2 = st.columns([1, 20])
# In the first column, display the logo image
logo_image = "workplete.png"
col1.image(logo_image, use_column_width=True)
# In the second column, display the "AGI" heading
col2.title("AGI")
# st.title("Chatbot Interface")
# Get user input or command from LLM
user_input = st.text_input("Objective")
submit = st.button("Send")
thumbs_up = st.button("Thumbs up 👍")
thumbs_down = st.button("Thumbs down 👎")
if user_input and submit:
# Create an instance of the Crawler class
agi = AGI()
# Extract elements and generate selectors
selectors = agi.extract_elements()
# Execute commands obtained from LLM
agi.execute_commands(user_input,selectors)
if __name__ == "__main__":
main()
| [
"OpenAI is an agent that controls the web browser using Playwright. It can perform actions like scrolling, clicking, typing, submitting, and navigating to new pages. The objective is to achieve a specific goal. To get closer to achieving the goal, start by submitting a search query to Google that will lead to the best page for accomplishing the objective. Once on that page, interact with it using appropriate actions to achieve the goal. Based on the given objective, issue the command you believe will help you get closer to achieving the goal.\nObjective: {}\nURL: {}\nPrevious Command: {}\n\n"
] |
2024-01-10 | Leweibo/Langchain-Chatchat | startup.py | import asyncio
import multiprocessing as mp
import os
import subprocess
import sys
from multiprocessing import Process
from datetime import datetime
from pprint import pprint
# Set the maximum number of numexpr threads; defaults to the number of CPU cores
try:
import numexpr
n_cores = numexpr.utils.detect_number_of_cores()
os.environ["NUMEXPR_MAX_THREADS"] = str(n_cores)
except:
pass
sys.path.append(os.path.dirname(os.path.dirname(__file__)))
from configs import (
LOG_PATH,
log_verbose,
logger,
LLM_MODELS,
EMBEDDING_MODEL,
TEXT_SPLITTER_NAME,
FSCHAT_CONTROLLER,
FSCHAT_OPENAI_API,
FSCHAT_MODEL_WORKERS,
API_SERVER,
WEBUI_SERVER,
HTTPX_DEFAULT_TIMEOUT,
)
from server.utils import (fschat_controller_address, fschat_model_worker_address,
fschat_openai_api_address, set_httpx_config, get_httpx_client,
get_model_worker_config, get_all_model_worker_configs,
MakeFastAPIOffline, FastAPI, llm_device, embedding_device)
import argparse
from typing import Tuple, List, Dict
from configs import VERSION
def create_controller_app(
dispatch_method: str,
log_level: str = "INFO",
) -> FastAPI:
import fastchat.constants
fastchat.constants.LOGDIR = LOG_PATH
from fastchat.serve.controller import app, Controller, logger
logger.setLevel(log_level)
controller = Controller(dispatch_method)
sys.modules["fastchat.serve.controller"].controller = controller
MakeFastAPIOffline(app)
app.title = "FastChat Controller"
app._controller = controller
return app
def create_model_worker_app(log_level: str = "INFO", **kwargs) -> FastAPI:
"""
    kwargs contains the following fields:
    host:
    port:
    model_names: [`model_name`]
    controller_address:
    worker_address:
    For models supported by Langchain:
        langchain_model: True
        fschat is not used
    For online APIs:
        online_api: True
        worker_class: `provider`
    For local (offline) models:
        model_path: `model_name_or_path`, a huggingface repo-id or a local path
        device: `LLM_DEVICE`
"""
import fastchat.constants
fastchat.constants.LOGDIR = LOG_PATH
import argparse
parser = argparse.ArgumentParser()
args = parser.parse_args([])
for k, v in kwargs.items():
setattr(args, k, v)
    if worker_class := kwargs.get("langchain_model"):  # models supported by Langchain need no extra handling here
from fastchat.serve.base_model_worker import app
worker = ""
    # Online model APIs
elif worker_class := kwargs.get("worker_class"):
from fastchat.serve.base_model_worker import app
worker = worker_class(model_names=args.model_names,
controller_addr=args.controller_address,
worker_addr=args.worker_address)
# sys.modules["fastchat.serve.base_model_worker"].worker = worker
sys.modules["fastchat.serve.base_model_worker"].logger.setLevel(log_level)
    # Local models
else:
from configs.model_config import VLLM_MODEL_DICT
if kwargs["model_names"][0] in VLLM_MODEL_DICT and args.infer_turbo == "vllm":
import fastchat.serve.vllm_worker
from fastchat.serve.vllm_worker import VLLMWorker, app,worker_id
from vllm import AsyncLLMEngine
from vllm.engine.arg_utils import AsyncEngineArgs,EngineArgs
            args.tokenizer = args.model_path  # set this here if the tokenizer differs from model_path
args.tokenizer_mode = 'auto'
args.trust_remote_code= True
args.download_dir= None
args.load_format = 'auto'
args.dtype = 'auto'
args.seed = 0
args.worker_use_ray = False
args.pipeline_parallel_size = 1
args.tensor_parallel_size = 1
args.block_size = 16
args.swap_space = 4 # GiB
args.gpu_memory_utilization = 0.90
            args.max_num_batched_tokens = None  # maximum number of tokens per batch; depends on your GPU and model settings, too large a value can exhaust VRAM
args.max_num_seqs = 256
args.disable_log_stats = False
args.conv_template = None
args.limit_worker_concurrency = 5
args.no_register = False
            args.num_gpus = 4  # vllm workers are split with tensor parallelism; set this to the number of GPUs
args.engine_use_ray = False
args.disable_log_requests = False
            # parameters required after vllm 0.2.1, but not needed here
args.max_model_len = None
args.revision = None
args.quantization = None
args.max_log_len = None
args.tokenizer_revision = None
            # new parameters required by vllm 0.2.2
args.max_paddings = 256
if args.model_path:
args.model = args.model_path
if args.num_gpus > 1:
args.tensor_parallel_size = args.num_gpus
for k, v in kwargs.items():
setattr(args, k, v)
engine_args = AsyncEngineArgs.from_cli_args(args)
engine = AsyncLLMEngine.from_engine_args(engine_args)
worker = VLLMWorker(
controller_addr = args.controller_address,
worker_addr = args.worker_address,
worker_id = worker_id,
model_path = args.model_path,
model_names = args.model_names,
limit_worker_concurrency = args.limit_worker_concurrency,
no_register = args.no_register,
llm_engine = engine,
conv_template = args.conv_template,
)
sys.modules["fastchat.serve.vllm_worker"].engine = engine
sys.modules["fastchat.serve.vllm_worker"].worker = worker
sys.modules["fastchat.serve.vllm_worker"].logger.setLevel(log_level)
else:
from fastchat.serve.model_worker import app, GptqConfig, AWQConfig, ModelWorker, worker_id
            args.gpus = "0"  # GPU ids; with multiple GPUs this can be set to "0,1,2,3"
            args.max_gpu_memory = "22GiB"
            args.num_gpus = 1  # model workers are split with model parallelism; set this to the number of GPUs
args.load_8bit = False
args.cpu_offloading = None
args.gptq_ckpt = None
args.gptq_wbits = 16
args.gptq_groupsize = -1
args.gptq_act_order = False
args.awq_ckpt = None
args.awq_wbits = 16
args.awq_groupsize = -1
args.model_names = [""]
args.conv_template = None
args.limit_worker_concurrency = 5
args.stream_interval = 2
args.no_register = False
args.embed_in_truncate = False
for k, v in kwargs.items():
setattr(args, k, v)
if args.gpus:
if args.num_gpus is None:
args.num_gpus = len(args.gpus.split(','))
if len(args.gpus.split(",")) < args.num_gpus:
raise ValueError(
f"Larger --num-gpus ({args.num_gpus}) than --gpus {args.gpus}!"
)
os.environ["CUDA_VISIBLE_DEVICES"] = args.gpus
gptq_config = GptqConfig(
ckpt=args.gptq_ckpt or args.model_path,
wbits=args.gptq_wbits,
groupsize=args.gptq_groupsize,
act_order=args.gptq_act_order,
)
awq_config = AWQConfig(
ckpt=args.awq_ckpt or args.model_path,
wbits=args.awq_wbits,
groupsize=args.awq_groupsize,
)
worker = ModelWorker(
controller_addr=args.controller_address,
worker_addr=args.worker_address,
worker_id=worker_id,
model_path=args.model_path,
model_names=args.model_names,
limit_worker_concurrency=args.limit_worker_concurrency,
no_register=args.no_register,
device=args.device,
num_gpus=args.num_gpus,
max_gpu_memory=args.max_gpu_memory,
load_8bit=args.load_8bit,
cpu_offloading=args.cpu_offloading,
gptq_config=gptq_config,
awq_config=awq_config,
stream_interval=args.stream_interval,
conv_template=args.conv_template,
embed_in_truncate=args.embed_in_truncate,
)
sys.modules["fastchat.serve.model_worker"].args = args
sys.modules["fastchat.serve.model_worker"].gptq_config = gptq_config
# sys.modules["fastchat.serve.model_worker"].worker = worker
sys.modules["fastchat.serve.model_worker"].logger.setLevel(log_level)
MakeFastAPIOffline(app)
app.title = f"FastChat LLM Server ({args.model_names[0]})"
app._worker = worker
return app
def create_openai_api_app(
controller_address: str,
api_keys: List = [],
log_level: str = "INFO",
) -> FastAPI:
import fastchat.constants
fastchat.constants.LOGDIR = LOG_PATH
from fastchat.serve.openai_api_server import app, CORSMiddleware, app_settings
from fastchat.utils import build_logger
logger = build_logger("openai_api", "openai_api.log")
logger.setLevel(log_level)
app.add_middleware(
CORSMiddleware,
allow_credentials=True,
allow_origins=["*"],
allow_methods=["*"],
allow_headers=["*"],
)
sys.modules["fastchat.serve.openai_api_server"].logger = logger
app_settings.controller_address = controller_address
app_settings.api_keys = api_keys
MakeFastAPIOffline(app)
app.title = "FastChat OpeanAI API Server"
return app
def _set_app_event(app: FastAPI, started_event: mp.Event = None):
@app.on_event("startup")
async def on_startup():
if started_event is not None:
started_event.set()
def run_controller(log_level: str = "INFO", started_event: mp.Event = None):
import uvicorn
import httpx
from fastapi import Body
import time
import sys
from server.utils import set_httpx_config
set_httpx_config()
app = create_controller_app(
dispatch_method=FSCHAT_CONTROLLER.get("dispatch_method"),
log_level=log_level,
)
_set_app_event(app, started_event)
# add interface to release and load model worker
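    # Hypothetical example of switching models through this endpoint
    # (host and port come from FSCHAT_CONTROLLER in the configs):
    #   curl -X POST http://<controller_host>:<controller_port>/release_worker \
    #        -H "Content-Type: application/json" \
    #        -d '{"model_name": "chatglm-6b", "new_model_name": "chatglm2-6b"}'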
@app.post("/release_worker")
def release_worker(
        model_name: str = Body(..., description="name of the model to release", samples=["chatglm-6b"]),
        # worker_address: str = Body(None, description="address of the model to release; provide either this or the name", samples=[FSCHAT_CONTROLLER_address()]),
        new_model_name: str = Body(None, description="model to load after the release"),
        keep_origin: bool = Body(False, description="keep the original model and load the new model alongside it")
) -> Dict:
available_models = app._controller.list_models()
if new_model_name in available_models:
msg = f"要切换的LLM模型 {new_model_name} 已经存在"
logger.info(msg)
return {"code": 500, "msg": msg}
if new_model_name:
logger.info(f"开始切换LLM模型:从 {model_name} 到 {new_model_name}")
else:
logger.info(f"即将停止LLM模型: {model_name}")
if model_name not in available_models:
msg = f"the model {model_name} is not available"
logger.error(msg)
return {"code": 500, "msg": msg}
worker_address = app._controller.get_worker_address(model_name)
if not worker_address:
msg = f"can not find model_worker address for {model_name}"
logger.error(msg)
return {"code": 500, "msg": msg}
with get_httpx_client() as client:
r = client.post(worker_address + "/release",
json={"new_model_name": new_model_name, "keep_origin": keep_origin})
if r.status_code != 200:
msg = f"failed to release model: {model_name}"
logger.error(msg)
return {"code": 500, "msg": msg}
if new_model_name:
timer = HTTPX_DEFAULT_TIMEOUT # wait for new model_worker register
while timer > 0:
models = app._controller.list_models()
if new_model_name in models:
break
time.sleep(1)
timer -= 1
if timer > 0:
msg = f"sucess change model from {model_name} to {new_model_name}"
logger.info(msg)
return {"code": 200, "msg": msg}
else:
msg = f"failed change model from {model_name} to {new_model_name}"
logger.error(msg)
return {"code": 500, "msg": msg}
else:
msg = f"sucess to release model: {model_name}"
logger.info(msg)
return {"code": 200, "msg": msg}
host = FSCHAT_CONTROLLER["host"]
port = FSCHAT_CONTROLLER["port"]
if log_level == "ERROR":
sys.stdout = sys.__stdout__
sys.stderr = sys.__stderr__
uvicorn.run(app, host=host, port=port, log_level=log_level.lower())
def run_model_worker(
model_name: str = LLM_MODELS[0],
controller_address: str = "",
log_level: str = "INFO",
q: mp.Queue = None,
started_event: mp.Event = None,
):
import uvicorn
from fastapi import Body
import sys
from server.utils import set_httpx_config
set_httpx_config()
kwargs = get_model_worker_config(model_name)
host = kwargs.pop("host")
port = kwargs.pop("port")
kwargs["model_names"] = [model_name]
kwargs["controller_address"] = controller_address or fschat_controller_address()
kwargs["worker_address"] = fschat_model_worker_address(model_name)
model_path = kwargs.get("model_path", "")
kwargs["model_path"] = model_path
app = create_model_worker_app(log_level=log_level, **kwargs)
_set_app_event(app, started_event)
if log_level == "ERROR":
sys.stdout = sys.__stdout__
sys.stderr = sys.__stderr__
# add interface to release and load model
@app.post("/release")
def release_model(
        new_model_name: str = Body(None, description="model to load after the release"),
        keep_origin: bool = Body(False, description="keep the original model and load the new model alongside it")
) -> Dict:
if keep_origin:
if new_model_name:
q.put([model_name, "start", new_model_name])
else:
if new_model_name:
q.put([model_name, "replace", new_model_name])
else:
q.put([model_name, "stop", None])
return {"code": 200, "msg": "done"}
uvicorn.run(app, host=host, port=port, log_level=log_level.lower())
def run_openai_api(log_level: str = "INFO", started_event: mp.Event = None):
import uvicorn
import sys
from server.utils import set_httpx_config
set_httpx_config()
controller_addr = fschat_controller_address()
app = create_openai_api_app(controller_addr, log_level=log_level) # TODO: not support keys yet.
_set_app_event(app, started_event)
host = FSCHAT_OPENAI_API["host"]
port = FSCHAT_OPENAI_API["port"]
if log_level == "ERROR":
sys.stdout = sys.__stdout__
sys.stderr = sys.__stderr__
uvicorn.run(app, host=host, port=port)
def run_api_server(started_event: mp.Event = None, run_mode: str = None):
from server.api import create_app
import uvicorn
from server.utils import set_httpx_config
set_httpx_config()
app = create_app(run_mode=run_mode)
_set_app_event(app, started_event)
host = API_SERVER["host"]
port = API_SERVER["port"]
uvicorn.run(app, host=host, port=port)
def run_webui(started_event: mp.Event = None, run_mode: str = None):
from server.utils import set_httpx_config
set_httpx_config()
host = WEBUI_SERVER["host"]
port = WEBUI_SERVER["port"]
cmd = ["streamlit", "run", "webui.py",
"--server.address", host,
"--server.port", str(port),
"--theme.base", "light",
"--theme.primaryColor", "#165dff",
"--theme.secondaryBackgroundColor", "#f5f5f5",
"--theme.textColor", "#000000",
]
if run_mode == "lite":
cmd += [
"--",
"lite",
]
p = subprocess.Popen(cmd)
started_event.set()
p.wait()
def parse_args() -> argparse.ArgumentParser:
parser = argparse.ArgumentParser()
parser.add_argument(
"-a",
"--all-webui",
action="store_true",
help="run fastchat's controller/openai_api/model_worker servers, run api.py and webui.py",
dest="all_webui",
)
parser.add_argument(
"--all-api",
action="store_true",
help="run fastchat's controller/openai_api/model_worker servers, run api.py",
dest="all_api",
)
parser.add_argument(
"--llm-api",
action="store_true",
help="run fastchat's controller/openai_api/model_worker servers",
dest="llm_api",
)
parser.add_argument(
"-o",
"--openai-api",
action="store_true",
help="run fastchat's controller/openai_api servers",
dest="openai_api",
)
parser.add_argument(
"-m",
"--model-worker",
action="store_true",
help="run fastchat's model_worker server with specified model name. "
"specify --model-name if not using default LLM_MODELS",
dest="model_worker",
)
parser.add_argument(
"-n",
"--model-name",
type=str,
nargs="+",
default=LLM_MODELS,
help="specify model name for model worker. "
"add addition names with space seperated to start multiple model workers.",
dest="model_name",
)
parser.add_argument(
"-c",
"--controller",
type=str,
help="specify controller address the worker is registered to. default is FSCHAT_CONTROLLER",
dest="controller_address",
)
parser.add_argument(
"--api",
action="store_true",
help="run api.py server",
dest="api",
)
parser.add_argument(
"-p",
"--api-worker",
action="store_true",
help="run online model api such as zhipuai",
dest="api_worker",
)
parser.add_argument(
"-w",
"--webui",
action="store_true",
help="run webui.py server",
dest="webui",
)
parser.add_argument(
"-q",
"--quiet",
action="store_true",
help="减少fastchat服务log信息",
dest="quiet",
)
parser.add_argument(
"-i",
"--lite",
action="store_true",
help="以Lite模式运行:仅支持在线API的LLM对话、搜索引擎对话",
dest="lite",
)
args = parser.parse_args()
return args, parser
def dump_server_info(after_start=False, args=None):
import platform
import langchain
import fastchat
from server.utils import api_address, webui_address
print("\n")
print("=" * 30 + "Langchain-Chatchat Configuration" + "=" * 30)
print(f"操作系统:{platform.platform()}.")
print(f"python版本:{sys.version}")
print(f"项目版本:{VERSION}")
print(f"langchain版本:{langchain.__version__}. fastchat版本:{fastchat.__version__}")
print("\n")
models = LLM_MODELS
if args and args.model_name:
models = args.model_name
print(f"当前使用的分词器:{TEXT_SPLITTER_NAME}")
print(f"当前启动的LLM模型:{models} @ {llm_device()}")
for model in models:
pprint(get_model_worker_config(model))
print(f"当前Embbedings模型: {EMBEDDING_MODEL} @ {embedding_device()}")
if after_start:
print("\n")
print(f"服务端运行信息:")
if args.openai_api:
print(f" OpenAI API Server: {fschat_openai_api_address()}")
if args.api:
print(f" Chatchat API Server: {api_address()}")
if args.webui:
print(f" Chatchat WEBUI Server: {webui_address()}")
print("=" * 30 + "Langchain-Chatchat Configuration" + "=" * 30)
print("\n")
async def start_main_server():
import time
import signal
def handler(signalname):
"""
Python 3.9 has `signal.strsignal(signalnum)` so this closure would not be needed.
Also, 3.8 includes `signal.valid_signals()` that can be used to create a mapping for the same purpose.
"""
def f(signal_received, frame):
raise KeyboardInterrupt(f"{signalname} received")
return f
# This will be inherited by the child process if it is forked (not spawned)
signal.signal(signal.SIGINT, handler("SIGINT"))
signal.signal(signal.SIGTERM, handler("SIGTERM"))
mp.set_start_method("spawn")
manager = mp.Manager()
run_mode = None
queue = manager.Queue()
args, parser = parse_args()
if args.all_webui:
args.openai_api = True
args.model_worker = True
args.api = True
args.api_worker = True
args.webui = True
elif args.all_api:
args.openai_api = True
args.model_worker = True
args.api = True
args.api_worker = True
args.webui = False
elif args.llm_api:
args.openai_api = True
args.model_worker = True
args.api_worker = True
args.api = False
args.webui = False
if args.lite:
args.model_worker = False
run_mode = "lite"
dump_server_info(args=args)
if len(sys.argv) > 1:
logger.info(f"正在启动服务:")
logger.info(f"如需查看 llm_api 日志,请前往 {LOG_PATH}")
processes = {"online_api": {}, "model_worker": {}}
def process_count():
return len(processes) + len(processes["online_api"]) + len(processes["model_worker"]) - 2
if args.quiet or not log_verbose:
log_level = "ERROR"
else:
log_level = "INFO"
controller_started = manager.Event()
if args.openai_api:
process = Process(
target=run_controller,
name=f"controller",
kwargs=dict(log_level=log_level, started_event=controller_started),
daemon=True,
)
processes["controller"] = process
process = Process(
target=run_openai_api,
name=f"openai_api",
daemon=True,
)
processes["openai_api"] = process
model_worker_started = []
if args.model_worker:
for model_name in args.model_name:
config = get_model_worker_config(model_name)
if not config.get("online_api"):
e = manager.Event()
model_worker_started.append(e)
process = Process(
target=run_model_worker,
name=f"model_worker - {model_name}",
kwargs=dict(model_name=model_name,
controller_address=args.controller_address,
log_level=log_level,
q=queue,
started_event=e),
daemon=True,
)
processes["model_worker"][model_name] = process
if args.api_worker:
for model_name in args.model_name:
config = get_model_worker_config(model_name)
if (config.get("online_api")
and config.get("worker_class")
and model_name in FSCHAT_MODEL_WORKERS):
e = manager.Event()
model_worker_started.append(e)
process = Process(
target=run_model_worker,
name=f"api_worker - {model_name}",
kwargs=dict(model_name=model_name,
controller_address=args.controller_address,
log_level=log_level,
q=queue,
started_event=e),
daemon=True,
)
processes["online_api"][model_name] = process
api_started = manager.Event()
if args.api:
process = Process(
target=run_api_server,
name=f"API Server",
kwargs=dict(started_event=api_started, run_mode=run_mode),
daemon=True,
)
processes["api"] = process
webui_started = manager.Event()
if args.webui:
process = Process(
target=run_webui,
name=f"WEBUI Server",
kwargs=dict(started_event=webui_started, run_mode=run_mode),
daemon=True,
)
processes["webui"] = process
if process_count() == 0:
parser.print_help()
else:
try:
            # make sure tasks can exit cleanly after receiving SIGINT
if p:= processes.get("controller"):
p.start()
p.name = f"{p.name} ({p.pid})"
                controller_started.wait()  # wait for the controller to finish starting
if p:= processes.get("openai_api"):
p.start()
p.name = f"{p.name} ({p.pid})"
for n, p in processes.get("model_worker", {}).items():
p.start()
p.name = f"{p.name} ({p.pid})"
for n, p in processes.get("online_api", []).items():
p.start()
p.name = f"{p.name} ({p.pid})"
            # wait for all model_worker processes to finish starting
for e in model_worker_started:
e.wait()
if p:= processes.get("api"):
p.start()
p.name = f"{p.name} ({p.pid})"
                api_started.wait()  # wait for api.py to finish starting
if p:= processes.get("webui"):
p.start()
p.name = f"{p.name} ({p.pid})"
                webui_started.wait()  # wait for webui.py to finish starting
dump_server_info(after_start=True, args=args)
while True:
                cmd = queue.get()  # received a model-switch message
e = manager.Event()
if isinstance(cmd, list):
model_name, cmd, new_model_name = cmd
if cmd == "start": # 运行新模型
logger.info(f"准备启动新模型进程:{new_model_name}")
process = Process(
target=run_model_worker,
name=f"model_worker - {new_model_name}",
kwargs=dict(model_name=new_model_name,
controller_address=args.controller_address,
log_level=log_level,
q=queue,
started_event=e),
daemon=True,
)
process.start()
process.name = f"{process.name} ({process.pid})"
processes["model_worker"][new_model_name] = process
e.wait()
logger.info(f"成功启动新模型进程:{new_model_name}")
elif cmd == "stop":
if process := processes["model_worker"].get(model_name):
time.sleep(1)
process.terminate()
process.join()
logger.info(f"停止模型进程:{model_name}")
else:
logger.error(f"未找到模型进程:{model_name}")
elif cmd == "replace":
if process := processes["model_worker"].pop(model_name, None):
logger.info(f"停止模型进程:{model_name}")
start_time = datetime.now()
time.sleep(1)
process.terminate()
process.join()
process = Process(
target=run_model_worker,
name=f"model_worker - {new_model_name}",
kwargs=dict(model_name=new_model_name,
controller_address=args.controller_address,
log_level=log_level,
q=queue,
started_event=e),
daemon=True,
)
process.start()
process.name = f"{process.name} ({process.pid})"
processes["model_worker"][new_model_name] = process
e.wait()
timing = datetime.now() - start_time
logger.info(f"成功启动新模型进程:{new_model_name}。用时:{timing}。")
else:
logger.error(f"未找到模型进程:{model_name}")
# for process in processes.get("model_worker", {}).values():
# process.join()
# for process in processes.get("online_api", {}).values():
# process.join()
# for name, process in processes.items():
# if name not in ["model_worker", "online_api"]:
# if isinstance(p, dict):
# for work_process in p.values():
# work_process.join()
# else:
# process.join()
except Exception as e:
logger.error(e)
logger.warning("Caught KeyboardInterrupt! Setting stop event...")
finally:
# Send SIGINT if process doesn't exit quickly enough, and kill it as last resort
# .is_alive() also implicitly joins the process (good practice in linux)
# while alive_procs := [p for p in processes.values() if p.is_alive()]:
for p in processes.values():
logger.warning("Sending SIGKILL to %s", p)
# Queues and other inter-process communication primitives can break when
# process is killed, but we don't care here
if isinstance(p, dict):
for process in p.values():
process.kill()
else:
p.kill()
for p in processes.values():
logger.info("Process status: %s", p)
if __name__ == "__main__":
if sys.version_info < (3, 10):
loop = asyncio.get_event_loop()
else:
try:
loop = asyncio.get_running_loop()
except RuntimeError:
loop = asyncio.new_event_loop()
asyncio.set_event_loop(loop)
    # run the coroutine synchronously
loop.run_until_complete(start_main_server())
    # Example API call after the services have started:
# import openai
# openai.api_key = "EMPTY" # Not support yet
# openai.api_base = "http://localhost:8888/v1"
# model = "chatglm2-6b"
# # create a chat completion
# completion = openai.ChatCompletion.create(
# model=model,
# messages=[{"role": "user", "content": "Hello! What is your name?"}]
# )
# # print the completion
# print(completion.choices[0].message.content)
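    # The while-loop in start_main_server also accepts model-switch requests over the
    # shared queue. Based on the handling above, a producer pushes a three-element list
    # [model_name, cmd, new_model_name] where cmd is "start", "stop" or "replace", e.g.
    # queue.put(["chatglm2-6b", "replace", "chatglm3-6b"])
    # (model names above are placeholders; in practice these requests normally come
    # from the running services rather than being pushed by hand)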
| [] |
2024-01-10 | Leweibo/Langchain-Chatchat | document_loaders~myimgloader.py | from typing import List
from langchain.document_loaders.unstructured import UnstructuredFileLoader
class RapidOCRLoader(UnstructuredFileLoader):
def _get_elements(self) -> List:
def img2text(filepath):
from rapidocr_onnxruntime import RapidOCR
resp = ""
ocr = RapidOCR()
result, _ = ocr(filepath)
if result:
ocr_result = [line[1] for line in result]
resp += "\n".join(ocr_result)
return resp
text = img2text(self.file_path)
from unstructured.partition.text import partition_text
return partition_text(text=text, **self.unstructured_kwargs)
if __name__ == "__main__":
loader = RapidOCRLoader(file_path="../tests/samples/ocr_test.jpg")
docs = loader.load()
print(docs)
| [] |
2024-01-10 | sloppycoder/pybot | bot~feature_extract.py | import json
from datetime import datetime
from typing import Any, Iterator
import openai
import pandas as pd
from openai.types.chat.chat_completion import ChatCompletion
from tenacity import (
RetryError,
retry,
retry_if_exception_type,
stop_after_attempt,
wait_random_exponential,
)
from bot import cache, log
from bot.utils import blank_filler
_MODEL_MAP_ = {"35t": "gpt-3.5-turbo-1106", "4pre": "gpt-4-1106-preview"}
class InvalidResponse(Exception):
pass
def walk_response(response: Any, parts: list[str]) -> Iterator[dict]:
# openai gpt4-1106-preview returns different structures
#
# type1:
#
# {
# "products": [
# {"original_string":"sku1 desc1", "type": "determined by gpt"...},
# {"original_string":"sku1 desc2", "type": "determined by gpt"...},
# ]
# }
# key product sometimes can be features, items, etc
#
# type2:
#
# {
# "1": { "type": "determined by gpt"...},
# "2": { "type": "determined by gpt"...},
# },
#
# type3:
# {
# "sku1 desc1" : {"type": "determined by gpt"...},
# "sku1 desc2" : {"type": "determined by gpt"...},
# }
#
# type4:
# when input is a single item the output is only one single dict
# {"original_string":"sku1 desc1", "type": "determined by gpt"...},
#
if "original_string" in response:
# type4: single dict
yield response
else:
for key in response:
if key in parts:
# this handles type 3
yield response[key]
else:
if isinstance(response[key], list):
# this handles type 2
for item in response[key]:
yield item
else:
# this handles type 1
yield response[key]
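# Shapes handled above, with assumed inputs for illustration:
# parts = ["sku1 desc1", "sku1 desc2"]
# type1: {"products": [{"original_string": "sku1 desc1", ...}, {"original_string": "sku1 desc2", ...}]}
# type3: {"sku1 desc1": {"type": "..."}, "sku1 desc2": {"type": "..."}}
# type4: {"original_string": "sku1 desc1", "type": "..."}
# In every case list(walk_response(response, parts)) yields one feature dict per
# input part (a single dict for type4).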
@retry(
wait=wait_random_exponential(min=2, max=60),
stop=stop_after_attempt(6),
retry=retry_if_exception_type(InvalidResponse),
)
def invoke_openai_completion(parts: list[str], model_version: str) -> ChatCompletion:
prompt_list = "\n".join(parts)
start_t = datetime.now()
log.debug(f"inovking API openai.chat.completions.create(...), input={prompt_list}")
completion = openai.chat.completions.create(
model=_MODEL_MAP_[model_version],
messages=[
{
"role": "system",
"content": """
I have a list of parts descriptions from some industrial enviroment. Each entry describes
a part used in a factory, typically consists simple description of the functionality
in Chinese and sometimes brand and model number too. I want to extract features from the strings.
Here are the featurs I'm interested in:
type, function, dimension, model_number,material.
If you see other features, just concatenate them into a single feature called "extra".
use "original_string" to save the original input. retain the original Chinese text for features
in output. Do not translate them into English.
""",
},
{
"role": "user",
"content": f"""
I want to use json output format.
Please extract the features from the following list. treat each line as one input
{prompt_list}
""",
},
],
response_format={"type": "json_object"},
)
if completion.choices[0].finish_reason != "stop":
log.info(f"completion is not finished: reason={completion.choices[0].finish_reason}")
raise InvalidResponse("completion is not finished")
else:
log.info(f"{len(parts)} inputs completed in {(datetime.now()-start_t).total_seconds():.3f} seconds")
log.info(completion.usage)
reply = completion.choices[0].message.content
if reply is None:
raise InvalidResponse("Completion finished but reply is None")
try:
response = json.loads(reply)
log.debug(json.dumps(response, indent=4, ensure_ascii=False))
# the logic below counts number of responses and raise retry if
# the output is not consistent with the input
n_items = sum(1 for e in walk_response(response, parts))
if n_items != len(parts):
log.info(f"{len(parts)} intputs yielded {n_items} outputs")
# if len(parts) < 10 or abs(n_items - len(parts)) >= 2:
# trigger retry only if the discrepenacy is large
# TODO: check if should allow some mismatch in some cases
raise InvalidResponse("number of inputs and outputs are not the same")
except json.JSONDecodeError:
log.warn("unable to parse output as json. got {reply}")
raise InvalidResponse("unable to parse output as json")
return completion
def extract_features_with_openai(input_df: pd.DataFrame, key_col: str, model_version: str) -> pd.DataFrame:
all_features = {key: cache.find_extracted_features(key, model_version) for key in input_df[key_col]}
items_not_in_cache = [k for k, v in all_features.items() if v is None]
log.info(
f"found {len(all_features)} items in cache, {len(items_not_in_cache)} items will be retrieved from openai api"
)
# call openai with a chunk of items at a time
    # otherwise we will exceed the input length limit
chunk_size, count = 20, 0
for i in range(0, len(items_not_in_cache), chunk_size):
part_list = items_not_in_cache[i : i + chunk_size]
try:
completion = invoke_openai_completion(part_list, model_version)
response = json.loads(completion.choices[0].message.content) # type: ignore
for item in walk_response(response, items_not_in_cache):
try:
key = item["original_string"]
all_features[key] = item
cache.save_extracted_feature(key, model_version, item)
count += 1
except KeyError:
log.warn(f"original_text not found in response {item}")
except RetryError:
log.warn("failed after configured retries")
log.info(f"extracted fatures for {count} items, total {len(items_not_in_cache)}")
result_df = pd.DataFrame.from_dict(all_features, orient="index").applymap(blank_filler)
return pd.merge(input_df, result_df, on=["original_string"])
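# Rough usage sketch (assumes a configured OPENAI_API_KEY and the bot.cache backend):
# df = pd.DataFrame({"original_string": ["ball valve DN50 stainless steel", "SKF 6205 bearing"]})
# enriched = extract_features_with_openai(df, key_col="original_string", model_version="35t")
# "35t" maps to gpt-3.5-turbo via _MODEL_MAP_; the merge key must be "original_string".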
| [
"\n I have a list of parts descriptions from some industrial enviroment. Each entry describes\n a part used in a factory, typically consists simple description of the functionality\n in Chinese and sometimes brand and model number too. I want to extract features from the strings.\n Here are the featurs I'm interested in:\n type, function, dimension, model_number,material.\n If you see other features, just concatenate them into a single feature called \"extra\".\n use \"original_string\" to save the original input. retain the original Chinese text for features\n in output. Do not translate them into English.\n ",
"\n I want to use json output format.\n Please extract the features from the following list. treat each line as one input\n PLACEHOLDER\n ",
"\n"
] |
2024-01-10 | HimanshuRaj98/test | playground~agentbox.py | import asyncio
import sys
# guard the Windows-only event loop policy so the script also runs on Linux/macOS
if sys.platform == "win32":
    asyncio.set_event_loop_policy(asyncio.WindowsSelectorEventLoopPolicy())
import os
script_dir = os.path.dirname(os.path.abspath(__file__))
openagent_dir = os.path.abspath(os.path.join(script_dir, ".."))
sys.path.append(openagent_dir)
import openagent
from openagent.llms._openai import OpenAI as guidance_llm
from openagent.agent.chat import ChatAgent
from dotenv import load_dotenv
load_dotenv()
from jupyter_client import KernelManager
from IPython import display
import subprocess
import ast
import argparse
import threading
def agent():
llm = guidance_llm(
model="gpt-3.5-turbo"
)
chat_template = '''
{{#user~}}
I want to translate the following English text into Python code:
QUERY: {{input}}
{{~/user}}
{{#assistant~}}
Sure, I can assist with that. If I need more information, I'll ask for clarification.
{{~/assistant}}
{{#user~}}
Yes, go ahead and write the complete code.
{{~/user}}
{{#assistant~}}
{{gen 'response' temperature=0 max_tokens=3900}}
{{~/assistant}}
{{#assistant~}}
If the context or the task is not clear, please provide additional information to clarify.
{{~/assistant}}'''
agent = ChatAgent(
llm=llm,
prompt_template=chat_template,
)
return agent
def install_dependencies(code):
try:
# Parse the code to extract import statements
parsed_ast = ast.parse(code)
imports = []
for node in ast.walk(parsed_ast):
if isinstance(node, ast.Import):
imports.extend([name.name for name in node.names])
elif isinstance(node, ast.ImportFrom):
module_name = node.module
if module_name is not None:
imports.append(module_name)
# Remove duplicate imports and filter out standard library modules
imports = list(set(imports))
# print("imports", imports)
resolved_imports = set()
for imp in imports:
if '.' in imp:
parent_module = imp.split('.')[0]
resolved_imports.add(parent_module)
else:
resolved_imports.add(imp)
# Remove duplicate imports and filter out standard library modules
resolved_imports = list(resolved_imports)
# print("resolved_imports", resolved_imports)
third_party_dependencies = [dep for dep in resolved_imports if dep not in sys.modules]
# print("third_party_dependencies", third_party_dependencies)
if third_party_dependencies:
subprocess.check_call([sys.executable, "-m", "pip", "install"] + third_party_dependencies)
return True
else:
# print("No third-party dependencies detected.")
return True
except subprocess.CalledProcessError:
print("Dependency installation failed.")
return False
def run_python_code_in_kernel(code):
# Create a kernel manager
km = KernelManager(kernel_name='python3') # Use the appropriate kernel name
# Start the kernel
km.start_kernel()
# Connect to the kernel
kc = km.client()
kc.start_channels()
# Execute the code in the kernel
kc.execute(code)
# Create a thread for waiting on messages
def wait_for_messages():
try:
while True:
msg = kc.get_iopub_msg()
msg_type = msg['header']['msg_type']
if msg_type == 'display_data':
output_data = msg['content']['data']
if 'image/png' in output_data:
display.display_png(output_data['image/png'], raw=True)
elif 'image/jpeg' in output_data:
                        display.display_jpeg(output_data['image/jpeg'], raw=True)
elif msg_type == 'stream':
output_data = msg['content']['text']
output_data = output_data.split("\n")
for output in output_data[:-1]:
display.display(output)
except asyncio.CancelledError:
pass # Ignore the exception
# Start the message-waiting thread
message_thread = threading.Thread(target=wait_for_messages)
message_thread.start()
# Wait for the specified timeout
timeout_seconds = 10
message_thread.join(timeout_seconds)
# Check if the thread is still alive (indicating timeout)
if message_thread.is_alive():
print("Code execution completed")
else:
print("Code execution completed within the timeout.")
# Stop the kernel
kc.stop_channels()
km.shutdown_kernel()
# Main function
def main(gpt_prompt):
res = agent().run(input=gpt_prompt)
code = f"""{res.split('```')[1].replace('python', '')}"""
print(code)
# Install dependencies
if install_dependencies(code):
# Run the generated code in the Jupyter kernel
run_python_code_in_kernel(code)
if __name__ == "__main__":
parser = argparse.ArgumentParser(description='Execute Python code from the command line.')
parser.add_argument("--gpt_prompt", help="Python code to be executed", default=None)
args = parser.parse_args()
gpt_prompt = args.gpt_prompt
main(gpt_prompt)
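    # Hypothetical invocation (requires OPENAI_API_KEY in .env and a local python3
    # Jupyter kernel available to jupyter_client):
    # python playground/agentbox.py --gpt_prompt "plot y = sin(x) for x from 0 to 2*pi"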
| [
"\n {{#user~}}\n I want to translate the following English text into Python code:\n QUERY: {{input}}\n {{~/user}}\n\n {{#assistant~}}\n Sure, I can assist with that. If I need more information, I'll ask for clarification.\n {{~/assistant}}\n\n {{#user~}}\n Yes, go ahead and write the complete code.\n {{~/user}}\n\n {{#assistant~}}\n {{gen 'response' temperature=0 max_tokens=3900}}\n {{~/assistant}}\n\n {{#assistant~}}\n If the context or the task is not clear, please provide additional information to clarify.\n {{~/assistant}}"
] |
2024-01-10 | HimanshuRaj98/test | openagent~finetune~LLMFinetune.py | from abc import ABC, abstractmethod
from logging import Logger
import openai
class LLMFinetune(ABC):
def __init__(self, logger: Logger, openai_key: str):
self.logger = logger
openai.api_key = openai_key
@abstractmethod
def transform_data(self, train_csv_file: str, val_csv_file: str , train_output_file: str, val_output_file: str) -> str:
pass
@abstractmethod
def finetune(self, **kwargs):
pass
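# A minimal concrete sketch (assumptions: the CSVs have "prompt" and "completion"
# columns, the legacy openai<1.0 fine-tuning endpoint is available, and the kwargs
# names used in finetune() are illustrative only).
import csv
import json
class ExampleCompletionFinetune(LLMFinetune):
    def transform_data(self, train_csv_file: str, val_csv_file: str, train_output_file: str, val_output_file: str) -> str:
        # Convert each CSV into the JSONL format expected by the fine-tuning API.
        for src, dst in ((train_csv_file, train_output_file), (val_csv_file, val_output_file)):
            with open(src, newline="") as f_in, open(dst, "w") as f_out:
                for row in csv.DictReader(f_in):
                    f_out.write(json.dumps({"prompt": row["prompt"], "completion": row["completion"]}) + "\n")
        return train_output_file
    def finetune(self, **kwargs):
        # Upload the training file, then start a job (legacy pre-1.0 openai API).
        upload = openai.File.create(file=open(kwargs["train_output_file"], "rb"), purpose="fine-tune")
        job = openai.FineTune.create(training_file=upload["id"], model=kwargs.get("model", "curie"))
        self.logger.info("Started fine-tune job %s", job["id"])
        return job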
| [] |
2024-01-10 | jesselau76/llama_index | gpt_index~optimization~optimizer.py | """Optimization related classes and functions."""
import logging
from abc import abstractmethod
from typing import Optional
from gpt_index.embeddings.base import BaseEmbedding
from gpt_index.embeddings.openai import OpenAIEmbedding
from gpt_index.indices.query.embedding_utils import get_top_k_embeddings
from gpt_index.indices.query.schema import QueryBundle
logger = logging.getLogger(__name__)
class BaseTokenUsageOptimizer:
"""Base class for optimizers that should be overwritten."""
@abstractmethod
def optimize(self, query_bundle: QueryBundle, text: str) -> str:
"""Optimize the input text given the query."""
raise NotImplementedError("Not implemented yet.")
class SentenceEmbeddingOptimizer(BaseTokenUsageOptimizer):
"""Optimization of a text chunk given the query by shortening the input text."""
def __init__(
self,
embed_model: Optional[BaseEmbedding] = None,
percentile_cutoff: Optional[float] = None,
threshold_cutoff: Optional[float] = None,
):
"""Optimizer class that is passed into BaseGPTIndexQuery.
Should be set like this:
.. code-block:: python
from gpt_index.optimization.optimizer import Optimizer
optimizer = SentenceEmbeddingOptimizer(
percentile_cutoff=0.5
this means that the top 50% of sentences will be used.
Alternatively, you can set the cutoff using a threshold
on the similarity score. In this case only setences with a
similarity score higher than the threshold will be used.
threshold_cutoff=0.7
these cutoffs can also be used together.
)
response = index.query(
"<query_str>", optimizer=optimizer
)
"""
self.embed_model = embed_model or OpenAIEmbedding()
self._percentile_cutoff = percentile_cutoff
self._threshold_cutoff = threshold_cutoff
def optimize(self, query_bundle: QueryBundle, text: str) -> str:
"""Optimize a text chunk given the query by shortening the input text."""
import nltk.data
try:
nltk.data.find("tokenizers/punkt")
except LookupError:
nltk.download("punkt")
tokenizer = nltk.data.load("tokenizers/punkt/english.pickle")
split_text = tokenizer.tokenize(text)
start_embed_token_ct = self.embed_model.total_tokens_used
if query_bundle.embedding is None:
query_bundle.embedding = self.embed_model.get_agg_embedding_from_queries(
query_bundle.embedding_strs
)
text_embeddings = self.embed_model._get_text_embeddings(split_text)
num_top_k = None
threshold = None
if self._percentile_cutoff is not None:
num_top_k = int(len(split_text) * self._percentile_cutoff)
if self._threshold_cutoff is not None:
threshold = self._threshold_cutoff
top_similarities, top_idxs = get_top_k_embeddings(
query_embedding=query_bundle.embedding,
embeddings=text_embeddings,
similarity_fn=self.embed_model.similarity,
similarity_top_k=num_top_k,
embedding_ids=[i for i in range(len(text_embeddings))],
similarity_cutoff=threshold,
)
net_embed_tokens = self.embed_model.total_tokens_used - start_embed_token_ct
logger.info(
f"> [optimize] Total embedding token usage: " f"{net_embed_tokens} tokens"
)
if len(top_idxs) == 0:
raise ValueError("Optimizer returned zero sentences.")
top_sentences = [split_text[i] for i in top_idxs]
logger.debug(f"> Top {len(top_idxs)} sentences with scores:\n")
for i in range(len(top_idxs)):
logger.debug(f"{i}. {top_sentences[i]} ({top_similarities[i]})")
return " ".join(top_sentences)
| [] |
2024-01-10 | jesselau76/llama_index | gpt_index~langchain_helpers~chain_wrapper.py | """Wrapper functions around an LLM chain."""
import logging
from dataclasses import dataclass
from typing import Any, Generator, Optional, Tuple
import openai
from langchain import Cohere, LLMChain, OpenAI
from langchain.llms import AI21
from langchain.llms.base import BaseLLM
from gpt_index.constants import MAX_CHUNK_SIZE, NUM_OUTPUTS
from gpt_index.prompts.base import Prompt
from gpt_index.utils import (
ErrorToRetry,
globals_helper,
retry_on_exceptions_with_backoff,
)
logger = logging.getLogger(__name__)
@dataclass
class LLMMetadata:
"""LLM metadata.
We extract this metadata to help with our prompts.
"""
max_input_size: int = MAX_CHUNK_SIZE
num_output: int = NUM_OUTPUTS
def _get_llm_metadata(llm: BaseLLM) -> LLMMetadata:
"""Get LLM metadata from llm."""
if not isinstance(llm, BaseLLM):
raise ValueError("llm must be an instance of langchain.llms.base.LLM")
if isinstance(llm, OpenAI):
return LLMMetadata(
max_input_size=llm.modelname_to_contextsize(llm.model_name),
num_output=llm.max_tokens,
)
elif isinstance(llm, Cohere):
# TODO: figure out max input size for cohere
return LLMMetadata(num_output=llm.max_tokens)
elif isinstance(llm, AI21):
# TODO: figure out max input size for AI21
return LLMMetadata(num_output=llm.maxTokens)
else:
return LLMMetadata()
def _get_response_gen(openai_response_stream: Generator) -> Generator:
"""Get response generator from openai response stream."""
for response in openai_response_stream:
yield response["choices"][0]["text"]
class LLMPredictor:
"""LLM predictor class.
Wrapper around an LLMChain from Langchain.
Args:
llm (Optional[langchain.llms.base.LLM]): LLM from Langchain to use
for predictions. Defaults to OpenAI's text-davinci-003 model.
Please see `Langchain's LLM Page
<https://langchain.readthedocs.io/en/latest/modules/llms.html>`_
for more details.
retry_on_throttling (bool): Whether to retry on rate limit errors.
Defaults to true.
"""
def __init__(
self, llm: Optional[BaseLLM] = None, retry_on_throttling: bool = True
) -> None:
"""Initialize params."""
self._llm = llm or OpenAI(temperature=0, model_name="text-davinci-003")
self.retry_on_throttling = retry_on_throttling
self._total_tokens_used = 0
self.flag = True
self._last_token_usage: Optional[int] = None
def get_llm_metadata(self) -> LLMMetadata:
"""Get LLM metadata."""
# TODO: refactor mocks in unit tests, this is a stopgap solution
if hasattr(self, "_llm") and self._llm is not None:
return _get_llm_metadata(self._llm)
else:
return LLMMetadata()
def _predict(self, prompt: Prompt, **prompt_args: Any) -> str:
"""Inner predict function.
If retry_on_throttling is true, we will retry on rate limit errors.
"""
llm_chain = LLMChain(
prompt=prompt.get_langchain_prompt(llm=self._llm), llm=self._llm
)
# Note: we don't pass formatted_prompt to llm_chain.predict because
# langchain does the same formatting under the hood
full_prompt_args = prompt.get_full_format_args(prompt_args)
if self.retry_on_throttling:
llm_prediction = retry_on_exceptions_with_backoff(
lambda: llm_chain.predict(**full_prompt_args),
[
ErrorToRetry(openai.error.RateLimitError),
ErrorToRetry(openai.error.ServiceUnavailableError),
ErrorToRetry(openai.error.TryAgain),
ErrorToRetry(
openai.error.APIConnectionError, lambda e: e.should_retry
),
],
)
else:
llm_prediction = llm_chain.predict(**full_prompt_args)
return llm_prediction
def predict(self, prompt: Prompt, **prompt_args: Any) -> Tuple[str, str]:
"""Predict the answer to a query.
Args:
prompt (Prompt): Prompt to use for prediction.
Returns:
Tuple[str, str]: Tuple of the predicted answer and the formatted prompt.
"""
formatted_prompt = prompt.format(llm=self._llm, **prompt_args)
llm_prediction = self._predict(prompt, **prompt_args)
logger.debug(llm_prediction)
# We assume that the value of formatted_prompt is exactly the thing
# eventually sent to OpenAI, or whatever LLM downstream
prompt_tokens_count = self._count_tokens(formatted_prompt)
prediction_tokens_count = self._count_tokens(llm_prediction)
self._total_tokens_used += prompt_tokens_count + prediction_tokens_count
return llm_prediction, formatted_prompt
def stream(self, prompt: Prompt, **prompt_args: Any) -> Tuple[Generator, str]:
"""Stream the answer to a query.
NOTE: this is a beta feature. Will try to build or use
better abstractions about response handling.
Args:
prompt (Prompt): Prompt to use for prediction.
Returns:
str: The predicted answer.
"""
if not isinstance(self._llm, OpenAI):
raise ValueError("stream is only supported for OpenAI LLMs")
formatted_prompt = prompt.format(llm=self._llm, **prompt_args)
raw_response_gen = self._llm.stream(formatted_prompt)
response_gen = _get_response_gen(raw_response_gen)
# NOTE/TODO: token counting doesn't work with streaming
return response_gen, formatted_prompt
@property
def total_tokens_used(self) -> int:
"""Get the total tokens used so far."""
return self._total_tokens_used
def _count_tokens(self, text: str) -> int:
tokens = globals_helper.tokenizer(text)
return len(tokens)
@property
def last_token_usage(self) -> int:
"""Get the last token usage."""
if self._last_token_usage is None:
return 0
return self._last_token_usage
@last_token_usage.setter
def last_token_usage(self, value: int) -> None:
"""Set the last token usage."""
self._last_token_usage = value
async def _apredict(self, prompt: Prompt, **prompt_args: Any) -> str:
"""Async inner predict function.
If retry_on_throttling is true, we will retry on rate limit errors.
"""
llm_chain = LLMChain(
prompt=prompt.get_langchain_prompt(llm=self._llm), llm=self._llm
)
# Note: we don't pass formatted_prompt to llm_chain.predict because
# langchain does the same formatting under the hood
full_prompt_args = prompt.get_full_format_args(prompt_args)
# TODO: support retry on throttling
llm_prediction = await llm_chain.apredict(**full_prompt_args)
return llm_prediction
async def apredict(self, prompt: Prompt, **prompt_args: Any) -> Tuple[str, str]:
"""Async predict the answer to a query.
Args:
prompt (Prompt): Prompt to use for prediction.
Returns:
Tuple[str, str]: Tuple of the predicted answer and the formatted prompt.
"""
formatted_prompt = prompt.format(llm=self._llm, **prompt_args)
llm_prediction = await self._apredict(prompt, **prompt_args)
logger.debug(llm_prediction)
# We assume that the value of formatted_prompt is exactly the thing
# eventually sent to OpenAI, or whatever LLM downstream
prompt_tokens_count = self._count_tokens(formatted_prompt)
prediction_tokens_count = self._count_tokens(llm_prediction)
self._total_tokens_used += prompt_tokens_count + prediction_tokens_count
return llm_prediction, formatted_prompt
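# Rough usage sketch (the qa_prompt object below is assumed to be a Prompt from
# gpt_index.prompts with {context_str} and {query_str} placeholders):
# predictor = LLMPredictor(llm=OpenAI(temperature=0, model_name="text-davinci-003"))
# answer, formatted = predictor.predict(qa_prompt, context_str="...", query_str="...")
# predictor.total_tokens_used then includes both prompt and completion tokens.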
| [] |
2024-01-10 | mar480/llama_index | tests~indices~embedding~test_base.py | """Test embedding functionalities."""
from collections import defaultdict
from typing import Any, Dict, List, Tuple
from unittest.mock import patch
import pytest
from gpt_index.data_structs.node_v2 import Node
from gpt_index.embeddings.base import mean_agg
from gpt_index.embeddings.openai import OpenAIEmbedding
from gpt_index.indices.tree.embedding_query import GPTTreeIndexEmbeddingQuery
from gpt_index.indices.tree.base import GPTTreeIndex
from gpt_index.langchain_helpers.chain_wrapper import (
LLMChain,
LLMMetadata,
LLMPredictor,
)
from gpt_index.langchain_helpers.text_splitter import TokenTextSplitter
from gpt_index.readers.schema.base import Document
from tests.mock_utils.mock_decorator import patch_common
from tests.mock_utils.mock_predict import mock_llmchain_predict
from tests.mock_utils.mock_prompts import (
MOCK_INSERT_PROMPT,
MOCK_QUERY_PROMPT,
MOCK_REFINE_PROMPT,
MOCK_SUMMARY_PROMPT,
MOCK_TEXT_QA_PROMPT,
)
from tests.mock_utils.mock_text_splitter import (
mock_token_splitter_newline_with_overlaps,
)
def test_embedding_similarity() -> None:
"""Test embedding similarity."""
embed_model = OpenAIEmbedding()
text_embedding = [3.0, 4.0, 0.0]
query_embedding = [0.0, 1.0, 0.0]
cosine = embed_model.similarity(query_embedding, text_embedding)
assert cosine == 0.8
def test_mean_agg() -> None:
"""Test mean aggregation for embeddings."""
embedding_0 = [3.0, 4.0, 0.0]
embedding_1 = [0.0, 1.0, 0.0]
output = mean_agg([embedding_0, embedding_1])
assert output == [1.5, 2.5, 0.0]
@pytest.fixture
def struct_kwargs() -> Tuple[Dict, Dict]:
"""Index kwargs."""
index_kwargs = {
"summary_template": MOCK_SUMMARY_PROMPT,
"insert_prompt": MOCK_INSERT_PROMPT,
"num_children": 2,
}
query_kwargs = {
"query_template": MOCK_QUERY_PROMPT,
"text_qa_template": MOCK_TEXT_QA_PROMPT,
"refine_template": MOCK_REFINE_PROMPT,
}
return index_kwargs, query_kwargs
@pytest.fixture
def documents() -> List[Document]:
"""Get documents."""
# NOTE: one document for now
doc_text = (
"Hello world.\n"
"This is a test.\n"
"This is another test.\n"
"This is a test v2."
)
return [Document(doc_text)]
def _get_node_text_embedding_similarities(
query_embedding: List[float], nodes: List[Node]
) -> List[float]:
"""Get node text embedding similarity."""
text_similarity_map = defaultdict(lambda: 0.0)
text_similarity_map["Hello world."] = 0.9
text_similarity_map["This is a test."] = 0.8
text_similarity_map["This is another test."] = 0.7
text_similarity_map["This is a test v2."] = 0.6
similarities = []
for node in nodes:
similarities.append(text_similarity_map[node.get_text()])
return similarities
@patch_common
@patch.object(
GPTTreeIndexEmbeddingQuery,
"_get_query_text_embedding_similarities",
side_effect=_get_node_text_embedding_similarities,
)
def test_embedding_query(
_mock_similarity: Any,
_mock_init: Any,
_mock_predict: Any,
_mock_total_tokens_used: Any,
_mock_split_text_overlap: Any,
_mock_split_text: Any,
struct_kwargs: Dict,
documents: List[Document],
) -> None:
"""Test embedding query."""
index_kwargs, query_kwargs = struct_kwargs
tree = GPTTreeIndex.from_documents(documents, **index_kwargs)
# test embedding query
query_str = "What is?"
response = tree.query(query_str, mode="embedding", **query_kwargs)
assert str(response) == ("What is?:Hello world.")
def _mock_tokenizer(text: str) -> int:
"""Mock tokenizer that splits by spaces."""
return len(text.split(" "))
@patch.object(LLMChain, "predict", side_effect=mock_llmchain_predict)
@patch("gpt_index.llm_predictor.base.OpenAI")
@patch.object(LLMPredictor, "get_llm_metadata", return_value=LLMMetadata())
@patch.object(LLMChain, "__init__", return_value=None)
@patch.object(
GPTTreeIndexEmbeddingQuery,
"_get_query_text_embedding_similarities",
side_effect=_get_node_text_embedding_similarities,
)
@patch.object(
TokenTextSplitter,
"split_text_with_overlaps",
side_effect=mock_token_splitter_newline_with_overlaps,
)
@patch.object(LLMPredictor, "_count_tokens", side_effect=_mock_tokenizer)
def test_query_and_count_tokens(
_mock_count_tokens: Any,
_mock_split_text: Any,
_mock_similarity: Any,
_mock_llmchain: Any,
_mock_llm_metadata: Any,
_mock_init: Any,
_mock_predict: Any,
struct_kwargs: Dict,
documents: List[Document],
) -> None:
"""Test query and count tokens."""
index_kwargs, query_kwargs = struct_kwargs
# First block is "Hello world.\nThis is a test.\n"
# Second block is "This is another test.\nThis is a test v2."
# first block is 5 tokens because
# last word of first line and first word of second line are joined
# second block is 8 tokens for similar reasons.
first_block_count = 5
second_block_count = 8
llmchain_mock_resp_token_count = 4
# build the tree
# TMP
tree = GPTTreeIndex.from_documents(documents, **index_kwargs)
assert tree.service_context.llm_predictor.total_tokens_used == (
first_block_count + llmchain_mock_resp_token_count
) + (second_block_count + llmchain_mock_resp_token_count)
# test embedding query
start_token_ct = tree._service_context.llm_predictor.total_tokens_used
query_str = "What is?"
# context is "hello world." which is 2 tokens
context_tokens = 2
# query is "what is?" which is 2 tokens
query_tokens = 2
# subtract one because the last token of the context is joined with first
input_tokens = context_tokens + query_tokens - 1
tree.query(query_str, mode="embedding", **query_kwargs)
assert (
tree.service_context.llm_predictor.total_tokens_used - start_token_ct
== input_tokens + llmchain_mock_resp_token_count
)
| [] |
2024-01-10 | mar480/llama_index | gpt_index~indices~tree~leaf_query.py | """Leaf query mechanism."""
import logging
from typing import Any, Dict, List, Optional, cast
from langchain.input import print_text
from gpt_index.data_structs.data_structs_v2 import IndexGraph
from gpt_index.data_structs.node_v2 import Node
from gpt_index.indices.query.base import BaseGPTIndexQuery
from gpt_index.indices.query.embedding_utils import SimilarityTracker
from gpt_index.indices.query.schema import QueryBundle
from gpt_index.indices.response.builder import ResponseBuilder
from gpt_index.indices.utils import extract_numbers_given_response, get_sorted_node_list
from gpt_index.prompts.default_prompts import (
DEFAULT_QUERY_PROMPT,
DEFAULT_QUERY_PROMPT_MULTIPLE,
)
from gpt_index.prompts.prompts import TreeSelectMultiplePrompt, TreeSelectPrompt
from gpt_index.response.schema import Response
logger = logging.getLogger(__name__)
class GPTTreeIndexLeafQuery(BaseGPTIndexQuery[IndexGraph]):
"""GPT Tree Index leaf query.
This class traverses the index graph and searches for a leaf node that can best
answer the query.
.. code-block:: python
response = index.query("<query_str>", mode="default")
Args:
query_template (Optional[TreeSelectPrompt]): Tree Select Query Prompt
(see :ref:`Prompt-Templates`).
query_template_multiple (Optional[TreeSelectMultiplePrompt]): Tree Select
Query Prompt (Multiple)
(see :ref:`Prompt-Templates`).
child_branch_factor (int): Number of child nodes to consider at each level.
If child_branch_factor is 1, then the query will only choose one child node
to traverse for any given parent node.
If child_branch_factor is 2, then the query will choose two child nodes.
"""
def __init__(
self,
index_struct: IndexGraph,
query_template: Optional[TreeSelectPrompt] = None,
query_template_multiple: Optional[TreeSelectMultiplePrompt] = None,
child_branch_factor: int = 1,
**kwargs: Any,
) -> None:
"""Initialize params."""
super().__init__(index_struct, **kwargs)
self.query_template = query_template or DEFAULT_QUERY_PROMPT
self.query_template_multiple = (
query_template_multiple or DEFAULT_QUERY_PROMPT_MULTIPLE
)
self.child_branch_factor = child_branch_factor
def _query_with_selected_node(
self,
selected_node: Node,
query_bundle: QueryBundle,
prev_response: Optional[str] = None,
level: int = 0,
) -> str:
"""Get response for selected node.
If not leaf node, it will recursively call _query on the child nodes.
If prev_response is provided, we will update prev_response with the answer.
"""
query_str = query_bundle.query_str
if len(self.index_struct.get_children(selected_node)) == 0:
response_builder = ResponseBuilder(
self._service_context,
self.text_qa_template,
self.refine_template,
)
self.response_builder.add_node_as_source(selected_node)
# use response builder to get answer from node
node_text = self._get_text_from_node(selected_node, level=level)
cur_response = response_builder.get_response_over_chunks(
query_str, [node_text], prev_response=prev_response
)
cur_response = cast(str, cur_response)
logger.debug(f">[Level {level}] Current answer response: {cur_response} ")
else:
cur_response = self._query_level(
self.index_struct.get_children(selected_node),
query_bundle,
level=level + 1,
)
if prev_response is None:
return cur_response
else:
context_msg = selected_node.get_text()
(
cur_response,
formatted_refine_prompt,
) = self._service_context.llm_predictor.predict(
self.refine_template,
query_str=query_str,
existing_answer=prev_response,
context_msg=context_msg,
)
logger.debug(f">[Level {level}] Refine prompt: {formatted_refine_prompt}")
logger.debug(f">[Level {level}] Current refined response: {cur_response} ")
return cur_response
def _query_level(
self,
cur_node_ids: Dict[int, str],
query_bundle: QueryBundle,
level: int = 0,
) -> str:
"""Answer a query recursively."""
query_str = query_bundle.query_str
cur_nodes = {
index: self._docstore.get_node(node_id)
for index, node_id in cur_node_ids.items()
}
cur_node_list = get_sorted_node_list(cur_nodes)
if len(cur_node_list) == 1:
logger.debug(f">[Level {level}] Only one node left. Querying node.")
return self._query_with_selected_node(
cur_node_list[0], query_bundle, level=level
)
elif self.child_branch_factor == 1:
query_template = self.query_template.partial_format(
num_chunks=len(cur_node_list), query_str=query_str
)
numbered_node_text = (
self._service_context.prompt_helper.get_numbered_text_from_nodes(
cur_node_list, prompt=query_template
)
)
(
response,
formatted_query_prompt,
) = self._service_context.llm_predictor.predict(
query_template,
context_list=numbered_node_text,
)
else:
query_template_multiple = self.query_template_multiple.partial_format(
num_chunks=len(cur_node_list),
query_str=query_str,
branching_factor=self.child_branch_factor,
)
numbered_node_text = (
self._service_context.prompt_helper.get_numbered_text_from_nodes(
cur_node_list, prompt=query_template_multiple
)
)
(
response,
formatted_query_prompt,
) = self._service_context.llm_predictor.predict(
query_template_multiple,
context_list=numbered_node_text,
)
logger.debug(
f">[Level {level}] current prompt template: {formatted_query_prompt}"
)
self._service_context.llama_logger.add_log(
{"formatted_prompt_template": formatted_query_prompt, "level": level}
)
debug_str = f">[Level {level}] Current response: {response}"
logger.debug(debug_str)
if self._verbose:
print_text(debug_str, end="\n")
numbers = extract_numbers_given_response(response, n=self.child_branch_factor)
if numbers is None:
debug_str = (
f">[Level {level}] Could not retrieve response - no numbers present"
)
logger.debug(debug_str)
if self._verbose:
print_text(debug_str, end="\n")
# just join text from current nodes as response
return response
result_response = None
for number_str in numbers:
number = int(number_str)
if number > len(cur_node_list):
logger.debug(
f">[Level {level}] Invalid response: {response} - "
f"number {number} out of range"
)
return response
# number is 1-indexed, so subtract 1
selected_node = cur_node_list[number - 1]
info_str = (
f">[Level {level}] Selected node: "
f"[{number}]/[{','.join([str(int(n)) for n in numbers])}]"
)
logger.info(info_str)
if self._verbose:
print_text(info_str, end="\n")
debug_str = " ".join(selected_node.get_text().splitlines())
full_debug_str = (
f">[Level {level}] Node "
f"[{number}] Summary text: "
f"{ selected_node.get_text() }"
)
logger.debug(full_debug_str)
if self._verbose:
print_text(full_debug_str, end="\n")
result_response = self._query_with_selected_node(
selected_node,
query_bundle,
prev_response=result_response,
level=level,
)
# result_response should not be None
return cast(str, result_response)
def _query(self, query_bundle: QueryBundle) -> Response:
"""Answer a query."""
# NOTE: this overrides the _query method in the base class
info_str = f"> Starting query: {query_bundle.query_str}"
logger.info(info_str)
if self._verbose:
print_text(info_str, end="\n")
response_str = self._query_level(
self.index_struct.root_nodes,
query_bundle,
level=0,
).strip()
return Response(response_str, source_nodes=self.response_builder.get_sources())
def _select_nodes(
self,
cur_node_list: List[Node],
query_bundle: QueryBundle,
level: int = 0,
) -> List[Node]:
query_str = query_bundle.query_str
if self.child_branch_factor == 1:
query_template = self.query_template.partial_format(
num_chunks=len(cur_node_list), query_str=query_str
)
numbered_node_text = (
self._service_context.prompt_helper.get_numbered_text_from_nodes(
cur_node_list, prompt=query_template
)
)
(
response,
formatted_query_prompt,
) = self._service_context.llm_predictor.predict(
query_template,
context_list=numbered_node_text,
)
else:
query_template_multiple = self.query_template_multiple.partial_format(
num_chunks=len(cur_node_list),
query_str=query_str,
branching_factor=self.child_branch_factor,
)
numbered_node_text = (
self._service_context.prompt_helper.get_numbered_text_from_nodes(
cur_node_list, prompt=query_template_multiple
)
)
(
response,
formatted_query_prompt,
) = self._service_context.llm_predictor.predict(
query_template_multiple,
context_list=numbered_node_text,
)
logger.debug(
f">[Level {level}] current prompt template: {formatted_query_prompt}"
)
self._service_context.llama_logger.add_log(
{"formatted_prompt_template": formatted_query_prompt, "level": level}
)
debug_str = f">[Level {level}] Current response: {response}"
logger.debug(debug_str)
if self._verbose:
print_text(debug_str, end="\n")
numbers = extract_numbers_given_response(response, n=self.child_branch_factor)
if numbers is None:
debug_str = (
f">[Level {level}] Could not retrieve response - no numbers present"
)
logger.debug(debug_str)
if self._verbose:
print_text(debug_str, end="\n")
# just join text from current nodes as response
return []
selected_nodes = []
for number_str in numbers:
number = int(number_str)
if number > len(cur_node_list):
logger.debug(
f">[Level {level}] Invalid response: {response} - "
f"number {number} out of range"
)
continue
# number is 1-indexed, so subtract 1
selected_node = cur_node_list[number - 1]
info_str = (
f">[Level {level}] Selected node: "
f"[{number}]/[{','.join([str(int(n)) for n in numbers])}]"
)
logger.info(info_str)
if self._verbose:
print_text(info_str, end="\n")
debug_str = " ".join(selected_node.get_text().splitlines())
full_debug_str = (
f">[Level {level}] Node "
f"[{number}] Summary text: "
f"{ selected_node.get_text() }"
)
logger.debug(full_debug_str)
if self._verbose:
print_text(full_debug_str, end="\n")
selected_nodes.append(selected_node)
return selected_nodes
def _retrieve_level(
self,
cur_node_ids: Dict[int, str],
query_bundle: QueryBundle,
level: int = 0,
) -> List[Node]:
"""Answer a query recursively."""
cur_nodes = {
index: self._docstore.get_node(node_id)
for index, node_id in cur_node_ids.items()
}
cur_node_list = get_sorted_node_list(cur_nodes)
if len(cur_node_list) > self.child_branch_factor:
selected_nodes = self._select_nodes(
cur_node_list,
query_bundle,
level=level,
)
else:
selected_nodes = cur_node_list
children_nodes = {}
for node in selected_nodes:
node_dict = self.index_struct.get_children(node)
children_nodes.update(node_dict)
if len(children_nodes) == 0:
# NOTE: leaf level
return selected_nodes
else:
return self._retrieve_level(children_nodes, query_bundle, level + 1)
def _retrieve(
self,
query_bundle: QueryBundle,
similarity_tracker: Optional[SimilarityTracker] = None,
) -> List[Node]:
"""Get nodes for response."""
return self._retrieve_level(
self.index_struct.root_nodes,
query_bundle,
level=0,
)
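# Rough usage sketch (keyword forwarding from index.query to this class is assumed):
# index = GPTTreeIndex.from_documents(documents)
# response = index.query("What did the author do?", mode="default", child_branch_factor=2)
# With child_branch_factor=2, two child nodes are followed at each level instead of one.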
| [] |
2024-01-10 | mar480/llama_index | gpt_index~indices~struct_store~pandas_query.py | """Default query for GPTPandasIndex."""
import logging
from typing import Any, Callable, Optional
import pandas as pd
from langchain.input import print_text
from gpt_index.data_structs.table_v2 import PandasStructTable
from gpt_index.indices.query.base import BaseGPTIndexQuery
from gpt_index.indices.query.schema import QueryBundle
from gpt_index.prompts.default_prompts import DEFAULT_PANDAS_PROMPT
from gpt_index.prompts.prompts import PandasPrompt
from gpt_index.response.schema import Response
logger = logging.getLogger(__name__)
DEFAULT_INSTRUCTION_STR = (
"We wish to convert this query to executable Python code using Pandas.\n"
"The final line of code should be a Python expression that can be called "
"with the `eval()` function. This expression should represent a solution "
"to the query."
)
def default_output_processor(
output: str, df: pd.DataFrame, **output_kwargs: Any
) -> str:
"""Process outputs in a default manner."""
import ast
import sys
import traceback
if sys.version_info < (3, 9):
logger.warn(
"Python version must be >= 3.9 in order to use "
"the default output processor, which executes "
"the Python query. Instead, we will return the "
"raw Python instructions as a string."
)
return output
local_vars = {"df": df}
# NOTE: inspired from langchain's tool
# see langchain.tools.python.tool (PythonAstREPLTool)
try:
tree = ast.parse(output)
module = ast.Module(tree.body[:-1], type_ignores=[])
exec(ast.unparse(module), {}, local_vars) # type: ignore
module_end = ast.Module(tree.body[-1:], type_ignores=[])
module_end_str = ast.unparse(module_end) # type: ignore
try:
return str(eval(module_end_str, {}, local_vars))
except Exception as e:
raise e
except Exception as e:
err_string = (
"There was an error running the output as Python code. "
f"Error message: {e}"
)
traceback.print_exc()
return err_string
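# Worked example for the processor above (values are illustrative):
# df = pd.DataFrame({"city": ["Toronto", "Tokyo"], "population": [2_930_000, 13_960_000]})
# default_output_processor("df['population'].max()", df)  ->  "13960000"
# Every statement except the last is exec'd; the final expression is eval'd and
# returned as a string.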
class GPTNLPandasIndexQuery(BaseGPTIndexQuery[PandasStructTable]):
"""GPT Pandas query.
Convert natural language to Pandas python code.
.. code-block:: python
response = index.query("<query_str>", mode="default")
Args:
df (pd.DataFrame): Pandas dataframe to use.
instruction_str (Optional[str]): Instruction string to use.
output_processor (Optional[Callable[[str], str]]): Output processor.
A callable that takes in the output string, pandas DataFrame,
and any output kwargs and returns a string.
pandas_prompt (Optional[PandasPrompt]): Pandas prompt to use.
head (int): Number of rows to show in the table context.
"""
def __init__(
self,
index_struct: PandasStructTable,
df: Optional[pd.DataFrame] = None,
instruction_str: Optional[str] = None,
output_processor: Optional[Callable] = None,
pandas_prompt: Optional[PandasPrompt] = None,
output_kwargs: Optional[dict] = None,
head: int = 5,
**kwargs: Any,
) -> None:
"""Initialize params."""
super().__init__(index_struct=index_struct, **kwargs)
if df is None:
raise ValueError("df must be provided.")
self.df = df
self._head = head
self._pandas_prompt = pandas_prompt or DEFAULT_PANDAS_PROMPT
self._instruction_str = instruction_str or DEFAULT_INSTRUCTION_STR
self._output_processor = output_processor or default_output_processor
self._output_kwargs = output_kwargs or {}
def _get_table_context(self) -> str:
"""Get table context."""
return str(self.df.head(self._head))
def _query(self, query_bundle: QueryBundle) -> Response:
"""Answer a query."""
context = self._get_table_context()
pandas_response_str, _ = self._service_context.llm_predictor.predict(
self._pandas_prompt,
df_str=context,
query_str=query_bundle.query_str,
instruction_str=self._instruction_str,
)
if self._verbose:
print_text(f"> Pandas Instructions:\n" f"```\n{pandas_response_str}\n```\n")
pandas_output = self._output_processor(
pandas_response_str,
self.df,
**self._output_kwargs,
)
if self._verbose:
print_text(f"> Pandas Output: {pandas_output}\n")
response_extra_info = {
"pandas_instruction_str": pandas_response_str,
}
return Response(response=pandas_output, extra_info=response_extra_info)
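# Rough end-to-end sketch (the GPTPandasIndex constructor signature is assumed):
# index = GPTPandasIndex(df=df)
# response = index.query("Which city has the highest population?", verbose=True)
# response.extra_info["pandas_instruction_str"] holds the generated pandas code.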
| [] |
2024-01-10 | mweser/forest | imogen~imogen.py | #!/usr/bin/python3.9
# Copyright (c) 2021 MobileCoin Inc.
# Copyright (c) 2021 The Forest Team
# Copyright (c) 2021 Sylvie Liberman
import asyncio
import base64
import datetime
import json
import logging
import time
import urllib
from pathlib import Path
from typing import Callable, Optional
import aioredis
import base58
import openai
from aiohttp import web
from forest import utils
from forest.core import JSON, Bot, Message, Response, app, hide
openai.api_key = utils.get_secret("OPENAI_API_KEY")
if not utils.LOCAL:
aws_cred = utils.get_secret("AWS_CREDENTIALS")
if aws_cred:
aws_dir = Path("/root/.aws")
aws_dir.mkdir(parents=True, exist_ok=True)
with (aws_dir / "credentials").open("w") as creds:
creds.write(base64.b64decode(utils.get_secret("AWS_CREDENTIALS")).decode())
logging.info("wrote creds")
with (aws_dir / "config").open("w") as config:
config.write("[profile default]\nregion = us-east-1")
logging.info("writing config")
else:
logging.info("couldn't find creds")
ssh_key = utils.get_secret("SSH_KEY")
open("id_rsa", "w").write(base64.b64decode(ssh_key).decode())
password, rest = utils.get_secret("REDIS_URL").removeprefix("redis://:").split("@")
host, port = rest.split(":")
redis = aioredis.Redis(host=host, port=int(port), password=password)
instance_id = "aws ec2 describe-instances --region us-east-1 | jq -r .Reservations[].Instances[].InstanceId"
status = "aws ec2 describe-instances --region us-east-1| jq -r '..|.State?|.Name?|select(.!=null)'"
start = "aws ec2 start-instances --region us-east-1 --instance-ids {}"
stop = "aws ec2 stop-instances --region us-east-1 --instance-ids {}"
get_ip = "aws ec2 describe-instances --region us-east-1|jq -r .Reservations[].Instances[].PublicIpAddress"
# start_worker = "ssh -i id_rsa -o ConnectTimeout=2 ubuntu@{} ~/ml/read_redis.py {}"
get_cost = (
"aws ce get-cost-and-usage --time-period Start={},End={} --granularity DAILY --metrics BlendedCost | "
"jq -r .ResultsByTime[0].Total.BlendedCost.Amount"
)
get_all_cost = (
"aws ce get-cost-and-usage --time-period Start=2021-10-01,End={end} --granularity DAILY --metrics BlendedCost | "
"jq '.ResultsByTime[] | {(.TimePeriod.Start): .Total.BlendedCost.Amount}' | jq -s add"
)
async def get_output(cmd: str) -> str:
proc = await asyncio.create_subprocess_shell(cmd, stdout=-1, stderr=-1)
stdout, stderr = await proc.communicate()
return stdout.decode().strip() or stderr.decode().strip()
class Imogen(Bot):
worker_instance_id: Optional[str] = None
async def start_process(self) -> None:
self.worker_instance_id = await get_output(instance_id)
await super().start_process()
async def do_get_cost(self, _: Message) -> str:
today = datetime.date.today()
tomorrow = today + datetime.timedelta(1)
out = await get_output(get_cost.format(today, tomorrow))
try:
return str(round(float(out), 2))
except ValueError:
return out
async def do_get_all_cost(self, _: Message) -> str:
tomorrow = datetime.date.today() + datetime.timedelta(1)
out = await get_output(get_all_cost.replace("{end}", str(tomorrow)))
return json.loads(out)
do_get_costs = do_get_all_costs = hide(do_get_all_cost)
async def do_status(self, _: Message) -> str:
"shows the GPU instance state (not the program) and queue size"
state = await get_output(status)
queue_size = await redis.llen("prompt_queue")
return f"worker state: {state}, queue size: {queue_size}"
image_rate_cents = 5
async def do_imagine_nostart(self, msg: Message) -> str:
logging.info(msg.full_text)
logging.info(msg.text)
if msg.group:
destination = base58.b58encode(msg.group).decode()
else:
destination = msg.source
params: JSON = {}
# if msg.attachments:
# attachment = msg.attachments[0]
# key = attachment["id"] + "-" + attachment["filename"]
# params["init_image"] = key
# await redis.set(
# key, open(Path("./attachments") / attachment["id"], "rb").read()
# )
await redis.rpush(
"prompt_queue",
json.dumps({"prompt": msg.text, "callback": destination, "params": params}),
)
timed = await redis.llen("prompt_queue")
return f"you are #{timed} in line"
async def do_imagine(self, msg: Message) -> str:
"""/imagine <prompt>"""
# check if worker is up
resp = await self.do_imagine_nostart(msg)
state = await get_output(status)
logging.info("worker state: %s", state)
# await self.mobster.put_usd_tx(msg.sender, self.image_rate_cents, msg.text[:32])
if state in ("stopped", "stopping"):
# if not, turn it on
output = await get_output(start.format(self.worker_instance_id))
logging.info(output)
if "InsufficientInstanceCapacity" in output:
resp += ".\nsorry, andy jassy hates us. no gpu for us"
# asyncio.create_task(really_start_worker())
return resp
def make_prefix(prefix: str, *_) -> Callable: # type: ignore # pylint: disable=no-self-argument
async def wrapped(self: "Imogen", msg: Message) -> str:
msg.text = f"{prefix} {msg.text}"
return await self.do_imagine(msg)
wrapped.__doc__ = f"/{prefix} <prompt>: imagine it with {prefix} style"
return wrapped
do_mythical = make_prefix("mythical")
do_festive = make_prefix("festive")
do_dark_fantasy = make_prefix("dark fantasy")
do_psychic = make_prefix("psychic")
do_pastel = make_prefix("pastel")
do_hd = make_prefix("hd")
do_vibrant = make_prefix("vibrant")
do_fantasy = make_prefix("fantasy")
do_steampunk = make_prefix("steampunk")
do_ukiyo = make_prefix("ukiyo")
do_synthwave = make_prefix("synthwave")
del make_prefix # shouldn't be used after class definition is over
async def do_paint(self, msg: Message) -> str:
"""/paint <prompt>"""
logging.info(msg.full_text)
destination = base58.b58encode(msg.group).decode() if msg.group else msg.source
await redis.rpush(
"prompt_queue",
json.dumps(
{
"prompt": msg.text,
"callback": destination,
"params": {
"vqgan_config": "wikiart_16384.yaml",
"vqgan_checkpoint": "wikiart_16384.ckpt",
},
}
),
)
timed = await redis.llen("prompt_queue")
state = await get_output(status)
logging.info("worker state: %s", state)
# await self.mobster.put_usd_tx(msg.sender, self.image_rate_cents, msg.text[:32])
if state in ("stopped", "stopping"):
# if not, turn it on
logging.info(await get_output(start.format(self.worker_instance_id)))
return f"you are #{timed} in line"
async def do_c(self, msg: Message) -> str:
prompt = (
"The following is a conversation with an AI assistant. "
"The assistant is helpful, creative, clever, funny, very friendly, an artist and anarchist\n\n"
"Human: Hello, who are you?\nAI: My name is Imogen, I'm an AI that makes dream-like images. How can I help you today?\n"
f"Human: {msg.text}\nAI: "
)
response = openai.Completion.create( # type: ignore
engine="davinci",
prompt=prompt,
temperature=0.9,
max_tokens=140,
top_p=1,
frequency_penalty=0.0,
presence_penalty=0.6,
stop=["\n", " Human:", " AI:"],
)
return response["choices"][0]["text"].strip()
@hide
async def do_gpt(self, msg: Message) -> str:
response = openai.Completion.create( # type: ignore
engine="davinci",
prompt=msg.text,
temperature=0.9,
max_tokens=120,
top_p=1,
frequency_penalty=0.01,
presence_penalty=0.6,
stop=["\n", " Human:", " AI:"],
)
return response["choices"][0]["text"].strip()
async def do_stop(self, _: Message) -> str:
return await get_output(stop.format(self.worker_instance_id))
async def do_start(self, _: Message) -> str:
return await get_output(start.format(self.worker_instance_id))
async def do_list_queue(self, _: Message) -> str:
try:
q = "; ".join(
json.loads(item)["prompt"]
for item in await redis.lrange("prompt_queue", 0, -1)
)
return q or "queue empty"
except json.JSONDecodeError:
return "json decode error?"
do_list_prompts = do_listqueue = do_queue = hide(do_list_queue)
async def do_dump_queue(self, _: Message) -> Response:
prompts = []
while 1:
if not (item := await redis.lpop("prompt_queue")):
break
prompts.append(str(json.loads(item)["prompt"]))
return prompts
# async def payment_response(self, _: Message, _: int) -> None:
# return None
# eh
# async def async_shutdown(self):
# await redis.disconnect()
# super().async_shutdown()
async def store_image_handler(request: web.Request) -> web.Response:
bot = request.app.get("bot")
if not bot:
return web.Response(status=504, text="Sorry, no live workers.")
reader = await request.multipart()
async for field in reader:
logging.info(field)
logging.info("multipart field name: %s", field.name)
filename = field.filename or f"attachment-{time.time()}.jpg"
# You cannot rely on Content-Length if transfer is chunked.
size = 0
path = Path(filename).absolute()
with open(path, "wb") as f:
logging.info("writing file")
while True:
chunk = await field.read_chunk() # 8192 bytes by default.
logging.info("read chunk")
if not chunk:
break
size += len(chunk)
f.write(chunk)
message = urllib.parse.unquote(request.query.get("message", ""))
destination = urllib.parse.unquote(request.query.get("destination", ""))
recipient = utils.signal_format(str(destination))
if destination and not recipient:
try:
group = base58.b58decode(destination).decode()
except ValueError:
# like THtg80Gi2jvgOEFhQjT2Cm+6plNGXTSBJg2HSnhJyH4=
group = destination
if recipient:
await bot.send_message(recipient, message, attachments=[str(path)])
else:
await bot.send_message(None, message, attachments=[str(path)], group=group)
info = f"{filename} sized of {size} sent"
logging.info(info)
return web.Response(text=info)
app.add_routes([web.post("/attachment", store_image_handler)])
app.add_routes([])
if __name__ == "__main__":
@app.on_startup.append
async def start_wrapper(our_app: web.Application) -> None:
our_app["bot"] = Imogen()
web.run_app(app, port=8080, host="0.0.0.0")
| [
"[]",
"The assistant is helpful, creative, clever, funny, very friendly, an artist and anarchist\n\n",
"Human: Hello, who are you?\nAI: My name is Imogen, I'm an AI that makes dream-like images. How can I help you today?\n",
"The following is a conversation with an AI assistant. "
] |
2024-01-10 | Jrbiltmore/Openai_Reformat_Requests | alis_flexport_integration.py | """
ALIS-Flexport Integration
This script provides functionality for integrating ALIS (Algorithms for Logistics Intelligence and Security) and Flexport systems. It includes functions for error handling, request reformatting, fallback logic, data validation, and sending data to the ALIS and Flexport APIs. The integration allows for seamless communication between the two systems, enabling efficient data exchange and collaboration.
Author: Jacob Thomas Messer
Contact: [email protected]
"""
import openai
import logging
import requests
# Set up OpenAI API key
openai.api_key = 'YOUR_API_KEY'
# ALIS API URL
ALIS_API_URL = 'https://alis.example.com/api'
# Flexport API URL
FLEXPORT_API_URL = 'https://flexport.example.com/api'
# Flexport Financial API URL
FLEXPORT_FINANCIAL_API_URL = 'https://flexport-financial.example.com/api'
# Configure logging
logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(levelname)s - %(message)s')
def handle_error(error_message):
"""
Handle error messages using the OpenAI API.
Args:
error_message (str): The error message to handle.
Returns:
list: A list of suggestions for error handling.
"""
try:
response = openai.Completion.create(
engine='text-davinci-003',
prompt=f"Error: {error_message}\nHow to handle this error:",
max_tokens=50,
n=3,
stop=None,
temperature=0.5
)
suggestions = [choice['text'].strip() for choice in response.choices] if 'choices' in response else []
return suggestions
    except openai.error.APIConnectionError as e:
        # Specific OpenAI errors are caught before the OpenAIError base class,
        # otherwise these branches would be unreachable.
        logging.error(f"API connection error occurred: {str(e)}")
        return []
    except openai.error.AuthenticationError as e:
        logging.error(f"Authentication error occurred: {str(e)}")
        return []
    except openai.error.RateLimitError as e:
        logging.error(f"Rate limit error occurred: {str(e)}")
        return []
    except openai.error.OpenAIError as e:
        logging.error(f"OpenAI API error occurred: {str(e)}")
        return []
    except Exception as e:
        logging.error(f"Error occurred: {str(e)}")
        return []
def reformat_request(request_text):
"""
Reformat a request using the OpenAI API.
Args:
request_text (str): The request text to reformat.
Returns:
list: A list of reformatted suggestions.
"""
try:
response = openai.Completion.create(
engine='text-davinci-003',
prompt=f"Request: {request_text}\nHow to reformat this request:",
max_tokens=50,
n=3,
stop=None,
temperature=0.5
)
reformatted_suggestions = [choice['text'].strip() for choice in response.choices] if 'choices' in response else []
return reformatted_suggestions
    except openai.error.APIConnectionError as e:
        # Specific OpenAI errors are caught before the OpenAIError base class,
        # otherwise these branches would be unreachable.
        logging.error(f"API connection error occurred: {str(e)}")
        return []
    except openai.error.AuthenticationError as e:
        logging.error(f"Authentication error occurred: {str(e)}")
        return []
    except openai.error.RateLimitError as e:
        logging.error(f"Rate limit error occurred: {str(e)}")
        return []
    except openai.error.OpenAIError as e:
        logging.error(f"OpenAI API error occurred: {str(e)}")
        return []
    except Exception as e:
        logging.error(f"Error occurred: {str(e)}")
        return []
def fallback_handle_error(error_message):
"""
Implement custom fallback logic to handle error messages.
Args:
error_message (str): The error message to handle.
"""
logging.warning("API call failed. Implement fallback logic here.")
# ...
def fallback_reformat_request(request_text):
"""
Implement custom fallback logic to handle request reformatting.
Args:
request_text (str): The request text to handle.
"""
logging.warning("API call failed. Implement fallback logic here.")
# ...
def send_data_to_alis(data):
"""
Send data to the ALIS API.
Args:
data (dict): The data to send.
Returns:
dict: The response from the ALIS API.
"""
try:
response = requests.post(ALIS_API_URL, json=data)
response.raise_for_status() # Raise an exception for non-2xx response codes
return response.json()
except requests.exceptions.RequestException as e:
logging.error(f"ALIS integration error: {str(e)}")
return None
def send_data_to_flexport(data):
"""
Send data to the Flexport API.
Args:
data (dict): The data to send.
Returns:
dict: The response from the Flexport API.
"""
try:
response = requests.post(FLEXPORT_API_URL, json=data)
response.raise_for_status() # Raise an exception for non-2xx response codes
return response.json()
except requests.exceptions.RequestException as e:
logging.error(f"Flexport integration error: {str(e)}")
return None
def send_data_to_flexport_financial(data):
"""
Send data to the Flexport Financial API.
Args:
data (dict): The data to send.
Returns:
dict: The response from the Flexport Financial API.
"""
try:
response = requests.post(FLEXPORT_FINANCIAL_API_URL, json=data)
response.raise_for_status() # Raise an exception for non-2xx response codes
return response.json()
except requests.exceptions.RequestException as e:
logging.error(f"Flexport Financial integration error: {str(e)}")
return None
def validate_flexport_financial_data(data):
"""
Validates the Flexport Financial data before sending it for integration.
Args:
data (dict): The data to validate.
Returns:
bool: True if the data is valid, False otherwise.
"""
    # Implement custom validation logic
    # ...
    # Placeholder (assumption): accept any non-empty dict until project-specific rules
    # are added, so the function returns a bool as its docstring promises.
    return isinstance(data, dict) and bool(data)
# Usage examples
# Example 1: Handling errors
error_message = "Error: Invalid input"
suggestions = handle_error(error_message)
if suggestions:
logging.info("Error handling suggestions:", suggestions)
else:
fallback_handle_error(error_message)
# Example 2: Reformatting requests
request_text = "Request: Invalid input"
reformatted_suggestions = reformat_request(request_text)
if reformatted_suggestions:
logging.info("Request reformatting suggestions:", reformatted_suggestions)
else:
fallback_reformat_request(request_text)
# Example 3: Sending data to ALIS
alis_data = {
"key": "value"
}
alis_response = send_data_to_alis(alis_data)
if alis_response:
logging.info("ALIS integration success")
else:
logging.error("ALIS integration failed")
# Example 4: Sending data to Flexport
flexport_data = {
"key": "value"
}
flexport_response = send_data_to_flexport(flexport_data)
if flexport_response:
logging.info("Flexport integration success")
else:
logging.error("Flexport integration failed")
# Example 5: Sending data to Flexport Financial
financial_data = {
"key": "value"
}
if validate_flexport_financial_data(financial_data):
flexport_financial_response = send_data_to_flexport_financial(financial_data)
if flexport_financial_response:
logging.info("Flexport Financial integration success")
else:
logging.error("Flexport Financial integration failed")
else:
logging.error("Invalid Flexport Financial data")
| [
"Request: Request: Invalid input\nHow to reformat this request:",
"Error: Error: Invalid input\nHow to handle this error:"
] |
2024-01-10 | glandaDarie/linkedIn-recruiter-reply-bot | automation~recruiter_text_replier~hugging_face.py | from langchain import HuggingFaceHub
from langchain import PromptTemplate, LLMChain
import os
class Hugging_face:
def __init__(self, api_token : str):
self.api_token = api_token
os.environ["HUGGINGFACEHUB_API_TOKEN"] = self.api_token
def predict(self, data : str, template : str, repo_id : str = "google/flan-t5-xxl", **kwargs : dict) -> str:
prompt : PromptTemplate = PromptTemplate(template=template, input_variables=["data"])
llm : HuggingFaceHub = HuggingFaceHub(
repo_id=repo_id, model_kwargs=kwargs
)
llm_chain : LLMChain = LLMChain(prompt=prompt, llm=llm)
return llm_chain.run(data) | [] |
2024-01-10 | glandaDarie/linkedIn-recruiter-reply-bot | automation~recruiter_text_replier~llm_reply_factory.py | from utils.file_utils import read_content
from utils.paths_utils import llm_api_token_path
from recruiter_text_replier.hugging_face import Hugging_face
from recruiter_text_replier.openai import OpenAI
class LLM_Reply_factory:
def __init__(self, llm_name : str):
self.llm_name : str = llm_name
    def create_llm(self) -> object:
if self.llm_name == "<open_ai>":
return OpenAI(read_content(llm_api_token_path)["api_token_openai"])
elif self.llm_name == "<hugging_face>":
return Hugging_face(read_content(llm_api_token_path)["api_token_hugging_face"])
else:
raise NotImplementedError("No LLM with that name is available.") | [] |
2024-01-10 | wealthsimple/llm-gateway | tests~test_pii_scrubber.py | # llm-gateway - A proxy service in front of llm models to encourage the
# responsible use of AI.
#
# Copyright 2023 Wealthsimple Technologies
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# flake8: noqa
from unittest.mock import patch
import pytest
from llm_gateway.pii_scrubber import (
scrub_all,
scrub_credit_card_numbers,
scrub_email_addresses,
scrub_phone_numbers,
scrub_postal_codes,
scrub_sin_numbers,
)
from llm_gateway.providers.openai import OpenAIWrapper
@pytest.mark.parametrize(
argnames=["test_number"],
argvalues=[
("1234567890",),
("123-456-7890",),
("(123) 456-7890",),
("123 456 7890",),
("123.456.7890",),
("+91 (123) 456-7890",),
("1 (800) 555-1234",),
],
)
def test_scrub_phone_numbers(test_number: str):
"""Test phone number scrubbing."""
format_str = "See you on 1980-01-02. My number is {0}. Call me at 3:04:05 PM."
test_text = format_str.format(test_number)
expected_text = format_str.format("[REDACTED PHONE NUMBER]")
assert scrub_phone_numbers(test_text) == expected_text
@pytest.mark.parametrize(
argnames=["test_number"],
argvalues=[
("1234555567893333",),
("1234-5555-6789-3333",),
("1234.5555.6789.3333",),
("1234 5555 6789 3333",),
],
)
def test_scrub_credit_card_numbers(test_number: str):
"""Test credit card number scrubbing."""
format_str = "I'd like to update my credit card. The new number is {0}."
test_text = format_str.format(test_number)
expected_text = format_str.format("[REDACTED CREDIT CARD NUMBER]")
assert scrub_credit_card_numbers(test_text) == expected_text
@pytest.mark.parametrize(
argnames=["test_number"],
argvalues=[
("123456789",),
("123-45-6789",),
("123-456-789",),
("123 45 6789",),
("123 456 789",),
("123.45.6789",),
("123.456.789",),
],
)
def test_scrub_sin_numbers(test_number: str):
"""Test social insurance number scrubbing."""
format_str = "I'd like to start a tax return. My TIN is {0}. I need to file by 2023-04-30 11:59:59 PM."
test_text = format_str.format(test_number)
expected_text = format_str.format("[REDACTED SIN NUMBER]")
assert scrub_sin_numbers(test_text) == expected_text
@pytest.mark.parametrize(
argnames=["test_email"],
argvalues=[
("[email protected]",),
("[email protected]",),
],
)
def test_scrub_email_addresses(test_email: str):
"""Test email address scrubbing."""
format_str = "Does {0} look like a fake email address?"
test_text = format_str.format(test_email)
expected_text = format_str.format("[REDACTED EMAIL ADDRESS]")
assert scrub_email_addresses(test_text) == expected_text
@pytest.mark.parametrize(
argnames=["test_postal"],
argvalues=[
("A1A 1A1",),
("A1A1A1",),
("A1A1a1",),
],
)
def test_scrub_postal_codes(test_postal: str):
"""Test postal code scrubbing."""
format_str = "My billing address is {0}. '{0}' \"{0}\" {0}"
test_text = format_str.format(test_postal)
expected_text = format_str.format("[REDACTED POSTAL CODE]")
assert scrub_postal_codes(test_text) == expected_text
def test_scrub_all_dict():
"""Test that scrub_all works on a dict."""
test_dict = {"role": "user", "content": "My phone number is 123-456-7890."}
expected_dict = {
"role": "user",
"content": "My phone number is [REDACTED PHONE NUMBER].",
}
assert scrub_all(test_dict) == expected_dict
def test_scrub_all_wrong_type():
"""Test that scrub_all does not raise any errors on non-string inputs."""
with pytest.raises(TypeError):
scrub_all(None)
with pytest.raises(TypeError):
scrub_all(123)
with pytest.raises(TypeError):
scrub_all(123.456)
with pytest.raises(TypeError):
scrub_all(True)
with pytest.raises(TypeError):
scrub_all(False)
with pytest.raises(TypeError):
scrub_all(["a", "b", "c"])
@patch("openai.ChatCompletion")
@patch("llm_gateway.providers.openai.write_record_to_db")
def test_pii_scrubber_end_to_end(mock_write_record_to_db, mock_openai_module):
"""Make a ChatGPT request with some pii and make sure it gets scrubbed."""
class MockResponse:
def __init__(self, resp):
self.resp = resp
def to_dict(self):
return self.resp
mock_openai_module.create.return_value = MockResponse(
{
"id": "chatcmpl-abc123",
"object": "chat.completion",
"created": 1677858242,
"model": "gpt-3.5-turbo-0301",
"usage": {"prompt_tokens": 13, "completion_tokens": 7, "total_tokens": 20},
"choices": [
{
"message": {"role": "assistant", "content": "\n\nThis is a test!"},
"finish_reason": "stop",
"index": 0,
}
],
}
) # garbage data, not important
wrapper = OpenAIWrapper()
result = wrapper.send_openai_request(
"ChatCompletion",
endpoint="create",
messages=[
"My phone number is 123-456-7890.",
"My SIN is 111-222-333",
"My credit card number is 1234-5678-9012-3456",
"The user's email is [email protected], AKA [email protected]",
"The user's postal code is A1A 1A1, AKA a1a1A1",
],
)
called_with = mock_openai_module.create.call_args_list[0].kwargs["messages"]
expected = [
"My phone number is [REDACTED PHONE NUMBER].",
"My SIN is [REDACTED SIN NUMBER]",
"My credit card number is [REDACTED CREDIT CARD NUMBER]",
"The user's email is [REDACTED EMAIL ADDRESS], AKA [REDACTED EMAIL ADDRESS]",
"The user's postal code is [REDACTED POSTAL CODE], AKA [REDACTED POSTAL CODE]",
]
# Truncate the result. called_with contains an extra message - the mock response,
# which isn't actually sent to OpenAI but shows up because of Python list mutability.
assert called_with[: len(expected)] == expected
| [
"My phone number is 123-456-7890.",
"\n\nThis is a test!",
"My phone number is [REDACTED PHONE NUMBER]."
] |
2024-01-10 | wealthsimple/llm-gateway | llm_gateway~routers~cohere_api.py | # llm-gateway - A proxy service in front of llm models to encourage the
# responsible use of AI.
#
# Copyright 2023 Wealthsimple Technologies
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from fastapi import APIRouter, BackgroundTasks
from fastapi.responses import StreamingResponse
from starlette.responses import JSONResponse
from llm_gateway.exceptions import CohereRouteExceptionHandler
from llm_gateway.models import GenerateInput, SummarizeInput
from llm_gateway.providers.cohere import CohereWrapper
from llm_gateway.utils import reraise_500
router = APIRouter(route_class=CohereRouteExceptionHandler)
@router.post("/generate")
@reraise_500
def generate(
user_input: GenerateInput, background_tasks: BackgroundTasks
) -> JSONResponse:
"""
Use Cohere's API to generate a response to a prompt
:param user_input: Inputs to the Cohere API, including prompt
:type user_input: GenerateInput
:return: Dictionary with LLM response and metadata
:rtype: JSONResponse
"""
wrapper = CohereWrapper()
resp, logs = wrapper.send_cohere_request(
"generate",
max_tokens=user_input.max_tokens,
prompt=user_input.prompt,
temperature=user_input.temperature,
model=user_input.model,
**user_input.model_kwargs
)
background_tasks.add_task(wrapper.write_logs_to_db, db_logs=logs)
return JSONResponse(resp)
@router.post("/generate/stream")
@reraise_500
def generate_stream(
user_input: GenerateInput, background_tasks: BackgroundTasks
) -> StreamingResponse:
wrapper = CohereWrapper()
response, logs = wrapper.send_cohere_request(
"generate",
max_tokens=user_input.max_tokens,
prompt=user_input.prompt,
temperature=user_input.temperature,
model=user_input.model,
stream=True,
**user_input.model_kwargs
)
background_tasks.add_task(wrapper.write_logs_to_db, db_logs=logs)
return StreamingResponse(response, media_type="text/plain")
@router.post("/summarize")
@reraise_500
def summarize(
user_input: SummarizeInput, background_tasks: BackgroundTasks
) -> JSONResponse:
"""
Use Cohere's API to summarize a response to a prompt based on additional_command
:param user_input: Inputs to the Cohere API, including prompt
:type user_input: SummarizeInput
:return: Dictionary with LLM response and metadata
:rtype: JSONResponse
"""
wrapper = CohereWrapper()
resp, logs = wrapper.send_cohere_request(
"summarize",
prompt=user_input.prompt,
additional_command=user_input.additional_command,
temperature=user_input.temperature,
model=user_input.model,
**user_input.model_kwargs
)
background_tasks.add_task(wrapper.write_logs_to_db, db_logs=logs)
    return JSONResponse(resp)
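# Illustrative client sketch (not part of the original router). The base URL and route
# prefix are assumptions: adjust them to wherever the gateway mounts this router. The
# payload keys mirror the GenerateInput fields used above.
def _example_generate_call() -> None:
    import requests  # assumed to be installed in the client environment
    payload = {
        "prompt": "Summarize why proxying LLM calls can help with governance.",
        "max_tokens": 50,
        "temperature": 0.5,
        "model": "command",  # hypothetical Cohere model name
        "model_kwargs": {},
    }
    resp = requests.post("http://localhost:8000/api/cohere/generate", json=payload)
    print(resp.json())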
| [] |
2024-01-10 | wealthsimple/llm-gateway | tests~test_utils.py | from unittest.mock import Mock
import pytest
from openai.error import APIError
from llm_gateway.utils import max_retries
def test_retry_decorator_mismatch_exception():
retry_mock = Mock()
retry_mock.side_effect = [APIError("test"), "success"]
@max_retries(1, exceptions=(ValueError,))
def mismatch_exception():
return retry_mock()
with pytest.raises(APIError):
mismatch_exception()
def test_retry_decorator_matching_exception():
retry_mock = Mock()
retry_mock.side_effect = [APIError("test"), "success"]
## Matching retry exception
@max_retries(1, exceptions=(APIError,))
def matching_exception():
return retry_mock()
assert matching_exception() == "success"
| [] |
2024-01-10 | wealthsimple/llm-gateway | llm_gateway~exceptions.py | # llm-gateway - A proxy service in front of llm models to encourage the
# responsible use of AI.
#
# Copyright 2023 Wealthsimple Technologies
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from cohere.error import CohereAPIError, CohereConnectionError, CohereError
from fastapi import HTTPException, Request
from fastapi.responses import JSONResponse
from fastapi.routing import APIRoute
from openai.error import (
APIConnectionError,
APIError,
AuthenticationError,
RateLimitError,
Timeout,
TryAgain,
)
from llm_gateway.logger import get_logger
OPENAI_EXCEPTIONS = (
Timeout,
APIError,
APIConnectionError,
TryAgain,
RateLimitError,
AuthenticationError,
)
COHERE_EXCEPTIONS = (CohereError, CohereAPIError, CohereConnectionError)
logger = get_logger(__name__)
class OpenAIRouteExceptionHandler(APIRoute):
"""
This is a route class override for the OpenAI router. It is used to
catch common exceptions that are raised by the OpenAI API and return an
internal server error response with its associated error message.
"""
def get_route_handler(self):
original_route_handler = super().get_route_handler()
async def exception_handler(request: Request) -> JSONResponse:
"""
Catch OpenAI exceptions and return an internal server error response.
:param request: The request object
:type request: Request
:return: Internal server error response with error message
:rtype: JSONResponse
"""
try:
response = await original_route_handler(request)
except OPENAI_EXCEPTIONS as e:
# print exception traceback to console
                logger.exception("%s: %s", type(e).__name__, e)
raise HTTPException(
status_code=500,
detail=str(e),
)
return response
return exception_handler
class CohereRouteExceptionHandler(APIRoute):
"""
This is a route class override for the Cohere router. It is used to
catch common exceptions that are raised by the Cohere API and return an
internal server error response with its associated error message.
"""
def get_route_handler(self):
original_route_handler = super().get_route_handler()
async def exception_handler(request: Request) -> JSONResponse:
"""
Catch Cohere exceptions and return an internal server error response.
:param request: The request object
:type request: Request
:return: Internal server error response with error message
:rtype: JSONResponse
"""
try:
response = await original_route_handler(request)
except COHERE_EXCEPTIONS as e:
# print exception traceback to console
                logger.exception("%s: %s", type(e).__name__, e)
raise HTTPException(
status_code=500,
detail=str(e),
)
return response
return exception_handler
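# Illustrative usage sketch (not part of the original module): the handlers above are
# wired in by passing them as a router's route_class, as the gateway's routers do.
def _example_router():
    from fastapi import APIRouter  # fastapi is already a dependency of this module
    return APIRouter(route_class=OpenAIRouteExceptionHandler)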
| [] |
2024-01-10 | pablosr11/el-teacher | disc.py | """Discord bot to transcribe voice notes and reply with feedback"""
import asyncio
import os
import random
import string
import discord
import httpx
import openai
import tiktoken_async
from dotenv import load_dotenv
from upstash_redis.asyncio import Redis
load_dotenv()
DEFAULT_CONVERSATION_CREDITS = 100
openai.api_key = os.getenv("OPENAI_API_KEY")
redis = Redis(
url=os.getenv("REDIS_URI"),
token=os.getenv("REDIS_PASS"),
allow_telemetry=False
)
class MyClient(discord.Client):
"""Discord client"""
async def on_member_join(self, member:discord.Member):
"""Handle member join event"""
channel = await member.create_dm()
await channel.send("Welcome to the server! Send me a voice note about a topic and I will try to provide feedback")
await asyncio.sleep(0.1)
await self.power.send(f"***new_joiner***: {member.name}")
return
async def on_ready(self):
"""Handle client ready event"""
print(f"Logged in as {self.user} (ID: {self.user.id})")
print("------")
self.power = await client.fetch_user(os.getenv("POWERUSER_ID"))
async def on_message(self, message: discord.Message):
"""Handle incoming messages"""
# how to enable people to talk to bot directly without joining a server
# generate headline and store in redis along with credits.
# stop bot from replying to itself
if message.author == client.user:
return
IS_AUDIO = len(message.attachments) > 0
author = message.author.name
channel = message.channel
await self.power.send(f"***new***: {author} | ***is_audio***: {IS_AUDIO} | ***channel***: {channel}")
await asyncio.sleep(0.1)
if "Direct Message" not in str(channel):
await message.channel.send("I only reply in DMs")
return
if len(message.attachments) > 0:
url = message.attachments[0].url
author = message.author.name
conv_left = await redis.decr(author)
if conv_left == -1: # doesnt exist
await redis.set(author, DEFAULT_CONVERSATION_CREDITS)
conv_left = DEFAULT_CONVERSATION_CREDITS
if not conv_left or conv_left == 0:
await message.channel.send("You have no more conversations left. We will be in touch. Alternatively, send a small message to https://psiesta.com")
await self.power.send(f"Out of credits: {author}\n*** SET {author} {DEFAULT_CONVERSATION_CREDITS}***\n")
return
await message.channel.send("Thanks for your message. Your feedback is being generated...")
async with httpx.AsyncClient() as httpx_client:
resp = await httpx_client.get(url)
b_content = resp.content
filename = (
"".join(
random.choice(string.ascii_uppercase + string.digits)
for _ in range(6)
)
+ ".ogg"
)
## turn this into async
with open(filename, "wb") as ff:
ff.write(b_content)
ff.seek(0)
audio_file = open(filename, "rb")
trans = await openai.Audio.atranscribe("whisper-1", audio_file)
audio_file.close()
os.remove(filename)
transcript = trans["text"]
if transcript:
# count tokens async
encoder = await tiktoken_async.encoding_for_model("gpt-3.5-turbo")
n_tokens = await client.loop.run_in_executor(
None, encoder.encode, transcript
)
if len(n_tokens) < 10:
await message.channel.send(
"Audio message has to be longer. We could not process your message."
)
return
msg = transcript
completion = await openai.ChatCompletion.acreate(
model="gpt-3.5-turbo",
messages=[
{
"role": "system",
"content": "You are a helpful teacher, judge, assistant. With a lot of life experiences. \
People come to you with speeches and explanations and questions \
                                and you provide helpful feedback. From \
mistakes in the facts, to the structure of the speech to any other suggestions. \
If possible you suggest other related topics to learn \
too. You always reply in the language of the speech.",
},
{"role": "user", "content": msg},
],
)
respuesta = completion.choices[0].message["content"]
await message.channel.send(respuesta.encode("utf-8").decode("utf-8"))
else:
await message.channel.send("I can't understand that, please try again.")
return
else:
await message.channel.send("I only reply to voice notes")
intents = discord.Intents.default()
intents.message_content = True # pylint: disable=locally-disabled, multiple-statements, assigning-non-slot, line-too-long
intents.members = True # pylint: disable=locally-disabled, multiple-statements, assigning-non-slot, line-too-long
client = MyClient(intents=intents)
client.run(os.getenv("DISCORD_TOKEN"))
| [
"You are a helpful teacher, judge, assistant. With a lot of life experiences. People come to you with speeches and explanations and questions and you provide helpful feedbackt. From mistakes in the facts, to the structure of the speech to any other suggestions. If possible you suggest other related topics to learn too. You always reply in the language of the speech."
] |
2024-01-10 | mwitiderrick/langchain | libs~langchain~langchain~chat_models~baichuan_baichuaninc_endpoint.py | """Baichuan chat wrapper."""
from __future__ import annotations
import requests
import json
import time
import hashlib
import logging
from typing import (
Any,
AsyncIterator,
Dict,
Iterator,
List,
Mapping,
Optional,
)
from langchain.callbacks.manager import (
AsyncCallbackManagerForLLMRun,
CallbackManagerForLLMRun,
)
from langchain.chat_models.base import BaseChatModel
from langchain.pydantic_v1 import Field, root_validator
from langchain.schema import ChatGeneration, ChatResult
from langchain.schema.messages import (
AIMessage,
AIMessageChunk,
BaseMessage,
BaseMessageChunk,
ChatMessage,
FunctionMessage,
HumanMessage,
SystemMessage,
)
from langchain.schema.output import ChatGenerationChunk
from langchain.utils import get_from_dict_or_env
logger = logging.getLogger(__name__)
def _convert_resp_to_message_chunk(resp: Mapping[str, Any]) -> BaseMessageChunk:
return AIMessageChunk(
content=resp["result"],
role="assistant",
)
def convert_message_to_dict(message: BaseMessage) -> dict:
"""Convert a message to a dictionary that can be passed to the API."""
message_dict: Dict[str, Any]
if isinstance(message, ChatMessage):
message_dict = {"role": message.role, "content": message.content}
elif isinstance(message, HumanMessage):
message_dict = {"role": "user", "content": message.content}
elif isinstance(message, AIMessage):
message_dict = {"role": "assistant", "content": message.content}
if "function_call" in message.additional_kwargs:
message_dict["functions"] = message.additional_kwargs["function_call"]
# If function call only, content is None not empty string
if message_dict["content"] == "":
message_dict["content"] = None
elif isinstance(message, FunctionMessage):
message_dict = {
"role": "function",
"content": message.content,
"name": message.name,
}
else:
raise TypeError(f"Got unknown type {message}")
return message_dict
def calculate_md5(input_string):
md5 = hashlib.md5()
md5.update(input_string.encode('utf-8'))
encrypted = md5.hexdigest()
return encrypted
class BaichuanChatEndpoint():
"""
Currently only enterprise registration is supported for use
To use, you should have the environment variable ``Baichuan_AK`` and ``Baichuan_SK`` set with your
api_key and secret_key.
ak, sk are required parameters
which you could get from https: // api.baichuan-ai.com
Example:
.. code-block: : python
from langchain.chat_models import BaichuanChatEndpoint
baichuan_chat = BaichuanChatEndpoint("your_ak", "your_sk","Baichuan2-13B")
result=baichuan_chat.predict(message)
print(result.text")
Because Baichuan was no pip package made,So we will temporarily use this method and iterate and upgrade in the future
Args: They cannot be empty
baichuan_ak (str): api_key
baichuan_sk (str): secret_key
model (str): Default Baichuan2-7B,Baichuan2-13B,Baichuan2-53B which is commercial.
streaming (bool): Defautlt False
Returns:
Execute predict return response.
"""
baichuan_ak: Optional[str] = None
baichuan_sk: Optional[str] = None
request_timeout: Optional[int] = 60
"""request timeout for chat http requests"""
top_p: Optional[float] = 0.8
temperature: Optional[float] = 0.95
endpoint: Optional[str] = None
"""Endpoint of the Qianfan LLM, required if custom model used."""
def __init__(self, baichuan_ak, baichuan_sk, model="Baichuan2-7B", streaming=False):
self.baichuan_ak = baichuan_ak
self.baichuan_sk = baichuan_sk
self.model = "Baichuan2-7B" if model is None else model
        self.streaming = bool(streaming)
    def predict(self, messages: List[BaseMessage]) -> requests.Response:
        if self.streaming:
            url = "https://api.baichuan-ai.com/v1/stream/chat"
        else:
            url = "https://api.baichuan-ai.com/v1/chat"
data = {
"model": self.model,
"messages": [
{
"role": "user",
"content": messages
}
]
}
json_data = json.dumps(data)
time_stamp = int(time.time())
signature = calculate_md5(self.baichuan_sk + json_data + str(time_stamp))
headers = {
"Content-Type": "application/json",
"Authorization": "Bearer " + self.baichuan_ak,
"X-BC-Request-Id": "your requestId",
"X-BC-Timestamp": str(time_stamp),
"X-BC-Signature": signature,
"X-BC-Sign-Algo": "MD5",
}
response = requests.post(url, data=json_data, headers=headers)
return response
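# Illustrative usage sketch (not part of the original wrapper). A plain string is passed
# for brevity (predict places it directly into the user message content); calling .json()
# assumes the API returns a JSON body, everything else uses the requests.Response API.
def _example_baichuan_call() -> None:
    chat = BaichuanChatEndpoint("your_ak", "your_sk", "Baichuan2-13B")
    response = chat.predict("Hello, who are you?")
    if response.status_code == 200:
        print(response.json())  # inspect the raw payload returned by the API
    else:
        print(f"Request failed with HTTP {response.status_code}")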
| [] |
2024-01-10 | mwitiderrick/langchain | libs~langchain~langchain~vectorstores~clickhouse.py | from __future__ import annotations
import json
import logging
from hashlib import sha1
from threading import Thread
from typing import Any, Dict, Iterable, List, Optional, Tuple, Union
from langchain.docstore.document import Document
from langchain.pydantic_v1 import BaseSettings
from langchain.schema.embeddings import Embeddings
from langchain.schema.vectorstore import VectorStore
logger = logging.getLogger()
def has_mul_sub_str(s: str, *args: Any) -> bool:
"""
Check if a string contains multiple substrings.
Args:
s: string to check.
*args: substrings to check.
Returns:
True if all substrings are in the string, False otherwise.
"""
for a in args:
if a not in s:
return False
return True
class ClickhouseSettings(BaseSettings):
"""`ClickHouse` client configuration.
Attribute:
        host (str) : An URL to connect to the ClickHouse backend.
                     Defaults to 'localhost'.
        port (int) : URL port to connect with HTTP. Defaults to 8123.
username (str) : Username to login. Defaults to None.
password (str) : Password to login. Defaults to None.
index_type (str): index type string.
index_param (list): index build parameter.
index_query_params(dict): index query parameters.
database (str) : Database name to find the table. Defaults to 'default'.
table (str) : Table name to operate on.
Defaults to 'vector_table'.
metric (str) : Metric to compute distance,
supported are ('angular', 'euclidean', 'manhattan', 'hamming',
'dot'). Defaults to 'angular'.
https://github.com/spotify/annoy/blob/main/src/annoymodule.cc#L149-L169
column_map (Dict) : Column type map to project column name onto langchain
semantics. Must have keys: `text`, `id`, `vector`,
must be same size to number of columns. For example:
.. code-block:: python
{
'id': 'text_id',
'uuid': 'global_unique_id'
'embedding': 'text_embedding',
'document': 'text_plain',
'metadata': 'metadata_dictionary_in_json',
}
Defaults to identity map.
"""
host: str = "localhost"
port: int = 8123
username: Optional[str] = None
password: Optional[str] = None
index_type: str = "annoy"
# Annoy supports L2Distance and cosineDistance.
index_param: Optional[Union[List, Dict]] = ["'L2Distance'", 100]
index_query_params: Dict[str, str] = {}
column_map: Dict[str, str] = {
"id": "id",
"uuid": "uuid",
"document": "document",
"embedding": "embedding",
"metadata": "metadata",
}
database: str = "default"
table: str = "langchain"
metric: str = "angular"
def __getitem__(self, item: str) -> Any:
return getattr(self, item)
class Config:
env_file = ".env"
env_prefix = "clickhouse_"
env_file_encoding = "utf-8"
class Clickhouse(VectorStore):
"""`ClickHouse VectorSearch` vector store.
You need a `clickhouse-connect` python package, and a valid account
to connect to ClickHouse.
ClickHouse can not only search with simple vector indexes,
it also supports complex query with multiple conditions,
constraints and even sub-queries.
For more information, please visit
[ClickHouse official site](https://clickhouse.com/clickhouse)
"""
def __init__(
self,
embedding: Embeddings,
config: Optional[ClickhouseSettings] = None,
**kwargs: Any,
) -> None:
"""ClickHouse Wrapper to LangChain
embedding_function (Embeddings):
config (ClickHouseSettings): Configuration to ClickHouse Client
Other keyword arguments will pass into
[clickhouse-connect](https://docs.clickhouse.com/)
"""
try:
from clickhouse_connect import get_client
except ImportError:
raise ImportError(
"Could not import clickhouse connect python package. "
"Please install it with `pip install clickhouse-connect`."
)
try:
from tqdm import tqdm
self.pgbar = tqdm
except ImportError:
# Just in case if tqdm is not installed
self.pgbar = lambda x, **kwargs: x
super().__init__()
if config is not None:
self.config = config
else:
self.config = ClickhouseSettings()
assert self.config
assert self.config.host and self.config.port
assert (
self.config.column_map
and self.config.database
and self.config.table
and self.config.metric
)
for k in ["id", "embedding", "document", "metadata", "uuid"]:
assert k in self.config.column_map
assert self.config.metric in [
"angular",
"euclidean",
"manhattan",
"hamming",
"dot",
]
# initialize the schema
dim = len(embedding.embed_query("test"))
index_params = (
(
",".join([f"'{k}={v}'" for k, v in self.config.index_param.items()])
if self.config.index_param
else ""
)
if isinstance(self.config.index_param, Dict)
else ",".join([str(p) for p in self.config.index_param])
if isinstance(self.config.index_param, List)
else self.config.index_param
)
self.schema = f"""\
CREATE TABLE IF NOT EXISTS {self.config.database}.{self.config.table}(
{self.config.column_map['id']} Nullable(String),
{self.config.column_map['document']} Nullable(String),
{self.config.column_map['embedding']} Array(Float32),
{self.config.column_map['metadata']} JSON,
{self.config.column_map['uuid']} UUID DEFAULT generateUUIDv4(),
CONSTRAINT cons_vec_len CHECK length({self.config.column_map['embedding']}) = {dim},
INDEX vec_idx {self.config.column_map['embedding']} TYPE \
{self.config.index_type}({index_params}) GRANULARITY 1000
) ENGINE = MergeTree ORDER BY uuid SETTINGS index_granularity = 8192\
"""
self.dim = dim
self.BS = "\\"
self.must_escape = ("\\", "'")
self.embedding_function = embedding
self.dist_order = "ASC" # Only support ConsingDistance and L2Distance
# Create a connection to clickhouse
self.client = get_client(
host=self.config.host,
port=self.config.port,
username=self.config.username,
password=self.config.password,
**kwargs,
)
# Enable JSON type
self.client.command("SET allow_experimental_object_type=1")
# Enable Annoy index
self.client.command("SET allow_experimental_annoy_index=1")
self.client.command(self.schema)
@property
def embeddings(self) -> Embeddings:
return self.embedding_function
def escape_str(self, value: str) -> str:
return "".join(f"{self.BS}{c}" if c in self.must_escape else c for c in value)
def _build_insert_sql(self, transac: Iterable, column_names: Iterable[str]) -> str:
ks = ",".join(column_names)
_data = []
for n in transac:
n = ",".join([f"'{self.escape_str(str(_n))}'" for _n in n])
_data.append(f"({n})")
i_str = f"""
INSERT INTO TABLE
{self.config.database}.{self.config.table}({ks})
VALUES
{','.join(_data)}
"""
return i_str
def _insert(self, transac: Iterable, column_names: Iterable[str]) -> None:
_insert_query = self._build_insert_sql(transac, column_names)
self.client.command(_insert_query)
def add_texts(
self,
texts: Iterable[str],
metadatas: Optional[List[dict]] = None,
batch_size: int = 32,
ids: Optional[Iterable[str]] = None,
**kwargs: Any,
) -> List[str]:
"""Insert more texts through the embeddings and add to the VectorStore.
Args:
texts: Iterable of strings to add to the VectorStore.
ids: Optional list of ids to associate with the texts.
batch_size: Batch size of insertion
metadata: Optional column data to be inserted
Returns:
List of ids from adding the texts into the VectorStore.
"""
# Embed and create the documents
ids = ids or [sha1(t.encode("utf-8")).hexdigest() for t in texts]
colmap_ = self.config.column_map
transac = []
column_names = {
colmap_["id"]: ids,
colmap_["document"]: texts,
colmap_["embedding"]: self.embedding_function.embed_documents(list(texts)),
}
metadatas = metadatas or [{} for _ in texts]
column_names[colmap_["metadata"]] = map(json.dumps, metadatas)
assert len(set(colmap_) - set(column_names)) >= 0
keys, values = zip(*column_names.items())
try:
t = None
for v in self.pgbar(
zip(*values), desc="Inserting data...", total=len(metadatas)
):
assert (
len(v[keys.index(self.config.column_map["embedding"])]) == self.dim
)
transac.append(v)
if len(transac) == batch_size:
if t:
t.join()
t = Thread(target=self._insert, args=[transac, keys])
t.start()
transac = []
if len(transac) > 0:
if t:
t.join()
self._insert(transac, keys)
return [i for i in ids]
except Exception as e:
logger.error(f"\033[91m\033[1m{type(e)}\033[0m \033[95m{str(e)}\033[0m")
return []
@classmethod
def from_texts(
cls,
texts: List[str],
embedding: Embeddings,
metadatas: Optional[List[Dict[Any, Any]]] = None,
config: Optional[ClickhouseSettings] = None,
text_ids: Optional[Iterable[str]] = None,
batch_size: int = 32,
**kwargs: Any,
) -> Clickhouse:
"""Create ClickHouse wrapper with existing texts
Args:
embedding_function (Embeddings): Function to extract text embedding
texts (Iterable[str]): List or tuple of strings to be added
config (ClickHouseSettings, Optional): ClickHouse configuration
text_ids (Optional[Iterable], optional): IDs for the texts.
Defaults to None.
batch_size (int, optional): Batchsize when transmitting data to ClickHouse.
Defaults to 32.
metadata (List[dict], optional): metadata to texts. Defaults to None.
Other keyword arguments will pass into
[clickhouse-connect](https://clickhouse.com/docs/en/integrations/python#clickhouse-connect-driver-api)
Returns:
ClickHouse Index
"""
ctx = cls(embedding, config, **kwargs)
ctx.add_texts(texts, ids=text_ids, batch_size=batch_size, metadatas=metadatas)
return ctx
def __repr__(self) -> str:
"""Text representation for ClickHouse Vector Store, prints backends, username
and schemas. Easy to use with `str(ClickHouse())`
Returns:
repr: string to show connection info and data schema
"""
_repr = f"\033[92m\033[1m{self.config.database}.{self.config.table} @ "
_repr += f"{self.config.host}:{self.config.port}\033[0m\n\n"
_repr += f"\033[1musername: {self.config.username}\033[0m\n\nTable Schema:\n"
_repr += "-" * 51 + "\n"
for r in self.client.query(
f"DESC {self.config.database}.{self.config.table}"
).named_results():
_repr += (
f"|\033[94m{r['name']:24s}\033[0m|\033[96m{r['type']:24s}\033[0m|\n"
)
_repr += "-" * 51 + "\n"
return _repr
def _build_query_sql(
self, q_emb: List[float], topk: int, where_str: Optional[str] = None
) -> str:
q_emb_str = ",".join(map(str, q_emb))
if where_str:
where_str = f"PREWHERE {where_str}"
else:
where_str = ""
settings_strs = []
if self.config.index_query_params:
for k in self.config.index_query_params:
settings_strs.append(f"SETTING {k}={self.config.index_query_params[k]}")
q_str = f"""
SELECT {self.config.column_map['document']},
{self.config.column_map['metadata']}, dist
FROM {self.config.database}.{self.config.table}
{where_str}
ORDER BY L2Distance({self.config.column_map['embedding']}, [{q_emb_str}])
AS dist {self.dist_order}
LIMIT {topk} {' '.join(settings_strs)}
"""
return q_str
def similarity_search(
self, query: str, k: int = 4, where_str: Optional[str] = None, **kwargs: Any
) -> List[Document]:
"""Perform a similarity search with ClickHouse
Args:
query (str): query string
k (int, optional): Top K neighbors to retrieve. Defaults to 4.
where_str (Optional[str], optional): where condition string.
Defaults to None.
NOTE: Please do not let end-user to fill this and always be aware
of SQL injection. When dealing with metadatas, remember to
use `{self.metadata_column}.attribute` instead of `attribute`
alone. The default name for it is `metadata`.
Returns:
List[Document]: List of Documents
"""
return self.similarity_search_by_vector(
self.embedding_function.embed_query(query), k, where_str, **kwargs
)
def similarity_search_by_vector(
self,
embedding: List[float],
k: int = 4,
where_str: Optional[str] = None,
**kwargs: Any,
) -> List[Document]:
"""Perform a similarity search with ClickHouse by vectors
Args:
query (str): query string
k (int, optional): Top K neighbors to retrieve. Defaults to 4.
where_str (Optional[str], optional): where condition string.
Defaults to None.
NOTE: Please do not let end-user to fill this and always be aware
of SQL injection. When dealing with metadatas, remember to
use `{self.metadata_column}.attribute` instead of `attribute`
alone. The default name for it is `metadata`.
Returns:
            List[Document]: List of Documents
"""
q_str = self._build_query_sql(embedding, k, where_str)
try:
return [
Document(
page_content=r[self.config.column_map["document"]],
metadata=r[self.config.column_map["metadata"]],
)
for r in self.client.query(q_str).named_results()
]
except Exception as e:
logger.error(f"\033[91m\033[1m{type(e)}\033[0m \033[95m{str(e)}\033[0m")
return []
def similarity_search_with_relevance_scores(
self, query: str, k: int = 4, where_str: Optional[str] = None, **kwargs: Any
) -> List[Tuple[Document, float]]:
"""Perform a similarity search with ClickHouse
Args:
query (str): query string
k (int, optional): Top K neighbors to retrieve. Defaults to 4.
where_str (Optional[str], optional): where condition string.
Defaults to None.
NOTE: Please do not let end-user to fill this and always be aware
of SQL injection. When dealing with metadatas, remember to
use `{self.metadata_column}.attribute` instead of `attribute`
alone. The default name for it is `metadata`.
Returns:
            List[Tuple[Document, float]]: List of (Document, relevance score) tuples
"""
q_str = self._build_query_sql(
self.embedding_function.embed_query(query), k, where_str
)
try:
return [
(
Document(
page_content=r[self.config.column_map["document"]],
metadata=r[self.config.column_map["metadata"]],
),
r["dist"],
)
for r in self.client.query(q_str).named_results()
]
except Exception as e:
logger.error(f"\033[91m\033[1m{type(e)}\033[0m \033[95m{str(e)}\033[0m")
return []
def drop(self) -> None:
"""
Helper function: Drop data
"""
self.client.command(
f"DROP TABLE IF EXISTS {self.config.database}.{self.config.table}"
)
@property
def metadata_column(self) -> str:
return self.config.column_map["metadata"]
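# Illustrative usage sketch (not part of the original module). It assumes a ClickHouse
# server reachable with the default ClickhouseSettings (localhost:8123); FakeEmbeddings
# and the metadata filter below are stand-ins chosen for the example.
def _example_clickhouse_search() -> None:
    from langchain.embeddings.fake import FakeEmbeddings  # deterministic test embeddings
    docsearch = Clickhouse.from_texts(
        texts=["harrison worked at kensho", "ankush worked at facebook"],
        embedding=FakeEmbeddings(size=5),
        metadatas=[{"source": "doc1"}, {"source": "doc2"}],
    )
    # where_str filters on the JSON metadata column; see the similarity_search docstring.
    hits = docsearch.similarity_search(
        "who worked at kensho?", k=1, where_str="metadata.source = 'doc1'"
    )
    print(hits)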
| [] |
2024-01-10 | mwitiderrick/langchain | libs~langchain~langchain~schema~runnable~passthrough.py | """Implementation of the RunnablePassthrough."""
from __future__ import annotations
import asyncio
import inspect
import threading
from typing import (
Any,
AsyncIterator,
Awaitable,
Callable,
Dict,
Iterator,
List,
Mapping,
Optional,
Sequence,
Type,
Union,
cast,
)
from langchain.pydantic_v1 import BaseModel, create_model
from langchain.schema.runnable.base import (
Input,
Runnable,
RunnableParallel,
RunnableSerializable,
)
from langchain.schema.runnable.config import RunnableConfig, get_executor_for_config
from langchain.schema.runnable.utils import AddableDict, ConfigurableFieldSpec
from langchain.utils.aiter import atee, py_anext
from langchain.utils.iter import safetee
def identity(x: Input) -> Input:
"""An identity function"""
return x
async def aidentity(x: Input) -> Input:
"""An async identity function"""
return x
class RunnablePassthrough(RunnableSerializable[Input, Input]):
"""A runnable to passthrough inputs unchanged or with additional keys.
This runnable behaves almost like the identity function, except that it
can be configured to add additional keys to the output, if the input is a
dict.
The examples below demonstrate this runnable works using a few simple
chains. The chains rely on simple lambdas to make the examples easy to execute
and experiment with.
Examples:
.. code-block:: python
from langchain.schema.runnable import RunnablePassthrough, RunnableParallel
runnable = RunnableParallel(
origin=RunnablePassthrough(),
modified=lambda x: x+1
)
runnable.invoke(1) # {'origin': 1, 'modified': 2}
def fake_llm(prompt: str) -> str: # Fake LLM for the example
return "completion"
chain = RunnableLambda(fake_llm) | {
'original': RunnablePassthrough(), # Original LLM output
'parsed': lambda text: text[::-1] # Parsing logic
}
chain.invoke('hello') # {'original': 'completion', 'parsed': 'noitelpmoc'}
In some cases, it may be useful to pass the input through while adding some
keys to the output. In this case, you can use the `assign` method:
.. code-block:: python
from langchain.schema.runnable import RunnablePassthrough, RunnableParallel
def fake_llm(prompt: str) -> str: # Fake LLM for the example
return "completion"
runnable = {
'llm1': fake_llm,
'llm2': fake_llm,
}
| RunnablePassthrough.assign(
total_chars=lambda inputs: len(inputs['llm1'] + inputs['llm2'])
)
runnable.invoke('hello')
# {'llm1': 'completion', 'llm2': 'completion', 'total_chars': 20}
"""
input_type: Optional[Type[Input]] = None
func: Optional[Callable[[Input], None]] = None
afunc: Optional[Callable[[Input], Awaitable[None]]] = None
def __init__(
self,
func: Optional[
Union[Callable[[Input], None], Callable[[Input], Awaitable[None]]]
] = None,
afunc: Optional[Callable[[Input], Awaitable[None]]] = None,
*,
input_type: Optional[Type[Input]] = None,
**kwargs: Any,
) -> None:
if inspect.iscoroutinefunction(func):
afunc = func
func = None
super().__init__(func=func, afunc=afunc, input_type=input_type, **kwargs)
@classmethod
def is_lc_serializable(cls) -> bool:
return True
@classmethod
def get_lc_namespace(cls) -> List[str]:
return cls.__module__.split(".")[:-1]
@property
def InputType(self) -> Any:
return self.input_type or Any
@property
def OutputType(self) -> Any:
return self.input_type or Any
@classmethod
def assign(
cls,
**kwargs: Union[
Runnable[Dict[str, Any], Any],
Callable[[Dict[str, Any]], Any],
Mapping[
str,
Union[Runnable[Dict[str, Any], Any], Callable[[Dict[str, Any]], Any]],
],
],
) -> RunnableAssign:
"""Merge the Dict input with the output produced by the mapping argument.
Args:
mapping: A mapping from keys to runnables or callables.
Returns:
A runnable that merges the Dict input with the output produced by the
mapping argument.
"""
return RunnableAssign(RunnableParallel(kwargs))
def invoke(self, input: Input, config: Optional[RunnableConfig] = None) -> Input:
if self.func is not None:
self.func(input)
return self._call_with_config(identity, input, config)
async def ainvoke(
self,
input: Input,
config: Optional[RunnableConfig] = None,
**kwargs: Optional[Any],
) -> Input:
if self.afunc is not None:
await self.afunc(input, **kwargs)
elif self.func is not None:
self.func(input, **kwargs)
return await self._acall_with_config(aidentity, input, config)
def transform(
self,
input: Iterator[Input],
config: Optional[RunnableConfig] = None,
**kwargs: Any,
) -> Iterator[Input]:
if self.func is None:
for chunk in self._transform_stream_with_config(input, identity, config):
yield chunk
else:
final = None
for chunk in self._transform_stream_with_config(input, identity, config):
yield chunk
if final is None:
final = chunk
else:
final = final + chunk
if final is not None:
self.func(final, **kwargs)
async def atransform(
self,
input: AsyncIterator[Input],
config: Optional[RunnableConfig] = None,
**kwargs: Any,
) -> AsyncIterator[Input]:
if self.afunc is None and self.func is None:
async for chunk in self._atransform_stream_with_config(
input, identity, config
):
yield chunk
else:
final = None
async for chunk in self._atransform_stream_with_config(
input, identity, config
):
yield chunk
if final is None:
final = chunk
else:
final = final + chunk
if final is not None:
if self.afunc is not None:
await self.afunc(final, **kwargs)
elif self.func is not None:
self.func(final, **kwargs)
def stream(
self,
input: Input,
config: Optional[RunnableConfig] = None,
**kwargs: Any,
) -> Iterator[Input]:
return self.transform(iter([input]), config, **kwargs)
async def astream(
self,
input: Input,
config: Optional[RunnableConfig] = None,
**kwargs: Any,
) -> AsyncIterator[Input]:
async def input_aiter() -> AsyncIterator[Input]:
yield input
async for chunk in self.atransform(input_aiter(), config, **kwargs):
yield chunk
class RunnableAssign(RunnableSerializable[Dict[str, Any], Dict[str, Any]]):
"""
A runnable that assigns key-value pairs to Dict[str, Any] inputs.
"""
mapper: RunnableParallel[Dict[str, Any]]
def __init__(self, mapper: RunnableParallel[Dict[str, Any]], **kwargs: Any) -> None:
super().__init__(mapper=mapper, **kwargs)
@classmethod
def is_lc_serializable(cls) -> bool:
return True
@classmethod
def get_lc_namespace(cls) -> List[str]:
return cls.__module__.split(".")[:-1]
@property
def input_schema(self) -> Type[BaseModel]:
map_input_schema = self.mapper.input_schema
if not map_input_schema.__custom_root_type__:
# ie. it's a dict
return map_input_schema
return super().input_schema
@property
def output_schema(self) -> Type[BaseModel]:
map_input_schema = self.mapper.input_schema
map_output_schema = self.mapper.output_schema
if (
not map_input_schema.__custom_root_type__
and not map_output_schema.__custom_root_type__
):
# ie. both are dicts
return create_model( # type: ignore[call-overload]
"RunnableAssignOutput",
**{
k: (v.type_, v.default)
for s in (map_input_schema, map_output_schema)
for k, v in s.__fields__.items()
},
)
return super().output_schema
@property
def config_specs(self) -> Sequence[ConfigurableFieldSpec]:
return self.mapper.config_specs
def invoke(
self,
input: Dict[str, Any],
config: Optional[RunnableConfig] = None,
**kwargs: Any,
) -> Dict[str, Any]:
assert isinstance(
input, dict
), "The input to RunnablePassthrough.assign() must be a dict."
return {
**input,
**self.mapper.invoke(input, config, **kwargs),
}
async def ainvoke(
self,
input: Dict[str, Any],
config: Optional[RunnableConfig] = None,
**kwargs: Any,
) -> Dict[str, Any]:
assert isinstance(
input, dict
), "The input to RunnablePassthrough.assign() must be a dict."
return {
**input,
**await self.mapper.ainvoke(input, config, **kwargs),
}
def transform(
self,
input: Iterator[Dict[str, Any]],
config: Optional[RunnableConfig] = None,
**kwargs: Any,
) -> Iterator[Dict[str, Any]]:
# collect mapper keys
mapper_keys = set(self.mapper.steps.keys())
# create two streams, one for the map and one for the passthrough
for_passthrough, for_map = safetee(input, 2, lock=threading.Lock())
# create map output stream
map_output = self.mapper.transform(for_map, config, **kwargs)
# get executor to start map output stream in background
with get_executor_for_config(config or {}) as executor:
# start map output stream
first_map_chunk_future = executor.submit(
next,
map_output, # type: ignore
None,
)
# consume passthrough stream
for chunk in for_passthrough:
assert isinstance(
chunk, dict
), "The input to RunnablePassthrough.assign() must be a dict."
# remove mapper keys from passthrough chunk, to be overwritten by map
filtered = AddableDict(
{k: v for k, v in chunk.items() if k not in mapper_keys}
)
if filtered:
yield filtered
# yield map output
yield cast(Dict[str, Any], first_map_chunk_future.result())
for chunk in map_output:
yield chunk
async def atransform(
self,
input: AsyncIterator[Dict[str, Any]],
config: Optional[RunnableConfig] = None,
**kwargs: Any,
) -> AsyncIterator[Dict[str, Any]]:
# collect mapper keys
mapper_keys = set(self.mapper.steps.keys())
# create two streams, one for the map and one for the passthrough
for_passthrough, for_map = atee(input, 2, lock=asyncio.Lock())
# create map output stream
map_output = self.mapper.atransform(for_map, config, **kwargs)
# start map output stream
first_map_chunk_task: asyncio.Task = asyncio.create_task(
py_anext(map_output, None), # type: ignore[arg-type]
)
# consume passthrough stream
async for chunk in for_passthrough:
assert isinstance(
chunk, dict
), "The input to RunnablePassthrough.assign() must be a dict."
# remove mapper keys from passthrough chunk, to be overwritten by map output
filtered = AddableDict(
{k: v for k, v in chunk.items() if k not in mapper_keys}
)
if filtered:
yield filtered
# yield map output
yield await first_map_chunk_task
async for chunk in map_output:
yield chunk
def stream(
self,
input: Dict[str, Any],
config: Optional[RunnableConfig] = None,
**kwargs: Any,
) -> Iterator[Dict[str, Any]]:
return self.transform(iter([input]), config, **kwargs)
async def astream(
self,
input: Dict[str, Any],
config: Optional[RunnableConfig] = None,
**kwargs: Any,
) -> AsyncIterator[Dict[str, Any]]:
async def input_aiter() -> AsyncIterator[Dict[str, Any]]:
yield input
async for chunk in self.atransform(input_aiter(), config, **kwargs):
yield chunk
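# Illustrative usage sketch (not part of the original module): RunnableAssign is normally
# built through RunnablePassthrough.assign, which merges newly computed keys into the
# original dict input.
def _example_assign() -> None:
    runnable = RunnablePassthrough.assign(total=lambda x: x["a"] + x["b"])
    print(runnable.invoke({"a": 1, "b": 2}))  # {'a': 1, 'b': 2, 'total': 3}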
| [] |
2024-01-10 | mwitiderrick/langchain | libs~experimental~langchain_experimental~comprehend_moderation~pii.py | import asyncio
from typing import Any, Dict, Optional
from langchain_experimental.comprehend_moderation.base_moderation_exceptions import (
ModerationPiiError,
)
class ComprehendPII:
def __init__(
self,
client: Any,
callback: Optional[Any] = None,
unique_id: Optional[str] = None,
chain_id: Optional[str] = None,
) -> None:
self.client = client
self.moderation_beacon = {
"moderation_chain_id": chain_id,
"moderation_type": "PII",
"moderation_status": "LABELS_NOT_FOUND",
}
self.callback = callback
self.unique_id = unique_id
def validate(self, prompt_value: str, config: Any = None) -> str:
redact = config.get("redact")
return (
self._detect_pii(prompt_value=prompt_value, config=config)
if redact
else self._contains_pii(prompt_value=prompt_value, config=config)
)
def _contains_pii(self, prompt_value: str, config: Any = None) -> str:
"""
Checks for Personally Identifiable Information (PII) labels above a
specified threshold. Uses Amazon Comprehend Contains PII Entities API. See -
https://docs.aws.amazon.com/comprehend/latest/APIReference/API_ContainsPiiEntities.html
Args:
prompt_value (str): The input text to be checked for PII labels.
config (Dict[str, Any]): Configuration for PII check and actions.
Returns:
str: the original prompt
Note:
- The provided client should be initialized with valid AWS credentials.
"""
pii_identified = self.client.contains_pii_entities(
Text=prompt_value, LanguageCode="en"
)
if self.callback and self.callback.pii_callback:
self.moderation_beacon["moderation_input"] = prompt_value
self.moderation_beacon["moderation_output"] = pii_identified
threshold = config.get("threshold")
pii_labels = config.get("labels")
pii_found = False
for entity in pii_identified["Labels"]:
if (entity["Score"] >= threshold and entity["Name"] in pii_labels) or (
entity["Score"] >= threshold and not pii_labels
):
pii_found = True
break
if self.callback and self.callback.pii_callback:
if pii_found:
self.moderation_beacon["moderation_status"] = "LABELS_FOUND"
asyncio.create_task(
self.callback.on_after_pii(self.moderation_beacon, self.unique_id)
)
if pii_found:
raise ModerationPiiError
return prompt_value
def _detect_pii(self, prompt_value: str, config: Optional[Dict[str, Any]]) -> str:
"""
Detects and handles Personally Identifiable Information (PII) entities in the
given prompt text using Amazon Comprehend's detect_pii_entities API. The
function provides options to redact or stop processing based on the identified
PII entities and a provided configuration. Uses Amazon Comprehend Detect PII
Entities API.
Args:
prompt_value (str): The input text to be checked for PII entities.
config (Dict[str, Any]): A configuration specifying how to handle
PII entities.
Returns:
str: The processed prompt text with redacted PII entities or raised
exceptions.
Raises:
ValueError: If the prompt contains configured PII entities for
stopping processing.
Note:
- If PII is not found in the prompt, the original prompt is returned.
- The client should be initialized with valid AWS credentials.
"""
pii_identified = self.client.detect_pii_entities(
Text=prompt_value, LanguageCode="en"
)
if self.callback and self.callback.pii_callback:
self.moderation_beacon["moderation_input"] = prompt_value
self.moderation_beacon["moderation_output"] = pii_identified
if (pii_identified["Entities"]) == []:
if self.callback and self.callback.pii_callback:
asyncio.create_task(
self.callback.on_after_pii(self.moderation_beacon, self.unique_id)
)
return prompt_value
pii_found = False
if not config and pii_identified["Entities"]:
for entity in pii_identified["Entities"]:
if entity["Score"] >= 0.5:
pii_found = True
break
if self.callback and self.callback.pii_callback:
if pii_found:
self.moderation_beacon["moderation_status"] = "LABELS_FOUND"
asyncio.create_task(
self.callback.on_after_pii(self.moderation_beacon, self.unique_id)
)
if pii_found:
raise ModerationPiiError
else:
threshold = config.get("threshold") # type: ignore
pii_labels = config.get("labels") # type: ignore
mask_marker = config.get("mask_character") # type: ignore
pii_found = False
for entity in pii_identified["Entities"]:
if (
pii_labels
and entity["Type"] in pii_labels
and entity["Score"] >= threshold
) or (not pii_labels and entity["Score"] >= threshold):
pii_found = True
char_offset_begin = entity["BeginOffset"]
char_offset_end = entity["EndOffset"]
mask_length = char_offset_end - char_offset_begin + 1
masked_part = mask_marker * mask_length
prompt_value = (
prompt_value[:char_offset_begin]
+ masked_part
+ prompt_value[char_offset_end + 1 :]
)
if self.callback and self.callback.pii_callback:
if pii_found:
self.moderation_beacon["moderation_status"] = "LABELS_FOUND"
asyncio.create_task(
self.callback.on_after_pii(self.moderation_beacon, self.unique_id)
)
return prompt_value
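# A minimal usage sketch (not part of the original module): the boto3 client,
# region, labels, and threshold below are assumptions for illustration only.
# It shows how ComprehendPII can redact PII from a prompt via a config dict.
def _example_redact_pii() -> str:
    import boto3  # requires valid AWS credentials in the environment

    comprehend_client = boto3.client("comprehend", region_name="us-east-1")
    moderator = ComprehendPII(client=comprehend_client)
    config = {
        "redact": True,
        "threshold": 0.5,
        "labels": ["EMAIL", "SSN"],
        "mask_character": "X",
    }
    # PII entities above the threshold are masked with the mask character.
    return moderator.validate("My email is jane@example.com", config=config)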
| [] |
2024-01-10 | mwitiderrick/langchain | libs~langchain~langchain~agents~load_tools.py | # flake8: noqa
"""Load tools."""
import warnings
from typing import Any, Dict, List, Optional, Callable, Tuple
from mypy_extensions import Arg, KwArg
from langchain.agents.tools import Tool
from langchain.schema.language_model import BaseLanguageModel
from langchain.callbacks.base import BaseCallbackManager
from langchain.callbacks.manager import Callbacks
from langchain.chains.api import news_docs, open_meteo_docs, podcast_docs, tmdb_docs
from langchain.chains.api.base import APIChain
from langchain.chains.llm_math.base import LLMMathChain
from langchain.utilities.dalle_image_generator import DallEAPIWrapper
from langchain.utilities.requests import TextRequestsWrapper
from langchain.tools.arxiv.tool import ArxivQueryRun
from langchain.tools.golden_query.tool import GoldenQueryRun
from langchain.tools.pubmed.tool import PubmedQueryRun
from langchain.tools.base import BaseTool
from langchain.tools.bing_search.tool import BingSearchRun
from langchain.tools.ddg_search.tool import DuckDuckGoSearchRun
from langchain.tools.google_search.tool import GoogleSearchResults, GoogleSearchRun
from langchain.tools.metaphor_search.tool import MetaphorSearchResults
from langchain.tools.google_serper.tool import GoogleSerperResults, GoogleSerperRun
from langchain.tools.searchapi.tool import SearchAPIResults, SearchAPIRun
from langchain.tools.graphql.tool import BaseGraphQLTool
from langchain.tools.human.tool import HumanInputRun
from langchain.tools.python.tool import PythonREPLTool
from langchain.tools.requests.tool import (
RequestsDeleteTool,
RequestsGetTool,
RequestsPatchTool,
RequestsPostTool,
RequestsPutTool,
)
from langchain.tools.eleven_labs.text2speech import ElevenLabsText2SpeechTool
from langchain.tools.scenexplain.tool import SceneXplainTool
from langchain.tools.searx_search.tool import SearxSearchResults, SearxSearchRun
from langchain.tools.shell.tool import ShellTool
from langchain.tools.sleep.tool import SleepTool
from langchain.tools.wikipedia.tool import WikipediaQueryRun
from langchain.tools.wolfram_alpha.tool import WolframAlphaQueryRun
from langchain.tools.openweathermap.tool import OpenWeatherMapQueryRun
from langchain.tools.dataforseo_api_search import DataForSeoAPISearchRun
from langchain.tools.dataforseo_api_search import DataForSeoAPISearchResults
from langchain.utilities.arxiv import ArxivAPIWrapper
from langchain.utilities.golden_query import GoldenQueryAPIWrapper
from langchain.utilities.pubmed import PubMedAPIWrapper
from langchain.utilities.bing_search import BingSearchAPIWrapper
from langchain.utilities.duckduckgo_search import DuckDuckGoSearchAPIWrapper
from langchain.utilities.google_search import GoogleSearchAPIWrapper
from langchain.utilities.google_serper import GoogleSerperAPIWrapper
from langchain.utilities.metaphor_search import MetaphorSearchAPIWrapper
from langchain.utilities.awslambda import LambdaWrapper
from langchain.utilities.graphql import GraphQLAPIWrapper
from langchain.utilities.searchapi import SearchApiAPIWrapper
from langchain.utilities.searx_search import SearxSearchWrapper
from langchain.utilities.serpapi import SerpAPIWrapper
from langchain.utilities.twilio import TwilioAPIWrapper
from langchain.utilities.wikipedia import WikipediaAPIWrapper
from langchain.utilities.wolfram_alpha import WolframAlphaAPIWrapper
from langchain.utilities.openweathermap import OpenWeatherMapAPIWrapper
from langchain.utilities.dataforseo_api_search import DataForSeoAPIWrapper
def _get_python_repl() -> BaseTool:
return PythonREPLTool()
def _get_tools_requests_get() -> BaseTool:
return RequestsGetTool(requests_wrapper=TextRequestsWrapper())
def _get_tools_requests_post() -> BaseTool:
return RequestsPostTool(requests_wrapper=TextRequestsWrapper())
def _get_tools_requests_patch() -> BaseTool:
return RequestsPatchTool(requests_wrapper=TextRequestsWrapper())
def _get_tools_requests_put() -> BaseTool:
return RequestsPutTool(requests_wrapper=TextRequestsWrapper())
def _get_tools_requests_delete() -> BaseTool:
return RequestsDeleteTool(requests_wrapper=TextRequestsWrapper())
def _get_terminal() -> BaseTool:
return ShellTool()
def _get_sleep() -> BaseTool:
return SleepTool()
_BASE_TOOLS: Dict[str, Callable[[], BaseTool]] = {
"python_repl": _get_python_repl,
"requests": _get_tools_requests_get, # preserved for backwards compatibility
"requests_get": _get_tools_requests_get,
"requests_post": _get_tools_requests_post,
"requests_patch": _get_tools_requests_patch,
"requests_put": _get_tools_requests_put,
"requests_delete": _get_tools_requests_delete,
"terminal": _get_terminal,
"sleep": _get_sleep,
}
def _get_llm_math(llm: BaseLanguageModel) -> BaseTool:
return Tool(
name="Calculator",
description="Useful for when you need to answer questions about math.",
func=LLMMathChain.from_llm(llm=llm).run,
coroutine=LLMMathChain.from_llm(llm=llm).arun,
)
def _get_open_meteo_api(llm: BaseLanguageModel) -> BaseTool:
chain = APIChain.from_llm_and_api_docs(llm, open_meteo_docs.OPEN_METEO_DOCS)
return Tool(
name="Open Meteo API",
description="Useful for when you want to get weather information from the OpenMeteo API. The input should be a question in natural language that this API can answer.",
func=chain.run,
)
_LLM_TOOLS: Dict[str, Callable[[BaseLanguageModel], BaseTool]] = {
"llm-math": _get_llm_math,
"open-meteo-api": _get_open_meteo_api,
}
def _get_news_api(llm: BaseLanguageModel, **kwargs: Any) -> BaseTool:
news_api_key = kwargs["news_api_key"]
chain = APIChain.from_llm_and_api_docs(
llm, news_docs.NEWS_DOCS, headers={"X-Api-Key": news_api_key}
)
return Tool(
name="News API",
description="Use this when you want to get information about the top headlines of current news stories. The input should be a question in natural language that this API can answer.",
func=chain.run,
)
def _get_tmdb_api(llm: BaseLanguageModel, **kwargs: Any) -> BaseTool:
tmdb_bearer_token = kwargs["tmdb_bearer_token"]
chain = APIChain.from_llm_and_api_docs(
llm,
tmdb_docs.TMDB_DOCS,
headers={"Authorization": f"Bearer {tmdb_bearer_token}"},
)
return Tool(
name="TMDB API",
description="Useful for when you want to get information from The Movie Database. The input should be a question in natural language that this API can answer.",
func=chain.run,
)
def _get_podcast_api(llm: BaseLanguageModel, **kwargs: Any) -> BaseTool:
listen_api_key = kwargs["listen_api_key"]
chain = APIChain.from_llm_and_api_docs(
llm,
podcast_docs.PODCAST_DOCS,
headers={"X-ListenAPI-Key": listen_api_key},
)
return Tool(
name="Podcast API",
description="Use the Listen Notes Podcast API to search all podcasts or episodes. The input should be a question in natural language that this API can answer.",
func=chain.run,
)
def _get_lambda_api(**kwargs: Any) -> BaseTool:
return Tool(
name=kwargs["awslambda_tool_name"],
description=kwargs["awslambda_tool_description"],
func=LambdaWrapper(**kwargs).run,
)
def _get_wolfram_alpha(**kwargs: Any) -> BaseTool:
return WolframAlphaQueryRun(api_wrapper=WolframAlphaAPIWrapper(**kwargs))
def _get_google_search(**kwargs: Any) -> BaseTool:
return GoogleSearchRun(api_wrapper=GoogleSearchAPIWrapper(**kwargs))
def _get_wikipedia(**kwargs: Any) -> BaseTool:
return WikipediaQueryRun(api_wrapper=WikipediaAPIWrapper(**kwargs))
def _get_arxiv(**kwargs: Any) -> BaseTool:
return ArxivQueryRun(api_wrapper=ArxivAPIWrapper(**kwargs))
def _get_golden_query(**kwargs: Any) -> BaseTool:
return GoldenQueryRun(api_wrapper=GoldenQueryAPIWrapper(**kwargs))
def _get_pubmed(**kwargs: Any) -> BaseTool:
return PubmedQueryRun(api_wrapper=PubMedAPIWrapper(**kwargs))
def _get_google_serper(**kwargs: Any) -> BaseTool:
return GoogleSerperRun(api_wrapper=GoogleSerperAPIWrapper(**kwargs))
def _get_google_serper_results_json(**kwargs: Any) -> BaseTool:
return GoogleSerperResults(api_wrapper=GoogleSerperAPIWrapper(**kwargs))
def _get_google_search_results_json(**kwargs: Any) -> BaseTool:
return GoogleSearchResults(api_wrapper=GoogleSearchAPIWrapper(**kwargs))
def _get_searchapi(**kwargs: Any) -> BaseTool:
return SearchAPIRun(api_wrapper=SearchApiAPIWrapper(**kwargs))
def _get_searchapi_results_json(**kwargs: Any) -> BaseTool:
return SearchAPIResults(api_wrapper=SearchApiAPIWrapper(**kwargs))
def _get_serpapi(**kwargs: Any) -> BaseTool:
return Tool(
name="Search",
description="A search engine. Useful for when you need to answer questions about current events. Input should be a search query.",
func=SerpAPIWrapper(**kwargs).run,
coroutine=SerpAPIWrapper(**kwargs).arun,
)
def _get_dalle_image_generator(**kwargs: Any) -> Tool:
return Tool(
"Dall-E Image Generator",
DallEAPIWrapper(**kwargs).run,
"A wrapper around OpenAI DALL-E API. Useful for when you need to generate images from a text description. Input should be an image description.",
)
def _get_twilio(**kwargs: Any) -> BaseTool:
return Tool(
name="Text Message",
description="Useful for when you need to send a text message to a provided phone number.",
func=TwilioAPIWrapper(**kwargs).run,
)
def _get_searx_search(**kwargs: Any) -> BaseTool:
return SearxSearchRun(wrapper=SearxSearchWrapper(**kwargs))
def _get_searx_search_results_json(**kwargs: Any) -> BaseTool:
wrapper_kwargs = {k: v for k, v in kwargs.items() if k != "num_results"}
return SearxSearchResults(wrapper=SearxSearchWrapper(**wrapper_kwargs), **kwargs)
def _get_bing_search(**kwargs: Any) -> BaseTool:
return BingSearchRun(api_wrapper=BingSearchAPIWrapper(**kwargs))
def _get_metaphor_search(**kwargs: Any) -> BaseTool:
return MetaphorSearchResults(api_wrapper=MetaphorSearchAPIWrapper(**kwargs))
def _get_ddg_search(**kwargs: Any) -> BaseTool:
return DuckDuckGoSearchRun(api_wrapper=DuckDuckGoSearchAPIWrapper(**kwargs))
def _get_human_tool(**kwargs: Any) -> BaseTool:
return HumanInputRun(**kwargs)
def _get_scenexplain(**kwargs: Any) -> BaseTool:
return SceneXplainTool(**kwargs)
def _get_graphql_tool(**kwargs: Any) -> BaseTool:
graphql_endpoint = kwargs["graphql_endpoint"]
wrapper = GraphQLAPIWrapper(graphql_endpoint=graphql_endpoint)
return BaseGraphQLTool(graphql_wrapper=wrapper)
def _get_openweathermap(**kwargs: Any) -> BaseTool:
return OpenWeatherMapQueryRun(api_wrapper=OpenWeatherMapAPIWrapper(**kwargs))
def _get_dataforseo_api_search(**kwargs: Any) -> BaseTool:
return DataForSeoAPISearchRun(api_wrapper=DataForSeoAPIWrapper(**kwargs))
def _get_dataforseo_api_search_json(**kwargs: Any) -> BaseTool:
return DataForSeoAPISearchResults(api_wrapper=DataForSeoAPIWrapper(**kwargs))
def _get_eleven_labs_text2speech(**kwargs: Any) -> BaseTool:
return ElevenLabsText2SpeechTool(**kwargs)
_EXTRA_LLM_TOOLS: Dict[
str,
Tuple[Callable[[Arg(BaseLanguageModel, "llm"), KwArg(Any)], BaseTool], List[str]],
] = {
"news-api": (_get_news_api, ["news_api_key"]),
"tmdb-api": (_get_tmdb_api, ["tmdb_bearer_token"]),
"podcast-api": (_get_podcast_api, ["listen_api_key"]),
}
_EXTRA_OPTIONAL_TOOLS: Dict[str, Tuple[Callable[[KwArg(Any)], BaseTool], List[str]]] = {
"wolfram-alpha": (_get_wolfram_alpha, ["wolfram_alpha_appid"]),
"google-search": (_get_google_search, ["google_api_key", "google_cse_id"]),
"google-search-results-json": (
_get_google_search_results_json,
["google_api_key", "google_cse_id", "num_results"],
),
"searx-search-results-json": (
_get_searx_search_results_json,
["searx_host", "engines", "num_results", "aiosession"],
),
"bing-search": (_get_bing_search, ["bing_subscription_key", "bing_search_url"]),
"metaphor-search": (_get_metaphor_search, ["metaphor_api_key"]),
"ddg-search": (_get_ddg_search, []),
"google-serper": (_get_google_serper, ["serper_api_key", "aiosession"]),
"google-serper-results-json": (
_get_google_serper_results_json,
["serper_api_key", "aiosession"],
),
"searchapi": (_get_searchapi, ["searchapi_api_key", "aiosession"]),
"searchapi-results-json": (
_get_searchapi_results_json,
["searchapi_api_key", "aiosession"],
),
"serpapi": (_get_serpapi, ["serpapi_api_key", "aiosession"]),
"dalle-image-generator": (_get_dalle_image_generator, ["openai_api_key"]),
"twilio": (_get_twilio, ["account_sid", "auth_token", "from_number"]),
"searx-search": (_get_searx_search, ["searx_host", "engines", "aiosession"]),
"wikipedia": (_get_wikipedia, ["top_k_results", "lang"]),
"arxiv": (
_get_arxiv,
["top_k_results", "load_max_docs", "load_all_available_meta"],
),
"golden-query": (_get_golden_query, ["golden_api_key"]),
"pubmed": (_get_pubmed, ["top_k_results"]),
"human": (_get_human_tool, ["prompt_func", "input_func"]),
"awslambda": (
_get_lambda_api,
["awslambda_tool_name", "awslambda_tool_description", "function_name"],
),
"sceneXplain": (_get_scenexplain, []),
"graphql": (_get_graphql_tool, ["graphql_endpoint"]),
"openweathermap-api": (_get_openweathermap, ["openweathermap_api_key"]),
"dataforseo-api-search": (
_get_dataforseo_api_search,
["api_login", "api_password", "aiosession"],
),
"dataforseo-api-search-json": (
_get_dataforseo_api_search_json,
["api_login", "api_password", "aiosession"],
),
"eleven_labs_text2speech": (_get_eleven_labs_text2speech, ["eleven_api_key"]),
}
def _handle_callbacks(
callback_manager: Optional[BaseCallbackManager], callbacks: Callbacks
) -> Callbacks:
if callback_manager is not None:
warnings.warn(
"callback_manager is deprecated. Please use callbacks instead.",
DeprecationWarning,
)
if callbacks is not None:
raise ValueError(
"Cannot specify both callback_manager and callbacks arguments."
)
return callback_manager
return callbacks
def load_huggingface_tool(
task_or_repo_id: str,
model_repo_id: Optional[str] = None,
token: Optional[str] = None,
remote: bool = False,
**kwargs: Any,
) -> BaseTool:
"""Loads a tool from the HuggingFace Hub.
Args:
task_or_repo_id: Task or model repo id.
model_repo_id: Optional model repo id.
token: Optional token.
remote: Optional remote. Defaults to False.
**kwargs:
Returns:
A tool.
"""
try:
from transformers import load_tool
except ImportError:
raise ImportError(
"HuggingFace tools require the libraries `transformers>=4.29.0`"
" and `huggingface_hub>=0.14.1` to be installed."
            " Please install them with"
" `pip install --upgrade transformers huggingface_hub`."
)
hf_tool = load_tool(
task_or_repo_id,
model_repo_id=model_repo_id,
token=token,
remote=remote,
**kwargs,
)
outputs = hf_tool.outputs
if set(outputs) != {"text"}:
raise NotImplementedError("Multimodal outputs not supported yet.")
inputs = hf_tool.inputs
if set(inputs) != {"text"}:
raise NotImplementedError("Multimodal inputs not supported yet.")
return Tool.from_function(
hf_tool.__call__, name=hf_tool.name, description=hf_tool.description
)
def load_tools(
tool_names: List[str],
llm: Optional[BaseLanguageModel] = None,
callbacks: Callbacks = None,
**kwargs: Any,
) -> List[BaseTool]:
"""Load tools based on their name.
Args:
tool_names: name of tools to load.
llm: An optional language model, may be needed to initialize certain tools.
callbacks: Optional callback manager or list of callback handlers.
If not provided, default global callback manager will be used.
Returns:
List of tools.
"""
tools = []
callbacks = _handle_callbacks(
callback_manager=kwargs.get("callback_manager"), callbacks=callbacks
)
for name in tool_names:
if name == "requests":
warnings.warn(
"tool name `requests` is deprecated - "
"please use `requests_all` or specify the requests method"
)
if name == "requests_all":
# expand requests into various methods
requests_method_tools = [
_tool for _tool in _BASE_TOOLS if _tool.startswith("requests_")
]
tool_names.extend(requests_method_tools)
elif name in _BASE_TOOLS:
tools.append(_BASE_TOOLS[name]())
elif name in _LLM_TOOLS:
if llm is None:
raise ValueError(f"Tool {name} requires an LLM to be provided")
tool = _LLM_TOOLS[name](llm)
tools.append(tool)
elif name in _EXTRA_LLM_TOOLS:
if llm is None:
raise ValueError(f"Tool {name} requires an LLM to be provided")
_get_llm_tool_func, extra_keys = _EXTRA_LLM_TOOLS[name]
missing_keys = set(extra_keys).difference(kwargs)
if missing_keys:
raise ValueError(
f"Tool {name} requires some parameters that were not "
f"provided: {missing_keys}"
)
sub_kwargs = {k: kwargs[k] for k in extra_keys}
tool = _get_llm_tool_func(llm=llm, **sub_kwargs)
tools.append(tool)
elif name in _EXTRA_OPTIONAL_TOOLS:
_get_tool_func, extra_keys = _EXTRA_OPTIONAL_TOOLS[name]
sub_kwargs = {k: kwargs[k] for k in extra_keys if k in kwargs}
tool = _get_tool_func(**sub_kwargs)
tools.append(tool)
else:
raise ValueError(f"Got unknown tool {name}")
if callbacks is not None:
for tool in tools:
tool.callbacks = callbacks
return tools
def get_all_tool_names() -> List[str]:
"""Get a list of all possible tool names."""
return (
list(_BASE_TOOLS)
+ list(_EXTRA_OPTIONAL_TOOLS)
+ list(_EXTRA_LLM_TOOLS)
+ list(_LLM_TOOLS)
)
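# A brief usage sketch (illustrative only; the chosen tool names and LLM are
# assumptions). `llm-math` requires an LLM, while `ddg-search` does not.
def _example_load_tools() -> List[BaseTool]:
    from langchain.llms import OpenAI  # any BaseLanguageModel works here

    llm = OpenAI(temperature=0)
    # Returns instantiated tools ready to hand to an agent.
    return load_tools(["llm-math", "ddg-search"], llm=llm)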
| [] |
2024-01-10 | mwitiderrick/langchain | libs~langchain~langchain~utilities~anthropic.py | from typing import Any, List
def _get_anthropic_client() -> Any:
try:
import anthropic
except ImportError:
raise ImportError(
"Could not import anthropic python package. "
"This is needed in order to accurately tokenize the text "
"for anthropic models. Please install it with `pip install anthropic`."
)
return anthropic.Anthropic()
def get_num_tokens_anthropic(text: str) -> int:
client = _get_anthropic_client()
return client.count_tokens(text=text)
def get_token_ids_anthropic(text: str) -> List[int]:
client = _get_anthropic_client()
tokenizer = client.get_tokenizer()
encoded_text = tokenizer.encode(text)
return encoded_text.ids
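# A short usage sketch (assumes the `anthropic` package is installed and, for
# newer SDK versions, that ANTHROPIC_API_KEY is set in the environment).
def _example_token_counting(text: str = "Hello, Claude!") -> None:
    print(get_num_tokens_anthropic(text))    # total token count for the text
    print(get_token_ids_anthropic(text)[:5])  # first few token ids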
| [] |
2024-01-10 | mwitiderrick/langchain | libs~langchain~tests~integration_tests~storage~test_upstash_redis.py | """Implement integration tests for Redis storage."""
import pytest
from upstash_redis import Redis
from langchain.storage.upstash_redis import UpstashRedisStore
pytest.importorskip("upstash_redis")
URL = "<UPSTASH_REDIS_REST_URL>"
TOKEN = "<UPSTASH_REDIS_REST_TOKEN>"
@pytest.fixture
def redis_client() -> Redis:
"""Yield redis client."""
from upstash_redis import Redis
# This fixture flushes the database!
client = Redis(url=URL, token=TOKEN)
try:
client.ping()
except Exception:
pytest.skip("Ping request failed. Verify that credentials are correct.")
client.flushdb()
return client
def test_mget(redis_client: Redis) -> None:
store = UpstashRedisStore(client=redis_client, ttl=None)
keys = ["key1", "key2"]
redis_client.mset({"key1": "value1", "key2": "value2"})
result = store.mget(keys)
assert result == ["value1", "value2"]
def test_mset(redis_client: Redis) -> None:
store = UpstashRedisStore(client=redis_client, ttl=None)
key_value_pairs = [("key1", "value1"), ("key2", "value2")]
store.mset(key_value_pairs)
result = redis_client.mget("key1", "key2")
assert result == ["value1", "value2"]
def test_mdelete(redis_client: Redis) -> None:
"""Test that deletion works as expected."""
store = UpstashRedisStore(client=redis_client, ttl=None)
keys = ["key1", "key2"]
redis_client.mset({"key1": "value1", "key2": "value2"})
store.mdelete(keys)
result = redis_client.mget(*keys)
assert result == [None, None]
def test_yield_keys(redis_client: Redis) -> None:
store = UpstashRedisStore(client=redis_client, ttl=None)
redis_client.mset({"key1": "value2", "key2": "value2"})
assert sorted(store.yield_keys()) == ["key1", "key2"]
assert sorted(store.yield_keys(prefix="key*")) == ["key1", "key2"]
assert sorted(store.yield_keys(prefix="lang*")) == []
def test_namespace(redis_client: Redis) -> None:
store = UpstashRedisStore(client=redis_client, ttl=None, namespace="meow")
key_value_pairs = [("key1", "value1"), ("key2", "value2")]
store.mset(key_value_pairs)
cursor, all_keys = redis_client.scan(0)
while cursor != 0:
cursor, keys = redis_client.scan(cursor)
if len(keys) != 0:
all_keys.extend(keys)
assert sorted(all_keys) == [
"meow/key1",
"meow/key2",
]
store.mdelete(["key1"])
cursor, all_keys = redis_client.scan(0, match="*")
while cursor != 0:
cursor, keys = redis_client.scan(cursor, match="*")
if len(keys) != 0:
all_keys.extend(keys)
assert sorted(all_keys) == [
"meow/key2",
]
assert list(store.yield_keys()) == ["key2"]
assert list(store.yield_keys(prefix="key*")) == ["key2"]
assert list(store.yield_keys(prefix="key1")) == []
| [] |
2024-01-10 | mwitiderrick/langchain | libs~langchain~langchain~chat_models~tongyi.py | from __future__ import annotations
import logging
from typing import (
Any,
Callable,
Dict,
Iterator,
List,
Mapping,
Optional,
Tuple,
)
from requests.exceptions import HTTPError
from tenacity import (
RetryCallState,
retry,
retry_if_exception_type,
stop_after_attempt,
wait_exponential,
)
from langchain.callbacks.manager import CallbackManagerForLLMRun
from langchain.chat_models.base import (
BaseChatModel,
_generate_from_stream,
)
from langchain.pydantic_v1 import Field, root_validator
from langchain.schema import ChatGeneration, ChatResult
from langchain.schema.messages import (
AIMessage,
AIMessageChunk,
BaseMessage,
BaseMessageChunk,
ChatMessage,
ChatMessageChunk,
FunctionMessage,
FunctionMessageChunk,
HumanMessage,
HumanMessageChunk,
SystemMessage,
SystemMessageChunk,
)
from langchain.schema.output import ChatGenerationChunk, GenerationChunk
from langchain.utils import get_from_dict_or_env
logger = logging.getLogger(__name__)
def convert_dict_to_message(_dict: Mapping[str, Any]) -> BaseMessage:
role = _dict["role"]
if role == "user":
return HumanMessage(content=_dict["content"])
elif role == "assistant":
content = _dict.get("content", "") or ""
if _dict.get("function_call"):
additional_kwargs = {"function_call": dict(_dict["function_call"])}
else:
additional_kwargs = {}
return AIMessage(content=content, additional_kwargs=additional_kwargs)
elif role == "system":
return SystemMessage(content=_dict["content"])
elif role == "function":
return FunctionMessage(content=_dict["content"], name=_dict["name"])
else:
return ChatMessage(content=_dict["content"], role=role)
def convert_message_to_dict(message: BaseMessage) -> dict:
message_dict: Dict[str, Any]
if isinstance(message, ChatMessage):
message_dict = {"role": message.role, "content": message.content}
elif isinstance(message, HumanMessage):
message_dict = {"role": "user", "content": message.content}
elif isinstance(message, AIMessage):
message_dict = {"role": "assistant", "content": message.content}
if "function_call" in message.additional_kwargs:
message_dict["function_call"] = message.additional_kwargs["function_call"]
# If function call only, content is None not empty string
if message_dict["content"] == "":
message_dict["content"] = None
elif isinstance(message, SystemMessage):
message_dict = {"role": "system", "content": message.content}
elif isinstance(message, FunctionMessage):
message_dict = {
"role": "function",
"content": message.content,
"name": message.name,
}
else:
raise TypeError(f"Got unknown type {message}")
if "name" in message.additional_kwargs:
message_dict["name"] = message.additional_kwargs["name"]
return message_dict
def _stream_response_to_generation_chunk(
stream_response: Dict[str, Any],
length: int,
) -> GenerationChunk:
"""Convert a stream response to a generation chunk.
    The low-level API differs from OpenAI and most other LLMs: Tongyi's stream
    responses are not split into incremental chunks; each response contains all
    of the text generated so far.
    For example, for the answer 'Hi Pickle Rick! How can I assist you today?'
    other LLMs stream:
    'Hi Pickle',
    ' Rick!',
    ' How can I assist you today?'.
    Tongyi streams:
    'Hi Pickle',
    'Hi Pickle Rick!',
    'Hi Pickle Rick! How can I assist you today?'.
    Since GenerationChunk expects incremental chunks, only full_text[length:]
    is returned as the new chunk.
"""
full_text = stream_response["output"]["text"]
text = full_text[length:]
finish_reason = stream_response["output"].get("finish_reason", None)
return GenerationChunk(
text=text,
generation_info=dict(
finish_reason=finish_reason,
),
)
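# A minimal illustration (hypothetical response payloads, not real DashScope
# output) of how the cumulative stream responses described above are turned
# into incremental chunks by tracking the length of text already emitted.
def _demo_stream_response_to_chunks() -> None:
    responses = [
        {"output": {"text": "Hi Pickle"}},
        {"output": {"text": "Hi Pickle Rick!"}},
        {"output": {"text": "Hi Pickle Rick! How can I assist you today?"}},
    ]
    length = 0
    for resp in responses:
        chunk = _stream_response_to_generation_chunk(resp, length)
        length = len(resp["output"]["text"])
        print(repr(chunk.text))  # 'Hi Pickle', ' Rick!', ' How can I assist you today?'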
def _create_retry_decorator(
llm: ChatTongyi,
run_manager: Optional[CallbackManagerForLLMRun] = None,
) -> Callable[[Any], Any]:
def _before_sleep(retry_state: RetryCallState) -> None:
if run_manager:
run_manager.on_retry(retry_state)
return None
min_seconds = 1
max_seconds = 4
    # Wait 2^x * 1 second between retries, starting at min_seconds (1 second)
    # and capping at max_seconds (4 seconds)
return retry(
reraise=True,
stop=stop_after_attempt(llm.max_retries),
wait=wait_exponential(multiplier=1, min=min_seconds, max=max_seconds),
retry=(retry_if_exception_type(HTTPError)),
before_sleep=_before_sleep,
)
def _convert_delta_to_message_chunk(
_dict: Mapping[str, Any],
default_class: type[BaseMessageChunk],
length: int,
) -> BaseMessageChunk:
role = _dict.get("role")
full_content = _dict.get("content") or ""
content = full_content[length:]
if _dict.get("function_call"):
additional_kwargs = {"function_call": dict(_dict["function_call"])}
else:
additional_kwargs = {}
if role == "user" or default_class == HumanMessageChunk:
return HumanMessageChunk(content=content)
elif role == "assistant" or default_class == AIMessageChunk:
return AIMessageChunk(content=content, additional_kwargs=additional_kwargs)
elif role == "system" or default_class == SystemMessageChunk:
return SystemMessageChunk(content=content)
elif role == "function" or default_class == FunctionMessageChunk:
return FunctionMessageChunk(content=content, name=_dict["name"])
elif role or default_class == ChatMessageChunk:
return ChatMessageChunk(content=content, role=role)
else:
return default_class(content=content)
class ChatTongyi(BaseChatModel):
"""Alibaba Tongyi Qwen chat models API.
To use, you should have the ``dashscope`` python package installed,
and set env ``DASHSCOPE_API_KEY`` with your API key, or pass
it as a named parameter to the constructor.
Example:
.. code-block:: python
            from langchain.chat_models import ChatTongyi
            tongyi_chat = ChatTongyi()
"""
@property
def lc_secrets(self) -> Dict[str, str]:
return {"dashscope_api_key": "DASHSCOPE_API_KEY"}
@property
def lc_serializable(self) -> bool:
return True
client: Any #: :meta private:
model_name: str = Field(default="qwen-turbo", alias="model")
"""Model name to use."""
model_kwargs: Dict[str, Any] = Field(default_factory=dict)
top_p: float = 0.8
"""Total probability mass of tokens to consider at each step."""
dashscope_api_key: Optional[str] = None
"""Dashscope api key provide by alicloud."""
n: int = 1
"""How many completions to generate for each prompt."""
streaming: bool = False
"""Whether to stream the results or not."""
max_retries: int = 10
"""Maximum number of retries to make when generating."""
prefix_messages: List = Field(default_factory=list)
"""Series of messages for Chat input."""
result_format: str = Field(default="message")
"""Return result format"""
@property
def _llm_type(self) -> str:
"""Return type of llm."""
return "tongyi"
@root_validator()
def validate_environment(cls, values: Dict) -> Dict:
"""Validate that api key and python package exists in environment."""
get_from_dict_or_env(values, "dashscope_api_key", "DASHSCOPE_API_KEY")
try:
import dashscope
except ImportError:
raise ImportError(
"Could not import dashscope python package. "
"Please install it with `pip install dashscope --upgrade`."
)
try:
values["client"] = dashscope.Generation
except AttributeError:
raise ValueError(
"`dashscope` has no `Generation` attribute, this is likely "
"due to an old version of the dashscope package. Try upgrading it "
"with `pip install --upgrade dashscope`."
)
return values
@property
def _default_params(self) -> Dict[str, Any]:
"""Get the default parameters for calling OpenAI API."""
return {
"model": self.model_name,
"top_p": self.top_p,
"stream": self.streaming,
"n": self.n,
"result_format": self.result_format,
**self.model_kwargs,
}
def completion_with_retry(
self, run_manager: Optional[CallbackManagerForLLMRun] = None, **kwargs: Any
) -> Any:
"""Use tenacity to retry the completion call."""
retry_decorator = _create_retry_decorator(self, run_manager=run_manager)
@retry_decorator
def _completion_with_retry(**_kwargs: Any) -> Any:
resp = self.client.call(**_kwargs)
if resp.status_code == 200:
return resp
elif resp.status_code in [400, 401]:
raise ValueError(
f"status_code: {resp.status_code} \n "
f"code: {resp.code} \n message: {resp.message}"
)
else:
raise HTTPError(
f"HTTP error occurred: status_code: {resp.status_code} \n "
f"code: {resp.code} \n message: {resp.message}",
response=resp,
)
return _completion_with_retry(**kwargs)
def stream_completion_with_retry(
self, run_manager: Optional[CallbackManagerForLLMRun] = None, **kwargs: Any
) -> Any:
"""Use tenacity to retry the completion call."""
retry_decorator = _create_retry_decorator(self, run_manager=run_manager)
@retry_decorator
def _stream_completion_with_retry(**_kwargs: Any) -> Any:
return self.client.call(**_kwargs)
return _stream_completion_with_retry(**kwargs)
def _generate(
self,
messages: List[BaseMessage],
stop: Optional[List[str]] = None,
run_manager: Optional[CallbackManagerForLLMRun] = None,
stream: Optional[bool] = None,
**kwargs: Any,
) -> ChatResult:
should_stream = stream if stream is not None else self.streaming
if should_stream:
stream_iter = self._stream(
messages, stop=stop, run_manager=run_manager, **kwargs
)
return _generate_from_stream(stream_iter)
message_dicts, params = self._create_message_dicts(messages, stop)
params = {**params, **kwargs}
response = self.completion_with_retry(
messages=message_dicts, run_manager=run_manager, **params
)
return self._create_chat_result(response)
def _stream(
self,
messages: List[BaseMessage],
stop: Optional[List[str]] = None,
run_manager: Optional[CallbackManagerForLLMRun] = None,
**kwargs: Any,
) -> Iterator[ChatGenerationChunk]:
message_dicts, params = self._create_message_dicts(messages, stop)
params = {**params, **kwargs, "stream": True}
# Mark current chunk total length
length = 0
default_chunk_class = AIMessageChunk
for chunk in self.stream_completion_with_retry(
messages=message_dicts, run_manager=run_manager, **params
):
if len(chunk["output"]["choices"]) == 0:
continue
choice = chunk["output"]["choices"][0]
chunk = _convert_delta_to_message_chunk(
choice["message"], default_chunk_class, length
)
finish_reason = choice.get("finish_reason")
generation_info = (
dict(finish_reason=finish_reason) if finish_reason is not None else None
)
default_chunk_class = chunk.__class__
yield ChatGenerationChunk(message=chunk, generation_info=generation_info)
if run_manager:
run_manager.on_llm_new_token(chunk.content, chunk=chunk)
length = len(choice["message"]["content"])
def _create_message_dicts(
self, messages: List[BaseMessage], stop: Optional[List[str]]
) -> Tuple[List[Dict[str, Any]], Dict[str, Any]]:
params = self._client_params()
        # Add caller-provided stop sequences, guarding against duplicates in the default params
if stop is not None:
if "stop" in params:
raise ValueError("`stop` found in both the input and default params.")
params["stop"] = stop
message_dicts = [convert_message_to_dict(m) for m in messages]
return message_dicts, params
def _client_params(self) -> Dict[str, Any]:
"""Get the parameters used for the openai client."""
creds: Dict[str, Any] = {
"dashscope_api_key": self.dashscope_api_key,
}
return {**self._default_params, **creds}
def _create_chat_result(self, response: Mapping[str, Any]) -> ChatResult:
generations = []
for res in response["output"]["choices"]:
message = convert_dict_to_message(res["message"])
gen = ChatGeneration(
message=message,
generation_info=dict(finish_reason=res.get("finish_reason")),
)
generations.append(gen)
token_usage = response.get("usage", {})
llm_output = {"token_usage": token_usage, "model_name": self.model_name}
return ChatResult(generations=generations, llm_output=llm_output)
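# A usage sketch (illustrative; assumes DASHSCOPE_API_KEY is set and the
# `dashscope` package is installed) showing a streaming call with ChatTongyi.
def _example_chat_tongyi_stream() -> None:
    chat = ChatTongyi(streaming=True)
    # `stream` yields AIMessageChunk objects; each carries only the new text.
    for chunk in chat.stream([HumanMessage(content="Hi, who are you?")]):
        print(chunk.content, end="", flush=True)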
 | ["content"] |
2024-01-10 | mwitiderrick/langchain | libs~langchain~langchain~chains~graph_qa~neptune_cypher.py | from __future__ import annotations
import re
from typing import Any, Dict, List, Optional
from langchain.base_language import BaseLanguageModel
from langchain.callbacks.manager import CallbackManagerForChainRun
from langchain.chains.base import Chain
from langchain.chains.graph_qa.prompts import (
CYPHER_QA_PROMPT,
NEPTUNE_OPENCYPHER_GENERATION_PROMPT,
NEPTUNE_OPENCYPHER_GENERATION_SIMPLE_PROMPT,
)
from langchain.chains.llm import LLMChain
from langchain.chains.prompt_selector import ConditionalPromptSelector
from langchain.graphs import NeptuneGraph
from langchain.prompts.base import BasePromptTemplate
from langchain.pydantic_v1 import Field
INTERMEDIATE_STEPS_KEY = "intermediate_steps"
def trim_query(query: str) -> str:
"""Trim the query to only include Cypher keywords."""
keywords = (
"CALL",
"CREATE",
"DELETE",
"DETACH",
"LIMIT",
"MATCH",
"MERGE",
"OPTIONAL",
"ORDER",
"REMOVE",
"RETURN",
"SET",
"SKIP",
"UNWIND",
"WITH",
"WHERE",
"//",
)
lines = query.split("\n")
new_query = ""
for line in lines:
if line.strip().upper().startswith(keywords):
new_query += line + "\n"
return new_query
def extract_cypher(text: str) -> str:
"""Extract Cypher code from text using Regex."""
# The pattern to find Cypher code enclosed in triple backticks
pattern = r"```(.*?)```"
# Find all matches in the input text
matches = re.findall(pattern, text, re.DOTALL)
return matches[0] if matches else text
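# A small illustration (hypothetical LLM output) of how the two helpers above
# are combined: surrounding backticks are stripped first, then any lines that
# do not start with a Cypher keyword are dropped.
def _demo_cypher_cleanup() -> str:
    raw = "Here is the query:\n```\nMATCH (n:Person)\nRETURN n.name\n```"
    cypher = extract_cypher(raw)  # -> "\nMATCH (n:Person)\nRETURN n.name\n"
    return trim_query(cypher)     # -> "MATCH (n:Person)\nRETURN n.name\n"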
def use_simple_prompt(llm: BaseLanguageModel) -> bool:
"""Decides whether to use the simple prompt"""
if llm._llm_type and "anthropic" in llm._llm_type: # type: ignore
return True
# Bedrock anthropic
if hasattr(llm, "model_id") and "anthropic" in llm.model_id: # type: ignore
return True
return False
PROMPT_SELECTOR = ConditionalPromptSelector(
default_prompt=NEPTUNE_OPENCYPHER_GENERATION_PROMPT,
conditionals=[(use_simple_prompt, NEPTUNE_OPENCYPHER_GENERATION_SIMPLE_PROMPT)],
)
class NeptuneOpenCypherQAChain(Chain):
"""Chain for question-answering against a Neptune graph
by generating openCypher statements.
Example:
.. code-block:: python
chain = NeptuneOpenCypherQAChain.from_llm(
llm=llm,
graph=graph
)
response = chain.run(query)
"""
graph: NeptuneGraph = Field(exclude=True)
cypher_generation_chain: LLMChain
qa_chain: LLMChain
input_key: str = "query" #: :meta private:
output_key: str = "result" #: :meta private:
top_k: int = 10
return_intermediate_steps: bool = False
"""Whether or not to return the intermediate steps along with the final answer."""
return_direct: bool = False
"""Whether or not to return the result of querying the graph directly."""
@property
def input_keys(self) -> List[str]:
"""Return the input keys.
:meta private:
"""
return [self.input_key]
@property
def output_keys(self) -> List[str]:
"""Return the output keys.
:meta private:
"""
_output_keys = [self.output_key]
return _output_keys
@classmethod
def from_llm(
cls,
llm: BaseLanguageModel,
*,
qa_prompt: BasePromptTemplate = CYPHER_QA_PROMPT,
cypher_prompt: Optional[BasePromptTemplate] = None,
**kwargs: Any,
) -> NeptuneOpenCypherQAChain:
"""Initialize from LLM."""
qa_chain = LLMChain(llm=llm, prompt=qa_prompt)
_cypher_prompt = cypher_prompt or PROMPT_SELECTOR.get_prompt(llm)
cypher_generation_chain = LLMChain(llm=llm, prompt=_cypher_prompt)
return cls(
qa_chain=qa_chain,
cypher_generation_chain=cypher_generation_chain,
**kwargs,
)
def _call(
self,
inputs: Dict[str, Any],
run_manager: Optional[CallbackManagerForChainRun] = None,
) -> Dict[str, Any]:
"""Generate Cypher statement, use it to look up in db and answer question."""
_run_manager = run_manager or CallbackManagerForChainRun.get_noop_manager()
callbacks = _run_manager.get_child()
question = inputs[self.input_key]
intermediate_steps: List = []
generated_cypher = self.cypher_generation_chain.run(
{"question": question, "schema": self.graph.get_schema}, callbacks=callbacks
)
# Extract Cypher code if it is wrapped in backticks
generated_cypher = extract_cypher(generated_cypher)
generated_cypher = trim_query(generated_cypher)
_run_manager.on_text("Generated Cypher:", end="\n", verbose=self.verbose)
_run_manager.on_text(
generated_cypher, color="green", end="\n", verbose=self.verbose
)
intermediate_steps.append({"query": generated_cypher})
context = self.graph.query(generated_cypher)
if self.return_direct:
final_result = context
else:
_run_manager.on_text("Full Context:", end="\n", verbose=self.verbose)
_run_manager.on_text(
str(context), color="green", end="\n", verbose=self.verbose
)
intermediate_steps.append({"context": context})
result = self.qa_chain(
{"question": question, "context": context},
callbacks=callbacks,
)
final_result = result[self.qa_chain.output_key]
chain_result: Dict[str, Any] = {self.output_key: final_result}
if self.return_intermediate_steps:
chain_result[INTERMEDIATE_STEPS_KEY] = intermediate_steps
return chain_result
| [] |
2024-01-10 | mwitiderrick/langchain | libs~langchain~langchain~vectorstores~matching_engine.py | from __future__ import annotations
import json
import logging
import time
import uuid
from typing import TYPE_CHECKING, Any, Iterable, List, Optional, Type
from langchain.schema.document import Document
from langchain.schema.embeddings import Embeddings
from langchain.schema.vectorstore import VectorStore
if TYPE_CHECKING:
from google.cloud import storage
from google.cloud.aiplatform import MatchingEngineIndex, MatchingEngineIndexEndpoint
from google.oauth2.service_account import Credentials
from langchain.embeddings import TensorflowHubEmbeddings
logger = logging.getLogger()
class MatchingEngine(VectorStore):
"""`Google Vertex AI Matching Engine` vector store.
While the embeddings are stored in the Matching Engine, the embedded
documents will be stored in GCS.
An existing Index and corresponding Endpoint are preconditions for
using this module.
See usage in docs/modules/indexes/vectorstores/examples/matchingengine.ipynb
    Note that this implementation is mostly suited for read-heavy, near
    real-time use cases: reading is a real-time operation, but updating the
    index takes close to one hour."""
def __init__(
self,
project_id: str,
index: MatchingEngineIndex,
endpoint: MatchingEngineIndexEndpoint,
embedding: Embeddings,
gcs_client: storage.Client,
gcs_bucket_name: str,
credentials: Optional[Credentials] = None,
):
"""Vertex Matching Engine implementation of the vector store.
While the embeddings are stored in the Matching Engine, the embedded
documents will be stored in GCS.
An existing Index and corresponding Endpoint are preconditions for
using this module.
See usage in
docs/modules/indexes/vectorstores/examples/matchingengine.ipynb.
        Note that this implementation is mostly suited for read-heavy, near
        real-time use cases: reading is a real-time operation, but updating the
        index takes close to one hour.
Attributes:
project_id: The GCS project id.
index: The created index class. See
~:func:`MatchingEngine.from_components`.
endpoint: The created endpoint class. See
~:func:`MatchingEngine.from_components`.
embedding: A :class:`Embeddings` that will be used for
embedding the text sent. If none is sent, then the
multilingual Tensorflow Universal Sentence Encoder will be used.
gcs_client: The GCS client.
gcs_bucket_name: The GCS bucket name.
credentials (Optional): Created GCP credentials.
"""
super().__init__()
self._validate_google_libraries_installation()
self.project_id = project_id
self.index = index
self.endpoint = endpoint
self.embedding = embedding
self.gcs_client = gcs_client
self.credentials = credentials
self.gcs_bucket_name = gcs_bucket_name
@property
def embeddings(self) -> Embeddings:
return self.embedding
def _validate_google_libraries_installation(self) -> None:
"""Validates that Google libraries that are needed are installed."""
try:
from google.cloud import aiplatform, storage # noqa: F401
from google.oauth2 import service_account # noqa: F401
except ImportError:
raise ImportError(
"You must run `pip install --upgrade "
"google-cloud-aiplatform google-cloud-storage`"
"to use the MatchingEngine Vectorstore."
)
def add_texts(
self,
texts: Iterable[str],
metadatas: Optional[List[dict]] = None,
**kwargs: Any,
) -> List[str]:
"""Run more texts through the embeddings and add to the vectorstore.
Args:
texts: Iterable of strings to add to the vectorstore.
metadatas: Optional list of metadatas associated with the texts.
kwargs: vectorstore specific parameters.
Returns:
List of ids from adding the texts into the vectorstore.
"""
texts = list(texts)
if metadatas is not None and len(texts) != len(metadatas):
raise ValueError(
"texts and metadatas do not have the same length. Received "
f"{len(texts)} texts and {len(metadatas)} metadatas."
)
logger.debug("Embedding documents.")
embeddings = self.embedding.embed_documents(texts)
jsons = []
ids = []
# Could be improved with async.
for idx, (embedding, text) in enumerate(zip(embeddings, texts)):
id = str(uuid.uuid4())
ids.append(id)
json_: dict = {"id": id, "embedding": embedding}
if metadatas is not None:
json_["metadata"] = metadatas[idx]
            jsons.append(json_)  # append the record dict, not the json module
self._upload_to_gcs(text, f"documents/{id}")
logger.debug(f"Uploaded {len(ids)} documents to GCS.")
# Creating json lines from the embedded documents.
result_str = "\n".join([json.dumps(x) for x in jsons])
filename_prefix = f"indexes/{uuid.uuid4()}"
filename = f"{filename_prefix}/{time.time()}.json"
self._upload_to_gcs(result_str, filename)
logger.debug(
f"Uploaded updated json with embeddings to "
f"{self.gcs_bucket_name}/{filename}."
)
self.index = self.index.update_embeddings(
contents_delta_uri=f"gs://{self.gcs_bucket_name}/{filename_prefix}/"
)
logger.debug("Updated index with new configuration.")
return ids
def _upload_to_gcs(self, data: str, gcs_location: str) -> None:
"""Uploads data to gcs_location.
Args:
data: The data that will be stored.
gcs_location: The location where the data will be stored.
"""
bucket = self.gcs_client.get_bucket(self.gcs_bucket_name)
blob = bucket.blob(gcs_location)
blob.upload_from_string(data)
def similarity_search(
self, query: str, k: int = 4, **kwargs: Any
) -> List[Document]:
"""Return docs most similar to query.
Args:
query: The string that will be used to search for similar documents.
k: The amount of neighbors that will be retrieved.
Returns:
A list of k matching documents.
"""
logger.debug(f"Embedding query {query}.")
embedding_query = self.embedding.embed_documents([query])
# If the endpoint is public we use the find_neighbors function.
if self.endpoint._public_match_client:
response = self.endpoint.find_neighbors(
deployed_index_id=self._get_index_id(),
queries=embedding_query,
num_neighbors=k,
)
else:
response = self.endpoint.match(
deployed_index_id=self._get_index_id(),
queries=embedding_query,
num_neighbors=k,
)
if len(response) == 0:
return []
logger.debug(f"Found {len(response)} matches for the query {query}.")
results = []
# I'm only getting the first one because queries receives an array
# and the similarity_search method only receives one query. This
# means that the match method will always return an array with only
# one element.
for doc in response[0]:
page_content = self._download_from_gcs(f"documents/{doc.id}")
results.append(Document(page_content=page_content))
logger.debug("Downloaded documents for query.")
return results
def _get_index_id(self) -> str:
"""Gets the correct index id for the endpoint.
Returns:
The index id if found (which should be found) or throws
ValueError otherwise.
"""
for index in self.endpoint.deployed_indexes:
if index.index == self.index.resource_name:
return index.id
raise ValueError(
f"No index with id {self.index.resource_name} "
f"deployed on endpoint "
f"{self.endpoint.display_name}."
)
def _download_from_gcs(self, gcs_location: str) -> str:
"""Downloads from GCS in text format.
Args:
gcs_location: The location where the file is located.
Returns:
The string contents of the file.
"""
bucket = self.gcs_client.get_bucket(self.gcs_bucket_name)
blob = bucket.blob(gcs_location)
return blob.download_as_string()
@classmethod
def from_texts(
cls: Type["MatchingEngine"],
texts: List[str],
embedding: Embeddings,
metadatas: Optional[List[dict]] = None,
**kwargs: Any,
) -> "MatchingEngine":
"""Use from components instead."""
raise NotImplementedError(
"This method is not implemented. Instead, you should initialize the class"
" with `MatchingEngine.from_components(...)` and then call "
"`add_texts`"
)
@classmethod
def from_components(
cls: Type["MatchingEngine"],
project_id: str,
region: str,
gcs_bucket_name: str,
index_id: str,
endpoint_id: str,
credentials_path: Optional[str] = None,
embedding: Optional[Embeddings] = None,
) -> "MatchingEngine":
"""Takes the object creation out of the constructor.
Args:
project_id: The GCP project id.
region: The default location making the API calls. It must have
the same location as the GCS bucket and must be regional.
gcs_bucket_name: The location where the vectors will be stored in
order for the index to be created.
index_id: The id of the created index.
endpoint_id: The id of the created endpoint.
credentials_path: (Optional) The path of the Google credentials on
the local file system.
embedding: The :class:`Embeddings` that will be used for
embedding the texts.
Returns:
A configured MatchingEngine with the texts added to the index.
"""
gcs_bucket_name = cls._validate_gcs_bucket(gcs_bucket_name)
credentials = cls._create_credentials_from_file(credentials_path)
index = cls._create_index_by_id(index_id, project_id, region, credentials)
endpoint = cls._create_endpoint_by_id(
endpoint_id, project_id, region, credentials
)
gcs_client = cls._get_gcs_client(credentials, project_id)
cls._init_aiplatform(project_id, region, gcs_bucket_name, credentials)
return cls(
project_id=project_id,
index=index,
endpoint=endpoint,
embedding=embedding or cls._get_default_embeddings(),
gcs_client=gcs_client,
credentials=credentials,
gcs_bucket_name=gcs_bucket_name,
)
@classmethod
def _validate_gcs_bucket(cls, gcs_bucket_name: str) -> str:
"""Validates the gcs_bucket_name as a bucket name.
Args:
gcs_bucket_name: The received bucket uri.
Returns:
A valid gcs_bucket_name or throws ValueError if full path is
provided.
"""
gcs_bucket_name = gcs_bucket_name.replace("gs://", "")
if "/" in gcs_bucket_name:
raise ValueError(
f"The argument gcs_bucket_name should only be "
f"the bucket name. Received {gcs_bucket_name}"
)
return gcs_bucket_name
@classmethod
def _create_credentials_from_file(
cls, json_credentials_path: Optional[str]
) -> Optional[Credentials]:
"""Creates credentials for GCP.
Args:
json_credentials_path: The path on the file system where the
credentials are stored.
Returns:
An optional of Credentials or None, in which case the default
will be used.
"""
from google.oauth2 import service_account
credentials = None
if json_credentials_path is not None:
credentials = service_account.Credentials.from_service_account_file(
json_credentials_path
)
return credentials
@classmethod
def _create_index_by_id(
cls, index_id: str, project_id: str, region: str, credentials: "Credentials"
) -> MatchingEngineIndex:
"""Creates a MatchingEngineIndex object by id.
Args:
index_id: The created index id.
project_id: The project to retrieve index from.
region: Location to retrieve index from.
credentials: GCS credentials.
Returns:
A configured MatchingEngineIndex.
"""
from google.cloud import aiplatform
logger.debug(f"Creating matching engine index with id {index_id}.")
return aiplatform.MatchingEngineIndex(
index_name=index_id,
project=project_id,
location=region,
credentials=credentials,
)
@classmethod
def _create_endpoint_by_id(
cls, endpoint_id: str, project_id: str, region: str, credentials: "Credentials"
) -> MatchingEngineIndexEndpoint:
"""Creates a MatchingEngineIndexEndpoint object by id.
Args:
endpoint_id: The created endpoint id.
project_id: The project to retrieve index from.
region: Location to retrieve index from.
credentials: GCS credentials.
Returns:
A configured MatchingEngineIndexEndpoint.
"""
from google.cloud import aiplatform
logger.debug(f"Creating endpoint with id {endpoint_id}.")
return aiplatform.MatchingEngineIndexEndpoint(
index_endpoint_name=endpoint_id,
project=project_id,
location=region,
credentials=credentials,
)
@classmethod
def _get_gcs_client(
cls, credentials: "Credentials", project_id: str
) -> "storage.Client":
"""Lazily creates a GCS client.
Returns:
A configured GCS client.
"""
from google.cloud import storage
return storage.Client(credentials=credentials, project=project_id)
@classmethod
def _init_aiplatform(
cls,
project_id: str,
region: str,
gcs_bucket_name: str,
credentials: "Credentials",
) -> None:
"""Configures the aiplatform library.
Args:
project_id: The GCP project id.
region: The default location making the API calls. It must have
the same location as the GCS bucket and must be regional.
gcs_bucket_name: GCS staging location.
credentials: The GCS Credentials object.
"""
from google.cloud import aiplatform
logger.debug(
f"Initializing AI Platform for project {project_id} on "
f"{region} and for {gcs_bucket_name}."
)
aiplatform.init(
project=project_id,
location=region,
staging_bucket=gcs_bucket_name,
credentials=credentials,
)
@classmethod
def _get_default_embeddings(cls) -> "TensorflowHubEmbeddings":
"""This function returns the default embedding.
Returns:
Default TensorflowHubEmbeddings to use.
"""
from langchain.embeddings import TensorflowHubEmbeddings
return TensorflowHubEmbeddings()
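# A high-level usage sketch (all ids, bucket names, and the region below are
# placeholders/assumptions) tying the pieces above together: build the store
# from existing Matching Engine resources, add texts, and run a search.
def _example_matching_engine_search() -> List[Document]:
    from langchain.embeddings import TensorflowHubEmbeddings

    store = MatchingEngine.from_components(
        project_id="my-project",
        region="us-central1",
        gcs_bucket_name="my-embeddings-bucket",
        index_id="1234567890",
        endpoint_id="0987654321",
        embedding=TensorflowHubEmbeddings(),
    )
    store.add_texts(["LangChain supports Vertex AI Matching Engine."])
    return store.similarity_search("Which vector stores are supported?", k=1)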
| [] |
2024-01-10 | mwitiderrick/langchain | libs~langchain~langchain~vectorstores~elasticsearch.py | import logging
import uuid
from abc import ABC, abstractmethod
from typing import (
TYPE_CHECKING,
Any,
Callable,
Dict,
Iterable,
List,
Literal,
Optional,
Tuple,
Union,
)
import numpy as np
from langchain.docstore.document import Document
from langchain.schema.embeddings import Embeddings
from langchain.schema.vectorstore import VectorStore
from langchain.vectorstores.utils import DistanceStrategy, maximal_marginal_relevance
if TYPE_CHECKING:
from elasticsearch import Elasticsearch
logger = logging.getLogger(__name__)
class BaseRetrievalStrategy(ABC):
"""Base class for `Elasticsearch` retrieval strategies."""
@abstractmethod
def query(
self,
query_vector: Union[List[float], None],
query: Union[str, None],
*,
k: int,
fetch_k: int,
vector_query_field: str,
text_field: str,
filter: List[dict],
similarity: Union[DistanceStrategy, None],
) -> Dict:
"""
Executes when a search is performed on the store.
Args:
query_vector: The query vector,
or None if not using vector-based query.
query: The text query, or None if not using text-based query.
k: The total number of results to retrieve.
fetch_k: The number of results to fetch initially.
vector_query_field: The field containing the vector
representations in the index.
text_field: The field containing the text data in the index.
filter: List of filter clauses to apply to the query.
similarity: The similarity strategy to use, or None if not using one.
Returns:
Dict: The Elasticsearch query body.
"""
@abstractmethod
def index(
self,
dims_length: Union[int, None],
vector_query_field: str,
similarity: Union[DistanceStrategy, None],
) -> Dict:
"""
Executes when the index is created.
Args:
dims_length: Numeric length of the embedding vectors,
or None if not using vector-based query.
vector_query_field: The field containing the vector
representations in the index.
similarity: The similarity strategy to use,
or None if not using one.
Returns:
Dict: The Elasticsearch settings and mappings for the strategy.
"""
def before_index_setup(
self, client: "Elasticsearch", text_field: str, vector_query_field: str
) -> None:
"""
Executes before the index is created. Used for setting up
any required Elasticsearch resources like a pipeline.
Args:
client: The Elasticsearch client.
text_field: The field containing the text data in the index.
vector_query_field: The field containing the vector
representations in the index.
"""
def require_inference(self) -> bool:
"""
Returns whether or not the strategy requires inference
to be performed on the text before it is added to the index.
Returns:
bool: Whether or not the strategy requires inference
to be performed on the text before it is added to the index.
"""
return True
class ApproxRetrievalStrategy(BaseRetrievalStrategy):
"""Approximate retrieval strategy using the `HNSW` algorithm."""
def __init__(
self,
query_model_id: Optional[str] = None,
hybrid: Optional[bool] = False,
):
self.query_model_id = query_model_id
self.hybrid = hybrid
def query(
self,
query_vector: Union[List[float], None],
query: Union[str, None],
k: int,
fetch_k: int,
vector_query_field: str,
text_field: str,
filter: List[dict],
similarity: Union[DistanceStrategy, None],
) -> Dict:
knn = {
"filter": filter,
"field": vector_query_field,
"k": k,
"num_candidates": fetch_k,
}
        # Case 1: the query vector is provided via the embedding function
if query_vector and not self.query_model_id:
knn["query_vector"] = query_vector
# Case 2: Used when model has been deployed to
# Elasticsearch and can infer the query vector from the query text
elif query and self.query_model_id:
knn["query_vector_builder"] = {
"text_embedding": {
"model_id": self.query_model_id, # use 'model_id' argument
"model_text": query, # use 'query' argument
}
}
else:
raise ValueError(
"You must provide an embedding function or a"
" query_model_id to perform a similarity search."
)
# If hybrid, add a query to the knn query
# RRF is used to even the score from the knn query and text query
if self.hybrid:
return {
"knn": knn,
"query": {
"bool": {
"must": [
{
"match": {
text_field: {
"query": query,
}
}
}
],
"filter": filter,
}
},
"rank": {"rrf": {}},
}
else:
return {"knn": knn}
def index(
self,
dims_length: Union[int, None],
vector_query_field: str,
similarity: Union[DistanceStrategy, None],
) -> Dict:
"""Create the mapping for the Elasticsearch index."""
if similarity is DistanceStrategy.COSINE:
similarityAlgo = "cosine"
elif similarity is DistanceStrategy.EUCLIDEAN_DISTANCE:
similarityAlgo = "l2_norm"
elif similarity is DistanceStrategy.DOT_PRODUCT:
similarityAlgo = "dot_product"
else:
raise ValueError(f"Similarity {similarity} not supported.")
return {
"mappings": {
"properties": {
vector_query_field: {
"type": "dense_vector",
"dims": dims_length,
"index": True,
"similarity": similarityAlgo,
},
}
}
}
class ExactRetrievalStrategy(BaseRetrievalStrategy):
"""Exact retrieval strategy using the `script_score` query."""
def query(
self,
query_vector: Union[List[float], None],
query: Union[str, None],
k: int,
fetch_k: int,
vector_query_field: str,
text_field: str,
filter: Union[List[dict], None],
similarity: Union[DistanceStrategy, None],
) -> Dict:
if similarity is DistanceStrategy.COSINE:
similarityAlgo = (
f"cosineSimilarity(params.query_vector, '{vector_query_field}') + 1.0"
)
elif similarity is DistanceStrategy.EUCLIDEAN_DISTANCE:
similarityAlgo = (
f"1 / (1 + l2norm(params.query_vector, '{vector_query_field}'))"
)
elif similarity is DistanceStrategy.DOT_PRODUCT:
similarityAlgo = f"""
double value = dotProduct(params.query_vector, '{vector_query_field}');
return sigmoid(1, Math.E, -value);
"""
else:
raise ValueError(f"Similarity {similarity} not supported.")
queryBool: Dict = {"match_all": {}}
if filter:
queryBool = {"bool": {"filter": filter}}
return {
"query": {
"script_score": {
"query": queryBool,
"script": {
"source": similarityAlgo,
"params": {"query_vector": query_vector},
},
},
}
}
def index(
self,
dims_length: Union[int, None],
vector_query_field: str,
similarity: Union[DistanceStrategy, None],
) -> Dict:
"""Create the mapping for the Elasticsearch index."""
return {
"mappings": {
"properties": {
vector_query_field: {
"type": "dense_vector",
"dims": dims_length,
"index": False,
},
}
}
}
class SparseRetrievalStrategy(BaseRetrievalStrategy):
"""Sparse retrieval strategy using the `text_expansion` processor."""
def __init__(self, model_id: Optional[str] = None):
self.model_id = model_id or ".elser_model_1"
def query(
self,
query_vector: Union[List[float], None],
query: Union[str, None],
k: int,
fetch_k: int,
vector_query_field: str,
text_field: str,
filter: List[dict],
similarity: Union[DistanceStrategy, None],
) -> Dict:
return {
"query": {
"bool": {
"must": [
{
"text_expansion": {
f"{vector_query_field}.tokens": {
"model_id": self.model_id,
"model_text": query,
}
}
}
],
"filter": filter,
}
}
}
def _get_pipeline_name(self) -> str:
return f"{self.model_id}_sparse_embedding"
def before_index_setup(
self, client: "Elasticsearch", text_field: str, vector_query_field: str
) -> None:
# If model_id is provided, create a pipeline for the model
if self.model_id:
client.ingest.put_pipeline(
id=self._get_pipeline_name(),
description="Embedding pipeline for langchain vectorstore",
processors=[
{
"inference": {
"model_id": self.model_id,
"target_field": vector_query_field,
"field_map": {text_field: "text_field"},
"inference_config": {
"text_expansion": {"results_field": "tokens"}
},
}
}
],
)
def index(
self,
dims_length: Union[int, None],
vector_query_field: str,
similarity: Union[DistanceStrategy, None],
) -> Dict:
return {
"mappings": {
"properties": {
vector_query_field: {
"properties": {"tokens": {"type": "rank_features"}}
}
}
},
"settings": {"default_pipeline": self._get_pipeline_name()},
}
def require_inference(self) -> bool:
return False
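# Illustrative usage of the sparse (ELSER) strategy; the URL and index name
# below are placeholders. No embedding function is needed because the ingest
# pipeline created in before_index_setup() runs the model inside Elasticsearch:
#   store = ElasticsearchStore(
#       index_name="elser-demo",
#       es_url="http://localhost:9200",
#       strategy=ElasticsearchStore.SparseVectorRetrievalStrategy(),
#   )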
class ElasticsearchStore(VectorStore):
"""`Elasticsearch` vector store.
Example:
.. code-block:: python
from langchain.vectorstores import ElasticsearchStore
from langchain.embeddings.openai import OpenAIEmbeddings
embeddings = OpenAIEmbeddings()
vectorstore = ElasticsearchStore(
embedding=OpenAIEmbeddings(),
index_name="langchain-demo",
es_url="http://localhost:9200"
)
Args:
index_name: Name of the Elasticsearch index to create.
es_url: URL of the Elasticsearch instance to connect to.
        es_cloud_id: Cloud ID of the Elasticsearch instance to connect to.
es_user: Username to use when connecting to Elasticsearch.
es_password: Password to use when connecting to Elasticsearch.
es_api_key: API key to use when connecting to Elasticsearch.
es_connection: Optional pre-existing Elasticsearch connection.
vector_query_field: Optional. Name of the field to store
the embedding vectors in.
query_field: Optional. Name of the field to store the texts in.
strategy: Optional. Retrieval strategy to use when searching the index.
Defaults to ApproxRetrievalStrategy. Can be one of
ExactRetrievalStrategy, ApproxRetrievalStrategy,
or SparseRetrievalStrategy.
distance_strategy: Optional. Distance strategy to use when
searching the index.
Defaults to COSINE. Can be one of COSINE,
EUCLIDEAN_DISTANCE, or DOT_PRODUCT.
    If you want to use a cloud-hosted Elasticsearch instance, you can pass in the
    es_cloud_id argument instead of the es_url argument.
Example:
.. code-block:: python
from langchain.vectorstores import ElasticsearchStore
from langchain.embeddings.openai import OpenAIEmbeddings
vectorstore = ElasticsearchStore(
embedding=OpenAIEmbeddings(),
index_name="langchain-demo",
                es_cloud_id="<cloud_id>",
es_user="elastic",
es_password="<password>"
)
You can also connect to an existing Elasticsearch instance by passing in a
pre-existing Elasticsearch connection via the es_connection argument.
Example:
.. code-block:: python
from langchain.vectorstores import ElasticsearchStore
from langchain.embeddings.openai import OpenAIEmbeddings
from elasticsearch import Elasticsearch
es_connection = Elasticsearch("http://localhost:9200")
vectorstore = ElasticsearchStore(
embedding=OpenAIEmbeddings(),
index_name="langchain-demo",
es_connection=es_connection
)
ElasticsearchStore by default uses the ApproxRetrievalStrategy, which uses the
HNSW algorithm to perform approximate nearest neighbor search. This is the
fastest and most memory efficient algorithm.
If you want to use the Brute force / Exact strategy for searching vectors, you
can pass in the ExactRetrievalStrategy to the ElasticsearchStore constructor.
Example:
.. code-block:: python
from langchain.vectorstores import ElasticsearchStore
from langchain.embeddings.openai import OpenAIEmbeddings
vectorstore = ElasticsearchStore(
embedding=OpenAIEmbeddings(),
index_name="langchain-demo",
es_url="http://localhost:9200",
strategy=ElasticsearchStore.ExactRetrievalStrategy()
)
Both strategies require that you know the similarity metric you want to use
when creating the index. The default is cosine similarity, but you can also
use dot product or euclidean distance.
Example:
.. code-block:: python
from langchain.vectorstores import ElasticsearchStore
from langchain.embeddings.openai import OpenAIEmbeddings
from langchain.vectorstores.utils import DistanceStrategy
vectorstore = ElasticsearchStore(
embedding=OpenAIEmbeddings(),
index_name="langchain-demo",
es_url="http://localhost:9200",
distance_strategy="DOT_PRODUCT"
)
"""
def __init__(
self,
index_name: str,
*,
embedding: Optional[Embeddings] = None,
es_connection: Optional["Elasticsearch"] = None,
es_url: Optional[str] = None,
es_cloud_id: Optional[str] = None,
es_user: Optional[str] = None,
es_api_key: Optional[str] = None,
es_password: Optional[str] = None,
vector_query_field: str = "vector",
query_field: str = "text",
distance_strategy: Optional[
Literal[
DistanceStrategy.COSINE,
DistanceStrategy.DOT_PRODUCT,
DistanceStrategy.EUCLIDEAN_DISTANCE,
]
] = None,
strategy: BaseRetrievalStrategy = ApproxRetrievalStrategy(),
):
self.embedding = embedding
self.index_name = index_name
self.query_field = query_field
self.vector_query_field = vector_query_field
self.distance_strategy = (
DistanceStrategy.COSINE
if distance_strategy is None
else DistanceStrategy[distance_strategy]
)
self.strategy = strategy
if es_connection is not None:
self.client = es_connection.options(
headers={"user-agent": self.get_user_agent()}
)
elif es_url is not None or es_cloud_id is not None:
self.client = ElasticsearchStore.connect_to_elasticsearch(
es_url=es_url,
username=es_user,
password=es_password,
cloud_id=es_cloud_id,
api_key=es_api_key,
)
else:
raise ValueError(
"""Either provide a pre-existing Elasticsearch connection, \
or valid credentials for creating a new connection."""
)
@staticmethod
def get_user_agent() -> str:
from langchain import __version__
return f"langchain-py-vs/{__version__}"
@staticmethod
def connect_to_elasticsearch(
*,
es_url: Optional[str] = None,
cloud_id: Optional[str] = None,
api_key: Optional[str] = None,
username: Optional[str] = None,
password: Optional[str] = None,
) -> "Elasticsearch":
try:
import elasticsearch
except ImportError:
raise ImportError(
"Could not import elasticsearch python package. "
"Please install it with `pip install elasticsearch`."
)
if es_url and cloud_id:
raise ValueError(
"Both es_url and cloud_id are defined. Please provide only one."
)
connection_params: Dict[str, Any] = {}
if es_url:
connection_params["hosts"] = [es_url]
elif cloud_id:
connection_params["cloud_id"] = cloud_id
else:
raise ValueError("Please provide either elasticsearch_url or cloud_id.")
if api_key:
connection_params["api_key"] = api_key
elif username and password:
connection_params["basic_auth"] = (username, password)
es_client = elasticsearch.Elasticsearch(
**connection_params,
headers={"user-agent": ElasticsearchStore.get_user_agent()},
)
try:
es_client.info()
except Exception as e:
logger.error(f"Error connecting to Elasticsearch: {e}")
raise e
return es_client
@property
def embeddings(self) -> Optional[Embeddings]:
return self.embedding
def similarity_search(
self,
query: str,
k: int = 4,
filter: Optional[List[dict]] = None,
**kwargs: Any,
) -> List[Document]:
"""Return Elasticsearch documents most similar to query.
Args:
query: Text to look up documents similar to.
k: Number of Documents to return. Defaults to 4.
filter: Array of Elasticsearch filter clauses to apply to the query.
Returns:
List of Documents most similar to the query,
in descending order of similarity.
"""
results = self._search(query=query, k=k, filter=filter, **kwargs)
return [doc for doc, _ in results]
def max_marginal_relevance_search(
self,
query: str,
k: int = 4,
fetch_k: int = 20,
lambda_mult: float = 0.5,
fields: Optional[List[str]] = None,
**kwargs: Any,
) -> List[Document]:
"""Return docs selected using the maximal marginal relevance.
Maximal marginal relevance optimizes for similarity to query AND diversity
among selected documents.
Args:
query (str): Text to look up documents similar to.
k (int): Number of Documents to return. Defaults to 4.
fetch_k (int): Number of Documents to fetch to pass to MMR algorithm.
lambda_mult (float): Number between 0 and 1 that determines the degree
of diversity among the results with 0 corresponding
to maximum diversity and 1 to minimum diversity.
Defaults to 0.5.
fields: Other fields to get from elasticsearch source. These fields
will be added to the document metadata.
Returns:
List[Document]: A list of Documents selected by maximal marginal relevance.
"""
if self.embedding is None:
raise ValueError("You must provide an embedding function to perform MMR")
remove_vector_query_field_from_metadata = True
if fields is None:
fields = [self.vector_query_field]
elif self.vector_query_field not in fields:
fields.append(self.vector_query_field)
else:
remove_vector_query_field_from_metadata = False
# Embed the query
query_embedding = self.embedding.embed_query(query)
# Fetch the initial documents
got_docs = self._search(
query_vector=query_embedding, k=fetch_k, fields=fields, **kwargs
)
# Get the embeddings for the fetched documents
got_embeddings = [doc.metadata[self.vector_query_field] for doc, _ in got_docs]
# Select documents using maximal marginal relevance
selected_indices = maximal_marginal_relevance(
np.array(query_embedding), got_embeddings, lambda_mult=lambda_mult, k=k
)
selected_docs = [got_docs[i][0] for i in selected_indices]
if remove_vector_query_field_from_metadata:
for doc in selected_docs:
                del doc.metadata[self.vector_query_field]
return selected_docs
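    # Illustrative call (the query is a placeholder): fetch 20 candidates from
    # Elasticsearch, then keep the 4 most diverse ones.
    #   docs = vectorstore.max_marginal_relevance_search("some query", k=4, fetch_k=20)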
def similarity_search_with_score(
self, query: str, k: int = 4, filter: Optional[List[dict]] = None, **kwargs: Any
) -> List[Tuple[Document, float]]:
"""Return Elasticsearch documents most similar to query, along with scores.
Args:
query: Text to look up documents similar to.
k: Number of Documents to return. Defaults to 4.
filter: Array of Elasticsearch filter clauses to apply to the query.
Returns:
List of Documents most similar to the query and score for each
"""
return self._search(query=query, k=k, filter=filter, **kwargs)
def similarity_search_by_vector_with_relevance_scores(
self,
embedding: List[float],
k: int = 4,
filter: Optional[List[Dict]] = None,
**kwargs: Any,
) -> List[Tuple[Document, float]]:
"""Return Elasticsearch documents most similar to query, along with scores.
Args:
embedding: Embedding to look up documents similar to.
k: Number of Documents to return. Defaults to 4.
filter: Array of Elasticsearch filter clauses to apply to the query.
Returns:
List of Documents most similar to the embedding and score for each
"""
return self._search(query_vector=embedding, k=k, filter=filter, **kwargs)
def _search(
self,
query: Optional[str] = None,
k: int = 4,
query_vector: Union[List[float], None] = None,
fetch_k: int = 50,
fields: Optional[List[str]] = None,
filter: Optional[List[dict]] = None,
custom_query: Optional[Callable[[Dict, Union[str, None]], Dict]] = None,
) -> List[Tuple[Document, float]]:
"""Return Elasticsearch documents most similar to query, along with scores.
Args:
query: Text to look up documents similar to.
k: Number of Documents to return. Defaults to 4.
query_vector: Embedding to look up documents similar to.
fetch_k: Number of candidates to fetch from each shard.
Defaults to 50.
fields: List of fields to return from Elasticsearch.
Defaults to only returning the text field.
filter: Array of Elasticsearch filter clauses to apply to the query.
custom_query: Function to modify the Elasticsearch
query body before it is sent to Elasticsearch.
Returns:
List of Documents most similar to the query and score for each
"""
if fields is None:
fields = []
if "metadata" not in fields:
fields.append("metadata")
if self.query_field not in fields:
fields.append(self.query_field)
if self.embedding and query is not None:
query_vector = self.embedding.embed_query(query)
query_body = self.strategy.query(
query_vector=query_vector,
query=query,
k=k,
fetch_k=fetch_k,
vector_query_field=self.vector_query_field,
text_field=self.query_field,
filter=filter or [],
similarity=self.distance_strategy,
)
logger.debug(f"Query body: {query_body}")
if custom_query is not None:
query_body = custom_query(query_body, query)
logger.debug(f"Calling custom_query, Query body now: {query_body}")
# Perform the kNN search on the Elasticsearch index and return the results.
response = self.client.search(
index=self.index_name,
**query_body,
size=k,
source=fields,
)
docs_and_scores = []
for hit in response["hits"]["hits"]:
for field in fields:
if field in hit["_source"] and field not in [
"metadata",
self.query_field,
]:
hit["_source"]["metadata"][field] = hit["_source"][field]
docs_and_scores.append(
(
Document(
page_content=hit["_source"][self.query_field],
metadata=hit["_source"]["metadata"],
),
hit["_score"],
)
)
return docs_and_scores
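    # Illustrative custom_query hook (the function and field names below are
    # placeholders): it receives the generated body and the raw query string,
    # and must return the body that is actually sent to Elasticsearch.
    #   def add_year_filter(query_body: Dict, query: Union[str, None]) -> Dict:
    #       query_body["post_filter"] = {"range": {"metadata.year": {"gte": 2020}}}
    #       return query_body
    #   store._search(query="hello", custom_query=add_year_filter)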
def delete(
self,
ids: Optional[List[str]] = None,
refresh_indices: Optional[bool] = True,
**kwargs: Any,
) -> Optional[bool]:
"""Delete documents from the Elasticsearch index.
Args:
ids: List of ids of documents to delete.
refresh_indices: Whether to refresh the index
after deleting documents. Defaults to True.
"""
try:
from elasticsearch.helpers import BulkIndexError, bulk
except ImportError:
raise ImportError(
"Could not import elasticsearch python package. "
"Please install it with `pip install elasticsearch`."
)
body = []
if ids is None:
raise ValueError("ids must be provided.")
for _id in ids:
body.append({"_op_type": "delete", "_index": self.index_name, "_id": _id})
if len(body) > 0:
try:
bulk(self.client, body, refresh=refresh_indices, ignore_status=404)
logger.debug(f"Deleted {len(body)} texts from index")
return True
except BulkIndexError as e:
logger.error(f"Error deleting texts: {e}")
firstError = e.errors[0].get("index", {}).get("error", {})
logger.error(f"First error reason: {firstError.get('reason')}")
raise e
else:
logger.debug("No texts to delete from index")
return False
def _create_index_if_not_exists(
self, index_name: str, dims_length: Optional[int] = None
) -> None:
"""Create the Elasticsearch index if it doesn't already exist.
Args:
index_name: Name of the Elasticsearch index to create.
dims_length: Length of the embedding vectors.
"""
if self.client.indices.exists(index=index_name):
logger.debug(f"Index {index_name} already exists. Skipping creation.")
else:
if dims_length is None and self.strategy.require_inference():
raise ValueError(
"Cannot create index without specifying dims_length "
"when the index doesn't already exist. We infer "
"dims_length from the first embedding. Check that "
"you have provided an embedding function."
)
self.strategy.before_index_setup(
client=self.client,
text_field=self.query_field,
vector_query_field=self.vector_query_field,
)
indexSettings = self.strategy.index(
vector_query_field=self.vector_query_field,
dims_length=dims_length,
similarity=self.distance_strategy,
)
logger.debug(
f"Creating index {index_name} with mappings {indexSettings['mappings']}"
)
self.client.indices.create(index=index_name, **indexSettings)
def add_texts(
self,
texts: Iterable[str],
metadatas: Optional[List[Dict[Any, Any]]] = None,
ids: Optional[List[str]] = None,
refresh_indices: bool = True,
create_index_if_not_exists: bool = True,
bulk_kwargs: Optional[Dict] = None,
**kwargs: Any,
) -> List[str]:
"""Run more texts through the embeddings and add to the vectorstore.
Args:
texts: Iterable of strings to add to the vectorstore.
metadatas: Optional list of metadatas associated with the texts.
ids: Optional list of ids to associate with the texts.
refresh_indices: Whether to refresh the Elasticsearch indices
after adding the texts.
create_index_if_not_exists: Whether to create the Elasticsearch
index if it doesn't already exist.
            bulk_kwargs: Additional arguments to pass to Elasticsearch bulk.
- chunk_size: Optional. Number of texts to add to the
index at a time. Defaults to 500.
Returns:
List of ids from adding the texts into the vectorstore.
"""
try:
from elasticsearch.helpers import BulkIndexError, bulk
except ImportError:
raise ImportError(
"Could not import elasticsearch python package. "
"Please install it with `pip install elasticsearch`."
)
bulk_kwargs = bulk_kwargs or {}
embeddings = []
ids = ids or [str(uuid.uuid4()) for _ in texts]
requests = []
if self.embedding is not None:
            # An embedding function was provided, so embed the texts
            # client-side before indexing.
embeddings = self.embedding.embed_documents(list(texts))
dims_length = len(embeddings[0])
if create_index_if_not_exists:
self._create_index_if_not_exists(
index_name=self.index_name, dims_length=dims_length
)
for i, (text, vector) in enumerate(zip(texts, embeddings)):
metadata = metadatas[i] if metadatas else {}
requests.append(
{
"_op_type": "index",
"_index": self.index_name,
self.query_field: text,
self.vector_query_field: vector,
"metadata": metadata,
"_id": ids[i],
}
)
else:
            # No embedding function was provided; the strategy is expected to
            # handle inference (e.g. via an ingest pipeline), so the raw texts
            # are indexed as-is.
if create_index_if_not_exists:
self._create_index_if_not_exists(index_name=self.index_name)
for i, text in enumerate(texts):
metadata = metadatas[i] if metadatas else {}
requests.append(
{
"_op_type": "index",
"_index": self.index_name,
self.query_field: text,
"metadata": metadata,
"_id": ids[i],
}
)
if len(requests) > 0:
try:
success, failed = bulk(
self.client,
requests,
stats_only=True,
refresh=refresh_indices,
**bulk_kwargs,
)
logger.debug(
f"Added {success} and failed to add {failed} texts to index"
)
logger.debug(f"added texts {ids} to index")
return ids
except BulkIndexError as e:
logger.error(f"Error adding texts: {e}")
firstError = e.errors[0].get("index", {}).get("error", {})
logger.error(f"First error reason: {firstError.get('reason')}")
raise e
else:
logger.debug("No texts to add to index")
return []
@classmethod
def from_texts(
cls,
texts: List[str],
embedding: Optional[Embeddings] = None,
metadatas: Optional[List[Dict[str, Any]]] = None,
bulk_kwargs: Optional[Dict] = None,
**kwargs: Any,
) -> "ElasticsearchStore":
"""Construct ElasticsearchStore wrapper from raw documents.
Example:
.. code-block:: python
from langchain.vectorstores import ElasticsearchStore
from langchain.embeddings.openai import OpenAIEmbeddings
db = ElasticsearchStore.from_texts(
texts,
                    # embeddings optional if using
                    # a strategy that doesn't require inference
embeddings,
index_name="langchain-demo",
es_url="http://localhost:9200"
)
Args:
texts: List of texts to add to the Elasticsearch index.
embedding: Embedding function to use to embed the texts.
metadatas: Optional list of metadatas associated with the texts.
index_name: Name of the Elasticsearch index to create.
es_url: URL of the Elasticsearch instance to connect to.
cloud_id: Cloud ID of the Elasticsearch instance to connect to.
es_user: Username to use when connecting to Elasticsearch.
es_password: Password to use when connecting to Elasticsearch.
es_api_key: API key to use when connecting to Elasticsearch.
es_connection: Optional pre-existing Elasticsearch connection.
vector_query_field: Optional. Name of the field to
store the embedding vectors in.
query_field: Optional. Name of the field to store the texts in.
distance_strategy: Optional. Name of the distance
strategy to use. Defaults to "COSINE".
can be one of "COSINE",
"EUCLIDEAN_DISTANCE", "DOT_PRODUCT".
bulk_kwargs: Optional. Additional arguments to pass to
Elasticsearch bulk.
"""
elasticsearchStore = ElasticsearchStore._create_cls_from_kwargs(
embedding=embedding, **kwargs
)
# Encode the provided texts and add them to the newly created index.
elasticsearchStore.add_texts(
texts, metadatas=metadatas, bulk_kwargs=bulk_kwargs
)
return elasticsearchStore
@staticmethod
def _create_cls_from_kwargs(
embedding: Optional[Embeddings] = None, **kwargs: Any
) -> "ElasticsearchStore":
index_name = kwargs.get("index_name")
if index_name is None:
raise ValueError("Please provide an index_name.")
es_connection = kwargs.get("es_connection")
es_cloud_id = kwargs.get("es_cloud_id")
es_url = kwargs.get("es_url")
es_user = kwargs.get("es_user")
es_password = kwargs.get("es_password")
es_api_key = kwargs.get("es_api_key")
vector_query_field = kwargs.get("vector_query_field")
query_field = kwargs.get("query_field")
distance_strategy = kwargs.get("distance_strategy")
strategy = kwargs.get("strategy", ElasticsearchStore.ApproxRetrievalStrategy())
optional_args = {}
if vector_query_field is not None:
optional_args["vector_query_field"] = vector_query_field
if query_field is not None:
optional_args["query_field"] = query_field
return ElasticsearchStore(
index_name=index_name,
embedding=embedding,
es_url=es_url,
es_connection=es_connection,
es_cloud_id=es_cloud_id,
es_user=es_user,
es_password=es_password,
es_api_key=es_api_key,
strategy=strategy,
distance_strategy=distance_strategy,
**optional_args,
)
@classmethod
def from_documents(
cls,
documents: List[Document],
embedding: Optional[Embeddings] = None,
bulk_kwargs: Optional[Dict] = None,
**kwargs: Any,
) -> "ElasticsearchStore":
"""Construct ElasticsearchStore wrapper from documents.
Example:
.. code-block:: python
from langchain.vectorstores import ElasticsearchStore
from langchain.embeddings.openai import OpenAIEmbeddings
db = ElasticsearchStore.from_documents(
texts,
embeddings,
index_name="langchain-demo",
es_url="http://localhost:9200"
)
Args:
texts: List of texts to add to the Elasticsearch index.
embedding: Embedding function to use to embed the texts.
Do not provide if using a strategy
that doesn't require inference.
metadatas: Optional list of metadatas associated with the texts.
index_name: Name of the Elasticsearch index to create.
es_url: URL of the Elasticsearch instance to connect to.
cloud_id: Cloud ID of the Elasticsearch instance to connect to.
es_user: Username to use when connecting to Elasticsearch.
es_password: Password to use when connecting to Elasticsearch.
es_api_key: API key to use when connecting to Elasticsearch.
es_connection: Optional pre-existing Elasticsearch connection.
vector_query_field: Optional. Name of the field
to store the embedding vectors in.
query_field: Optional. Name of the field to store the texts in.
bulk_kwargs: Optional. Additional arguments to pass to
Elasticsearch bulk.
"""
elasticsearchStore = ElasticsearchStore._create_cls_from_kwargs(
embedding=embedding, **kwargs
)
# Encode the provided texts and add them to the newly created index.
elasticsearchStore.add_documents(documents, bulk_kwargs=bulk_kwargs)
return elasticsearchStore
@staticmethod
def ExactRetrievalStrategy() -> "ExactRetrievalStrategy":
"""Used to perform brute force / exact
nearest neighbor search via script_score."""
return ExactRetrievalStrategy()
@staticmethod
def ApproxRetrievalStrategy(
query_model_id: Optional[str] = None,
hybrid: Optional[bool] = False,
) -> "ApproxRetrievalStrategy":
"""Used to perform approximate nearest neighbor search
using the HNSW algorithm.
At build index time, this strategy will create a
dense vector field in the index and store the
embedding vectors in the index.
At query time, the text will either be embedded using the
provided embedding function or the query_model_id
will be used to embed the text using the model
deployed to Elasticsearch.
        If query_model_id is used, do not provide an embedding function.
Args:
query_model_id: Optional. ID of the model to use to
embed the query text within the stack. Requires
embedding model to be deployed to Elasticsearch.
hybrid: Optional. If True, will perform a hybrid search
using both the knn query and a text query.
Defaults to False.
"""
return ApproxRetrievalStrategy(query_model_id=query_model_id, hybrid=hybrid)
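    # Illustrative only; the model id is a placeholder for a text-embedding
    # model deployed in your Elasticsearch cluster:
    #   strategy = ElasticsearchStore.ApproxRetrievalStrategy(
    #       query_model_id="<your-deployed-model-id>", hybrid=True
    #   )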
@staticmethod
def SparseVectorRetrievalStrategy(
model_id: Optional[str] = None,
) -> "SparseRetrievalStrategy":
"""Used to perform sparse vector search via text_expansion.
Used for when you want to use ELSER model to perform document search.
At build index time, this strategy will create a pipeline that
will embed the text using the ELSER model and store the
resulting tokens in the index.
At query time, the text will be embedded using the ELSER
model and the resulting tokens will be used to
perform a text_expansion query.
Args:
model_id: Optional. Default is ".elser_model_1".
ID of the model to use to embed the query text
within the stack. Requires embedding model to be
deployed to Elasticsearch.
"""
return SparseRetrievalStrategy(model_id=model_id)
| [] |
2024-01-10 | mwitiderrick/langchain | libs~experimental~langchain_experimental~data_anonymizer~presidio.py | from __future__ import annotations
import json
from pathlib import Path
from typing import TYPE_CHECKING, Callable, Dict, List, Optional, Union
import yaml
from langchain_experimental.data_anonymizer.base import (
DEFAULT_DEANONYMIZER_MATCHING_STRATEGY,
AnonymizerBase,
ReversibleAnonymizerBase,
)
from langchain_experimental.data_anonymizer.deanonymizer_mapping import (
DeanonymizerMapping,
MappingDataType,
create_anonymizer_mapping,
)
from langchain_experimental.data_anonymizer.deanonymizer_matching_strategies import (
exact_matching_strategy,
)
from langchain_experimental.data_anonymizer.faker_presidio_mapping import (
get_pseudoanonymizer_mapping,
)
try:
from presidio_analyzer import AnalyzerEngine
from presidio_analyzer.nlp_engine import NlpEngineProvider
except ImportError as e:
raise ImportError(
"Could not import presidio_analyzer, please install with "
"`pip install presidio-analyzer`. You will also need to download a "
"spaCy model to use the analyzer, e.g. "
"`python -m spacy download en_core_web_lg`."
) from e
try:
from presidio_anonymizer import AnonymizerEngine
from presidio_anonymizer.entities import OperatorConfig
except ImportError as e:
raise ImportError(
"Could not import presidio_anonymizer, please install with "
"`pip install presidio-anonymizer`."
) from e
if TYPE_CHECKING:
from presidio_analyzer import EntityRecognizer
# Configuring Anonymizer for multiple languages
# Detailed description and examples can be found here:
# langchain/docs/extras/guides/privacy/multi_language_anonymization.ipynb
DEFAULT_LANGUAGES_CONFIG = {
# You can also use Stanza or transformers library.
# See https://microsoft.github.io/presidio/analyzer/customizing_nlp_models/
"nlp_engine_name": "spacy",
"models": [
{"lang_code": "en", "model_name": "en_core_web_lg"},
# {"lang_code": "de", "model_name": "de_core_news_md"},
# {"lang_code": "es", "model_name": "es_core_news_md"},
# ...
# List of available models: https://spacy.io/usage/models
],
}
class PresidioAnonymizerBase(AnonymizerBase):
def __init__(
self,
analyzed_fields: Optional[List[str]] = None,
operators: Optional[Dict[str, OperatorConfig]] = None,
languages_config: Dict = DEFAULT_LANGUAGES_CONFIG,
add_default_faker_operators: bool = True,
faker_seed: Optional[int] = None,
):
"""
Args:
analyzed_fields: List of fields to detect and then anonymize.
Defaults to all entities supported by Microsoft Presidio.
operators: Operators to use for anonymization.
Operators allow for custom anonymization of detected PII.
Learn more:
https://microsoft.github.io/presidio/tutorial/10_simple_anonymization/
languages_config: Configuration for the NLP engine.
First language in the list will be used as the main language
in self.anonymize(...) when no language is specified.
Learn more:
https://microsoft.github.io/presidio/analyzer/customizing_nlp_models/
faker_seed: Seed used to initialize faker.
Defaults to None, in which case faker will be seeded randomly
and provide random values.
"""
self.analyzed_fields = (
analyzed_fields
if analyzed_fields is not None
else list(get_pseudoanonymizer_mapping().keys())
)
if add_default_faker_operators:
self.operators = {
field: OperatorConfig(
operator_name="custom", params={"lambda": faker_function}
)
for field, faker_function in get_pseudoanonymizer_mapping(
faker_seed
).items()
}
else:
self.operators = {}
if operators:
self.add_operators(operators)
provider = NlpEngineProvider(nlp_configuration=languages_config)
nlp_engine = provider.create_engine()
self.supported_languages = list(nlp_engine.nlp.keys())
self._analyzer = AnalyzerEngine(
supported_languages=self.supported_languages, nlp_engine=nlp_engine
)
self._anonymizer = AnonymizerEngine()
def add_recognizer(self, recognizer: EntityRecognizer) -> None:
"""Add a recognizer to the analyzer
Args:
recognizer: Recognizer to add to the analyzer.
"""
self._analyzer.registry.add_recognizer(recognizer)
self.analyzed_fields.extend(recognizer.supported_entities)
def add_operators(self, operators: Dict[str, OperatorConfig]) -> None:
"""Add operators to the anonymizer
Args:
operators: Operators to add to the anonymizer.
"""
self.operators.update(operators)
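    # Illustrative custom operator; the Faker usage below is an assumption of
    # this sketch, not a requirement of the class:
    #   from faker import Faker
    #   fake = Faker()
    #   anonymizer.add_operators(
    #       {"PERSON": OperatorConfig(operator_name="custom",
    #                                 params={"lambda": lambda _: fake.name()})}
    #   )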
class PresidioAnonymizer(PresidioAnonymizerBase):
def _anonymize(
self,
text: str,
language: Optional[str] = None,
allow_list: Optional[List[str]] = None,
) -> str:
"""Anonymize text.
Each PII entity is replaced with a fake value.
Each time fake values will be different, as they are generated randomly.
PresidioAnonymizer has no built-in memory -
so it will not remember the effects of anonymizing previous texts.
>>> anonymizer = PresidioAnonymizer()
>>> anonymizer.anonymize("My name is John Doe. Hi John Doe!")
'My name is Noah Rhodes. Hi Noah Rhodes!'
>>> anonymizer.anonymize("My name is John Doe. Hi John Doe!")
'My name is Brett Russell. Hi Brett Russell!'
Args:
text: text to anonymize
language: language to use for analysis of PII
If None, the first (main) language in the list
of languages specified in the configuration will be used.
"""
if language is None:
language = self.supported_languages[0]
if language not in self.supported_languages:
raise ValueError(
f"Language '{language}' is not supported. "
f"Supported languages are: {self.supported_languages}. "
"Change your language configuration file to add more languages."
)
analyzer_results = self._analyzer.analyze(
text,
entities=self.analyzed_fields,
language=language,
allow_list=allow_list,
)
filtered_analyzer_results = (
self._anonymizer._remove_conflicts_and_get_text_manipulation_data(
analyzer_results
)
)
anonymizer_results = self._anonymizer.anonymize(
text,
analyzer_results=analyzer_results,
operators=self.operators,
)
anonymizer_mapping = create_anonymizer_mapping(
text,
filtered_analyzer_results,
anonymizer_results,
)
return exact_matching_strategy(text, anonymizer_mapping)
class PresidioReversibleAnonymizer(PresidioAnonymizerBase, ReversibleAnonymizerBase):
def __init__(
self,
analyzed_fields: Optional[List[str]] = None,
operators: Optional[Dict[str, OperatorConfig]] = None,
languages_config: Dict = DEFAULT_LANGUAGES_CONFIG,
add_default_faker_operators: bool = True,
faker_seed: Optional[int] = None,
):
super().__init__(
analyzed_fields,
operators,
languages_config,
add_default_faker_operators,
faker_seed,
)
self._deanonymizer_mapping = DeanonymizerMapping()
@property
def deanonymizer_mapping(self) -> MappingDataType:
"""Return the deanonymizer mapping"""
return self._deanonymizer_mapping.data
@property
def anonymizer_mapping(self) -> MappingDataType:
"""Return the anonymizer mapping
This is just the reverse version of the deanonymizer mapping."""
return {
key: {v: k for k, v in inner_dict.items()}
for key, inner_dict in self.deanonymizer_mapping.items()
}
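    # Illustrative shape of both mappings, keyed by entity type (values are
    # examples matching the docstrings below):
    #   deanonymizer_mapping: {"PERSON": {"Noah Rhodes": "John Doe"}}
    #   anonymizer_mapping:   {"PERSON": {"John Doe": "Noah Rhodes"}}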
def _anonymize(
self,
text: str,
language: Optional[str] = None,
allow_list: Optional[List[str]] = None,
) -> str:
"""Anonymize text.
Each PII entity is replaced with a fake value.
Each time fake values will be different, as they are generated randomly.
At the same time, we will create a mapping from each anonymized entity
back to its original text value.
Thanks to the built-in memory, all previously anonymised entities
will be remembered and replaced by the same fake values:
>>> anonymizer = PresidioReversibleAnonymizer()
>>> anonymizer.anonymize("My name is John Doe. Hi John Doe!")
'My name is Noah Rhodes. Hi Noah Rhodes!'
>>> anonymizer.anonymize("My name is John Doe. Hi John Doe!")
'My name is Noah Rhodes. Hi Noah Rhodes!'
Args:
text: text to anonymize
language: language to use for analysis of PII
If None, the first (main) language in the list
of languages specified in the configuration will be used.
"""
if language is None:
language = self.supported_languages[0]
if language not in self.supported_languages:
raise ValueError(
f"Language '{language}' is not supported. "
f"Supported languages are: {self.supported_languages}. "
"Change your language configuration file to add more languages."
)
analyzer_results = self._analyzer.analyze(
text,
entities=self.analyzed_fields,
language=language,
allow_list=allow_list,
)
filtered_analyzer_results = (
self._anonymizer._remove_conflicts_and_get_text_manipulation_data(
analyzer_results
)
)
anonymizer_results = self._anonymizer.anonymize(
text,
analyzer_results=analyzer_results,
operators=self.operators,
)
new_deanonymizer_mapping = create_anonymizer_mapping(
text,
filtered_analyzer_results,
anonymizer_results,
is_reversed=True,
)
self._deanonymizer_mapping.update(new_deanonymizer_mapping)
return exact_matching_strategy(text, self.anonymizer_mapping)
def _deanonymize(
self,
text_to_deanonymize: str,
deanonymizer_matching_strategy: Callable[
[str, MappingDataType], str
] = DEFAULT_DEANONYMIZER_MATCHING_STRATEGY,
) -> str:
"""Deanonymize text.
Each anonymized entity is replaced with its original value.
This method exploits the mapping created during the anonymization process.
Args:
text_to_deanonymize: text to deanonymize
deanonymizer_matching_strategy: function to use to match
anonymized entities with their original values and replace them.
"""
if not self._deanonymizer_mapping:
raise ValueError(
"Deanonymizer mapping is empty.",
"Please call anonymize() and anonymize some text first.",
)
text_to_deanonymize = deanonymizer_matching_strategy(
text_to_deanonymize, self.deanonymizer_mapping
)
return text_to_deanonymize
def reset_deanonymizer_mapping(self) -> None:
"""Reset the deanonymizer mapping"""
self._deanonymizer_mapping = DeanonymizerMapping()
def save_deanonymizer_mapping(self, file_path: Union[Path, str]) -> None:
"""Save the deanonymizer mapping to a JSON or YAML file.
Args:
file_path: Path to file to save the mapping to.
Example:
.. code-block:: python
anonymizer.save_deanonymizer_mapping(file_path="path/mapping.json")
"""
save_path = Path(file_path)
if save_path.suffix not in [".json", ".yaml"]:
raise ValueError(f"{save_path} must have an extension of .json or .yaml")
# Make sure parent directories exist
save_path.parent.mkdir(parents=True, exist_ok=True)
if save_path.suffix == ".json":
with open(save_path, "w") as f:
json.dump(self.deanonymizer_mapping, f, indent=2)
elif save_path.suffix == ".yaml":
with open(save_path, "w") as f:
yaml.dump(self.deanonymizer_mapping, f, default_flow_style=False)
def load_deanonymizer_mapping(self, file_path: Union[Path, str]) -> None:
"""Load the deanonymizer mapping from a JSON or YAML file.
Args:
file_path: Path to file to load the mapping from.
Example:
.. code-block:: python
anonymizer.load_deanonymizer_mapping(file_path="path/mapping.json")
"""
load_path = Path(file_path)
if load_path.suffix not in [".json", ".yaml"]:
raise ValueError(f"{load_path} must have an extension of .json or .yaml")
if load_path.suffix == ".json":
with open(load_path, "r") as f:
loaded_mapping = json.load(f)
elif load_path.suffix == ".yaml":
with open(load_path, "r") as f:
loaded_mapping = yaml.load(f, Loader=yaml.FullLoader)
self._deanonymizer_mapping.update(loaded_mapping)
| [] |
2024-01-10 | mwitiderrick/langchain | libs~langchain~langchain~retrievers~google_cloud_documentai_warehouse.py | """Retriever wrapper for Google Cloud Document AI Warehouse."""
from typing import TYPE_CHECKING, Any, Dict, List, Optional
from langchain.callbacks.manager import CallbackManagerForRetrieverRun
from langchain.docstore.document import Document
from langchain.pydantic_v1 import root_validator
from langchain.schema import BaseRetriever
from langchain.utils import get_from_dict_or_env
if TYPE_CHECKING:
from google.cloud.contentwarehouse_v1 import (
DocumentServiceClient,
RequestMetadata,
SearchDocumentsRequest,
)
from google.cloud.contentwarehouse_v1.services.document_service.pagers import (
SearchDocumentsPager,
)
class GoogleDocumentAIWarehouseRetriever(BaseRetriever):
"""A retriever based on Document AI Warehouse.
    Documents should be created and uploaded in a separate flow; this
    retriever uses only the provided Document AI schema_id to search
    for relevant documents.
More info: https://cloud.google.com/document-ai-warehouse.
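    Example (illustrative only; the project number, query and user LDAP
    below are placeholders):
        .. code-block:: python
            retriever = GoogleDocumentAIWarehouseRetriever(
                project_number="123456789",
            )
            docs = retriever.get_relevant_documents(
                "How do I file an expense report?", user_ldap="jdoe"
            )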
"""
location: str = "us"
"GCP location where DocAI Warehouse is placed."
project_number: str
"GCP project number, should contain digits only."
schema_id: Optional[str] = None
"DocAI Warehouse schema to queary against. If nothing is provided, all documents "
"in the project will be searched."
qa_size_limit: int = 5
"The limit on the number of documents returned."
client: "DocumentServiceClient" = None #: :meta private:
@root_validator()
def validate_environment(cls, values: Dict) -> Dict:
"""Validates the environment."""
try: # noqa: F401
from google.cloud.contentwarehouse_v1 import (
DocumentServiceClient,
)
except ImportError as exc:
raise ImportError(
"google.cloud.contentwarehouse is not installed."
"Please install it with pip install google-cloud-contentwarehouse"
) from exc
values["project_number"] = get_from_dict_or_env(
values, "project_number", "PROJECT_NUMBER"
)
values["client"] = DocumentServiceClient()
return values
def _prepare_request_metadata(self, user_ldap: str) -> "RequestMetadata":
from google.cloud.contentwarehouse_v1 import RequestMetadata, UserInfo
user_info = UserInfo(id=f"user:{user_ldap}")
return RequestMetadata(user_info=user_info)
def _get_relevant_documents(
self, query: str, *, run_manager: CallbackManagerForRetrieverRun, **kwargs: Any
) -> List[Document]:
request = self._prepare_search_request(query, **kwargs)
response = self.client.search_documents(request=request)
return self._parse_search_response(response=response)
def _prepare_search_request(
self, query: str, **kwargs: Any
) -> "SearchDocumentsRequest":
from google.cloud.contentwarehouse_v1 import (
DocumentQuery,
SearchDocumentsRequest,
)
try:
user_ldap = kwargs["user_ldap"]
except KeyError:
raise ValueError("Argument user_ldap should be provided!")
request_metadata = self._prepare_request_metadata(user_ldap=user_ldap)
schemas = []
if self.schema_id:
schemas.append(
self.client.document_schema_path(
project=self.project_number,
location=self.location,
document_schema=self.schema_id,
)
)
return SearchDocumentsRequest(
parent=self.client.common_location_path(self.project_number, self.location),
request_metadata=request_metadata,
document_query=DocumentQuery(
query=query, is_nl_query=True, document_schema_names=schemas
),
qa_size_limit=self.qa_size_limit,
)
def _parse_search_response(
self, response: "SearchDocumentsPager"
) -> List[Document]:
documents = []
for doc in response.matching_documents:
metadata = {
"title": doc.document.title,
"source": doc.document.raw_document_path,
}
documents.append(
Document(page_content=doc.search_text_snippet, metadata=metadata)
)
return documents
| [] |
2024-01-10 | mwitiderrick/langchain | libs~langchain~langchain~chat_models~litellm.py | """Wrapper around LiteLLM's model I/O library."""
from __future__ import annotations
import logging
from typing import (
Any,
AsyncIterator,
Callable,
Dict,
Iterator,
List,
Mapping,
Optional,
Tuple,
Type,
Union,
)
from langchain.callbacks.manager import (
AsyncCallbackManagerForLLMRun,
CallbackManagerForLLMRun,
)
from langchain.chat_models.base import (
BaseChatModel,
_agenerate_from_stream,
_generate_from_stream,
)
from langchain.llms.base import create_base_retry_decorator
from langchain.pydantic_v1 import Field, root_validator
from langchain.schema import (
ChatGeneration,
ChatResult,
)
from langchain.schema.messages import (
AIMessage,
AIMessageChunk,
BaseMessage,
BaseMessageChunk,
ChatMessage,
ChatMessageChunk,
FunctionMessage,
FunctionMessageChunk,
HumanMessage,
HumanMessageChunk,
SystemMessage,
SystemMessageChunk,
)
from langchain.schema.output import ChatGenerationChunk
from langchain.utils import get_from_dict_or_env
logger = logging.getLogger(__name__)
class ChatLiteLLMException(Exception):
"""Error with the `LiteLLM I/O` library"""
def _create_retry_decorator(
llm: ChatLiteLLM,
run_manager: Optional[
Union[AsyncCallbackManagerForLLMRun, CallbackManagerForLLMRun]
] = None,
) -> Callable[[Any], Any]:
"""Returns a tenacity retry decorator, preconfigured to handle PaLM exceptions"""
import openai
errors = [
openai.error.Timeout,
openai.error.APIError,
openai.error.APIConnectionError,
openai.error.RateLimitError,
openai.error.ServiceUnavailableError,
]
return create_base_retry_decorator(
error_types=errors, max_retries=llm.max_retries, run_manager=run_manager
)
def _convert_dict_to_message(_dict: Mapping[str, Any]) -> BaseMessage:
role = _dict["role"]
if role == "user":
return HumanMessage(content=_dict["content"])
elif role == "assistant":
# Fix for azure
# Also OpenAI returns None for tool invocations
content = _dict.get("content", "") or ""
if _dict.get("function_call"):
additional_kwargs = {"function_call": dict(_dict["function_call"])}
else:
additional_kwargs = {}
return AIMessage(content=content, additional_kwargs=additional_kwargs)
elif role == "system":
return SystemMessage(content=_dict["content"])
elif role == "function":
return FunctionMessage(content=_dict["content"], name=_dict["name"])
else:
return ChatMessage(content=_dict["content"], role=role)
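# Illustrative conversions (values are examples only):
#   _convert_dict_to_message({"role": "user", "content": "Hi"})
#   -> HumanMessage(content="Hi")
#   _convert_dict_to_message({"role": "assistant", "content": "Hello!"})
#   -> AIMessage(content="Hello!")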
async def acompletion_with_retry(
llm: ChatLiteLLM,
run_manager: Optional[AsyncCallbackManagerForLLMRun] = None,
**kwargs: Any,
) -> Any:
"""Use tenacity to retry the async completion call."""
retry_decorator = _create_retry_decorator(llm, run_manager=run_manager)
@retry_decorator
async def _completion_with_retry(**kwargs: Any) -> Any:
        # Use the async completion API exposed by the litellm client
return await llm.client.acreate(**kwargs)
return await _completion_with_retry(**kwargs)
def _convert_delta_to_message_chunk(
_dict: Mapping[str, Any], default_class: Type[BaseMessageChunk]
) -> BaseMessageChunk:
role = _dict.get("role")
content = _dict.get("content") or ""
if _dict.get("function_call"):
additional_kwargs = {"function_call": dict(_dict["function_call"])}
else:
additional_kwargs = {}
if role == "user" or default_class == HumanMessageChunk:
return HumanMessageChunk(content=content)
elif role == "assistant" or default_class == AIMessageChunk:
return AIMessageChunk(content=content, additional_kwargs=additional_kwargs)
elif role == "system" or default_class == SystemMessageChunk:
return SystemMessageChunk(content=content)
elif role == "function" or default_class == FunctionMessageChunk:
return FunctionMessageChunk(content=content, name=_dict["name"])
elif role or default_class == ChatMessageChunk:
return ChatMessageChunk(content=content, role=role)
else:
return default_class(content=content)
def _convert_message_to_dict(message: BaseMessage) -> dict:
if isinstance(message, ChatMessage):
message_dict = {"role": message.role, "content": message.content}
elif isinstance(message, HumanMessage):
message_dict = {"role": "user", "content": message.content}
elif isinstance(message, AIMessage):
message_dict = {"role": "assistant", "content": message.content}
if "function_call" in message.additional_kwargs:
message_dict["function_call"] = message.additional_kwargs["function_call"]
elif isinstance(message, SystemMessage):
message_dict = {"role": "system", "content": message.content}
elif isinstance(message, FunctionMessage):
message_dict = {
"role": "function",
"content": message.content,
"name": message.name,
}
else:
raise ValueError(f"Got unknown type {message}")
if "name" in message.additional_kwargs:
message_dict["name"] = message.additional_kwargs["name"]
return message_dict
class ChatLiteLLM(BaseChatModel):
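    """A chat model wrapper around the LiteLLM I/O library.
    Example (illustrative; the model name is an example and API keys are
    read from the environment, e.g. OPENAI_API_KEY):
        .. code-block:: python
            from langchain.chat_models import ChatLiteLLM
            chat = ChatLiteLLM(model="gpt-3.5-turbo", temperature=0.2)
            chat.predict("Translate 'good morning' to French.")
    """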
client: Any #: :meta private:
model: str = "gpt-3.5-turbo"
model_name: Optional[str] = None
"""Model name to use."""
openai_api_key: Optional[str] = None
azure_api_key: Optional[str] = None
anthropic_api_key: Optional[str] = None
replicate_api_key: Optional[str] = None
cohere_api_key: Optional[str] = None
openrouter_api_key: Optional[str] = None
streaming: bool = False
api_base: Optional[str] = None
organization: Optional[str] = None
custom_llm_provider: Optional[str] = None
request_timeout: Optional[Union[float, Tuple[float, float]]] = None
    temperature: Optional[float] = 1
    """Run inference with this temperature. Must be in the closed
    interval [0.0, 1.0]."""
    model_kwargs: Dict[str, Any] = Field(default_factory=dict)
    """Holds any model parameters valid for the completion call that are
    not explicitly specified."""
top_p: Optional[float] = None
"""Decode using nucleus sampling: consider the smallest set of tokens whose
probability sum is at least top_p. Must be in the closed interval [0.0, 1.0]."""
top_k: Optional[int] = None
"""Decode using top-k sampling: consider the set of top_k most probable tokens.
Must be positive."""
n: int = 1
"""Number of chat completions to generate for each prompt. Note that the API may
not return the full n completions if duplicates are generated."""
max_tokens: int = 256
max_retries: int = 6
@property
def _default_params(self) -> Dict[str, Any]:
"""Get the default parameters for calling OpenAI API."""
set_model_value = self.model
if self.model_name is not None:
set_model_value = self.model_name
return {
"model": set_model_value,
"force_timeout": self.request_timeout,
"max_tokens": self.max_tokens,
"stream": self.streaming,
"n": self.n,
"temperature": self.temperature,
"custom_llm_provider": self.custom_llm_provider,
**self.model_kwargs,
}
@property
def _client_params(self) -> Dict[str, Any]:
"""Get the parameters used for the openai client."""
set_model_value = self.model
if self.model_name is not None:
set_model_value = self.model_name
self.client.api_base = self.api_base
self.client.organization = self.organization
creds: Dict[str, Any] = {
"model": set_model_value,
"force_timeout": self.request_timeout,
}
return {**self._default_params, **creds}
def completion_with_retry(
self, run_manager: Optional[CallbackManagerForLLMRun] = None, **kwargs: Any
) -> Any:
"""Use tenacity to retry the completion call."""
retry_decorator = _create_retry_decorator(self, run_manager=run_manager)
@retry_decorator
def _completion_with_retry(**kwargs: Any) -> Any:
return self.client.completion(**kwargs)
return _completion_with_retry(**kwargs)
@root_validator()
def validate_environment(cls, values: Dict) -> Dict:
"""Validate api key, python package exists, temperature, top_p, and top_k."""
try:
import litellm
except ImportError:
raise ChatLiteLLMException(
"Could not import google.generativeai python package. "
"Please install it with `pip install google-generativeai`"
)
values["openai_api_key"] = get_from_dict_or_env(
values, "openai_api_key", "OPENAI_API_KEY", default=""
)
values["azure_api_key"] = get_from_dict_or_env(
values, "azure_api_key", "AZURE_API_KEY", default=""
)
values["anthropic_api_key"] = get_from_dict_or_env(
values, "anthropic_api_key", "ANTHROPIC_API_KEY", default=""
)
values["replicate_api_key"] = get_from_dict_or_env(
values, "replicate_api_key", "REPLICATE_API_KEY", default=""
)
values["openrouter_api_key"] = get_from_dict_or_env(
values, "openrouter_api_key", "OPENROUTER_API_KEY", default=""
)
values["cohere_api_key"] = get_from_dict_or_env(
values, "cohere_api_key", "COHERE_API_KEY", default=""
)
values["huggingface_api_key"] = get_from_dict_or_env(
values, "huggingface_api_key", "HUGGINGFACE_API_KEY", default=""
)
values["together_ai_api_key"] = get_from_dict_or_env(
values, "together_ai_api_key", "TOGETHERAI_API_KEY", default=""
)
values["client"] = litellm
if values["temperature"] is not None and not 0 <= values["temperature"] <= 1:
raise ValueError("temperature must be in the range [0.0, 1.0]")
if values["top_p"] is not None and not 0 <= values["top_p"] <= 1:
raise ValueError("top_p must be in the range [0.0, 1.0]")
if values["top_k"] is not None and values["top_k"] <= 0:
raise ValueError("top_k must be positive")
return values
def _generate(
self,
messages: List[BaseMessage],
stop: Optional[List[str]] = None,
run_manager: Optional[CallbackManagerForLLMRun] = None,
stream: Optional[bool] = None,
**kwargs: Any,
) -> ChatResult:
should_stream = stream if stream is not None else self.streaming
if should_stream:
stream_iter = self._stream(
messages, stop=stop, run_manager=run_manager, **kwargs
)
return _generate_from_stream(stream_iter)
message_dicts, params = self._create_message_dicts(messages, stop)
params = {**params, **kwargs}
response = self.completion_with_retry(
messages=message_dicts, run_manager=run_manager, **params
)
return self._create_chat_result(response)
def _create_chat_result(self, response: Mapping[str, Any]) -> ChatResult:
generations = []
for res in response["choices"]:
message = _convert_dict_to_message(res["message"])
gen = ChatGeneration(
message=message,
generation_info=dict(finish_reason=res.get("finish_reason")),
)
generations.append(gen)
token_usage = response.get("usage", {})
set_model_value = self.model
if self.model_name is not None:
set_model_value = self.model_name
llm_output = {"token_usage": token_usage, "model": set_model_value}
return ChatResult(generations=generations, llm_output=llm_output)
def _create_message_dicts(
self, messages: List[BaseMessage], stop: Optional[List[str]]
) -> Tuple[List[Dict[str, Any]], Dict[str, Any]]:
params = self._client_params
if stop is not None:
if "stop" in params:
raise ValueError("`stop` found in both the input and default params.")
params["stop"] = stop
message_dicts = [_convert_message_to_dict(m) for m in messages]
return message_dicts, params
def _stream(
self,
messages: List[BaseMessage],
stop: Optional[List[str]] = None,
run_manager: Optional[CallbackManagerForLLMRun] = None,
**kwargs: Any,
) -> Iterator[ChatGenerationChunk]:
message_dicts, params = self._create_message_dicts(messages, stop)
params = {**params, **kwargs, "stream": True}
default_chunk_class = AIMessageChunk
for chunk in self.completion_with_retry(
messages=message_dicts, run_manager=run_manager, **params
):
if len(chunk["choices"]) == 0:
continue
delta = chunk["choices"][0]["delta"]
chunk = _convert_delta_to_message_chunk(delta, default_chunk_class)
default_chunk_class = chunk.__class__
yield ChatGenerationChunk(message=chunk)
if run_manager:
run_manager.on_llm_new_token(chunk.content)
async def _astream(
self,
messages: List[BaseMessage],
stop: Optional[List[str]] = None,
run_manager: Optional[AsyncCallbackManagerForLLMRun] = None,
**kwargs: Any,
) -> AsyncIterator[ChatGenerationChunk]:
message_dicts, params = self._create_message_dicts(messages, stop)
params = {**params, **kwargs, "stream": True}
default_chunk_class = AIMessageChunk
async for chunk in await acompletion_with_retry(
self, messages=message_dicts, run_manager=run_manager, **params
):
if len(chunk["choices"]) == 0:
continue
delta = chunk["choices"][0]["delta"]
chunk = _convert_delta_to_message_chunk(delta, default_chunk_class)
default_chunk_class = chunk.__class__
yield ChatGenerationChunk(message=chunk)
if run_manager:
await run_manager.on_llm_new_token(chunk.content)
async def _agenerate(
self,
messages: List[BaseMessage],
stop: Optional[List[str]] = None,
run_manager: Optional[AsyncCallbackManagerForLLMRun] = None,
stream: Optional[bool] = None,
**kwargs: Any,
) -> ChatResult:
should_stream = stream if stream is not None else self.streaming
if should_stream:
stream_iter = self._astream(
messages=messages, stop=stop, run_manager=run_manager, **kwargs
)
return await _agenerate_from_stream(stream_iter)
message_dicts, params = self._create_message_dicts(messages, stop)
params = {**params, **kwargs}
response = await acompletion_with_retry(
self, messages=message_dicts, run_manager=run_manager, **params
)
return self._create_chat_result(response)
@property
def _identifying_params(self) -> Dict[str, Any]:
"""Get the identifying parameters."""
set_model_value = self.model
if self.model_name is not None:
set_model_value = self.model_name
return {
"model": set_model_value,
"temperature": self.temperature,
"top_p": self.top_p,
"top_k": self.top_k,
"n": self.n,
}
@property
def _llm_type(self) -> str:
return "litellm-chat"
| [
"content"
] |
2024-01-10 | mwitiderrick/langchain | libs~langchain~tests~integration_tests~vectorstores~test_xata.py | """Test Xata vector store functionality.
Before running this test, please create a Xata database by following
the instructions from:
https://python.langchain.com/docs/integrations/vectorstores/xata
"""
import os
from langchain.docstore.document import Document
from langchain.embeddings.openai import OpenAIEmbeddings
from langchain.vectorstores.xata import XataVectorStore
class TestXata:
@classmethod
def setup_class(cls) -> None:
assert os.getenv("XATA_API_KEY"), "XATA_API_KEY environment variable is not set"
assert os.getenv("XATA_DB_URL"), "XATA_DB_URL environment variable is not set"
def test_similarity_search_without_metadata(
self, embedding_openai: OpenAIEmbeddings
) -> None:
"""Test end to end constructions and search without metadata."""
texts = ["foo", "bar", "baz"]
docsearch = XataVectorStore.from_texts(
api_key=os.getenv("XATA_API_KEY"),
db_url=os.getenv("XATA_DB_URL"),
texts=texts,
embedding=embedding_openai,
)
docsearch.wait_for_indexing(ndocs=3)
output = docsearch.similarity_search("foo", k=1)
assert output == [Document(page_content="foo")]
docsearch.delete(delete_all=True)
def test_similarity_search_with_metadata(
self, embedding_openai: OpenAIEmbeddings
) -> None:
"""Test end to end construction and search with a metadata filter.
This test requires a column named "a" of type integer to be present
in the Xata table."""
texts = ["foo", "foo", "foo"]
metadatas = [{"a": i} for i in range(len(texts))]
docsearch = XataVectorStore.from_texts(
api_key=os.getenv("XATA_API_KEY"),
db_url=os.getenv("XATA_DB_URL"),
texts=texts,
embedding=embedding_openai,
metadatas=metadatas,
)
docsearch.wait_for_indexing(ndocs=3)
output = docsearch.similarity_search("foo", k=1, filter={"a": 1})
assert output == [Document(page_content="foo", metadata={"a": 1})]
docsearch.delete(delete_all=True)
| [] |
2024-01-10 | mwitiderrick/langchain | libs~langchain~langchain~llms~bedrock.py | import json
import warnings
from abc import ABC
from typing import Any, Dict, Iterator, List, Mapping, Optional
from langchain.callbacks.manager import CallbackManagerForLLMRun
from langchain.llms.base import LLM
from langchain.llms.utils import enforce_stop_tokens
from langchain.pydantic_v1 import BaseModel, Extra, root_validator
from langchain.schema.output import GenerationChunk
from langchain.utilities.anthropic import (
get_num_tokens_anthropic,
get_token_ids_anthropic,
)
HUMAN_PROMPT = "\n\nHuman:"
ASSISTANT_PROMPT = "\n\nAssistant:"
ALTERNATION_ERROR = (
"Error: Prompt must alternate between '\n\nHuman:' and '\n\nAssistant:'."
)
def _add_newlines_before_ha(input_text: str) -> str:
new_text = input_text
for word in ["Human:", "Assistant:"]:
new_text = new_text.replace(word, "\n\n" + word)
for i in range(2):
new_text = new_text.replace("\n\n\n" + word, "\n\n" + word)
return new_text
def _human_assistant_format(input_text: str) -> str:
if input_text.count("Human:") == 0 or (
input_text.find("Human:") > input_text.find("Assistant:")
and "Assistant:" in input_text
):
input_text = HUMAN_PROMPT + " " + input_text # SILENT CORRECTION
if input_text.count("Assistant:") == 0:
input_text = input_text + ASSISTANT_PROMPT # SILENT CORRECTION
if input_text[: len("Human:")] == "Human:":
input_text = "\n\n" + input_text
input_text = _add_newlines_before_ha(input_text)
count = 0
# track alternation
for i in range(len(input_text)):
if input_text[i : i + len(HUMAN_PROMPT)] == HUMAN_PROMPT:
if count % 2 == 0:
count += 1
else:
warnings.warn(ALTERNATION_ERROR + f" Received {input_text}")
if input_text[i : i + len(ASSISTANT_PROMPT)] == ASSISTANT_PROMPT:
if count % 2 == 1:
count += 1
else:
warnings.warn(ALTERNATION_ERROR + f" Received {input_text}")
if count % 2 == 1: # Only saw Human, no Assistant
input_text = input_text + ASSISTANT_PROMPT # SILENT CORRECTION
return input_text
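# Illustrative behaviour (not an executable doctest):
#   _human_assistant_format("Hello")
#   -> "\n\nHuman: Hello\n\nAssistant:"
# A bare prompt is wrapped so that it alternates between the Human and
# Assistant turns that Anthropic models expect.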
class LLMInputOutputAdapter:
"""Adapter class to prepare the inputs from Langchain to a format
that LLM model expects.
It also provides helper function to extract
the generated text from the model response."""
provider_to_output_key_map = {
"anthropic": "completion",
"amazon": "outputText",
"cohere": "text",
}
@classmethod
def prepare_input(
cls, provider: str, prompt: str, model_kwargs: Dict[str, Any]
) -> Dict[str, Any]:
input_body = {**model_kwargs}
if provider == "anthropic":
input_body["prompt"] = _human_assistant_format(prompt)
elif provider == "ai21" or provider == "cohere":
input_body["prompt"] = prompt
elif provider == "amazon":
input_body = dict()
input_body["inputText"] = prompt
input_body["textGenerationConfig"] = {**model_kwargs}
else:
input_body["inputText"] = prompt
if provider == "anthropic" and "max_tokens_to_sample" not in input_body:
input_body["max_tokens_to_sample"] = 256
return input_body
@classmethod
def prepare_output(cls, provider: str, response: Any) -> str:
if provider == "anthropic":
response_body = json.loads(response.get("body").read().decode())
return response_body.get("completion")
else:
response_body = json.loads(response.get("body").read())
if provider == "ai21":
return response_body.get("completions")[0].get("data").get("text")
elif provider == "cohere":
return response_body.get("generations")[0].get("text")
else:
return response_body.get("results")[0].get("outputText")
@classmethod
def prepare_output_stream(
cls, provider: str, response: Any, stop: Optional[List[str]] = None
) -> Iterator[GenerationChunk]:
stream = response.get("body")
if not stream:
return
if provider not in cls.provider_to_output_key_map:
raise ValueError(
f"Unknown streaming response output key for provider: {provider}"
)
for event in stream:
chunk = event.get("chunk")
if chunk:
chunk_obj = json.loads(chunk.get("bytes").decode())
if provider == "cohere" and (
chunk_obj["is_finished"]
or chunk_obj[cls.provider_to_output_key_map[provider]]
== "<EOS_TOKEN>"
):
return
# chunk obj format varies with provider
yield GenerationChunk(
text=chunk_obj[cls.provider_to_output_key_map[provider]]
)
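# Added illustrative sketch (not in the original file): how prepare_input
# shapes the request body differently per Bedrock provider. The model kwargs
# used here are placeholder parameters.
def _example_prepare_input() -> None:
    anthropic_body = LLMInputOutputAdapter.prepare_input(
        provider="anthropic", prompt="Hello", model_kwargs={"temperature": 0.5}
    )
    # Anthropic prompts are wrapped in Human/Assistant turns and receive a
    # default max_tokens_to_sample when none is supplied.
    assert anthropic_body["max_tokens_to_sample"] == 256
    assert anthropic_body["prompt"].startswith(HUMAN_PROMPT)
    amazon_body = LLMInputOutputAdapter.prepare_input(
        provider="amazon", prompt="Hello", model_kwargs={"maxTokenCount": 50}
    )
    assert amazon_body["inputText"] == "Hello"
    assert amazon_body["textGenerationConfig"] == {"maxTokenCount": 50}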
class BedrockBase(BaseModel, ABC):
client: Any #: :meta private:
region_name: Optional[str] = None
"""The aws region e.g., `us-west-2`. Fallsback to AWS_DEFAULT_REGION env variable
or region specified in ~/.aws/config in case it is not provided here.
"""
credentials_profile_name: Optional[str] = None
"""The name of the profile in the ~/.aws/credentials or ~/.aws/config files, which
has either access keys or role information specified.
If not specified, the default credential profile or, if on an EC2 instance,
credentials from IMDS will be used.
See: https://boto3.amazonaws.com/v1/documentation/api/latest/guide/credentials.html
"""
model_id: str
"""Id of the model to call, e.g., amazon.titan-text-express-v1, this is
equivalent to the modelId property in the list-foundation-models api"""
model_kwargs: Optional[Dict] = None
"""Keyword arguments to pass to the model."""
endpoint_url: Optional[str] = None
"""Needed if you don't want to default to us-east-1 endpoint"""
streaming: bool = False
"""Whether to stream the results."""
provider_stop_sequence_key_name_map: Mapping[str, str] = {
"anthropic": "stop_sequences",
"amazon": "stopSequences",
"ai21": "stop_sequences",
"cohere": "stop_sequences",
}
@root_validator()
def validate_environment(cls, values: Dict) -> Dict:
"""Validate that AWS credentials to and python package exists in environment."""
# Skip creating new client if passed in constructor
if values["client"] is not None:
return values
try:
import boto3
if values["credentials_profile_name"] is not None:
session = boto3.Session(profile_name=values["credentials_profile_name"])
else:
# use default credentials
session = boto3.Session()
client_params = {}
if values["region_name"]:
client_params["region_name"] = values["region_name"]
if values["endpoint_url"]:
client_params["endpoint_url"] = values["endpoint_url"]
values["client"] = session.client("bedrock-runtime", **client_params)
except ImportError:
raise ModuleNotFoundError(
"Could not import boto3 python package. "
"Please install it with `pip install boto3`."
)
except Exception as e:
raise ValueError(
"Could not load credentials to authenticate with AWS client. "
"Please check that credentials in the specified "
"profile name are valid."
) from e
return values
@property
def _identifying_params(self) -> Mapping[str, Any]:
"""Get the identifying parameters."""
_model_kwargs = self.model_kwargs or {}
return {
**{"model_kwargs": _model_kwargs},
}
def _get_provider(self) -> str:
return self.model_id.split(".")[0]
@property
def _model_is_anthropic(self) -> bool:
return self._get_provider() == "anthropic"
def _prepare_input_and_invoke(
self,
prompt: str,
stop: Optional[List[str]] = None,
run_manager: Optional[CallbackManagerForLLMRun] = None,
**kwargs: Any,
) -> str:
_model_kwargs = self.model_kwargs or {}
provider = self._get_provider()
params = {**_model_kwargs, **kwargs}
input_body = LLMInputOutputAdapter.prepare_input(provider, prompt, params)
body = json.dumps(input_body)
accept = "application/json"
contentType = "application/json"
try:
response = self.client.invoke_model(
body=body, modelId=self.model_id, accept=accept, contentType=contentType
)
text = LLMInputOutputAdapter.prepare_output(provider, response)
except Exception as e:
raise ValueError(f"Error raised by bedrock service: {e}")
if stop is not None:
text = enforce_stop_tokens(text, stop)
return text
def _prepare_input_and_invoke_stream(
self,
prompt: str,
stop: Optional[List[str]] = None,
run_manager: Optional[CallbackManagerForLLMRun] = None,
**kwargs: Any,
) -> Iterator[GenerationChunk]:
_model_kwargs = self.model_kwargs or {}
provider = self._get_provider()
if stop:
if provider not in self.provider_stop_sequence_key_name_map:
raise ValueError(
f"Stop sequence key name for {provider} is not supported."
)
# stop sequence from _generate() overrides
# stop sequences in the class attribute
_model_kwargs[self.provider_stop_sequence_key_name_map.get(provider)] = stop
if provider == "cohere":
_model_kwargs["stream"] = True
params = {**_model_kwargs, **kwargs}
input_body = LLMInputOutputAdapter.prepare_input(provider, prompt, params)
body = json.dumps(input_body)
try:
response = self.client.invoke_model_with_response_stream(
body=body,
modelId=self.model_id,
accept="application/json",
contentType="application/json",
)
except Exception as e:
raise ValueError(f"Error raised by bedrock service: {e}")
for chunk in LLMInputOutputAdapter.prepare_output_stream(
provider, response, stop
):
yield chunk
if run_manager is not None:
run_manager.on_llm_new_token(chunk.text, chunk=chunk)
class Bedrock(LLM, BedrockBase):
"""Bedrock models.
To authenticate, the AWS client uses the following methods to
automatically load credentials:
https://boto3.amazonaws.com/v1/documentation/api/latest/guide/credentials.html
If a specific credential profile should be used, you must pass
the name of the profile from the ~/.aws/credentials file that is to be used.
Make sure the credentials / roles used have the required policies to
access the Bedrock service.
"""
"""
Example:
.. code-block:: python
            from langchain.llms import Bedrock
            llm = Bedrock(
                credentials_profile_name="default",
                model_id="amazon.titan-text-express-v1",
                streaming=True
            )
"""
@property
def _llm_type(self) -> str:
"""Return type of llm."""
return "amazon_bedrock"
class Config:
"""Configuration for this pydantic object."""
extra = Extra.forbid
def _stream(
self,
prompt: str,
stop: Optional[List[str]] = None,
run_manager: Optional[CallbackManagerForLLMRun] = None,
**kwargs: Any,
) -> Iterator[GenerationChunk]:
"""Call out to Bedrock service with streaming.
Args:
prompt (str): The prompt to pass into the model
stop (Optional[List[str]], optional): Stop sequences. These will
override any stop sequences in the `model_kwargs` attribute.
Defaults to None.
run_manager (Optional[CallbackManagerForLLMRun], optional): Callback
run managers used to process the output. Defaults to None.
Returns:
Iterator[GenerationChunk]: Generator that yields the streamed responses.
Yields:
Iterator[GenerationChunk]: Responses from the model.
"""
return self._prepare_input_and_invoke_stream(
prompt=prompt, stop=stop, run_manager=run_manager, **kwargs
)
def _call(
self,
prompt: str,
stop: Optional[List[str]] = None,
run_manager: Optional[CallbackManagerForLLMRun] = None,
**kwargs: Any,
) -> str:
"""Call out to Bedrock service model.
Args:
prompt: The prompt to pass into the model.
stop: Optional list of stop words to use when generating.
Returns:
The string generated by the model.
Example:
.. code-block:: python
response = llm("Tell me a joke.")
"""
if self.streaming:
completion = ""
for chunk in self._stream(
prompt=prompt, stop=stop, run_manager=run_manager, **kwargs
):
completion += chunk.text
return completion
return self._prepare_input_and_invoke(prompt=prompt, stop=stop, **kwargs)
def get_num_tokens(self, text: str) -> int:
if self._model_is_anthropic:
return get_num_tokens_anthropic(text)
else:
return super().get_num_tokens(text)
def get_token_ids(self, text: str) -> List[int]:
if self._model_is_anthropic:
return get_token_ids_anthropic(text)
else:
return super().get_token_ids(text)
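# Hedged usage sketch (added example, not part of the original module): the
# profile name and model id below are placeholders; a real call requires
# boto3, valid AWS credentials and access to the Bedrock service.
def _example_bedrock_usage() -> None:
    llm = Bedrock(
        credentials_profile_name="default",
        model_id="amazon.titan-text-express-v1",
        model_kwargs={"temperature": 0.2},
    )
    print(llm("Tell me a joke."))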
| [
"\n\nAssistant:",
"\n\nHuman:"
] |
2024-01-10 | mwitiderrick/langchain | libs~langchain~langchain~chat_models~baidu_qianfan_endpoint.py | from __future__ import annotations
import logging
from typing import (
Any,
AsyncIterator,
Dict,
Iterator,
List,
Mapping,
Optional,
)
from langchain.callbacks.manager import (
AsyncCallbackManagerForLLMRun,
CallbackManagerForLLMRun,
)
from langchain.chat_models.base import BaseChatModel
from langchain.pydantic_v1 import Field, root_validator
from langchain.schema import ChatGeneration, ChatResult
from langchain.schema.messages import (
AIMessage,
AIMessageChunk,
BaseMessage,
BaseMessageChunk,
ChatMessage,
FunctionMessage,
HumanMessage,
SystemMessage,
)
from langchain.schema.output import ChatGenerationChunk
from langchain.utils import get_from_dict_or_env
logger = logging.getLogger(__name__)
def _convert_resp_to_message_chunk(resp: Mapping[str, Any]) -> BaseMessageChunk:
return AIMessageChunk(
content=resp["result"],
role="assistant",
)
def convert_message_to_dict(message: BaseMessage) -> dict:
"""Convert a message to a dictionary that can be passed to the API."""
message_dict: Dict[str, Any]
if isinstance(message, ChatMessage):
message_dict = {"role": message.role, "content": message.content}
elif isinstance(message, HumanMessage):
message_dict = {"role": "user", "content": message.content}
elif isinstance(message, AIMessage):
message_dict = {"role": "assistant", "content": message.content}
if "function_call" in message.additional_kwargs:
message_dict["functions"] = message.additional_kwargs["function_call"]
# If function call only, content is None not empty string
if message_dict["content"] == "":
message_dict["content"] = None
elif isinstance(message, FunctionMessage):
message_dict = {
"role": "function",
"content": message.content,
"name": message.name,
}
else:
raise TypeError(f"Got unknown type {message}")
return message_dict
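# Added illustrative sketch (not in the original file): converting LangChain
# message objects into the dict format sent to the Qianfan API.
def _example_convert_messages() -> None:
    msgs = [HumanMessage(content="hi"), AIMessage(content="hello")]
    assert [convert_message_to_dict(m) for m in msgs] == [
        {"role": "user", "content": "hi"},
        {"role": "assistant", "content": "hello"},
    ]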
class QianfanChatEndpoint(BaseChatModel):
"""Baidu Qianfan chat models.
To use, you should have the ``qianfan`` python package installed, and
    the environment variables ``qianfan_ak`` and ``qianfan_sk`` set with your
API key and Secret Key.
ak, sk are required parameters
which you could get from https://cloud.baidu.com/product/wenxinworkshop
Example:
.. code-block:: python
from langchain.chat_models import QianfanChatEndpoint
qianfan_chat = QianfanChatEndpoint(model="ERNIE-Bot",
endpoint="your_endpoint", qianfan_ak="your_ak", qianfan_sk="your_sk")
"""
model_kwargs: Dict[str, Any] = Field(default_factory=dict)
client: Any
qianfan_ak: Optional[str] = None
qianfan_sk: Optional[str] = None
streaming: Optional[bool] = False
"""Whether to stream the results or not."""
request_timeout: Optional[int] = 60
"""request timeout for chat http requests"""
top_p: Optional[float] = 0.8
temperature: Optional[float] = 0.95
penalty_score: Optional[float] = 1
"""Model params, only supported in ERNIE-Bot and ERNIE-Bot-turbo.
    In the case of other models, passing these params will not affect the result.
"""
model: str = "ERNIE-Bot-turbo"
"""Model name.
you could get from https://cloud.baidu.com/doc/WENXINWORKSHOP/s/Nlks5zkzu
preset models are mapping to an endpoint.
`model` will be ignored if `endpoint` is set.
Default is ERNIE-Bot-turbo.
"""
endpoint: Optional[str] = None
"""Endpoint of the Qianfan LLM, required if custom model used."""
@root_validator()
def validate_enviroment(cls, values: Dict) -> Dict:
values["qianfan_ak"] = get_from_dict_or_env(
values,
"qianfan_ak",
"QIANFAN_AK",
)
values["qianfan_sk"] = get_from_dict_or_env(
values,
"qianfan_sk",
"QIANFAN_SK",
)
params = {
"ak": values["qianfan_ak"],
"sk": values["qianfan_sk"],
"model": values["model"],
"stream": values["streaming"],
}
if values["endpoint"] is not None and values["endpoint"] != "":
params["endpoint"] = values["endpoint"]
try:
import qianfan
values["client"] = qianfan.ChatCompletion(**params)
except ImportError:
raise ValueError(
"qianfan package not found, please install it with "
"`pip install qianfan`"
)
return values
@property
def _identifying_params(self) -> Dict[str, Any]:
return {
**{"endpoint": self.endpoint, "model": self.model},
**super()._identifying_params,
}
@property
def _llm_type(self) -> str:
"""Return type of chat_model."""
return "baidu-qianfan-chat"
@property
def _default_params(self) -> Dict[str, Any]:
"""Get the default parameters for calling OpenAI API."""
normal_params = {
"stream": self.streaming,
"request_timeout": self.request_timeout,
"top_p": self.top_p,
"temperature": self.temperature,
"penalty_score": self.penalty_score,
}
return {**normal_params, **self.model_kwargs}
def _convert_prompt_msg_params(
self,
messages: List[BaseMessage],
**kwargs: Any,
) -> Dict[str, Any]:
"""
Converts a list of messages into a dictionary containing the message content
and default parameters.
Args:
messages (List[BaseMessage]): The list of messages.
**kwargs (Any): Optional arguments to add additional parameters to the
resulting dictionary.
Returns:
Dict[str, Any]: A dictionary containing the message content and default
parameters.
"""
messages_dict: Dict[str, Any] = {
"messages": [
convert_message_to_dict(m)
for m in messages
if not isinstance(m, SystemMessage)
]
}
for i in [i for i, m in enumerate(messages) if isinstance(m, SystemMessage)]:
if "system" not in messages_dict:
messages_dict["system"] = ""
messages_dict["system"] += messages[i].content + "\n"
return {
**messages_dict,
**self._default_params,
**kwargs,
}
def _generate(
self,
messages: List[BaseMessage],
stop: Optional[List[str]] = None,
run_manager: Optional[CallbackManagerForLLMRun] = None,
**kwargs: Any,
) -> ChatResult:
"""Call out to an qianfan models endpoint for each generation with a prompt.
Args:
messages: The messages to pass into the model.
stop: Optional list of stop words to use when generating.
Returns:
The string generated by the model.
Example:
.. code-block:: python
response = qianfan_model("Tell me a joke.")
"""
if self.streaming:
completion = ""
for chunk in self._stream(messages, stop, run_manager, **kwargs):
completion += chunk.text
lc_msg = AIMessage(content=completion, additional_kwargs={})
gen = ChatGeneration(
message=lc_msg,
generation_info=dict(finish_reason="stop"),
)
return ChatResult(
generations=[gen],
llm_output={"token_usage": {}, "model_name": self.model},
)
params = self._convert_prompt_msg_params(messages, **kwargs)
response_payload = self.client.do(**params)
lc_msg = AIMessage(content=response_payload["result"], additional_kwargs={})
gen = ChatGeneration(
message=lc_msg,
generation_info=dict(finish_reason="stop"),
)
token_usage = response_payload.get("usage", {})
llm_output = {"token_usage": token_usage, "model_name": self.model}
return ChatResult(generations=[gen], llm_output=llm_output)
async def _agenerate(
self,
messages: List[BaseMessage],
stop: Optional[List[str]] = None,
run_manager: Optional[AsyncCallbackManagerForLLMRun] = None,
**kwargs: Any,
) -> ChatResult:
if self.streaming:
completion = ""
token_usage = {}
async for chunk in self._astream(messages, stop, run_manager, **kwargs):
completion += chunk.text
lc_msg = AIMessage(content=completion, additional_kwargs={})
gen = ChatGeneration(
message=lc_msg,
generation_info=dict(finish_reason="stop"),
)
return ChatResult(
generations=[gen],
llm_output={"token_usage": {}, "model_name": self.model},
)
params = self._convert_prompt_msg_params(messages, **kwargs)
response_payload = await self.client.ado(**params)
lc_msg = AIMessage(content=response_payload["result"], additional_kwargs={})
generations = []
gen = ChatGeneration(
message=lc_msg,
generation_info=dict(finish_reason="stop"),
)
generations.append(gen)
token_usage = response_payload.get("usage", {})
llm_output = {"token_usage": token_usage, "model_name": self.model}
return ChatResult(generations=generations, llm_output=llm_output)
def _stream(
self,
messages: List[BaseMessage],
stop: Optional[List[str]] = None,
run_manager: Optional[CallbackManagerForLLMRun] = None,
**kwargs: Any,
) -> Iterator[ChatGenerationChunk]:
params = self._convert_prompt_msg_params(messages, **kwargs)
for res in self.client.do(**params):
if res:
chunk = ChatGenerationChunk(
text=res["result"],
message=_convert_resp_to_message_chunk(res),
)
yield chunk
if run_manager:
run_manager.on_llm_new_token(chunk.text, chunk=chunk)
async def _astream(
self,
messages: List[BaseMessage],
stop: Optional[List[str]] = None,
run_manager: Optional[AsyncCallbackManagerForLLMRun] = None,
**kwargs: Any,
) -> AsyncIterator[ChatGenerationChunk]:
params = self._convert_prompt_msg_params(messages, **kwargs)
async for res in await self.client.ado(**params):
if res:
chunk = ChatGenerationChunk(
text=res["result"],
message=_convert_resp_to_message_chunk(res),
)
yield chunk
if run_manager:
await run_manager.on_llm_new_token(chunk.text, chunk=chunk)
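# Hedged usage sketch (added example, not part of the original module): the
# ak/sk values are placeholders; a real call needs the `qianfan` package
# installed and valid Qianfan credentials.
def _example_qianfan_chat() -> None:
    chat = QianfanChatEndpoint(qianfan_ak="your_ak", qianfan_sk="your_sk")
    reply = chat([HumanMessage(content="Tell me a joke.")])
    print(reply.content)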
| [] |
2024-01-10 | mwitiderrick/langchain | libs~langchain~langchain~cache.py | """
.. warning::
Beta Feature!
**Cache** provides an optional caching layer for LLMs.
Cache is useful for two reasons:
- It can save you money by reducing the number of API calls you make to the LLM
provider if you're often requesting the same completion multiple times.
- It can speed up your application by reducing the number of API calls you make
to the LLM provider.
Cache directly competes with Memory. See documentation for Pros and Cons.
**Class hierarchy:**
.. code-block::
BaseCache --> <name>Cache # Examples: InMemoryCache, RedisCache, GPTCache
"""
from __future__ import annotations
import hashlib
import inspect
import json
import logging
import uuid
import warnings
from datetime import timedelta
from functools import lru_cache
from typing import (
TYPE_CHECKING,
Any,
Callable,
Dict,
List,
Optional,
Tuple,
Type,
Union,
cast,
)
from sqlalchemy import Column, Integer, Row, String, create_engine, select
from sqlalchemy.engine.base import Engine
from sqlalchemy.orm import Session
try:
from sqlalchemy.orm import declarative_base
except ImportError:
from sqlalchemy.ext.declarative import declarative_base
from langchain.llms.base import LLM, get_prompts
from langchain.load.dump import dumps
from langchain.load.load import loads
from langchain.schema import ChatGeneration, Generation
from langchain.schema.cache import RETURN_VAL_TYPE, BaseCache
from langchain.schema.embeddings import Embeddings
from langchain.utils import get_from_env
from langchain.vectorstores.redis import Redis as RedisVectorstore
logger = logging.getLogger(__file__)
if TYPE_CHECKING:
import momento
from cassandra.cluster import Session as CassandraSession
def _hash(_input: str) -> str:
"""Use a deterministic hashing approach."""
return hashlib.md5(_input.encode()).hexdigest()
def _dump_generations_to_json(generations: RETURN_VAL_TYPE) -> str:
"""Dump generations to json.
Args:
generations (RETURN_VAL_TYPE): A list of language model generations.
Returns:
str: Json representing a list of generations.
Warning: would not work well with arbitrary subclasses of `Generation`
"""
return json.dumps([generation.dict() for generation in generations])
def _load_generations_from_json(generations_json: str) -> RETURN_VAL_TYPE:
"""Load generations from json.
Args:
generations_json (str): A string of json representing a list of generations.
Raises:
ValueError: Could not decode json string to list of generations.
Returns:
RETURN_VAL_TYPE: A list of generations.
Warning: would not work well with arbitrary subclasses of `Generation`
"""
try:
results = json.loads(generations_json)
return [Generation(**generation_dict) for generation_dict in results]
except json.JSONDecodeError:
raise ValueError(
f"Could not decode json to list of generations: {generations_json}"
)
def _dumps_generations(generations: RETURN_VAL_TYPE) -> str:
"""
Serialization for generic RETURN_VAL_TYPE, i.e. sequence of `Generation`
Args:
generations (RETURN_VAL_TYPE): A list of language model generations.
Returns:
str: a single string representing a list of generations.
    This function (+ its counterpart `_loads_generations`) relies on
    the dumps/loads pair with Reviver, so it is able to deal
    with all subclasses of Generation.
    Each item in the list is `dumps`ed to a string,
    then the whole list of strings is dumped as json.
"""
return json.dumps([dumps(_item) for _item in generations])
def _loads_generations(generations_str: str) -> Union[RETURN_VAL_TYPE, None]:
"""
Deserialization of a string into a generic RETURN_VAL_TYPE
(i.e. a sequence of `Generation`).
See `_dumps_generations`, the inverse of this function.
Args:
generations_str (str): A string representing a list of generations.
Compatible with the legacy cache-blob format
Does not raise exceptions for malformed entries, just logs a warning
    and returns None: the caller should be prepared for such a cache miss.
Returns:
RETURN_VAL_TYPE: A list of generations.
"""
try:
generations = [loads(_item_str) for _item_str in json.loads(generations_str)]
return generations
except (json.JSONDecodeError, TypeError):
# deferring the (soft) handling to after the legacy-format attempt
pass
try:
gen_dicts = json.loads(generations_str)
# not relying on `_load_generations_from_json` (which could disappear):
generations = [Generation(**generation_dict) for generation_dict in gen_dicts]
logger.warning(
f"Legacy 'Generation' cached blob encountered: '{generations_str}'"
)
return generations
except (json.JSONDecodeError, TypeError):
logger.warning(
f"Malformed/unparsable cached blob encountered: '{generations_str}'"
)
return None
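# Added illustrative sketch (not in the original file): round-tripping a list
# of Generation objects through the serialization helpers above, mirroring
# what the cache backends below do internally.
def _example_generations_roundtrip() -> None:
    gens = [Generation(text="hello"), Generation(text="world")]
    blob = _dumps_generations(gens)
    assert _loads_generations(blob) == gens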
class InMemoryCache(BaseCache):
"""Cache that stores things in memory."""
def __init__(self) -> None:
"""Initialize with empty cache."""
self._cache: Dict[Tuple[str, str], RETURN_VAL_TYPE] = {}
def lookup(self, prompt: str, llm_string: str) -> Optional[RETURN_VAL_TYPE]:
"""Look up based on prompt and llm_string."""
return self._cache.get((prompt, llm_string), None)
def update(self, prompt: str, llm_string: str, return_val: RETURN_VAL_TYPE) -> None:
"""Update cache based on prompt and llm_string."""
self._cache[(prompt, llm_string)] = return_val
def clear(self, **kwargs: Any) -> None:
"""Clear cache."""
self._cache = {}
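# Added illustrative sketch (not in the original file): the lookup/update
# contract shared by all cache backends, shown with the in-memory cache.
def _example_inmemory_cache() -> None:
    cache = InMemoryCache()
    assert cache.lookup("Tell me a joke.", "llm-config-string") is None
    cache.update("Tell me a joke.", "llm-config-string", [Generation(text="...")])
    cached = cache.lookup("Tell me a joke.", "llm-config-string")
    assert cached is not None and cached[0].text == "..."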
Base = declarative_base()
class FullLLMCache(Base): # type: ignore
"""SQLite table for full LLM Cache (all generations)."""
__tablename__ = "full_llm_cache"
prompt = Column(String, primary_key=True)
llm = Column(String, primary_key=True)
idx = Column(Integer, primary_key=True)
response = Column(String)
class SQLAlchemyCache(BaseCache):
"""Cache that uses SQAlchemy as a backend."""
def __init__(self, engine: Engine, cache_schema: Type[FullLLMCache] = FullLLMCache):
"""Initialize by creating all tables."""
self.engine = engine
self.cache_schema = cache_schema
self.cache_schema.metadata.create_all(self.engine)
def lookup(self, prompt: str, llm_string: str) -> Optional[RETURN_VAL_TYPE]:
"""Look up based on prompt and llm_string."""
stmt = (
select(self.cache_schema.response)
.where(self.cache_schema.prompt == prompt) # type: ignore
.where(self.cache_schema.llm == llm_string)
.order_by(self.cache_schema.idx)
)
with Session(self.engine) as session:
rows = session.execute(stmt).fetchall()
if rows:
try:
return [loads(row[0]) for row in rows]
except Exception:
logger.warning(
"Retrieving a cache value that could not be deserialized "
"properly. This is likely due to the cache being in an "
"older format. Please recreate your cache to avoid this "
"error."
)
# In a previous life we stored the raw text directly
# in the table, so assume it's in that format.
return [Generation(text=row[0]) for row in rows]
return None
def update(self, prompt: str, llm_string: str, return_val: RETURN_VAL_TYPE) -> None:
"""Update based on prompt and llm_string."""
items = [
self.cache_schema(prompt=prompt, llm=llm_string, response=dumps(gen), idx=i)
for i, gen in enumerate(return_val)
]
with Session(self.engine) as session, session.begin():
for item in items:
session.merge(item)
def clear(self, **kwargs: Any) -> None:
"""Clear cache."""
with Session(self.engine) as session:
session.query(self.cache_schema).delete()
session.commit()
class SQLiteCache(SQLAlchemyCache):
"""Cache that uses SQLite as a backend."""
def __init__(self, database_path: str = ".langchain.db"):
"""Initialize by creating the engine and all tables."""
engine = create_engine(f"sqlite:///{database_path}")
super().__init__(engine)
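# Hedged usage sketch (added example, not part of the original module): wiring
# the SQLite cache into langchain's global LLM cache. The database path is a
# placeholder.
def _example_sqlite_cache() -> None:
    import langchain
    langchain.llm_cache = SQLiteCache(database_path=".example_langchain.db")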
class UpstashRedisCache(BaseCache):
"""Cache that uses Upstash Redis as a backend."""
def __init__(self, redis_: Any, *, ttl: Optional[int] = None):
"""
Initialize an instance of UpstashRedisCache.
This method initializes an object with Upstash Redis caching capabilities.
It takes a `redis_` parameter, which should be an instance of an Upstash Redis
client class, allowing the object to interact with Upstash Redis
server for caching purposes.
Parameters:
redis_: An instance of Upstash Redis client class
(e.g., Redis) used for caching.
This allows the object to communicate with
                a Redis server for caching operations.
ttl (int, optional): Time-to-live (TTL) for cached items in seconds.
If provided, it sets the time duration for how long cached
items will remain valid. If not provided, cached items will not
have an automatic expiration.
"""
try:
from upstash_redis import Redis
except ImportError:
raise ValueError(
"Could not import upstash_redis python package. "
"Please install it with `pip install upstash_redis`."
)
if not isinstance(redis_, Redis):
raise ValueError("Please pass in Upstash Redis object.")
self.redis = redis_
self.ttl = ttl
def _key(self, prompt: str, llm_string: str) -> str:
"""Compute key from prompt and llm_string"""
return _hash(prompt + llm_string)
def lookup(self, prompt: str, llm_string: str) -> Optional[RETURN_VAL_TYPE]:
"""Look up based on prompt and llm_string."""
generations = []
# Read from a HASH
results = self.redis.hgetall(self._key(prompt, llm_string))
if results:
for _, text in results.items():
generations.append(Generation(text=text))
return generations if generations else None
def update(self, prompt: str, llm_string: str, return_val: RETURN_VAL_TYPE) -> None:
"""Update cache based on prompt and llm_string."""
for gen in return_val:
if not isinstance(gen, Generation):
raise ValueError(
"UpstashRedisCache supports caching of normal LLM generations, "
f"got {type(gen)}"
)
if isinstance(gen, ChatGeneration):
warnings.warn(
"NOTE: Generation has not been cached. UpstashRedisCache does not"
" support caching ChatModel outputs."
)
return
# Write to a HASH
key = self._key(prompt, llm_string)
mapping = {
str(idx): generation.text for idx, generation in enumerate(return_val)
}
self.redis.hset(key=key, values=mapping)
if self.ttl is not None:
self.redis.expire(key, self.ttl)
def clear(self, **kwargs: Any) -> None:
"""
Clear cache. If `asynchronous` is True, flush asynchronously.
This flushes the *whole* db.
"""
asynchronous = kwargs.get("asynchronous", False)
if asynchronous:
asynchronous = "ASYNC"
else:
asynchronous = "SYNC"
self.redis.flushdb(flush_type=asynchronous)
class RedisCache(BaseCache):
"""Cache that uses Redis as a backend."""
def __init__(self, redis_: Any, *, ttl: Optional[int] = None):
"""
Initialize an instance of RedisCache.
This method initializes an object with Redis caching capabilities.
It takes a `redis_` parameter, which should be an instance of a Redis
client class, allowing the object to interact with a Redis
server for caching purposes.
Parameters:
redis_ (Any): An instance of a Redis client class
(e.g., redis.Redis) used for caching.
This allows the object to communicate with a
Redis server for caching operations.
ttl (int, optional): Time-to-live (TTL) for cached items in seconds.
If provided, it sets the time duration for how long cached
items will remain valid. If not provided, cached items will not
have an automatic expiration.
"""
try:
from redis import Redis
except ImportError:
raise ValueError(
"Could not import redis python package. "
"Please install it with `pip install redis`."
)
if not isinstance(redis_, Redis):
raise ValueError("Please pass in Redis object.")
self.redis = redis_
self.ttl = ttl
def _key(self, prompt: str, llm_string: str) -> str:
"""Compute key from prompt and llm_string"""
return _hash(prompt + llm_string)
def lookup(self, prompt: str, llm_string: str) -> Optional[RETURN_VAL_TYPE]:
"""Look up based on prompt and llm_string."""
generations = []
# Read from a Redis HASH
results = self.redis.hgetall(self._key(prompt, llm_string))
if results:
for _, text in results.items():
try:
generations.append(loads(text))
except Exception:
logger.warning(
"Retrieving a cache value that could not be deserialized "
"properly. This is likely due to the cache being in an "
"older format. Please recreate your cache to avoid this "
"error."
)
# In a previous life we stored the raw text directly
# in the table, so assume it's in that format.
generations.append(Generation(text=text))
return generations if generations else None
def update(self, prompt: str, llm_string: str, return_val: RETURN_VAL_TYPE) -> None:
"""Update cache based on prompt and llm_string."""
for gen in return_val:
if not isinstance(gen, Generation):
raise ValueError(
"RedisCache only supports caching of normal LLM generations, "
f"got {type(gen)}"
)
# Write to a Redis HASH
key = self._key(prompt, llm_string)
with self.redis.pipeline() as pipe:
pipe.hset(
key,
mapping={
str(idx): dumps(generation)
for idx, generation in enumerate(return_val)
},
)
if self.ttl is not None:
pipe.expire(key, self.ttl)
pipe.execute()
def clear(self, **kwargs: Any) -> None:
"""Clear cache. If `asynchronous` is True, flush asynchronously."""
asynchronous = kwargs.get("asynchronous", False)
self.redis.flushdb(asynchronous=asynchronous, **kwargs)
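# Hedged usage sketch (added example, not part of the original module): the
# Redis URL is a placeholder; a running Redis server and the `redis` package
# are required for this to work.
def _example_redis_cache() -> None:
    import langchain
    from redis import Redis
    client = Redis.from_url("redis://localhost:6379")
    langchain.llm_cache = RedisCache(redis_=client, ttl=3600)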
class RedisSemanticCache(BaseCache):
"""Cache that uses Redis as a vector-store backend."""
# TODO - implement a TTL policy in Redis
DEFAULT_SCHEMA = {
"content_key": "prompt",
"text": [
{"name": "prompt"},
],
"extra": [{"name": "return_val"}, {"name": "llm_string"}],
}
def __init__(
self, redis_url: str, embedding: Embeddings, score_threshold: float = 0.2
):
"""Initialize by passing in the `init` GPTCache func
Args:
redis_url (str): URL to connect to Redis.
embedding (Embedding): Embedding provider for semantic encoding and search.
            score_threshold (float, 0.2): similarity score threshold to accept a cached hit.
Example:
.. code-block:: python
import langchain
from langchain.cache import RedisSemanticCache
from langchain.embeddings import OpenAIEmbeddings
langchain.llm_cache = RedisSemanticCache(
redis_url="redis://localhost:6379",
embedding=OpenAIEmbeddings()
)
"""
self._cache_dict: Dict[str, RedisVectorstore] = {}
self.redis_url = redis_url
self.embedding = embedding
self.score_threshold = score_threshold
def _index_name(self, llm_string: str) -> str:
hashed_index = _hash(llm_string)
return f"cache:{hashed_index}"
def _get_llm_cache(self, llm_string: str) -> RedisVectorstore:
index_name = self._index_name(llm_string)
# return vectorstore client for the specific llm string
if index_name in self._cache_dict:
return self._cache_dict[index_name]
# create new vectorstore client for the specific llm string
try:
self._cache_dict[index_name] = RedisVectorstore.from_existing_index(
embedding=self.embedding,
index_name=index_name,
redis_url=self.redis_url,
schema=cast(Dict, self.DEFAULT_SCHEMA),
)
except ValueError:
redis = RedisVectorstore(
embedding=self.embedding,
index_name=index_name,
redis_url=self.redis_url,
index_schema=cast(Dict, self.DEFAULT_SCHEMA),
)
_embedding = self.embedding.embed_query(text="test")
redis._create_index(dim=len(_embedding))
self._cache_dict[index_name] = redis
return self._cache_dict[index_name]
def clear(self, **kwargs: Any) -> None:
"""Clear semantic cache for a given llm_string."""
index_name = self._index_name(kwargs["llm_string"])
if index_name in self._cache_dict:
self._cache_dict[index_name].drop_index(
index_name=index_name, delete_documents=True, redis_url=self.redis_url
)
del self._cache_dict[index_name]
def lookup(self, prompt: str, llm_string: str) -> Optional[RETURN_VAL_TYPE]:
"""Look up based on prompt and llm_string."""
llm_cache = self._get_llm_cache(llm_string)
generations: List = []
# Read from a Hash
results = llm_cache.similarity_search(
query=prompt,
k=1,
distance_threshold=self.score_threshold,
)
if results:
for document in results:
try:
generations.extend(loads(document.metadata["return_val"]))
except Exception:
logger.warning(
"Retrieving a cache value that could not be deserialized "
"properly. This is likely due to the cache being in an "
"older format. Please recreate your cache to avoid this "
"error."
)
# In a previous life we stored the raw text directly
# in the table, so assume it's in that format.
generations.extend(
_load_generations_from_json(document.metadata["return_val"])
)
return generations if generations else None
def update(self, prompt: str, llm_string: str, return_val: RETURN_VAL_TYPE) -> None:
"""Update cache based on prompt and llm_string."""
for gen in return_val:
if not isinstance(gen, Generation):
raise ValueError(
"RedisSemanticCache only supports caching of "
f"normal LLM generations, got {type(gen)}"
)
llm_cache = self._get_llm_cache(llm_string)
metadata = {
"llm_string": llm_string,
"prompt": prompt,
"return_val": dumps([g for g in return_val]),
}
llm_cache.add_texts(texts=[prompt], metadatas=[metadata])
class GPTCache(BaseCache):
"""Cache that uses GPTCache as a backend."""
def __init__(
self,
init_func: Union[
Callable[[Any, str], None], Callable[[Any], None], None
] = None,
):
"""Initialize by passing in init function (default: `None`).
Args:
init_func (Optional[Callable[[Any], None]]): init `GPTCache` function
(default: `None`)
Example:
.. code-block:: python
# Initialize GPTCache with a custom init function
import gptcache
from gptcache.processor.pre import get_prompt
            from gptcache.manager.factory import manager_factory
            # Avoid multiple caches using the same file,
            # causing different llm model caches to affect each other
            def init_gptcache(cache_obj: gptcache.Cache, llm: str):
cache_obj.init(
pre_embedding_func=get_prompt,
data_manager=manager_factory(
manager="map",
data_dir=f"map_cache_{llm}"
),
)
langchain.llm_cache = GPTCache(init_gptcache)
"""
try:
import gptcache # noqa: F401
except ImportError:
raise ImportError(
"Could not import gptcache python package. "
"Please install it with `pip install gptcache`."
)
self.init_gptcache_func: Union[
Callable[[Any, str], None], Callable[[Any], None], None
] = init_func
self.gptcache_dict: Dict[str, Any] = {}
def _new_gptcache(self, llm_string: str) -> Any:
"""New gptcache object"""
from gptcache import Cache
from gptcache.manager.factory import get_data_manager
from gptcache.processor.pre import get_prompt
_gptcache = Cache()
if self.init_gptcache_func is not None:
sig = inspect.signature(self.init_gptcache_func)
if len(sig.parameters) == 2:
self.init_gptcache_func(_gptcache, llm_string) # type: ignore[call-arg]
else:
self.init_gptcache_func(_gptcache) # type: ignore[call-arg]
else:
_gptcache.init(
pre_embedding_func=get_prompt,
data_manager=get_data_manager(data_path=llm_string),
)
self.gptcache_dict[llm_string] = _gptcache
return _gptcache
def _get_gptcache(self, llm_string: str) -> Any:
"""Get a cache object.
When the corresponding llm model cache does not exist, it will be created."""
_gptcache = self.gptcache_dict.get(llm_string, None)
if not _gptcache:
_gptcache = self._new_gptcache(llm_string)
return _gptcache
def lookup(self, prompt: str, llm_string: str) -> Optional[RETURN_VAL_TYPE]:
"""Look up the cache data.
First, retrieve the corresponding cache object using the `llm_string` parameter,
and then retrieve the data from the cache based on the `prompt`.
"""
from gptcache.adapter.api import get
_gptcache = self._get_gptcache(llm_string)
res = get(prompt, cache_obj=_gptcache)
if res:
return [
Generation(**generation_dict) for generation_dict in json.loads(res)
]
return None
def update(self, prompt: str, llm_string: str, return_val: RETURN_VAL_TYPE) -> None:
"""Update cache.
First, retrieve the corresponding cache object using the `llm_string` parameter,
and then store the `prompt` and `return_val` in the cache object.
"""
for gen in return_val:
if not isinstance(gen, Generation):
raise ValueError(
"GPTCache only supports caching of normal LLM generations, "
f"got {type(gen)}"
)
from gptcache.adapter.api import put
_gptcache = self._get_gptcache(llm_string)
handled_data = json.dumps([generation.dict() for generation in return_val])
put(prompt, handled_data, cache_obj=_gptcache)
return None
def clear(self, **kwargs: Any) -> None:
"""Clear cache."""
from gptcache import Cache
for gptcache_instance in self.gptcache_dict.values():
gptcache_instance = cast(Cache, gptcache_instance)
gptcache_instance.flush()
self.gptcache_dict.clear()
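# Hedged usage sketch (added example, not part of the original module): using
# GPTCache with its default per-llm data manager. Requires the `gptcache`
# package; per-llm cache objects are created lazily on first lookup/update.
def _example_gptcache() -> None:
    import langchain
    langchain.llm_cache = GPTCache()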
def _ensure_cache_exists(cache_client: momento.CacheClient, cache_name: str) -> None:
"""Create cache if it doesn't exist.
Raises:
SdkException: Momento service or network error
Exception: Unexpected response
"""
from momento.responses import CreateCache
create_cache_response = cache_client.create_cache(cache_name)
if isinstance(create_cache_response, CreateCache.Success) or isinstance(
create_cache_response, CreateCache.CacheAlreadyExists
):
return None
elif isinstance(create_cache_response, CreateCache.Error):
raise create_cache_response.inner_exception
else:
raise Exception(f"Unexpected response cache creation: {create_cache_response}")
def _validate_ttl(ttl: Optional[timedelta]) -> None:
if ttl is not None and ttl <= timedelta(seconds=0):
raise ValueError(f"ttl must be positive but was {ttl}.")
class MomentoCache(BaseCache):
"""Cache that uses Momento as a backend. See https://gomomento.com/"""
def __init__(
self,
cache_client: momento.CacheClient,
cache_name: str,
*,
ttl: Optional[timedelta] = None,
ensure_cache_exists: bool = True,
):
"""Instantiate a prompt cache using Momento as a backend.
Note: to instantiate the cache client passed to MomentoCache,
you must have a Momento account. See https://gomomento.com/.
Args:
cache_client (CacheClient): The Momento cache client.
cache_name (str): The name of the cache to use to store the data.
ttl (Optional[timedelta], optional): The time to live for the cache items.
Defaults to None, ie use the client default TTL.
ensure_cache_exists (bool, optional): Create the cache if it doesn't
exist. Defaults to True.
Raises:
ImportError: Momento python package is not installed.
            TypeError: cache_client is not an instance of momento.CacheClient
            ValueError: ttl is non-null and non-positive
"""
try:
from momento import CacheClient
except ImportError:
raise ImportError(
"Could not import momento python package. "
"Please install it with `pip install momento`."
)
if not isinstance(cache_client, CacheClient):
raise TypeError("cache_client must be a momento.CacheClient object.")
_validate_ttl(ttl)
if ensure_cache_exists:
_ensure_cache_exists(cache_client, cache_name)
self.cache_client = cache_client
self.cache_name = cache_name
self.ttl = ttl
@classmethod
def from_client_params(
cls,
cache_name: str,
ttl: timedelta,
*,
configuration: Optional[momento.config.Configuration] = None,
api_key: Optional[str] = None,
auth_token: Optional[str] = None, # for backwards compatibility
**kwargs: Any,
) -> MomentoCache:
"""Construct cache from CacheClient parameters."""
try:
from momento import CacheClient, Configurations, CredentialProvider
except ImportError:
raise ImportError(
"Could not import momento python package. "
"Please install it with `pip install momento`."
)
if configuration is None:
configuration = Configurations.Laptop.v1()
# Try checking `MOMENTO_AUTH_TOKEN` first for backwards compatibility
try:
api_key = auth_token or get_from_env("auth_token", "MOMENTO_AUTH_TOKEN")
except ValueError:
api_key = api_key or get_from_env("api_key", "MOMENTO_API_KEY")
credentials = CredentialProvider.from_string(api_key)
cache_client = CacheClient(configuration, credentials, default_ttl=ttl)
return cls(cache_client, cache_name, ttl=ttl, **kwargs)
def __key(self, prompt: str, llm_string: str) -> str:
"""Compute cache key from prompt and associated model and settings.
Args:
prompt (str): The prompt run through the language model.
llm_string (str): The language model version and settings.
Returns:
str: The cache key.
"""
return _hash(prompt + llm_string)
def lookup(self, prompt: str, llm_string: str) -> Optional[RETURN_VAL_TYPE]:
"""Lookup llm generations in cache by prompt and associated model and settings.
Args:
prompt (str): The prompt run through the language model.
llm_string (str): The language model version and settings.
Raises:
SdkException: Momento service or network error
Returns:
Optional[RETURN_VAL_TYPE]: A list of language model generations.
"""
from momento.responses import CacheGet
generations: RETURN_VAL_TYPE = []
get_response = self.cache_client.get(
self.cache_name, self.__key(prompt, llm_string)
)
if isinstance(get_response, CacheGet.Hit):
value = get_response.value_string
generations = _load_generations_from_json(value)
elif isinstance(get_response, CacheGet.Miss):
pass
elif isinstance(get_response, CacheGet.Error):
raise get_response.inner_exception
return generations if generations else None
def update(self, prompt: str, llm_string: str, return_val: RETURN_VAL_TYPE) -> None:
"""Store llm generations in cache.
Args:
prompt (str): The prompt run through the language model.
llm_string (str): The language model string.
return_val (RETURN_VAL_TYPE): A list of language model generations.
Raises:
SdkException: Momento service or network error
Exception: Unexpected response
"""
for gen in return_val:
if not isinstance(gen, Generation):
raise ValueError(
"Momento only supports caching of normal LLM generations, "
f"got {type(gen)}"
)
key = self.__key(prompt, llm_string)
value = _dump_generations_to_json(return_val)
set_response = self.cache_client.set(self.cache_name, key, value, self.ttl)
from momento.responses import CacheSet
if isinstance(set_response, CacheSet.Success):
pass
elif isinstance(set_response, CacheSet.Error):
raise set_response.inner_exception
else:
raise Exception(f"Unexpected response: {set_response}")
def clear(self, **kwargs: Any) -> None:
"""Clear the cache.
Raises:
SdkException: Momento service or network error
"""
from momento.responses import CacheFlush
flush_response = self.cache_client.flush_cache(self.cache_name)
if isinstance(flush_response, CacheFlush.Success):
pass
elif isinstance(flush_response, CacheFlush.Error):
raise flush_response.inner_exception
CASSANDRA_CACHE_DEFAULT_TABLE_NAME = "langchain_llm_cache"
CASSANDRA_CACHE_DEFAULT_TTL_SECONDS = None
class CassandraCache(BaseCache):
"""
Cache that uses Cassandra / Astra DB as a backend.
It uses a single Cassandra table.
The lookup keys (which get to form the primary key) are:
- prompt, a string
- llm_string, a deterministic str representation of the model parameters.
          (needed to prevent same-prompt-different-model collisions)
"""
def __init__(
self,
session: Optional[CassandraSession] = None,
keyspace: Optional[str] = None,
table_name: str = CASSANDRA_CACHE_DEFAULT_TABLE_NAME,
ttl_seconds: Optional[int] = CASSANDRA_CACHE_DEFAULT_TTL_SECONDS,
skip_provisioning: bool = False,
):
"""
Initialize with a ready session and a keyspace name.
Args:
session (cassandra.cluster.Session): an open Cassandra session
keyspace (str): the keyspace to use for storing the cache
table_name (str): name of the Cassandra table to use as cache
ttl_seconds (optional int): time-to-live for cache entries
(default: None, i.e. forever)
"""
try:
from cassio.table import ElasticCassandraTable
except (ImportError, ModuleNotFoundError):
raise ValueError(
"Could not import cassio python package. "
"Please install it with `pip install cassio`."
)
self.session = session
self.keyspace = keyspace
self.table_name = table_name
self.ttl_seconds = ttl_seconds
self.kv_cache = ElasticCassandraTable(
session=self.session,
keyspace=self.keyspace,
table=self.table_name,
keys=["llm_string", "prompt"],
primary_key_type=["TEXT", "TEXT"],
ttl_seconds=self.ttl_seconds,
skip_provisioning=skip_provisioning,
)
def lookup(self, prompt: str, llm_string: str) -> Optional[RETURN_VAL_TYPE]:
"""Look up based on prompt and llm_string."""
item = self.kv_cache.get(
llm_string=_hash(llm_string),
prompt=_hash(prompt),
)
if item is not None:
generations = _loads_generations(item["body_blob"])
# this protects against malformed cached items:
if generations is not None:
return generations
else:
return None
else:
return None
def update(self, prompt: str, llm_string: str, return_val: RETURN_VAL_TYPE) -> None:
"""Update cache based on prompt and llm_string."""
blob = _dumps_generations(return_val)
self.kv_cache.put(
llm_string=_hash(llm_string),
prompt=_hash(prompt),
body_blob=blob,
)
def delete_through_llm(
self, prompt: str, llm: LLM, stop: Optional[List[str]] = None
) -> None:
"""
A wrapper around `delete` with the LLM being passed.
In case the llm(prompt) calls have a `stop` param, you should pass it here
"""
llm_string = get_prompts(
{**llm.dict(), **{"stop": stop}},
[],
)[1]
return self.delete(prompt, llm_string=llm_string)
def delete(self, prompt: str, llm_string: str) -> None:
"""Evict from cache if there's an entry."""
return self.kv_cache.delete(
llm_string=_hash(llm_string),
prompt=_hash(prompt),
)
def clear(self, **kwargs: Any) -> None:
"""Clear cache. This is for all LLMs at once."""
self.kv_cache.clear()
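# Hedged usage sketch (added example, not part of the original module): the
# keyspace and TTL are placeholders; `cassio` and a reachable Cassandra /
# Astra DB instance are required, and `session` must be an open
# cassandra.cluster.Session (None shown here only as a stand-in).
def _example_cassandra_cache() -> None:
    import langchain
    langchain.llm_cache = CassandraCache(
        session=None,  # placeholder: pass an open Cassandra session here
        keyspace="my_keyspace",
        ttl_seconds=3600,
    )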
CASSANDRA_SEMANTIC_CACHE_DEFAULT_DISTANCE_METRIC = "dot"
CASSANDRA_SEMANTIC_CACHE_DEFAULT_SCORE_THRESHOLD = 0.85
CASSANDRA_SEMANTIC_CACHE_DEFAULT_TABLE_NAME = "langchain_llm_semantic_cache"
CASSANDRA_SEMANTIC_CACHE_DEFAULT_TTL_SECONDS = None
CASSANDRA_SEMANTIC_CACHE_EMBEDDING_CACHE_SIZE = 16
class CassandraSemanticCache(BaseCache):
"""
Cache that uses Cassandra as a vector-store backend for semantic
(i.e. similarity-based) lookup.
It uses a single (vector) Cassandra table and stores, in principle,
cached values from several LLMs, so the LLM's llm_string is part
of the rows' primary keys.
The similarity is based on one of several distance metrics (default: "dot").
If choosing another metric, the default threshold is to be re-tuned accordingly.
"""
def __init__(
self,
session: Optional[CassandraSession],
keyspace: Optional[str],
embedding: Embeddings,
table_name: str = CASSANDRA_SEMANTIC_CACHE_DEFAULT_TABLE_NAME,
distance_metric: str = CASSANDRA_SEMANTIC_CACHE_DEFAULT_DISTANCE_METRIC,
score_threshold: float = CASSANDRA_SEMANTIC_CACHE_DEFAULT_SCORE_THRESHOLD,
ttl_seconds: Optional[int] = CASSANDRA_SEMANTIC_CACHE_DEFAULT_TTL_SECONDS,
skip_provisioning: bool = False,
):
"""
Initialize the cache with all relevant parameters.
Args:
session (cassandra.cluster.Session): an open Cassandra session
keyspace (str): the keyspace to use for storing the cache
embedding (Embedding): Embedding provider for semantic
encoding and search.
table_name (str): name of the Cassandra (vector) table
to use as cache
distance_metric (str, 'dot'): which measure to adopt for
similarity searches
score_threshold (optional float): numeric value to use as
cutoff for the similarity searches
ttl_seconds (optional int): time-to-live for cache entries
(default: None, i.e. forever)
The default score threshold is tuned to the default metric.
Tune it carefully yourself if switching to another distance metric.
"""
try:
from cassio.table import MetadataVectorCassandraTable
except (ImportError, ModuleNotFoundError):
raise ValueError(
"Could not import cassio python package. "
"Please install it with `pip install cassio`."
)
self.session = session
self.keyspace = keyspace
self.embedding = embedding
self.table_name = table_name
self.distance_metric = distance_metric
self.score_threshold = score_threshold
self.ttl_seconds = ttl_seconds
# The contract for this class has separate lookup and update:
# in order to spare some embedding calculations we cache them between
# the two calls.
# Note: each instance of this class has its own `_get_embedding` with
# its own lru.
@lru_cache(maxsize=CASSANDRA_SEMANTIC_CACHE_EMBEDDING_CACHE_SIZE)
def _cache_embedding(text: str) -> List[float]:
return self.embedding.embed_query(text=text)
self._get_embedding = _cache_embedding
self.embedding_dimension = self._get_embedding_dimension()
self.table = MetadataVectorCassandraTable(
session=self.session,
keyspace=self.keyspace,
table=self.table_name,
primary_key_type=["TEXT"],
vector_dimension=self.embedding_dimension,
ttl_seconds=self.ttl_seconds,
metadata_indexing=("allow", {"_llm_string_hash"}),
skip_provisioning=skip_provisioning,
)
def _get_embedding_dimension(self) -> int:
return len(self._get_embedding(text="This is a sample sentence."))
def update(self, prompt: str, llm_string: str, return_val: RETURN_VAL_TYPE) -> None:
"""Update cache based on prompt and llm_string."""
embedding_vector = self._get_embedding(text=prompt)
llm_string_hash = _hash(llm_string)
body = _dumps_generations(return_val)
metadata = {
"_prompt": prompt,
"_llm_string_hash": llm_string_hash,
}
row_id = f"{_hash(prompt)}-{llm_string_hash}"
#
self.table.put(
body_blob=body,
vector=embedding_vector,
row_id=row_id,
metadata=metadata,
)
def lookup(self, prompt: str, llm_string: str) -> Optional[RETURN_VAL_TYPE]:
"""Look up based on prompt and llm_string."""
hit_with_id = self.lookup_with_id(prompt, llm_string)
if hit_with_id is not None:
return hit_with_id[1]
else:
return None
def lookup_with_id(
self, prompt: str, llm_string: str
) -> Optional[Tuple[str, RETURN_VAL_TYPE]]:
"""
Look up based on prompt and llm_string.
If there are hits, return (document_id, cached_entry)
"""
prompt_embedding: List[float] = self._get_embedding(text=prompt)
hits = list(
self.table.metric_ann_search(
vector=prompt_embedding,
metadata={"_llm_string_hash": _hash(llm_string)},
n=1,
metric=self.distance_metric,
metric_threshold=self.score_threshold,
)
)
if hits:
hit = hits[0]
generations = _loads_generations(hit["body_blob"])
if generations is not None:
# this protects against malformed cached items:
return (
hit["row_id"],
generations,
)
else:
return None
else:
return None
def lookup_with_id_through_llm(
self, prompt: str, llm: LLM, stop: Optional[List[str]] = None
) -> Optional[Tuple[str, RETURN_VAL_TYPE]]:
llm_string = get_prompts(
{**llm.dict(), **{"stop": stop}},
[],
)[1]
return self.lookup_with_id(prompt, llm_string=llm_string)
def delete_by_document_id(self, document_id: str) -> None:
"""
Given this is a "similarity search" cache, an invalidation pattern
that makes sense is first a lookup to get an ID, and then deleting
with that ID. This is for the second step.
"""
self.table.delete(row_id=document_id)
def clear(self, **kwargs: Any) -> None:
"""Clear the *whole* semantic cache."""
self.table.clear()
class FullMd5LLMCache(Base): # type: ignore
"""SQLite table for full LLM Cache (all generations)."""
__tablename__ = "full_md5_llm_cache"
id = Column(String, primary_key=True)
prompt_md5 = Column(String, index=True)
llm = Column(String, index=True)
idx = Column(Integer, index=True)
prompt = Column(String)
response = Column(String)
class SQLAlchemyMd5Cache(BaseCache):
"""Cache that uses SQAlchemy as a backend."""
def __init__(
self, engine: Engine, cache_schema: Type[FullMd5LLMCache] = FullMd5LLMCache
):
"""Initialize by creating all tables."""
self.engine = engine
self.cache_schema = cache_schema
self.cache_schema.metadata.create_all(self.engine)
def lookup(self, prompt: str, llm_string: str) -> Optional[RETURN_VAL_TYPE]:
"""Look up based on prompt and llm_string."""
rows = self._search_rows(prompt, llm_string)
if rows:
return [loads(row[0]) for row in rows]
return None
def update(self, prompt: str, llm_string: str, return_val: RETURN_VAL_TYPE) -> None:
"""Update based on prompt and llm_string."""
self._delete_previous(prompt, llm_string)
prompt_md5 = self.get_md5(prompt)
items = [
self.cache_schema(
id=str(uuid.uuid1()),
prompt=prompt,
prompt_md5=prompt_md5,
llm=llm_string,
response=dumps(gen),
idx=i,
)
for i, gen in enumerate(return_val)
]
with Session(self.engine) as session, session.begin():
for item in items:
session.merge(item)
def _delete_previous(self, prompt: str, llm_string: str) -> None:
stmt = (
select(self.cache_schema.response)
.where(self.cache_schema.prompt_md5 == self.get_md5(prompt)) # type: ignore
.where(self.cache_schema.llm == llm_string)
.where(self.cache_schema.prompt == prompt)
.order_by(self.cache_schema.idx)
)
with Session(self.engine) as session, session.begin():
rows = session.execute(stmt).fetchall()
for item in rows:
session.delete(item)
def _search_rows(self, prompt: str, llm_string: str) -> List[Row]:
        prompt_md5 = self.get_md5(prompt)
        stmt = (
            select(self.cache_schema.response)
            .where(self.cache_schema.prompt_md5 == prompt_md5)  # type: ignore
.where(self.cache_schema.llm == llm_string)
.where(self.cache_schema.prompt == prompt)
.order_by(self.cache_schema.idx)
)
with Session(self.engine) as session:
return session.execute(stmt).fetchall()
def clear(self, **kwargs: Any) -> None:
"""Clear cache."""
with Session(self.engine) as session:
session.execute(self.cache_schema.delete())
@staticmethod
def get_md5(input_string: str) -> str:
return hashlib.md5(input_string.encode()).hexdigest()
| [
"prompt"
] |