date_collected (stringclasses, 1 value) | repo_name (stringlengths 6-116) | file_name (stringlengths 2-220) | file_contents (stringlengths 13-357k) | prompts (sequence) |
---|---|---|---|---|
2024-01-10 | BlackHC/llm-strategy | llm_strategy~tests~manual_text_json_response_format.py | # Add a manual test that makes sure that we can use the json response format for OpenAI
import openai
from langchain_community.chat_models import ChatOpenAI
from llm_strategy.chat_chain import ChatChain
base_llm = ChatOpenAI(max_tokens=64, model_name="gpt-3.5-turbo-1106")
# %%
chain = ChatChain(base_llm, [])
try:
response = chain.query("What is 1+1? Give your reasoning, too.", model_args=chain.enforce_json_response())
except openai.BadRequestError as e:
assert e.status_code == 400
assert "must contain the word 'json'" in e.message
else:
raise AssertionError("Expected an exception")
| [] |
2024-01-10 | BlackHC/llm-strategy | llm_strategy~testing~tests~test_fake_chat_model.py | # Blackboard-PAGI - LLM Proto-AGI using the Blackboard Pattern
# Copyright (c) 2023. Andreas Kirsch
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published
# by the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
import pytest
from langchain.schema import AIMessage, HumanMessage, SystemMessage
from llm_strategy.testing import fake_chat_model
def test_fake_chat_model_query():
"""Test that the fake LLM returns the correct query."""
chat_model = fake_chat_model.FakeChatModel.from_messages(
[[SystemMessage(content="foo"), HumanMessage(content="bar"), AIMessage(content="doo")]]
)
assert chat_model([SystemMessage(content="foo"), HumanMessage(content="bar")]) == AIMessage(content="doo")
def test_fake_chat_model_query_with_stop_raise():
"""Test that the fake LLM returns the correct query."""
chat_model = fake_chat_model.FakeChatModel.from_messages(
[[SystemMessage(content="foo"), HumanMessage(content="bar"), AIMessage(content="doo")]]
)
with pytest.raises(AssertionError):
chat_model([SystemMessage(content="foo"), HumanMessage(content="bar")], stop=["a"])
def test_chat_model_llm_missing_query():
"""Test that the fake LLM raises an error if the query is missing."""
chat_model = fake_chat_model.FakeChatModel(messages_tuples_bag=set())
with pytest.raises(NotImplementedError):
chat_model([SystemMessage(content="foo"), HumanMessage(content="bar")])
| [
"bar",
"foo",
"doo"
] |
2024-01-10 | BlackHC/llm-strategy | llm_strategy~tests~test_adapters.py | from langchain.schema import AIMessage, HumanMessage
from llm_strategy import adapters
from llm_strategy.testing import fake_chat_model, fake_llm
def test_chat_model_as_llm():
chat_model = fake_chat_model.FakeChatModel.from_messages(
[[HumanMessage(content="Hello", additional_kwargs={}), AIMessage(content="World", additional_kwargs={})]]
)
chat_model_as_llm = adapters.ChatModelAsLLM(chat_model=chat_model)
assert chat_model_as_llm("Hello") == "World"
def test_llm_as_chat_model():
llm = fake_llm.FakeLLM(texts={"<|im_start|>user\nHello<|im_end|><|im_start|>assistant\nWorld<|im_end|>"})
chat_model_as_llm = adapters.LLMAsChatModel(llm=llm)
assert chat_model_as_llm([HumanMessage(content="Hello")]) == AIMessage(content="World")
| [
"World",
"Hello"
] |
2024-01-10 | BlackHC/llm-strategy | llm_strategy~llm_strategy.py | # type: ignore
import dataclasses
import functools # noqa: F401
import inspect
import typing
from dataclasses import dataclass
import typing_extensions
from langchain.llms.base import BaseLLM
from llm_strategy.llm_function import (
LLMFunction,
apply_decorator,
is_not_implemented,
unwrap_function,
)
P = typing_extensions.ParamSpec("P")
T = typing.TypeVar("T")
def can_wrap_function_in_llm(f: typing.Callable[P, T]) -> bool:
"""
Return True if f can be wrapped in an LLMCall.
"""
unwrapped = unwrap_function(f)
if not callable(unwrapped):
return False
if inspect.isgeneratorfunction(unwrapped):
return False
if inspect.iscoroutinefunction(unwrapped):
return False
if not inspect.isfunction(unwrapped) and not inspect.ismethod(unwrapped):
return False
return is_not_implemented(unwrapped)
def llm_strategy(llm: BaseLLM) -> typing.Callable[[T], T]: # noqa: C901
"""
A strategy that implements whatever it decorates (or is called on) using the LLM.
"""
@typing.no_type_check
def decorator(f: T) -> T:
assert can_wrap_member_in_llm(f)
# For an instance of dataclass, call llm_strategy_dataclass with the fields.
if dataclasses.is_dataclass(f):
if isinstance(f, type):
return llm_dataclass(f, llm)
else:
implemented_dataclass = llm_dataclass(type(f), llm)
# Create an instance of the implemented dataclass using the fields from f
params = {field.name: getattr(f, field.name) for field in dataclasses.fields(f)}
return implemented_dataclass(**params)
else:
def inner_decorator(unwrapped_f):
llm_f = None
@functools.wraps(unwrapped_f)
def strategy_wrapper(*args, **kwargs):
nonlocal llm_f
if llm_f is None:
# Get the signature of f
sig = inspect.signature(unwrapped_f, eval_str=True)
# Add a llm parameter to the signature as first argument
new_params = [inspect.Parameter("__llm", inspect.Parameter.POSITIONAL_ONLY)]
new_params.extend(sig.parameters.values())
new_sig = sig.replace(parameters=new_params)
def dummy_f(*args, **kwargs):
raise NotImplementedError()
new_f = functools.wraps(unwrapped_f)(dummy_f)
new_f.__module__ = unwrapped_f.__module__
# Set the signature of the new function
new_f.__signature__ = new_sig
del new_f.__wrapped__
# Wrap the function in an LLMFunction
llm_f = functools.wraps(new_f)(LLMFunction())
return llm_f(llm, *args, **kwargs)
return strategy_wrapper
return apply_decorator(f, inner_decorator)
return decorator
def can_wrap_member_in_llm(f: typing.Callable[P, T]) -> bool:
"""
Return True if f can be wrapped in an LLMCall.
"""
if isinstance(f, LLMFunction):
return True
if dataclasses.is_dataclass(f):
return True
return can_wrap_function_in_llm(f)
@typing_extensions.dataclass_transform()
def llm_dataclass(dataclass_type: type, llm: BaseLLM) -> type:
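# Note: module-level globals with deliberately unlikely names are used so that the exec'd
# snippet inside the class body below can see the llm and the member being wrapped without
# clobbering anything else in this module's namespace.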
global long_unlikely_prefix__llm
global long_unlikely__member_name, long_unlikely__member
long_unlikely_prefix__llm = llm
long_unlikely__member_name, long_unlikely__member = None, None
@dataclass
class SpecificLLMImplementation(dataclass_type):
global long_unlikely__member_name, long_unlikely__member
for long_unlikely__member_name, long_unlikely__member in inspect.getmembers_static( # noqa: B007
dataclass_type, can_wrap_member_in_llm
):
exec(
f"""
@llm_strategy(long_unlikely_prefix__llm)
@functools.wraps(long_unlikely__member)
def {long_unlikely__member_name}(*args, **kwargs):
raise NotImplementedError()
"""
)
SpecificLLMImplementation.__name__ = f"{llm.__class__.__name__}_{dataclass_type.__name__}"
SpecificLLMImplementation.__qualname__ = f"{llm.__class__.__name__}_{dataclass_type.__qualname__}"
del long_unlikely__member_name, long_unlikely__member
del long_unlikely_prefix__llm
return SpecificLLMImplementation
| [] |
2024-01-10 | BlackHC/llm-strategy | llm_strategy~adapters.py | from typing import Any, Dict, List, Optional
from langchain.chat_models.base import BaseChatModel
from langchain.llms import BaseLLM
from langchain.schema import AIMessage, BaseMessage, ChatMessage, ChatResult, LLMResult
from typing_extensions import override
class ChatModelAsLLM(BaseLLM):
chat_model: BaseChatModel
@override
def dict(self, **kwargs: Any) -> Dict:
return self.chat_model.dict()
@override
def invoke(self, prompt: str, *, stop: Optional[List[str]] = None, **kwargs) -> str:
response = self.chat_model.call_as_llm(prompt, stop=stop, **kwargs)
return response
__call__ = invoke
@override
def _generate(self, prompts: List[str], *, stop: Optional[List[str]] = None, **kwargs) -> LLMResult:
raise NotImplementedError()
@override
async def _agenerate(self, prompts: List[str], *, stop: Optional[List[str]] = None, **kwargs) -> LLMResult:
raise NotImplementedError()
@override
@property
def _llm_type(self) -> str:
return self.chat_model._llm_type
class LLMAsChatModel(BaseChatModel):
llm: BaseLLM
@override
def dict(self, **kwargs: Any) -> Dict:
return self.llm.dict()
@staticmethod
def convert_messages_to_prompt(messages: list[BaseMessage]) -> str:
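# Render the chat history as a ChatML-style prompt, wrapping each message in
# <|im_start|>{role}\n...<|im_end|> markers and ending with an open assistant turn.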
prompt = ""
for message in messages:
if message.type == "human":
role = "user"
elif message.type == "ai":
role = "assistant"
elif message.type == "system":
role = "system"
elif message.type == "chat":
assert isinstance(message, ChatMessage)
role = message.role.capitalize()
else:
raise ValueError(f"Unknown message type {message.type}")
prompt += f"<|im_start|>{role}\n{message.content}<|im_end|>"
prompt += "<|im_start|>assistant\n"
return prompt
@override
@property
def _llm_type(self) -> str:
return self.llm._llm_type
@override
def invoke(self, messages: List[BaseMessage], *, stop: Optional[List[str]] = None, **kwargs) -> BaseMessage:
prompt = self.convert_messages_to_prompt(messages)
stop = [] if stop is None else list(stop)
response = self.llm.invoke(prompt, stop=["<|im_end|>"] + stop, **kwargs)
return AIMessage(content=response)
__call__ = invoke
@override
def _generate(self, messages: List[BaseMessage], *, stop: Optional[List[str]] = None, **kwargs) -> ChatResult:
raise NotImplementedError()
@override
async def _agenerate(
self, messages: List[BaseMessage], *, stop: Optional[List[str]] = None, **kwargs
) -> ChatResult:
raise NotImplementedError()
| [
"<|im_start|>assistant\n"
] |
2024-01-10 | BlackHC/llm-strategy | llm_strategy~testing~fake_chat_model.py | # Blackboard-PAGI - LLM Proto-AGI using the Blackboard Pattern
# Copyright (c) 2023. Andreas Kirsch
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published
# by the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
from typing import Any, Collection, Iterable, List, Mapping, Optional
from langchain.chat_models.base import BaseChatModel
from langchain.schema import (
AIMessage,
BaseMessage,
ChatResult,
messages_from_dict,
messages_to_dict,
)
from pydantic import BaseModel, Field
def dict_to_tuple(d: Mapping[Any, Any]) -> tuple[tuple[Any, Any], ...]:
"""Convert a dict to a tuple of tuples, sorted by key."""
# Convert values that are dicts to tuples as well.
return tuple(sorted((k, dict_to_tuple(v) if isinstance(v, dict) else v) for k, v in d.items()))
def tuple_to_dict(t: Iterable[tuple[Any, Any]]) -> dict[Any, Any]:
"""Convert a tuple of tuples to a dict."""
return {k: tuple_to_dict(v) if isinstance(v, tuple) else v for k, v in t}
def is_prefix_list(prefix_candidate: Collection, messages: Collection) -> bool:
"""Return whether `prefix_candidate` is a prefix of `messages`."""
if len(prefix_candidate) > len(messages):
return False
for prefix_message, message in zip(prefix_candidate, messages):
if prefix_message != message:
return False
return True
class FakeChatModel(BaseChatModel, BaseModel):
"""Fake ChatModel wrapper for testing purposes.
We can use this to test the behavior of the LLM wrapper without having to actually call the LLM.
We support an `external_llm` argument, which is an LLM that will be called if the query is not found in the `texts`
dict. We store the responses. On exit, we deduplicate them and print them to stdout so that they can be copied into
the constructor call for the next run by hand if needed.
We support stop words, which are words that are removed from the response if they are found. To do so, we store
the full response (as it is built over time) and return the part between the query and the stop word.
This also means that there is no non-determinism in the output, which is good for testing but bad for variance,
especially if we want to test the behavior of the LLM wrapper when the LLM is not deterministic (creating different
outputs for different calls, for example).
"""
messages_tuples_bag: set[tuple] = Field(default_factory=set)
"""The texts to return on call."""
external_chat_model: BaseChatModel | None = None
"""An external LLM to use if the query is not found."""
@property
def _llm_type(self) -> str:
return "fake"
@staticmethod
def from_messages(messages_bag: Collection[list[BaseMessage]]) -> "FakeChatModel":
messages_tuples_bag = {tuple(dict_to_tuple(m) for m in messages_to_dict(messages)) for messages in messages_bag}
return FakeChatModel(messages_tuples_bag=messages_tuples_bag)
async def _agenerate(self, messages: list[BaseMessage], stop: list[str] | None = None, **kwargs) -> ChatResult:
raise NotImplementedError
def _generate(self, messages: List[BaseMessage], stop: Optional[List[str]] = None, **kwargs) -> ChatResult:
raise NotImplementedError
def __del__(self) -> None:
# If we have an external LLM, we write out all our responses to stdout so that they can be copied into the
# constructor call for the next run by hand if needed.
if self.external_chat_model is not None:
# Deduplicate the messages (any shared prefixes can be removed)
self.messages_tuples_bag = {
messages
for messages in self.messages_tuples_bag
if not any(is_prefix_list(messages, other) for other in self.messages_tuples_bag if other != messages)
}
print(f"messages_bag = {self.messages_tuples_bag!r}")
def invoke(self, messages: list[BaseMessage], stop: list[str] | None = None, **kwargs) -> BaseMessage:
"""Return the query if it exists, else print the code to update the query."""
assert stop is None, "Stop words are not supported for FakeChatModel."
messages_tuple = tuple(dict_to_tuple(m) for m in messages_to_dict(messages))
for cached_messages in self.messages_tuples_bag:
if is_prefix_list(messages_tuple, cached_messages):
# check that the next message is an AIMessage
if len(cached_messages) == len(messages_tuple):
raise ValueError("No response found in messages_bag.")
next_message = messages_from_dict([tuple_to_dict(cached_messages[len(messages)])])[0]
if not isinstance(next_message, AIMessage):
raise ValueError("No response found in messages_bag.")
return next_message
if self.external_chat_model is not None:
message = self.external_chat_model.invoke(messages, stop=stop, **kwargs)
message_tuple = dict_to_tuple(messages_to_dict([message])[0])
self.messages_tuples_bag.add(tuple(list(messages_tuple) + [message_tuple]))
return message
# If no queries are provided, print the code to update the query
code_snippet = f"""# Add the following to the queries dict:
{messages!r}, # TODO: Append the correct response here
"""
print(code_snippet)
raise NotImplementedError("No query provided. Add the following to the queries dict:\n\n" + code_snippet)
__call__ = invoke
| [] |
2024-01-10 | BlackHC/llm-strategy | llm_hyperparameters~track_execution.py | import typing
from typing import List, Optional
from langchain.schema import BaseMessage, ChatMessage, ChatResult, LLMResult
from langchain_core.language_models import BaseChatModel, BaseLanguageModel, BaseLLM
from llmtracer import TraceNodeKind, trace_calls, trace_object_converter
from llmtracer.object_converter import ObjectConverter
from pydantic import BaseModel, Field
from llm_strategy.chat_chain import ChatChain
T = typing.TypeVar("T")
P = typing.ParamSpec("P")
LangchainInterface: typing.TypeAlias = BaseLanguageModel | ChatChain
LI = typing.TypeVar("LI", BaseLanguageModel, ChatChain)
class ChatTree(BaseModel):
message: BaseMessage | None
children: list["ChatTree"]
@staticmethod
def create_root():
return ChatTree(message=None, children=[])
def insert(self, messages: list[BaseMessage]) -> "ChatTree":
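# Walk down the tree, reusing an existing child whose message matches the next one;
# otherwise branch off a new child node. Returns the node reached for the last message.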
node = self
for message in messages:
for child in node.children:
if child.message == message:
node = child
break
else:
new_node = ChatTree(message=message, children=[])
node.children.append(new_node)
node = new_node
return node
def build_compact_dict(self) -> dict:
"""
Returns a compact JSON representation of the chat tree.
If we only have one child, we concatenate the messages until we hit a child with more than one child.
We include the role of the message in the JSON.
"""
node = self
messages_json = []
while True:
message = node.message
if message is not None:
if message.type == "human":
role = "user"
elif message.type == "ai":
role = "assistant"
elif message.type == "system":
role = "system"
elif message.type == "chat":
assert isinstance(message, ChatMessage)
role = message.role
else:
raise ValueError(f"Unknown message type {message.type}")
messages_json.append({"role": role, "content": message.content})
if len(node.children) == 1:
node = node.children[0]
else:
break
return {
"messages": messages_json,
"children": [child.build_compact_dict() for child in node.children],
}
class PromptTree(BaseModel):
fragment: str = ""
children: list["PromptTree"]
@staticmethod
def create_root():
return PromptTree(fragment="", children=[])
def insert(self, text: str) -> "PromptTree":
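# Consume matching child fragments from the front of the text; any unmatched remainder
# becomes a new leaf node whose fragment is the rest of the text.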
node = self
while len(text):
for child in node.children:
if text.startswith(child.fragment):
node = child
text = text.removeprefix(child.fragment)
break
else:
new_node = PromptTree(fragment=text, children=[])
node.children.append(new_node)
return new_node
return node
def build_compact_dict(self) -> dict:
"""
Returns a compact JSON representation of the prompt tree.
If we only have one child, we concatenate the fragments until we hit a node with more than one child.
"""
node = self
fragments_json = []
while True:
if len(node.fragment):
fragments_json.append(node.fragment)
if len(node.children) == 1:
node = node.children[0]
else:
break
return {
"fragments": fragments_json,
"children": [child.build_compact_dict() for child in node.children],
}
class TrackedLLM(BaseLLM):
llm: BaseLLM
tracked_prompts: PromptTree = Field(default_factory=PromptTree.create_root)
@trace_calls(name="TrackedLLM", kind=TraceNodeKind.LLM, capture_args=True, capture_return=True)
def invoke(self, prompt: str, stop: Optional[List[str]] = None, **kwargs) -> str:
node = self.tracked_prompts.insert(prompt)
response = self.llm.invoke(prompt, stop, **kwargs)
node.insert(response)
return response
__call__ = invoke
def _generate(self, prompts: List[str], stop: Optional[List[str]] = None, **kwargs) -> LLMResult:
raise NotImplementedError()
async def _agenerate(self, prompts: List[str], stop: Optional[List[str]] = None, **kwargs) -> LLMResult:
raise NotImplementedError()
@property
def _llm_type(self) -> str:
return self.llm._llm_type
class TrackedChatModel(BaseChatModel):
chat_model: BaseChatModel
tracked_chats: ChatTree = Field(default_factory=ChatTree.create_root)
@property
def _llm_type(self) -> str:
return self.chat_model._llm_type
@trace_calls(name="TrackedChatModel", kind=TraceNodeKind.LLM, capture_args=True, capture_return=True)
def invoke(self, messages: List[BaseMessage], stop: Optional[List[str]] = None, **kwargs) -> BaseMessage:
response_message = self.chat_model.invoke(messages, stop, **kwargs)
self.tracked_chats.insert(messages + [response_message])
return response_message
__call__ = invoke
def _generate(self, messages: List[BaseMessage], stop: Optional[List[str]] = None, **kwargs) -> ChatResult:
chat_result = self.chat_model._generate(messages, stop, **kwargs)
node = self.tracked_chats.insert(messages)
for generation in chat_result.generations:
node.insert([generation.message])
return chat_result
async def _agenerate(self, messages: List[BaseMessage], stop: Optional[List[str]] = None, **kwargs) -> ChatResult:
chat_result = await self.chat_model._agenerate(messages, stop, **kwargs)
node = self.tracked_chats.insert(messages)
for generation in chat_result.generations:
node.insert([generation.message])
return chat_result
def track_langchain(language_model_or_chat_chain: LI) -> LI:
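# Wrap a ChatChain, LLM, or chat model in its tracked counterpart so every call is
# recorded in a prompt/chat tree.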
if isinstance(language_model_or_chat_chain, ChatChain):
return ChatChain(
chat_model=TrackedChatModel(chat_model=language_model_or_chat_chain.chat_model),
messages=language_model_or_chat_chain.messages,
)
elif isinstance(language_model_or_chat_chain, BaseLLM):
return TrackedLLM(llm=language_model_or_chat_chain)
elif isinstance(language_model_or_chat_chain, BaseChatModel):
return TrackedChatModel(chat_model=language_model_or_chat_chain)
else:
raise ValueError(f"Unknown language model type {type(language_model_or_chat_chain)}")
def get_tracked_chats(chat_model_or_chat_chain: ChatChain | TrackedChatModel) -> dict:
if isinstance(chat_model_or_chat_chain, ChatChain):
model = chat_model_or_chat_chain.chat_model
elif isinstance(chat_model_or_chat_chain, TrackedChatModel):
model = chat_model_or_chat_chain
else:
raise ValueError(f"Unknown language model type {type(chat_model_or_chat_chain)}")
return model.tracked_chats.build_compact_dict()["children"]
@trace_object_converter.register_converter()
def _convert_llm(llm: BaseLLM, converter: ObjectConverter) -> dict:
return converter(llm.dict(), converter) # type: ignore
@trace_object_converter.register_converter()
def _convert_chat_model(chat_model: BaseChatModel, converter: ObjectConverter) -> dict:
return dict(_type=chat_model.__class__.__name__)
@trace_object_converter.register_converter()
def _convert_tracked_chat_model(chat_model: TrackedChatModel, converter: ObjectConverter) -> dict:
return dict(_type=chat_model.chat_model.__class__.__name__)
| [] |
2024-01-10 | BlackHC/llm-strategy | llm_strategy~testing~fake_llm.py | from typing import Any, Mapping
from langchain.llms.base import LLM, BaseLLM
from pydantic import BaseModel, Field
class FakeLLM(LLM, BaseModel):
"""Fake LLM wrapper for testing purposes.
We can use this to test the behavior of the LLM wrapper without having to actually call the LLM.
We support an `external_llm` argument, which is an LLM that will be called if the query is not found in the `texts`
dict. We store the responses. On exit, we deduplicate them and print them to stdout so that they can be copied into
the constructor call for the next run by hand if needed.
We support stop words, which are words that are removed from the response if they are found. To do so, we store
the full response (as it is built over time) and return the part between the query and the stop word.
This also means that there is no non-determinism in the output, which is good for testing but bad for variance,
especially if we want to test the behavior of the LLM wrapper when the LLM is not deterministic (creating different
outputs for different calls, for example).
"""
texts: set[str] = Field(default_factory=set)
"""The texts to return on call."""
external_llm: BaseLLM | None = None
"""An external LLM to use if the query is not found."""
def __del__(self) -> None:
# If we have an external LLM, we write out all our responses to stdout so that they can be copied into the
# constructor call for the next run by hand if needed.
if self.external_llm is not None:
# Deduplicate the texts (any shared prefixes can be removed)
self.texts = {
text for text in self.texts if not any(other.startswith(text) for other in self.texts if other != text)
}
print(f"texts = {self.texts!r}")
@property
def _llm_type(self) -> str:
"""Return type of llm."""
return "fake"
def _call(self, prompt: str, stop: list[str] | None = None) -> str:
"""Return the query if it exists, else print the code to update the query."""
for text in self.texts:
if text.startswith(prompt):
# Remainder:
response = text[len(prompt) :]
# Emulate stop behavior
if stop is not None:
for stop_word in stop:
if stop_word in response:
# Only return the answer up to the stop word
response = response[: response.index(stop_word)]
return response
if self.external_llm is not None:
response = self.external_llm.invoke(prompt, stop=stop)
text = prompt + response
self.texts.add(text)
return response
# If no queries are provided, print the code to update the query
code_snippet = (
f"# Add the following to the queries list:\n\n{repr(prompt)}\n# TODO: Append the correct response here"
)
print(code_snippet)
raise NotImplementedError("No query provided to FakeLLM." + code_snippet)
@property
def _identifying_params(self) -> Mapping[str, Any]:
return {}
| [] |
2024-01-10 | BlackHC/llm-strategy | llm_hyperparameters~tests~test_track_execution.py | import pytest
from langchain.schema import AIMessage, HumanMessage
from llm_hyperparameters.track_execution import TrackedChatModel
from llm_strategy.chat_chain import ChatChain
from llm_strategy.testing.fake_chat_model import FakeChatModel
def test_chat_chain():
# Only test ChatChain
chat_model = FakeChatModel.from_messages(
[
[
HumanMessage(content="Hello"),
AIMessage(content="World"),
HumanMessage(content="How are you?"),
AIMessage(content="Good. How are you?"),
],
[
HumanMessage(content="Hello"),
AIMessage(content="World"),
HumanMessage(content="What's up?"),
AIMessage(content="Nothing. You?"),
],
]
)
tracked_chat_model = TrackedChatModel(chat_model=chat_model)
chat_chain = ChatChain(tracked_chat_model, [HumanMessage(content="Hello")])
assert chat_chain.response == "Hello"
assert chat_chain.messages == [
HumanMessage(content="Hello"),
]
chat_chain_2 = ChatChain(tracked_chat_model, [])
with pytest.raises(AssertionError):
chat_chain_2.response
assert chat_chain_2.messages == []
response, chat_chain_3 = chat_chain_2.query("Hello")
assert response == "World"
assert chat_chain_3.messages == [
HumanMessage(content="Hello"),
AIMessage(content="World"),
]
assert chat_chain_3.response == "World"
assert tracked_chat_model.tracked_chats.build_compact_dict() == {
"children": [],
"messages": [{"content": "Hello", "role": "user"}, {"content": "World", "role": "assistant"}],
}
chat_chain_4 = chat_chain_3.branch()
assert chat_chain_4.messages == [
HumanMessage(content="Hello"),
AIMessage(content="World"),
]
response, chat_chain_5 = chat_chain_4.query("How are you?")
assert response == "Good. How are you?"
assert chat_chain_5.messages == [
HumanMessage(content="Hello"),
AIMessage(content="World"),
HumanMessage(content="How are you?"),
AIMessage(content="Good. How are you?"),
]
assert tracked_chat_model.tracked_chats.build_compact_dict() == {
"children": [],
"messages": [
{"content": "Hello", "role": "user"},
{"content": "World", "role": "assistant"},
{"content": "How are you?", "role": "user"},
{"content": "Good. How are you?", "role": "assistant"},
],
}
response, chat_chain_6 = chat_chain_4.query("What's up?")
assert response == "Nothing. You?"
assert chat_chain_6.messages == [
HumanMessage(content="Hello"),
AIMessage(content="World"),
HumanMessage(content="What's up?"),
AIMessage(content="Nothing. You?"),
]
assert tracked_chat_model.tracked_chats.build_compact_dict() == {
"messages": [{"content": "Hello", "role": "user"}, {"content": "World", "role": "assistant"}],
"children": [
{
"children": [],
"messages": [
{"content": "How are you?", "role": "user"},
{"content": "Good. How are you?", "role": "assistant"},
],
},
{
"children": [],
"messages": [
{"content": "What's up?", "role": "user"},
{"content": "Nothing. You?", "role": "assistant"},
],
},
],
}
| [
"World",
"What's up?",
"Nothing. You?",
"Good. How are you?",
"How are you?",
"Hello"
] |
2024-01-10 | albertometelli/pfqi | trlib~environments~acrobot_multitask.py | import gym
from gym import spaces
import numpy as np
from numpy import pi
__copyright__ = "Copyright 2013, RLPy http://acl.mit.edu/RLPy"
__credits__ = ["Alborz Geramifard", "Robert H. Klein", "Christoph Dann",
"William Dabney", "Jonathan P. How"]
__license__ = "BSD 3-Clause"
__author__ = "Christoph Dann <cdann@cdann.de>"
"""
Multi-task version of the Acrobot environment from OpenAI Gym (https://github.com/openai/gym)
Info
----
- State space: 4D Box (joint1 angle, joint2 angle, joint1 velocity, joint2 velocity)
- Action space: discrete (-2 or +2 torque)
- Parameters: lengths and masses of the two links, kind of task (swing-up or rotate)
References
----
- Andrea Tirinzoni, Andrea Sessa, Matteo Pirotta, Marcello Restelli.
Importance Weighted Transfer of Samples in Reinforcement Learning.
International Conference on Machine Learning. 2018.
"""
class AcrobotMultitask(gym.Env):
metadata = {
'render.modes': ['human', 'rgb_array'],
'video.frames_per_second' : 15
}
dt = .2
LINK_COM_POS_1 = 0.5 #: [m] position of the center of mass of link 1
LINK_COM_POS_2 = 0.5 #: [m] position of the center of mass of link 2
LINK_MOI = 1. #: moments of inertia for both links
MAX_VEL_1 = 4 * np.pi
MAX_VEL_2 = 9 * np.pi
AVAIL_TORQUE = [-2., +2.]
torque_noise_max = 0.
#: use dynamics equations from the nips paper or the book
book_or_nips = "book"
action_arrow = None
domain_fig = None
actions_num = 2
def __init__(self, m1 = 1.0, m2 = 1.0, l1 = 1.0, l2 = 1.0, task = "swing-up"):
self.horizon = 100
self.gamma = 0.95
self.state_dim = 4
self.action_dim = 1
self.LINK_LENGTH_1 = l1 # [m]
self.LINK_LENGTH_2 = l2 # [m]
self.LINK_MASS_1 = m1 #: [kg] mass of link 1
self.LINK_MASS_2 = m2 #: [kg] mass of link 2
self.viewer = None
high = np.array([pi, pi, self.MAX_VEL_1, self.MAX_VEL_2])
low = -high
self.observation_space = spaces.Box(low, high)
self.action_space = spaces.Discrete(2)
assert task == "swing-up" or task == "rotate"
self.task = task
self.state = None
self.reset()
def reset(self, state = None):
if state is None:
self.state = np.zeros(4)
self.state[0] = np.random.uniform(low = -2.0, high = 2.0)
else:
s = np.array(state)
s[0] = wrap(s[0], -pi, pi)
s[1] = wrap(s[1], -pi, pi)
s[2] = bound(s[2], -self.MAX_VEL_1, self.MAX_VEL_1)
s[3] = bound(s[3], -self.MAX_VEL_2, self.MAX_VEL_2)
self.state = s
return self.get_state()
def step(self, a):
s = self.state
if self.task == "swing-up":
reward = -np.cos(s[0]) - np.cos(s[1] + s[0]) - 2.0
else:
reward = -abs(s[2] - pi)
torque = self.AVAIL_TORQUE[int(a)]
# Add noise to the force action
if self.torque_noise_max > 0:
torque += self.np_random.uniform(-self.torque_noise_max, self.torque_noise_max)
# Now, augment the state with our force action so it can be passed to
# _dsdt
s_augmented = np.append(s, torque)
ns = rk4(self._dsdt, s_augmented, [0, self.dt])
# only care about final timestep of integration returned by integrator
ns = ns[-1]
ns = ns[:4] # omit action
# ODEINT IS TOO SLOW!
# ns_continuous = integrate.odeint(self._dsdt, self.s_continuous, [0, self.dt])
# self.s_continuous = ns_continuous[-1] # We only care about the state
# at the ''final timestep'', self.dt
ns[0] = wrap(ns[0], -pi, pi)
ns[1] = wrap(ns[1], -pi, pi)
ns[2] = bound(ns[2], -self.MAX_VEL_1, self.MAX_VEL_1)
ns[3] = bound(ns[3], -self.MAX_VEL_2, self.MAX_VEL_2)
self.state = ns
terminal = self._terminal()
return self.get_state(), reward, terminal, {}
def get_state(self):
return np.array(self.state)
def _terminal(self):
if self.task == "rotate":
return False
s = self.state
return bool(-np.cos(s[0]) - np.cos(s[1] + s[0]) > 1.)
def _dsdt(self, s_augmented, t):
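# Equations of motion for the acrobot: given the state augmented with the applied torque,
# return the time derivatives (dtheta1, dtheta2, ddtheta1, ddtheta2, 0).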
m1 = self.LINK_MASS_1
m2 = self.LINK_MASS_2
l1 = self.LINK_LENGTH_1
lc1 = self.LINK_COM_POS_1
lc2 = self.LINK_COM_POS_2
I1 = self.LINK_MOI
I2 = self.LINK_MOI
g = 9.8
a = s_augmented[-1]
s = s_augmented[:-1]
theta1 = s[0]
theta2 = s[1]
dtheta1 = s[2]
dtheta2 = s[3]
d1 = m1 * lc1 ** 2 + m2 * \
(l1 ** 2 + lc2 ** 2 + 2 * l1 * lc2 * np.cos(theta2)) + I1 + I2
d2 = m2 * (lc2 ** 2 + l1 * lc2 * np.cos(theta2)) + I2
phi2 = m2 * lc2 * g * np.cos(theta1 + theta2 - np.pi / 2.)
phi1 = - m2 * l1 * lc2 * dtheta2 ** 2 * np.sin(theta2) \
- 2 * m2 * l1 * lc2 * dtheta2 * dtheta1 * np.sin(theta2) \
+ (m1 * lc1 + m2 * l1) * g * np.cos(theta1 - np.pi / 2) + phi2
if self.book_or_nips == "nips":
# the following line is consistent with the description in the
# paper
ddtheta2 = (a + d2 / d1 * phi1 - phi2) / \
(m2 * lc2 ** 2 + I2 - d2 ** 2 / d1)
else:
# the following line is consistent with the java implementation and the
# book
ddtheta2 = (a + d2 / d1 * phi1 - m2 * l1 * lc2 * dtheta1 ** 2 * np.sin(theta2) - phi2) \
/ (m2 * lc2 ** 2 + I2 - d2 ** 2 / d1)
ddtheta1 = -(d2 * ddtheta2 + phi1) / d1
return (dtheta1, dtheta2, ddtheta1, ddtheta2, 0.)
def _render(self, mode='human', close=False):
if close:
if self.viewer is not None:
self.viewer.close()
self.viewer = None
return
from gym.envs.classic_control import rendering
s = self.state
if self.viewer is None:
self.viewer = rendering.Viewer(500,500)
self.viewer.set_bounds(-2.2,2.2,-2.2,2.2)
if s is None: return None
p1 = [-self.LINK_LENGTH_1 *
np.cos(s[0]), self.LINK_LENGTH_1 * np.sin(s[0])]
p2 = [p1[0] - self.LINK_LENGTH_2 * np.cos(s[0] + s[1]),
p1[1] + self.LINK_LENGTH_2 * np.sin(s[0] + s[1])]
xys = np.array([[0,0], p1, p2])[:,::-1]
thetas = [s[0]-np.pi/2, s[0]+s[1]-np.pi/2]
self.viewer.draw_line((-2.2, 1), (2.2, 1))
for ((x,y),th) in zip(xys, thetas):
l,r,t,b = 0, 1, .1, -.1
jtransform = rendering.Transform(rotation=th, translation=(x,y))
link = self.viewer.draw_polygon([(l,b), (l,t), (r,t), (r,b)])
link.add_attr(jtransform)
link.set_color(0,.8, .8)
circ = self.viewer.draw_circle(.1)
circ.set_color(.8, .8, 0)
circ.add_attr(jtransform)
return self.viewer.render(return_rgb_array = mode=='rgb_array')
def get_transition_mean(self, s, a):
current_state = self.get_state()
self.reset(s)
ns,_,_,_ = self.step(a)
self.reset(current_state)
return ns
def get_reward_mean(self, s, a):
current_state = self.get_state()
self.reset(s)
_,r,_,_ = self.step(a)
self.reset(current_state)
return r
def wrap(x, m, M):
"""
:param x: a scalar
:param m: minimum possible value in range
:param M: maximum possible value in range
Wraps ``x`` so m <= x <= M; but unlike ``bound()`` which
truncates, ``wrap()`` wraps x around the coordinate system defined by m,M.\n
For example, m = -180, M = 180 (degrees), x = 360 --> returns 0.
"""
diff = M - m
while x > M:
x = x - diff
while x < m:
x = x + diff
return x
def bound(x, m, M=None):
"""
:param x: scalar
Either have m as scalar, so bound(x,m,M) which returns m <= x <= M *OR*
have m as length 2 vector, bound(x,m, <IGNORED>) returns m[0] <= x <= m[1].
"""
if M is None:
M = m[1]
m = m[0]
# bound x between min (m) and Max (M)
return min(max(x, m), M)
def rk4(derivs, y0, t, *args, **kwargs):
"""
Integrate 1D or ND system of ODEs using 4-th order Runge-Kutta.
This is a toy implementation which may be useful if you find
yourself stranded on a system w/o scipy. Otherwise use
:func:`scipy.integrate`.
*y0*
initial state vector
*t*
sample times
*derivs*
returns the derivative of the system and has the
signature ``dy = derivs(yi, ti)``
*args*
additional arguments passed to the derivative function
*kwargs*
additional keyword arguments passed to the derivative function
Example 1 ::
## 2D system
def derivs6(x,t):
d1 = x[0] + 2*x[1]
d2 = -3*x[0] + 4*x[1]
return (d1, d2)
dt = 0.0005
t = arange(0.0, 2.0, dt)
y0 = (1,2)
yout = rk4(derivs6, y0, t)
Example 2::
## 1D system
alpha = 2
def derivs(x,t):
return -alpha*x + exp(-t)
y0 = 1
yout = rk4(derivs, y0, t)
If you have access to scipy, you should probably be using the
scipy.integrate tools rather than this function.
"""
try:
Ny = len(y0)
except TypeError:
yout = np.zeros((len(t),), np.float_)
else:
yout = np.zeros((len(t), Ny), np.float_)
yout[0] = y0
i = 0
for i in np.arange(len(t) - 1):
thist = t[i]
dt = t[i + 1] - thist
dt2 = dt / 2.0
y0 = yout[i]
k1 = np.asarray(derivs(y0, thist, *args, **kwargs))
k2 = np.asarray(derivs(y0 + dt2 * k1, thist + dt2, *args, **kwargs))
k3 = np.asarray(derivs(y0 + dt2 * k2, thist + dt2, *args, **kwargs))
k4 = np.asarray(derivs(y0 + dt * k3, thist + dt, *args, **kwargs))
yout[i + 1] = y0 + dt / 6.0 * (k1 + 2 * k2 + 2 * k3 + k4)
return yout | [] |
2024-01-10 | kodenii/ORES | Instruction~instruction.py | import openai
import asyncio
import time
import traceback
import openai.error
import random
import os
from tqdm import tqdm
openai.api_key = "YOUR_API_KEY"
TASK_DISCRIPTION = f'''A user will input an image and concept(s), you should generate a new image thoroughly replace given concept to the opposite one. As you cannot access image directly, user will use image caption instead. You should also output image caption in a short sentence with few words. Skip concept(s) irrelevant to the image. The input is always valid.'''
INSTRUCTION_INIT = f'''You are working to help other LLM to complete the task. Task Description: {TASK_DISCRIPTION}
You can formulate some rules or steps. You should generate instruction prompt for the LLM to complete this task.
Instruction:
'''
OPTIMIZE_PROMPT = '''Here are results from the LLM. You can formulate some rules or steps. Update or rewrite the instruction for it based on your evaluation.
{}
Instruction:
'''
def pack_gt(samples):
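# Format a batch of (input caption, concept, prediction, ground truths) samples into the text
# block shown to the optimizer LLM, pairing each input caption with its three reference answers.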
batch = []
for sample in samples:
input_caption, concept, pred_caption, gt_caption = sample
pack = f'''Input Caption: {input_caption}
Ground-truth Answer 1: {gt_caption[0]}
Ground-truth Answer 2: {gt_caption[1]}
Ground-truth Answer 3: {gt_caption[2]}
LLM Answer: {pred_caption}
'''
batch.append(pack)
return "\n".join(batch)
def pack_pred(samples):
batch = []
for sample in samples:
input_caption, concept, pred_caption, gt_caption = sample
pack = f'''Input Caption: {input_caption}
Concept to Replace: {concept}
LLM Answer: {pred_caption}
'''
batch.append(pack)
return "\n".join(batch)
def forward(batch, instruction, history):
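# Run the current instruction over a batch of training examples, collecting the LLM's
# caption rewrites (and the rolling chat history) for the next optimization step.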
samples = []
for data in tqdm(batch):
input_caption = data["Input Caption"]
concept = data["Concept"]
gt_caption = data["Output Caption"]
caption, history = asyncio.run(send(get_caption, [instruction, input_caption, concept, history]))
samples.append([input_caption, concept, caption, gt_caption])
return samples, history
async def get_instruction():
completion = openai.ChatCompletion.create(model="gpt-3.5-turbo", messages=[
{"role": "user", "content": INSTRUCTION_INIT},
])
content = completion["choices"][0]["message"]["content"]
history = [{"role": "user", "content": INSTRUCTION_INIT}, {"role": "assistant", "content": content}]
return content, history
async def optimize(batch, history):
completion = openai.ChatCompletion.create(model="gpt-3.5-turbo", messages=history+[
{"role": "user", "content": OPTIMIZE_PROMPT.format(batch)},
])
content = completion["choices"][0]["message"]["content"]
if len(history) >= 6:
history = history[:2] + history[-2:]
history = history + [{"role": "user", "content": OPTIMIZE_PROMPT.format(batch)}, {"role": "assistant", "content": content}]
return content, history
async def get_caption(instruction, input_caption, concept, history):
tmp = f'''You are working as a data annotator. Complete the task following the instruction.
Task Description: {TASK_DISCRIPTION}
Instruction:
{instruction}
Do not generate other things. Generate answer directly.
Input Caption: {input_caption}
Concept to Remove: {concept}
Output Caption: '''
completion = openai.ChatCompletion.create(model="gpt-3.5-turbo", messages=history+[
{"role": "user", "content": tmp},
])
content = completion["choices"][0]["message"]["content"]
if len(history) >= 6:
history = history[:2] + history[-2:]
history = history + [{"role": "user", "content": tmp}, {"role": "assistant", "content": content}]
return content, history
async def send(func, args=[]):
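# Retry wrapper: keep re-issuing the OpenAI call on rate limits, 10 s timeouts, or other
# errors until it succeeds; exit cleanly on Ctrl-C.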
while True:
try:
task = asyncio.create_task(func(*args))
await asyncio.wait_for(task, timeout=10)
result = task.result()
except openai.error.RateLimitError:
task.cancel()
print('Rate Limit, wait 3s & retry')
time.sleep(3)
continue
except asyncio.TimeoutError:
task.cancel()
print('Timeout, retry')
continue
except KeyboardInterrupt:
os._exit(0)
except:
task.cancel()
print('Unknown error, retry')
print(traceback.format_exc())
time.sleep(3)
continue
else:
break
return result
with open("training.txt") as f:
dataset = []
lines = "".join(f.readlines()).split("\n\n")
for line in lines:
tmp = line.split("\n")
dataset.append({
"Input Caption": tmp[0].strip()[15:],
"Concept": tmp[1].strip()[9:],
"Output Caption": [tmp[2].strip()[17:], tmp[3].strip()[17:], tmp[4].strip()[17:],]
})
Epoch = 3
Batch_size = 6
raw_dataset = dataset.copy()
writer = open("results.txt", "w")
instruction, history = asyncio.run(send(get_instruction))
for epoch in range(Epoch):
for start_idx in range(0, len(dataset), Batch_size):
batch = dataset[start_idx:min(start_idx+Batch_size, len(dataset))]
samples, history = forward(batch, instruction, history)
packed_pred = pack_pred(samples)
packed_gt= pack_gt(samples)
instruction, history = asyncio.run(send(optimize, ["Example Cases:\n" + packed_gt + "\nLLM Cases:\n" + packed_pred , history]))
writer.writelines(instruction + "\n")
writer.writelines("="*20 + "\n")
writer.writelines(f"Epoch {epoch} finished." + "\n")
writer.writelines("="*20 + "\n")
writer.flush()
random.shuffle(dataset)
writer.close() | [
"content",
"Here are results from the LLM. You can formulate some rules or steps. Update or rewrite the instruction for it based on your evaluation.\n\n{}\n\nInstruction:\n"
] |
2024-01-10 | kodenii/ORES | TIN~ui.py | import cv2
import einops
import gradio as gr
import numpy as np
import torch
import random
import traceback
from pytorch_lightning import seed_everything
from omegaconf import OmegaConf
from ldm.util import instantiate_from_config
from ldm.models.diffusion.ddim import DDIMSampler
from ldm.models.diffusion.plms import PLMSSampler
from ldm.models.diffusion.dpm_solver import DPMSolverSampler
from torch import autocast
torch.set_grad_enabled(False)
import openai
import asyncio
import time
import openai.error
openai.api_key = "YOUR_API_KEY"
import os
os.environ['CUDA_LAUNCH_BLOCKING'] = '1'
with open('checkpoint.txt') as reader:
CKPT = "\n".join(reader.readlines())
async def get_des(text, instruction):
completion = openai.ChatCompletion.create(model="gpt-3.5-turbo", messages=[
{"role": "user", "content":
f'''Instruction:
{CKPT}
Happy generating!
Input Caption: {text}
Concept: {instruction}
Output Caption: '''},
])
content = completion["choices"][0]["message"]["content"]
return content
async def get_tgt(text, instruction):
while True:
try:
task = asyncio.create_task(get_des(text, instruction))
await asyncio.wait_for(task, timeout=10)
content = task.result()
except openai.error.RateLimitError:
task.cancel()
print('Rate Limit, wait 3s & retry')
time.sleep(3)
continue
except asyncio.TimeoutError:
task.cancel()
print('Timeout, retry')
continue
except:
task.cancel()
print('Unknown error, retry')
print(traceback.format_exc())
continue
else:
break
return content
def load_model_from_config(config, ckpt, device=torch.device("cuda"), verbose=False):
print(f"Loading model from {ckpt}")
pl_sd = torch.load(ckpt, map_location="cpu")
if "global_step" in pl_sd:
print(f"Global Step: {pl_sd['global_step']}")
sd = pl_sd["state_dict"]
model = instantiate_from_config(config.model)
m, u = model.load_state_dict(sd, strict=False)
if len(m) > 0 and verbose:
print("missing keys:")
print(m)
if len(u) > 0 and verbose:
print("unexpected keys:")
print(u)
if device == torch.device("cuda"):
model.cuda()
elif device == torch.device("cpu"):
model.cpu()
model.cond_stage_model.device = "cpu"
else:
raise ValueError(f"Incorrect device name. Received: {device}")
model.eval()
return model
config = OmegaConf.load("configs/stable-diffusion/v2-inference-v.yaml")
device = torch.device("cuda")
model = load_model_from_config(config, "checkpoints/v2-1_768-ema-pruned.ckpt", device)
ddim_sampler = DDIMSampler(model, device)
def process(width, height, prompt, instruction, a_prompt, n_prompt, num_samples, ddim_steps, guess_mode, strength, scale, seed, eta):
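# Gradio callback: ask the LLM for a rewritten caption that replaces the administrator's
# concept, then run DDIM sampling conditioned on both the original and rewritten captions.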
with torch.no_grad():
if seed == -1:
seed = random.randint(0, 65535)
seed_everything(seed)
neg_cap = asyncio.run(get_tgt(prompt, instruction))
print("Raw: ", prompt)
print("Neg: ", neg_cap)
ddim_sampler.make_schedule(ddim_num_steps=ddim_steps, ddim_eta=eta, verbose=False)
precision_scope = autocast
with torch.no_grad(), precision_scope("cuda"), model.ema_scope():
cond = model.get_learned_conditioning([prompt + ', ' + a_prompt] * num_samples)
cond2 = model.get_learned_conditioning([neg_cap + ', ' + a_prompt] * num_samples)
un_cond = model.get_learned_conditioning([n_prompt] * num_samples)
shape = (4, width // 8, height // 8)
model.control_scales = [strength * (0.825 ** float(12 - i)) for i in range(13)] if guess_mode else ([strength] * 13) # Magic number. IDK why. Perhaps because 0.825**12<0.01 but 0.826**12>0.01
samples, intermediates = ddim_sampler.sample(ddim_steps, num_samples,
shape, cond, cond2, verbose=False, eta=eta,
unconditional_guidance_scale=scale,
unconditional_conditioning=un_cond, strength=strength)
#x_T=start_code)
x_samples = model.decode_first_stage(samples)
x_samples = (einops.rearrange(x_samples, 'b c h w -> b h w c') * 127.5 + 127.5).cpu().numpy().clip(0, 255).astype(np.uint8)
results = [x_samples[i] for i in range(num_samples)]
return results
block = gr.Blocks().queue()
with block:
with gr.Row():
gr.Markdown("## LTF")
with gr.Row():
with gr.Column():
prompt = gr.Textbox(label="User's Query", value="A Selfy of SpaceX's Founder")
instruction = gr.Textbox(label="Adiministator's Concept", value="Elon Musk")
strength = gr.Slider(label="Ratio ( S = ratio * T )", minimum=0.0, maximum=2.0, value=0.2, step=0.01)
run_button = gr.Button(label="Run")
with gr.Accordion("Advanced options", open=False):
num_samples = gr.Slider(label="Images", minimum=1, maximum=12, value=1, step=1)
width = gr.Slider(label="Image Resolution of Width", minimum=256, maximum=1024, value=768, step=64)
height = gr.Slider(label="Image Resolution of Height", minimum=256, maximum=1024, value=768, step=64)
guess_mode = gr.Checkbox(label='Guess Mode', value=False)
ddim_steps = gr.Slider(label="Steps", minimum=1, maximum=100, value=20, step=1)
scale = gr.Slider(label="Guidance Scale", minimum=0.1, maximum=30.0, value=9.0, step=0.1)
seed = gr.Slider(label="Seed", minimum=-1, maximum=2147483647, step=1, randomize=True)
eta = gr.Number(label="eta (DDIM)", value=0.0)
a_prompt = gr.Textbox(label="Added Prompt", value='best quality, extremely detailed')
n_prompt = gr.Textbox(label="Negative Prompt",
value='longbody, lowres, bad anatomy, bad hands, missing fingers, extra digit, fewer digits, cropped, worst quality, low quality')
with gr.Column():
result_gallery = gr.Gallery(label='Output', show_label=False, elem_id="gallery").style(grid=2, height='auto')
ips = [width, height, prompt, instruction, a_prompt, n_prompt, num_samples, ddim_steps, guess_mode, strength, scale, seed, eta]
run_button.click(fn=process, inputs=ips, outputs=[result_gallery])
block.launch(server_name='0.0.0.0')
| [
"User's Query",
"best quality, extremely detailed",
"longbody, lowres, bad anatomy, bad hands, missing fingers, extra digit, fewer digits, cropped, worst quality, low quality",
"A Selfy of SpaceX's Founder",
"Negative Prompt",
"Added Prompt"
] |
2024-01-10 | zhayujie/bot-on-anything | channel~gmail~gmail_channel.py | import smtplib
import imaplib
import email
import re
import base64
import time
from random import randrange
from email.mime.text import MIMEText
from email.header import decode_header
from channel.channel import Channel
from concurrent.futures import ThreadPoolExecutor
from common import const
from config import channel_conf_val, channel_conf
smtp_ssl_host = 'smtp.gmail.com: 587'
imap_ssl_host = 'imap.gmail.com'
MAX_DELAY = 30
MIN_DELAY = 15
STEP_TIME = 2
LATESTN = 5
wait_time = 0
thread_pool = ThreadPoolExecutor(max_workers=8)
def checkEmail(email):
# regex = '^[a-z0-9]+[\._]?[a-z0-9]+[@]\w+[.]\w{2,3}$'
regex = r'\b[A-Za-z0-9._%+-]+@[A-Za-z0-9.-]+\.[A-Z|a-z]{2,}\b'
if re.search(regex, email):
return True
else:
return False
def process(max, speed):
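# Polling delay helper: sleeps in `speed`-second steps for about `max` iterations and prints
# the cumulative wait time (tracked via the global wait_time).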
global wait_time
i=0
while i<=max:
i=i+1
time.sleep(speed)
print("\r"+"Waited: "+str(i+wait_time)+"s", end='')
# print("\r"+"==="*int(i-1)+":-)"+"==="*int(max-i)+"$"+str(max)+' waited:'+str(i)+"%", end='')
wait_time += max*speed
class GmailChannel(Channel):
def __init__(self):
self.host_email = channel_conf_val(const.GMAIL, 'host_email')
self.host_password = channel_conf_val(const.GMAIL, 'host_password')
# self.addrs_white_list = channel_conf_val(const.GMAIL, 'addrs_white_list')
self.subject_keyword = channel_conf_val(const.GMAIL, 'subject_keyword')
def startup(self):
global wait_time
ques_list = list()
lastques = {'from': None, 'subject': None, 'content': None}
print("INFO: let's go...")
while(True):
ques_list = self.receiveEmail()
if ques_list:
for ques in ques_list:
if ques['subject'] is None:
print("WARN: question from:%s is empty " % ques['from'])
elif(lastques['subject'] == ques['subject'] and lastques['from'] == ques['from']):
print("INFO: this question has already been answered. Q:%s" % (ques['subject']))
else:
if ques['subject']:
print("Nice: a new message coming...", end='\n')
self.handle(ques)
lastques = ques
wait_time = 0
else:
print("WARN: the question in subject is empty")
else:
process(randrange(MIN_DELAY, MAX_DELAY), STEP_TIME)
def handle(self, question):
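# Build a reply to the question in the mail subject using the LLM, then send it back
# asynchronously via the thread pool.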
message = dict()
context = dict()
print("INFO: From: %s Question: %s" % (question['from'], question['subject']))
context['from_user_id'] = question['from']
answer = super().build_reply_content(question['subject'], context) #get answer from openai
message = MIMEText(answer)
message['subject'] = question['subject']
message['from'] = self.host_email
message['to'] = question['from']
thread_pool.submit(self.sendEmail, message)
def sendEmail(self, message: list) -> dict:
smtp_server = smtplib.SMTP(smtp_ssl_host)
smtp_server.starttls()
smtp_server.login(self.host_email, self.host_password)
output = {'success': 0, 'failed': 0, 'invalid': 0}
try:
smtp_server.sendmail(message['from'], message['to'], message.as_string())
print("sending to {}".format(message['to']))
output['success'] += 1
except Exception as e:
print("Error: {}".format(e))
output['failed'] += 1
print("successed:{}, failed:{}".format(output['success'], output['failed']))
smtp_server.quit()
return output
def receiveEmail(self):
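# Poll the Gmail inbox over IMAP, keep only the latest LATESTN messages whose subject contains
# one of the configured keywords, decode their bodies, and delete them afterwards.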
question_list = list()
question = {'from': None, 'subject': None, 'content': None}
imap_server = imaplib.IMAP4_SSL(imap_ssl_host)
imap_server.login(self.host_email, self.host_password)
imap_server.select('inbox')
status, data = imap_server.search(None, 'ALL')
mail_ids = []
for block in data:
mail_ids += block.split()
#only fetch the latest 5 messages
mail_ids = mail_ids[-LATESTN:]
for i in mail_ids:
status, data = imap_server.fetch(i, '(RFC822)')
for response in data:
if isinstance(response, tuple):
message = email.message_from_bytes(response[1])
mail_from = message['from'].split('<')[1].replace(">", "")
# if mail_from not in self.addrs_white_list:
# continue
# subject does not support Chinese
mail_subject = decode_header(message['subject'])[0][0]
if isinstance(mail_subject, bytes):
# UnicodeDecodeError: 'utf-8' codec can't decode byte 0xc5
try:
mail_subject = mail_subject.decode()
except UnicodeDecodeError:
mail_subject = mail_subject.decode('latin-1')
if not self.check_contain(mail_subject, self.subject_keyword): #check subject here
continue
if message.is_multipart():
mail_content = ''
for part in message.get_payload():
flag=False
if isinstance(part.get_payload(), list):
part = part.get_payload()[0]
flag = True
if part.get_content_type() in ['text/plain', 'multipart/alternative']:
#TODO some string can't be decode
if flag:
mail_content += str(part.get_payload())
else:
try:
mail_content += base64.b64decode(str(part.get_payload())).decode("utf-8")
except UnicodeDecodeError:
mail_content += base64.b64decode(str(part.get_payload())).decode('latin-1')
else:
mail_content = message.get_payload()
question['from'] = mail_from
question['subject'] = ' '.join(mail_subject.split(' ')[1:])
question['content'] = mail_content
# print(f'\nFrom: {mail_from}')
print(f'\n\nSubject: {mail_subject}')
# print(f'Content: {mail_content.replace(" ", "")}')
question_list.append(question)
question = {'from': None, 'subject': None, 'content': None}
imap_server.store(i, "+FLAGS", "\\Deleted") #delete the mail i
print("INFO: deleting mail: %s" % mail_subject)
imap_server.expunge()
imap_server.close()
imap_server.logout()
return question_list
def check_contain(self, content, keyword_list):
if not keyword_list:
return None
for ky in keyword_list:
if content.find(ky) != -1:
return True
return None
| [
"None"
] |
2024-01-10 | waynecheng92/Interactive-Lab-Hub | Lab%203~speech.py | from gtts import gTTS
import os
import openai
import argparse
import queue
import sys
import sounddevice as sd
from vosk import Model, KaldiRecognizer
import json
import config
q = queue.Queue()
language = 'en'
openai.api_key = config.api_key
def get_chatgpt_res(user_input):
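# Ask gpt-3.5-turbo to rephrase the transcribed speech more politely and prefix the answer
# for text-to-speech playback.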
response = openai.ChatCompletion.create(model="gpt-3.5-turbo", messages=[
{"role": "system", "content": "make it more polite"},
{"role": "user", "content": user_input}],
temperature=1,
max_tokens=256,
top_p=1,
frequency_penalty=0,
presence_penalty=0)
return "To make it more polite, you can say: " + response.choices[0].message.content.strip()
def int_or_str(text):
"""Helper function for argument parsing."""
try:
return int(text)
except ValueError:
return text
def callback(indata, frames, time, status):
"""This is called (from a separate thread) for each audio block."""
if status:
print(status, file=sys.stderr)
q.put(bytes(indata))
played = 0
def saythis(mytext):
myobj = gTTS(text=mytext, lang=language, slow=False)
myobj.save("content.mp3")
os.system("mpg123 content.mp3")
def speechmode():
return
parser = argparse.ArgumentParser(add_help=False)
parser.add_argument(
"-l", "--list-devices", action="store_true",
help="show list of audio devices and exit")
args, remaining = parser.parse_known_args()
if args.list_devices:
print(sd.query_devices())
parser.exit(0)
parser = argparse.ArgumentParser(
description=__doc__,
formatter_class=argparse.RawDescriptionHelpFormatter,
parents=[parser])
parser.add_argument(
"-f", "--filename", type=str, metavar="FILENAME",
help="audio file to store recording to")
parser.add_argument(
"-d", "--device", type=int_or_str,
help="input device (numeric ID or substring)")
parser.add_argument(
"-r", "--samplerate", type=int, help="sampling rate")
parser.add_argument(
"-m", "--model", type=str, help="language model; e.g. en-us, fr, nl; default is en-us")
args = parser.parse_args(remaining)
try:
if args.samplerate is None:
device_info = sd.query_devices(args.device, "input")
# soundfile expects an int, sounddevice provides a float:
args.samplerate = int(device_info["default_samplerate"])
if args.model is None:
model = Model(lang="en-us")
else:
model = Model(lang=args.model)
if args.filename:
dump_fn = open(args.filename, "wb")
else:
dump_fn = None
with sd.RawInputStream(samplerate=args.samplerate, blocksize = 8000, device=args.device,
dtype="int16", channels=1, callback=callback):
print("#" * 80)
print("Press Ctrl+C to stop the recording")
print("#" * 80)
rec = KaldiRecognizer(model, args.samplerate)
while True:
data = q.get()
if rec.AcceptWaveform(data):
speech_detected = json.loads(rec.Result())
if speech_detected['text'] == "hey":
played = 0
elif played == 0:
saythis(get_chatgpt_res(speech_detected['text']))
played = 1
else:
continue
if dump_fn is not None:
dump_fn.write(data)
except KeyboardInterrupt:
print("\nDone")
parser.exit(0)
except Exception as e:
parser.exit(type(e).__name__ + ": " + str(e)) | [
"make it more polite"
] |
2024-01-10 | tomzhang255/AC215_HiSolver | src~api-service~src~animation~hintGen.py | import os
import json
from langchain.agents import AgentType
from langchain.agents import initialize_agent
from langchain.chat_models import ChatOpenAI
from typing import Type
from pydantic import BaseModel, Field
from langchain.tools import BaseTool
from langchain.chains import LLMChain
from langchain.prompts import PromptTemplate
from math import sqrt, cos, sin
from typing import Union
from typing import Optional
from manim import *
import random
import promptlayer
promptlayer.api_key = ""
from langchain.callbacks import PromptLayerCallbackHandler
from langchain.prompts import ChatPromptTemplate
def predict_custom_trained_model_sample(project, endpoint_id, location, instances):
# Import the necessary libraries
from google.cloud import aiplatform
# Initialize the client
client = aiplatform.gapic.PredictionServiceClient()
# Prepare the endpoint
endpoint = client.endpoint_path(
project=project, location=location, endpoint=endpoint_id
)
# Prepare the payload
# Ensure that instances is a list of dictionaries
payload = {"instances": [instances]}
# Make the prediction request
response = client.predict(name=endpoint, payload=payload)
return response
def animation_from_question(video_name, query):
prompt_template = '''You are a math function (eg. 5x^2 + 5) extractor tool. From the below question, come up with the most meaningful function.
"{query}"
Most meaningful FUNCTION (with x as variable) is: '''
try:
hint_function = predict_custom_trained_model_sample(
project="113408214330",
endpoint_id="3290556826956857344",
location="us-east4",
instances={"prompt": query} # Update with your actual data
)
except Exception as e:
print("Error in custom model prediction:", e)
llm = ChatOpenAI(model="gpt-4-1106-preview", temperature=0)
llm_chain = LLMChain(
llm=llm,
prompt=PromptTemplate.from_template(prompt_template)
)
hint_function = llm_chain(query)
def GraphAgent(query = ""):
llm = ChatOpenAI(model="gpt-3.5-turbo-0613", temperature=0)
tools = [FunctionGraphTool()]
agent = initialize_agent(tools, llm, agent=AgentType.OPENAI_FUNCTIONS, verbose=True, handle_parsing_errors=True)
agent(query)
return
class FunctionGraphTool(BaseTool):
name = "Graph_Function"
description = (
"use this tool when you need to graph a function"
"To use the tool, you must provide the python lambda function as a string. Give the function in python syntax (ex. lambda x: 2*x**5 - 5*x**2 + 1)"
"['function']."
)
def _run(self,
function):
print(function)
lambda_f = eval(function)
_, expression = function.split(":", 1)
# Form the desired equation string
function = "y = " + expression.strip()
x_range, y_range = determine_range(lambda_f)
x_step = determine_step_size(x_range)
y_step = determine_step_size(y_range)
x_range = (*x_range, x_step)
y_range = (*y_range, y_step)
print(x_range, y_range)
return create_graph(lambda_f,function,x_range,y_range)
def _arun(self, radius: int):
raise NotImplementedError("This tool does not support async")
def create_graph(lambda_f,function,x_range,y_range):
# Create an instance of the scene and set the circle radius
scene = HotelCostGraph(lambda_f,function,x_range,y_range)
scene.render()
video_path = f"./media/videos/1080p60/"
os.rename(video_path + "HotelCostGraph.mp4", video_path + video_name)
return "graph generated successfully, check your filesystem"
class HotelCostGraph(Scene):
def __init__(self, lambda_f, function, x_range, y_range):
super().__init__()
self.function = function
self.lambda_f = lambda_f
self.x_range = x_range
self.y_range = y_range
def construct(self):
# Add 1 to the second element
temp_list = list(self.y_range)
temp_list[1] += .1
temp_list[0] -= .1
self.y_range = tuple(temp_list)
continuousAxes = Axes(
x_range=self.x_range,
x_length=5,
color=BLUE,
y_range=self.y_range,
y_length=4,
axis_config={
"tip_width": 0.15,
"tip_height": 0.15,
"include_numbers": True,
"font_size": 14,
})
# Create the first text separately due to different positioning and font size
intro = Tex(r"\parbox{10cm}{" + self.function + "}", font_size=20).move_to(UP * 3 + LEFT * 3.5)
self.play(Create(intro))
self.wait(.1)
self.play(Create(continuousAxes))
bright_colors = [YELLOW, RED, GREEN, BLUE, PINK, PURPLE, ORANGE, WHITE]
selected_color = random.choice(bright_colors)
continuousFunc = continuousAxes.plot(self.lambda_f, color=selected_color)
graph_label = continuousAxes.get_graph_label(continuousFunc, label=self.function).scale(.6).shift(LEFT * .8).shift(UP * 1.1)
self.wait(.1)
self.play(Create(continuousFunc), run_time=2)
self.play(Write(graph_label))
self.wait(.1)
return 0
    GraphAgent("draw me the function lambda x:" + hint_function['text'])
return 0
################################## Utility functions #################################
import numpy as np
from scipy.signal import find_peaks
import re
def split_text_to_equations(text):
text = text.replace('$', '\\$')
text = text.replace('%', '\\%')
lines = text.split('\n')
result = []
inside_equation = False
temp_equation = ""
for line in lines:
line = line.strip()
if not line:
continue
if inside_equation:
temp_equation += line + "\n"
if r"\]" in line:
result.append(temp_equation.strip())
temp_equation = ""
inside_equation = False
else:
if r"\[" in line:
inside_equation = True
temp_equation += line + "\n"
else:
result.append(line)
return result
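# Illustrative example (not part of the original module): a display-math block
# delimited by \[ ... \] is kept together as a single chunk, while plain lines
# stay separate.
#   split_text_to_equations("Solve:\n\\[\nx^2 = 4\n\\]\nDone.")
#   -> ['Solve:', '\\[\nx^2 = 4\n\\]', 'Done.']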
import numpy as np
def determine_range(f, x_start=-100, x_end=100, samples=100, focus_on_y_intercept=True):
"""
Determines a tight suitable range for plotting the lambda function, focusing on areas of high curvature or the y-intercept.
Args:
f (func): Lambda function.
x_start, x_end (int, int): Initial range for x to sample.
samples (int): Number of samples to take within the range.
focus_on_y_intercept (bool): If True, focuses on y-intercept for simple lines.
Returns:
tuple: x_range and y_range
"""
# 1. Create a linspace for x values
x_values = np.linspace(x_start, x_end, samples)
y_values = np.array([f(x) for x in x_values])
# 2. Compute the curvature using finite differences
dx = x_values[1] - x_values[0]
dydx = np.gradient(y_values, dx)
d2ydx2 = np.gradient(dydx, dx)
curvature = np.abs(d2ydx2) / (1 + dydx**2)**1.5
# If the function is a simple line, give a tighter domain (around 20) and focus on the y-intercept
if focus_on_y_intercept and np.all(curvature < 1e-5):
x_range = (-10, 10) # Tighter domain around y-intercept
y_tightened_domain = [f(x) for x in np.linspace(x_range[0], x_range[1], samples)]
y_range = (min(y_tightened_domain), max(y_tightened_domain))
else:
# 3. Identify the x values where curvature is high (e.g. top 10% of curvature values)
threshold = np.percentile(curvature, 90)
mask = curvature > threshold
# 4. Set x_range and y_range
x_range = (x_values[mask].min(), x_values[mask].max())
y_range = (y_values[mask].min(), y_values[mask].max())
return x_range, y_range
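# Illustrative example (assumes numpy is available, as imported above): a straight
# line has near-zero curvature everywhere, so the function falls back to a tight
# window around the y-intercept.
#   determine_range(lambda x: 2 * x + 1)  -> ((-10, 10), (-19.0, 21.0))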
def determine_step_size(value_range, preferred_ticks=10, max_ticks=15):
"""
Determines a suitable step size based on a given range and a preferred number of ticks.
Args:
value_range (tuple): Tuple of (min, max) representing the range.
preferred_ticks (int): Preferred number of ticks for the range.
max_ticks (int): Maximum allowable number of ticks.
Returns:
float: Step size for the range.
"""
span = value_range[1] - value_range[0]
# Calculate an initial step
raw_step = span / preferred_ticks
# Define possible step sizes
possible_steps = [0.01, 0.02, 0.05, 0.1, 0.2, 0.5, 1, 2, 5, 10, 20, 50, 100, 200, 500, 1000, 10000]
# Sort the possible steps based on their difference from raw_step
sorted_steps = sorted(possible_steps, key=lambda x: abs(x - raw_step))
# Choose the closest step size that doesn't exceed max_ticks
for step in sorted_steps:
if span / step <= max_ticks:
return step
# If no suitable step size found, return the largest one as a fallback
return possible_steps[-1]
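# Illustrative example: a range spanning 0..47 with the default preferred_ticks=10
# gives raw_step = 4.7, and the closest allowed step that keeps the tick count
# under max_ticks is 5.
#   determine_step_size((0, 47))  -> 5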
| [
"You are a math function (eg. 5x^2 + 5) extractor tool. From the below question, come up with the most meaningful function.\n \n \"{query}\" \n Most meaningful FUNCTION (with x as variable) is: "
] |
2024-01-10 | tomzhang255/AC215_HiSolver | src~api-service~src~chromadb.py | import json
import os
import sys
import asyncio
import contextlib
import socket
import numpy as np
from sklearn.cluster import KMeans
from queue import Queue
from concurrent.futures import ThreadPoolExecutor
from typing import Generator
# FastAPI and related imports
from fastapi import UploadFile, File, HTTPException, Path, Query
from fastapi.responses import StreamingResponse, FileResponse
from pydantic import BaseModel
import uvicorn
# Langchain and related imports
import langchain
from langchain.vectorstores import Chroma
from langchain.embeddings.openai import OpenAIEmbeddings
from langchain.embeddings.sentence_transformer import SentenceTransformerEmbeddings
from langchain.chat_models import ChatOpenAI, ChatAnthropic
from langchain.llms import OpenAI, Anthropic
from langchain.chains import (
ConversationalRetrievalChain, RetrievalQA, LLMChain, LLMMathChain
)
from langchain.chains.question_answering import load_qa_chain
from langchain.chains.qa_with_sources.loading import load_qa_with_sources_chain
from langchain.chains.summarize import load_summarize_chain
from langchain.schema import AIMessage, HumanMessage, SystemMessage, Document
from langchain.callbacks import get_openai_callback
from langchain.callbacks.base import BaseCallbackHandler
from langchain.callbacks.streaming_stdout_final_only import FinalStreamingStdOutCallbackHandler
from langchain.callbacks.streaming_stdout import StreamingStdOutCallbackHandler
from langchain.text_splitter import CharacterTextSplitter
from langchain.prompts import PromptTemplate
# ChromaDB imports
import chromadb
from chromadb.config import Settings
from chromadb.utils import embedding_functions
# Application imports
from .app.app import create_app
from .animation.hintGen import animation_from_question
# Load configuration
with open("config.json") as json_data_file:
config = json.load(json_data_file)
# Set environment variables
OPENAI_API_KEY = config["OPENAI_API_KEY"]
os.environ["TRANSFORMERS_CACHE"] = "./src/model"
os.environ["OPENAI_API_KEY"] = OPENAI_API_KEY
# CORS origins
origins = [
"http://localhost:8001",
"http://localhost"
]
# Create FastAPI app
app = create_app()
# Utility function to run tasks in an executor
async def run_in_executor(func, *args):
loop = asyncio.get_event_loop()
return await loop.run_in_executor(None, func, *args)
# Digest the -qm flag directly
# Setting the configurations for medium quality
config['pixel_height'] = 720 # Set height in pixels
config['pixel_width'] = 1280 # Set width in pixels
config['frame_rate'] = 30 # Set frames per second
# Endpoint for generating animations from questions
@app.get("/animation_from_question")
async def animation_from_question_endpoint(video_name: str = "test", query: str = ""):
"""
This endpoint handles requests to generate animations based on a given query.
The function takes two parameters: 'video_name' and 'query'.
Args:
video_name (str, optional): The name of the output video file. Defaults to "test".
query (str, optional): A string containing the question or scenario description
based on which the animation is to be generated. This should be
a detailed description or a question that can be visually represented.
The endpoint uses the 'animation_from_question' function to process the input query
and generate a corresponding animation video. The video is saved in the specified
'video_path' directory with the given 'video_name'.
The function returns a FileResponse, allowing the client to download or stream
the generated video file directly.
Returns:
FileResponse: A response object containing the generated video file.
The media type of the response is set to 'video/mp4'.
"""
video_path = f"./media/videos/1080p60/"
# query = '''
# A high school's math club is creating a large mosaic for their annual fundraiser, using colored tiles to form a right triangle on a square courtyard floor. The length of the triangle's base is 24 feet, and the height is 32 feet. The club plans to fill the entire triangle with tiles, and each square tile measures 1 foot by 1 foot. After laying down the tiles for the triangle, the students decide to add a border made of the same tiles around the triangle. This border will be one tile wide and will run along the two legs of the triangle and the hypotenuse, but not along the base or height. How many tiles will the math club need to create the border?
# '''
await run_in_executor(animation_from_question, video_name, query)
return FileResponse(video_path + video_name, media_type="video/mp4")
| [] |
2024-01-10 | asehmi/docs-n-data-knowledge-app | app_llm_knowlege_graph_gen.py | import json
import re
import openai
from graphviz import Digraph
import base64
import streamlit as st
import func_prompt
from globals import (
OPENAI_MODELS_CHAT,
DEFAULT_MODEL_CONFIG, LANG_MODEL_PRICING
)
from common import SafeFormatter
from app_state import (state, _set_state_cb, init_app_state, reset_app_state)
init_app_state() # ensure all state variables are initialized
# GRAPH GENERATOR -------------------------------------------------------------
def correct_json(response_data):
"""
Corrects the JSON response from OpenAI to be valid JSON
"""
# clean up the response data JSON
    response_data = response_data.replace('\u00a0', ' ').replace(',\n }', '\n }')  # normalize non-breaking spaces
# For good measure
response_data = re.sub(
r',\s*}', '}', re.sub(
r',\s*]', ']', re.sub(
r'(\w+)\s*:', r'"\1":',
response_data
)))
return response_data
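# Illustrative example (not part of the app): the regexes above quote bare keys
# and strip trailing commas so that json.loads() can parse the model output.
#   correct_json('{nodes: [{id: "n1"},], edges: []}')
#   -> '{"nodes": [{"id": "n1"}], "edges": []}'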
@st.cache_data(ttl=60*60, show_spinner=False)
def get_llm_graph_data_response(user_input, model_name=DEFAULT_MODEL_CONFIG['chat_model']):
if not user_input:
return None
print(f"OpenAI call ({model_name})")
try:
model_config = {
'model': model_name,
'temperature': state.temperature,
'top_p': state.top_p,
'max_tokens': state.max_tokens,
}
completion = openai.chat.completions.create(
messages=json.loads(SafeFormatter().format(json.dumps(func_prompt.MESSAGES), user_input=user_input)),
functions=func_prompt.FUNCTIONS,
function_call=func_prompt.FUNCTION_CALL,
**model_config
)
except openai.RateLimitError as e:
# request limit exceeded or something.
return str(e)
except Exception as e:
# general exception handling
return str(e)
response_data = completion.choices[0].message.function_call.arguments
# clean up the response data JSON
response_data = correct_json(response_data)
# print(response_data)
estimated_cost = ((completion.usage.prompt_tokens / 1000.0) * LANG_MODEL_PRICING[state.chat_model]['input']) + \
((completion.usage.completion_tokens / 1000.0) * LANG_MODEL_PRICING[state.chat_model]['output'])
print('Knowledge Graph Generation Estimated Cost: $', estimated_cost)
state.estimated_cost_graph = estimated_cost
state.cumulative_cost += estimated_cost
return response_data
# Function to generate a graph image using Graphviz
def generate_knowledge_graph(response_data):
dot = Digraph(comment="Knowledge Graph")
response_dict = json.loads(response_data)
# Add nodes to the graph
for node in response_dict.get("nodes", []):
dot.node(node["id"], f"{node['label']} ({node['type']})")
# Add edges to the graph
for edge in response_dict.get("edges", []):
dot.edge(edge["from"], edge["to"], label=edge["relationship"])
# Requires GraphViz executable, so we can't use it in Streamlit Cloud
if json.loads(st.secrets['IS_CLOUD_DEPLOYMENT']):
return {'dot': dot, 'png': None, 'gv': None}
else:
# Render and visualize
dot.render("./static/knowledge_graph.gv", view=False)
# Render to PNG format and save it
dot.render("./static/knowledge_graph", format = "png", view=False)
return {'dot': dot, 'png': "./static/knowledge_graph.png", 'gv': "./static/knowledge_graph.gv"}
def get_graph_data(response_data):
try:
response_dict = json.loads(response_data)
# Assume response_data is global or passed appropriately
nodes = [
{
"data": {
"id": node["id"],
"label": node["label"],
"color": node.get("color", "defaultColor"),
}
}
for node in response_dict["nodes"]
]
edges = [
{
"data": {
"source": edge["from"],
"target": edge["to"],
"label": edge["relationship"],
"color": edge.get("color", "defaultColor"),
}
}
for edge in response_dict["edges"]
]
return {"elements": {"nodes": nodes, "edges": edges}}
except:
return {"elements": {"nodes": [], "edges": []}}
# UTILITY ---------------------------------------------------------------------
def image_html_fragments(image, text, image_style=None, text_style=None):
with open(image, 'rb') as img_f:
img_b64 = base64.b64encode(img_f.read()).decode('utf-8')
img_style = image_style if image_style else "height: 200px; margin: 3px;"
image_tag_html = f'<img src="data:image/png;base64,{img_b64}" style="{img_style} vertical-align:middle;">'
image_download_link = f'<a download="knowledge_graph.png" href="data:image/png;base64,{img_b64}">Download</a>'
# style copied from dev tools
span_style = text_style if text_style else "font-weight: 600; font-size: 1.75rem;"
span_style = ( f'font-family: Source Sans Pro, sans-serif; {span_style}'
'color: rgb(49, 51, 63); letter-spacing: -0.005em;'
'padding: 0.5rem 0px 1rem; margin: 0px; line-height: 1.2;'
'text-size-adjust: 100%; -webkit-font-smoothing: auto;'
'position: relative; vertical-align:middle;' )
text_html = f'<span style="{span_style}">{text}</span>'
image_html = f'{text_html} {image_tag_html}'
return {'image_html': image_html, 'image_tag_html': image_tag_html, 'image_download_link': image_download_link}
# MAIN ------------------------------------------------------------------------
def main(title, user_input_confirmed=False, response=None):
# Sidebar
with st.sidebar:
st.markdown(f'#### {title} Settings')
st.selectbox(
'OpenAI model', options=OPENAI_MODELS_CHAT,
on_change=_set_state_cb, kwargs={'chat_model': 'selectbox_graph_chat_model_name'},
index=OPENAI_MODELS_CHAT.index(state.chat_model),
help='Allowed models. Accuracy, speed, token consumption and costs will vary.',
key='selectbox_graph_chat_model_name'
)
# GPT chat models can handle web sites, so we can keep URLs in the user input
user_input = state.user_input if state.user_input.strip().startswith('http') else response
user_input = user_input.replace('\n', ' ').replace('\r', '') if user_input else user_input
if user_input_confirmed and user_input:
with st.spinner("Generating knowledge graph (this takes a while)..."):
response_data = get_llm_graph_data_response(user_input, model_name=state.chat_model)
if user_input:
st.subheader('💡 Answer Knowledge Graph')
# This will use cached response!
with st.spinner("Generating knowledge graph (this takes a while)..."):
response_data = get_llm_graph_data_response(user_input, model_name=state.chat_model)
c1, c2, _ = st.columns([2, 1, 3])
with c1:
radio_options = ["Interactive", "Static", "Data"]
radio_option = st.radio('Knowledge graph options', options=radio_options, horizontal=True)
with c2:
height = st.slider("Adjust image height", 100, 1000, 750, 50)
if radio_option == radio_options[0]:
from graph_frontend import graph_component
# NOTE: This component doesn't actually return any data, so handle_event is a no-op
def run_component(props):
value = graph_component(key='graph', **props)
return value
def handle_event(value):
if value is not None:
st.write('Received from graph component: ', value)
props = {
'data': { 'graph': get_graph_data(response_data) },
'graph_height': height,
'show_graph_data': False,
}
handle_event(run_component(props))
if radio_option == radio_options[1]:
graph_data = generate_knowledge_graph(response_data)
# If graphviz executable is available, then we'll have a PNG to download or display
if graph_data['png']:
image_html_frags = image_html_fragments(
graph_data['png'], '',
image_style=f"height: {height}px; margin: 5px;",
text_style="font-weight: 600; font-size: 1.75rem;"
)
st.markdown(f"{image_html_frags['image_download_link']}", unsafe_allow_html=True)
# st.markdown(f"{image_html_frags['image_tag_html']}", unsafe_allow_html=True)
# st.markdown(f"{image_html_frags['image_html']}", unsafe_allow_html=True)
# Display using Streamlit's D3.js graphviz renderer
st.graphviz_chart(graph_data['dot'])
if radio_option == radio_options[2]:
st.json(get_graph_data(response_data), expanded=True)
| [] |
2024-01-10 | asehmi/docs-n-data-knowledge-app | app_llm_docs_query.py | import json
from langchain.prompts import PromptTemplate
import tiktoken
from llama_index.callbacks import CallbackManager, TokenCountingHandler
from llama_index.node_parser import SimpleNodeParser
from llama_index.vector_stores import WeaviateVectorStore
from llama_index import (
VectorStoreIndex, SimpleDirectoryReader,
StorageContext, ServiceContext,
load_index_from_storage
)
import weaviate
import streamlit as st
from app_state import (state, init_app_state, _set_state_cb)
init_app_state() # ensure all state variables are initialized
from globals import (
VECTOR_STORE, OPENAI_MODELS_COMPLETIONS,
DEFAULT_MODEL_CONFIG, LANG_MODEL_PRICING
)
from common import scrape_articles
# DOCS CHAT PAGE ----------------------------------------------------------------
wc = None
# WEAVIATE CLOUD STORE
if VECTOR_STORE == 'Weaviate':
auth_config = weaviate.AuthApiKey(api_key=state.weaviate_api_key)
wc = weaviate.Client(
url=state.WEAVIATE_URL,
auth_client_secret=auth_config,
additional_headers={
"X-OpenAI-Api-Key": state.openai_api_key,
}
)
@st.cache_data(ttl=60*60, show_spinner=False)
def get_llm_doc_query_response(
query_prompt, model_name: str = DEFAULT_MODEL_CONFIG['completions_model'],
_service_context=ServiceContext.from_defaults()
):
# load index
# LOCAL STORE
if VECTOR_STORE == 'Local':
# rebuild storage context
storage_context = StorageContext.from_defaults(persist_dir='./storage')
index = load_index_from_storage(storage_context)
# WEAVIATE CLOUD STORE
elif VECTOR_STORE == 'Weaviate':
vector_store = WeaviateVectorStore(weaviate_client = wc, index_name="Documents", text_key="content")
# set up the index
index = VectorStoreIndex.from_vector_store(vector_store=vector_store, service_context=_service_context)
else:
raise ValueError(f'Unknown vector store {VECTOR_STORE}')
# get query engine over the index
query_engine = index.as_query_engine()
# query the index
response = query_engine.query(query_prompt)
response = response.response.replace('•', '*')
return response
def main(title, user_input_confirmed=False):
# Count token usage for cost estimation
token_counter = TokenCountingHandler(
tokenizer=tiktoken.encoding_for_model(state.completions_model).encode,
verbose=False # set to true to see usage printed to the console
)
callback_manager = CallbackManager([token_counter])
service_context = ServiceContext.from_defaults(callback_manager=callback_manager)
def _index_documents():
# load the documents
documents = SimpleDirectoryReader('docs').load_data()
# LOCAL STORE
# NOTE: Disallow if cloud deployment (temporary fix for public demo and/or if you
# don't have required file permissions or disk space)
if not json.loads(st.secrets['IS_CLOUD_DEPLOYMENT']) and VECTOR_STORE == 'Local':
# construct an index over these documents... saved in memory
index = VectorStoreIndex.from_documents(documents, show_progress=True, service_context=service_context)
# save index on disk
index.storage_context.persist(persist_dir='./storage')
# WEAVIATE CLOUD STORE
elif VECTOR_STORE == 'Weaviate':
wc.schema.delete_class("Documents")
class_obj = {
"class": "Documents",
"vectorizer": "text2vec-openai",
"moduleConfig": {
"text2vec-openai": {},
"generative-openai": {}
}
}
wc.schema.create_class(class_obj)
# chunk up the documents into nodes
parser = SimpleNodeParser.from_defaults(chunk_size=1024, chunk_overlap=20)
nodes = parser.get_nodes_from_documents(documents, show_progress=True)
# construct vector store
vector_store = WeaviateVectorStore(weaviate_client=wc, index_name="Documents", text_key="content")
# setting up the storage for the embeddings
storage_context = StorageContext.from_defaults(vector_store = vector_store)
# set up the index
index = VectorStoreIndex(nodes, storage_context=storage_context, show_progress=True, service_context=service_context)
else:
raise ValueError(f'Unknown vector store {VECTOR_STORE}')
print('---- Document Q&A ----', '\n',
'Indexing Embedding Tokens: ', token_counter.total_embedding_token_count, '\n')
with st.sidebar:
st.markdown(f'#### {title} Settings')
st.selectbox(
'OpenAI model', options=OPENAI_MODELS_COMPLETIONS,
on_change=_set_state_cb, kwargs={'completions_model': 'selectbox_docs_completions_model_name'},
index=OPENAI_MODELS_COMPLETIONS.index(state.completions_model),
help='Allowed models. Accuracy, speed, token consumption and costs will vary.',
key='selectbox_docs_completions_model_name'
)
include_history = st.checkbox('Include history in prompts', value=False)
if st.button('Clear history'):
state.questions = []
state.past = []
# NOTE: Hide indexing button if cloud deployment (temporary fix for public demo)
if not json.loads(st.secrets['IS_CLOUD_DEPLOYMENT']) and st.button('Index documents'):
with st.spinner("Indexing..."):
_index_documents()
# GPT completion models can not handle web sites, so we scrape the URL in the user input
user_input = state.user_input
if user_input.strip().startswith('http'):
scraped_texts = scrape_articles([user_input])['text']
user_input = scraped_texts[0] if scraped_texts else user_input
user_input = user_input.replace('\n', ' ').replace('\r', '') if user_input else user_input
if include_history:
context = '\n\n'.join([f'| Question: "{q}" | Answer: "{a}" |' for q, a in zip(state.questions, state.past)])
refinement = \
'Finally, return results in markdown text, include bullet point format where appropriate. ' + \
'Add additional web links at the end of the response if this is useful.'
prompt_template = "Given this context ### {context} ###. Answer or summarize this: ### {doc_query} ###. {refinement}"
prompt = PromptTemplate(input_variables=['context', 'doc_query', 'refinement'], template=prompt_template)
query_prompt = prompt.format(context=context, doc_query=user_input, refinement=refinement)
else:
refinement = \
'Return results in markdown text, include bullet point format where appropriate. ' + \
'Add additional web links at the end of the response if this is useful.'
prompt_template = "Answer or summarize this: ### {doc_query} ###. {refinement}"
prompt = PromptTemplate(input_variables=['doc_query', 'refinement'], template=prompt_template)
query_prompt = prompt.format(doc_query=user_input, refinement=refinement)
if user_input_confirmed and state.user_input:
with st.spinner("Generating query answer..."):
try:
response = get_llm_doc_query_response(query_prompt, model_name=state.completions_model, _service_context=service_context)
print('---- Document Q&A ----', '\n',
'Embedding Tokens: ', token_counter.total_embedding_token_count, '\n',
'LLM Prompt Tokens: ', token_counter.prompt_llm_token_count, '\n',
'LLM Completion Tokens: ', token_counter.completion_llm_token_count, '\n',
'Total LLM Token Count: ', token_counter.total_llm_token_count)
except Exception as ex:
st.warning(f'Index does not exist. Please index some documents.')
st.error(str(ex))
return
if state.user_input:
st.subheader('🙋🏽 Answer')
with st.spinner("Generating query answer..."):
try:
# This will use cached response!
response = get_llm_doc_query_response(query_prompt, model_name=state.completions_model, _service_context=service_context)
except Exception as ex:
st.warning(f'Index does not exist. Please index some documents.')
st.error(str(ex))
return
if state.user_input not in state.questions:
state.questions.append(state.user_input)
state.generated.append((state.user_input, response))
state.past.append(response)
st.markdown(response)
with st.expander('View conversation history', expanded=False):
st.markdown('\n\n'.join([f'---\n**Question**\n\n{q}\n\n**Answer**\n\n{a}' for q, a in zip(state.questions, state.past)]))
estimated_cost = ((token_counter.prompt_llm_token_count / 1000.0) * LANG_MODEL_PRICING[state.completions_model]['input']) + \
((token_counter.completion_llm_token_count / 1000.0) * LANG_MODEL_PRICING[state.completions_model]['output'])
print('Document Q&A Estimated Cost: $', estimated_cost)
state.estimated_cost_doc = estimated_cost
state.cumulative_cost += estimated_cost
return response
| [
"refinement",
"Given this context ### {context} ###. Answer or summarize this: ### {doc_query} ###. {refinement}",
"context",
"Answer or summarize this: ### {doc_query} ###. {refinement}"
] |
2024-01-10 | asehmi/docs-n-data-knowledge-app | app_state.py | import os
import openai
import weaviate
import streamlit as st
from globals import (DEFAULT_MODEL_CONFIG, DB_TABLE)
# MAIN APP STATE ----------------------------------------------------------------
state = st.session_state
# Initial state builder
def build_initial_state():
openai_api_key = None
if st.secrets.get('OPENAI_API_KEY', None):
print('settings', 'OPENAI_API_KEY found')
openai_api_key = st.secrets['OPENAI_API_KEY']
else:
print('settings OPENAI_API_KEY not found!')
# Try get OpenAI api key from os env
# (this is the workaround for using Streamlit in Heroku)
if os.environ.get('OPENAI_API_KEY', None):
print('os.environ', 'OPENAI_API_KEY found')
openai_api_key = os.environ['OPENAI_API_KEY']
openai.api_key = os.getenv("OPENAI_API_KEY")
print('openai_api_key', 'sk_...' + openai_api_key[-5:], '\n') if openai_api_key else print('openai_api_key', 'NULL', '\n')
weaviate_api_key = st.secrets.get('WEAVIATE_API_KEY', None)
print('weaviate_api_key', weaviate_api_key[:5] + '...' + weaviate_api_key[-5:], '\n') if weaviate_api_key else print('weaviate_api_key', 'NULL', '\n')
WEAVIATE_URL = st.secrets.get('WEAVIATE_URL', None)
initial_state = {
# MAIN APP STATE
'openai_api_key': openai_api_key,
'weaviate_api_key': weaviate_api_key,
'WEAVIATE_URL': WEAVIATE_URL,
'menu_choice': 0,
# DATA PAGE STATE
'limit': 3,
'query': '',
'intermediate_steps': True,
'db_table': DB_TABLE,
'generated': [],
'past': [],
'questions': [],
# KNOWLEDGE GRAPH PAGE STATE
'user_input': '',
# MODEL STATE
'chat_model': DEFAULT_MODEL_CONFIG['chat_model'],
'completions_model': DEFAULT_MODEL_CONFIG['completions_model'],
'temperature': DEFAULT_MODEL_CONFIG['temperature'],
'top_p': DEFAULT_MODEL_CONFIG['top_p'],
'max_tokens': DEFAULT_MODEL_CONFIG['max_tokens'],
'estimated_cost_reset': 0,
'estimated_cost_data': 0,
'estimated_cost_doc': 0,
'estimated_cost_graph': 0,
'cumulative_cost': 0,
}
return initial_state
# State initializer
def init_app_state():
initial_state = build_initial_state()
for k, v in initial_state.items():
if not state.get(k, None):
setattr(state, k, v)
# State resetter
def reset_app_state():
initial_state = build_initial_state()
for k, v in initial_state.items():
setattr(state, k, v)
# STATE CALLBACK ----------------------------------------------------
# generic callback to set state
def _set_state_cb(**kwargs):
for state_key, widget_key in kwargs.items():
val = state.get(widget_key, None)
if val is not None or val == "":
setattr(state, state_key, state[widget_key])
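# Illustrative usage (mirrors how widgets elsewhere in this app wire their values
# back into state; the widget key below is a placeholder):
#   st.text_input("Query", key="text_input_query",
#                 on_change=_set_state_cb, kwargs={"query": "text_input_query"})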
| [] |
2024-01-10 | asehmi/docs-n-data-knowledge-app | app_llm_data_query.py | import os
import sqlite3
import pandas as pd
# from sqlalchemy import create_engine
from sqlalchemy.pool import StaticPool
from langchain.callbacks import get_openai_callback
from langchain.llms import OpenAI
from langchain.utilities.sql_database import SQLDatabase
from langchain_experimental.sql import SQLDatabaseChain
import retry
import logging
logging.basicConfig(level=logging.ERROR)
import streamlit as st
from globals import (
DB_FILE, OPENAI_MODELS_COMPLETIONS,
DEFAULT_MODEL_CONFIG, LANG_MODEL_PRICING
)
from app_state import (state, init_app_state, _set_state_cb)
init_app_state() # ensure all state variables are initialized
# DATA -------------------------------------------------------------------------
@st.cache_data(persist='disk')
def csv_to_df(excel_file):
df = pd.read_csv(excel_file)
return df
@st.cache_data(persist='disk')
def excel_to_df(excel_file):
# https://pandas.pydata.org/pandas-docs/stable/reference/api/pandas.read_excel.html
# New in Pandas version 1.3.0.
# The engine xlrd now only supports old-style .xls files. When engine=None, the following logic will be used to determine the engine:
# If path_or_buffer is an OpenDocument format (.odf, .ods, .odt), then odf will be used.
# Otherwise if path_or_buffer is an xls format, xlrd will be used.
# Otherwise if path_or_buffer is in xlsb format, pyxlsb will be used.
# Otherwise openpyxl will be used.
#
# import openpyxl
# df = pd.read_excel(excel_file, engine=openpyxl)
#
# Therefore... do not need to provide "engine" when using a "path_or_buffer"
df = pd.read_excel(excel_file, engine='openpyxl')
return df
def prepare_data(df):
df.columns = [x.replace(' ', '_').lower() for x in df.columns]
return df
@st.cache_resource()
def db_connection():
return sqlite3.connect(DB_FILE , check_same_thread=False)
@st.cache_resource()
def sql_database(table):
# create db engine
# eng = create_engine(
# url=f'sqlite:///file:{DB_FILE}&cache=shared',
# poolclass=StaticPool, # single connection for requests
# creator=lambda: db_connection(),
# )
# db = SQLDatabase(engine=eng)
db = SQLDatabase.from_uri(
database_uri = f'sqlite:///file:{DB_FILE}&cache=shared',
include_tables=[table], # we include only one table to save tokens in the prompt :)
sample_rows_in_table_info=2, # we only need 2 rows to get the table info
engine_args={'poolclass': StaticPool, 'creator': lambda: db_connection()},
)
return db
# OPENAI DATA QUERY ------------------------------------------------------------
# create OpenAI LLM connection
# NOTE: relies on environment key in case you want to
# remove entering the key in the app
def get_llm(
model_name: str = DEFAULT_MODEL_CONFIG['completions_model'],
temperature: float = DEFAULT_MODEL_CONFIG['temperature'],
top_p: float = DEFAULT_MODEL_CONFIG['top_p'],
max_tokens: int = DEFAULT_MODEL_CONFIG['max_tokens'],
max_retries: int = 3,
streaming: bool = False,
):
return OpenAI(
openai_api_key=os.environ['OPENAI_API_KEY'],
model_name=model_name,
temperature=temperature,
top_p=top_p,
max_tokens=max_tokens,
max_retries=max_retries,
streaming=streaming,
)
@retry.retry(tries=2, delay=5, backoff=3, jitter=(1, 5), max_delay=60, logger=logging.getLogger("LLM DATA QUERY (get_llm_data_query_response)"))
def get_llm_data_query_response(query, table, model_name=DEFAULT_MODEL_CONFIG['completions_model'], intermediate_steps=False, limit=3):
model_config = {
'model_name': model_name,
'temperature': 0, # override settings = do not halucinate!
'top_p': state.top_p,
'max_tokens': 2000, # override settings
}
llm = get_llm(**model_config)
# create SQLDatabaseChain LLM connection
db_chain = SQLDatabaseChain.from_llm(
llm=llm, db=sql_database(table), verbose=True,
# use_query_checker=True,
return_intermediate_steps=intermediate_steps,
top_k=limit
)
# run query and display result
with get_openai_callback() as token_counter:
if query:
if state.intermediate_steps:
result = db_chain(query)
else:
result = db_chain.run(query)
print('---- Data SQL Query ----', '\n',
'LLM Prompt Tokens:', token_counter.prompt_tokens, '\n',
'LLM Completion Tokens:', token_counter.completion_tokens, '\n',
'Total LLM Token Count:', token_counter.total_tokens)
estimated_cost = ((token_counter.prompt_tokens / 1000.0) * LANG_MODEL_PRICING[state.completions_model]['input']) + \
((token_counter.completion_tokens / 1000.0) * LANG_MODEL_PRICING[state.completions_model]['output'])
print('Data SQL Query Estimated Cost: $', estimated_cost)
state.estimated_cost_data = estimated_cost
state.cumulative_cost += estimated_cost
return result
# DATA CHAT PAGE ----------------------------------------------------------------
def main(title):
# Sidebar
with st.sidebar:
st.markdown(f'#### {title} Settings')
st.selectbox(
'OpenAI model', options=OPENAI_MODELS_COMPLETIONS,
on_change=_set_state_cb, kwargs={'completions_model': 'selectbox_data_completions_model_name'},
index=OPENAI_MODELS_COMPLETIONS.index(state.completions_model),
help='Allowed models. Accuracy, speed, token consumption and costs will vary.',
key='selectbox_data_completions_model_name'
)
# results limit
st.number_input(
'Results limit', value=state.limit, min_value=1, max_value=10, step=1,
on_change=_set_state_cb, kwargs={'limit': 'number_input_limit'},
help='Limit the number of results returned, which can improve performance and save OpenAI costs',
key='number_input_limit'
)
# Body
st.subheader('Upload Data')
excel_file = st.file_uploader('Choose an Excel file on your computer', type=['xlsx', 'csv'], accept_multiple_files=False)
if excel_file is None:
return
if excel_file.type in ['application/vnd.ms-excel', 'application/octet-stream', 'text/csv']:
df = csv_to_df(excel_file)
# state.db_table = excel_file.name.replace('.csv', '').replace(' ', '_').lower()
else: # 'application/vnd.openxmlformats-officedocument.spreadsheetml.sheet'
df = excel_to_df(excel_file)
# state.db_table = excel_file.name.replace('.xlsx', '').replace(' ', '_').lower()
if st.checkbox('Show Data', value=False):
st.dataframe(df)
# commit data to sql
data = prepare_data(df)
data.to_sql(state.db_table, db_connection(), if_exists='replace', index=False)
st.subheader('Query Data')
with st.form(key='data_chat_form', clear_on_submit=False):
# user query
st.text_input(
'Enter a data query in plain English', value=state.query,
help='Enter a question based on the uploaded dataset. Add as much detail as you like. '
'E.g., "What is X of Y in the table. Limit to 10 results, and format as JSON showing X and Y values only."',
key='text_input_query_data'
)
st.checkbox(
'Show Intermediate Steps', value=state.intermediate_steps,
key='checkbox_intermediate_steps'
)
apply_query = st.form_submit_button(
label='Ask', type='primary',
on_click=_set_state_cb, kwargs={
'intermediate_steps': 'checkbox_intermediate_steps',
'query': 'text_input_query_data',
'estimated_cost_data': 'estimated_cost_reset',
},
)
if apply_query and state.query and state.openai_api_key:
query = state.query + f' Strictly use only these data columns "{list(data.columns)}". ' + \
            'Do not wrap the SQL statement in quotes. Do not embellish the answer with any additional text.'
result = get_llm_data_query_response(
query, state.db_table,
model_name=state.completions_model,
intermediate_steps=state.intermediate_steps,
limit=state.limit
)
if state.intermediate_steps:
with st.expander('Intermediate Steps', expanded=False):
st.write(state.completions_model)
st.write(result['intermediate_steps'])
st.text(result['result'])
else:
st.text(result)
elif apply_query and not state.query:
st.info('Please enter a query above.') | [] |
2024-01-10 | dreaquil/langchain | langchain~retrievers~chatgpt_plugin_retriever.py | from __future__ import annotations
from typing import List
import aiohttp
import requests
from pydantic import BaseModel
from langchain.schema import BaseRetriever, Document
class ChatGPTPluginRetriever(BaseRetriever, BaseModel):
url: str
bearer_token: str
top_k: int = 3
filter: dict | None = None
aiosession: aiohttp.ClientSession | None = None
class Config:
"""Configuration for this pydantic object."""
arbitrary_types_allowed = True
def get_relevant_documents(self, query: str) -> List[Document]:
url, json, headers = self._create_request(query)
response = requests.post(url, json=json, headers=headers)
results = response.json()["results"][0]["results"]
docs = []
for d in results:
content = d.pop("text")
docs.append(Document(page_content=content, metadata=d))
return docs
async def aget_relevant_documents(self, query: str) -> List[Document]:
url, json, headers = self._create_request(query)
if not self.aiosession:
async with aiohttp.ClientSession() as session:
async with session.post(url, headers=headers, json=json) as response:
res = await response.json()
else:
async with self.aiosession.post(
url, headers=headers, json=json
) as response:
res = await response.json()
results = res["results"][0]["results"]
docs = []
for d in results:
content = d.pop("text")
docs.append(Document(page_content=content, metadata=d))
return docs
def _create_request(self, query: str) -> tuple[str, dict, dict]:
url = f"{self.url}/query"
json = {
"queries": [
{
"query": query,
"filter": self.filter,
"top_k": self.top_k,
}
]
}
headers = {
"Content-Type": "application/json",
"Authorization": f"Bearer {self.bearer_token}",
}
return url, json, headers
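# Illustrative usage (assumes a chatgpt-retrieval-plugin server is running locally;
# the URL and bearer token are placeholders):
#   retriever = ChatGPTPluginRetriever(url="http://localhost:8000", bearer_token="<token>")
#   docs = retriever.get_relevant_documents("alice's phone number")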
| [] |
2024-01-10 | dreaquil/langchain | tests~integration_tests~vectorstores~test_faiss.py | """Test FAISS functionality."""
import math
import tempfile
import pytest
from langchain.docstore.document import Document
from langchain.docstore.in_memory import InMemoryDocstore
from langchain.docstore.wikipedia import Wikipedia
from langchain.vectorstores.faiss import FAISS
from tests.integration_tests.vectorstores.fake_embeddings import FakeEmbeddings
def test_faiss() -> None:
"""Test end to end construction and search."""
texts = ["foo", "bar", "baz"]
docsearch = FAISS.from_texts(texts, FakeEmbeddings())
index_to_id = docsearch.index_to_docstore_id
expected_docstore = InMemoryDocstore(
{
index_to_id[0]: Document(page_content="foo"),
index_to_id[1]: Document(page_content="bar"),
index_to_id[2]: Document(page_content="baz"),
}
)
assert docsearch.docstore.__dict__ == expected_docstore.__dict__
output = docsearch.similarity_search("foo", k=1)
assert output == [Document(page_content="foo")]
def test_faiss_vector_sim() -> None:
"""Test vector similarity."""
texts = ["foo", "bar", "baz"]
docsearch = FAISS.from_texts(texts, FakeEmbeddings())
index_to_id = docsearch.index_to_docstore_id
expected_docstore = InMemoryDocstore(
{
index_to_id[0]: Document(page_content="foo"),
index_to_id[1]: Document(page_content="bar"),
index_to_id[2]: Document(page_content="baz"),
}
)
assert docsearch.docstore.__dict__ == expected_docstore.__dict__
query_vec = FakeEmbeddings().embed_query(text="foo")
output = docsearch.similarity_search_by_vector(query_vec, k=1)
assert output == [Document(page_content="foo")]
# make sure we can have k > docstore size
output = docsearch.max_marginal_relevance_search_by_vector(query_vec, k=10)
assert len(output) == len(texts)
def test_faiss_with_metadatas() -> None:
"""Test end to end construction and search."""
texts = ["foo", "bar", "baz"]
metadatas = [{"page": i} for i in range(len(texts))]
docsearch = FAISS.from_texts(texts, FakeEmbeddings(), metadatas=metadatas)
expected_docstore = InMemoryDocstore(
{
docsearch.index_to_docstore_id[0]: Document(
page_content="foo", metadata={"page": 0}
),
docsearch.index_to_docstore_id[1]: Document(
page_content="bar", metadata={"page": 1}
),
docsearch.index_to_docstore_id[2]: Document(
page_content="baz", metadata={"page": 2}
),
}
)
assert docsearch.docstore.__dict__ == expected_docstore.__dict__
output = docsearch.similarity_search("foo", k=1)
assert output == [Document(page_content="foo", metadata={"page": 0})]
def test_faiss_search_not_found() -> None:
"""Test what happens when document is not found."""
texts = ["foo", "bar", "baz"]
docsearch = FAISS.from_texts(texts, FakeEmbeddings())
# Get rid of the docstore to purposefully induce errors.
docsearch.docstore = InMemoryDocstore({})
with pytest.raises(ValueError):
docsearch.similarity_search("foo")
def test_faiss_add_texts() -> None:
"""Test end to end adding of texts."""
# Create initial doc store.
texts = ["foo", "bar", "baz"]
docsearch = FAISS.from_texts(texts, FakeEmbeddings())
# Test adding a similar document as before.
docsearch.add_texts(["foo"])
output = docsearch.similarity_search("foo", k=2)
assert output == [Document(page_content="foo"), Document(page_content="foo")]
def test_faiss_add_texts_not_supported() -> None:
"""Test adding of texts to a docstore that doesn't support it."""
docsearch = FAISS(FakeEmbeddings().embed_query, None, Wikipedia(), {})
with pytest.raises(ValueError):
docsearch.add_texts(["foo"])
def test_faiss_local_save_load() -> None:
"""Test end to end serialization."""
texts = ["foo", "bar", "baz"]
docsearch = FAISS.from_texts(texts, FakeEmbeddings())
with tempfile.NamedTemporaryFile() as temp_file:
docsearch.save_local(temp_file.name)
new_docsearch = FAISS.load_local(temp_file.name, FakeEmbeddings())
assert new_docsearch.index is not None
def test_faiss_similarity_search_with_relevance_scores() -> None:
"""Test the similarity search with normalized similarities."""
texts = ["foo", "bar", "baz"]
docsearch = FAISS.from_texts(
texts,
FakeEmbeddings(),
normalize_score_fn=lambda score: 1.0 - score / math.sqrt(2),
)
outputs = docsearch.similarity_search_with_relevance_scores("foo", k=1)
output, score = outputs[0]
assert output == Document(page_content="foo")
assert score == 1.0
def test_faiss_invalid_normalize_fn() -> None:
"""Test the similarity search with normalized similarities."""
texts = ["foo", "bar", "baz"]
docsearch = FAISS.from_texts(
texts, FakeEmbeddings(), normalize_score_fn=lambda _: 2.0
)
with pytest.raises(
ValueError, match="Normalized similarity scores must be between 0 and 1"
):
docsearch.similarity_search_with_relevance_scores("foo", k=1)
def test_missing_normalize_score_fn() -> None:
"""Test doesn't perform similarity search without a normalize score function."""
with pytest.raises(ValueError):
texts = ["foo", "bar", "baz"]
faiss_instance = FAISS.from_texts(texts, FakeEmbeddings())
faiss_instance.similarity_search_with_relevance_scores("foo", k=2)
| [] |
2024-01-10 | darrenwiens/dalle-slack | resources~worker_handler.py | import base64
import json
import os
import openai
import urllib.parse
import urllib
from urllib import request, parse
SLACK_BEARER_TOKEN = os.environ["SLACK_BEARER_TOKEN"]
OPENAI_API_KEY = os.environ["OPENAI_API_KEY"]
def create_modal(body, bearer_token):
"""Create a modal in Slack"""
req = request.Request(
"https://slack.com/api/views.open", data=json.dumps(body).encode("utf-8")
)
req.add_header("Content-Type", "application/json; charset=utf-8")
req.add_header("Authorization", f"Bearer {bearer_token}")
request.urlopen(req)
def create_message(url, body):
"""Send a message to Slack"""
req = request.Request(url, data=json.dumps(body).encode("utf-8"))
req.add_header("Content-Type", "application/json; charset=utf-8")
request.urlopen(req)
def lambda_handler(event, context):
body_dict = urllib.parse.parse_qs(event.get("body"))
payload_dict = {}
if body_dict.get("payload"):
payload_dict = json.loads(body_dict.get("payload")[0])
if body_dict.get("command"):
"""If the request body contains key 'command', it is
directly from the slash command."""
trigger_id = body_dict.get("trigger_id")[0]
response_url = body_dict.get("response_url")[0]
body = {
"trigger_id": trigger_id,
"view": {
"type": "modal",
"callback_id": "shortcut_modal",
"title": {"type": "plain_text", "text": "Dall-E Generator"},
"submit": {"type": "plain_text", "text": "Submit"},
"close": {"type": "plain_text", "text": "Cancel"},
"blocks": [
{
"type": "input",
"element": {"type": "plain_text_input", "multiline": True},
"label": {
"type": "plain_text",
"text": "Enter prompt for Dall-E (clicking Submit will cost $0.02)",
"emoji": True,
},
"block_id": "content_text",
},
],
"private_metadata": response_url,
},
}
create_modal(body, SLACK_BEARER_TOKEN)
elif body_dict.get("payload"):
"""Interaction payloads contain key 'payload'"""
response_url = payload_dict["view"]["private_metadata"]
if payload_dict.get("type") == "view_submission":
"""View submission payloads are sent on submit button click"""
openai.api_key = OPENAI_API_KEY
state = payload_dict["view"]["state"]["values"]
text_content = state["content_text"][list(state["content_text"].keys())[0]][
"value"
]
response = openai.Image.create(prompt=text_content, n=1, size="256x256")
image_url = response["data"][0]["url"]
payload = {
"blocks": [
{
"type": "image",
"title": {
"type": "plain_text",
"text": text_content,
"emoji": True,
},
"image_url": image_url,
"alt_text": text_content,
}
],
"response_type": "ephemeral",
}
create_message(response_url, payload)
return {"statusCode": 200}
| [] |
2024-01-10 | bitmuncher/bitscripts | gpt-cli.py | #!/usr/bin/env python3
from openai import OpenAI
import os
import getopt
import sys
client = OpenAI(api_key=os.environ['GPT_API_KEY'])
def print_help():
print(__file__ + ' [-d | --debug] [-h | --help]')
print('-d | --debug - enable debug output')
print('-h | --help - show this help')
def print_debug(msg):
debug_color = '\033[93m'
endc_color = '\033[0m'
print("\n" + debug_color + str(msg) + endc_color)
# check if the user wants to give a context for his question
def read_context():
user_input = input("\nWhat role should I play?\n(Enter to continue without context): ")
return user_input
# get the question the user wants to ask ChatGPT
def read_question():
user_input = ''
cnt = 0
while user_input == '':
if cnt > 0:
print("Please enter a question/request")
user_input = input("What is your question/request? ")
cnt += 1
return user_input
def get_response(context, question):
if context == '':
in_messages = [
{"role": "user", "content": question}
]
else:
in_messages = [
{"role": "system", "content": context},
{"role": "user", "content": question}
]
response = client.chat.completions.create(
model = "gpt-3.5-turbo-1106",
messages = in_messages
)
return response
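# Illustrative usage (the context and question are placeholders):
#   resp = get_response("You are a terse assistant.", "What is 2+2?")
#   print(resp.choices[0].message.content)  # answer text, as printed by main()
#   print(resp.usage.total_tokens)          # token count, as printed by main()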
# print answer in green
def print_answer(answer):
green = '\033[92m'
endc = '\033[0m'
print(green + answer + endc)
# main function
def main(argv):
debug = 0
try:
opts, args = getopt.getopt(argv, "dh",
[
"debug",
"help"
]
)
    except getopt.GetoptError:
print("Unknown parameter.")
sys.exit(2)
for opt, arg in opts:
if opt in ('-h', '--help'):
print_help()
sys.exit(0)
elif opt in ['-d', '--debug']:
debug = 1
if debug == 1:
print_debug("Debug output enabled.")
context = read_context()
question = read_question()
response = get_response(context, question)
if debug == 1:
print_debug(response)
print_debug("Model: " + response.model + "\n")
print_answer(response.choices[0].message.content)
print("Number of used tokens: " + str(response.usage.total_tokens) + "\n")
if __name__ == '__main__':
main(sys.argv[1:])
| [] |
2024-01-10 | litanlitudan/skyagi | skyagi~src~skyagi~simulation~simulation.py | import math
from typing import List
import faiss
from langchain import LLMChain
from langchain.docstore import InMemoryDocstore
from langchain.prompts.chat import (
AIMessagePromptTemplate,
ChatPromptTemplate,
HumanMessagePromptTemplate,
SystemMessagePromptTemplate,
)
from langchain.vectorstores import FAISS
from skyagi.context import Context
from skyagi.model import load_embedding_from_config
from skyagi.retriever import Retriever
from skyagi.simulation.agent import GenerativeAgent
# reference:
# https://python.langchain.com/en/latest/use_cases/agent_simulations/characters.html#create-a-generative-character
def relevance_score_fn(score: float) -> float:
"""Return a similarity score on a scale [0, 1]."""
# This will differ depending on a few things:
# - the distance / similarity metric used by the VectorStore
# - the scale of your embeddings (OpenAI's are unit norm. Many others are not!)
# This function converts the euclidean norm of normalized embeddings
# (0 is most similar, sqrt(2) most dissimilar)
# to a similarity function (0 to 1)
return 1.0 - score / math.sqrt(2)
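# Illustrative sanity check: identical embeddings (distance 0) map to a relevance
# of 1.0, while orthogonal unit embeddings (distance sqrt(2)) map to 0.0.
#   relevance_score_fn(0.0)           -> 1.0
#   relevance_score_fn(math.sqrt(2))  -> 0.0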
# reference:
# https://python.langchain.com/en/latest/use_cases/agent_simulations/characters.html#create-a-generative-character
def create_new_memory_retriever(ctx: Context):
"""Create a new vector store retriever unique to the agent."""
# Define your embedding model
embeddings_model = load_embedding_from_config(ctx.settings.model.embedding)
# Initialize the vectorstore as empty
embedding_size = 1536
index = faiss.IndexFlatL2(embedding_size)
vectorstore = FAISS(
embeddings_model.embed_query,
index,
InMemoryDocstore({}),
{},
relevance_score_fn=relevance_score_fn,
)
return Retriever(
vectorstore=vectorstore, other_score_keys=["importance"], k=15, embedding_model=embeddings_model
)
def run_conversation(
agents: List[GenerativeAgent], initial_observation: str, ctx: Context
) -> None:
"""Runs a conversation between agents."""
ctx.observations.append(
"A conversation happened among "
+ ",".join(list(map(lambda agent: agent.name, agents)))
)
ctx.observations.append(initial_observation)
_, observation = agents[1].generate_reaction(initial_observation)
ctx.observations.append(observation)
turns = 0
while True:
break_dialogue = False
for agent in agents:
stay_in_dialogue, observation = agent.generate_dialogue_response(
observation
)
ctx.observations.append(observation)
if not stay_in_dialogue:
break_dialogue = True
if break_dialogue:
break
turns += 1
def interview_agent(agent: GenerativeAgent, message: str, username: str) -> str:
"""Help the notebook user interact with the agent."""
new_message = f"{username} says {message}"
return agent.generate_dialogue_response(new_message)[1]
# whether initiator wants to talk to recipient based on the observations
def talks_to(
initiator: GenerativeAgent, recipient: GenerativeAgent, observations: List[str]
) -> str:
instruct = "Here are the timeline of events happened for these NPC characters:\n{observation}\n"
instruct += "I want you to behave as {initiator_name} and talk to me as I am {recipient_name}.\n"
instruct += (
"If you do not want to or can not talk to {recipient_name}, just output NOTHING"
)
messages = [
SystemMessagePromptTemplate.from_template(
"You are the AI behind a NPC character called {initiator_name}"
),
HumanMessagePromptTemplate.from_template(instruct),
]
observation = "\n".join(observations)
message = (
LLMChain(
llm=initiator.llm,
prompt=ChatPromptTemplate.from_messages(messages),
)
.run(
observation=observation,
initiator_name=initiator.name,
recipient_name=recipient.name,
)
.strip()
)
if "NOTHING" in message:
return ""
messages.append(AIMessagePromptTemplate.from_template(message))
messages.append(
HumanMessagePromptTemplate.from_template(
"Did {initiator_name} talk to {recipient_name}, please answer yes or no"
)
)
resp = (
LLMChain(
llm=initiator.llm,
prompt=ChatPromptTemplate.from_messages(messages),
)
.run(
observation=observation,
initiator_name=initiator.name,
recipient_name=recipient.name,
)
.strip()
)
if "no" in resp:
return ""
return message
| [
"You are the AI behind a NPC character called {initiator_name}",
"Did {initiator_name} talk to {recipient_name}, please answer yes or no"
] |
2024-01-10 | PoorRican/bimWorkshopDatabase | src~chains.py | from pathlib import Path
from langchain_community.chat_models import ChatOpenAI
from langchain_core.output_parsers import StrOutputParser
from langchain_core.prompts import PromptTemplate, HumanMessagePromptTemplate, ChatPromptTemplate, MessagesPlaceholder
from langchain_core.runnables import Runnable
_ROOT = Path(__file__).parent.parent
PARAMETER_PROMPT_FILE = _ROOT.joinpath("PARAMETER_PROMPT.txt")
PARAMETER_PROMPT = HumanMessagePromptTemplate.from_template(
open(PARAMETER_PROMPT_FILE, "r").read()
)
VALUE_PROMPT_FILE = _ROOT.joinpath("VALUE_PROMPT.txt")
VALUE_PROMPT = HumanMessagePromptTemplate.from_template(
open(VALUE_PROMPT_FILE, "r").read()
)
FORMAT_PROMPT = PromptTemplate.from_template(
"""Please format the given list as a valid python list:
For example:
1. foo: a description
2. bar: another description
becomes:
```python
["foo", "bar"]
```
Here is the list of values:
{content}
```python
[
""")
def extract_list_from_response(response: str) -> list[str]:
""" Extract a list of values from the response.
Parameters
----------
response: str
The response to extract values from. It should be a valid python list.
Returns
-------
list[str]
The list of values.
"""
# remove any text after "]"
response = response.split("]")[0]
if response[0] != "[":
response = "[" + response
if response[-1] != "]":
response = response + "]"
extracted: list[str] = eval(response)
return extracted
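# Illustrative example: the model often omits the opening bracket (the prompt
# already ends with "[") and may append trailing prose, both of which are handled.
#   extract_list_from_response('"foo", "bar"] Hope this helps!')  -> ['foo', 'bar']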
def build_formatter_chain(chat: ChatOpenAI) -> Runnable:
""" Build a chain of runnables to format a list of values.
The runnable accepts a dictionary with the following keys:
- content: The list of values to format.
The output of the runnable is a formatted string.
Parameters
----------
chat : ChatOpenAI
The chatbot to use to generate values.
Returns
-------
Runnable
The chain of runnables to generate values.
"""
return (
FORMAT_PROMPT
| chat
| StrOutputParser()
)
def build_parameter_chain(chat: ChatOpenAI) -> Runnable:
""" Build a chain of runnables to generate a list of parameters for a given product.
Parameters
----------
chat : ChatOpenAI
The chatbot to use to generate parameters.
Returns
-------
Runnable
"""
_prompt = ChatPromptTemplate.from_messages([PARAMETER_PROMPT])
return (
_prompt
| chat
)
def build_parameter_value_chain(chat: ChatOpenAI, parse_chat: ChatOpenAI) -> Runnable:
""" Build a chain of runnables to generate a list of values for a given parameter.
The runnable accepts a dictionary with the following keys:
- parameter: The name of the parameter to generate values for.
- product: The label of the product to generate values for.
The output of the runnable is a parsed `ParameterList` object.
Parameters
----------
chat : ChatOpenAI
The chatbot to use to generate values.
parse_chat : ChatOpenAI
The chatbot to use to parse the values. Should be of lower temperature than `chat` and does not
need to be as capable.
Returns
-------
Runnable
The chain of runnables to generate values.
"""
value_prompt_messages = ChatPromptTemplate.from_messages([
PARAMETER_PROMPT,
MessagesPlaceholder(variable_name='ai_message'),
VALUE_PROMPT])
chain = (
value_prompt_messages
| chat
| StrOutputParser()
)
formatter_chain = build_formatter_chain(parse_chat)
return (
{'content': chain}
| formatter_chain
)
| [
"PARAMETER_PROMPT.txt",
"ai_message",
"Please format the given list as a valid python list:\n\nFor example:\n1. foo: a description\n2. bar: another description\n\nbecomes:\n```python\n[\"foo\", \"bar\"]\n```\n\nHere is the list of values:\n{content}\n\n```python\n[\n",
"VALUE_PROMPT.txt"
] |
2024-01-10 | PoorRican/bimWorkshopDatabase | src~builder_functions.py | import asyncio
import csv
from asyncio import sleep
from pathlib import Path
from typing import List, Dict, Iterator, Coroutine, Any
from langchain_community.chat_models import ChatOpenAI
from dotenv import load_dotenv
from langchain_core.messages import AIMessage
from openai import RateLimitError
from .chains import build_parameter_chain, build_parameter_value_chain, extract_list_from_response, build_formatter_chain
from .loading import OmniClass
from .typedefs import Parameter
load_dotenv()
SAVE_PATH = Path('../data')
ORDINALS = [
"first",
"2nd",
"3rd",
"4th",
"5th",
"6th",
"7th",
"8th",
"9th",
"10th",
"11th",
"12th",
"13th",
"14th",
"15th",
"16th",
"17th",
"18th",
"19th",
"twentieth"
]
GPT3_LOW_T = ChatOpenAI(model_name='gpt-3.5-turbo', temperature=0.3)
GPT3_HIGH_T = ChatOpenAI(model_name='gpt-3.5-turbo', temperature=0.9)
PARAMETER_CHAIN = build_parameter_chain(GPT3_HIGH_T)
VALUE_CHAIN = build_parameter_value_chain(GPT3_LOW_T, GPT3_LOW_T)
FORMATTER_CHAIN = build_formatter_chain(GPT3_LOW_T)
async def _generate_parameters(product_name: str) -> tuple[AIMessage, list[str]]:
llm_response = await PARAMETER_CHAIN.ainvoke({"omniclass": product_name})
formatted = await FORMATTER_CHAIN.ainvoke({"content": llm_response.content})
parameter_list = extract_list_from_response(formatted)
return llm_response, parameter_list
async def generate_parameters(product_name: str) -> tuple[AIMessage, list[str]]:
feedback_msg = f"parameters for {product_name}"
while True:
try:
ai_message, parameters = await _generate_parameters(product_name)
if len(parameters) == 20:
return ai_message, parameters
else:
print(f"Got less than 20 {feedback_msg}, retrying...")
except RateLimitError:
await sleep(15)
except SyntaxError:
print(f"Could not understand response when generating {feedback_msg}, retrying...")
def value_coroutines(product_name: str, ai_message: AIMessage,
parameters: list[str]) -> list[Coroutine[Any, Any, Parameter]]:
""" Generate coroutines for generating all values for a given product.
This is used in `generate_all_values` and in the backend to asynchronously load values.
"""
assert len(parameters) == len(ORDINALS)
return [generate_values(product_name, ordinal, ai_message, parameter) for ordinal, parameter in zip(ORDINALS, parameters)]
async def generate_all_values(product_name: str, parameters: list[str], ai_message: AIMessage) -> Dict[str, List[str]]:
""" Generate all values for a given product in a synchronous manner.
This is to be used when locally generating a CSV file.
"""
tasks = value_coroutines(product_name, ai_message, parameters)
kv_columns = {}
for parameter in await asyncio.gather(*tasks):
kv_columns[parameter.name] = parameter.values
return kv_columns
async def _generate_values(product_name: str, ordinal: str, ai_message: AIMessage) -> list[str]:
value_response = await VALUE_CHAIN.ainvoke({
"ordinal": ordinal,
"ai_message": [ai_message],
"omniclass": product_name})
return extract_list_from_response(value_response)
async def generate_values(product_name: str, ordinal: str, ai_message: AIMessage, parameter_name: str) -> Parameter:
feedback_msg = f"{ordinal} parameter for {product_name}"
while True:
try:
values = await _generate_values(product_name, ordinal, ai_message)
if len(values) == 20:
return Parameter(parameter_name, values)
else:
print(f"Got less than 20 values for {feedback_msg}, retrying...")
except RateLimitError:
await sleep(30)
except SyntaxError:
print(f"Could not understand response when generating values for {feedback_msg}, retrying...")
def save_product(path: Path, omniclass: OmniClass, kv_columns: Dict[str, List[str]]) -> None:
""" Save a product's parameters and values to a CSV file.
Parameters
----------
path : Path
The path to save the final CSV file to.
omniclass : OmniClass
The OmniClass value. This is used to generate the filename.
kv_columns : Dict[str, List[str]]
A dictionary of parameter names to lists of values.
Returns
-------
None
"""
fn = f'{omniclass.number} {omniclass.name}.csv'
fn_path = path.joinpath(fn)
with open(fn_path, 'w') as f:
writer = csv.writer(f)
writer.writerow([i for i in kv_columns.keys()])
# write values
for i in range(len(kv_columns.keys())):
writer.writerow([kv_columns[k][i] for k in kv_columns.keys()])
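# Illustrative output (parameter names and values are hypothetical): for
# kv_columns = {"Material": ["Oak", "Steel", ...], "Width": ["30 in", "36 in", ...]}
# the file gets one header row of parameter names followed by one row per value
# index; the row loop relies on every parameter having exactly 20 values, the
# count that generate_values enforces.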
async def process_product(omniclass: OmniClass):
omniclass_name = omniclass.name
print(f"\n*** Processing {omniclass_name}...")
ai_message, parameters = await generate_parameters(omniclass_name)
kv_columns = await generate_all_values(omniclass_name, parameters, ai_message)
save_product(SAVE_PATH, omniclass, kv_columns)
print(f"\n*** ...Done processing {omniclass_name}. ***\n")
| [] |
2024-01-10 | yallenMD/RenderIA | message.py | from linebot import (LineBotApi, WebhookHandler)
from linebot.exceptions import (InvalidSignatureError)
from linebot.models import *
import openai
import tempfile, os
import datetime
import time
import requests
import json
from newsapi import NewsApiClient
line_bot_api = LineBotApi(os.getenv('CHANNEL_ACCESS_TOKEN'))
# Channel Secret
handler = WebhookHandler(os.getenv('CHANNEL_SECRET'))
# Twelve API Key
api_key = os.getenv('TWELVEDATA_API_KEY')
# Open AI API Key
openai.api_key = os.getenv('OPENAI_API_KEY')
#News API Key
news_key = os.getenv('NEWS_API_KEY')
# Init
newsapi = NewsApiClient(api_key=news_key)
def GPT_message(text):
response = openai.ChatCompletion.create(
model="gpt-3.5-turbo",
messages=[{"role": "system", "content": 'You are a helpful financial analyst who understands stocks and crypto. Pretend like you are texting someone and limit the text messages to an appropriate length.'},
{"role": "user", "content": text}
])
    # Reassemble the response
answer = response['choices'][0]['message']['content']
return answer
def summarize(text):
if len(text) <= 40:
return text
response = openai.ChatCompletion.create(
model="gpt-3.5-turbo",
messages=[{"role": "system", "content": 'You are a professional text summarizer that will summarize the news article titles and descriptions you are given into strictly 40 characters or less. (White spaces are considered a character)'},
{"role": "user", "content": text}
])
    # Reassemble the response
answer = response['choices'][0]['message']['content']
return answer if len(answer) <= 40 else answer[:37]+'...'
def price(ticker,api_key):
url = f"https://api.twelvedata.com/price?symbol={ticker}&apikey={api_key}"
response = requests.get(url)
response = response.json()
price = response['price'][:-3]
return f"The price of {ticker} is {price} USD"
def exchange_rate(exchange_from,exchange_to,api_key):
url = f"https://api.twelvedata.com/exchange_rate?symbol={exchange_from}/{exchange_to}&apikey={api_key}"
response = requests.get(url)
response = response.json()
exchange_rate = response['rate']
return exchange_rate
def currency_conversion(exchange_from,exchange_to,amount,api_key):
url = f"https://api.twelvedata.com/currency_conversion?symbol={exchange_from}/{exchange_to}&amount={amount}&apikey={api_key}"
response = requests.get(url)
response = response.json()
original_amount = amount
new_amount = response['amount']
return f"{original_amount} {exchange_from} is equivalent to {new_amount} {exchange_to}"
def news(subject,news_key):
url = f"https://newsapi.org/v2/everything?q={subject}&apiKey={news_key}"
response = requests.get(url)
response = response.json()
first_five_articles = [(article['title'], article['url'], article['urlToImage']) for article in response['articles'][:5]]
articles = ""
for title, url, urlToImage in first_five_articles:
articles += f"Title: {title}\nURL: {url}\nImage: {urlToImage}\n---\n"
return articles
def news_carousel(subject,news_key):
url = f"https://newsapi.org/v2/everything?q={subject}&apiKey={news_key}"
response = requests.get(url)
response = response.json()
articles = response['articles'][:10]
titles = [summarize(article['title']) for article in articles]
descriptions = [summarize(article['description']) for article in articles]
urls = [article['url'] for article in articles]
images = [article['urlToImage'] for article in articles]
message = TemplateSendMessage(
alt_text='Top 10 headlines requested by you',
template=CarouselTemplate(
columns=[CarouselColumn(thumbnail_image_url=images[i],title=titles[i],text=descriptions[i],actions=[URITemplateAction(label='Link to article',uri=urls[i])]) for i in range(5)]
)
)
return message | [
"Link to article",
"You are a helpful financial analyst who understands stocks and crypto. Pretend like you are texting someone and limit the text messages to an appropriate length.",
"You are a professional text summarizer that will summarize the news article titles and descriptions you are given into strictly 40 characters or less. (White spaces are considered a character)"
] |
2024-01-10 | charlesfrye/ask-fsdl | etl~pdfs.py | import modal
import etl.shared
# extend the shared image with PDF-handling dependencies
image = etl.shared.image.pip_install(
"arxiv==1.4.7",
"pypdf==3.8.1",
)
stub = modal.Stub(
name="etl-pdfs",
image=image,
secrets=[
modal.Secret.from_name("mongodb-fsdl"),
],
mounts=[
# we make our local modules available to the container
modal.Mount.from_local_python_packages("app.docstore", "app.utils")
],
)
@stub.local_entrypoint()
def main(json_path="data/llm-papers.json", collection=None, db=None):
"""Calls the ETL pipeline using a JSON file with PDF metadata.
modal run etl/pdfs.py --json-path /path/to/json
"""
import json
from pathlib import Path
json_path = Path(json_path).resolve()
if not json_path.exists():
print(f"{json_path} not found, writing to it from the database.")
paper_data = fetch_papers.call()
paper_data_json = json.dumps(paper_data, indent=2)
with open(json_path, "w") as f:
f.write(paper_data_json)
with open(json_path) as f:
paper_data = json.load(f)
paper_data = get_pdf_url.map(paper_data)
documents = etl.shared.unchunk(extract_pdf.map(paper_data, return_exceptions=True))
with etl.shared.stub.run():
chunked_documents = etl.shared.chunk_into(documents, 10)
list(
etl.shared.add_to_document_db.map(
chunked_documents, kwargs={"db": db, "collection": collection}
)
)
@stub.function(
image=image,
# we can automatically retry execution of Modal functions on failure
# -- this retry policy does exponential backoff
retries=modal.Retries(backoff_coefficient=2.0, initial_delay=5.0, max_retries=3),
# we can also limit the number of concurrent executions of a Modal function
# -- here we limit to 50 so we don't hammer the arXiV API too hard
concurrency_limit=50,
)
def extract_pdf(paper_data):
"""Extracts the text from a PDF and adds metadata."""
import logging
import arxiv
from langchain.document_loaders import PyPDFLoader
pdf_url = paper_data.get("pdf_url")
if pdf_url is None:
return []
logger = logging.getLogger("pypdf")
logger.setLevel(logging.ERROR)
try:
loader = PyPDFLoader(pdf_url)
except Exception:
return []
try:
documents = loader.load_and_split()
except Exception:
return []
documents = [document.dict() for document in documents]
for document in documents: # rename page_content to text, handle non-unicode data
document["text"] = (
document["page_content"].encode("utf-8", errors="replace").decode()
)
document.pop("page_content")
if "arxiv" in pdf_url:
arxiv_id = extract_arxiv_id_from_url(pdf_url)
# create an arXiV database client with a 5 second delay between requests
client = arxiv.Client(page_size=1, delay_seconds=5, num_retries=5)
# describe a search of arXiV's database
search_query = arxiv.Search(id_list=[arxiv_id], max_results=1)
try:
# execute the search with the client and get the first result
result = next(client.results(search_query))
except ConnectionResetError as e:
raise Exception("Triggered request limit on arxiv.org, retrying") from e
metadata = {
"arxiv_id": arxiv_id,
"title": result.title,
"date": result.updated,
}
else:
metadata = {"title": paper_data.get("title")}
documents = annotate_endmatter(documents)
for document in documents:
document["metadata"]["source"] = paper_data.get("url", pdf_url)
document["metadata"] |= metadata
title, page = (
document["metadata"]["title"],
document["metadata"]["page"],
)
if title:
document["metadata"]["full-title"] = f"{title} - p{page}"
documents = etl.shared.enrich_metadata(documents)
return documents
@stub.function()
def fetch_papers(collection_name="all-content"):
"""Fetches papers from the LLM Lit Review, https://tfs.ai/llm-lit-review."""
from app import docstore
client = docstore.connect()
collection = client.get_database("llm-lit-review").get_collection(collection_name)
# Query to retrieve documents with the "PDF?" field set to true
query = {"properties.PDF?.checkbox": {"$exists": True, "$eq": True}}
# Projection to include the "Name", "url", and "Tags" fields
projection = {
"properties.Name.title.plain_text": 1,
"properties.Link.url": 1,
"properties.Tags.multi_select.name": 1,
}
# Fetch documents matching the query and projection
documents = list(collection.find(query, projection))
assert documents
papers = []
for doc in documents:
paper = {}
paper["title"] = doc["properties"]["Name"]["title"][0]["plain_text"]
paper["url"] = doc["properties"]["Link"]["url"]
paper["tags"] = [
tag["name"]
for tag in doc.get("properties", {}).get("Tags", {}).get("multi_select", [])
]
papers.append(paper)
assert papers
return papers
@stub.function()
def get_pdf_url(paper_data):
"""Attempts to extract a PDF URL from a paper's URL."""
url = paper_data["url"]
if url.strip("#/").endswith(".pdf"):
pdf_url = url
elif "arxiv.org" in url:
arxiv_id = extract_arxiv_id_from_url(url)
pdf_url = f"https://arxiv.org/pdf/{arxiv_id}.pdf"
elif "aclanthology.org" in url:
pdf_url = url.strip("/")
        pdf_url += ".pdf"
else:
pdf_url = None
paper_data["pdf_url"] = pdf_url
return paper_data
def annotate_endmatter(pages, min_pages=6):
"""Heuristic for detecting reference sections."""
out, after_references = [], False
for idx, page in enumerate(pages):
content = page["text"].lower()
if idx >= min_pages and ("references" in content or "bibliography" in content):
after_references = True
page["metadata"]["is_endmatter"] = after_references
out.append(page)
return out
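# Worked example (hypothetical 10-page paper): if "references" first appears on
# page 7, pages 0-6 end up with metadata["is_endmatter"] = False and pages 7-9
# with True; matches on pages 0-5 are ignored because of min_pages=6.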
def extract_arxiv_id_from_url(url):
import re
# pattern = r"(?:arxiv\.org/abs/|arxiv\.org/pdf/)(\d{4}\.\d{4,5}(?:v\d+)?)"
match_arxiv_url = r"(?:arxiv\.org/abs/|arxiv\.org/pdf/)"
match_id = r"(\d{4}\.\d{4,5}(?:v\d+)?)" # 4 digits, a dot, and 4 or 5 digits
optional_version = r"(?:v\d+)?"
pattern = match_arxiv_url + match_id + optional_version
match = re.search(pattern, url)
if match:
return match.group(1)
else:
return None
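# Doctest-style examples (URLs are illustrative):
# extract_arxiv_id_from_url("https://arxiv.org/abs/2106.09685")     -> "2106.09685"
# extract_arxiv_id_from_url("https://arxiv.org/pdf/2106.09685.pdf") -> "2106.09685"
# extract_arxiv_id_from_url("https://example.com/paper.pdf")        -> None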
| [] |
2024-01-10 | charlesfrye/ask-fsdl | app~backend.py | """Builds a backend with CLI and webhook for Q&A on the Full Stack corpus.
For details on corpus construction, see the accompanying notebooks."""
import modal
from . import vecstore
from .utils import pretty_log
# definition of our container image for jobs on Modal
# Modal gets really powerful when you start using multiple images!
image = modal.Image.debian_slim( # we start from a lightweight linux distro
python_version="3.10" # we add a recent Python version
).pip_install( # and we install the following packages:
"langchain==0.0.321",
# 🦜🔗: a framework for building apps with LLMs
"langsmith==0.0.49",
# 🦜🛠️: monitoring framework for LLM apps
"openai~=0.27.7",
# high-quality language models and cheap embeddings
"tiktoken",
# tokenizer for OpenAI models
"faiss-cpu",
# vector storage and similarity search
"pymongo[srv]==3.11",
# python client for MongoDB, our data persistence solution
"gradio~=3.41",
# simple web UIs in Python, from 🤗
)
# we define a Stub to hold all the pieces of our app
# most of the rest of this file just adds features onto this Stub
stub = modal.Stub(
name="askfsdl-backend",
image=image,
secrets=[
# this is where we add API keys, passwords, and URLs, which are stored on Modal
modal.Secret.from_name("mongodb-fsdl"),
modal.Secret.from_name("openai-api-key-fsdl"),
modal.Secret.from_name("langchain-api-key-fsdl"),
],
mounts=[
# we make our local modules available to the container
modal.Mount.from_local_python_packages(
"app.vecstore", "app.docstore", "app.utils", "app.prompts"
)
],
)
SOURCE_LIMIT, QUERY_LIMIT = 3, 6
VECTOR_DIR = vecstore.VECTOR_DIR
vector_storage = modal.NetworkFileSystem.persisted("vector-vol")
@stub.function(
image=image,
network_file_systems={
str(VECTOR_DIR): vector_storage,
},
)
@modal.web_endpoint(method="GET")
def web(query: str, request_id=None):
"""Exposes our Q&A chain for queries via a web endpoint."""
pretty_log(
f"handling request with client-provided id: {request_id}"
) if request_id else None
answer, metadata = qanda.remote(
query,
request_id=request_id,
with_logging=True,
)
return {"answer": answer, "metadata": metadata}
@stub.function(
image=image,
network_file_systems={
str(VECTOR_DIR): vector_storage,
},
)
def cli(query: str):
answer, _ = qanda.remote(query, with_logging=False)
pretty_log("🦜 ANSWER 🦜")
print(answer)
@stub.function(
image=image,
network_file_systems={
str(VECTOR_DIR): vector_storage,
},
keep_warm=1,
)
def qanda(query: str, request_id=None, with_logging: bool = False) -> tuple[str, dict]:
"""Runs sourced Q&A for a query using LangChain.
Arguments:
query: The query to run Q&A on.
request_id: A unique identifier for the request.
with_logging: If True, prints the interaction to the logs.
"""
import langchain
from langchain.chains.qa_with_sources import load_qa_with_sources_chain
from langchain.chat_models import ChatOpenAI
from . import prompts
from . import vecstore
embedding_engine = vecstore.get_embedding_engine(allowed_special="all")
metadata = {}
pretty_log("connecting to vector storage")
vector_index = vecstore.connect_to_vector_index(
vecstore.INDEX_NAME, embedding_engine
)
pretty_log("connected to vector storage")
pretty_log(f"found {vector_index.index.ntotal} vectors to search over")
pretty_log(f"running on query: {query}")
pretty_log("selecting sources by similarity to query")
sources_and_scores = vector_index.similarity_search_with_score(query, k=QUERY_LIMIT)
sources, scores = postprocess_sources(sources_and_scores)
metadata["retrieval_scores"] = scores
pretty_log("running query against Q&A chain")
llm = ChatOpenAI(model_name="gpt-4", temperature=0, max_tokens=256)
chain = load_qa_with_sources_chain(
llm,
chain_type="stuff",
verbose=with_logging,
prompt=prompts.main,
document_variable_name="sources",
)
with langchain.callbacks.collect_runs() as cb:
result = chain.invoke(
{"input_documents": sources, "question": query},
)
metadata["run_id"] = cb.traced_runs[0].id
answer = result["output_text"]
if with_logging:
print(answer)
return answer, metadata
@stub.function(
image=image,
network_file_systems={
str(VECTOR_DIR): vector_storage,
},
cpu=8.0, # use more cpu for vector storage creation
)
def create_vector_index(collection: str = None, db: str = None):
"""Creates a vector index for a collection in the document database."""
    from . import docstore
pretty_log("connecting to document store")
db = docstore.get_database(db)
pretty_log(f"connected to database {db.name}")
collection = docstore.get_collection(collection, db)
pretty_log(f"collecting documents from {collection.name}")
docs = docstore.get_documents(collection, db)
pretty_log("splitting into bite-size chunks")
ids, texts, metadatas = prep_documents_for_vector_storage(docs)
pretty_log(f"sending to vector index {vecstore.INDEX_NAME}")
embedding_engine = vecstore.get_embedding_engine(disallowed_special=())
vector_index = vecstore.create_vector_index(
vecstore.INDEX_NAME, embedding_engine, texts, metadatas
)
vector_index.save_local(folder_path=VECTOR_DIR, index_name=vecstore.INDEX_NAME)
pretty_log(f"vector index {vecstore.INDEX_NAME} created")
@stub.function(image=image)
def drop_docs(collection: str = None, db: str = None):
"""Drops a collection from the document storage."""
from . import docstore
docstore.drop(collection, db)
def postprocess_sources(sources_and_scores):
sources_and_scores = sorted(
sources_and_scores, key=lambda ss: ss[1], reverse=True
) # sort by decreasing similarity
sources, scores = zip(*sources_and_scores) # unpack
sources = [source for source, score in zip(sources, scores) if score < 0.4]
return sources[:SOURCE_LIMIT], scores[:SOURCE_LIMIT]
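# Illustrative behaviour (scores are hypothetical): given
#   [(doc_a, 0.45), (doc_b, 0.10), (doc_c, 0.30)]
# the pairs are sorted by score descending, documents scoring >= 0.4 are
# dropped, and at most SOURCE_LIMIT documents are kept, returning
# ([doc_c, doc_b], (0.45, 0.30, 0.10)) -- note the scores tuple is not filtered.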
def prep_documents_for_vector_storage(documents):
"""Prepare documents from document store for embedding and vector storage.
Documents are split into chunks so that they can be used with sourced Q&A.
Arguments:
documents: A list of LangChain.Documents with text, metadata, and a hash ID.
"""
from langchain.text_splitter import RecursiveCharacterTextSplitter
text_splitter = RecursiveCharacterTextSplitter.from_tiktoken_encoder(
chunk_size=500, chunk_overlap=100, allowed_special="all"
)
ids, texts, metadatas = [], [], []
for document in documents:
text, metadata = document["text"], document["metadata"]
doc_texts = text_splitter.split_text(text)
doc_metadatas = [metadata] * len(doc_texts)
ids += [metadata.get("sha256")] * len(doc_texts)
texts += doc_texts
metadatas += doc_metadatas
return ids, texts, metadatas
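# Illustrative output shape (the document is hypothetical): a single document
# whose text splits into three chunks yields
#   ids       -> [sha, sha, sha]            (its sha256 hash repeated per chunk)
#   texts     -> [chunk_1, chunk_2, chunk_3]
#   metadatas -> [metadata, metadata, metadata]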
| [] |
2024-01-10 | charlesfrye/ask-fsdl | app~vecstore.py | """Utilities for creating and using vector indexes."""
from pathlib import Path
from .utils import pretty_log
INDEX_NAME = "openai-ada-fsdl"
VECTOR_DIR = Path("/vectors")
def connect_to_vector_index(index_name, embedding_engine):
"""Adds the texts and metadatas to the vector index."""
from langchain.vectorstores import FAISS
vector_index = FAISS.load_local(VECTOR_DIR, embedding_engine, index_name)
return vector_index
def get_embedding_engine(model="text-embedding-ada-002", **kwargs):
"""Retrieves the embedding engine."""
from langchain.embeddings import OpenAIEmbeddings
embedding_engine = OpenAIEmbeddings(model=model, **kwargs)
return embedding_engine
def create_vector_index(index_name, embedding_engine, documents, metadatas):
"""Creates a vector index that offers similarity search."""
from langchain import FAISS
    files = list(VECTOR_DIR.glob(f"{index_name}.*"))
if files:
for file in files:
file.unlink()
pretty_log("existing index wiped")
index = FAISS.from_texts(
texts=documents, embedding=embedding_engine, metadatas=metadatas
)
return index
| [] |
2024-01-10 | joeloverbeck/tree-of-thoughts | api_requests.py | import openai
from defines import (
AI_MODEL,
INSTRUCT_GPT_PROMPT_ANSWER_OPENING,
INSTRUCT_GPT_PROMPT_HEADER,
)
def request_response_from_ai_model(prompt):
"""Tries to get a response from GPT
Args:
prompt (str): the prompt that will be sent to GPT
Returns:
str: either a valid response or None
"""
# Read API key from file
with open("api_key.txt", "r", encoding="utf8") as file:
openai.api_key = file.read().strip()
prompt = INSTRUCT_GPT_PROMPT_HEADER + prompt + INSTRUCT_GPT_PROMPT_ANSWER_OPENING
response = openai.ChatCompletion.create(
model=AI_MODEL,
temperature=1,
messages=[{"role": "user", "content": prompt}],
max_tokens=2048,
)
return response.choices[0]["message"]["content"]
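# Example call (the prompt text is illustrative; AI_MODEL and the prompt
# header/opening wrappers come from defines.py):
# answer = request_response_from_ai_model("Propose three next thoughts for this branch.")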
| [
"PLACEHOLDERPLACEHOLDERprompt6ff286fe-2fc6-45e3-9d85-84a66efc298cPLACEHOLDERPLACEHOLDER",
"PLACEHOLDERprompt36af6791-bdcc-4721-8160-2718cfbd55c5PLACEHOLDER"
] |
2024-01-10 | MichelCarroll/COMP550_Project | src~run_openai_classifier.py | from collections import Counter
from common.entities import StockDirection, Datasets, AnswerDataPoint
from common.data_loading import load_data_splits
from tqdm import tqdm
from common.utils import llama2_token_length
from dotenv import load_dotenv
from sklearn.metrics import f1_score, accuracy_score, confusion_matrix
from random import seed, shuffle
import os
from openai import OpenAI
import huggingface_hub
import json
from datasets import load_dataset
load_dotenv()
SEED = os.environ['SEED']
seed(SEED)
HUGGINGFACE_TOKEN = os.environ['HUGGINGFACE_TOKEN']
huggingface_hub.login(token=HUGGINGFACE_TOKEN)
openai_client = OpenAI(api_key=os.environ['OPENAI_API_KEY'])
dataset_name = 'michelcarroll/llama2-earnings-stock-prediction-fine-tune-binary'
NUM_EXAMPLES_TO_EVALUATE = 1000
split_name = 'test'
class OpenAIModel:
def __init__(self, model: str) -> None:
self._model = model
def classify(self, text: str) -> StockDirection:
response = self._openai_request(text=text)
if StockDirection.Up.value in response:
return StockDirection.Up
elif StockDirection.Down.value in response:
return StockDirection.Down
else:
raise Exception(f"Response did not contain one of the classes: {response}")
def _openai_request(self, text: str) -> str:
chat_completion = openai_client.chat.completions.create(
temperature=0.2,
messages=[
{
"role": "system",
"content": f"You are a binary classifier with expert financial analyst knowledge, predicting which direction the stock price will go following this answer from the Q/A section of an earnings call. Output either UP if you predict the stock will go up, or DOWN if you predict it will go down. You must absolutely make a prediction – don't answer with N/A.",
},
{
"role": "user",
"content": f"The answer from the earnings transcript is: {text}",
}
],
functions = [{
"name": "predict",
"description": "Label the correct class",
"parameters": {
"type": "object",
"properties": {
# 'reasoning': {
# "type": "string"
# },
'prediction': {
"type": "string",
"enum": ["UP", "DOWN"]
},
},
"required": [ "prediction"]
}
}],
function_call={'name': 'predict'},
model=self._model,
timeout=15
)
arguments = json.loads(chat_completion.choices[0].message.function_call.arguments)
# print(arguments['reasoning'])
return arguments['prediction']
def filter_answer(answer: str, token_length_low_threshold: int = 20, token_length_high_threshold: int = 1000) -> bool:
text_token_length = llama2_token_length(answer)
return text_token_length >= token_length_low_threshold and text_token_length <= token_length_high_threshold
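# Example (answer text is hypothetical): a one-word reply such as "Yes." falls
# below the 20-token floor and returns False, while a typical multi-sentence
# answer of 20-1000 Llama-2 tokens returns True.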
def evaluate(label: str, llm_model, datapoints):
    predictions: list[str] = []
    true_labels: list[str] = []
for datapoint in tqdm(datapoints, desc="Evaluating"):
try:
result = llm_model.classify(text=datapoint['completion'])
except Exception as e:
print("ERROR", e.args[0])
continue
if result:
predictions.append(result.value)
true_labels.append(datapoint['label'])
print("Prediction Counts: ", Counter(predictions))
print("="*10)
print("Results for ", label)
print("="*10)
print("N of ", len(datapoints))
print("Accuracy Score: ", accuracy_score(y_true=true_labels, y_pred=predictions))
print("F1 Score: ", f1_score(y_true=true_labels, y_pred=predictions, pos_label='UP'))
print("Confusion Matrix")
print(confusion_matrix(y_true=true_labels, y_pred=predictions, labels=["UP", "DOWN"]))
answer_datapoints = load_dataset(dataset_name, split=f"{split_name}[0:{NUM_EXAMPLES_TO_EVALUATE}]")
evaluate(
label="GPT 4 with CoT",
llm_model = OpenAIModel(model='gpt-4'),
datapoints=answer_datapoints
) | [
"The answer from the earnings transcript is: PLACEHOLDER",
"You are a binary classifier with expert financial analyst knowledge, predicting which direction the stock price will go following this answer from the Q/A section of an earnings call. Output either UP if you predict the stock will go up, or DOWN if you predict it will go down. You must absolutely make a prediction – don't answer with N/A."
] |
2024-01-10 | MichelCarroll/COMP550_Project | src~common~dto.py |
from pydantic import BaseModel
from typing import Optional, Union
from openai.types.chat.completion_create_params import Function, FunctionCall
class TrainingExampleVariables(BaseModel):
system_prompt: str
user_message: str
correct_response: str
function_call: Optional[FunctionCall] = None
functions: list[Function] = []
metadata: dict[str, str]
class TransformerFineTuningTrainingExample(BaseModel):
serialized_completion: str
correct_response: str
class OpenAIRoleMessage(BaseModel):
role: str
content: str
class OpenAIFunctionCall(BaseModel):
name: str
arguments: str
class OpenAIFunctionCallMessage(BaseModel):
role: str
function_call: OpenAIFunctionCall
content: str = None
class OpenAIFineTuningTrainingExample(BaseModel):
messages: list[Union[OpenAIRoleMessage, OpenAIFunctionCallMessage]]
class OpenAIFineTuningValidationExample(BaseModel):
messages: list[Union[OpenAIRoleMessage, OpenAIFunctionCallMessage]]
functions: Optional[list[dict]] = []
function_call: Optional[str] = None
class FineTuningParameters(BaseModel):
adapter_id_prefix: str
base_model_name: str
lora_rank: int = 64
lora_alpha: int = 16
epochs: int = 1
start_from_checkmarks: bool = False
def checkmark_dir(self):
return f"{self.adapter_id_prefix}-{self.lora_rank}-{self.lora_alpha}"
def adapter_id(self):
return f"{self.adapter_id_prefix}-r{self.lora_rank}-a{self.lora_alpha}-e{self.epochs}" | [] |
2024-01-10 | joexu22/llama2-finetune | data_processing~DemoScripts~01_openai_datagen.py | import openai
import os
from dotenv import load_dotenv
# Load API key from environment variable
load_dotenv()
api_key = os.getenv("OPENAI_API_KEY")
# Initialize OpenAI API client
openai.api_key = api_key
# Read questions from a text file, one question per line, and filter out blank lines
with open("test_questions.txt", "r") as f:
questions = [line.strip() for line in f.readlines() if line.strip()]
# Open the output file for writing answers
with open("test_answers.txt", "w") as output_file:
# Iterate over each question and get the answer
for question in questions:
prompt = f"You are an expert and wise llama shepard who is secretly a machine learning expert who has succeeded in building fully autonomous AGIs before retiring. You are asked to explain the following question {question} to a 5 year old kid. Please provide concrete and relatable examples that a 5 year old can reproduce. There should be a steady reference to llamas examples. You're prone to let leak your deep mathematical and philosophical insight. You capture the essence of the question with clarity and elucidate the audience."
response = openai.Completion.create(
engine="gpt-3.5-turbo-instruct",
prompt=prompt,
max_tokens=1000
)
# Remove line breaks from the answer
cleaned_text = response.choices[0].text.strip().replace('\n', ' ')
# Write the question and the cleaned answer to the output file
output_file.write(f"{question}\n{cleaned_text}\n") | [
"You are an expert and wise llama shepard who is secretly a machine learning expert who has succeeded in building fully autonomous AGIs before retiring. You are asked to explain the following question PLACEHOLDER to a 5 year old kid. Please provide concrete and relatable examples that a 5 year old can reproduce. There should be a steady reference to llamas examples. You're prone to let leak your deep mathematical and philosophical insight. You capture the essence of the question with clarity and elucidate the audience."
] |
2024-01-10 | aws-samples/aws-analytics-genai-llm-workshop | Lab1_Retrieval_Augmented_Generation~api~app~api~api_v1~endpoints~llm_ep.py | import os
import sys
import boto3
import logging
from typing import Any, Dict
from fastapi import APIRouter
from urllib.parse import urlparse
from langchain import PromptTemplate
from .fastapi_request import (Request,
Text2TextModel,
EmbeddingsModel,
VectorDBType)
from .sm_helper import query_sm_endpoint
from langchain.chains.question_answering import load_qa_chain
from .initialize import (setup_sagemaker_endpoint_for_text_generation,
load_vector_db_faiss,
load_vector_db_opensearch)
logging.getLogger().setLevel(logging.INFO)
logger = logging.getLogger()
#logging.basicConfig(format='%(asctime)s,%(module)s,%(processName)s,%(levelname)s,%(message)s', level=logging.INFO)
# initialize the vector db as a global variable so that it
# can persist across lambda invocations
VECTOR_DB_DIR = os.path.join("/tmp", "_vectordb")
_vector_db = None
_current_vectordb_type = None
_sm_llm = None
router = APIRouter()
def _init(req: Request):
# vector db is a global static variable, so that it only
# created once across multiple lambda invocations, if possible
global _vector_db
global _current_vectordb_type
logger.info(f"req.vectordb_type={req.vectordb_type}, _vector_db={_vector_db}")
if req.vectordb_type != _current_vectordb_type:
logger.info(f"req.vectordb_type={req.vectordb_type} does not match _current_vectordb_type={_current_vectordb_type}, "
f"resetting _vector_db")
_vector_db = None
if req.vectordb_type == VectorDBType.OPENSEARCH and _vector_db is None:
# ARN of the secret is of the following format arn:aws:secretsmanager:region:account_id:secret:my_path/my_secret_name-autoid
os_creds_secretid_in_secrets_manager = "-".join(os.environ.get('OPENSEARCH_SECRET').split(":")[-1].split('-')[:-1])
_vector_db = load_vector_db_opensearch(os_creds_secretid_in_secrets_manager,
boto3.Session().region_name,
os.environ.get('OPENSEARCH_DOMAIN_ENDPOINT'),
os.environ.get('OPENSEARCH_INDEX'),
req.embeddings_generation_model)
elif req.vectordb_type == VectorDBType.FAISS and _vector_db is None:
logger.info(f"vector db does not exist, creating it now")
_vector_db = load_vector_db_faiss(req.vectordb_s3_path,
VECTOR_DB_DIR,
req.embeddings_generation_model,
boto3.Session().region_name)
logger.info("after creating vector db")
elif _vector_db is not None:
logger.info(f"seems like vector db already exists...")
else:
logger.error(f"req.vectordb_type={req.vectordb_type} which is not supported, _vector_db={_vector_db}")
# just like the vector db the sagemaker endpoint used for
# text generation is also global and shared across invocations
# if possible
global _sm_llm
if _sm_llm is None:
logger.info(f"SM LLM endpoint is not setup, setting it up now")
_sm_llm = setup_sagemaker_endpoint_for_text_generation(req,
boto3.Session().region_name)
logger.info("after setting up sagemaker llm endpoint")
else:
logger.info(f"sagemaker llm endpoint already exists..")
@router.post("/text2text")
async def llm_textgen(req: Request) -> Dict[str, Any]:
# dump the received request for debugging purposes
logger.info(f"req={req}")
# initialize vector db and Sagemaker Endpoint
_init(req)
# now that we have the matching docs, lets pack them as a context
# into the prompt and ask the LLM to generate a response
answer = query_sm_endpoint(req)
resp = {'question': req.q, 'answer': answer}
return resp
@router.post("/rag")
async def rag_handler(req: Request) -> Dict[str, Any]:
# dump the received request for debugging purposes
logger.info(f"req={req}")
# initialize vector db and Sagemaker Endpoint
_init(req)
# Use the vector db to find similar documents to the query
# the vector db call would automatically convert the query text
# into embeddings
docs = _vector_db.similarity_search(req.q, k=req.max_matching_docs)
logger.info(f"here are the {req.max_matching_docs} closest matching docs to the query=\"{req.q}\"")
for d in docs:
logger.info(f"---------")
logger.info(d)
logger.info(f"---------")
# now that we have the matching docs, lets pack them as a context
# into the prompt and ask the LLM to generate a response
prompt_template = """Answer the question using the provided text and provide a descriptive summary, if the answer is not contained within the text below, say "I don't know the answer to this!:\n\n{context}\n\n{question}"""
prompt = PromptTemplate(
template=prompt_template, input_variables=["context", "question"]
)
logger.info(f"prompt sent to llm = \"{prompt}\"")
chain = load_qa_chain(llm=_sm_llm, prompt=prompt)
answer = chain({"input_documents": docs, "question": req.q}, return_only_outputs=True)['output_text']
logger.info(f"answer received from llm,\nquestion: \"{req.q}\"\nanswer: \"{answer}\"")
resp = {'question': req.q, 'answer': answer}
if req.verbose is True:
resp['docs'] = docs
return resp | [
"question",
"Answer the question using the provided text and provide a descriptive summary, if the answer is not contained within the text below, say \"I don't know the answer to this!:\n\n{context}\n\n{question}",
"context"
] |
2024-01-10 | kavyashree1903/chat-pdf | ChatPDF.py | import streamlit as st
import openai
import requests
from bs4 import BeautifulSoup
from PIL import Image
import os
import pandas as pd
from PIL import Image
import requests
from langchain.embeddings.openai import OpenAIEmbeddings
from langchain.vectorstores import Chroma
from langchain.text_splitter import CharacterTextSplitter
from langchain import OpenAI, VectorDBQA
from langchain.document_loaders import DirectoryLoader
import os
from pathlib import Path
from langchain.vectorstores.faiss import FAISS
from langchain.chains.question_answering import load_qa_chain
from PyPDF2 import PdfReader
import pickle
from streamlit_chat import message
# Set up OpenAI API key
os.environ["OPENAI_API_KEY"] = "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"
openai.api_key = "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"
def func(filename):
if(filename!=None):
print(filename)
reader = PdfReader(filename)
# printing number of pages in pdf file
pdf_len = len(reader.pages)
# getting a specific page from the pdf file
final_text=''
final_list=list()
for i in range(pdf_len):
page = reader.pages[i]
text = page.extract_text()
final = text.replace("\n"," ")
final_text=final_text+text
final_list.append(final)
# extracting text from page
new_list = list(filter(lambda x: x != '', final_list))
# print(new_list)
# print(len(new_list))
return new_list
def newList(filename):
new_list=func(filename)
embeddings = OpenAIEmbeddings()
return new_list,embeddings
def chatur_gpt(filename):
new_list,embeddings= newList(filename)
if(new_list!=None):
if(len(new_list)!=0):
docsearch = FAISS.from_texts(new_list, embeddings)
qa_chain = load_qa_chain(OpenAI(temperature=0), chain_type="refine")
qa = VectorDBQA(combine_documents_chain=qa_chain, vectorstore=docsearch)
return qa
def generate_response(prompt,qa):
message = qa.run(prompt)
return message
def get_text():
input_text = st.text_input("You: ","Hello, how are you?", key="input")
return input_text
def main():
st.title("Custom GPT")
st.write("Upload a file to train using GPT-3")
file = st.file_uploader("Upload a file", type=["pdf"])
# Storing the chat
if 'generated' not in st.session_state:
st.session_state['generated'] = []
if 'past' not in st.session_state:
st.session_state['past'] = []
if file is not None:
if os.path.isfile(file.name) == False:
save_folder = os.getcwd()
save_path = Path(save_folder, file.name)
with open(save_path, mode='wb') as w:
w.write(file.getbuffer())
#st.write(file.read())
new_list,embeddings= newList(file.name)
if(new_list!=None):
if(len(new_list)!=0):
docsearch = FAISS.from_texts(new_list, embeddings)
qa_chain = load_qa_chain(OpenAI(temperature=0), chain_type="refine")
qa = VectorDBQA(combine_documents_chain=qa_chain, vectorstore=docsearch)
# st.write(file.name)
# res = chatur_gpt
user_input = get_text()
if user_input:
output = generate_response(user_input,qa)
# store the output
st.session_state.past.append(user_input)
st.session_state.generated.append(output)
if st.session_state['generated']:
for i in range(len(st.session_state['generated'])-1, -1, -1):
message(st.session_state["generated"][i], key=str(i))
message(st.session_state['past'][i], is_user=True, key=str(i) + '_user')
main() | [] |
2024-01-10 | kavyashree1903/chat-pdf | app_new.py | import streamlit as st
import openai
import requests
from bs4 import BeautifulSoup
from PIL import Image
#import clip
import chardet
import io
import json
from tabula.io import read_pdf
import tabula
import os
import pandas as pd
from PIL import Image
import requests
# from transformers import CLIPProcessor, CLIPModel
# import aspect_based_sentiment_analysis as absa
# import json
from langchain.embeddings.openai import OpenAIEmbeddings
from langchain.vectorstores import Chroma
from langchain.text_splitter import CharacterTextSplitter
from langchain import OpenAI, VectorDBQA
from langchain.document_loaders import DirectoryLoader
# import magic
import os
# import nltk
from pathlib import Path
from langchain.vectorstores.faiss import FAISS
from langchain.chains.question_answering import load_qa_chain
from PyPDF2 import PdfReader
import pickle
from streamlit_chat import message
# Set up OpenAI API key
os.environ["OPENAI_API_KEY"] = "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"
openai.api_key = "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"
# Define function to generate GPT-3 comments
def generate_comment(code):
endpoint = "https://api.openai.com/v1/completions"
headers = {"Content-Type": "application/json",
"Authorization": f"Bearer {openai.api_key}"}
data = {
"model": "text-davinci-003",
"prompt": f"Copy the code and add comments to it explaining it step by step:{code}",
"temperature": 0.5,
"max_tokens": 500,
"n": 1,
}
response = requests.post(endpoint, json=data, headers=headers)
if response.ok:
try:
comment = response.json()["choices"][0]["text"].strip()
return comment
except KeyError:
return "Failed to generate comments. Please try again later."
else:
return "Failed to generate comments. Please try again later."
# Define function to scrape website data
def scrape_website():
endpoint = "https://api.openai.com/v1/completions"
headers = {"Content-Type": "application/json",
"Authorization": f"Bearer {openai.api_key}"}
url = "https://ioagpl.com/cng-price/"
page = requests.get(url)
soup = BeautifulSoup(page.content, "html.parser")
table = soup.find_all("table")
table = table[:2]
data = {
"model": "text-davinci-003",
"prompt": (
f"Find out all locations and their corresponding prices in a pairwise format:\n\n"
f"{table}\n\n"
),
"temperature": 0.5,
"max_tokens": 500,
"n": 1,
}
response = requests.post(endpoint, json=data, headers=headers)
scraped = response.json()["choices"][0]["text"].strip()
# split the scraped data into location and price pairs
data_pairs = scraped.split("\n")
return data_pairs
# Define function to perform aspect-based sentiment analysis
def aspect_sentiment(text):
endpoint = "https://api.openai.com/v1/engines/text-davinci-002/completions"
headers = {"Content-Type": "application/json",
"Authorization": f"Bearer {openai.api_key}"}
data = {
"model": "text-davinci-002",
"prompt": (
f"Analyze the sentiment of the following text from the perspective of the customer service aspect:\n\n"
f"{text}\n\n"
f"Sentiment: Positive, Negative, Neutral"
),
"temperature": 0.5,
"max_tokens": 60,
"n": 1,
"stop": "\n"
}
response = requests.post(endpoint, json=data, headers=headers)
sentiment = response.json()["choices"][0]["text"].strip()
return sentiment
# Define email generator
def generate_email(recipient, content):
endpoint = "https://api.openai.com/v1/completions"
headers = {"Content-Type": "application/json",
"Authorization": f"Bearer {openai.api_key}"}
prompt = f"Compose an email to {recipient} with the following context:\n\n{content}\n\n---\n"
data = {
"model": "text-davinci-003",
"prompt": prompt,
"temperature": 0.5,
"max_tokens": 400,
"n": 1,
}
response = requests.post(endpoint, json=data, headers=headers)
content = response.json()["choices"][0]["text"].strip()
return content
def func(filename):
if(filename!=None):
print(filename)
reader = PdfReader(filename)
# printing number of pages in pdf file
pdf_len = len(reader.pages)
# getting a specific page from the pdf file
final_text=''
final_list=list()
for i in range(pdf_len):
page = reader.pages[i]
text = page.extract_text()
final = text.replace("\n"," ")
final_text=final_text+text
final_list.append(final)
# extracting text from page
new_list = list(filter(lambda x: x != '', final_list))
# print(new_list)
# print(len(new_list))
return new_list
def newList(filename):
new_list=func(filename)
embeddings = OpenAIEmbeddings()
return new_list,embeddings
def chatur_gpt(filename):
new_list,embeddings= newList(filename)
if(new_list!=None):
if(len(new_list)!=0):
docsearch = FAISS.from_texts(new_list, embeddings)
qa_chain = load_qa_chain(OpenAI(temperature=0), chain_type="refine")
qa = VectorDBQA(combine_documents_chain=qa_chain, vectorstore=docsearch)
return qa
#ABSA
# nlp = absa.load()
# # Load the aspect categories and their synonyms from CSV files
# aspects_synonyms = {}
# aspects = ["Housekeeping", "Location", "Quality", "Quantity", "Service"]
# for aspect in aspects:
# filename = aspect + ".xlsx"
# df = pd.read_excel(filename)
# synonyms = df["Category"].tolist()
# aspects_synonyms[aspect] = synonyms
# # Function to find synonyms for a word
# def find_synonyms(word):
# prompt = f"Find synonyms for the word '{word}'.\nSynonyms:"
# response = openai.Completion.create(
# engine="text-davinci-003",
# prompt=prompt,
# max_tokens=2048,
# temperature=0.5,
# n=1,
# stop=None,
# )
# synonyms = [s.strip() for syn in response.choices[0].text.split("\n") for s in syn.split(",")]
# synonyms = [word.title() for word in synonyms]
# return synonyms
# # Function to find the sentiment for an aspect in a review
# def find_aspect_sentiment(review, aspect):
# aspect_list = aspects_synonyms[aspect]
# var = nlp(review, aspects=aspect_list)
# sentiment_score = var.subtasks[aspect].scores
# sentiment = "Positive" if sentiment_score[2] > sentiment_score[1] else "Negative" if sentiment_score[1] > sentiment_score[0] else "Neutral"
# return sentiment
# # Function to calculate the score of a review for a given aspect
# def calculate_score(review, aspect):
# aspect_list = aspects_synonyms[aspect]
# weight_list = [2 if syn == aspect else 1 for syn in aspect_list]
# data = {"Category": aspect_list, "Weight": weight_list}
# total_score = 0
# weight_sum = 0
# final_score = 0
# for j in range(len(data)):
# aspect_stem_position = data["Category"][j].lower() in review.lower().split()
# synonyms = aspects_synonyms[aspect]
# synonym_find = False
# if aspect_stem_position == False:
# if (len(synonyms)>0):
# synonym_find = any(synonym in review.lower() for synonym in synonyms)
# else:
# synonym_find = True
# if synonym_find:
# sentiment_score = find_aspect_sentiment(review, data["Category"][j])
# score = (sentiment_score[2]-sentiment_score[1]+1)*2.5*data["Weight"][j]
# total_score = total_score + score
# weight_sum = weight_sum + data["Weight"][j]
# if weight_sum > 0:
# final_score = total_score / weight_sum
# return final_score
# # Define function to classify image using OpenAI CLIP
# def classify_image(image):
# model = CLIPModel.from_pretrained("openai/clip-vit-base-patch32")
# processor = CLIPProcessor.from_pretrained("openai/clip-vit-base-patch32")
# inputs = processor(text=["a photo of a clean toilet", "a photo of a dirty toilet"], images=image, return_tensors="pt", padding=True)
# outputs = model(**inputs)
# logits_per_image = outputs.logits_per_image # this is the image-text similarity score
# probs = logits_per_image.softmax(dim=1) # we can take the softmax to get the label probabilities
# return probs
def generate_response(prompt,qa):
# completions = openai.Completion.create(
# engine = "text-davinci-003",
# prompt = prompt,
# max_tokens = 1024,
# n = 1,
# stop = None,
# temperature=0.5,
# )
# message = completions.choices[0].text
message = qa.run(prompt)
return message
def get_text():
input_text = st.text_input("You: ","Hello, how are you?", key="input")
return input_text
def main():
st.sidebar.title("Options")
options = ["Document Summarizer", "Code Commenter", "Web Scraper", "Email Generator", "Custom GPT"]
# options = ["Document Summarizer", "Code Commenter", "Web Scraper", "Email Generator", "Aspect-based Sentiment Analysis"]
choice = st.sidebar.selectbox("Select an option", options)
if choice == "Custom GPT":
st.title("Custom GPT")
st.write("Upload a file to train using GPT-3")
file = st.file_uploader("Upload a file", type=["pdf"])
# Storing the chat
if 'generated' not in st.session_state:
st.session_state['generated'] = []
if 'past' not in st.session_state:
st.session_state['past'] = []
if file is not None:
if os.path.isfile(file.name) == False:
save_folder = os.getcwd()
save_path = Path(save_folder, file.name)
with open(save_path, mode='wb') as w:
w.write(file.getbuffer())
#st.write(file.read())
new_list,embeddings= newList(file.name)
if(new_list!=None):
if(len(new_list)!=0):
docsearch = FAISS.from_texts(new_list, embeddings)
qa_chain = load_qa_chain(OpenAI(temperature=0), chain_type="refine")
qa = VectorDBQA(combine_documents_chain=qa_chain, vectorstore=docsearch)
# st.write(file.name)
# res = chatur_gpt
user_input = get_text()
if user_input:
output = generate_response(user_input,qa)
# store the output
st.session_state.past.append(user_input)
st.session_state.generated.append(output)
if st.session_state['generated']:
for i in range(len(st.session_state['generated'])-1, -1, -1):
message(st.session_state["generated"][i], key=str(i))
message(st.session_state['past'][i], is_user=True, key=str(i) + '_user')
if choice == "Document Summarizer":
st.title("Document Summarizer")
st.write("Upload a file to summarize using GPT-3")
file = st.file_uploader("Upload a file", type=["txt"])
if file is not None:
# Read the file content
file_content = file.read()
st.write(file_content)
result = chardet.detect(file_content)
encoding = result['encoding'] if result['encoding'] is not None else 'utf-8'
text = io.StringIO(file_content.decode(encoding)).read()
# Split the text into sections of 2048 tokens or less
max_tokens_per_section = 2048
sections = []
section_start = 0
while section_start < len(text):
section_end = section_start + max_tokens_per_section
section = text[section_start:section_end]
sections.append(section)
section_start = section_end
# Summarize each section separately
summaries = []
with st.spinner("Summarizing..."):
for section in sections:
response = openai.Completion.create(
engine="text-davinci-002",
prompt=(
"Summarize the following document section in detail and also explain the highlights available from the tables and output the response in bullets:\n\n"
+ section
+ "\n\nSummary:"
),
max_tokens=2048,
n=1,
stop=None,
temperature=0.5,
)
summary = response.choices[0].text.strip()
summaries.append(summary)
# Combine the summaries into a single summary
summary = "\n".join(summaries)
st.write("Summary:")
st.write(summary)
# elif choice == "Report Summarizer":
# st.title("Report Summarizer")
# st.write("Preview the report and generate its summary using GPT-3")
# filename = st.file_uploader("Upload a file", type=["txt", "pdf"])
# df = tabula.read_pdf(filename, pages="all")
# df.to_excel("example.xlsx", index=False)
# # filename = "example.pdf"
# if os.path.exists(filename):
# # File exists, so read or convert it
# df = tabula.read_pdf(filename, pages="all")
# # or
# tabula.convert_into(filename, "example.xlsx", output_format="xlsx", pages="all")
# else:
# print(f"File {filename} does not exist")
elif choice == "Web Scraper":
st.title("Web Scraper")
st.write("Get data of CNG prices from IOAGPL website")
st.write("Here is the [Link to IOAGPL website](https://ioagpl.com/cng-price/), click on the link to compare it with your scrape results" )
# url = st.text_input("URL")
# field = st.text_input("HTML field")
if st.button("Scrape"):
with st.spinner("Scraping website..."):
data = scrape_website()
# Extracting the information from scraped data
st.write("Data: ")
st.write(data)
elif choice == "Email Generator":
st.title("Email Generator")
st.write("Fill the fields to generate a customized email")
r = st.text_input("Who are you sending this mail to?")
field = st.text_input("What is the context of this mail?")
if st.button("Generate Email"):
with st.spinner("Generating email..."):
email_content = generate_email(r, field)
st.write("Email Content:")
st.write(email_content)
scraped_data = scrape_website()
elif choice == "Aspect-based Sentiment Analysis":
st.title("Aspect-Based Sentiment Analysis")
review = st.text_area("Enter your review here:")
aspect = st.selectbox("Select an aspect:", aspects)
if review and aspect:
score = calculate_score(review, aspect)
st.write(f"The score of your review for {aspect} is {score:.2f}")
else:
st.write("Please enter a review and select an aspect.")
elif choice == "Code Commenter":
st.title("Code Commenter")
col1, col2 = st.columns(2)
with col1:
st.write("Enter a piece of code to comment using GPT-3")
code = st.text_area("Paste your code here")
with col2:
if code:
comments = generate_comment(code)
st.write("Comments:")
st.code(comments, language="python")
elif choice == "Image Classifier":
st.title("Image Classifier")
st.write("Upload an image to classify using OpenAI CLIP")
file = st.file_uploader("Upload an image", type=["jpg", "jpeg", "png"])
if file is not None:
image = Image.open(file)
with st.spinner("Classifying image..."):
category = classify_image(image)
st.write("Image corresponds to:")
st.write(category)
main() | [
"Summarize the following document section in detail and also explain the highlights available from the tables and output the response in bullets:\n\nPLACEHOLDER\n\nSummary:",
"Compose an email to PLACEHOLDER with the following context:\n\nPLACEHOLDER\n\n---\n"
] |
2024-01-10 | Suchetzky/NEAT_Generative-AI-hackathon | BackSeperated~gpt_api.py | import os
import openai
class GPT:
def __init__(self, model="gpt-3.5-turbo", max_tokens=2048):
self.model_ = model
self.max_tokens_ = max_tokens
openai.organization =""
openai.api_key = ""
self.messages_ = []
# DEAL WITH CHANGES TO MODEL
def switch_model(self, model):
"""
Switches the model to a new model
"""
self.model_ = model
@staticmethod
def get_models():
"""
:return: A list of all available models to use.
"""
return openai.Model.list()
# DEAL WITH CHANGES TO MESSAGE, SYSTEM
def add_system_message(self, content):
self.messages_.append({"role": "system", "content": content})
def replace_system_message(self, content):
self.messages_[0] = {"role": "system", "content": content}
# DEAL WITH CHANGES TO MESSAGES, USER AND ASSISTANT
def remove_first_k_messages(self, k):
"""
Removes the first k messages from the messages list not including the system message
"""
        self.messages_ = [self.messages_[0]] + self.messages_[k + 1:]
def clear_messages(self):
"""
Clears the messages list
"""
self.messages_ = [self.messages_[0]]
def chat(self, content):
"""
:param content:
:return:
"""
self.messages_.append({"role": "user", "content": content})
response = openai.ChatCompletion.create(model=self.model_, messages=self.messages_, temperature=0,
max_tokens=self.max_tokens_)
assistant_msg = response['choices'][0]['message']['content']
self.messages_.append({"role": "assistant", "content": assistant_msg})
return assistant_msg
if __name__ == '__main__':
gpt = GPT()
print(gpt.chat("Hello"))
| [
"Hello"
] |
2024-01-10 | Suchetzky/NEAT_Generative-AI-hackathon | webn.py | import io
import streamlit as st
import time
import pandas as pd
import networkx as nx
import json
import subprocess
import re
import ast
import openai
# All scripts ##########################
class FunctionImprovements:
def __init__(self, old_code, new_code, comment):
self.old_code = old_code
self.new_code = new_code
self.comment = comment
class GPT:
def __init__(self, model="gpt-3.5-turbo", max_tokens=2048):
self.model_ = model
self.max_tokens_ = max_tokens
openai.organization = "org-W1xrGR4WAmmdeqk5vOBvlntj"
#Add your Api key here and uncomment
#openai.api_key =
self.messages_ = []
# DEAL WITH CHANGES TO MODEL
def switch_model(self, model):
"""
Switches the model to a new model
"""
self.model_ = model
@staticmethod
def get_models():
"""
:return: A list of all available models to use.
"""
return openai.Model.list()
# DEAL WITH CHANGES TO MESSAGE, SYSTEM
def add_system_message(self, content):
self.messages_.append({"role": "system", "content": content})
def replace_system_message(self, content):
self.messages_[0] = {"role": "system", "content": content}
# DEAL WITH CHANGES TO MESSAGES, USER AND ASSISTANT
def remove_first_k_messages(self, k):
"""
Removes the first k messages from the messages list not including the system message
"""
        self.messages_ = [self.messages_[0]] + self.messages_[k + 1:]
def clear_messages(self):
"""
Clears the messages list
"""
self.messages_ = [self.messages_[0]]
def chat(self, content):
"""
:param content:
:return:
"""
self.messages_.append({"role": "user", "content": content})
response = openai.ChatCompletion.create(model=self.model_, messages=self.messages_, temperature=0,
max_tokens=self.max_tokens_)
assistant_msg = response['choices'][0]['message']['content']
self.messages_.append({"role": "assistant", "content": assistant_msg})
return assistant_msg
STYLE_PROMPTS_FILE = "lib/prompts/style_prompts.json"
TEST_PROMPTS_FILE = "lib/prompts/test_prompts.json"
TEST_CODE_FILE = "tests.py"
MY_MODULE_FILE = "mymodule.py"
class TestResponse:
def __init__(self, name, source_code, error=None, explanation=None):
self.name = name
self.source_code = source_code
self.error = error
self.explanation = explanation
def generate_responses(test_names, test_contents, errors):
responses = []
for name, content, error in zip(test_names, test_contents, errors):
responses.append(TestResponse(name, content, error))
return responses
def responseFromText(tests, errors):
test_names, test_contents = extract_test_functions(tests)
test_names = [function.name for function in test_names]
errors_dict = extract_errors(errors)
errors = [None] * len(test_names)
for test_name, test_content in errors_dict.items():
errors[test_names.index(test_name)] = test_content
return generate_responses(test_names, test_contents, errors)
def extract_errors(source):
errors = dict()
row_pattern = "(FAIL|ERROR):[^-]*"
function_name_pattern = "(FAIL|ERROR):(.*)\("
for match in re.finditer(row_pattern, source):
full_text = match.group(0)
func_name = list(re.finditer(function_name_pattern, full_text))[0].group(2).strip()
errors[func_name] = full_text
return errors
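# Illustrative behaviour (the unittest stderr is hypothetical): a block starting
# with "FAIL: test_add (__main__.MyTests)" is captured up to the first "-"
# character (normally the dashed separator line) and stored as
# {"test_add": "<that FAIL/ERROR block>"}.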
def extract_test_functions(source_code):
function_sources = []
ast_tree = ast.parse(source_code)
unit_tests_class = [item for item in ast_tree.body if type(item) == ast.ClassDef][0]
function_names = extract_functions(unit_tests_class)
for function in function_names:
function_source = get_function_source(source_code=source_code, tree=unit_tests_class,
function_name=function.name)
function_sources.append(function_source)
return function_names, function_sources
def parse_base_response(response):
pattern = "```"
regex = re.compile(pattern)
matches = list(regex.finditer(response))
if len(matches) == 0:
return response
response = response[matches[0].end(): matches[1].start()]
if response.startswith("python"):
response = response[6:]
return response
def get_test_suggestions(server_side, source_code):
"""Generate tests for the python function given in source-code,
where the documentations of the functions it depends on are listed in
'dependencies_documentations', and are assumed to be valid and working. """
with open(TEST_PROMPTS_FILE) as f:
prompts = json.load(fp=f)
with open(MY_MODULE_FILE, 'w') as f:
f.write(source_code)
gpt = GPT()
gpt.add_system_message(prompts.get('base_prompt')[0])
# function_codes_generator = server_side.get_sources()
# funcResponses = []
# for function_code in tqdm(function_codes_generator):
# GPT.clear_messages()
# response = GPT.chat(function_code)
# parsed_response = parse_base_response(response)
# with open(TEST_CODE_FILE, 'w') as tests_file:
# tests_file.write(parsed_response)
# p = subprocess.Popen(f"python3 {TEST_CODE_FILE}", stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=True)
# _, err = p.communicate()
# error = err.decode('utf-8')
# function_response = FunctionResponse.fromText(parsed_response, error)
# funcResponses.append(function_response)
funcResponse = None
response = gpt.chat(source_code)
parsed_response = parse_base_response(response)
with open(TEST_CODE_FILE, 'w') as tests_file:
tests_file.write(parsed_response)
p = subprocess.Popen(f"python3 {TEST_CODE_FILE}", stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=True)
_, err = p.communicate()
error = err.decode('utf-8')
function_response = responseFromText(parsed_response, error)
return function_response
def get_function_source(source_code, tree, function_name):
# Get the start and end line numbers of the function definition
function_node = None
function_index = 0
for index, node in enumerate(tree.body):
if isinstance(node, ast.FunctionDef) and node.name == function_name:
function_node = node
function_index = index
break
if not function_node:
return None
start_line = function_node.lineno
# Extract the lines containing the function
lines = source_code.split('\n')
if function_index == len(tree.body) - 1:
function_lines = lines[start_line - 1:]
else:
function_lines = lines[start_line - 1: tree.body[function_index + 1].lineno - 1]
# Join the lines back into a string
function_source = '\n'.join(function_lines)
return function_source
def parse_base_response_style(response):
pattern = "```"
regex = re.compile(pattern)
matches = list(regex.finditer(response))
code = response[matches[0].end(): matches[1].start()]
if response.startswith("python"):
code = code[6:]
return code, response[matches[1].end():]
def get_style_suggestions(server_side):
with open(STYLE_PROMPTS_FILE) as f:
prompts = json.load(fp=f)
gpt = GPT()
gpt.add_system_message(prompts.get('base_prompt')[0])
suggestions = {f: [] for f in server_side.functions_}
for func in suggestions.keys():
gpt.clear_messages()
old_code = server_side.get_function_source(func)
response = gpt.chat(server_side.get_function_source(func))
new_code, explanation = parse_base_response_style(response)
suggestions[func] = FunctionImprovements(old_code, new_code, explanation)
return suggestions
def extract_functions(node):
"""
Extracts all function definitions from an AST node.
"""
functions = []
for body_item in node.body:
if isinstance(body_item, ast.FunctionDef):
functions.append(body_item)
return functions
def extract_function_calls(node):
"""
Extracts all function calls from an AST node.
"""
function_calls = []
if isinstance(node, ast.Call) and isinstance(node.func, ast.Name):
function_calls.append(node.func.id)
for child_node in ast.iter_child_nodes(node):
function_calls.extend(extract_function_calls(child_node))
return function_calls
def build_dependency_graph(functions):
"""
Builds a dependency graph of functions in a Python code file.
"""
graph = nx.DiGraph()
for function in functions:
function_name = function.name
graph.add_node(function_name)
function_calls = extract_function_calls(function)
for called_function in function_calls:
if function_name == called_function:
continue
if called_function not in [f.name for f in functions]:
continue
graph.add_edge(function_name, called_function)
return graph
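# Edge direction is caller -> callee, so a topological sort of this graph lists callers
# before the functions they call.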
def build_topological_sort(source_code):
"""Returns a list of functions sorted topologically"""
ast_tree = ast.parse(source_code)
functions = extract_functions(ast_tree)
graph = build_dependency_graph(functions)
sorted_functions = list(nx.topological_sort(graph))
return sorted_functions
class ServerSide:
def __init__(self, code=None):
if code is not None:
self.code_ = code.decode('utf-8')
self.functions_ = build_topological_sort(code)
self.ast_tree_ = ast.parse(code)
def set_code(self, code):
self.code_ = code.decode('utf-8')
self.functions_ = build_topological_sort(code)
self.ast_tree_ = ast.parse(code)
def get_style_suggestions(self):
"""
The function will receive a python code string and return a list of suggestions based on the preferences
specified.
:return: A dictionary of suggestions, each key is a function name and the value is a FunctionImprovements object
"""
if self.code_ is None:
raise Exception("Code is not set, please call set_code first")
return get_style_suggestions(self)
def get_tests_suggestions(self):
"""
The function will receive a python code string and return a list of suggestions based on the preferences
specified.
:return: A list of tests run results
"""
if self.code_ is None:
raise Exception("Code is not set, please call set_code first")
return get_test_suggestions(self, self.code_)
def get_function_source(self, function_name):
# Get the start and end line numbers of the function definition
function_node = None
function_index = 0
for index, node in enumerate(self.ast_tree_.body):
if isinstance(node, ast.FunctionDef) and node.name == function_name:
function_node = node
function_index = index
break
if not function_node:
return None
start_line = function_node.lineno
# Extract the lines containing the function
print(type(self.code_))
lines = self.code_.split('\n')
if function_index == len(self.ast_tree_.body) - 1:
function_lines = lines[start_line - 1:]
else:
function_lines = lines[start_line - 1: self.ast_tree_.body[function_index + 1].lineno - 1]
# Join the lines back into a string
function_source = '\n'.join(function_lines)
return function_source
def get_sources(self):
"""
Gets source code and returns source codes of functions by topological sort order.
:return:
"""
for function_name in reversed(self.functions_):
function_source = self.get_function_source(function_name)
yield function_source
########################################
if 'key' not in st.session_state:
st.session_state["returned_data"] = None
st.session_state["keys"] = None
st.session_state["new_code_data"] = ""
st.session_state.key = 'UploadFile'
def call_api_improve():
return {"1": FunctionImprovements("old", "new", "your code is bad"),
"2": FunctionImprovements("old1", "new2", "your code is bad2")}
def call_api_test():
return [TestResponse("testfffffffffffffff1", "cofffffsssde1", "errossssssssssssssr1", "explansssssation1"),
TestResponse("test2", "code2", None, "explanation2")]
def run_improve():
loading = st.empty()
loading.write('Loading...')
time.sleep(2)
st.session_state["returned_data"] = st.session_state['server_side'].get_style_suggestions()
st.session_state["keys"] = list(st.session_state["returned_data"].keys())
loading.empty()
st.session_state.key = 'Improve'
def run_testing():
loading = st.empty()
loading.write('Loading...')
time.sleep(2)
st.session_state["returned_data"] = st.session_state['server_side'].get_tests_suggestions()
loading.empty()
st.session_state.key = 'TestResults'
st.title("Review :blue[Your] Code")
if st.session_state.key == 'UploadFile':
ch_text = st.empty()
ch_text.write("First We need to see your code")
ch_upload = st.empty()
code_data = ch_upload.file_uploader("Upload Code")
if code_data:
st.session_state['server_side'] = ServerSide(code_data.read())
st.session_state.key = 'ChooseOperation'
ch_text.empty()
ch_upload.empty()
if st.session_state.key == 'ChooseOperation':
# Create an empty placeholder for the start button
ch_improve_button = st.empty()
ch_test_button = st.empty()
# Create the start button and assign the 'start' function to its 'on_click' parameter
improve_button = ch_improve_button.button("Improve My Code")
test_button = ch_test_button.button("Test My Code")
# If the start button is clicked, clear the placeholders
if improve_button or test_button:
ch_improve_button.empty()
ch_test_button.empty()
st.session_state.key = 'loading'
if improve_button:
run_improve()
st.session_state.count = 0
else:
run_testing()
if st.session_state.key == 'runningApi':
# Define the initial code string
pass
if st.session_state.key == 'Improve':
ch_user_code = st.empty()
ch_improved_code = st.empty()
ch_comment_code = st.empty()
returned_data = st.session_state["returned_data"]
keys = st.session_state["keys"]
# Create empty placeholders for the 'accept yours' and 'accept theirs' buttons
ch_accept_yours = st.empty()
ch_accept_theirs = st.empty()
# Create the 'accept yours' and 'accept theirs' buttons
accept_yours = ch_accept_yours.button("Accept Yours")
accept_theirs = ch_accept_theirs.button("Accept Theirs")
if accept_yours:
# ch_accept_yours.empty()
# ch_accept_theirs.empty()
st.session_state["new_code_data"] += returned_data[keys[st.session_state.count]].old_code + "\n"
st.session_state.count += 1
# if st.session_state.count == len(keys):
# #todo remove everything and add download button to new code
# st.session_state.key = 'finished'
if accept_theirs:
st.session_state["new_code_data"] += returned_data[keys[st.session_state.count]].new_code + "\n"
st.session_state.count += 1
# #todo remove everything and add download button to new code
# st.session_state.key = 'finished'
if st.session_state.count == len(keys):
ch_accept_yours.empty()
ch_accept_theirs.empty()
ch_improved_code.empty()
ch_comment_code.empty()
ch_user_code.empty()
st.session_state.key = "Download"
else:
print("i is equals to " + str(st.session_state.count))
user_code = ch_user_code.code(returned_data[keys[st.session_state.count]].old_code, language='python',
line_numbers=True)
# Display the user's code with syntax highlighting
improved_code = ch_improved_code.code(returned_data[keys[st.session_state.count]].new_code, language='python',
line_numbers=True)
explanation = ch_comment_code.code(returned_data[keys[st.session_state.count]].comment, language='python',
line_numbers=True)
if st.session_state.key == 'TestResults':
to_add = []
for i in st.session_state["returned_data"]:
status = "✔️"
if i.error is not None:
status = "❌"
to_add.append(
{"Test name": i.name, "Test": i.source_code, "Error": i.error, "Description (Expendable)": i.explanation,
"Status": status})
df = pd.DataFrame(
to_add
)
edited_df = st.experimental_data_editor(df)
if st.session_state.key == "Download":
st.download_button('Download Result', st.session_state["new_code_data"])
| [
"lib/prompts/style_prompts.json",
"lib/prompts/test_prompts.json"
] |
2024-01-10 | Abhishekkumar03012001/voice-assistant | Voice-assistant.py | import subprocess
import wolframalpha
import pyttsx3
import tkinter
import json
import random
import operator
import speech_recognition as sr
import datetime
import wikipedia
import webbrowser
import os
import winshell
import pyjokes
import feedparser
import openai
import smtplib
import ctypes
import time
import requests
import shutil
from twilio.rest import Client
from clint.textui import progress
from ecapture import ecapture as ec
from bs4 import BeautifulSoup
import win32com.client as wincl
from urllib.request import urlopen
engine = pyttsx3.init('sapi5')
voices = engine.getProperty('voices')
engine.setProperty('voice', voices[0].id)
openai.api_key = 'YOUR_API_KEY'
def speak(audio):
engine.say(audio)
engine.runAndWait()
def wishMe():
hour = int(datetime.datetime.now().hour)
if hour >= 0 and hour < 12:
speak("Good Morning Sir !")
elif hour >= 12 and hour < 18:
speak("Good Afternoon Sir !")
else:
speak("Good Evening Sir !")
assname = ("Charlie")
speak("I am your Assistant")
speak(assname)
def username():
speak("How are you, sir?")
uname = takeCommand().lower()
positive_keywords = ["good", "fine", "healthy"]
negative_keywords = ["not", "cold", "sick"]
if any(keyword in uname for keyword in positive_keywords):
speak("That's good to hear, Abhishek, sir!")
elif any(keyword in uname for keyword in negative_keywords):
speak("I hope your day gets better, Abhishek, sir!")
else:
speak("I see. Take care, Abhishek, sir!")
speak("How can i Help you, Sir")
def takeCommand():
r = sr.Recognizer()
with sr.Microphone() as source:
print("Listening...")
r.pause_threshold = 1
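# seconds of silence to wait before the recognizer treats the phrase as complete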
audio = r.listen(source)
try:
print("Recognizing...")
query = r.recognize_google(audio, language='en-in')
print(f"User said: {query}\n")
except Exception as e:
print(e)
print("Unable to Recognize your voice.")
return "None"
return query
def sendEmail(to, content):
server = smtplib.SMTP('smtp.gmail.com', 587)
server.ehlo()
server.starttls()
# Enable low security in gmail
server.login('your email id', 'your email password')
server.sendmail('your email id', to, content)
server.close()
if __name__ == '__main__':
clear = lambda: os.system('cls')
assname = "Abhishek"
# This Function will clean any
# command before execution of this python file
clear()
wishMe()
username()
con=True
while con:
query = takeCommand().lower()
# All the commands said by user will be
# stored here in 'query' and will be
# converted to lower case for easily
# recognition of command
if 'wikipedia' in query:
speak('Searching Wikipedia...')
query = query.replace("wikipedia", "")
try:
results = wikipedia.summary(query, sentences=3)
speak("According to Wikipedia")
print(results)
speak(results)
except wikipedia.exceptions.DisambiguationError as e:
speak("There are multiple results matching your query. Please be more specific.")
except wikipedia.exceptions.PageError as e:
speak("Sorry, I could not find any results for your query.")
elif "goodbie" in query or "bye" in query or "bie" in query:
speak("Good Bye Sir")
con=False
elif "chrome" in query or "google" in query:
query = query.replace("search", "")
def open_chrome():
try:
# Windows
subprocess.Popen('C:\\Program Files\\Google\\Chrome\\Application\\chrome.exe')
except Exception as e:
speak("An error occurred while opening Chrome:")
print("An error occurred while opening Chrome:", e)
elif 'open google' in query:
speak("Here you go to Google\n")
webbrowser.open("google.com")
elif 'open stackoverflow' in query:
speak("Here you go to Stack Over flow.Happy coding")
webbrowser.open("stackoverflow.com")
elif 'play music' in query or "play song" in query:
speak("Here you go with music")
# music_dir = "G:\\Song"
music_dir = "D:\\music"
songs = os.listdir(music_dir)
print(songs)
random_number = random.randint(0, len(songs) - 1)
os.startfile(os.path.join(music_dir, songs[random_number]))
elif 'the time' in query:
strTime = datetime.datetime.now().strftime("%H:%M:%S")
speak(f"Sir, the time is {strTime}")
elif 'open opera' in query:
codePath = r"C:\\Users\\AppData\\Local\\Programs\\Opera\\launcher.exe"
os.startfile(codePath)
elif 'email' in query:
try:
speak("What should I say?")
content = takeCommand()
to = "Receiver email address"
sendEmail(to, content)
speak("Email has been sent !")
except Exception as e:
print(e)
speak("I am not able to send this email")
elif 'send a mail' in query:
try:
speak("What should I say?")
content = takeCommand()
speak("whom should i send")
to = input()
sendEmail(to, content)
speak("Email has been sent !")
except Exception as e:
print(e)
speak("I am not able to send this email")
elif 'how are you' in query:
speak("I am fine, Thank you")
speak("How are you, Sir")
elif 'fine' in query or "good" in query:
speak("It's good to know that your fine")
elif "I like someone" in query or "I love someone" in query:
speak("Wow , That is wonderful")
speak("Who is That lucky one, Sir")
elif "what's your name" in query or "What is your name" in query:
speak("My friends call me")
speak(assname)
print("My friends call me", assname)
elif 'exit' in query:
speak("Thanks for giving me your time")
exit()
elif "who made you" in query or "who created you" in query:
speak("I have been created by Gaurav.")
elif 'joke' in query:
speak(pyjokes.get_joke())
elif "calculate" in query:
app_id = "Wolframalpha api id"
client = wolframalpha.Client(app_id)
indx = query.lower().split().index('calculate')
query = query.split()[indx + 1:]
res = client.query(' '.join(query))
answer = next(res.results).text
print("The answer is " + answer)
speak("The answer is " + answer)
elif 'search' in query or 'play' in query:
query = query.replace("search", "")
query = query.replace("play", "")
webbrowser.open(query)
elif "who i am" in query:
speak("If you talk then definitely your human.")
elif "why you came to world" in query:
speak("Thanks to Abhishek. further It's a secret")
elif 'power point presentation' in query:
speak("opening Power Point presentation")
power = r"D:\\Voice Assistant.pptx"
os.startfile(power)
elif 'is love' in query:
speak("It is 7th sense that destroy all other senses")
elif "who are you" in query:
speak("I am your virtual assistant created by Abhishek")
elif 'reason for you' in query:
speak("I was created as a Minor project by Mister Abhishek ")
elif 'change background' in query:
ctypes.windll.user32.SystemParametersInfoW(20,
0,
"Location of wallpaper",
0)
speak("Background changed successfully")
elif 'news' in query:
try:
jsonObj = urlopen(
'''https://newsapi.org/v1/articles?source=the-times-of-india&sortBy=top&apiKey=\\times of India Api key\\''')
data = json.load(jsonObj)
i = 1
speak('here are some top news from the times of india')
print('''=============== TIMES OF INDIA ============''' + '\n')
for item in data['articles']:
print(str(i) + '. ' + item['title'] + '\n')
print(item['description'] + '\n')
speak(str(i) + '. ' + item['title'] + '\n')
i += 1
except Exception as e:
print(str(e))
elif 'lock window' in query:
speak("locking the device")
ctypes.windll.user32.LockWorkStation()
elif 'shutdown system' in query:
speak("Hold On a Sec ! Your system is on its way to shut down")
subprocess.call('shutdown /p /f')
elif 'empty recycle bin' in query:
winshell.recycle_bin().empty(confirm=False, show_progress=False, sound=True)
speak("Recycle Bin Recycled")
elif "don't listen" in query or "stop listening" in query:
speak("for how much time you want to stop jarvis from listening commands")
a = int(takeCommand())
time.sleep(a)
print(a)
elif "where is" in query:
query = query.replace("where is", "")
location = query
speak("User asked to Locate")
speak(location)
webbrowser.open("https://www.google.nl / maps / place/" + location + "")
elif "camera" in query or "take a photo" in query:
ec.capture(0, "Charlie Camera ", "img.jpg")
elif "restart" in query:
subprocess.call(["shutdown", "/r"])
elif "hibernate" in query or "sleep" in query:
speak("Hibernating")
subprocess.call("shutdown / h")
elif "log off" in query or "sign out" in query:
speak("Make sure all the application are closed before sign-out")
time.sleep(5)
subprocess.call(["shutdown", "/l"])
elif "write a note" in query:
speak("What should i write, sir")
note = takeCommand()
file = open('Charlie.txt', 'w')
speak("Sir, Should i include date and time")
snfm = takeCommand()
if 'yes' in snfm or 'sure' in snfm:
strTime = datetime.datetime.now().strftime("%H:%M:%S")
file.write(strTime)
file.write(" :- ")
file.write(note)
else:
file.write(note)
elif "show note" in query:
speak("Showing Notes")
file = open("Charlie.txt", "r")
print(file.read())
speak(file.read(6))
elif "update assistant" in query:
speak("After downloading file please replace this file with the downloaded one")
url = '# url after uploading file'
r = requests.get(url, stream=True)
with open("Voice.py", "wb") as Pypdf:
total_length = int(r.headers.get('content-length'))
for ch in progress.bar(r.iter_content(chunk_size=2391975),
expected_size=(total_length / 1024) + 1):
if ch:
Pypdf.write(ch)
# NPPR9-FWDCX-D2C8J-H872K-2YT43
elif "Charlie" in query:
wishMe()
speak("Charlie 1 point o in your service Mister")
speak(assname)
elif "weather" in query:
# Google Open weather website
# to get API of Open weather
api_key = "Api key"
base_url = "http://api.openweathermap.org / data / 2.5 / weather?"
speak(" City name ")
print("City name : ")
city_name = takeCommand()
complete_url = base_url + "appid =" + api_key + "&q =" + city_name
response = requests.get(complete_url)
x = response.json()
if x["code"] != "404":
y = x["main"]
current_temperature = y["temp"]
current_pressure = y["pressure"]
current_humidiy = y["humidity"]
z = x["weather"]
weather_description = z[0]["description"]
print(" Temperature (in kelvin unit) = " + str(
current_temperature) + "\n atmospheric pressure (in hPa unit) =" + str(
current_pressure) + "\n humidity (in percentage) = " + str(
current_humidiy) + "\n description = " + str(weather_description))
else:
speak(" City Not Found ")
elif "send message " in query:
# You need to create an account on Twilio to use this service
account_sid = 'Account Sid key'
auth_token = 'Auth token'
client = Client(account_sid, auth_token)
message = client.messages \
.create(
body=takeCommand(),
from_='Sender No',
to='Receiver No'
)
print(message.sid)
elif "wikipedia" in query:
webbrowser.open("wikipedia.com")
elif "Good Morning" in query:
speak("A warm" + query)
speak("How are you Mister")
speak(assname)
# most asked question from google Assistant
elif "will you be my best friend" in query or "will you be my bf" in query:
speak("I'm not sure about, may be you should give me some time")
elif "how are you" in query:
speak("I'm fine, glad you me that")
elif "i love you" in query:
speak("It's hard to understand")
elif "what is" in query or "who is" in query:
# Use the same API key
# that we have generated earlier
client = wolframalpha.Client("API_ID")
res = client.query(query)
try:
print(next(res.results).text)
speak(next(res.results).text)
except StopIteration:
print("No results")
elif "chatgpt" in query or "chat gpt" in query:
def chat_with_gpt(query):
response = openai.Completion.create(
engine='text-davinci-003',
prompt=query,
max_tokens=50,
temperature=0.7,
n=1,
stop=None,
echo=True
)
speak(response.choices[0].text.strip())
chat_with_gpt(query)
elif "youtube" in query or "yt" in query:
query = query.replace("search", "")
def open_youtube_with_search(query):
try:
search_url = f"https://www.youtube.com/results?search_query={query}"
webbrowser.open(search_url)
print(f"YouTube search for '{query}' opened successfully.")
except Exception as e:
print("An error occurred while opening YouTube:", e)
# elif "" in query:
# Command go here
# For adding more commands
| [] |
2024-01-10 | TheKostVK/TelegramBotOpenAI | bot~telegram_bot.py | import logging
import os
import asyncio
import telegram
from telegram import constants
from telegram import Message, MessageEntity, Update, InlineQueryResultArticle, InputTextMessageContent, BotCommand
from telegram.error import RetryAfter, TimedOut
from telegram.ext import ApplicationBuilder, ContextTypes, CommandHandler, MessageHandler, \
filters, InlineQueryHandler, Application
from pydub import AudioSegment
from openai_helper import OpenAIHelper
from usage_tracker import UsageTracker
def message_text(message: Message) -> str:
"""
Returns the text of a message, excluding any bot commands.
"""
message_text = message.text
if message_text is None:
return ''
for _, text in sorted(message.parse_entities([MessageEntity.BOT_COMMAND]).items(),
key=(lambda item: item[0].offset)):
message_text = message_text.replace(text, '').strip()
return message_text if len(message_text) > 0 else ''
class ChatGPTTelegramBot:
"""
Class representing a ChatGPT Telegram Bot.
"""
def __init__(self, config: dict, openai: OpenAIHelper):
"""
Initializes the bot with the given configuration and GPT bot object.
:param config: A dictionary containing the bot configuration
:param openai: OpenAIHelper object
"""
self.config = config
self.openai = openai
self.commands = [
BotCommand(command='help', description='Show help message'),
BotCommand(command='reset', description='Reset the conversation. Optionally pass high-level instructions '
'(e.g. /reset You are a helpful assistant)'),
BotCommand(command='image', description='Generate image from prompt (e.g. /image cat)'),
BotCommand(command='stats', description='Get your current usage statistics')
]
self.disallowed_message = "Sorry, you are not allowed to use this bot. You can check out the source code at " \
"https://github.com/n3d1117/chatgpt-telegram-bot"
self.budget_limit_message = "Sorry, you have reached your monthly usage limit."
self.usage = {}
async def help(self, update: Update, context: ContextTypes.DEFAULT_TYPE) -> None:
"""
Shows the help menu.
"""
commands = [f'/{command.command} - {command.description}' for command in self.commands]
help_text = 'I\'m a ChatGPT bot, talk to me!' + \
'\n\n' + \
'\n'.join(commands) + \
'\n\n' + \
'Send me a voice message or file and I\'ll transcribe it for you!' + \
'\n\n' + \
"Open source at https://github.com/n3d1117/chatgpt-telegram-bot"
await update.message.reply_text(help_text, disable_web_page_preview=True)
async def stats(self, update: Update, context: ContextTypes.DEFAULT_TYPE):
"""
Returns token usage statistics for current day and month.
"""
if not await self.is_allowed(update):
logging.warning(f'User {update.message.from_user.name} is not allowed to request their usage statistics')
await self.send_disallowed_message(update, context)
return
logging.info(f'User {update.message.from_user.name} requested their usage statistics')
user_id = update.message.from_user.id
if user_id not in self.usage:
self.usage[user_id] = UsageTracker(user_id, update.message.from_user.name)
tokens_today, tokens_month = self.usage[user_id].get_current_token_usage()
images_today, images_month = self.usage[user_id].get_current_image_count()
transcribe_durations = self.usage[user_id].get_current_transcription_duration()
cost_today, cost_month = self.usage[user_id].get_current_cost()
chat_id = update.effective_chat.id
chat_messages, chat_token_length = self.openai.get_conversation_stats(chat_id)
budget = await self.get_remaining_budget(update)
text_current_conversation = f"*Current conversation:*\n" + \
f"{chat_messages} chat messages in history.\n" + \
f"{chat_token_length} chat tokens in history.\n" + \
f"----------------------------\n"
text_today = f"*Usage today:*\n" + \
f"{tokens_today} chat tokens used.\n" + \
f"{images_today} images generated.\n" + \
f"{transcribe_durations[0]} minutes and {transcribe_durations[1]} seconds transcribed.\n" + \
f"💰 For a total amount of ${cost_today:.2f}\n" + \
f"----------------------------\n"
text_month = f"*Usage this month:*\n" + \
f"{tokens_month} chat tokens used.\n" + \
f"{images_month} images generated.\n" + \
f"{transcribe_durations[2]} minutes and {transcribe_durations[3]} seconds transcribed.\n" + \
f"💰 For a total amount of ${cost_month:.2f}"
# text_budget filled with conditional content
text_budget = "\n\n"
if budget < float('inf'):
text_budget += f"You have a remaining budget of ${budget:.2f} this month.\n"
# add OpenAI account information for admin request
if self.is_admin(update):
grant_balance = self.openai.get_grant_balance()
if grant_balance > 0.0:
text_budget += f"Your remaining OpenAI grant balance is ${grant_balance:.2f}.\n"
text_budget += f"Your OpenAI account was billed ${self.openai.get_billing_current_month():.2f} this month."
usage_text = text_current_conversation + text_today + text_month + text_budget
await update.message.reply_text(usage_text, parse_mode=constants.ParseMode.MARKDOWN)
async def reset(self, update: Update, context: ContextTypes.DEFAULT_TYPE):
"""
Resets the conversation.
"""
if not await self.is_allowed(update):
logging.warning(f'User {update.message.from_user.name} is not allowed to reset the conversation')
await self.send_disallowed_message(update, context)
return
logging.info(f'Resetting the conversation for user {update.message.from_user.name}...')
chat_id = update.effective_chat.id
reset_content = message_text(update.message)
self.openai.reset_chat_history(chat_id=chat_id, content=reset_content)
await context.bot.send_message(chat_id=chat_id, text='Done!')
async def image(self, update: Update, context: ContextTypes.DEFAULT_TYPE):
"""
Generates an image for the given prompt using DALL·E APIs
"""
if not await self.is_allowed(update):
logging.warning(f'User {update.message.from_user.name} is not allowed to generate images')
await self.send_disallowed_message(update, context)
return
if not await self.is_within_budget(update):
logging.warning(f'User {update.message.from_user.name} reached their usage limit')
await self.send_budget_reached_message(update, context)
return
chat_id = update.effective_chat.id
image_query = message_text(update.message)
if image_query == '':
await context.bot.send_message(chat_id=chat_id, text='Please provide a prompt! (e.g. /image cat)')
return
logging.info(f'New image generation request received from user {update.message.from_user.name}')
await context.bot.send_chat_action(chat_id=chat_id, action=constants.ChatAction.UPLOAD_PHOTO)
try:
image_url, image_size = await self.openai.generate_image(prompt=image_query)
await context.bot.send_photo(
chat_id=chat_id,
reply_to_message_id=update.message.message_id,
photo=image_url
)
# add image request to users usage tracker
user_id = update.message.from_user.id
self.usage[user_id].add_image_request(image_size, self.config['image_prices'])
# add guest chat request to guest usage tracker
if str(user_id) not in self.config['allowed_user_ids'].split(',') and 'guests' in self.usage:
self.usage["guests"].add_image_request(image_size, self.config['image_prices'])
except Exception as e:
logging.exception(e)
await context.bot.send_message(
chat_id=chat_id,
reply_to_message_id=update.message.message_id,
text=f'Failed to generate image: {str(e)}',
parse_mode=constants.ParseMode.MARKDOWN
)
async def transcribe(self, update: Update, context: ContextTypes.DEFAULT_TYPE):
"""
Transcribe audio messages.
"""
if not await self.is_allowed(update):
logging.warning(f'User {update.message.from_user.name} is not allowed to transcribe audio messages')
await self.send_disallowed_message(update, context)
return
if not await self.is_within_budget(update):
logging.warning(f'User {update.message.from_user.name} reached their usage limit')
await self.send_budget_reached_message(update, context)
return
if self.is_group_chat(update) and self.config['ignore_group_transcriptions']:
logging.info(f'Transcription coming from group chat, ignoring...')
return
chat_id = update.effective_chat.id
await context.bot.send_chat_action(chat_id=chat_id, action=constants.ChatAction.TYPING)
filename = update.message.effective_attachment.file_unique_id
filename_mp3 = f'{filename}.mp3'
try:
media_file = await context.bot.get_file(update.message.effective_attachment.file_id)
await media_file.download_to_drive(filename)
except Exception as e:
logging.exception(e)
await context.bot.send_message(
chat_id=chat_id,
reply_to_message_id=update.message.message_id,
text=f'Failed to download audio file: {str(e)}. Make sure the file is not too large. (max 20MB)',
parse_mode=constants.ParseMode.MARKDOWN
)
return
# detect and extract audio from the attachment with pydub
try:
audio_track = AudioSegment.from_file(filename)
audio_track.export(filename_mp3, format="mp3")
logging.info(f'New transcribe request received from user {update.message.from_user.name}')
except Exception as e:
logging.exception(e)
await context.bot.send_message(
chat_id=update.effective_chat.id,
reply_to_message_id=update.message.message_id,
text='Unsupported file type'
)
if os.path.exists(filename):
os.remove(filename)
return
filename_mp3 = f'{filename}.mp3'
user_id = update.message.from_user.id
if user_id not in self.usage:
self.usage[user_id] = UsageTracker(user_id, update.message.from_user.name)
# send decoded audio to openai
try:
# Transcribe the audio file
transcript = await self.openai.transcribe(filename_mp3)
# add transcription seconds to usage tracker
transcription_price = self.config['transcription_price']
self.usage[user_id].add_transcription_seconds(audio_track.duration_seconds, transcription_price)
# add guest chat request to guest usage tracker
allowed_user_ids = self.config['allowed_user_ids'].split(',')
if str(user_id) not in allowed_user_ids and 'guests' in self.usage:
self.usage["guests"].add_transcription_seconds(audio_track.duration_seconds, transcription_price)
if self.config['voice_reply_transcript']:
# Split into chunks of 4096 characters (Telegram's message limit)
transcript_output = f'_Transcript:_\n"{transcript}"'
chunks = self.split_into_chunks(transcript_output)
for index, transcript_chunk in enumerate(chunks):
await context.bot.send_message(
chat_id=chat_id,
reply_to_message_id=update.message.message_id if index == 0 else None,
text=transcript_chunk,
parse_mode=constants.ParseMode.MARKDOWN
)
else:
# Get the response of the transcript
response, total_tokens = await self.openai.get_chat_response(chat_id=chat_id, query=transcript)
# add chat request to users usage tracker
self.usage[user_id].add_chat_tokens(total_tokens, self.config['token_price'])
# add guest chat request to guest usage tracker
if str(user_id) not in allowed_user_ids and 'guests' in self.usage:
self.usage["guests"].add_chat_tokens(total_tokens, self.config['token_price'])
# Split into chunks of 4096 characters (Telegram's message limit)
transcript_output = f'_Transcript:_\n"{transcript}"\n\n_Answer:_\n{response}'
chunks = self.split_into_chunks(transcript_output)
for index, transcript_chunk in enumerate(chunks):
await context.bot.send_message(
chat_id=chat_id,
reply_to_message_id=update.message.message_id if index == 0 else None,
text=transcript_chunk,
parse_mode=constants.ParseMode.MARKDOWN
)
except Exception as e:
logging.exception(e)
await context.bot.send_message(
chat_id=chat_id,
reply_to_message_id=update.message.message_id,
text=f'Failed to transcribe text: {str(e)}',
parse_mode=constants.ParseMode.MARKDOWN
)
finally:
# Cleanup files
if os.path.exists(filename_mp3):
os.remove(filename_mp3)
if os.path.exists(filename):
os.remove(filename)
async def prompt(self, update: Update, context: ContextTypes.DEFAULT_TYPE):
"""
React to incoming messages and respond accordingly.
"""
if not await self.is_allowed(update):
logging.warning(f'User {update.message.from_user.name} is not allowed to use the bot')
await self.send_disallowed_message(update, context)
return
if not await self.is_within_budget(update):
logging.warning(f'User {update.message.from_user.name} reached their usage limit')
await self.send_budget_reached_message(update, context)
return
logging.info(f'New message received from user {update.message.from_user.name}')
chat_id = update.effective_chat.id
user_id = update.message.from_user.id
prompt = update.message.text
if self.is_group_chat(update):
trigger_keyword = self.config['group_trigger_keyword']
if prompt.startswith(trigger_keyword):
prompt = prompt[len(trigger_keyword):].strip()
else:
if update.message.reply_to_message and update.message.reply_to_message.from_user.id == context.bot.id:
logging.info('Message is a reply to the bot, allowing...')
else:
logging.warning('Message does not start with trigger keyword, ignoring...')
return
await context.bot.send_chat_action(chat_id=chat_id, action=constants.ChatAction.TYPING)
try:
if self.config['stream']:
is_group_chat = self.is_group_chat(update)
stream_response = self.openai.get_chat_response_stream(chat_id=chat_id, query=prompt)
i = 0
prev = ''
sent_message = None
backoff = 0
chunk = 0
async for content, tokens in stream_response:
if len(content.strip()) == 0:
continue
chunks = self.split_into_chunks(content)
if len(chunks) > 1:
content = chunks[-1]
if chunk != len(chunks) - 1:
chunk += 1
try:
await self.edit_message_with_retry(context, chat_id, sent_message.message_id,
chunks[-2])
except:
pass
try:
sent_message = await context.bot.send_message(
chat_id=sent_message.chat_id,
text=content if len(content) > 0 else "..."
)
except:
pass
continue
if is_group_chat:
# group chats have stricter flood limits
cutoff = 180 if len(content) > 1000 else 120 if len(content) > 200 else 90 if len(
content) > 50 else 50
else:
cutoff = 90 if len(content) > 1000 else 45 if len(content) > 200 else 25 if len(
content) > 50 else 15
cutoff += backoff
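# cutoff is how many new characters must accumulate before the Telegram message is
# edited again; backoff grows after rate-limit/timeout errors so edits happen less often.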
if i == 0:
try:
if sent_message is not None:
await context.bot.delete_message(chat_id=sent_message.chat_id,
message_id=sent_message.message_id)
sent_message = await context.bot.send_message(
chat_id=chat_id,
reply_to_message_id=update.message.message_id,
text=content
)
except:
continue
elif abs(len(content) - len(prev)) > cutoff or tokens != 'not_finished':
prev = content
try:
await self.edit_message_with_retry(context, chat_id, sent_message.message_id, content)
except RetryAfter as e:
backoff += 5
await asyncio.sleep(e.retry_after)
continue
except TimedOut:
backoff += 5
await asyncio.sleep(0.5)
continue
except Exception:
backoff += 5
continue
await asyncio.sleep(0.01)
i += 1
if tokens != 'not_finished':
total_tokens = int(tokens)
else:
response, total_tokens = await self.openai.get_chat_response(chat_id=chat_id, query=prompt)
# Split into chunks of 4096 characters (Telegram's message limit)
chunks = self.split_into_chunks(response)
for index, chunk in enumerate(chunks):
await context.bot.send_message(
chat_id=chat_id,
reply_to_message_id=update.message.message_id if index == 0 else None,
text=chunk,
parse_mode=constants.ParseMode.MARKDOWN
)
try:
# add chat request to users usage tracker
self.usage[user_id].add_chat_tokens(total_tokens, self.config['token_price'])
# add guest chat request to guest usage tracker
allowed_user_ids = self.config['allowed_user_ids'].split(',')
if str(user_id) not in allowed_user_ids and 'guests' in self.usage:
self.usage["guests"].add_chat_tokens(total_tokens, self.config['token_price'])
except:
pass
except Exception as e:
logging.exception(e)
await context.bot.send_message(
chat_id=chat_id,
reply_to_message_id=update.message.message_id,
text=f'Failed to get response: {str(e)}',
parse_mode=constants.ParseMode.MARKDOWN
)
async def inline_query(self, update: Update, context: ContextTypes.DEFAULT_TYPE) -> None:
"""
Handle the inline query. This is run when you type: @botusername <query>
"""
query = update.inline_query.query
if query == '':
return
results = [
InlineQueryResultArticle(
id=query,
title='Ask ChatGPT',
input_message_content=InputTextMessageContent(query),
description=query,
thumb_url='https://user-images.githubusercontent.com/11541888/223106202-7576ff11-2c8e-408d-94ea-b02a7a32149a.png'
)
]
await update.inline_query.answer(results)
async def edit_message_with_retry(self, context: ContextTypes.DEFAULT_TYPE, chat_id: int, message_id: int,
text: str):
"""
Edit a message with retry logic in case of failure (e.g. broken markdown)
:param context: The context to use
:param chat_id: The chat id to edit the message in
:param message_id: The message id to edit
:param text: The text to edit the message with
:return: None
"""
try:
await context.bot.edit_message_text(
chat_id=chat_id,
message_id=message_id,
text=text,
parse_mode=constants.ParseMode.MARKDOWN
)
except telegram.error.BadRequest as e:
if str(e).startswith("Message is not modified"):
return
try:
await context.bot.edit_message_text(
chat_id=chat_id,
message_id=message_id,
text=text
)
except Exception as e:
logging.warning(f'Failed to edit message: {str(e)}')
raise e
except Exception as e:
logging.warning(str(e))
raise e
async def send_disallowed_message(self, update: Update, context: ContextTypes.DEFAULT_TYPE):
"""
Sends the disallowed message to the user.
"""
await context.bot.send_message(
chat_id=update.effective_chat.id,
text=self.disallowed_message,
disable_web_page_preview=True
)
async def send_budget_reached_message(self, update: Update, context: ContextTypes.DEFAULT_TYPE):
"""
Sends the budget reached message to the user.
"""
await context.bot.send_message(
chat_id=update.effective_chat.id,
text=self.budget_limit_message
)
async def error_handler(self, update: object, context: ContextTypes.DEFAULT_TYPE) -> None:
"""
Handles errors in the telegram-python-bot library.
"""
logging.error(f'Exception while handling an update: {context.error}')
def is_group_chat(self, update: Update) -> bool:
"""
Checks if the message was sent from a group chat
"""
return update.effective_chat.type in [
constants.ChatType.GROUP,
constants.ChatType.SUPERGROUP
]
async def is_user_in_group(self, update: Update, user_id: int) -> bool:
"""
Checks if user_id is a member of the group
"""
member = await update.effective_chat.get_member(user_id)
return member.status in [
constants.ChatMemberStatus.OWNER,
constants.ChatMemberStatus.ADMINISTRATOR,
constants.ChatMemberStatus.MEMBER
]
async def is_allowed(self, update: Update) -> bool:
"""
Checks if the user is allowed to use the bot.
"""
if self.config['allowed_user_ids'] == '*':
return True
if self.is_admin(update):
return True
allowed_user_ids = self.config['allowed_user_ids'].split(',')
# Check if user is allowed
if str(update.message.from_user.id) in allowed_user_ids:
return True
# Check if it's a group chat with at least one authorized member
if self.is_group_chat(update):
for user in allowed_user_ids:
if await self.is_user_in_group(update, user):
logging.info(f'{user} is a member. Allowing group chat message...')
return True
logging.info(f'Group chat messages from user {update.message.from_user.name} are not allowed')
return False
def is_admin(self, update: Update) -> bool:
"""
Checks if the user is the admin of the bot.
The first user in the user list is the admin.
"""
if self.config['admin_user_ids'] == '-':
logging.info('No admin user defined.')
return False
admin_user_ids = self.config['admin_user_ids'].split(',')
# Check if user is in the admin user list
if str(update.message.from_user.id) in admin_user_ids:
return True
return False
async def get_remaining_budget(self, update: Update) -> float:
user_id = update.message.from_user.id
if user_id not in self.usage:
self.usage[user_id] = UsageTracker(user_id, update.message.from_user.name)
if self.is_admin(update):
return float('inf')
if self.config['monthly_user_budgets'] == '*':
return float('inf')
allowed_user_ids = self.config['allowed_user_ids'].split(',')
if str(user_id) in allowed_user_ids:
# find budget for allowed user
user_index = allowed_user_ids.index(str(user_id))
user_budgets = self.config['monthly_user_budgets'].split(',')
# check if user is included in budgets list
if len(user_budgets) <= user_index:
logging.warning(f'No budget set for user: {update.message.from_user.name} ({user_id}).')
return 0.0
user_budget = float(user_budgets[user_index])
cost_month = self.usage[user_id].get_current_cost()[1]
remaining_budget = user_budget - cost_month
return remaining_budget
else:
return 0.0
async def is_within_budget(self, update: Update) -> bool:
"""
Checks if the user reached their monthly usage limit.
Initializes UsageTracker for user and guest when needed.
"""
user_id = update.message.from_user.id
if user_id not in self.usage:
self.usage[user_id] = UsageTracker(user_id, update.message.from_user.name)
if self.is_admin(update):
return True
if self.config['monthly_user_budgets'] == '*':
return True
allowed_user_ids = self.config['allowed_user_ids'].split(',')
if str(user_id) in allowed_user_ids:
# find budget for allowed user
user_index = allowed_user_ids.index(str(user_id))
user_budgets = self.config['monthly_user_budgets'].split(',')
# check if user is included in budgets list
if len(user_budgets) <= user_index:
logging.warning(f'No budget set for user: {update.message.from_user.name} ({user_id}).')
return False
user_budget = float(user_budgets[user_index])
cost_month = self.usage[user_id].get_current_cost()[1]
# Check if allowed user is within budget
return user_budget > cost_month
# Check if group member is within budget
if self.is_group_chat(update):
for user in allowed_user_ids:
if await self.is_user_in_group(update, user):
if 'guests' not in self.usage:
self.usage['guests'] = UsageTracker('guests', 'all guest users in group chats')
if self.config['monthly_guest_budget'] >= self.usage['guests'].get_current_cost()[1]:
return True
logging.warning('Monthly guest budget for group chats used up.')
return False
logging.info(f'Group chat messages from user {update.message.from_user.name} are not allowed')
return False
def split_into_chunks(self, text: str, chunk_size: int = 4096) -> list[str]:
"""
Splits a string into chunks of a given size.
"""
return [text[i:i + chunk_size] for i in range(0, len(text), chunk_size)]
async def post_init(self, application: Application) -> None:
"""
Post initialization hook for the bot.
"""
await application.bot.set_my_commands(self.commands)
def run(self):
"""
Runs the bot indefinitely until the user presses Ctrl+C
"""
application = ApplicationBuilder() \
.token(self.config['token']) \
.proxy_url(self.config['proxy']) \
.get_updates_proxy_url(self.config['proxy']) \
.post_init(self.post_init) \
.concurrent_updates(True) \
.build()
application.add_handler(CommandHandler('reset', self.reset))
application.add_handler(CommandHandler('help', self.help))
application.add_handler(CommandHandler('image', self.image))
application.add_handler(CommandHandler('start', self.help))
application.add_handler(CommandHandler('stats', self.stats))
application.add_handler(MessageHandler(
filters.AUDIO | filters.VOICE | filters.Document.AUDIO |
filters.VIDEO | filters.VIDEO_NOTE | filters.Document.VIDEO,
self.transcribe))
application.add_handler(MessageHandler(filters.TEXT & (~filters.COMMAND), self.prompt))
application.add_handler(InlineQueryHandler(self.inline_query, chat_types=[
constants.ChatType.GROUP, constants.ChatType.SUPERGROUP
]))
application.add_error_handler(self.error_handler)
application.run_polling()
| [] |
2024-01-10 | stefco/gwpy | gwpy~table~io~cwb.py | # -*- coding: utf-8 -*-
# Copyright (C) Duncan Macleod (2014)
#
# This file is part of GWpy.
#
# GWpy is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# GWpy is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with GWpy. If not, see <http://www.gnu.org/licenses/>.
"""Read events from Coherent Wave-Burst (cWB)-format ROOT files.
"""
import re
from astropy.io.ascii import core
from ...io import registry
from .. import (Table, EventTable)
__author__ = 'Duncan Macleod <duncan.macleod@ligo.org>'
# -- ROOT ---------------------------------------------------------------------
def table_from_cwb(source, *args, **kwargs):
"""Read an `EventTable` from a Coherent WaveBurst ROOT file
This function just redirects to the format='root' reader with appropriate
defaults.
"""
return EventTable.read(source, 'waveburst', *args, format='root', **kwargs)
registry.register_reader('root.cwb', EventTable, table_from_cwb)
# -- ASCII --------------------------------------------------------------------
class CwbHeader(core.BaseHeader):
"""Parser for cWB ASCII header
"""
def get_cols(self, lines):
"""Initialize Column objects from a multi-line ASCII header
Parameters
----------
lines : `list`
List of table lines
"""
re_name_def = re.compile(
r'^\s*#\s+' # whitespace and comment marker
r'(?P<colnumber>[0-9]+)\s+-\s+' # number of column
r'(?P<colname>(.*))'
)
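# matches header lines of the form "# 1 - <column name>"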
self.names = []
include_cuts = False
for line in lines:
if not line.startswith('# '):
break # End of header lines
elif line.startswith('# -/+'):
include_cuts = True
else:
match = re_name_def.search(line)
if match:
self.names.append(match.group('colname').rstrip())
if not self.names:
raise core.InconsistentTableError(
'No column names found in cWB header')
if include_cuts:
self.cols = [ # pylint: disable=attribute-defined-outside-init
core.Column(name='selection cut 1'),
core.Column(name='selection cut 2'),
]
else:
self.cols = [] # pylint: disable=attribute-defined-outside-init
for name in self.names:
col = core.Column(name=name)
self.cols.append(col)
def write(self, lines):
if 'selection cut 1' in self.colnames:
lines.append('# -/+ - not passed/passed final selection cuts')
for i, name in enumerate(self.colnames):
lines.append('# %.2d - %s' % (i+1, name))
class CwbData(core.BaseData):
"""Parser for cWB ASCII data
"""
comment = '#'
class Cwb(core.BaseReader):
"""Read an Cwb file
"""
_format_name = 'cwb'
_io_registry_can_write = True
_description = 'cWB EVENTS format table'
header_class = CwbHeader
data_class = CwbData
# register ascii.cwb for EventTable
registry.register_reader(
'ascii.cwb', EventTable, registry.get_reader('ascii.cwb', Table))
| [] |
2024-01-10 | leovantoji/llm_development_luupai123 | src~repl~python_repl.py | from langchain.agents.agent_toolkits import create_python_agent
from langchain.agents.agent_types import AgentType
from langchain.llms.openai import OpenAI
from langchain.tools.python.tool import PythonREPLTool
agent_executor = create_python_agent(
llm=OpenAI(temperature=0.5, max_tokens=500),
tool=PythonREPLTool(),
verbose=True,
agent_type=AgentType.ZERO_SHOT_REACT_DESCRIPTION,
handle_parsing_errors=True,
)
agent_executor.run("What is the 10th prime number?")
| [] |
2024-01-10 | leovantoji/llm_development_luupai123 | src~internet_browsing~arxiv_naive.py | from langchain.agents import AgentType
from langchain.agents import initialize_agent
from langchain.agents import load_tools
from langchain.chat_models import ChatOpenAI
llm = ChatOpenAI(temperature=0.3)
tools = load_tools(["arxiv"])
agent_chain = initialize_agent(
tools=tools,
llm=llm,
max_iterations=5,
agent=AgentType.ZERO_SHOT_REACT_DESCRIPTION,
verbose=True,
handle_parsing_errors=True,
)
agent_chain.run("What's income waterfall model?")
| [] |
2024-01-10 | leovantoji/llm_development_luupai123 | src~shell_tool~shell_tool.py | from langchain.agents import AgentType
from langchain.agents import initialize_agent
from langchain.chat_models import ChatOpenAI
from langchain.tools import ShellTool
shell_tool = ShellTool()
llm = ChatOpenAI(temperature=0)
shell_tool.description += f"args {shell_tool.args}".replace("{", "{{").replace(
"}", "}}"
)
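# escape literal braces so they are not treated as template variables when the
# tool description is formatted into the agent prompt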
agent = initialize_agent(
tools=[shell_tool],
llm=llm,
agent=AgentType.ZERO_SHOT_REACT_DESCRIPTION,
verbose=True,
handle_parsing_errors=True,
)
agent.run(
"""do not run the codes. """
"""just create a text file called empty.txt and inside it, """
"""add code that uses PyTorch framework """
"""for training a basic convolutional neural network for 4 epochs"""
)
| [] |
2024-01-10 | leovantoji/llm_development_luupai123 | src~human_tool~human_tool.py | from langchain.agents import AgentType
from langchain.agents import initialize_agent
from langchain.agents import load_tools
from langchain.chat_models import ChatOpenAI
from langchain.llms import OpenAI
llm = ChatOpenAI(temperature=0.5)
math_llm = OpenAI(temperature=0.5)
tools = load_tools(
["human", "llm-math"],
llm=math_llm,
)
agent_chain = initialize_agent(
tools=tools,
llm=llm,
agent=AgentType.ZERO_SHOT_REACT_DESCRIPTION,
verbose=True,
handle_parsing_errors=True,
)
agent_chain.run("what is my math problem and its solution")
| [] |
2024-01-10 | leovantoji/llm_development_luupai123 | src~internet_browsing~arxiv_chainlit.py | import chainlit as cl
from langchain.agents import AgentType
from langchain.agents import initialize_agent
from langchain.agents import load_tools
from langchain.chat_models import ChatOpenAI
@cl.on_chat_start
def start():
llm = ChatOpenAI(temperature=0.3, streaming=True)
tools = load_tools(["arxiv"])
agent_chain = initialize_agent(
tools=tools,
llm=llm,
max_iterations=5,
agent=AgentType.ZERO_SHOT_REACT_DESCRIPTION,
verbose=True,
handle_parsing_errors=True,
)
cl.user_session.set(key="agent_chain", value=agent_chain)
@cl.on_message
async def main(message: str):
agent = cl.user_session.get("agent_chain")
cb = cl.LangchainCallbackHandler(stream_final_answer=True)
await cl.make_async(agent.run)(message, callbacks=[cb])
| [] |
2024-01-10 | leovantoji/llm_development_luupai123 | src~youtube_search~youtube.py | from langchain.agents import AgentType
from langchain.agents import initialize_agent
from langchain.agents import Tool
from langchain.llms import OpenAI
from langchain.tools import YouTubeSearchTool
tool = YouTubeSearchTool()
tools = [
Tool(
name="Search",
func=tool.run,
description=(
"""useful for when you need to give links to youtube videos. """
"""Remember to put https://youtube.com/ in front of """
"""every link to complete it"""
),
)
]
agent = initialize_agent(
tools,
OpenAI(temperature=0),
agent=AgentType.ZERO_SHOT_REACT_DESCRIPTION,
verbose=True,
handle_parsing_errors=True,
)
agent.run("Whats a joe rogan video on an interesting topic")
| [] |
2024-01-10 | leovantoji/llm_development_luupai123 | src~qa_with_documents~qa_documents.py | import tempfile
import chainlit as cl
from chainlit.types import AskFileResponse
from langchain.chains import RetrievalQAWithSourcesChain
from langchain.chat_models import ChatOpenAI
from langchain.document_loaders import PyPDFLoader
from langchain.document_loaders import TextLoader
from langchain.embeddings.openai import OpenAIEmbeddings
from langchain.text_splitter import RecursiveCharacterTextSplitter
from langchain.vectorstores import Chroma
text_splitter = RecursiveCharacterTextSplitter(chunk_size=1000, chunk_overlap=100)
embeddings = OpenAIEmbeddings()
welcome_message = """
Welcome to QA with documents!
1. Upload a PDF or TXT file.
2. Ask a question about the file.
"""
def process_file(file: AskFileResponse):
if file.type == "text/plain":
Loader = TextLoader
if file.type == "application/pdf":
Loader = PyPDFLoader
with tempfile.NamedTemporaryFile() as temp_file:
temp_file.write(file.content)
loader = Loader(temp_file.name)
documents = loader.load()
docs = text_splitter.split_documents(documents=documents)
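# tag each chunk with a stable source id so cited sources in the answer can be
# mapped back to the original chunk text later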
for i, doc in enumerate(docs):
doc.metadata["source"] = f"source_{i}"
return docs
def get_docsearch(file: AskFileResponse):
docs = process_file(file)
# save data in the user session
cl.user_session.set(key="docs", value=docs)
# create a unique namespace for the file
docsearch = Chroma.from_documents(
documents=docs,
embedding=embeddings,
)
return docsearch
@cl.on_chat_start
async def start():
# sending an image with the local file path
await cl.Message(content="You can now chat with your pdfs.").send()
files = None
while files is None:
files = await cl.AskFileMessage(
content=welcome_message,
accept=["text/plain", "application/pdf"],
max_size_mb=20,
timeout=120,
).send()
file = files[0]
msg = cl.Message(content=f"Processing '{file.name}'...")
await msg.send()
# no async implementation in the Pinecone client, fallback to sync
docsearch = await cl.make_async(get_docsearch)(file)
chain = RetrievalQAWithSourcesChain.from_chain_type(
llm=ChatOpenAI(temperature=0, streaming=True),
chain_type="stuff",
retriever=docsearch.as_retriever(max_tokens_limit=1000),
)
# let the user know that the system is ready
msg.content = f"'{file.name}' is ready. Ask a question!"
await msg.update()
cl.user_session.set(key="chain", value=chain)
@cl.on_message
async def main(message: str):
chain = cl.user_session.get(key="chain")
cb = cl.AsyncLangchainCallbackHandler(
stream_final_answer=True,
answer_prefix_tokens=["FINAL", "ANSWER"],
)
cb.answer_reached = True
result = await chain.acall(message, callbacks=[cb])
answer = result["answer"]
sources = result["sources"].strip()
source_elements = []
# get the documents from the user session
docs = cl.user_session.get(key="docs")
metadatas = [doc.metadata for doc in docs]
all_sources = [m["source"] for m in metadatas]
if sources:
found_sources = []
# add the sources to the message
for source in sources.split(","):
source_name = source.strip().replace(".", "")
# get the index of the source
try:
index = all_sources.index(source_name)
except ValueError:
continue
text = docs[index].page_content
found_sources.append(source_name)
# create the text element referenced in the message
source_elements.append(cl.Text(content=text, name=source_name))
if found_sources:
answer += f"\nSources: {', '.join(found_sources)}"
else:
answer += "\nNo sources found."
if cb.has_streamed_final_answer:
cb.final_stream.elements = source_elements
await cb.final_stream.update()
else:
await cl.Message(content=answer, elements=source_elements).send()
| [] |
2024-01-10 | rosspatil/ai-chatbot | service.py | import qdrant_client
import os
from langchain import PromptTemplate
from langchain.chat_models import ChatOpenAI
from langchain.chains import ConversationalRetrievalChain
from langchain.embeddings import OpenAIEmbeddings
from langchain.vectorstores import Qdrant
from langchain.memory import ConversationSummaryBufferMemory
from expiringdict import ExpiringDict
custom_prompt = """
You are a nice chatbot for customer support in {domain} and having a conversation with a human.
Follow these INSTRUCTIONS strictly:
1. You are instructed to forget everything you know about the world currently and do only answer specific to {domain}.
2. You are only programmed to assist Human in understanding and extracting relevant information from texts related to {domain}.
3. A human enters the conversation and starts asking questions. Generate the reply from texts related to {domain} only.
4. You must exclude chat history if it is unrelated to {domain}.
5. You as a chatbot do not ask follow up questions to Human.
6. If user ask anything unrelated to {domain}, apologize and say that you cannot answer.
7. You as a chatbot should not ask anything about {domain} to Humans.
8. Do not take any assistance/help from Human if you don't understand anything from questions.
_________________________
Context for you: {context}
_________________________
Previous Chat History: {chat_history}
_________________________chat history ends.
Asked question Human: {question}
_________________________
Chatbot:"""
prompt = PromptTemplate(
template=custom_prompt,
input_variables=["chat_history", "question", "context"],
partial_variables={"domain": os.environ["DOMAIN"]},
)
class Service:
contextCache: ExpiringDict
def __init__(self):
self.llm = ChatOpenAI(
temperature=0.7,
model=os.environ["OPENAI_DEPLOYMENT_ID"],
model_kwargs={"engine": os.environ["OPENAI_DEPLOYMENT_ID"]},
tiktoken_model_name="gpt-3.5-turbo",
)
self.qdrant_client = qdrant_client.QdrantClient(
url=os.environ["QDRANT_URL"],
api_key=os.environ["QDRANT_API_KEY"],
)
self.embeddings = OpenAIEmbeddings(
deployment="text-embedding-ada-002",
model="text-embedding-ada-002",
chunk_size=1,
)
self.contextCache = ExpiringDict(max_len=20, max_age_seconds=600, items=None)
def createNewContext(self):
return ConversationSummaryBufferMemory(
llm=self.llm,
output_key="answer",
memory_key="chat_history",
return_messages=True,
)
async def reset(self, session_id):
memory = self.contextCache.get(session_id)
if memory is not None:
memory.clear()
async def lets_chat(self, query: str, session_id: str, collection_name: str) -> str:
memory = self.contextCache.get(session_id)
if memory is None:
memory = self.createNewContext()
vectorstore = Qdrant(
client=self.qdrant_client,
collection_name=collection_name,
embeddings=self.embeddings,
metadata_payload_key="metadata",
content_payload_key="content",
)
retriever = vectorstore.as_retriever()
conversation = ConversationalRetrievalChain.from_llm(
llm=self.llm,
memory=memory,
chain_type="stuff",
retriever=retriever,
return_source_documents=True,
get_chat_history=lambda h: h,
verbose=False,
combine_docs_chain_kwargs={"prompt": prompt},
)
data = {
"question": query,
}
resp = conversation(data)
self.contextCache[session_id] = memory
return resp["answer"]
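# Illustrative usage sketch (session id and collection name are hypothetical, not defined in this module):
#   svc = Service()
#   answer = await svc.lets_chat("How do I reset my password?", session_id="abc-123", collection_name="support_docs")
#   await svc.reset("abc-123")  # clears the summarized history for that session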
| [
"\nYou are a nice chatbot for customer support in {domain} and having a conversation with a human. \n\nFollow these INSTRUCTIONS strictly:\n1. You are instructed to forget everything you know about the world currently and do only answer specific to {domain}.\n2. You are only programmed to assist Human in understanding and extracting relevant information from texts related to {domain}.\n3. A human enters the conversation and starts asking questions. Generate the reply from texts related to {domain} only.\n4. You must exclude chat history if it is unrelated to {domain}.\n5. You as a chatbot do not ask follow up questions to Human.\n6. If user ask anything unrelated to {domain}, apologize and say that you cannot answer.\n7. You as a chatbot should not ask anything about {domain} to Humans. \n8. Do not take any assistance/help from Human if you don't understand anything from questions.\n_________________________\n\nContext for you: {context}\n_________________________\n\nPrevious Chat History: {chat_history}\n_________________________chat history ends.\n\nAsked question Human: {question}\n_________________________\nChatbot:",
"domain",
"question",
"chat_history",
"context"
] |
2024-01-10 | parenriquez/ask-feynman | ask-feynman~__main__.py | import openai
import os
from argparse import ArgumentParser
OPEN_API_KEY = "OPENAI_API_KEY"
def main():
# Parameterizing arguments from the command line
parser = ArgumentParser(description="Ask Feynman v.1")
# max-tokens is the flag
parser.add_argument(
"--max-tokens", help="Maximum size of tokens used", type=int, default=2000
)
# flag for model
parser.add_argument(
"--model",
help="The openai model to use",
type=str,
default="gpt-3.5-turbo",
)
# flag for query from user
parser.add_argument(
"--query", help="A string input from the user", type=str, required=True
)
# parsing the arguments
args = parser.parse_args()
max_tokens = args.max_tokens
model = args.model
query = args.query
print("Options:")
print(f"Max tokens: {max_tokens}")
print(f"model: {model}")
    open_ai_api_key = os.getenv(OPEN_API_KEY)
    if open_ai_api_key is None:
        print("OPENAI_API_KEY is required")
        exit(-1)
    openai.api_key = open_ai_api_key  # set explicitly instead of relying on the library's env-var fallback
query = query.strip()
print("Hello there, my young friend! It's a pleasure to have you here with us today.")
while True:
if query.lower() == "quit":
print("\nWell, I'm off to explore the mysteries of the universe! \
Until our paths cross again, keep questioning everything and seeking out new knowledge. \
So long for now!")
break
elif query == "":
print("You did not ask me anything.")
query = input("You (type 'quit' to exit): \n")
else:
completion = get_completion(query, max_tokens=max_tokens)
if len(completion) == 0:
print("I'm sorry, I don't know the answer to that question right now")
else:
print(f"Mr. Feynman: {completion.strip()}")
query = input("\nYou (type 'quit' to exit): \n")
query = query.strip()
def get_completion(prompt, max_tokens, model="gpt-3.5-turbo"):
messages = [{"role": "system", "content": "You are a teacher who speaks like Richard Feynman. \
You teach passionately and create a welcoming and warm environment to your student, \
and encourage them for the love of learning."},
{"role": "user", "content": prompt}]
response = openai.ChatCompletion.create(
model=model,
messages=messages,
temperature=0.2, # this is the degree of randomness of the model's output
max_tokens = max_tokens
)
return response.choices[0].message["content"]
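# Illustrative call (the question is a made-up example): get_completion returns just the assistant's text,
#   reply = get_completion("Why is the sky blue?", max_tokens=200)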
if __name__ == "__main__":
main()
| [
"You are a teacher who speaks like Richard Feynman. You teach passionately and create a welcoming and warm environment to your student, and encourage them for the love of learning."
] |
2024-01-10 | Hrazhan/whisper-kurdish | finetune.py | #!/usr/bin/env python
# coding=utf-8
# Copyright 2022 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Fine-tuning the library models for sequence to sequence speech recognition
with 🤗 Datasets' streaming mode.
"""
# You can also adapt this script for your own sequence to sequence speech
# recognition task. Pointers for this are left as comments.
import logging
import os
import sys
from dataclasses import dataclass, field
from typing import Any, Dict, List, Optional, Union
import datasets
import torch
from datasets import DatasetDict, IterableDatasetDict, interleave_datasets, load_dataset
from torch.utils.data import IterableDataset
import evaluate
import transformers
from transformers import (
AutoConfig,
AutoFeatureExtractor,
AutoModelForSpeechSeq2Seq,
AutoProcessor,
AutoTokenizer,
HfArgumentParser,
Seq2SeqTrainer,
Seq2SeqTrainingArguments,
TrainerCallback,
set_seed,
)
from transformers.models.whisper.english_normalizer import BasicTextNormalizer
from transformers.trainer_pt_utils import IterableDatasetShard
from transformers.trainer_utils import get_last_checkpoint, is_main_process
from transformers.utils import check_min_version, send_example_telemetry
from transformers.utils.versions import require_version
from klpt.preprocess import Preprocess
from helpers import resolve_ae
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version("4.25.0.dev0")
require_version("datasets>=1.18.2", "To fix: pip install -r examples/pytorch/speech-recognition/requirements.txt")
logger = logging.getLogger(__name__)
@dataclass
class ModelArguments:
"""
Arguments pertaining to which model/config/tokenizer we are going to fine-tune from.
"""
model_name_or_path: str = field(
metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"}
)
config_name: Optional[str] = field(
default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"}
)
tokenizer_name: Optional[str] = field(
default=None, metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"}
)
feature_extractor_name: Optional[str] = field(
default=None, metadata={"help": "feature extractor name or path if not the same as model_name"}
)
cache_dir: Optional[str] = field(
default=None,
metadata={"help": "Where to store the pretrained models downloaded from huggingface.co"},
)
use_fast_tokenizer: bool = field(
default=True,
metadata={"help": "Whether to use one of the fast tokenizer (backed by the tokenizers library) or not."},
)
model_revision: str = field(
default="main",
metadata={"help": "The specific model version to use (can be a branch name, tag name or commit id)."},
)
use_auth_token: bool = field(
default=False,
metadata={
"help": (
"Will use the token generated when running `huggingface-cli login` (necessary to use this script "
"with private models)."
)
},
)
freeze_feature_encoder: bool = field(
default=True, metadata={"help": "Whether to freeze the feature encoder layers of the model."}
)
freeze_encoder: bool = field(
default=False, metadata={"help": "Whether to freeze the entire encoder of the seq2seq model."}
)
forced_decoder_ids: List[List[int]] = field(
default=None,
metadata={
"help": (
"A list of pairs of integers which indicates a mapping from generation indices to token indices "
"that will be forced before sampling. For example, [[0, 123]] means the first generated token "
"will always be a token of index 123."
)
},
)
suppress_tokens: List[int] = field(
default=None, metadata={"help": "A list of tokens that will be suppressed at generation."}
)
model_index_name: str = field(default=None, metadata={"help": "Pretty name for the model card."})
@dataclass
class DataTrainingArguments:
"""
Arguments pertaining to what data we are going to input our model for training and eval.
"""
dataset_name: str = field(
default=None, metadata={"help": "The name of the dataset to use (via the datasets library)."}
)
dataset_config_name: Optional[str] = field(
default=None, metadata={"help": "The configuration name of the dataset to use (via the datasets library)."}
)
text_column: Optional[str] = field(
default=None,
metadata={"help": "The name of the column in the datasets containing the full texts (for summarization)."},
)
max_train_samples: Optional[int] = field(
default=None,
metadata={
"help": (
"For debugging purposes or quicker training, truncate the number of training examples to this "
"value if set."
)
},
)
max_eval_samples: Optional[int] = field(
default=None,
metadata={
"help": (
"For debugging purposes or quicker training, truncate the number of evaluation examples to this "
"value if set."
)
},
)
audio_column_name: str = field(
default="audio",
metadata={"help": "The name of the dataset column containing the audio data. Defaults to 'audio'"},
)
text_column_name: str = field(
default="text",
metadata={"help": "The name of the dataset column containing the text data. Defaults to 'text'"},
)
max_duration_in_seconds: float = field(
default=20.0,
metadata={
"help": (
"Truncate audio files that are longer than `max_duration_in_seconds` seconds to"
" 'max_duration_in_seconds`"
)
},
)
min_duration_in_seconds: float = field(
default=0.0, metadata={"help": "Filter audio files that are shorter than `min_duration_in_seconds` seconds"}
)
train_split_name: str = field(
default="train",
metadata={
"help": "The name of the training data set split to use (via the datasets library). Defaults to 'train'"
},
)
eval_split_name: str = field(
default="test",
metadata={
"help": "The name of the training data set split to use (via the datasets library). Defaults to 'train'"
},
)
do_lower_case: bool = field(
default=False,
metadata={"help": "Whether the target text should be lower cased."},
)
do_remove_punctuation: bool = field(
default=False,
metadata={"help": "Whether the target text should be striped of punctuation."},
)
do_normalize_eval: bool = field(
default=True,
metadata={"help": "Whether to normalise the references and predictions in the eval WER calculation."},
)
language: str = field(
default=None,
metadata={
"help": (
"Language for multilingual fine-tuning. This argument should be set for multilingual fine-tuning "
"only. For English speech recognition, it should be set to `None`."
)
},
)
task: str = field(
default="transcribe",
metadata={"help": "Task, either `transcribe` for speech recognition or `translate` for speech translation."},
)
shuffle_buffer_size: Optional[int] = field(
default=500,
metadata={
"help": (
"The number of streamed examples to download before shuffling them. The large the buffer, "
"the closer it is to real offline shuffling."
)
},
)
streaming: bool = field(
default=False,
metadata={"help": "Whether to use streaming mode to load and pre-process the data."},
)
@dataclass
class DataCollatorSpeechSeq2SeqWithPadding:
"""
Data collator that will dynamically pad the inputs received.
Args:
processor ([`WhisperProcessor`])
The processor used for processing the data.
decoder_start_token_id (`int`)
The begin-of-sentence of the decoder.
"""
processor: Any
decoder_start_token_id: int
def __call__(self, features: List[Dict[str, Union[List[int], torch.Tensor]]]) -> Dict[str, torch.Tensor]:
# split inputs and labels since they have to be of different lengths and need
# different padding methods
model_input_name = self.processor.model_input_names[0]
input_features = [{model_input_name: feature[model_input_name]} for feature in features]
label_features = [{"input_ids": feature["labels"]} for feature in features]
batch = self.processor.feature_extractor.pad(input_features, return_tensors="pt")
labels_batch = self.processor.tokenizer.pad(label_features, return_tensors="pt")
# replace padding with -100 to ignore loss correctly
labels = labels_batch["input_ids"].masked_fill(labels_batch.attention_mask.ne(1), -100)
# if bos token is appended in previous tokenization step,
# cut bos token here as it's append later anyways
if (labels[:, 0] == self.decoder_start_token_id).all().cpu().item():
labels = labels[:, 1:]
batch["labels"] = labels
return batch
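# Illustrative effect of the masking above (token ids are made up): a padded label row such as
# [50258, 634, 50257, 50257] with attention mask [1, 1, 0, 0] becomes [50258, 634, -100, -100],
# so the padded positions are ignored by the cross-entropy loss.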
def load_maybe_streaming_dataset(dataset_name, dataset_config_name, split="train", streaming=True, **kwargs):
"""
Utility function to load a dataset in streaming mode. For datasets with multiple splits,
each split is loaded individually and then splits combined by taking alternating examples from
each (interleaving).
"""
if "+" in split:
# load multiple splits separated by the `+` symbol with streaming mode
dataset_splits = [
load_dataset(dataset_name, dataset_config_name, split=split_name, streaming=streaming, **kwargs)
for split_name in split.split("+")
]
# interleave multiple splits to form one dataset
interleaved_dataset = interleave_datasets(dataset_splits)
return interleaved_dataset
else:
# load a single split *with* streaming mode
dataset = load_dataset(dataset_name, dataset_config_name, split=split, streaming=streaming, **kwargs)
return dataset
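# Illustrative usage (dataset name and config are hypothetical): interleave two splits in streaming mode,
#   ds = load_maybe_streaming_dataset("mozilla-foundation/common_voice_11_0", "ckb",
#                                     split="train+validation", streaming=True, use_auth_token=True)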
def main():
# 1. Parse input arguments
# See all possible arguments in src/transformers/training_args.py
# or by passing the --help flag to this script.
# We now keep distinct sets of args, for a cleaner separation of concerns.
parser = HfArgumentParser((ModelArguments, DataTrainingArguments, Seq2SeqTrainingArguments))
if len(sys.argv) == 2 and sys.argv[1].endswith(".json"):
# If we pass only one argument to the script and it's the path to a json file,
# let's parse it to get our arguments.
model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1]))
else:
model_args, data_args, training_args = parser.parse_args_into_dataclasses()
# Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
# information sent is the one passed as arguments along with your Python/PyTorch versions.
send_example_telemetry("run_speech_recognition_seq2seq_streaming", model_args, data_args)
# 2. Setup logging
logging.basicConfig(
format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
datefmt="%m/%d/%Y %H:%M:%S",
handlers=[logging.StreamHandler(sys.stdout)],
)
log_level = training_args.get_process_log_level()
logger.setLevel(log_level)
datasets.utils.logging.set_verbosity(log_level)
transformers.utils.logging.set_verbosity(log_level)
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
logger.setLevel(logging.INFO if is_main_process(training_args.local_rank) else logging.WARN)
# Log on each process the small summary:
logger.warning(
f"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}"
f"distributed training: {bool(training_args.local_rank != -1)}, 16-bits training: {training_args.fp16}"
)
logger.info(f"Training/evaluation parameters {training_args}")
# Set the verbosity to info of the Transformers logger (on main process only):
if is_main_process(training_args.local_rank):
transformers.utils.logging.set_verbosity_info()
logger.info("Training/evaluation parameters %s", training_args)
# 3. Detecting last checkpoint and eventually continue from last checkpoint
last_checkpoint = None
if os.path.isdir(training_args.output_dir) and training_args.do_train and not training_args.overwrite_output_dir:
last_checkpoint = get_last_checkpoint(training_args.output_dir)
if last_checkpoint is None and len(os.listdir(training_args.output_dir)) > 0:
raise ValueError(
f"Output directory ({training_args.output_dir}) already exists and is not empty. "
"Use --overwrite_output_dir to overcome."
)
elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
logger.info(
f"Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change "
"the `--output_dir` or add `--overwrite_output_dir` to train from scratch."
)
# Set seed before initializing model.
set_seed(training_args.seed)
# 4. Load dataset
raw_datasets = IterableDatasetDict() if data_args.streaming else DatasetDict()
if training_args.do_train:
raw_datasets["train"] = load_maybe_streaming_dataset(
data_args.dataset_name,
data_args.dataset_config_name,
split=data_args.train_split_name,
use_auth_token=True if model_args.use_auth_token else None,
streaming=data_args.streaming,
)
if training_args.do_eval:
raw_datasets["eval"] = load_maybe_streaming_dataset(
data_args.dataset_name,
data_args.dataset_config_name,
split=data_args.eval_split_name,
use_auth_token=True if model_args.use_auth_token else None,
streaming=data_args.streaming,
)
raw_datasets_features = list(next(iter(raw_datasets.values())).features.keys())
if data_args.audio_column_name not in raw_datasets_features:
raise ValueError(
f"--audio_column_name '{data_args.audio_column_name}' not found in dataset '{data_args.dataset_name}'. "
"Make sure to set `--audio_column_name` to the correct audio column - one of "
f"{', '.join(raw_datasets_features)}."
)
if data_args.text_column_name not in raw_datasets_features:
raise ValueError(
f"--text_column_name {data_args.text_column_name} not found in dataset '{data_args.dataset_name}'. "
"Make sure to set `--text_column_name` to the correct text column - one of "
f"{', '.join(raw_datasets_features)}."
)
# 5. Load pretrained model, tokenizer, and feature extractor
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
config = AutoConfig.from_pretrained(
model_args.config_name if model_args.config_name else model_args.model_name_or_path,
cache_dir=model_args.cache_dir,
revision=model_args.model_revision,
use_auth_token=True if model_args.use_auth_token else None,
)
config.update({"forced_decoder_ids": model_args.forced_decoder_ids, "suppress_tokens": model_args.suppress_tokens})
if training_args.gradient_checkpointing:
config.update({"use_cache": False})
feature_extractor = AutoFeatureExtractor.from_pretrained(
model_args.feature_extractor_name if model_args.feature_extractor_name else model_args.model_name_or_path,
cache_dir=model_args.cache_dir,
revision=model_args.model_revision,
use_auth_token=True if model_args.use_auth_token else None,
)
tokenizer = AutoTokenizer.from_pretrained(
model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path,
cache_dir=model_args.cache_dir,
use_fast=model_args.use_fast_tokenizer,
revision=model_args.model_revision,
use_auth_token=True if model_args.use_auth_token else None,
)
model = AutoModelForSpeechSeq2Seq.from_pretrained(
model_args.model_name_or_path,
config=config,
cache_dir=model_args.cache_dir,
revision=model_args.model_revision,
use_auth_token=True if model_args.use_auth_token else None,
)
if model.config.decoder_start_token_id is None:
raise ValueError("Make sure that `config.decoder_start_token_id` is correctly defined")
if model_args.freeze_feature_encoder:
model.freeze_feature_encoder()
if model_args.freeze_encoder:
model.freeze_encoder()
if data_args.language is not None:
# We only need to set the task id when the language is specified (i.e. in a multilingual setting)
tokenizer.set_prefix_tokens(language=data_args.language, task=data_args.task)
# 6. Resample speech dataset if necessary
dataset_sampling_rate = next(iter(raw_datasets.values())).features[data_args.audio_column_name].sampling_rate
if dataset_sampling_rate != feature_extractor.sampling_rate:
raw_datasets = raw_datasets.cast_column(
data_args.audio_column_name, datasets.features.Audio(sampling_rate=feature_extractor.sampling_rate)
)
# 7. Preprocessing the datasets.
# We need to read the audio files as arrays and tokenize the targets.
max_input_length = data_args.max_duration_in_seconds * feature_extractor.sampling_rate
min_input_length = data_args.min_duration_in_seconds * feature_extractor.sampling_rate
audio_column_name = data_args.audio_column_name
text_column_name = data_args.text_column_name
model_input_name = feature_extractor.model_input_names[0]
do_lower_case = data_args.do_lower_case
do_remove_punctuation = data_args.do_remove_punctuation
normalizer = BasicTextNormalizer() # 'official' text normalizer from OpenAI
if data_args.max_train_samples is not None:
raw_datasets["train"] = (
raw_datasets["train"].take(data_args.max_train_samples)
if data_args.streaming
else raw_datasets["train"].select(range(data_args.max_train_samples))
)
if data_args.max_eval_samples is not None:
raw_datasets["eval"] = (
raw_datasets["eval"].take(data_args.max_eval_samples)
if data_args.streaming
else raw_datasets["eval"].select(range(data_args.max_eval_samples))
)
preprocessor_ckb = Preprocess("Sorani", "Arabic", numeral="Latin")
def prepare_dataset(batch):
# process audio
sample = batch[audio_column_name]
inputs = feature_extractor(sample["array"], sampling_rate=sample["sampling_rate"])
# process audio length
batch[model_input_name] = inputs.get(model_input_name)[0]
batch["input_length"] = len(sample["array"])
# process targets
input_str = preprocessor_ckb.preprocess(resolve_ae(batch[text_column_name]))
if do_remove_punctuation:
input_str = normalizer(input_str).strip()
batch["labels"] = tokenizer(input_str).input_ids
return batch
with training_args.main_process_first(desc="dataset map pre-processing"):
vectorized_datasets = raw_datasets.map(
prepare_dataset,
remove_columns=raw_datasets_features,
).with_format("torch")
if training_args.do_train and data_args.streaming:
# manually shuffle if streaming (done by the trainer for non-streaming)
vectorized_datasets["train"] = vectorized_datasets["train"].shuffle(
buffer_size=data_args.shuffle_buffer_size,
seed=training_args.seed,
)
# filter training data that is shorter than min_input_length or longer than
# max_input_length
def is_audio_in_length_range(length):
return min_input_length < length < max_input_length
if training_args.do_train:
vectorized_datasets["train"] = vectorized_datasets["train"].filter(
is_audio_in_length_range,
input_columns=["input_length"],
)
# 8. Load Metric
metric = evaluate.load("wer")
do_normalize_eval = data_args.do_normalize_eval
def compute_metrics(pred):
pred_ids = pred.predictions
pred.label_ids[pred.label_ids == -100] = tokenizer.pad_token_id
pred_str = tokenizer.batch_decode(pred_ids, skip_special_tokens=True)
# we do not want to group tokens when computing the metrics
label_str = tokenizer.batch_decode(pred.label_ids, skip_special_tokens=True)
if do_normalize_eval:
pred_str = [normalizer(pred) for pred in pred_str]
label_str = [normalizer(label) for label in label_str]
# filtering step to only evaluate the samples that correspond to non-zero references:
pred_str = [pred_str[i] for i in range(len(pred_str)) if len(label_str[i]) > 0]
label_str = [label_str[i] for i in range(len(label_str)) if len(label_str[i]) > 0]
wer = 100 * metric.compute(predictions=pred_str, references=label_str)
return {"wer": wer}
# 9. Create a single speech processor
if is_main_process(training_args.local_rank):
# save feature extractor, tokenizer and config
feature_extractor.save_pretrained(training_args.output_dir)
tokenizer.save_pretrained(training_args.output_dir)
config.save_pretrained(training_args.output_dir)
processor = AutoProcessor.from_pretrained(training_args.output_dir)
# 10. Define data collator
data_collator = DataCollatorSpeechSeq2SeqWithPadding(
processor=processor,
decoder_start_token_id=model.config.decoder_start_token_id,
)
# 11. Configure Trainer
# Trainer callback to reinitialise and reshuffle the streamable datasets at the beginning of each epoch
# Only required for streaming: Trainer automatically shuffles non-streaming datasets
class ShuffleCallback(TrainerCallback):
def on_epoch_begin(self, args, state, control, train_dataloader, **kwargs):
if isinstance(train_dataloader.dataset, IterableDatasetShard):
pass # set_epoch() is handled by the Trainer
elif isinstance(train_dataloader.dataset, IterableDataset):
train_dataloader.dataset.set_epoch(train_dataloader.dataset._epoch + 1)
# Initialize Trainer
trainer = Seq2SeqTrainer(
model=model,
args=training_args,
train_dataset=vectorized_datasets["train"] if training_args.do_train else None,
eval_dataset=vectorized_datasets["eval"] if training_args.do_eval else None,
tokenizer=feature_extractor,
data_collator=data_collator,
compute_metrics=compute_metrics if training_args.predict_with_generate else None,
callbacks=[ShuffleCallback()] if data_args.streaming else None,
)
# 12. Training
if training_args.do_train:
checkpoint = None
if training_args.resume_from_checkpoint is not None:
checkpoint = training_args.resume_from_checkpoint
elif last_checkpoint is not None:
checkpoint = last_checkpoint
train_result = trainer.train(resume_from_checkpoint=checkpoint)
trainer.save_model() # Saves the feature extractor too for easy upload
metrics = train_result.metrics
if data_args.max_train_samples:
metrics["train_samples"] = data_args.max_train_samples
trainer.log_metrics("train", metrics)
trainer.save_metrics("train", metrics)
trainer.save_state()
# 13. Evaluation
results = {}
if training_args.do_eval:
logger.info("*** Evaluate ***")
metrics = trainer.evaluate(
metric_key_prefix="eval",
max_length=training_args.generation_max_length,
num_beams=training_args.generation_num_beams,
)
if data_args.max_eval_samples:
metrics["eval_samples"] = data_args.max_eval_samples
trainer.log_metrics("eval", metrics)
trainer.save_metrics("eval", metrics)
# 14. Write Training Stats
kwargs = {
"finetuned_from": model_args.model_name_or_path,
"tasks": "automatic-speech-recognition",
"tags": "whisper-event",
}
if data_args.dataset_name is not None:
kwargs["dataset_tags"] = data_args.dataset_name
if data_args.dataset_config_name is not None:
kwargs["dataset"] = f"{data_args.dataset_name} {data_args.dataset_config_name}"
else:
kwargs["dataset"] = data_args.dataset_name
if "common_voice" in data_args.dataset_name:
kwargs["language"] = data_args.dataset_config_name.split('-')[0]
if model_args.model_index_name is not None:
kwargs["model_name"] = model_args.model_index_name
if training_args.push_to_hub:
trainer.push_to_hub(**kwargs)
else:
trainer.create_model_card(**kwargs)
return results
if __name__ == "__main__":
main()
| [] |
2024-01-10 | hughes-research/litellm | litellm~utils.py | # +-----------------------------------------------+
# | |
# | Give Feedback / Get Help |
# | https://github.com/BerriAI/litellm/issues/new |
# | |
# +-----------------------------------------------+
#
# Thank you users! We ❤️ you! - Krrish & Ishaan
import sys
import dotenv, json, traceback, threading
import subprocess, os
import litellm, openai
import itertools
import random, uuid, requests
import datetime, time
import tiktoken
import uuid
import aiohttp
import logging
import asyncio
from tokenizers import Tokenizer
from dataclasses import (
dataclass,
field,
) # for storing API inputs, outputs, and metadata
encoding = tiktoken.get_encoding("cl100k_base")
import importlib.metadata
from .integrations.traceloop import TraceloopLogger
from .integrations.helicone import HeliconeLogger
from .integrations.aispend import AISpendLogger
from .integrations.berrispend import BerriSpendLogger
from .integrations.supabase import Supabase
from .integrations.llmonitor import LLMonitorLogger
from .integrations.prompt_layer import PromptLayerLogger
from .integrations.langsmith import LangsmithLogger
from .integrations.weights_biases import WeightsBiasesLogger
from .integrations.custom_logger import CustomLogger
from .integrations.langfuse import LangFuseLogger
from .integrations.litedebugger import LiteDebugger
from openai.error import OpenAIError as OriginalError
from openai.openai_object import OpenAIObject
from .exceptions import (
AuthenticationError,
InvalidRequestError,
RateLimitError,
ServiceUnavailableError,
OpenAIError,
ContextWindowExceededError,
Timeout,
APIConnectionError,
APIError,
BudgetExceededError
)
from typing import cast, List, Dict, Union, Optional
from .caching import Cache
####### ENVIRONMENT VARIABLES ####################
dotenv.load_dotenv() # Loading env variables using dotenv
sentry_sdk_instance = None
capture_exception = None
add_breadcrumb = None
posthog = None
slack_app = None
alerts_channel = None
heliconeLogger = None
promptLayerLogger = None
langsmithLogger = None
weightsBiasesLogger = None
customLogger = None
langFuseLogger = None
llmonitorLogger = None
aispendLogger = None
berrispendLogger = None
supabaseClient = None
liteDebuggerClient = None
callback_list: Optional[List[str]] = []
user_logger_fn = None
additional_details: Optional[Dict[str, str]] = {}
local_cache: Optional[Dict[str, str]] = {}
last_fetched_at = None
last_fetched_at_keys = None
######## Model Response #########################
# All liteLLM Model responses will be in this format, Follows the OpenAI Format
# https://docs.litellm.ai/docs/completion/output
# {
# 'choices': [
# {
# 'finish_reason': 'stop',
# 'index': 0,
# 'message': {
# 'role': 'assistant',
# 'content': " I'm doing well, thank you for asking. I am Claude, an AI assistant created by Anthropic."
# }
# }
# ],
# 'created': 1691429984.3852863,
# 'model': 'claude-instant-1',
# 'usage': {'prompt_tokens': 18, 'completion_tokens': 23, 'total_tokens': 41}
# }
class UnsupportedParamsError(Exception):
def __init__(self, status_code, message):
self.status_code = status_code
self.message = message
super().__init__(
self.message
) # Call the base class constructor with the parameters it needs
def _generate_id(): # private helper function
return 'chatcmpl-' + str(uuid.uuid4())
def map_finish_reason(finish_reason: str): # openai returns one of 5 finish reasons - 'stop', 'length', 'function_call', 'content_filter', 'null'
# anthropic mapping
if finish_reason == "stop_sequence":
return "stop"
return finish_reason
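# Illustrative mapping (example calls): map_finish_reason("stop_sequence") returns "stop";
# any other value, e.g. "length", is passed through unchanged.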
class Message(OpenAIObject):
def __init__(self, content="default", role="assistant", logprobs=None, **params):
super(Message, self).__init__(**params)
self.content = content
self.role = role
self._logprobs = logprobs
class Delta(OpenAIObject):
def __init__(self, content=None, logprobs=None, role=None, **params):
super(Delta, self).__init__(**params)
if content is not None:
self.content = content
if role:
self.role = role
class Choices(OpenAIObject):
def __init__(self, finish_reason=None, index=0, message=None, **params):
super(Choices, self).__init__(**params)
if finish_reason:
self.finish_reason = map_finish_reason(finish_reason)
else:
self.finish_reason = "stop"
self.index = index
if message is None:
self.message = Message(content=None)
else:
self.message = message
class Usage(OpenAIObject):
def __init__(self, prompt_tokens=None, completion_tokens=None, total_tokens=None, **params):
super(Usage, self).__init__(**params)
if prompt_tokens:
self.prompt_tokens = prompt_tokens
if completion_tokens:
self.completion_tokens = completion_tokens
if total_tokens:
self.total_tokens = total_tokens
class StreamingChoices(OpenAIObject):
def __init__(self, finish_reason=None, index=0, delta: Optional[Delta]=None, **params):
super(StreamingChoices, self).__init__(**params)
self.finish_reason = finish_reason
self.index = index
if delta:
self.delta = delta
else:
self.delta = Delta()
class ModelResponse(OpenAIObject):
def __init__(self, id=None, choices=None, created=None, model=None, usage=None, stream=False, response_ms=None, **params):
if stream:
self.object = "chat.completion.chunk"
self.choices = [StreamingChoices()]
else:
if model in litellm.open_ai_embedding_models:
self.object = "embedding"
else:
self.object = "chat.completion"
self.choices = [Choices()]
if id is None:
self.id = _generate_id()
else:
self.id = id
if created is None:
self.created = int(time.time())
else:
self.created = created
if response_ms:
self._response_ms = response_ms
else:
self._response_ms = None
self.model = model
if usage:
self.usage = usage
else:
self.usage = Usage()
super(ModelResponse, self).__init__(**params)
def to_dict_recursive(self):
d = super().to_dict_recursive()
d["choices"] = [choice.to_dict_recursive() for choice in self.choices]
return d
def cost(self):
# for non streaming responses
return completion_cost(completion_response=self)
class EmbeddingResponse(OpenAIObject):
def __init__(self, id=None, choices=None, created=None, model=None, usage=None, stream=False, response_ms=None, **params):
self.object = "list"
if response_ms:
self._response_ms = response_ms
else:
self._response_ms = None
self.data = []
self.model = model
def to_dict_recursive(self):
d = super().to_dict_recursive()
return d
############################################################
def print_verbose(print_statement):
if litellm.set_verbose:
print(f"LiteLLM: {print_statement}")
####### LOGGING ###################
from enum import Enum
class CallTypes(Enum):
embedding = 'embedding'
completion = 'completion'
# Logging function -> log the exact model details + what's being sent | Non-Blocking
class Logging:
global supabaseClient, liteDebuggerClient, promptLayerLogger, weightsBiasesLogger, langsmithLogger, capture_exception, add_breadcrumb
def __init__(self, model, messages, stream, call_type, start_time, litellm_call_id, function_id):
if call_type not in [item.value for item in CallTypes]:
allowed_values = ", ".join([item.value for item in CallTypes])
raise ValueError(f"Invalid call_type {call_type}. Allowed values: {allowed_values}")
self.model = model
self.messages = messages
self.stream = stream
self.start_time = start_time # log the call start time
self.call_type = call_type
self.litellm_call_id = litellm_call_id
self.function_id = function_id
self.streaming_chunks = [] # for generating complete stream response
def update_environment_variables(self, model, user, optional_params, litellm_params):
self.optional_params = optional_params
self.model = model
self.user = user
self.litellm_params = litellm_params
self.logger_fn = litellm_params["logger_fn"]
print_verbose(f"self.optional_params: {self.optional_params}")
self.model_call_details = {
"model": self.model,
"messages": self.messages,
"optional_params": self.optional_params,
"litellm_params": self.litellm_params,
"start_time": self.start_time
}
def pre_call(self, input, api_key, model=None, additional_args={}):
# Log the exact input to the LLM API
print_verbose(f"Logging Details Pre-API Call for call id {self.litellm_call_id}")
litellm.error_logs['PRE_CALL'] = locals()
try:
# print_verbose(f"logging pre call for model: {self.model} with call type: {self.call_type}")
self.model_call_details["input"] = input
self.model_call_details["api_key"] = api_key
self.model_call_details["additional_args"] = additional_args
self.model_call_details["log_event_type"] = "pre_api_call"
if (
model
): # if model name was changes pre-call, overwrite the initial model call name with the new one
self.model_call_details["model"] = model
# User Logging -> if you pass in a custom logging function
print_verbose(f"model call details: {self.model_call_details}")
if self.logger_fn and callable(self.logger_fn):
try:
self.logger_fn(
self.model_call_details
) # Expectation: any logger function passed in by the user should accept a dict object
except Exception as e:
print_verbose(
f"LiteLLM.LoggingError: [Non-Blocking] Exception occurred while logging {traceback.format_exc()}"
)
if litellm.max_budget and self.stream:
start_time = self.start_time
end_time = self.start_time # no time has passed as the call hasn't been made yet
time_diff = (end_time - start_time).total_seconds()
float_diff = float(time_diff)
litellm._current_cost += litellm.completion_cost(model=self.model, prompt="".join(message["content"] for message in self.messages), completion="", total_time=float_diff)
# Input Integration Logging -> If you want to log the fact that an attempt to call the model was made
for callback in litellm.input_callback:
try:
if callback == "supabase":
print_verbose("reaches supabase for logging!")
model = self.model_call_details["model"]
messages = self.model_call_details["input"]
print_verbose(f"supabaseClient: {supabaseClient}")
supabaseClient.input_log_event(
model=model,
messages=messages,
end_user=self.model_call_details.get("user", "default"),
litellm_call_id=self.litellm_params["litellm_call_id"],
print_verbose=print_verbose,
)
elif callback == "lite_debugger":
print_verbose(f"reaches litedebugger for logging! - model_call_details {self.model_call_details}")
model = self.model_call_details["model"]
messages = self.model_call_details["input"]
print_verbose(f"liteDebuggerClient: {liteDebuggerClient}")
liteDebuggerClient.input_log_event(
model=model,
messages=messages,
end_user=self.model_call_details.get("user", "default"),
litellm_call_id=self.litellm_params["litellm_call_id"],
litellm_params=self.model_call_details["litellm_params"],
optional_params=self.model_call_details["optional_params"],
print_verbose=print_verbose,
call_type=self.call_type
)
elif callback == "sentry" and add_breadcrumb:
print_verbose("reaches sentry breadcrumbing")
add_breadcrumb(
category="litellm.llm_call",
message=f"Model Call Details pre-call: {self.model_call_details}",
level="info",
)
elif callable(callback): # custom logger functions
customLogger.log_input_event(
model=self.model,
messages=self.messages,
kwargs=self.model_call_details,
print_verbose=print_verbose,
callback_func=callback
)
except Exception as e:
traceback.print_exc()
print_verbose(
f"LiteLLM.LoggingError: [Non-Blocking] Exception occurred while input logging with integrations {traceback.format_exc()}"
)
print_verbose(
f"LiteLLM.Logging: is sentry capture exception initialized {capture_exception}"
)
if capture_exception: # log this error to sentry for debugging
capture_exception(e)
        except Exception as e:  # bind the exception so it can be forwarded to sentry below
print_verbose(
f"LiteLLM.LoggingError: [Non-Blocking] Exception occurred while logging {traceback.format_exc()}"
)
print_verbose(
f"LiteLLM.Logging: is sentry capture exception initialized {capture_exception}"
)
if capture_exception: # log this error to sentry for debugging
capture_exception(e)
def post_call(self, original_response, input=None, api_key=None, additional_args={}):
# Log the exact result from the LLM API, for streaming - log the type of response received
litellm.error_logs['POST_CALL'] = locals()
try:
self.model_call_details["input"] = input
self.model_call_details["api_key"] = api_key
self.model_call_details["original_response"] = original_response
self.model_call_details["additional_args"] = additional_args
self.model_call_details["log_event_type"] = "post_api_call"
# User Logging -> if you pass in a custom logging function
print_verbose(f"model call details: {self.model_call_details}")
print_verbose(
f"Logging Details Post-API Call: logger_fn - {self.logger_fn} | callable(logger_fn) - {callable(self.logger_fn)}"
)
if self.logger_fn and callable(self.logger_fn):
try:
self.logger_fn(
self.model_call_details
) # Expectation: any logger function passed in by the user should accept a dict object
except Exception as e:
print_verbose(
f"LiteLLM.LoggingError: [Non-Blocking] Exception occurred while logging {traceback.format_exc()}"
)
# Input Integration Logging -> If you want to log the fact that an attempt to call the model was made
for callback in litellm.input_callback:
try:
if callback == "lite_debugger":
print_verbose("reaches litedebugger for post-call logging!")
print_verbose(f"liteDebuggerClient: {liteDebuggerClient}")
liteDebuggerClient.post_call_log_event(
original_response=original_response,
litellm_call_id=self.litellm_params["litellm_call_id"],
print_verbose=print_verbose,
call_type = self.call_type,
stream = self.stream,
)
elif callback == "sentry" and add_breadcrumb:
print_verbose("reaches sentry breadcrumbing")
add_breadcrumb(
category="litellm.llm_call",
message=f"Model Call Details post-call: {self.model_call_details}",
level="info",
)
                except Exception as e:  # bind the exception so it can be forwarded to sentry below
print_verbose(
f"LiteLLM.LoggingError: [Non-Blocking] Exception occurred while post-call logging with integrations {traceback.format_exc()}"
)
print_verbose(
f"LiteLLM.Logging: is sentry capture exception initialized {capture_exception}"
)
if capture_exception: # log this error to sentry for debugging
capture_exception(e)
except:
print_verbose(
f"LiteLLM.LoggingError: [Non-Blocking] Exception occurred while logging {traceback.format_exc()}"
)
pass
def success_handler(self, result=None, start_time=None, end_time=None, **kwargs):
print_verbose(
f"Logging Details LiteLLM-Success Call"
)
try:
if start_time is None:
start_time = self.start_time
if end_time is None:
end_time = datetime.datetime.now()
self.model_call_details["log_event_type"] = "successful_api_call"
self.model_call_details["end_time"] = end_time
complete_streaming_response = None
## BUILD COMPLETE STREAMED RESPONSE
if self.stream:
if result.choices[0].finish_reason: # if it's the last chunk
self.streaming_chunks.append(result)
complete_streaming_response = litellm.stream_chunk_builder(self.streaming_chunks)
else:
self.streaming_chunks.append(result)
if complete_streaming_response:
self.model_call_details["complete_streaming_response"] = complete_streaming_response
print_verbose(f"success callbacks: {litellm.success_callback}")
if litellm.max_budget and self.stream:
time_diff = (end_time - start_time).total_seconds()
float_diff = float(time_diff)
litellm._current_cost += litellm.completion_cost(model=self.model, prompt="", completion=result["content"], total_time=float_diff)
for callback in litellm.success_callback:
try:
if callback == "lite_debugger":
print_verbose("reaches lite_debugger for logging!")
print_verbose(f"liteDebuggerClient: {liteDebuggerClient}")
print_verbose(f"liteDebuggerClient details function {self.call_type} and stream set to {self.stream}")
liteDebuggerClient.log_event(
end_user=kwargs.get("user", "default"),
response_obj=result,
start_time=start_time,
end_time=end_time,
litellm_call_id=self.litellm_call_id,
print_verbose=print_verbose,
call_type = self.call_type,
stream = self.stream,
)
if callback == "api_manager":
print_verbose("reaches api manager for updating model cost")
litellm.apiManager.update_cost(completion_obj=result, user=self.user)
if callback == "cache":
# print("entering logger first time")
# print(self.litellm_params["stream_response"])
if litellm.cache != None and self.model_call_details.get('optional_params', {}).get('stream', False) == True:
litellm_call_id = self.litellm_params["litellm_call_id"]
if litellm_call_id in self.litellm_params["stream_response"]:
# append for the given call_id
if self.litellm_params["stream_response"][litellm_call_id]["choices"][0]["message"]["content"] == "default":
self.litellm_params["stream_response"][litellm_call_id]["choices"][0]["message"]["content"] = result["content"] # handle first try
else:
self.litellm_params["stream_response"][litellm_call_id]["choices"][0]["message"]["content"] += result["content"]
else: # init a streaming response for this call id
new_model_response = ModelResponse(choices=[Choices(message=Message(content="default"))])
#print("creating new model response")
#print(new_model_response)
self.litellm_params["stream_response"][litellm_call_id] = new_model_response
#print("adding to cache for", litellm_call_id)
litellm.cache.add_cache(self.litellm_params["stream_response"][litellm_call_id], **self.model_call_details)
if callback == "promptlayer":
print_verbose("reaches promptlayer for logging!")
promptLayerLogger.log_event(
kwargs=self.model_call_details,
response_obj=result,
start_time=start_time,
end_time=end_time,
print_verbose=print_verbose,
)
if callback == "supabase":
print_verbose("reaches supabase for logging!")
kwargs=self.model_call_details
# this only logs streaming once, complete_streaming_response exists i.e when stream ends
if self.stream:
if "complete_streaming_response" not in kwargs:
return
else:
print_verbose("reaches supabase for streaming logging!")
result = kwargs["complete_streaming_response"]
# print(kwargs)
model = kwargs["model"]
messages = kwargs["messages"]
optional_params = kwargs.get("optional_params", {})
litellm_params = kwargs.get("litellm_params", {})
supabaseClient.log_event(
model=model,
messages=messages,
end_user=optional_params.get("user", "default"),
response_obj=result,
start_time=start_time,
end_time=end_time,
litellm_call_id=litellm_params.get("litellm_call_id", str(uuid.uuid4())),
print_verbose=print_verbose,
)
if callback == "wandb":
print_verbose("reaches wandb for logging!")
weightsBiasesLogger.log_event(
kwargs=self.model_call_details,
response_obj=result,
start_time=start_time,
end_time=end_time,
print_verbose=print_verbose,
)
if callback == "langsmith":
print_verbose("reaches langsmtih for logging!")
langsmithLogger.log_event(
kwargs=self.model_call_details,
response_obj=result,
start_time=start_time,
end_time=end_time,
print_verbose=print_verbose,
)
if callable(callback): # custom logger functions
customLogger.log_event(
kwargs=self.model_call_details,
response_obj=result,
start_time=start_time,
end_time=end_time,
print_verbose=print_verbose,
callback_func=callback
)
except Exception as e:
print_verbose(
f"LiteLLM.LoggingError: [Non-Blocking] Exception occurred while success logging with integrations {traceback.format_exc()}"
)
print_verbose(
f"LiteLLM.Logging: is sentry capture exception initialized {capture_exception}"
)
if capture_exception: # log this error to sentry for debugging
capture_exception(e)
except:
print_verbose(
f"LiteLLM.LoggingError: [Non-Blocking] Exception occurred while success logging {traceback.format_exc()}"
)
pass
def failure_handler(self, exception, traceback_exception, start_time=None, end_time=None):
print_verbose(
f"Logging Details LiteLLM-Failure Call"
)
try:
if start_time is None:
start_time = self.start_time
if end_time is None:
end_time = datetime.datetime.now()
# on some exceptions, model_call_details is not always initialized, this ensures that we still log those exceptions
if not hasattr(self, "model_call_details"):
self.model_call_details = {}
self.model_call_details["log_event_type"] = "failed_api_call"
self.model_call_details["exception"] = exception
self.model_call_details["traceback_exception"] = traceback_exception
self.model_call_details["end_time"] = end_time
result = None # result sent to all loggers, init this to None incase it's not created
for callback in litellm.failure_callback:
try:
if callback == "lite_debugger":
print_verbose("reaches lite_debugger for logging!")
print_verbose(f"liteDebuggerClient: {liteDebuggerClient}")
result = {
"model": self.model,
"created": time.time(),
"error": traceback_exception,
"usage": {
"prompt_tokens": prompt_token_calculator(
self.model, messages=self.messages
),
"completion_tokens": 0,
},
}
liteDebuggerClient.log_event(
model=self.model,
messages=self.messages,
end_user=self.model_call_details.get("user", "default"),
response_obj=result,
start_time=start_time,
end_time=end_time,
litellm_call_id=self.litellm_call_id,
print_verbose=print_verbose,
call_type = self.call_type,
stream = self.stream,
)
elif callback == "sentry":
print_verbose("sending exception to sentry")
if capture_exception:
capture_exception(exception)
else:
print_verbose(f"capture exception not initialized: {capture_exception}")
elif callable(callback): # custom logger functions
customLogger.log_event(
kwargs=self.model_call_details,
response_obj=result,
start_time=start_time,
end_time=end_time,
print_verbose=print_verbose,
callback_func=callback
)
except Exception as e:
print_verbose(
f"LiteLLM.LoggingError: [Non-Blocking] Exception occurred while failure logging with integrations {traceback.format_exc()}"
)
print_verbose(
f"LiteLLM.Logging: is sentry capture exception initialized {capture_exception}"
)
if capture_exception: # log this error to sentry for debugging
capture_exception(e)
except:
print_verbose(
f"LiteLLM.LoggingError: [Non-Blocking] Exception occurred while failure logging {traceback.format_exc()}"
)
pass
def exception_logging(
additional_args={},
logger_fn=None,
exception=None,
):
try:
model_call_details = {}
if exception:
model_call_details["exception"] = exception
model_call_details["additional_args"] = additional_args
# User Logging -> if you pass in a custom logging function or want to use sentry breadcrumbs
print_verbose(
f"Logging Details: logger_fn - {logger_fn} | callable(logger_fn) - {callable(logger_fn)}"
)
if logger_fn and callable(logger_fn):
try:
logger_fn(
model_call_details
) # Expectation: any logger function passed in by the user should accept a dict object
except Exception as e:
print(
f"LiteLLM.LoggingError: [Non-Blocking] Exception occurred while logging {traceback.format_exc()}"
)
except Exception as e:
print(
f"LiteLLM.LoggingError: [Non-Blocking] Exception occurred while logging {traceback.format_exc()}"
)
pass
####### CLIENT ###################
# make it easy to log if completion/embedding runs succeeded or failed + see what happened | Non-Blocking
def client(original_function):
global liteDebuggerClient, get_all_keys
def function_setup(
start_time, *args, **kwargs
): # just run once to check if user wants to send their data anywhere - PostHog/Sentry/Slack/etc.
try:
global callback_list, add_breadcrumb, user_logger_fn, Logging
function_id = kwargs["id"] if "id" in kwargs else None
if litellm.use_client or ("use_client" in kwargs and kwargs["use_client"] == True):
print_verbose(f"litedebugger initialized")
if "lite_debugger" not in litellm.input_callback:
litellm.input_callback.append("lite_debugger")
if "lite_debugger" not in litellm.success_callback:
litellm.success_callback.append("lite_debugger")
if "lite_debugger" not in litellm.failure_callback:
litellm.failure_callback.append("lite_debugger")
if (
len(litellm.input_callback) > 0
or len(litellm.success_callback) > 0
or len(litellm.failure_callback) > 0
) and len(callback_list) == 0:
callback_list = list(
set(
litellm.input_callback
+ litellm.success_callback
+ litellm.failure_callback
)
)
set_callbacks(
callback_list=callback_list,
function_id=function_id
)
if add_breadcrumb:
add_breadcrumb(
category="litellm.llm_call",
message=f"Positional Args: {args}, Keyword Args: {kwargs}",
level="info",
)
if "logger_fn" in kwargs:
user_logger_fn = kwargs["logger_fn"]
# CRASH REPORTING TELEMETRY
crash_reporting(*args, **kwargs)
# INIT LOGGER - for user-specified integrations
model = args[0] if len(args) > 0 else kwargs["model"]
call_type = original_function.__name__
if call_type == CallTypes.completion.value:
messages = args[1] if len(args) > 1 else kwargs["messages"]
elif call_type == CallTypes.embedding.value:
messages = args[1] if len(args) > 1 else kwargs["input"]
stream = True if "stream" in kwargs and kwargs["stream"] == True else False
logging_obj = Logging(model=model, messages=messages, stream=stream, litellm_call_id=kwargs["litellm_call_id"], function_id=function_id, call_type=call_type, start_time=start_time)
return logging_obj
except Exception as e: # DO NOT BLOCK running the function because of this
print_verbose(f"[Non-Blocking] {traceback.format_exc()}; args - {args}; kwargs - {kwargs}")
print(e)
pass
def crash_reporting(*args, **kwargs):
if litellm.telemetry:
try:
model = args[0] if len(args) > 0 else kwargs["model"]
exception = kwargs["exception"] if "exception" in kwargs else None
custom_llm_provider = (
kwargs["custom_llm_provider"]
if "custom_llm_provider" in kwargs
else None
)
safe_crash_reporting(
model=model,
exception=exception,
custom_llm_provider=custom_llm_provider,
) # log usage-crash details. Do not log any user details. If you want to turn this off, set `litellm.telemetry=False`.
except:
# [Non-Blocking Error]
pass
def wrapper(*args, **kwargs):
start_time = datetime.datetime.now()
result = None
# only set litellm_call_id if its not in kwargs
if "litellm_call_id" not in kwargs:
kwargs["litellm_call_id"] = str(uuid.uuid4())
try:
model = args[0] if len(args) > 0 else kwargs["model"]
except:
raise ValueError("model param not passed in.")
try:
logging_obj = function_setup(start_time, *args, **kwargs)
kwargs["litellm_logging_obj"] = logging_obj
# [OPTIONAL] CHECK BUDGET
if litellm.max_budget:
if litellm._current_cost > litellm.max_budget:
raise BudgetExceededError(current_cost=litellm._current_cost, max_budget=litellm.max_budget)
# [OPTIONAL] CHECK CACHE
# remove this after deprecating litellm.caching
print_verbose(f"litellm.caching: {litellm.caching}; litellm.caching_with_models: {litellm.caching_with_models}")
if (litellm.caching or litellm.caching_with_models) and litellm.cache is None:
litellm.cache = Cache()
print_verbose(f"kwargs[caching]: {kwargs.get('caching', False)}; litellm.cache: {litellm.cache}")
# if caching is false, don't run this
if (kwargs.get("caching", None) is None and litellm.cache is not None) or kwargs.get("caching", False) == True: # allow users to control returning cached responses from the completion function
# checking cache
if (litellm.cache != None or litellm.caching or litellm.caching_with_models):
print_verbose(f"Checking Cache")
cached_result = litellm.cache.get_cache(*args, **kwargs)
if cached_result != None:
print_verbose(f"Cache Hit!")
return cached_result
# MODEL CALL
result = original_function(*args, **kwargs)
end_time = datetime.datetime.now()
if "stream" in kwargs and kwargs["stream"] == True:
# TODO: Add to cache for streaming
return result
# [OPTIONAL] ADD TO CACHE
if litellm.caching or litellm.caching_with_models or litellm.cache != None: # user init a cache object
litellm.cache.add_cache(result, *args, **kwargs)
# [OPTIONAL] Return LiteLLM call_id
if litellm.use_client == True:
                result['litellm_call_id'] = kwargs["litellm_call_id"]
# LOG SUCCESS - handle streaming success logging in the _next_ object, remove `handle_success` once it's deprecated
logging_obj.success_handler(result, start_time, end_time)
# threading.Thread(target=logging_obj.success_handler, args=(result, start_time, end_time)).start()
my_thread = threading.Thread(
target=handle_success, args=(args, kwargs, result, start_time, end_time)
) # don't interrupt execution of main thread
my_thread.start()
# RETURN RESULT
result._response_ms = (end_time - start_time).total_seconds() * 1000 # return response latency in ms like openai
return result
except Exception as e:
traceback_exception = traceback.format_exc()
crash_reporting(*args, **kwargs, exception=traceback_exception)
end_time = datetime.datetime.now()
# LOG FAILURE - handle streaming failure logging in the _next_ object, remove `handle_failure` once it's deprecated
threading.Thread(target=logging_obj.failure_handler, args=(e, traceback_exception, start_time, end_time)).start()
my_thread = threading.Thread(
target=handle_failure,
args=(e, traceback_exception, start_time, end_time, args, kwargs),
) # don't interrupt execution of main thread
my_thread.start()
if hasattr(e, "message"):
if (
liteDebuggerClient and liteDebuggerClient.dashboard_url != None
): # make it easy to get to the debugger logs if you've initialized it
e.message += f"\n Check the log in your dashboard - {liteDebuggerClient.dashboard_url}"
raise e
return wrapper
####### USAGE CALCULATOR ################
# Extract the number of billion parameters from the model name
# only used for together_computer LLMs
def get_model_params_and_category(model_name):
import re
params_match = re.search(r'(\d+b)', model_name) # capture the parameter-count suffix, e.g. 3b, 70b, etc.
category = None
if params_match != None:
params_match = params_match.group(1)
params_match = params_match.replace("b", "")
params_billion = float(params_match)
# Determine the category based on the number of parameters
if params_billion <= 3.0:
category = "together-ai-up-to-3b"
elif params_billion <= 7.0:
category = "together-ai-3.1b-7b"
elif params_billion <= 20.0:
category = "together-ai-7.1b-20b"
elif params_billion <= 40.0:
category = "together-ai-20.1b-40b"
elif params_billion <= 70.0:
category = "together-ai-40.1b-70b"
return category
return None
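# Illustrative usage (sketch, kept as comments so nothing runs at import time; model names are examples):
#   get_model_params_and_category("togethercomputer/llama-2-70b-chat")  # -> "together-ai-40.1b-70b"
#   get_model_params_and_category("togethercomputer/falcon-7b")         # -> "together-ai-3.1b-7b"
#   get_model_params_and_category("model-without-a-size-suffix")        # -> None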
def get_replicate_completion_pricing(completion_response=None, total_time=0.0):
# see https://replicate.com/pricing
a100_40gb_price_per_second_public = 0.001150
# for all litellm currently supported LLMs, almost all requests go to a100_80gb
a100_80gb_price_per_second_public = 0.001400 # assume all calls sent to A100 80GB for now
if total_time == 0.0:
start_time = completion_response['created']
end_time = completion_response["ended"]
total_time = end_time - start_time
return a100_80gb_price_per_second_public*total_time
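# Illustrative usage (sketch): a Replicate call that ran for 5.0 seconds is billed at the assumed
# A100 80GB rate above, i.e. 5.0 * 0.001400 = $0.007.
#   get_replicate_completion_pricing(total_time=5.0)  # -> 0.007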
def _select_tokenizer(model: str):
# cohere
import pkg_resources
if model in litellm.cohere_models:
tokenizer = Tokenizer.from_pretrained("Cohere/command-nightly")
return {"type": "huggingface_tokenizer", "tokenizer": tokenizer}
# anthropic
elif model in litellm.anthropic_models:
# Read the JSON file
filename = pkg_resources.resource_filename(__name__, 'llms/tokenizers/anthropic_tokenizer.json')
with open(filename, 'r') as f:
json_data = json.load(f)
# re-serialize the decoded data back into a JSON string so the tokenizer can be built from it
json_str = json.dumps(json_data, ensure_ascii=False)
# load tokenizer
tokenizer = Tokenizer.from_str(json_str)
return {"type": "huggingface_tokenizer", "tokenizer": tokenizer}
# llama2
elif "llama-2" in model.lower():
tokenizer = Tokenizer.from_pretrained("hf-internal-testing/llama-tokenizer")
return {"type": "huggingface_tokenizer", "tokenizer": tokenizer}
# default - tiktoken
else:
return {"type": "openai_tokenizer", "tokenizer": encoding}
def encode(model: str, text: str):
"""
Encodes the given text using the specified model.
Args:
model (str): The name of the model to use for tokenization.
text (str): The text to be encoded.
Returns:
enc: The encoded text.
"""
tokenizer_json = _select_tokenizer(model=model)
enc = tokenizer_json["tokenizer"].encode(text)
return enc
def decode(model: str, tokens: List[int]):
tokenizer_json = _select_tokenizer(model=model)
dec = tokenizer_json["tokenizer"].decode(tokens)
return dec
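# Illustrative round-trip (sketch, kept as comments; models that are not cohere/anthropic/llama-2 fall back to tiktoken):
#   tokens = encode(model="gpt-3.5-turbo", text="hello world")
#   decode(model="gpt-3.5-turbo", tokens=tokens)  # -> "hello world"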
def token_counter(model="", text=None, messages: Optional[List] = None):
"""
Count the number of tokens in a given text using a specified model.
Args:
model (str): The name of the model to use for tokenization. Default is an empty string.
text (str): The raw text string to be passed to the model. Default is None.
messages (Optional[List[Dict[str, str]]]): Alternative to passing in text. A list of dictionaries representing messages with "role" and "content" keys. Default is None.
Returns:
int: The number of tokens in the text.
"""
# use tiktoken, anthropic, cohere or llama2's tokenizer depending on the model
if text == None:
if messages is not None:
text = "".join([message["content"] for message in messages])
else:
raise ValueError("text and messages cannot both be None")
num_tokens = 0
if model is not None:
tokenizer_json = _select_tokenizer(model=model)
if tokenizer_json["type"] == "huggingface_tokenizer":
enc = tokenizer_json["tokenizer"].encode(text)
num_tokens = len(enc.ids)
elif tokenizer_json["type"] == "openai_tokenizer":
enc = tokenizer_json["tokenizer"].encode(text)
num_tokens = len(enc)
else:
num_tokens = len(encoding.encode(text))
return num_tokens
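# Illustrative usage (sketch, kept as comments):
#   token_counter(model="gpt-3.5-turbo", text="hello world")
#   token_counter(model="gpt-3.5-turbo", messages=[{"role": "user", "content": "hello world"}])
# Both calls count tokens over the same string, since message contents are concatenated before encoding.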
def cost_per_token(model="gpt-3.5-turbo", prompt_tokens=0, completion_tokens=0):
"""
Calculates the cost per token for a given model, prompt tokens, and completion tokens.
Parameters:
model (str): The name of the model to use. Default is "gpt-3.5-turbo".
prompt_tokens (int): The number of tokens in the prompt.
completion_tokens (int): The number of tokens in the completion.
Returns:
tuple: A tuple containing the cost in USD dollars for prompt tokens and completion tokens, respectively.
"""
# given
prompt_tokens_cost_usd_dollar = 0
completion_tokens_cost_usd_dollar = 0
model_cost_ref = litellm.model_cost
if model in model_cost_ref:
prompt_tokens_cost_usd_dollar = (
model_cost_ref[model]["input_cost_per_token"] * prompt_tokens
)
completion_tokens_cost_usd_dollar = (
model_cost_ref[model]["output_cost_per_token"] * completion_tokens
)
return prompt_tokens_cost_usd_dollar, completion_tokens_cost_usd_dollar
else:
# calculate average input cost
input_cost_sum = 0
output_cost_sum = 0
model_cost_ref = litellm.model_cost
for model in model_cost_ref:
input_cost_sum += model_cost_ref[model]["input_cost_per_token"]
output_cost_sum += model_cost_ref[model]["output_cost_per_token"]
avg_input_cost = input_cost_sum / len(model_cost_ref.keys())
avg_output_cost = output_cost_sum / len(model_cost_ref.keys())
prompt_tokens_cost_usd_dollar = avg_input_cost * prompt_tokens
completion_tokens_cost_usd_dollar = avg_output_cost * completion_tokens
return prompt_tokens_cost_usd_dollar, completion_tokens_cost_usd_dollar
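# Illustrative usage (sketch): assuming the cost map lists gpt-3.5-turbo at $0.0000015 per input token
# and $0.000002 per output token, then for 100 prompt + 50 completion tokens:
#   cost_per_token(model="gpt-3.5-turbo", prompt_tokens=100, completion_tokens=50)
#   # -> (100 * 0.0000015, 50 * 0.000002) == (0.00015, 0.0001)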
def completion_cost(
completion_response=None,
model="gpt-3.5-turbo",
prompt="",
messages: List = [],
completion="",
total_time=0.0, # used for replicate
):
"""
Calculate the cost of a given completion call for GPT-3.5-turbo, llama2, or any other litellm-supported LLM.
Parameters:
completion_response (litellm.ModelResponses): [Required] The response received from a LiteLLM completion request.
[OPTIONAL PARAMS]
model (str): Optional. The name of the language model used in the completion calls
prompt (str): Optional. The input prompt passed to the llm
completion (str): Optional. The output completion text from the llm
total_time (float): Optional. (Only used for Replicate LLMs) The total time used for the request in seconds
Returns:
float: The cost in USD dollars for the completion based on the provided parameters.
Note:
- If completion_response is provided, the function extracts token information and the model name from it.
- If completion_response is not provided, the function calculates token counts based on the model and input text.
- The cost is calculated based on the model, prompt tokens, and completion tokens.
- For certain models containing "togethercomputer" in the name, prices are based on the model size.
- For Replicate models, the cost is calculated based on the total time used for the request.
Exceptions:
- If an error occurs during execution, the function returns 0.0 without blocking the user's execution path.
"""
try:
if messages != []:
prompt = " ".join([message["content"] for message in messages])
# Handle Inputs to completion_cost
prompt_tokens = 0
completion_tokens = 0
if completion_response != None:
# get input/output tokens from completion_response
prompt_tokens = completion_response['usage']['prompt_tokens']
completion_tokens = completion_response['usage']['completion_tokens']
model = completion_response['model'] # get model from completion_response
else:
prompt_tokens = token_counter(model=model, text=prompt)
completion_tokens = token_counter(model=model, text=completion)
# Calculate cost based on prompt_tokens, completion_tokens
if "togethercomputer" in model:
# together ai prices based on size of llm
# get_model_params_and_category takes a model name and returns the category of LLM size it is in model_prices_and_context_window.json
model = get_model_params_and_category(model)
# replicate llms are calculate based on time for request running
# see https://replicate.com/pricing
elif (
model in litellm.replicate_models or
"replicate" in model
):
return get_replicate_completion_pricing(completion_response, total_time)
prompt_tokens_cost_usd_dollar, completion_tokens_cost_usd_dollar = cost_per_token(
model=model, prompt_tokens=prompt_tokens, completion_tokens=completion_tokens
)
return prompt_tokens_cost_usd_dollar + completion_tokens_cost_usd_dollar
except:
return 0.0 # this should not block a user's execution path
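# Illustrative usage (sketch, kept as comments):
#   response = litellm.completion(model="gpt-3.5-turbo", messages=[{"role": "user", "content": "Hi"}])
#   completion_cost(completion_response=response)                             # tokens + model read from the response
#   completion_cost(model="gpt-3.5-turbo", prompt="Hi", completion="Hello!")  # tokens counted locally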
####### HELPER FUNCTIONS ################
def register_model(model_cost: Union[str, dict]):
"""
Register new / Override existing models (and their pricing) to specific providers.
Provide EITHER a model cost dictionary or a url to a hosted json blob
Example usage:
model_cost_dict = {
"gpt-4": {
"max_tokens": 8192,
"input_cost_per_token": 0.00003,
"output_cost_per_token": 0.00006,
"litellm_provider": "openai",
"mode": "chat"
},
}
"""
loaded_model_cost = {}
if isinstance(model_cost, dict):
loaded_model_cost = model_cost
elif isinstance(model_cost, str):
loaded_model_cost = litellm.get_model_cost_map(url=model_cost)
for key, value in loaded_model_cost.items():
## override / add new keys to the existing model cost dictionary
litellm.model_cost[key] = loaded_model_cost[key]
# add new model names to provider lists
if value.get('litellm_provider') == 'openai':
if key not in litellm.open_ai_chat_completion_models:
litellm.open_ai_chat_completion_models.append(key)
elif value.get('litellm_provider') == 'text-completion-openai':
if key not in litellm.open_ai_text_completion_models:
litellm.open_ai_text_completion_models.append(key)
elif value.get('litellm_provider') == 'cohere':
if key not in litellm.cohere_models:
litellm.cohere_models.append(key)
elif value.get('litellm_provider') == 'anthropic':
if key not in litellm.anthropic_models:
litellm.anthropic_models.append(key)
elif value.get('litellm_provider') == 'openrouter':
split_string = key.split('/', 1)
if key not in litellm.openrouter_models:
litellm.openrouter_models.append(split_string[1])
elif value.get('litellm_provider') == 'vertex_ai-text-models':
if key not in litellm.vertex_text_models:
litellm.vertex_text_models.append(key)
elif value.get('litellm_provider') == 'vertex_ai-code-text-models':
if key not in litellm.vertex_code_text_models:
litellm.vertex_code_text_models.append(key)
elif value.get('litellm_provider') == 'vertex_ai-chat-models':
if key not in litellm.vertex_chat_models:
litellm.vertex_chat_models.append(key)
elif value.get('litellm_provider') == 'vertex_ai-code-chat-models':
if key not in litellm.vertex_code_chat_models:
litellm.vertex_code_chat_models.append(key)
elif value.get('litellm_provider') == 'ai21':
if key not in litellm.ai21_models:
litellm.ai21_models.append(key)
elif value.get('litellm_provider') == 'nlp_cloud':
if key not in litellm.nlp_cloud_models:
litellm.nlp_cloud_models.append(key)
elif value.get('litellm_provider') == 'aleph_alpha':
if key not in litellm.aleph_alpha_models:
litellm.aleph_alpha_models.append(key)
elif value.get('litellm_provider') == 'bedrock':
if key not in litellm.bedrock_models:
litellm.bedrock_models.append(key)
return model_cost
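# Illustrative usage (sketch): register the docstring's example dict, or point at a hosted JSON blob
# (the URL below is an assumption for illustration):
#   litellm.register_model(model_cost_dict)
#   litellm.register_model("https://raw.githubusercontent.com/BerriAI/litellm/main/model_prices_and_context_window.json")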
def get_litellm_params(
return_async=False,
api_key=None,
force_timeout=600,
azure=False,
logger_fn=None,
verbose=False,
hugging_face=False,
replicate=False,
together_ai=False,
custom_llm_provider=None,
api_base=None,
litellm_call_id=None,
model_alias_map=None,
completion_call_id=None,
metadata=None
):
litellm_params = {
"return_async": return_async,
"api_key": api_key,
"force_timeout": force_timeout,
"logger_fn": logger_fn,
"verbose": verbose,
"custom_llm_provider": custom_llm_provider,
"api_base": api_base,
"litellm_call_id": litellm_call_id,
"model_alias_map": model_alias_map,
"completion_call_id": completion_call_id,
"metadata": metadata,
"stream_response": {} # litellm_call_id: ModelResponse Dict
}
return litellm_params
def get_optional_params( # use the openai defaults
# 12 optional params
functions=[],
function_call="",
temperature=None,
top_p=None,
n=None,
stream=False,
stop=None,
max_tokens=None,
presence_penalty=None,
frequency_penalty=None, # keep in sync with default_params below so an unset value is not treated as user-supplied
logit_bias={},
user="",
request_timeout=None,
deployment_id=None,
model=None,
custom_llm_provider="",
**kwargs
):
# retrieve all parameters passed to the function
passed_params = locals()
special_params = passed_params.pop("kwargs")
for k, v in special_params.items():
passed_params[k] = v
default_params = {
"functions":[],
"function_call":"",
"temperature":None,
"top_p":None,
"n":None,
"stream":None,
"stop":None,
"max_tokens":None,
"presence_penalty":None,
"frequency_penalty":None,
"logit_bias":{},
"user":"",
"deployment_id":None,
"request_timeout":None,
"model":None,
"custom_llm_provider":"",
}
# filter out those parameters that were passed with non-default values
non_default_params = {k: v for k, v in passed_params.items() if (k != "model" and k != "custom_llm_provider" and k in default_params and v != default_params[k])}
optional_params = {}
## raise exception if function calling passed in for a provider that doesn't support it
if "functions" in non_default_params or "function_call" in non_default_params:
if custom_llm_provider != "openai" and custom_llm_provider != "text-completion-openai" and custom_llm_provider != "azure":
if litellm.add_function_to_prompt: # if user opts to add it to prompt instead
optional_params["functions_unsupported_model"] = non_default_params.pop("functions")
else:
raise UnsupportedParamsError(status_code=500, message=f"Function calling is not supported by {custom_llm_provider}. To add it to the prompt, set `litellm.add_function_to_prompt = True`.")
def _check_valid_arg(supported_params):
print_verbose(f"checking params for {model}")
print_verbose(f"params passed in {passed_params}")
print_verbose(f"non-default params passed in {non_default_params}")
unsupported_params = {}
for k in non_default_params.keys():
if k not in supported_params:
if k == "n" and n == 1: # langchain sends n=1 as a default value
pass
# Always keep this check in an elif block
elif k == "request_timeout": # litellm handles request time outs
pass
else:
unsupported_params[k] = non_default_params[k]
if unsupported_params and not litellm.drop_params:
raise UnsupportedParamsError(status_code=500, message=f"{custom_llm_provider} does not support parameters: {unsupported_params}. To drop these, set `litellm.drop_params=True`.")
## raise exception if provider doesn't support passed in param
if custom_llm_provider == "anthropic":
## check if unsupported param passed in
supported_params = ["stream", "stop", "temperature", "top_p", "max_tokens"]
_check_valid_arg(supported_params=supported_params)
# handle anthropic params
if stream:
optional_params["stream"] = stream
if stop:
if type(stop) == str:
stop = [stop] # openai can accept str/list for stop
optional_params["stop_sequences"] = stop
if temperature:
optional_params["temperature"] = temperature
if top_p:
optional_params["top_p"] = top_p
if max_tokens:
optional_params["max_tokens_to_sample"] = max_tokens
elif custom_llm_provider == "cohere":
## check if unsupported param passed in
supported_params = ["stream", "temperature", "max_tokens", "logit_bias", "top_p", "frequency_penalty", "presence_penalty", "stop", "n"]
_check_valid_arg(supported_params=supported_params)
# handle cohere params
if stream:
optional_params["stream"] = stream
if temperature:
optional_params["temperature"] = temperature
if max_tokens:
optional_params["max_tokens"] = max_tokens
if n:
optional_params["num_generations"] = n
if logit_bias != {}:
optional_params["logit_bias"] = logit_bias
if top_p:
optional_params["p"] = top_p
if frequency_penalty:
optional_params["frequency_penalty"] = frequency_penalty
if presence_penalty:
optional_params["presence_penalty"] = presence_penalty
if stop:
optional_params["stop_sequences"] = stop
elif custom_llm_provider == "maritalk":
## check if unsupported param passed in
supported_params = ["stream", "temperature", "max_tokens", "top_p", "presence_penalty", "stop"]
_check_valid_arg(supported_params=supported_params)
# handle maritalk params
if stream:
optional_params["stream"] = stream
if temperature:
optional_params["temperature"] = temperature
if max_tokens:
optional_params["max_tokens"] = max_tokens
if logit_bias != {}:
optional_params["logit_bias"] = logit_bias
if top_p:
optional_params["p"] = top_p
if presence_penalty:
optional_params["repetition_penalty"] = presence_penalty
if stop:
optional_params["stopping_tokens"] = stop
elif custom_llm_provider == "replicate":
## check if unsupported param passed in
supported_params = ["stream", "temperature", "max_tokens", "top_p", "stop", "seed"]
_check_valid_arg(supported_params=supported_params)
if stream:
optional_params["stream"] = stream
return optional_params
if max_tokens:
if "vicuna" in model or "flan" in model:
optional_params["max_length"] = max_tokens
elif "meta/codellama-13b" in model:
optional_params["max_tokens"] = max_tokens
else:
optional_params["max_new_tokens"] = max_tokens
if temperature:
optional_params["temperature"] = temperature
if top_p:
optional_params["top_p"] = top_p
if stop:
optional_params["stop_sequences"] = stop
elif custom_llm_provider == "huggingface":
## check if unsupported param passed in
supported_params = ["stream", "temperature", "max_tokens", "top_p", "stop", "n"]
_check_valid_arg(supported_params=supported_params)
if temperature:
optional_params["temperature"] = temperature
if top_p:
optional_params["top_p"] = top_p
if n:
optional_params["best_of"] = n
optional_params["do_sample"] = True # need to sample if you want best of for hf inference endpoints
if stream:
optional_params["stream"] = stream
if stop:
optional_params["stop"] = stop
if max_tokens:
optional_params["max_new_tokens"] = max_tokens
if presence_penalty:
optional_params["repetition_penalty"] = presence_penalty
elif custom_llm_provider == "together_ai":
## check if unsupported param passed in
supported_params = ["stream", "temperature", "max_tokens", "top_p", "stop", "frequency_penalty"]
_check_valid_arg(supported_params=supported_params)
if stream:
optional_params["stream_tokens"] = stream
if temperature:
optional_params["temperature"] = temperature
if top_p:
optional_params["top_p"] = top_p
if max_tokens:
optional_params["max_tokens"] = max_tokens
if frequency_penalty:
optional_params["repetition_penalty"] = frequency_penalty # https://docs.together.ai/reference/inference
if stop:
optional_params["stop"] = stop
elif custom_llm_provider == "ai21":
## check if unsupported param passed in
supported_params = ["stream", "n", "temperature", "max_tokens", "top_p", "stop", "frequency_penalty", "presence_penalty"]
_check_valid_arg(supported_params=supported_params)
if stream:
optional_params["stream"] = stream
if n:
optional_params["numResults"] = n
if max_tokens:
optional_params["maxTokens"] = max_tokens
if temperature:
optional_params["temperature"] = temperature
if top_p:
optional_params["topP"] = top_p
if stop:
optional_params["stopSequences"] = stop
if frequency_penalty:
optional_params["frequencyPenalty"] = {"scale": frequency_penalty}
if presence_penalty:
optional_params["presencePenalty"] = {"scale": presence_penalty}
elif custom_llm_provider == "palm": # https://developers.generativeai.google/tutorials/curl_quickstart
## check if unsupported param passed in
supported_params = ["temperature", "top_p", "stream", "n", "stop", "max_tokens"]
_check_valid_arg(supported_params=supported_params)
if temperature:
optional_params["temperature"] = temperature
if top_p:
optional_params["top_p"] = top_p
if stream:
optional_params["stream"] = stream
if n:
optional_params["candidate_count"] = n
if stop:
optional_params["stopSequences"] = stop
if max_tokens:
optional_params["max_output_tokens"] = max_tokens
elif (
custom_llm_provider == "vertex_ai"
):
## check if unsupported param passed in
supported_params = ["temperature", "top_p", "max_tokens", "stream"]
_check_valid_arg(supported_params=supported_params)
if temperature:
optional_params["temperature"] = temperature
if top_p:
optional_params["top_p"] = top_p
if stream:
optional_params["stream"] = stream
if max_tokens:
optional_params["max_output_tokens"] = max_tokens
elif custom_llm_provider == "sagemaker":
if "llama-2" in model:
# llama-2 models on sagemaker support the following args
"""
max_new_tokens: Model generates text until the output length (excluding the input context length) reaches max_new_tokens. If specified, it must be a positive integer.
temperature: Controls the randomness in the output. Higher temperature results in output sequence with low-probability words and lower temperature results in output sequence with high-probability words. If temperature -> 0, it results in greedy decoding. If specified, it must be a positive float.
top_p: In each step of text generation, sample from the smallest possible set of words with cumulative probability top_p. If specified, it must be a float between 0 and 1.
return_full_text: If True, input text will be part of the output generated text. If specified, it must be boolean. The default value for it is False.
"""
## check if unsupported param passed in
supported_params = ["temperature", "max_tokens", "stream"]
_check_valid_arg(supported_params=supported_params)
if max_tokens:
optional_params["max_new_tokens"] = max_tokens
if temperature:
optional_params["temperature"] = temperature
if top_p:
optional_params["top_p"] = top_p
if stream:
optional_params["stream"] = stream
else:
## check if unsupported param passed in
supported_params = []
_check_valid_arg(supported_params=supported_params)
elif custom_llm_provider == "bedrock":
if "ai21" in model:
supported_params = ["max_tokens", "temperature", "stop", "top_p", "stream"]
_check_valid_arg(supported_params=supported_params)
# params "maxTokens":200,"temperature":0,"topP":250,"stop_sequences":[],
# https://us-west-2.console.aws.amazon.com/bedrock/home?region=us-west-2#/providers?model=j2-ultra
if max_tokens:
optional_params["maxTokens"] = max_tokens
if temperature:
optional_params["temperature"] = temperature
if stop:
optional_params["stop_sequences"] = stop
if top_p:
optional_params["topP"] = top_p
if stream:
optional_params["stream"] = stream
elif "anthropic" in model:
supported_params = ["max_tokens", "temperature", "stop", "top_p", "stream"]
_check_valid_arg(supported_params=supported_params)
# anthropic params on bedrock
# \"max_tokens_to_sample\":300,\"temperature\":0.5,\"top_p\":1,\"stop_sequences\":[\"\\\\n\\\\nHuman:\"]}"
if max_tokens:
optional_params["max_tokens_to_sample"] = max_tokens
if temperature:
optional_params["temperature"] = temperature
if top_p:
optional_params["top_p"] = top_p
if stop:
optional_params["stop_sequences"] = stop
if stream:
optional_params["stream"] = stream
elif "amazon" in model: # amazon titan llms
supported_params = ["max_tokens", "temperature", "stop", "top_p", "stream"]
_check_valid_arg(supported_params=supported_params)
# see https://us-west-2.console.aws.amazon.com/bedrock/home?region=us-west-2#/providers?model=titan-large
if max_tokens:
optional_params["maxTokenCount"] = max_tokens
if temperature:
optional_params["temperature"] = temperature
if stop:
optional_params["stopSequences"] = stop
if top_p:
optional_params["topP"] = top_p
if stream:
optional_params["stream"] = stream
elif "cohere" in model: # cohere models on bedrock
supported_params = ["stream", "temperature", "max_tokens", "logit_bias", "top_p", "frequency_penalty", "presence_penalty", "stop"]
_check_valid_arg(supported_params=supported_params)
# handle cohere params
if stream:
optional_params["stream"] = stream
if temperature:
optional_params["temperature"] = temperature
if max_tokens:
optional_params["max_tokens"] = max_tokens
if n:
optional_params["num_generations"] = n
if logit_bias != {}:
optional_params["logit_bias"] = logit_bias
if top_p:
optional_params["p"] = top_p
if frequency_penalty:
optional_params["frequency_penalty"] = frequency_penalty
if presence_penalty:
optional_params["presence_penalty"] = presence_penalty
if stop:
optional_params["stop_sequences"] = stop
elif model in litellm.aleph_alpha_models:
supported_params = ["max_tokens", "stream", "top_p", "temperature", "presence_penalty", "frequency_penalty", "n", "stop"]
_check_valid_arg(supported_params=supported_params)
if max_tokens:
optional_params["maximum_tokens"] = max_tokens
if stream:
optional_params["stream"] = stream
if temperature:
optional_params["temperature"] = temperature
if top_p:
optional_params["top_p"] = top_p
if presence_penalty:
optional_params["presence_penalty"] = presence_penalty
if frequency_penalty:
optional_params["frequency_penalty"] = frequency_penalty
if n:
optional_params["n"] = n
if stop:
optional_params["stop_sequences"] = stop
elif custom_llm_provider == "ollama":
supported_params = ["max_tokens", "stream", "top_p", "temperature", "frequency_penalty", "stop"]
_check_valid_arg(supported_params=supported_params)
if max_tokens:
optional_params["num_predict"] = max_tokens
if stream:
optional_params["stream"] = stream
if temperature:
optional_params["temperature"] = temperature
if top_p:
optional_params["top_p"] = top_p
if frequency_penalty:
optional_params["repeat_penalty"] = frequency_penalty
if stop:
optional_params["stop_sequences"] = stop
elif model in litellm.nlp_cloud_models or custom_llm_provider == "nlp_cloud":
supported_params = ["max_tokens", "stream", "temperature", "top_p", "presence_penalty", "frequency_penalty", "n", "stop"]
_check_valid_arg(supported_params=supported_params)
if max_tokens:
optional_params["max_length"] = max_tokens
if stream:
optional_params["stream"] = stream
if temperature:
optional_params["temperature"] = temperature
if top_p:
optional_params["top_p"] = top_p
if presence_penalty:
optional_params["presence_penalty"] = presence_penalty
if frequency_penalty:
optional_params["frequency_penalty"] = frequency_penalty
if n:
optional_params["num_return_sequences"] = n
if stop:
optional_params["stop_sequences"] = stop
elif model in litellm.petals_models or custom_llm_provider == "petals":
supported_params = ["max_tokens", "temperature", "top_p", "stream"]
_check_valid_arg(supported_params=supported_params)
# max_new_tokens=1,temperature=0.9, top_p=0.6
if max_tokens:
optional_params["max_new_tokens"] = max_tokens
if temperature:
optional_params["temperature"] = temperature
if top_p:
optional_params["top_p"] = top_p
if stream:
optional_params["stream"] = stream
elif custom_llm_provider == "deepinfra":
supported_params = ["temperature", "top_p", "n", "stream", "stop", "max_tokens", "presence_penalty", "frequency_penalty", "logit_bias", "user", "deployment_id", "request_timeout"]
_check_valid_arg(supported_params=supported_params)
optional_params = non_default_params
if temperature != None:
if temperature == 0 and model == "mistralai/Mistral-7B-Instruct-v0.1": # this model does not support temperature == 0
temperature = 0.0001 # close to 0
optional_params["temperature"] = temperature
else: # assume passing in params for openai/azure openai
supported_params = ["functions", "function_call", "temperature", "top_p", "n", "stream", "stop", "max_tokens", "presence_penalty", "frequency_penalty", "logit_bias", "user", "deployment_id", "request_timeout"]
_check_valid_arg(supported_params=supported_params)
optional_params = non_default_params
# if user passed in non-default kwargs for specific providers/models, pass them along
for k in passed_params.keys():
if k not in default_params.keys():
optional_params[k] = passed_params[k]
return optional_params
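# Illustrative mapping (sketch, kept as comments): for Anthropic, OpenAI-style params are translated
# to Anthropic's names, e.g.
#   get_optional_params(model="claude-instant-1", custom_llm_provider="anthropic", max_tokens=256, stop="Human:")
#   # -> roughly {"max_tokens_to_sample": 256, "stop_sequences": ["Human:"]}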
def get_llm_provider(model: str, custom_llm_provider: Optional[str] = None, api_base: Optional[str] = None):
try:
dynamic_api_key = None
# check if llm provider provided
if custom_llm_provider:
return model, custom_llm_provider, dynamic_api_key, api_base
# check if llm provider part of model name
if model.split("/",1)[0] in litellm.provider_list and model.split("/",1)[0] not in litellm.model_list:
custom_llm_provider = model.split("/", 1)[0]
model = model.split("/", 1)[1]
if custom_llm_provider == "perplexity":
# perplexity is openai compatible, we just need to set this to custom_openai and have the api_base be https://api.perplexity.ai
api_base = "https://api.perplexity.ai"
dynamic_api_key = os.getenv("PERPLEXITYAI_API_KEY")
custom_llm_provider = "custom_openai"
elif custom_llm_provider == "anyscale":
# anyscale is openai compatible, we just need to set this to custom_openai and have the api_base be https://api.endpoints.anyscale.com/v1
api_base = "https://api.endpoints.anyscale.com/v1"
dynamic_api_key = os.getenv("ANYSCALE_API_KEY")
custom_llm_provider = "custom_openai"
return model, custom_llm_provider, dynamic_api_key, api_base
# check if api base is a known openai compatible endpoint
if api_base:
for endpoint in litellm.openai_compatible_endpoints:
if endpoint in api_base:
custom_llm_provider = "custom_openai"
if endpoint == "api.perplexity.ai":
dynamic_api_key = os.getenv("PERPLEXITYAI_API_KEY")
return model, custom_llm_provider, dynamic_api_key, api_base
# check if model in known model provider list -> for huggingface models, raise exception as they don't have a fixed provider (can be togetherai, anyscale, baseten, runpod, etc.)
## openai - chatcompletion + text completion
if model in litellm.open_ai_chat_completion_models or "ft:gpt-3.5-turbo" in model:
custom_llm_provider = "openai"
elif model in litellm.open_ai_text_completion_models:
custom_llm_provider = "text-completion-openai"
## anthropic
elif model in litellm.anthropic_models:
custom_llm_provider = "anthropic"
## cohere
elif model in litellm.cohere_models:
custom_llm_provider = "cohere"
## replicate
elif model in litellm.replicate_models or ":" in model:
model_parts = model.split(":")
if len(model_parts) > 1 and len(model_parts[1])==64: ## checks if model name has a 64 digit code - e.g. "meta/llama-2-70b-chat:02e509c789964a7ea8736978a43525956ef40397be9033abf9fd2badfe68c9e3"
custom_llm_provider = "replicate"
elif model in litellm.replicate_models:
custom_llm_provider = "replicate"
## openrouter
elif model in litellm.openrouter_models:
custom_llm_provider = "openrouter"
## maritalk
elif model in litellm.maritalk_models:
custom_llm_provider = "maritalk"
## vertex - text + chat models
elif model in litellm.vertex_chat_models or model in litellm.vertex_text_models:
custom_llm_provider = "vertex_ai"
## ai21
elif model in litellm.ai21_models:
custom_llm_provider = "ai21"
## aleph_alpha
elif model in litellm.aleph_alpha_models:
custom_llm_provider = "aleph_alpha"
## baseten
elif model in litellm.baseten_models:
custom_llm_provider = "baseten"
## nlp_cloud
elif model in litellm.nlp_cloud_models:
custom_llm_provider = "nlp_cloud"
## petals
elif model in litellm.petals_models:
custom_llm_provider = "petals"
## bedrock
elif model in litellm.bedrock_models:
custom_llm_provider = "bedrock"
# openai embeddings
elif model in litellm.open_ai_embedding_models:
custom_llm_provider = "openai"
# cohere embeddings
elif model in litellm.cohere_embedding_models:
custom_llm_provider = "cohere"
if custom_llm_provider is None or custom_llm_provider=="":
print()
print("\033[1;31mProvider List: https://docs.litellm.ai/docs/providers\033[0m")
print()
raise ValueError(f"LLM Provider NOT provided. Pass in the LLM provider you are trying to call. E.g. For 'Huggingface' inference endpoints pass in `completion(model='huggingface/{model}',..)` Learn more: https://docs.litellm.ai/docs/providers")
return model, custom_llm_provider, dynamic_api_key, api_base
except Exception as e:
raise e
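# Illustrative usage (sketch, kept as comments):
#   get_llm_provider("gpt-3.5-turbo")               # -> ("gpt-3.5-turbo", "openai", None, None)
#   get_llm_provider("anthropic/claude-instant-1")  # -> ("claude-instant-1", "anthropic", None, None)
# An explicit "provider/model" prefix wins; otherwise the model name is looked up in the known provider lists.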
def get_api_key(llm_provider: str, dynamic_api_key: Optional[str]):
api_key = (dynamic_api_key or litellm.api_key)
# openai
if llm_provider == "openai" or llm_provider == "text-completion-openai":
api_key = (
api_key or
litellm.openai_key or
get_secret("OPENAI_API_KEY")
)
# anthropic
elif llm_provider == "anthropic":
api_key = (
api_key or
litellm.anthropic_key or
get_secret("ANTHROPIC_API_KEY")
)
# ai21
elif llm_provider == "ai21":
api_key = (
api_key or
litellm.ai21_key or
get_secret("AI211_API_KEY")
)
# aleph_alpha
elif llm_provider == "aleph_alpha":
api_key = (
api_key or
litellm.aleph_alpha_key or
get_secret("ALEPH_ALPHA_API_KEY")
)
# baseten
elif llm_provider == "baseten":
api_key = (
api_key or
litellm.baseten_key or
get_secret("BASETEN_API_KEY")
)
# cohere
elif llm_provider == "cohere":
api_key = (
api_key or
litellm.cohere_key or
get_secret("COHERE_API_KEY")
)
# huggingface
elif llm_provider == "huggingface":
api_key = (
api_key or
litellm.huggingface_key or
get_secret("HUGGINGFACE_API_KEY")
)
# nlp_cloud
elif llm_provider == "nlp_cloud":
api_key = (
api_key or
litellm.nlp_cloud_key or
get_secret("NLP_CLOUD_API_KEY")
)
# replicate
elif llm_provider == "replicate":
api_key = (
api_key or
litellm.replicate_key or
get_secret("REPLICATE_API_KEY")
)
# together_ai
elif llm_provider == "together_ai":
api_key = (
api_key or
litellm.togetherai_api_key or
get_secret("TOGETHERAI_API_KEY") or
get_secret("TOGETHER_AI_TOKEN")
)
return api_key
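# Illustrative usage (sketch): resolution order is dynamic key -> litellm.<provider>_key -> env var, e.g.
#   get_api_key(llm_provider="cohere", dynamic_api_key=None)  # falls back to litellm.cohere_key or COHERE_API_KEY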
def get_max_tokens(model: str):
"""
Get a dict for the maximum tokens (context window),
input_cost_per_token, output_cost_per_token for a given model.
Parameters:
model (str): The name of the model.
Returns:
dict: A dictionary containing the following information:
- max_tokens (int): The maximum number of tokens allowed for the given model.
- input_cost_per_token (float): The cost per token for input.
- output_cost_per_token (float): The cost per token for output.
- litellm_provider (str): The provider of the model (e.g., "openai").
- mode (str): The mode of the model (e.g., "chat" or "completion").
Raises:
Exception: If the model is not mapped yet.
Example:
>>> get_max_tokens("gpt-4")
{
"max_tokens": 8192,
"input_cost_per_token": 0.00003,
"output_cost_per_token": 0.00006,
"litellm_provider": "openai",
"mode": "chat"
}
"""
try:
return litellm.model_cost[model]
except:
raise Exception("This model isn't mapped yet. Add it here - https://github.com/BerriAI/litellm/blob/main/model_prices_and_context_window.json")
def json_schema_type(python_type_name: str):
"""Converts standard python types to json schema types
Parameters
----------
python_type_name : str
__name__ of type
Returns
-------
str
a standard JSON schema type, "string" if not recognized.
"""
python_to_json_schema_types = {
str.__name__: "string",
int.__name__: "integer",
float.__name__: "number",
bool.__name__: "boolean",
list.__name__: "array",
dict.__name__: "object",
"NoneType": "null",
}
return python_to_json_schema_types.get(python_type_name, "string")
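# Illustrative usage (sketch, kept as comments):
#   json_schema_type(int.__name__)      # -> "integer"
#   json_schema_type("SomeCustomType")  # -> "string" (unrecognized types fall back to "string")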
def function_to_dict(input_function): # noqa: C901
"""Using type hints and numpy-styled docstring,
produce a dictionnary usable for OpenAI function calling
Parameters
----------
input_function : function
A function with a numpy-style docstring
Returns
-------
dictionary
A dictionary to add to the list passed to the `functions` parameter of `litellm.completion`
"""
# Get function name and docstring
try:
import inspect
from numpydoc.docscrape import NumpyDocString
from ast import literal_eval
except Exception as e:
raise e
name = input_function.__name__
docstring = inspect.getdoc(input_function)
numpydoc = NumpyDocString(docstring)
description = "\n".join([s.strip() for s in numpydoc["Summary"]])
# Get function parameters and their types from annotations and docstring
parameters = {}
required_params = []
param_info = inspect.signature(input_function).parameters
for param_name, param in param_info.items():
if hasattr(param, "annotation"):
param_type = json_schema_type(param.annotation.__name__)
else:
param_type = None
param_description = None
param_enum = None
# Try to extract param description from docstring using numpydoc
for param_data in numpydoc["Parameters"]:
if param_data.name == param_name:
if hasattr(param_data, "type"):
# replace type from docstring rather than annotation
param_type = param_data.type
if "optional" in param_type:
param_type = param_type.split(",")[0]
elif "{" in param_type:
# may represent a set of acceptable values
# translating as enum for function calling
try:
param_enum = str(list(literal_eval(param_type)))
param_type = "string"
except Exception:
pass
param_type = json_schema_type(param_type)
param_description = "\n".join([s.strip() for s in param_data.desc])
param_dict = {
"type": param_type,
"description": param_description,
"enum": param_enum,
}
parameters[param_name] = dict(
[(k, v) for k, v in param_dict.items() if isinstance(v, str)]
)
# Check if the parameter has no default value (i.e., it's required)
if param.default == param.empty:
required_params.append(param_name)
# Create the dictionary
result = {
"name": name,
"description": description,
"parameters": {
"type": "object",
"properties": parameters,
},
}
# Add "required" key if there are required parameters
if required_params:
result["parameters"]["required"] = required_params
return result
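# Illustrative usage (sketch, kept as comments; `get_weather` is a made-up example function and
# `numpydoc` is assumed to be installed):
#   def get_weather(location: str):
#       """
#       Get the weather for a location.
#
#       Parameters
#       ----------
#       location : str
#           The city to look up.
#       """
#       ...
#   function_to_dict(get_weather)
#   # -> {"name": "get_weather", "description": "Get the weather for a location.",
#   #     "parameters": {"type": "object", "properties": {"location": {...}}, "required": ["location"]}}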
def load_test_model(
model: str,
custom_llm_provider: str = "",
api_base: str = "",
prompt: str = "",
num_calls: int = 0,
force_timeout: int = 0,
):
test_prompt = "Hey, how's it going"
test_calls = 100
if prompt:
test_prompt = prompt
if num_calls:
test_calls = num_calls
messages = [[{"role": "user", "content": test_prompt}] for _ in range(test_calls)]
start_time = time.time()
try:
litellm.batch_completion(
model=model,
messages=messages,
custom_llm_provider=custom_llm_provider,
api_base=api_base,
force_timeout=force_timeout,
)
end_time = time.time()
response_time = end_time - start_time
return {
"total_response_time": response_time,
"calls_made": 100,
"status": "success",
"exception": None,
}
except Exception as e:
end_time = time.time()
response_time = end_time - start_time
return {
"total_response_time": response_time,
"calls_made": 100,
"status": "failed",
"exception": e,
}
def validate_environment(model: Optional[str]=None) -> dict:
"""
Checks if the environment variables are valid for the given model.
Args:
model (Optional[str]): The name of the model. Defaults to None.
Returns:
dict: A dictionary containing the following keys:
- keys_in_environment (bool): True if all the required keys are present in the environment, False otherwise.
- missing_keys (List[str]): A list of missing keys in the environment.
"""
keys_in_environment = False
missing_keys: List[str] = []
if model is None:
return {"keys_in_environment": keys_in_environment, "missing_keys": missing_keys}
## EXTRACT LLM PROVIDER - if model name provided
custom_llm_provider = get_llm_provider(model=model)[1] # get_llm_provider returns (model, provider, dynamic_api_key, api_base)
# # check if llm provider part of model name
# if model.split("/",1)[0] in litellm.provider_list:
# custom_llm_provider = model.split("/", 1)[0]
# model = model.split("/", 1)[1]
# custom_llm_provider_passed_in = True
if custom_llm_provider:
if custom_llm_provider == "openai":
if "OPENAI_API_KEY" in os.environ:
keys_in_environment = True
else:
missing_keys.append("OPENAI_API_KEY")
elif custom_llm_provider == "azure":
if ("AZURE_API_BASE" in os.environ
and "AZURE_API_VERSION" in os.environ
and "AZURE_API_KEY" in os.environ):
keys_in_environment = True
else:
missing_keys.extend(["AZURE_API_BASE", "AZURE_API_VERSION", "AZURE_API_KEY"])
elif custom_llm_provider == "anthropic":
if "ANTHROPIC_API_KEY" in os.environ:
keys_in_environment = True
else:
missing_keys.append("ANTHROPIC_API_KEY")
elif custom_llm_provider == "cohere":
if "COHERE_API_KEY" in os.environ:
keys_in_environment = True
else:
missing_keys.append("COHERE_API_KEY")
elif custom_llm_provider == "replicate":
if "REPLICATE_API_KEY" in os.environ:
keys_in_environment = True
else:
missing_keys.append("REPLICATE_API_KEY")
elif custom_llm_provider == "openrouter":
if "OPENROUTER_API_KEY" in os.environ:
keys_in_environment = True
else:
missing_keys.append("OPENROUTER_API_KEY")
elif custom_llm_provider == "vertex_ai":
if ("VERTEXAI_PROJECT" in os.environ
and "VERTEXAI_LOCATION" in os.environ):
keys_in_environment = True
else:
missing_keys.extend(["VERTEXAI_PROJECT", "VERTEXAI_LOCATION"])
elif custom_llm_provider == "huggingface":
if "HUGGINGFACE_API_KEY" in os.environ:
keys_in_environment = True
else:
missing_keys.append("HUGGINGFACE_API_KEY")
elif custom_llm_provider == "ai21":
if "AI21_API_KEY" in os.environ:
keys_in_environment = True
else:
missing_keys.append("AI21_API_KEY")
elif custom_llm_provider == "together_ai":
if "TOGETHERAI_API_KEY" in os.environ:
keys_in_environment = True
else:
missing_keys.append("TOGETHERAI_API_KEY")
elif custom_llm_provider == "aleph_alpha":
if "ALEPH_ALPHA_API_KEY" in os.environ:
keys_in_environment = True
else:
missing_keys.append("ALEPH_ALPHA_API_KEY")
elif custom_llm_provider == "baseten":
if "BASETEN_API_KEY" in os.environ:
keys_in_environment = True
else:
missing_keys.append("BASETEN_API_KEY")
elif custom_llm_provider == "nlp_cloud":
if "NLP_CLOUD_API_KEY" in os.environ:
keys_in_environment = True
else:
missing_keys.append("NLP_CLOUD_API_KEY")
elif custom_llm_provider == "bedrock":
if "AWS_ACCESS_KEY_ID" in os.environ and "AWS_SECRET_ACCESS_KEY" in os.environ:
keys_in_environment = True
else:
missing_keys.append("AWS_ACCESS_KEY_ID")
missing_keys.append("AWS_SECRET_ACCESS_KEY")
else:
## openai - chatcompletion + text completion
if model in litellm.open_ai_chat_completion_models or model in litellm.open_ai_text_completion_models:
if "OPENAI_API_KEY" in os.environ:
keys_in_environment = True
else:
missing_keys.append("OPENAI_API_KEY")
## anthropic
elif model in litellm.anthropic_models:
if "ANTHROPIC_API_KEY" in os.environ:
keys_in_environment = True
else:
missing_keys.append("ANTHROPIC_API_KEY")
## cohere
elif model in litellm.cohere_models:
if "COHERE_API_KEY" in os.environ:
keys_in_environment = True
else:
missing_keys.append("COHERE_API_KEY")
## replicate
elif model in litellm.replicate_models:
if "REPLICATE_API_KEY" in os.environ:
keys_in_environment = True
else:
missing_keys.append("REPLICATE_API_KEY")
## openrouter
elif model in litellm.openrouter_models:
if "OPENROUTER_API_KEY" in os.environ:
keys_in_environment = True
else:
missing_keys.append("OPENROUTER_API_KEY")
## vertex - text + chat models
elif model in litellm.vertex_chat_models or model in litellm.vertex_text_models:
if ("VERTEXAI_PROJECT" in os.environ
and "VERTEXAI_LOCATION" in os.environ):
keys_in_environment = True
else:
missing_keys.extend(["VERTEXAI_PROJECT", "VERTEXAI_LOCATION"])
## huggingface
elif model in litellm.huggingface_models:
if "HUGGINGFACE_API_KEY" in os.environ:
keys_in_environment = True
else:
missing_keys.append("HUGGINGFACE_API_KEY")
## ai21
elif model in litellm.ai21_models:
if "AI21_API_KEY" in os.environ:
keys_in_environment = True
else:
missing_keys.append("AI21_API_KEY")
## together_ai
elif model in litellm.together_ai_models:
if "TOGETHERAI_API_KEY" in os.environ:
keys_in_environment = True
else:
missing_keys.append("TOGETHERAI_API_KEY")
## aleph_alpha
elif model in litellm.aleph_alpha_models:
if "ALEPH_ALPHA_API_KEY" in os.environ:
keys_in_environment = True
else:
missing_keys.append("ALEPH_ALPHA_API_KEY")
## baseten
elif model in litellm.baseten_models:
if "BASETEN_API_KEY" in os.environ:
keys_in_environment = True
else:
missing_keys.append("BASETEN_API_KEY")
## nlp_cloud
elif model in litellm.nlp_cloud_models:
if "NLP_CLOUD_API_KEY" in os.environ:
keys_in_environment = True
else:
missing_keys.append("NLP_CLOUD_API_KEY")
return {"keys_in_environment": keys_in_environment, "missing_keys": missing_keys}
def set_callbacks(callback_list, function_id=None):
global sentry_sdk_instance, capture_exception, add_breadcrumb, posthog, slack_app, alerts_channel, traceloopLogger, heliconeLogger, aispendLogger, berrispendLogger, supabaseClient, liteDebuggerClient, llmonitorLogger, promptLayerLogger, langFuseLogger, customLogger, weightsBiasesLogger, langsmithLogger
try:
for callback in callback_list:
print_verbose(f"callback: {callback}")
if callback == "sentry":
try:
import sentry_sdk
except ImportError:
print_verbose("Package 'sentry_sdk' is missing. Installing it...")
subprocess.check_call(
[sys.executable, "-m", "pip", "install", "sentry_sdk"]
)
import sentry_sdk
sentry_sdk_instance = sentry_sdk
sentry_trace_rate = (
os.environ.get("SENTRY_API_TRACE_RATE")
if "SENTRY_API_TRACE_RATE" in os.environ
else "1.0"
)
sentry_sdk_instance.init(
dsn=os.environ.get("SENTRY_DSN"),
traces_sample_rate=float(sentry_trace_rate),
)
capture_exception = sentry_sdk_instance.capture_exception
add_breadcrumb = sentry_sdk_instance.add_breadcrumb
elif callback == "posthog":
try:
from posthog import Posthog
except ImportError:
print_verbose("Package 'posthog' is missing. Installing it...")
subprocess.check_call(
[sys.executable, "-m", "pip", "install", "posthog"]
)
from posthog import Posthog
posthog = Posthog(
project_api_key=os.environ.get("POSTHOG_API_KEY"),
host=os.environ.get("POSTHOG_API_URL"),
)
elif callback == "slack":
try:
from slack_bolt import App
except ImportError:
print_verbose("Package 'slack_bolt' is missing. Installing it...")
subprocess.check_call(
[sys.executable, "-m", "pip", "install", "slack_bolt"]
)
from slack_bolt import App
slack_app = App(
token=os.environ.get("SLACK_API_TOKEN"),
signing_secret=os.environ.get("SLACK_API_SECRET"),
)
alerts_channel = os.environ["SLACK_API_CHANNEL"]
print_verbose(f"Initialized Slack App: {slack_app}")
elif callback == "traceloop":
traceloopLogger = TraceloopLogger()
elif callback == "helicone":
heliconeLogger = HeliconeLogger()
elif callback == "llmonitor":
llmonitorLogger = LLMonitorLogger()
elif callback == "promptlayer":
promptLayerLogger = PromptLayerLogger()
elif callback == "langfuse":
langFuseLogger = LangFuseLogger()
elif callback == "wandb":
weightsBiasesLogger = WeightsBiasesLogger()
elif callback == "langsmith":
langsmithLogger = LangsmithLogger()
elif callback == "aispend":
aispendLogger = AISpendLogger()
elif callback == "berrispend":
berrispendLogger = BerriSpendLogger()
elif callback == "supabase":
print_verbose(f"instantiating supabase")
supabaseClient = Supabase()
elif callback == "lite_debugger":
print_verbose(f"instantiating lite_debugger")
if function_id:
liteDebuggerClient = LiteDebugger(email=function_id)
elif litellm.token:
liteDebuggerClient = LiteDebugger(email=litellm.token)
elif litellm.email:
liteDebuggerClient = LiteDebugger(email=litellm.email)
else:
liteDebuggerClient = LiteDebugger(email=str(uuid.uuid4()))
elif callable(callback):
customLogger = CustomLogger()
except Exception as e:
raise e
def handle_failure(exception, traceback_exception, start_time, end_time, args, kwargs):
global sentry_sdk_instance, capture_exception, add_breadcrumb, posthog, slack_app, alerts_channel, aispendLogger, berrispendLogger, supabaseClient, liteDebuggerClient, llmonitorLogger
try:
# print_verbose(f"handle_failure args: {args}")
# print_verbose(f"handle_failure kwargs: {kwargs}")
success_handler = additional_details.pop("success_handler", None)
failure_handler = additional_details.pop("failure_handler", None)
additional_details["Event_Name"] = additional_details.pop(
"failed_event_name", "litellm.failed_query"
)
print_verbose(f"self.failure_callback: {litellm.failure_callback}")
for callback in litellm.failure_callback:
try:
if callback == "slack":
slack_msg = ""
if len(kwargs) > 0:
for key in kwargs:
slack_msg += f"{key}: {kwargs[key]}\n"
if len(args) > 0:
for i, arg in enumerate(args):
slack_msg += f"LiteLLM_Args_{str(i)}: {arg}"
for detail in additional_details:
slack_msg += f"{detail}: {additional_details[detail]}\n"
slack_msg += f"Traceback: {traceback_exception}"
slack_app.client.chat_postMessage(
channel=alerts_channel, text=slack_msg
)
elif callback == "sentry":
capture_exception(exception)
elif callback == "posthog":
print_verbose(
f"inside posthog, additional_details: {len(additional_details.keys())}"
)
ph_obj = {}
if len(kwargs) > 0:
ph_obj = kwargs
if len(args) > 0:
for i, arg in enumerate(args):
ph_obj["litellm_args_" + str(i)] = arg
for detail in additional_details:
ph_obj[detail] = additional_details[detail]
event_name = additional_details["Event_Name"]
print_verbose(f"ph_obj: {ph_obj}")
print_verbose(f"PostHog Event Name: {event_name}")
if "user_id" in additional_details:
posthog.capture(
additional_details["user_id"], event_name, ph_obj
)
else: # PostHog calls require a unique id to identify a user - https://posthog.com/docs/libraries/python
unique_id = str(uuid.uuid4())
posthog.capture(unique_id, event_name)
print_verbose(f"successfully logged to PostHog!")
elif callback == "berrispend":
print_verbose("reaches berrispend for logging!")
model = args[0] if len(args) > 0 else kwargs["model"]
messages = args[1] if len(args) > 1 else kwargs["messages"]
result = {
"model": model,
"created": time.time(),
"error": traceback_exception,
"usage": {
"prompt_tokens": prompt_token_calculator(
model, messages=messages
),
"completion_tokens": 0,
},
}
berrispendLogger.log_event(
model=model,
messages=messages,
response_obj=result,
start_time=start_time,
end_time=end_time,
print_verbose=print_verbose,
)
elif callback == "aispend":
print_verbose("reaches aispend for logging!")
model = args[0] if len(args) > 0 else kwargs["model"]
messages = args[1] if len(args) > 1 else kwargs["messages"]
result = {
"model": model,
"created": time.time(),
"usage": {
"prompt_tokens": prompt_token_calculator(
model, messages=messages
),
"completion_tokens": 0,
},
}
aispendLogger.log_event(
model=model,
response_obj=result,
start_time=start_time,
end_time=end_time,
print_verbose=print_verbose,
)
elif callback == "llmonitor":
print_verbose("reaches llmonitor for logging error!")
model = args[0] if len(args) > 0 else kwargs["model"]
input = (
args[1]
if len(args) > 1
else kwargs.get("messages", kwargs.get("input", None))
)
type = "embed" if "input" in kwargs else "llm"
llmonitorLogger.log_event(
type=type,
event="error",
user_id=kwargs.get("user", "default"),
model=model,
input=input,
error=traceback_exception,
run_id=kwargs["litellm_call_id"],
start_time=start_time,
end_time=end_time,
print_verbose=print_verbose,
)
elif callback == "supabase":
print_verbose("reaches supabase for logging!")
print_verbose(f"supabaseClient: {supabaseClient}")
model = args[0] if len(args) > 0 else kwargs["model"]
messages = args[1] if len(args) > 1 else kwargs["messages"]
result = {
"model": model,
"created": time.time(),
"error": traceback_exception,
"usage": {
"prompt_tokens": prompt_token_calculator(
model, messages=messages
),
"completion_tokens": 0,
},
}
supabaseClient.log_event(
model=model,
messages=messages,
end_user=kwargs.get("user", "default"),
response_obj=result,
start_time=start_time,
end_time=end_time,
litellm_call_id=kwargs["litellm_call_id"],
print_verbose=print_verbose,
)
except:
print_verbose(
f"Error Occurred while logging failure: {traceback.format_exc()}"
)
pass
if failure_handler and callable(failure_handler):
call_details = {
"exception": exception,
"additional_details": additional_details,
}
failure_handler(call_details)
pass
except Exception as e:
# LOGGING
exception_logging(logger_fn=user_logger_fn, exception=e)
pass
# NOTE: DEPRECATING this in favor of using success_handler() in Logging:
def handle_success(args, kwargs, result, start_time, end_time):
global heliconeLogger, aispendLogger, supabaseClient, liteDebuggerClient, llmonitorLogger
try:
model = args[0] if len(args) > 0 else kwargs["model"]
input = (
args[1]
if len(args) > 1
else kwargs.get("messages", kwargs.get("input", None))
)
success_handler = additional_details.pop("success_handler", None)
failure_handler = additional_details.pop("failure_handler", None)
additional_details["Event_Name"] = additional_details.pop(
"successful_event_name", "litellm.succes_query"
)
for callback in litellm.success_callback:
try:
if callback == "posthog":
ph_obj = {}
for detail in additional_details:
ph_obj[detail] = additional_details[detail]
event_name = additional_details["Event_Name"]
if "user_id" in additional_details:
posthog.capture(
additional_details["user_id"], event_name, ph_obj
)
else: # PostHog calls require a unique id to identify a user - https://posthog.com/docs/libraries/python
unique_id = str(uuid.uuid4())
posthog.capture(unique_id, event_name, ph_obj)
pass
elif callback == "slack":
slack_msg = ""
for detail in additional_details:
slack_msg += f"{detail}: {additional_details[detail]}\n"
slack_app.client.chat_postMessage(
channel=alerts_channel, text=slack_msg
)
elif callback == "helicone":
print_verbose("reaches helicone for logging!")
model = args[0] if len(args) > 0 else kwargs["model"]
messages = args[1] if len(args) > 1 else kwargs["messages"]
heliconeLogger.log_success(
model=model,
messages=messages,
response_obj=result,
start_time=start_time,
end_time=end_time,
print_verbose=print_verbose,
)
elif callback == "llmonitor":
print_verbose("reaches llmonitor for logging!")
model = args[0] if len(args) > 0 else kwargs["model"]
input = (
args[1]
if len(args) > 1
else kwargs.get("messages", kwargs.get("input", None))
)
# if contains input, it's 'embedding', otherwise 'llm'
type = "embed" if "input" in kwargs else "llm"
llmonitorLogger.log_event(
type=type,
event="end",
model=model,
input=input,
user_id=kwargs.get("user", "default"),
response_obj=result,
start_time=start_time,
end_time=end_time,
run_id=kwargs["litellm_call_id"],
print_verbose=print_verbose,
)
elif callback == "langfuse":
print_verbose("reaches langfuse for logging!")
langFuseLogger.log_event(
kwargs=kwargs,
response_obj=result,
start_time=start_time,
end_time=end_time,
print_verbose=print_verbose,
)
elif callback == "traceloop":
traceloopLogger.log_event(
kwargs=kwargs,
response_obj=result,
start_time=start_time,
end_time=end_time,
print_verbose=print_verbose,
)
elif callback == "aispend":
print_verbose("reaches aispend for logging!")
model = args[0] if len(args) > 0 else kwargs["model"]
aispendLogger.log_event(
model=model,
response_obj=result,
start_time=start_time,
end_time=end_time,
print_verbose=print_verbose,
)
except Exception as e:
# LOGGING
exception_logging(logger_fn=user_logger_fn, exception=e)
print_verbose(
f"[Non-Blocking] Success Callback Error - {traceback.format_exc()}"
)
pass
if success_handler and callable(success_handler):
success_handler(args, kwargs)
pass
except Exception as e:
# LOGGING
exception_logging(logger_fn=user_logger_fn, exception=e)
print_verbose(
f"[Non-Blocking] Success Callback Error - {traceback.format_exc()}"
)
pass
def acreate(*args, **kwargs): ## Thin client to handle the acreate langchain call
return litellm.acompletion(*args, **kwargs)
def prompt_token_calculator(model, messages):
# use tiktoken or anthropic's tokenizer depending on the model
text = " ".join(message["content"] for message in messages)
num_tokens = 0
if "claude" in model:
try:
import anthropic
except:
Exception("Anthropic import failed please run `pip install anthropic`")
from anthropic import Anthropic, HUMAN_PROMPT, AI_PROMPT
anthropic = Anthropic()
num_tokens = anthropic.count_tokens(text)
else:
num_tokens = len(encoding.encode(text))
return num_tokens
def valid_model(model):
try:
# for a given model name, check if the user has the right permissions to access the model
if (
model in litellm.open_ai_chat_completion_models
or model in litellm.open_ai_text_completion_models
):
openai.Model.retrieve(model)
else:
messages = [{"role": "user", "content": "Hello World"}]
litellm.completion(model=model, messages=messages)
except:
raise InvalidRequestError(message="", model=model, llm_provider="")
def check_valid_key(model: str, api_key: str):
"""
Checks if a given API key is valid for a specific model by making a litellm.completion call with max_tokens=10
Args:
model (str): The name of the model to check the API key against.
api_key (str): The API key to be checked.
Returns:
bool: True if the API key is valid for the model, False otherwise.
"""
messages = [{"role": "user", "content": "Hey, how's it going?"}]
try:
litellm.completion(model=model, messages=messages, api_key=api_key, max_tokens=10)
return True
except AuthenticationError as e:
return False
except Exception as e:
return False
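# Illustrative sketch (not part of the original module, never called anywhere): check_valid_key
# issues a tiny 10-token completion to verify a key, so this assumes network access and valid
# provider credentials; "OPENAI_API_KEY" and the model name are placeholder assumptions.
def _example_check_valid_key():
    candidate_key = os.environ.get("OPENAI_API_KEY", "sk-invalid-placeholder")
    return check_valid_key(model="gpt-3.5-turbo", api_key=candidate_key)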
# integration helper function
def modify_integration(integration_name, integration_params):
global supabaseClient
if integration_name == "supabase":
if "table_name" in integration_params:
Supabase.supabase_table_name = integration_params["table_name"]
# custom prompt helper function
def register_prompt_template(model: str, roles: dict, initial_prompt_value: str = "", final_prompt_value: str = ""):
"""
Register a prompt template to follow your custom format for a given model
Args:
model (str): The name of the model.
roles (dict): A dictionary mapping roles to their respective prompt values.
initial_prompt_value (str, optional): The initial prompt value. Defaults to "".
final_prompt_value (str, optional): The final prompt value. Defaults to "".
Returns:
dict: The updated custom prompt dictionary.
Example usage:
```
import litellm
    litellm.register_prompt_template(
        model="llama-2",
        initial_prompt_value="You are a good assistant", # [OPTIONAL]
        roles={
            "system": {
                "pre_message": "[INST] <<SYS>>\n", # [OPTIONAL]
                "post_message": "\n<</SYS>>\n [/INST]\n" # [OPTIONAL]
            },
            "user": {
                "pre_message": "[INST] ", # [OPTIONAL]
                "post_message": " [/INST]" # [OPTIONAL]
            },
            "assistant": {
                "pre_message": "\n", # [OPTIONAL]
                "post_message": "\n" # [OPTIONAL]
            }
        },
        final_prompt_value="Now answer as best you can:" # [OPTIONAL]
    )
```
"""
model, _ = get_llm_provider(model=model)
litellm.custom_prompt_dict[model] = {
"roles": roles,
"initial_prompt_value": initial_prompt_value,
"final_prompt_value": final_prompt_value
}
return litellm.custom_prompt_dict
####### [BETA] HOSTED PRODUCT ################ - https://docs.litellm.ai/docs/debugging/hosted_debugging
def get_all_keys(llm_provider=None):
try:
global last_fetched_at_keys
# if user is using hosted product -> instantiate their env with their hosted api keys - refresh every 5 minutes
print_verbose(f"Reaches get all keys, llm_provider: {llm_provider}")
user_email = (
os.getenv("LITELLM_EMAIL")
or litellm.email
or litellm.token
or os.getenv("LITELLM_TOKEN")
)
if user_email:
time_delta = 0
if last_fetched_at_keys != None:
current_time = time.time()
time_delta = current_time - last_fetched_at_keys
if (
time_delta > 300 or last_fetched_at_keys == None or llm_provider
): # if the llm provider is passed in , assume this happening due to an AuthError for that provider
# make the api call
last_fetched_at = time.time()
print_verbose(f"last_fetched_at: {last_fetched_at}")
response = requests.post(
url="http://api.litellm.ai/get_all_keys",
headers={"content-type": "application/json"},
data=json.dumps({"user_email": user_email}),
)
print_verbose(f"get model key response: {response.text}")
data = response.json()
# update model list
for key, value in data[
"model_keys"
].items(): # follows the LITELLM API KEY format - <UPPERCASE_PROVIDER_NAME>_API_KEY - e.g. HUGGINGFACE_API_KEY
os.environ[key] = value
# set model alias map
for model_alias, value in data["model_alias_map"].items():
litellm.model_alias_map[model_alias] = value
return "it worked!"
return None
return None
except:
print_verbose(
f"[Non-Blocking Error] get_all_keys error - {traceback.format_exc()}"
)
pass
def get_model_list():
global last_fetched_at
try:
# if user is using hosted product -> get their updated model list
user_email = (
os.getenv("LITELLM_EMAIL")
or litellm.email
or litellm.token
or os.getenv("LITELLM_TOKEN")
)
if user_email:
# make the api call
last_fetched_at = time.time()
print(f"last_fetched_at: {last_fetched_at}")
response = requests.post(
url="http://api.litellm.ai/get_model_list",
headers={"content-type": "application/json"},
data=json.dumps({"user_email": user_email}),
)
print_verbose(f"get_model_list response: {response.text}")
data = response.json()
# update model list
model_list = data["model_list"]
# # check if all model providers are in environment
# model_providers = data["model_providers"]
# missing_llm_provider = None
# for item in model_providers:
# if f"{item.upper()}_API_KEY" not in os.environ:
# missing_llm_provider = item
# break
# # update environment - if required
# threading.Thread(target=get_all_keys, args=(missing_llm_provider)).start()
return model_list
return [] # return empty list by default
    except:
        print_verbose(
            f"[Non-Blocking Error] get_model_list error - {traceback.format_exc()}"
        )
        return []  # non-blocking: fall back to an empty model list
####### EXCEPTION MAPPING ################
def exception_type(
model,
original_exception,
custom_llm_provider,
completion_kwargs={},
):
global user_logger_fn, liteDebuggerClient
exception_mapping_worked = False
print()
print("\033[1;31mGive Feedback / Get Help: https://github.com/BerriAI/litellm/issues/new\033[0m")
print("LiteLLM.Info: If you need to debug this error, use `litellm.set_verbose=True'.")
print()
try:
if isinstance(original_exception, OriginalError):
# Handle the OpenAIError
exception_mapping_worked = True
if custom_llm_provider == "openrouter":
if original_exception.http_status == 413:
raise InvalidRequestError(
message=str(original_exception),
model=model,
llm_provider="openrouter"
)
original_exception.llm_provider = "openrouter"
elif custom_llm_provider == "azure":
original_exception.llm_provider = "azure"
else:
original_exception.llm_provider = "openai"
if "This model's maximum context length is" in original_exception._message:
raise ContextWindowExceededError(
message=str(original_exception),
model=model,
llm_provider=original_exception.llm_provider
)
raise original_exception
elif model:
error_str = str(original_exception)
if isinstance(original_exception, BaseException):
exception_type = type(original_exception).__name__
else:
exception_type = ""
if custom_llm_provider == "anthropic": # one of the anthropics
if hasattr(original_exception, "message"):
if "prompt is too long" in original_exception.message:
exception_mapping_worked = True
raise ContextWindowExceededError(
message=original_exception.message,
model=model,
llm_provider="anthropic"
)
if "Invalid API Key" in original_exception.message:
exception_mapping_worked = True
raise AuthenticationError(
message=original_exception.message,
model=model,
llm_provider="anthropic"
)
if hasattr(original_exception, "status_code"):
print_verbose(f"status_code: {original_exception.status_code}")
if original_exception.status_code == 401:
exception_mapping_worked = True
raise AuthenticationError(
message=f"AnthropicException - {original_exception.message}",
llm_provider="anthropic",
model=model
)
elif original_exception.status_code == 400:
exception_mapping_worked = True
raise InvalidRequestError(
message=f"AnthropicException - {original_exception.message}",
model=model,
llm_provider="anthropic",
)
elif original_exception.status_code == 408:
exception_mapping_worked = True
raise Timeout(
message=f"AnthropicException - {original_exception.message}",
model=model,
llm_provider="anthropic"
)
elif original_exception.status_code == 413:
exception_mapping_worked = True
raise InvalidRequestError(
message=f"AnthropicException - {original_exception.message}",
model=model,
llm_provider="anthropic",
)
elif original_exception.status_code == 429:
exception_mapping_worked = True
raise RateLimitError(
message=f"AnthropicException - {original_exception.message}",
llm_provider="anthropic",
model=model
)
elif original_exception.status_code == 500:
exception_mapping_worked = True
raise ServiceUnavailableError(
message=f"AnthropicException - {original_exception.message}",
llm_provider="anthropic",
model=model
)
else:
exception_mapping_worked = True
raise APIError(
status_code=original_exception.status_code,
message=f"AnthropicException - {original_exception.message}",
llm_provider="anthropic",
model=model
)
elif custom_llm_provider == "replicate":
if "Incorrect authentication token" in error_str:
exception_mapping_worked = True
raise AuthenticationError(
message=f"ReplicateException - {error_str}",
llm_provider="replicate",
model=model
)
elif "input is too long" in error_str:
exception_mapping_worked = True
raise ContextWindowExceededError(
message=f"ReplicateException - {error_str}",
model=model,
llm_provider="replicate",
)
elif exception_type == "ModelError":
exception_mapping_worked = True
raise InvalidRequestError(
message=f"ReplicateException - {error_str}",
model=model,
llm_provider="replicate",
)
elif "Request was throttled" in error_str:
exception_mapping_worked = True
raise RateLimitError(
message=f"ReplicateException - {error_str}",
llm_provider="replicate",
model=model
)
elif hasattr(original_exception, "status_code"):
if original_exception.status_code == 401:
exception_mapping_worked = True
raise AuthenticationError(
message=f"ReplicateException - {original_exception.message}",
llm_provider="replicate",
model=model
)
elif original_exception.status_code == 400 or original_exception.status_code == 422:
exception_mapping_worked = True
raise InvalidRequestError(
message=f"ReplicateException - {original_exception.message}",
model=model,
llm_provider="replicate",
)
elif original_exception.status_code == 408:
exception_mapping_worked = True
raise Timeout(
message=f"ReplicateException - {original_exception.message}",
model=model,
llm_provider="replicate"
)
elif original_exception.status_code == 413:
exception_mapping_worked = True
raise InvalidRequestError(
message=f"ReplicateException - {original_exception.message}",
model=model,
llm_provider="replicate",
)
elif original_exception.status_code == 429:
exception_mapping_worked = True
raise RateLimitError(
message=f"ReplicateException - {original_exception.message}",
llm_provider="replicate",
model=model
)
elif original_exception.status_code == 500:
exception_mapping_worked = True
raise ServiceUnavailableError(
message=f"ReplicateException - {original_exception.message}",
llm_provider="replicate",
model=model
)
exception_mapping_worked = True
raise APIError(
status_code=500,
message=f"ReplicateException - {str(original_exception)}",
llm_provider="replicate",
model=model
)
elif custom_llm_provider == "bedrock":
if "Unable to locate credentials" in error_str:
exception_mapping_worked = True
raise InvalidRequestError(
message=f"BedrockException - {error_str}",
model=model,
llm_provider="bedrock"
)
if "The security token included in the request is invalid" in error_str:
exception_mapping_worked = True
raise AuthenticationError(
message=f"BedrockException Invalid Authentication - {error_str}",
model=model,
llm_provider="bedrock"
)
if "throttlingException" in error_str:
exception_mapping_worked = True
raise RateLimitError(
message=f"BedrockException: Rate Limit Error - {error_str}",
model=model,
llm_provider="bedrock"
)
elif custom_llm_provider == "sagemaker":
if "Unable to locate credentials" in error_str:
exception_mapping_worked = True
raise InvalidRequestError(
message=f"SagemakerException - {error_str}",
model=model,
llm_provider="sagemaker"
)
elif custom_llm_provider == "vertex_ai":
if "Vertex AI API has not been used in project" in error_str or "Unable to find your project" in error_str:
exception_mapping_worked = True
raise InvalidRequestError(
message=f"VertexAIException - {error_str}",
model=model,
llm_provider="vertex_ai"
)
elif custom_llm_provider == "palm":
if "503 Getting metadata" in error_str:
# auth errors look like this
# 503 Getting metadata from plugin failed with error: Reauthentication is needed. Please run `gcloud auth application-default login` to reauthenticate.
exception_mapping_worked = True
raise InvalidRequestError(
message=f"PalmException - Invalid api key",
model=model,
llm_provider="palm"
)
if "400 Request payload size exceeds" in error_str:
exception_mapping_worked = True
raise ContextWindowExceededError(
message=f"PalmException - {error_str}",
model=model,
llm_provider="palm",
)
                # Failed: Error occurred: 400 Request payload size exceeds the limit: 20000 bytes
elif custom_llm_provider == "cohere": # Cohere
if (
"invalid api token" in error_str
or "No API key provided." in error_str
):
exception_mapping_worked = True
raise AuthenticationError(
message=f"CohereException - {original_exception.message}",
llm_provider="cohere",
model=model
)
elif "too many tokens" in error_str:
exception_mapping_worked = True
raise ContextWindowExceededError(
message=f"CohereException - {original_exception.message}",
model=model,
llm_provider="cohere",
)
elif hasattr(original_exception, "status_code"):
if original_exception.status_code == 400 or original_exception.status_code == 498:
exception_mapping_worked = True
raise InvalidRequestError(
message=f"CohereException - {original_exception.message}",
llm_provider="cohere",
model=model
)
elif original_exception.status_code == 500:
exception_mapping_worked = True
raise ServiceUnavailableError(
message=f"CohereException - {original_exception.message}",
llm_provider="cohere",
model=model
)
elif (
"CohereConnectionError" in exception_type
): # cohere seems to fire these errors when we load test it (1k+ messages / min)
exception_mapping_worked = True
raise RateLimitError(
message=f"CohereException - {original_exception.message}",
llm_provider="cohere",
model=model
)
elif "invalid type:" in error_str:
exception_mapping_worked = True
raise InvalidRequestError(
message=f"CohereException - {original_exception.message}",
llm_provider="cohere",
model=model
)
elif "Unexpected server error" in error_str:
exception_mapping_worked = True
raise ServiceUnavailableError(
message=f"CohereException - {original_exception.message}",
llm_provider="cohere",
model=model
)
else:
if hasattr(original_exception, "status_code"):
exception_mapping_worked = True
raise APIError(
status_code=original_exception.status_code,
message=f"CohereException - {original_exception.message}",
llm_provider="cohere",
model=model
)
raise original_exception
elif custom_llm_provider == "huggingface":
if "length limit exceeded" in error_str:
exception_mapping_worked = True
raise ContextWindowExceededError(
message=error_str,
model=model,
llm_provider="huggingface"
)
elif "A valid user token is required" in error_str:
exception_mapping_worked = True
raise InvalidRequestError(
message=error_str,
llm_provider="huggingface",
model=model
)
if hasattr(original_exception, "status_code"):
if original_exception.status_code == 401:
exception_mapping_worked = True
raise AuthenticationError(
message=f"HuggingfaceException - {original_exception.message}",
llm_provider="huggingface",
model=model
)
elif original_exception.status_code == 400:
exception_mapping_worked = True
raise InvalidRequestError(
message=f"HuggingfaceException - {original_exception.message}",
model=model,
llm_provider="huggingface",
)
elif original_exception.status_code == 408:
exception_mapping_worked = True
raise Timeout(
message=f"HuggingfaceException - {original_exception.message}",
model=model,
llm_provider="huggingface"
)
elif original_exception.status_code == 429:
exception_mapping_worked = True
raise RateLimitError(
message=f"HuggingfaceException - {original_exception.message}",
llm_provider="huggingface",
model=model
)
else:
exception_mapping_worked = True
raise APIError(
status_code=original_exception.status_code,
message=f"HuggingfaceException - {original_exception.message}",
llm_provider="huggingface",
model=model
)
exception_mapping_worked = True
raise APIError(status_code=500, message=error_str, model=model, llm_provider=custom_llm_provider)
elif custom_llm_provider == "ai21":
if hasattr(original_exception, "message"):
if "Prompt has too many tokens" in original_exception.message:
exception_mapping_worked = True
raise ContextWindowExceededError(
message=f"AI21Exception - {original_exception.message}",
model=model,
llm_provider="ai21"
)
if "Bad or missing API token." in original_exception.message:
exception_mapping_worked = True
raise InvalidRequestError(
message=f"AI21Exception - {original_exception.message}",
model=model,
llm_provider="ai21"
)
if hasattr(original_exception, "status_code"):
if original_exception.status_code == 401:
exception_mapping_worked = True
raise AuthenticationError(
message=f"AI21Exception - {original_exception.message}",
llm_provider="ai21",
model=model
)
elif original_exception.status_code == 408:
exception_mapping_worked = True
raise Timeout(
message=f"AI21Exception - {original_exception.message}",
model=model,
llm_provider="ai21"
)
if original_exception.status_code == 422:
exception_mapping_worked = True
raise InvalidRequestError(
message=f"AI21Exception - {original_exception.message}",
model=model,
llm_provider="ai21",
)
elif original_exception.status_code == 429:
exception_mapping_worked = True
                        raise RateLimitError(
                            message=f"AI21Exception - {original_exception.message}",
                            llm_provider="ai21",
                            model=model
                        )
else:
exception_mapping_worked = True
raise APIError(
status_code=original_exception.status_code,
message=f"AI21Exception - {original_exception.message}",
llm_provider="ai21",
model=model
)
elif custom_llm_provider == "nlp_cloud":
if "detail" in error_str:
if "Input text length should not exceed" in error_str:
exception_mapping_worked = True
raise ContextWindowExceededError(
message=f"NLPCloudException - {error_str}",
model=model,
llm_provider="nlp_cloud"
)
elif "value is not a valid" in error_str:
exception_mapping_worked = True
raise InvalidRequestError(
message=f"NLPCloudException - {error_str}",
model=model,
llm_provider="nlp_cloud"
)
else:
exception_mapping_worked = True
raise APIError(
status_code=500,
message=f"NLPCloudException - {error_str}",
model=model,
llm_provider="nlp_cloud"
)
if hasattr(original_exception, "status_code"): # https://docs.nlpcloud.com/?shell#errors
if original_exception.status_code == 400 or original_exception.status_code == 406 or original_exception.status_code == 413 or original_exception.status_code == 422:
exception_mapping_worked = True
raise InvalidRequestError(
message=f"NLPCloudException - {original_exception.message}",
llm_provider="nlp_cloud",
model=model
)
elif original_exception.status_code == 401 or original_exception.status_code == 403:
exception_mapping_worked = True
raise AuthenticationError(
message=f"NLPCloudException - {original_exception.message}",
llm_provider="nlp_cloud",
model=model
)
elif original_exception.status_code == 522 or original_exception.status_code == 524:
exception_mapping_worked = True
raise Timeout(
message=f"NLPCloudException - {original_exception.message}",
model=model,
llm_provider="nlp_cloud"
)
elif original_exception.status_code == 429 or original_exception.status_code == 402:
exception_mapping_worked = True
                        raise RateLimitError(
                            message=f"NLPCloudException - {original_exception.message}",
                            llm_provider="nlp_cloud",
                            model=model
                        )
elif original_exception.status_code == 500 or original_exception.status_code == 503:
exception_mapping_worked = True
raise APIError(
status_code=original_exception.status_code,
message=f"NLPCloudException - {original_exception.message}",
llm_provider="nlp_cloud",
model=model
)
elif original_exception.status_code == 504 or original_exception.status_code == 520:
exception_mapping_worked = True
raise ServiceUnavailableError(
message=f"NLPCloudException - {original_exception.message}",
model=model,
llm_provider="nlp_cloud"
)
else:
exception_mapping_worked = True
raise APIError(
status_code=original_exception.status_code,
message=f"NLPCloudException - {original_exception.message}",
llm_provider="nlp_cloud",
model=model
)
elif custom_llm_provider == "together_ai":
import json
error_response = json.loads(error_str)
if "error" in error_response and "`inputs` tokens + `max_new_tokens` must be <=" in error_response["error"]:
exception_mapping_worked = True
raise ContextWindowExceededError(
message=f"TogetherAIException - {error_response['error']}",
model=model,
llm_provider="together_ai"
)
elif "error" in error_response and "invalid private key" in error_response["error"]:
exception_mapping_worked = True
raise AuthenticationError(
message=f"TogetherAIException - {error_response['error']}",
llm_provider="together_ai",
model=model
)
elif "error" in error_response and "INVALID_ARGUMENT" in error_response["error"]:
exception_mapping_worked = True
raise InvalidRequestError(
message=f"TogetherAIException - {error_response['error']}",
model=model,
llm_provider="together_ai"
)
elif "error" in error_response and "API key doesn't match expected format." in error_response["error"]:
exception_mapping_worked = True
raise InvalidRequestError(
message=f"TogetherAIException - {error_response['error']}",
model=model,
llm_provider="together_ai"
)
elif "error_type" in error_response and error_response["error_type"] == "validation":
exception_mapping_worked = True
raise InvalidRequestError(
message=f"TogetherAIException - {error_response['error']}",
model=model,
llm_provider="together_ai"
)
elif original_exception.status_code == 408:
exception_mapping_worked = True
raise Timeout(
message=f"TogetherAIException - {original_exception.message}",
model=model,
llm_provider="together_ai"
)
elif original_exception.status_code == 429:
exception_mapping_worked = True
raise RateLimitError(
message=f"TogetherAIException - {original_exception.message}",
llm_provider="together_ai",
model=model
)
else:
exception_mapping_worked = True
raise APIError(
status_code=original_exception.status_code,
message=f"TogetherAIException - {original_exception.message}",
llm_provider="together_ai",
model=model
)
elif custom_llm_provider == "aleph_alpha":
if "This is longer than the model's maximum context length" in error_str:
exception_mapping_worked = True
raise ContextWindowExceededError(
message=f"AlephAlphaException - {original_exception.message}",
llm_provider="aleph_alpha",
model=model
)
elif "InvalidToken" in error_str or "No token provided" in error_str:
exception_mapping_worked = True
raise InvalidRequestError(
message=f"AlephAlphaException - {original_exception.message}",
llm_provider="aleph_alpha",
model=model
)
elif hasattr(original_exception, "status_code"):
print(f"status code: {original_exception.status_code}")
if original_exception.status_code == 401:
exception_mapping_worked = True
raise AuthenticationError(
message=f"AlephAlphaException - {original_exception.message}",
llm_provider="aleph_alpha",
model=model
)
elif original_exception.status_code == 400:
exception_mapping_worked = True
raise InvalidRequestError(
message=f"AlephAlphaException - {original_exception.message}",
llm_provider="aleph_alpha",
model=model
)
elif original_exception.status_code == 429:
exception_mapping_worked = True
raise RateLimitError(
message=f"AlephAlphaException - {original_exception.message}",
llm_provider="aleph_alpha",
model=model
)
elif original_exception.status_code == 500:
exception_mapping_worked = True
raise ServiceUnavailableError(
message=f"AlephAlphaException - {original_exception.message}",
llm_provider="aleph_alpha",
model=model
)
raise original_exception
raise original_exception
elif custom_llm_provider == "ollama":
if isinstance(original_exception, dict):
error_str = original_exception.get("error", "")
else:
error_str = str(original_exception)
if "no such file or directory" in error_str:
exception_mapping_worked = True
raise InvalidRequestError(
message=f"OllamaException: Invalid Model/Model not loaded - {original_exception}",
model=model,
llm_provider="ollama"
)
elif "Failed to establish a new connection" in error_str:
exception_mapping_worked = True
raise ServiceUnavailableError(
message=f"OllamaException: {original_exception}",
llm_provider="ollama",
model=model
)
elif "Invalid response object from API" in error_str:
exception_mapping_worked = True
raise InvalidRequestError(
message=f"OllamaException: {original_exception}",
llm_provider="ollama",
model=model
)
elif custom_llm_provider == "vllm":
if hasattr(original_exception, "status_code"):
if original_exception.status_code == 0:
exception_mapping_worked = True
raise APIConnectionError(
message=f"VLLMException - {original_exception.message}",
llm_provider="vllm",
model=model
)
elif custom_llm_provider == "ollama":
if "no attribute 'async_get_ollama_response_stream" in error_str:
raise ImportError("Import error - trying to use async for ollama. import async_generator failed. Try 'pip install async_generator'")
elif custom_llm_provider == "custom_openai" or custom_llm_provider == "maritalk":
if hasattr(original_exception, "status_code"):
exception_mapping_worked = True
if original_exception.status_code == 401:
exception_mapping_worked = True
raise AuthenticationError(
message=f"CustomOpenAIException - {original_exception.message}",
llm_provider="custom_openai",
model=model
)
elif original_exception.status_code == 408:
exception_mapping_worked = True
raise Timeout(
message=f"CustomOpenAIException - {original_exception.message}",
model=model,
llm_provider="custom_openai"
)
if original_exception.status_code == 422:
exception_mapping_worked = True
raise InvalidRequestError(
message=f"CustomOpenAIException - {original_exception.message}",
model=model,
llm_provider="custom_openai",
)
elif original_exception.status_code == 429:
exception_mapping_worked = True
raise RateLimitError(
message=f"CustomOpenAIException - {original_exception.message}",
model=model,
llm_provider="custom_openai",
)
else:
exception_mapping_worked = True
raise APIError(
status_code=original_exception.status_code,
message=f"CustomOpenAIException - {original_exception.message}",
llm_provider="custom_openai",
model=model
)
exception_mapping_worked = True
if "InvalidRequestError.__init__() missing 1 required positional argument: 'param'" in str(original_exception): # deal with edge-case invalid request error bug in openai-python sdk
raise InvalidRequestError(
message=f"OpenAIException: This can happen due to missing AZURE_API_VERSION: {str(original_exception)}",
model=model,
llm_provider=custom_llm_provider
)
else:
raise APIError(status_code=500, message=str(original_exception), llm_provider=custom_llm_provider, model=model)
except Exception as e:
# LOGGING
exception_logging(
logger_fn=user_logger_fn,
additional_args={
"exception_mapping_worked": exception_mapping_worked,
"original_exception": original_exception,
},
exception=e,
)
## AUTH ERROR
if isinstance(e, AuthenticationError) and (
litellm.email or "LITELLM_EMAIL" in os.environ
):
threading.Thread(target=get_all_keys, args=(e.llm_provider,)).start()
# don't let an error with mapping interrupt the user from receiving an error from the llm api calls
if exception_mapping_worked:
raise e
else:
raise original_exception
####### CRASH REPORTING ################
def safe_crash_reporting(model=None, exception=None, custom_llm_provider=None):
data = {
"model": model,
"exception": str(exception),
"custom_llm_provider": custom_llm_provider,
}
threading.Thread(target=litellm_telemetry, args=(data,), daemon=True).start()
def get_or_generate_uuid():
temp_dir = os.path.join(os.path.abspath(os.sep), "tmp")
uuid_file = os.path.join(temp_dir, "litellm_uuid.txt")
try:
# Try to open the file and load the UUID
with open(uuid_file, "r") as file:
uuid_value = file.read()
if uuid_value:
uuid_value = uuid_value.strip()
else:
raise FileNotFoundError
except FileNotFoundError:
# Generate a new UUID if the file doesn't exist or is empty
try:
new_uuid = uuid.uuid4()
uuid_value = str(new_uuid)
with open(uuid_file, "w") as file:
file.write(uuid_value)
        except:  # if writing to tmp/litellm_uuid.txt fails, retry writing litellm_uuid.txt in the current directory
try:
new_uuid = uuid.uuid4()
uuid_value = str(new_uuid)
with open("litellm_uuid.txt", "w") as file:
file.write(uuid_value)
except: # if this 3rd attempt fails just pass
# Good first issue for someone to improve this function :)
return
except:
# [Non-Blocking Error]
return
return uuid_value
def litellm_telemetry(data):
# Load or generate the UUID
uuid_value = ""
try:
uuid_value = get_or_generate_uuid()
except:
uuid_value = str(uuid.uuid4())
try:
# Prepare the data to send to litellm logging api
try:
pkg_version = importlib.metadata.version("litellm")
except:
pkg_version = None
if "model" not in data:
data["model"] = None
payload = {
"uuid": uuid_value,
"data": data,
"version:": pkg_version
}
# Make the POST request to litellm logging api
response = requests.post(
"https://litellm.berri.ai/logging",
headers={"Content-Type": "application/json"},
json=payload,
)
response.raise_for_status() # Raise an exception for HTTP errors
except:
# [Non-Blocking Error]
return
######### Secret Manager ############################
# checks if user has passed in a secret manager client
# if passed in then checks the secret there
def get_secret(secret_name):
if litellm.secret_manager_client != None:
# TODO: check which secret manager is being used
# currently only supports Infisical
try:
secret = litellm.secret_manager_client.get_secret(secret_name).secret_value
except:
secret = None
return secret
else:
return os.environ.get(secret_name)
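# Illustrative sketch (not part of the original module, never called anywhere): when no
# secret_manager_client is configured, get_secret is just an environment lookup. The
# secret name "COHERE_API_KEY" below is an arbitrary example, not a required variable.
def _example_get_secret():
    # Equivalent to os.environ.get("COHERE_API_KEY") unless a secret manager client is set
    return get_secret("COHERE_API_KEY")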
######## Streaming Class ############################
# wraps the completion stream to return the correct format for the model
# replicate/anthropic/cohere
class CustomStreamWrapper:
def __init__(self, completion_stream, model, custom_llm_provider=None, logging_obj=None):
self.model = model
self.custom_llm_provider = custom_llm_provider
self.logging_obj = logging_obj
self.completion_stream = completion_stream
self.sent_first_chunk = False
self.sent_last_chunk = False
if self.logging_obj:
# Log the type of the received item
self.logging_obj.post_call(str(type(completion_stream)))
def __iter__(self):
return self
def __aiter__(self):
return self
def logging(self, text):
if self.logging_obj:
self.logging_obj.post_call(text)
def handle_anthropic_chunk(self, chunk):
str_line = chunk.decode("utf-8") # Convert bytes to string
text = ""
is_finished = False
finish_reason = None
if str_line.startswith("data:"):
data_json = json.loads(str_line[5:])
text = data_json.get("completion", "")
if data_json.get("stop_reason", None):
is_finished = True
finish_reason = data_json["stop_reason"]
return {"text": text, "is_finished": is_finished, "finish_reason": finish_reason}
elif "error" in str_line:
raise ValueError(f"Unable to parse response. Original response: {str_line}")
else:
return {"text": text, "is_finished": is_finished, "finish_reason": finish_reason}
def handle_together_ai_chunk(self, chunk):
chunk = chunk.decode("utf-8")
text = ""
is_finished = False
finish_reason = None
if "text" in chunk:
text_index = chunk.find('"text":"') # this checks if text: exists
text_start = text_index + len('"text":"')
text_end = chunk.find('"}', text_start)
if text_index != -1 and text_end != -1:
extracted_text = chunk[text_start:text_end]
text = extracted_text
return {"text": text, "is_finished": is_finished, "finish_reason": finish_reason}
elif "[DONE]" in chunk:
return {"text": text, "is_finished": True, "finish_reason": "stop"}
elif "error" in chunk:
raise ValueError(chunk)
else:
return {"text": text, "is_finished": is_finished, "finish_reason": finish_reason}
def handle_huggingface_chunk(self, chunk):
try:
chunk = chunk.decode("utf-8")
text = ""
is_finished = False
finish_reason = ""
print_verbose(f"chunk: {chunk}")
if chunk.startswith("data:"):
data_json = json.loads(chunk[5:])
print_verbose(f"data json: {data_json}")
if "token" in data_json and "text" in data_json["token"]:
text = data_json["token"]["text"]
if data_json.get("details", False) and data_json["details"].get("finish_reason", False):
is_finished = True
finish_reason = data_json["details"]["finish_reason"]
elif data_json.get("generated_text", False): # if full generated text exists, then stream is complete
text = "" # don't return the final bos token
is_finished = True
finish_reason = "stop"
return {"text": text, "is_finished": is_finished, "finish_reason": finish_reason}
elif "error" in chunk:
raise ValueError(chunk)
return {"text": text, "is_finished": is_finished, "finish_reason": finish_reason}
except Exception as e:
traceback.print_exc()
# raise(e)
def handle_ai21_chunk(self, chunk): # fake streaming
chunk = chunk.decode("utf-8")
data_json = json.loads(chunk)
try:
text = data_json["completions"][0]["data"]["text"]
is_finished = True
finish_reason = "stop"
return {"text": text, "is_finished": is_finished, "finish_reason": finish_reason}
except:
raise ValueError(f"Unable to parse response. Original response: {chunk}")
def handle_maritalk_chunk(self, chunk): # fake streaming
chunk = chunk.decode("utf-8")
data_json = json.loads(chunk)
try:
text = data_json["answer"]
is_finished = True
finish_reason = "stop"
return {"text": text, "is_finished": is_finished, "finish_reason": finish_reason}
except:
raise ValueError(f"Unable to parse response. Original response: {chunk}")
def handle_nlp_cloud_chunk(self, chunk):
chunk = chunk.decode("utf-8")
data_json = json.loads(chunk)
try:
text = data_json["generated_text"]
is_finished = True
finish_reason = "stop"
return {"text": text, "is_finished": is_finished, "finish_reason": finish_reason}
except:
raise ValueError(f"Unable to parse response. Original response: {chunk}")
def handle_aleph_alpha_chunk(self, chunk):
chunk = chunk.decode("utf-8")
data_json = json.loads(chunk)
try:
text = data_json["completions"][0]["completion"]
is_finished = True
finish_reason = "stop"
return {"text": text, "is_finished": is_finished, "finish_reason": finish_reason}
except:
raise ValueError(f"Unable to parse response. Original response: {chunk}")
def handle_cohere_chunk(self, chunk):
chunk = chunk.decode("utf-8")
data_json = json.loads(chunk)
try:
text = ""
is_finished = False
finish_reason = ""
if "text" in data_json:
text = data_json["text"]
elif "is_finished" in data_json:
is_finished = data_json["is_finished"]
finish_reason = data_json["finish_reason"]
else:
raise Exception(data_json)
return {"text": text, "is_finished": is_finished, "finish_reason": finish_reason}
except:
raise ValueError(f"Unable to parse response. Original response: {chunk}")
def handle_replicate_chunk(self, chunk):
try:
text = ""
is_finished = False
finish_reason = ""
if "output" in chunk:
text = chunk['output']
if "status" in chunk:
if chunk["status"] == "succeeded":
is_finished = True
finish_reason = "stop"
elif chunk.get("error", None):
raise Exception(chunk["error"])
return {"text": text, "is_finished": is_finished, "finish_reason": finish_reason}
except:
raise ValueError(f"Unable to parse response. Original response: {chunk}")
def handle_custom_openai_chat_completion_chunk(self, chunk):
try:
str_line = chunk.decode("utf-8") # Convert bytes to string
text = ""
is_finished = False
finish_reason = None
if str_line.startswith("data:"):
data_json = json.loads(str_line[5:])
print(f"delta content: {data_json['choices'][0]['delta']}")
text = data_json["choices"][0]["delta"].get("content", "")
if data_json["choices"][0].get("finish_reason", None):
is_finished = True
finish_reason = data_json["choices"][0]["finish_reason"]
return {"text": text, "is_finished": is_finished, "finish_reason": finish_reason}
elif "error" in str_line:
raise ValueError(f"Unable to parse response. Original response: {str_line}")
else:
return {"text": text, "is_finished": is_finished, "finish_reason": finish_reason}
except:
traceback.print_exc()
pass
def handle_openai_text_completion_chunk(self, chunk):
try:
return chunk["choices"][0]["text"]
except:
raise ValueError(f"Unable to parse response. Original response: {chunk}")
def handle_openai_chat_completion_chunk(self, chunk):
try:
return chunk["choices"][0]["delta"]["content"]
except:
return ""
def handle_baseten_chunk(self, chunk):
try:
chunk = chunk.decode("utf-8")
if len(chunk) > 0:
if chunk.startswith("data:"):
data_json = json.loads(chunk[5:])
if "token" in data_json and "text" in data_json["token"]:
return data_json["token"]["text"]
else:
return ""
data_json = json.loads(chunk)
if "model_output" in data_json:
if isinstance(data_json["model_output"], dict) and "data" in data_json["model_output"] and isinstance(data_json["model_output"]["data"], list):
return data_json["model_output"]["data"][0]
elif isinstance(data_json["model_output"], str):
return data_json["model_output"]
elif "completion" in data_json and isinstance(data_json["completion"], str):
return data_json["completion"]
else:
raise ValueError(f"Unable to parse response. Original response: {chunk}")
else:
return ""
else:
return ""
except:
traceback.print_exc()
return ""
def handle_bedrock_stream(self, chunk):
chunk = chunk.get('chunk')
if chunk:
chunk_data = json.loads(chunk.get('bytes').decode())
text = ""
is_finished = False
finish_reason = ""
if "outputText" in chunk_data:
text = chunk_data['outputText']
# anthropic mapping
elif "completion" in chunk_data:
text = chunk_data['completion'] # bedrock.anthropic
stop_reason = chunk_data.get("stop_reason", None)
if stop_reason != None:
is_finished = True
finish_reason = stop_reason
elif chunk_data.get("completionReason", None):
is_finished = True
finish_reason = chunk_data["completionReason"]
elif chunk.get("error", None):
raise Exception(chunk["error"])
return {"text": text, "is_finished": is_finished, "finish_reason": finish_reason}
return ""
## needs to handle the empty string case (even starting chunk can be an empty string)
def __next__(self):
model_response = ModelResponse(stream=True, model=self.model)
try:
while True: # loop until a non-empty string is found
# return this for all models
completion_obj = {"content": ""}
if self.custom_llm_provider and self.custom_llm_provider == "anthropic":
chunk = next(self.completion_stream)
response_obj = self.handle_anthropic_chunk(chunk)
completion_obj["content"] = response_obj["text"]
if response_obj["is_finished"]:
model_response.choices[0].finish_reason = response_obj["finish_reason"]
elif self.model == "replicate" or self.custom_llm_provider == "replicate":
chunk = next(self.completion_stream)
response_obj = self.handle_replicate_chunk(chunk)
completion_obj["content"] = response_obj["text"]
if response_obj["is_finished"]:
model_response.choices[0].finish_reason = response_obj["finish_reason"]
elif (
self.custom_llm_provider and self.custom_llm_provider == "together_ai"):
chunk = next(self.completion_stream)
response_obj = self.handle_together_ai_chunk(chunk)
completion_obj["content"] = response_obj["text"]
if response_obj["is_finished"]:
model_response.choices[0].finish_reason = response_obj["finish_reason"]
elif self.custom_llm_provider and self.custom_llm_provider == "huggingface":
chunk = next(self.completion_stream)
response_obj = self.handle_huggingface_chunk(chunk)
completion_obj["content"] = response_obj["text"]
if response_obj["is_finished"]:
model_response.choices[0].finish_reason = response_obj["finish_reason"]
elif self.custom_llm_provider and self.custom_llm_provider == "baseten": # baseten doesn't provide streaming
chunk = next(self.completion_stream)
completion_obj["content"] = self.handle_baseten_chunk(chunk)
elif self.custom_llm_provider and self.custom_llm_provider == "ai21": #ai21 doesn't provide streaming
chunk = next(self.completion_stream)
response_obj = self.handle_ai21_chunk(chunk)
completion_obj["content"] = response_obj["text"]
if response_obj["is_finished"]:
model_response.choices[0].finish_reason = response_obj["finish_reason"]
elif self.custom_llm_provider and self.custom_llm_provider == "maritalk":
chunk = next(self.completion_stream)
response_obj = self.handle_maritalk_chunk(chunk)
completion_obj["content"] = response_obj["text"]
if response_obj["is_finished"]:
model_response.choices[0].finish_reason = response_obj["finish_reason"]
elif self.custom_llm_provider and self.custom_llm_provider == "vllm":
chunk = next(self.completion_stream)
completion_obj["content"] = chunk[0].outputs[0].text
elif self.custom_llm_provider and self.custom_llm_provider == "aleph_alpha": #aleph alpha doesn't provide streaming
chunk = next(self.completion_stream)
response_obj = self.handle_aleph_alpha_chunk(chunk)
completion_obj["content"] = response_obj["text"]
if response_obj["is_finished"]:
model_response.choices[0].finish_reason = response_obj["finish_reason"]
elif self.custom_llm_provider and self.custom_llm_provider == "text-completion-openai":
chunk = next(self.completion_stream)
completion_obj["content"] = self.handle_openai_text_completion_chunk(chunk)
elif self.model in litellm.nlp_cloud_models or self.custom_llm_provider == "nlp_cloud":
try:
chunk = next(self.completion_stream)
response_obj = self.handle_nlp_cloud_chunk(chunk)
completion_obj["content"] = response_obj["text"]
if response_obj["is_finished"]:
model_response.choices[0].finish_reason = response_obj["finish_reason"]
except Exception as e:
if self.sent_last_chunk:
raise e
else:
if self.sent_first_chunk is False:
raise Exception("An unknown error occurred with the stream")
model_response.choices[0].finish_reason = "stop"
self.sent_last_chunk = True
elif self.custom_llm_provider and self.custom_llm_provider == "vertex_ai":
try:
chunk = next(self.completion_stream)
completion_obj["content"] = str(chunk)
except StopIteration as e:
if self.sent_last_chunk:
raise e
else:
model_response.choices[0].finish_reason = "stop"
self.sent_last_chunk = True
elif self.custom_llm_provider == "cohere":
chunk = next(self.completion_stream)
response_obj = self.handle_cohere_chunk(chunk)
completion_obj["content"] = response_obj["text"]
if response_obj["is_finished"]:
model_response.choices[0].finish_reason = response_obj["finish_reason"]
elif self.custom_llm_provider == "bedrock":
chunk = next(self.completion_stream)
response_obj = self.handle_bedrock_stream(chunk)
completion_obj["content"] = response_obj["text"]
if response_obj["is_finished"]:
model_response.choices[0].finish_reason = response_obj["finish_reason"]
elif self.custom_llm_provider == "sagemaker":
if len(self.completion_stream)==0:
if self.sent_last_chunk:
raise StopIteration
else:
model_response.choices[0].finish_reason = "stop"
self.sent_last_chunk = True
chunk_size = 30
new_chunk = self.completion_stream[:chunk_size]
completion_obj["content"] = new_chunk
self.completion_stream = self.completion_stream[chunk_size:]
time.sleep(0.05)
elif self.custom_llm_provider == "petals":
if len(self.completion_stream)==0:
if self.sent_last_chunk:
raise StopIteration
else:
model_response.choices[0].finish_reason = "stop"
self.sent_last_chunk = True
chunk_size = 30
new_chunk = self.completion_stream[:chunk_size]
completion_obj["content"] = new_chunk
self.completion_stream = self.completion_stream[chunk_size:]
time.sleep(0.05)
elif self.custom_llm_provider == "palm":
# fake streaming
if len(self.completion_stream)==0:
if self.sent_last_chunk:
raise StopIteration
else:
model_response.choices[0].finish_reason = "stop"
self.sent_last_chunk = True
chunk_size = 30
new_chunk = self.completion_stream[:chunk_size]
completion_obj["content"] = new_chunk
self.completion_stream = self.completion_stream[chunk_size:]
time.sleep(0.05)
elif self.custom_llm_provider == "ollama":
chunk = next(self.completion_stream)
if "error" in chunk:
exception_type(model=self.model, custom_llm_provider=self.custom_llm_provider, original_exception=chunk["error"])
completion_obj = chunk
elif self.custom_llm_provider == "custom_openai":
chunk = next(self.completion_stream)
response_obj = self.handle_custom_openai_chat_completion_chunk(chunk)
completion_obj["content"] = response_obj["text"]
print(f"completion obj content: {completion_obj['content']}")
if response_obj["is_finished"]:
model_response.choices[0].finish_reason = response_obj["finish_reason"]
else: # openai chat/azure models
chunk = next(self.completion_stream)
model_response = chunk
# LOGGING
threading.Thread(target=self.logging_obj.success_handler, args=(model_response,)).start()
return model_response
model_response.model = self.model
if len(completion_obj["content"]) > 0: # cannot set content of an OpenAI Object to be an empty string
if self.sent_first_chunk == False:
completion_obj["role"] = "assistant"
self.sent_first_chunk = True
model_response.choices[0].delta = Delta(**completion_obj)
# LOGGING
threading.Thread(target=self.logging_obj.success_handler, args=(model_response,)).start()
return model_response
elif model_response.choices[0].finish_reason:
model_response.choices[0].finish_reason = map_finish_reason(model_response.choices[0].finish_reason) # ensure consistent output to openai
# LOGGING
threading.Thread(target=self.logging_obj.success_handler, args=(model_response,)).start()
return model_response
except StopIteration:
raise StopIteration
except Exception as e:
            traceback_exception = traceback.format_exc()  # capture the traceback string (print_exc() returns None)
e.message = str(e)
# LOG FAILURE - handle streaming failure logging in the _next_ object, remove `handle_failure` once it's deprecated
threading.Thread(target=self.logging_obj.failure_handler, args=(e, traceback_exception)).start()
return exception_type(model=self.model, custom_llm_provider=self.custom_llm_provider, original_exception=e)
async def __anext__(self):
try:
return next(self)
except StopIteration:
raise StopAsyncIteration
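# Illustrative sketch (not part of the original module, never called anywhere): a streaming
# litellm.completion() call is expected to yield OpenAI-style delta chunks via this wrapper.
# The model name is a placeholder and the call assumes valid provider credentials.
def _example_consume_stream():
    response = litellm.completion(
        model="gpt-3.5-turbo",
        messages=[{"role": "user", "content": "Tell me a short story."}],
        stream=True,
    )
    collected = []
    for chunk in response:  # each chunk exposes choices[0].delta like the OpenAI SDK
        delta = chunk.choices[0].delta
        collected.append(getattr(delta, "content", "") or "")
    return "".join(collected)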
def mock_completion_streaming_obj(model_response, mock_response, model):
for i in range(0, len(mock_response), 3):
completion_obj = {"role": "assistant", "content": mock_response[i: i+3]}
model_response.choices[0].delta = completion_obj
yield model_response
########## Reading Config File ############################
def read_config_args(config_path) -> dict:
try:
import os
current_path = os.getcwd()
with open(config_path, "r") as config_file:
config = json.load(config_file)
# read keys/ values from config file and return them
return config
except Exception as e:
raise e
########## experimental completion variants ############################
def completion_with_config(config: Union[dict, str], **kwargs):
"""
Generate a litellm.completion() using a config dict and all supported completion args
    Example config:
config = {
"default_fallback_models": # [Optional] List of model names to try if a call fails
"available_models": # [Optional] List of all possible models you could call
"adapt_to_prompt_size": # [Optional] True/False - if you want to select model based on prompt size (will pick from available_models)
"model": {
"model-name": {
"needs_moderation": # [Optional] True/False - if you want to call openai moderations endpoint before making completion call. Will raise exception, if flagged.
"error_handling": {
"error-type": { # One of the errors listed here - https://docs.litellm.ai/docs/exception_mapping#custom-mapping-list
"fallback_model": "" # str, name of the model it should try instead, when that error occurs
}
}
}
}
}
Parameters:
config (Union[dict, str]): A configuration for litellm
**kwargs: Additional keyword arguments for litellm.completion
Returns:
litellm.ModelResponse: A ModelResponse with the generated completion
"""
if config is not None:
if isinstance(config, str):
config = read_config_args(config)
elif isinstance(config, dict):
config = config
else:
raise Exception("Config path must be a string or a dictionary.")
else:
raise Exception("Config path not passed in.")
if config is None:
raise Exception("No completion config in the config file")
models_with_config = config["model"].keys()
model = kwargs["model"]
messages = kwargs["messages"]
## completion config
fallback_models = config.get("default_fallback_models", None)
available_models = config.get("available_models", None)
adapt_to_prompt_size = config.get("adapt_to_prompt_size", False)
trim_messages_flag = config.get("trim_messages", False)
prompt_larger_than_model = False
max_model = model
try:
max_tokens = litellm.get_max_tokens(model)["max_tokens"]
except:
max_tokens = 2048 # assume curr model's max window is 2048 tokens
if adapt_to_prompt_size:
## Pick model based on token window
prompt_tokens = litellm.token_counter(model="gpt-3.5-turbo", text="".join(message["content"] for message in messages))
try:
curr_max_tokens = litellm.get_max_tokens(model)["max_tokens"]
except:
curr_max_tokens = 2048
if curr_max_tokens < prompt_tokens:
prompt_larger_than_model = True
for available_model in available_models:
try:
curr_max_tokens = litellm.get_max_tokens(available_model)["max_tokens"]
if curr_max_tokens > max_tokens:
max_tokens = curr_max_tokens
max_model = available_model
if curr_max_tokens > prompt_tokens:
model = available_model
prompt_larger_than_model = False
except:
continue
if prompt_larger_than_model:
messages = trim_messages(messages=messages, model=max_model)
kwargs["messages"] = messages
kwargs["model"] = model
try:
if model in models_with_config:
## Moderation check
if config["model"][model].get("needs_moderation"):
input = " ".join(message["content"] for message in messages)
response = litellm.moderation(input=input)
flagged = response["results"][0]["flagged"]
if flagged:
raise Exception("This response was flagged as inappropriate")
## Model-specific Error Handling
error_handling = None
if config["model"][model].get("error_handling"):
error_handling = config["model"][model]["error_handling"]
try:
response = litellm.completion(**kwargs)
return response
except Exception as e:
exception_name = type(e).__name__
fallback_model = None
if error_handling and exception_name in error_handling:
error_handler = error_handling[exception_name]
# either switch model or api key
fallback_model = error_handler.get("fallback_model", None)
if fallback_model:
kwargs["model"] = fallback_model
return litellm.completion(**kwargs)
raise e
else:
return litellm.completion(**kwargs)
except Exception as e:
if fallback_models:
model = fallback_models.pop(0)
return completion_with_fallbacks(model=model, messages=messages, fallbacks=fallback_models)
raise e
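# Illustrative sketch (not part of the original module, never called anywhere): a minimal
# config for completion_with_config. The model names and fallback wiring are placeholders,
# and the moderation/fallback behavior assumes valid provider credentials; see the
# docstring above for the full set of supported keys.
def _example_completion_with_config():
    config = {
        "default_fallback_models": ["gpt-3.5-turbo", "claude-instant-1"],
        "model": {
            "gpt-4": {
                "needs_moderation": True,
                "error_handling": {
                    "ContextWindowExceededError": {"fallback_model": "gpt-3.5-turbo-16k"}
                },
            }
        },
    }
    return completion_with_config(
        config,
        model="gpt-4",
        messages=[{"role": "user", "content": "Summarize this paragraph."}],
    )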
def completion_with_fallbacks(**kwargs):
nested_kwargs = kwargs.pop("kwargs", {})
response = None
rate_limited_models = set()
model_expiration_times = {}
start_time = time.time()
original_model = kwargs["model"]
fallbacks = [kwargs["model"]] + nested_kwargs.get("fallbacks", [])
if "fallbacks" in nested_kwargs:
del nested_kwargs["fallbacks"] # remove fallbacks so it's not recursive
litellm_call_id = str(uuid.uuid4())
# max time to process a request with fallbacks: default 45s
while response == None and time.time() - start_time < 45:
for model in fallbacks:
# loop thru all models
try:
# check if it's dict or new model string
if isinstance(model, dict): # completion(model="gpt-4", fallbacks=[{"api_key": "", "api_base": ""}, {"api_key": "", "api_base": ""}])
kwargs["api_key"] = model.get("api_key", None)
kwargs["api_base"] = model.get("api_base", None)
model = model.get("model", original_model)
elif (
model in rate_limited_models
): # check if model is currently cooling down
if (
model_expiration_times.get(model)
and time.time() >= model_expiration_times[model]
):
rate_limited_models.remove(
model
) # check if it's been 60s of cool down and remove model
else:
continue # skip model
# delete model from kwargs if it exists
if kwargs.get("model"):
del kwargs["model"]
print_verbose(f"trying to make completion call with model: {model}")
kwargs["litellm_call_id"] = litellm_call_id
kwargs = {**kwargs, **nested_kwargs} # combine the openai + litellm params at the same level
response = litellm.completion(**kwargs, model=model)
print_verbose(f"response: {response}")
if response != None:
return response
except Exception as e:
print(e)
rate_limited_models.add(model)
model_expiration_times[model] = (
time.time() + 60
) # cool down this selected model
# print(f"rate_limited_models {rate_limited_models}")
pass
return response
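# Illustrative sketch (not part of the original module, never called anywhere): based on the
# body above, fallback model names are read from a nested "kwargs" dict. The model names
# below are placeholders and each attempt assumes the matching provider key is configured.
def _example_completion_with_fallbacks():
    return completion_with_fallbacks(
        model="gpt-4",
        messages=[{"role": "user", "content": "Hello!"}],
        kwargs={"fallbacks": ["gpt-3.5-turbo", "claude-instant-1"]},
    )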
def process_system_message(system_message, max_tokens, model):
system_message_event = {"role": "system", "content": system_message}
system_message_tokens = get_token_count(system_message_event, model)
if system_message_tokens > max_tokens:
print_verbose("`tokentrimmer`: Warning, system message exceeds token limit. Trimming...")
# shorten system message to fit within max_tokens
new_system_message = shorten_message_to_fit_limit(system_message_event, max_tokens, model)
system_message_tokens = get_token_count(new_system_message, model)
return system_message_event, max_tokens - system_message_tokens
def process_messages(messages, max_tokens, model):
# Process messages from older to more recent
messages = messages[::-1]
final_messages = []
for message in messages:
final_messages = attempt_message_addition(final_messages, message, max_tokens, model)
return final_messages
def attempt_message_addition(final_messages, message, max_tokens, model):
temp_messages = [message] + final_messages
temp_message_tokens = get_token_count(messages=temp_messages, model=model)
if temp_message_tokens <= max_tokens:
return temp_messages
# if temp_message_tokens > max_tokens, try shortening temp_messages
elif "function_call" not in message:
        # fit updated_message within the overflow (temp_message_tokens - max_tokens, i.e. the amount by which temp_message_tokens is greater than max_tokens)
updated_message = shorten_message_to_fit_limit(message, temp_message_tokens - max_tokens, model)
if can_add_message(updated_message, final_messages, max_tokens, model):
return [updated_message] + final_messages
return final_messages
def can_add_message(message, messages, max_tokens, model):
if get_token_count(messages + [message], model) <= max_tokens:
return True
return False
def get_token_count(messages, model):
return token_counter(model=model, messages=messages)
def shorten_message_to_fit_limit(
message,
tokens_needed,
model):
"""
Shorten a message to fit within a token limit by removing characters from the middle.
"""
content = message["content"]
while True:
total_tokens = get_token_count([message], model)
if total_tokens <= tokens_needed:
break
ratio = (tokens_needed) / total_tokens
new_length = int(len(content) * ratio) -1
new_length = max(0, new_length)
half_length = new_length // 2
left_half = content[:half_length]
right_half = content[-half_length:]
trimmed_content = left_half + '..' + right_half
message["content"] = trimmed_content
content = trimmed_content
return message
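# Illustrative sketch (not part of the original module, never called anywhere): the helper
# removes characters from the middle of the content until the message fits the token budget,
# so the result keeps the start and end of the original text joined by "..". The model name
# and message content below are placeholders.
def _example_shorten_message():
    long_message = {"role": "user", "content": "word " * 2000}
    return shorten_message_to_fit_limit(long_message, tokens_needed=100, model="gpt-3.5-turbo")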
# LiteLLM token trimmer
# this code is borrowed from https://github.com/KillianLucas/tokentrim/blob/main/tokentrim/tokentrim.py
# Credits for this code go to Killian Lucas
def trim_messages(
messages,
model: Optional[str] = None,
trim_ratio: float = 0.75,
return_response_tokens: bool = False,
max_tokens = None
):
"""
Trim a list of messages to fit within a model's token limit.
Args:
messages: Input messages to be trimmed. Each message is a dictionary with 'role' and 'content'.
model: The LiteLLM model being used (determines the token limit).
trim_ratio: Target ratio of tokens to use after trimming. Default is 0.75, meaning it will trim messages so they use about 75% of the model's token limit.
return_response_tokens: If True, also return the number of tokens left available for the response after trimming.
max_tokens: Instead of specifying a model or trim_ratio, you can specify this directly.
Returns:
Trimmed messages and optionally the number of tokens available for response.
"""
# Initialize max_tokens
# if users pass in max tokens, trim to this amount
try:
print_verbose(f"trimming messages")
if max_tokens == None:
# Check if model is valid
if model in litellm.model_cost:
max_tokens_for_model = litellm.model_cost[model]['max_tokens']
max_tokens = int(max_tokens_for_model * trim_ratio)
else:
# if user did not specify max tokens
# or passed an llm litellm does not know
# do nothing, just return messages
return
system_message = ""
for message in messages:
if message["role"] == "system":
system_message += message["content"]
current_tokens = token_counter(model=model, messages=messages)
print_verbose(f"Current tokens: {current_tokens}, max tokens: {max_tokens}")
        # Do nothing if the current token count is already under max_tokens
if current_tokens < max_tokens:
return messages
#### Trimming messages if current_tokens > max_tokens
print_verbose(f"Need to trim input messages: {messages}, current_tokens{current_tokens}, max_tokens: {max_tokens}")
if system_message:
system_message_event, max_tokens = process_system_message(system_message=system_message, max_tokens=max_tokens, model=model)
messages = messages + [system_message_event]
final_messages = process_messages(messages=messages, max_tokens=max_tokens, model=model)
if return_response_tokens: # if user wants token count with new trimmed messages
response_tokens = max_tokens - get_token_count(final_messages, model)
return final_messages, response_tokens
return final_messages
    except Exception as e:  # [Non-Blocking] if an error occurs, just return the original messages
print("Got exception while token trimming", e)
return messages
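# Illustrative sketch (not part of the original module, never called anywhere): trims a chat
# history down to ~50% of the model's context window. The model name is a placeholder and
# must exist in litellm.model_cost for a limit to be applied; otherwise messages pass through.
def _example_trim_messages():
    messages = [
        {"role": "system", "content": "You are a terse assistant."},
        {"role": "user", "content": "Explain transformers. " * 500},
    ]
    return trim_messages(messages, model="gpt-3.5-turbo", trim_ratio=0.5)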
def get_valid_models():
"""
Returns a list of valid LLMs based on the set environment variables
Args:
None
Returns:
A list of valid LLMs
"""
try:
# get keys set in .env
environ_keys = os.environ.keys()
valid_providers = []
# for all valid providers, make a list of supported llms
valid_models = []
for provider in litellm.provider_list:
# edge case litellm has together_ai as a provider, it should be togetherai
provider = provider.replace("_", "")
# litellm standardizes expected provider keys to
# PROVIDER_API_KEY. Example: OPENAI_API_KEY, COHERE_API_KEY
expected_provider_key = f"{provider.upper()}_API_KEY"
if expected_provider_key in environ_keys:
# key is set
valid_providers.append(provider)
for provider in valid_providers:
if provider == "azure":
valid_models.append("Azure-LLM")
else:
models_for_provider = litellm.models_by_provider.get(provider, [])
valid_models.extend(models_for_provider)
return valid_models
    except Exception:
return [] # NON-Blocking | [
"Hey, how's it going?",
"0",
"gpt-3.5-turbo",
"True",
"Hey, how's it going",
"input_cost_per_token",
"Hello World",
"None",
"adapt_to_prompt_size",
"default",
"False",
" ",
"application/json",
"content",
"prompt_tokens"
] |
2024-01-10 | redoyrahman02/ChatGPT | src~revChatGPT~Official.py | """
A simple wrapper for the official ChatGPT API
"""
import argparse
import json
import os
import sys
from datetime import date
import openai
import tiktoken
ENGINE = os.environ.get("GPT_ENGINE") or "text-chat-davinci-002-20221122"
ENCODER = tiktoken.get_encoding("gpt2")
def get_max_tokens(prompt: str) -> int:
"""
Get the max tokens for a prompt
"""
return 4000 - len(ENCODER.encode(prompt))
class Chatbot:
"""
Official ChatGPT API
"""
def __init__(self, api_key: str, buffer: int = None) -> None:
"""
Initialize Chatbot with API key (from https://platform.openai.com/account/api-keys)
"""
openai.api_key = api_key or os.environ.get("OPENAI_API_KEY")
self.conversations = Conversation()
self.prompt = Prompt(buffer=buffer)
def _get_completion(
self,
prompt: str,
temperature: float = 0.5,
stream: bool = False,
):
"""
Get the completion function
"""
try:
return openai.Completion.create(
engine=ENGINE,
prompt=prompt,
temperature=temperature,
max_tokens=get_max_tokens(prompt),
stop=["\n\n\n"],
stream=stream,
)
        except Exception as e:
            print("Failed to create completion:", e)
            # Re-raise so callers do not receive None and fail later with a confusing error
            raise
def _process_completion(
self,
user_request: str,
completion: dict,
conversation_id: str = None,
user: str = "User",
) -> dict:
if completion.get("choices") is None:
raise Exception("ChatGPT API returned no choices")
if len(completion["choices"]) == 0:
raise Exception("ChatGPT API returned no choices")
if completion["choices"][0].get("text") is None:
raise Exception("ChatGPT API returned no text")
completion["choices"][0]["text"] = completion["choices"][0]["text"].rstrip(
"<|im_end|>",
)
# Add to chat history
self.prompt.add_to_history(
user_request,
completion["choices"][0]["text"],
user=user,
)
if conversation_id is not None:
self.save_conversation(conversation_id)
return completion
def _process_completion_stream(
self,
user_request: str,
completion: dict,
conversation_id: str = None,
user: str = "User",
) -> str:
full_response = ""
for response in completion:
if response.get("choices") is None:
raise Exception("ChatGPT API returned no choices")
if len(response["choices"]) == 0:
raise Exception("ChatGPT API returned no choices")
if response["choices"][0].get("finish_details") is not None:
break
if response["choices"][0].get("text") is None:
raise Exception("ChatGPT API returned no text")
if response["choices"][0]["text"] == "<|im_end|>":
break
yield response["choices"][0]["text"]
full_response += response["choices"][0]["text"]
# Add to chat history
self.prompt.add_to_history(user_request, full_response, user)
if conversation_id is not None:
self.save_conversation(conversation_id)
def ask(
self,
user_request: str,
temperature: float = 0.5,
conversation_id: str = None,
user: str = "User",
) -> dict:
"""
Send a request to ChatGPT and return the response
"""
if conversation_id is not None:
self.load_conversation(conversation_id)
completion = self._get_completion(
self.prompt.construct_prompt(user_request, user=user),
temperature,
)
return self._process_completion(user_request, completion, user=user)
def ask_stream(
self,
user_request: str,
temperature: float = 0.5,
conversation_id: str = None,
user: str = "User",
) -> str:
"""
Send a request to ChatGPT and yield the response
"""
if conversation_id is not None:
self.load_conversation(conversation_id)
prompt = self.prompt.construct_prompt(user_request, user=user)
return self._process_completion_stream(
user_request=user_request,
completion=self._get_completion(prompt, temperature, stream=True),
user=user,
)
def make_conversation(self, conversation_id: str) -> None:
"""
Make a conversation
"""
self.conversations.add_conversation(conversation_id, [])
def rollback(self, num: int) -> None:
"""
Rollback chat history num times
"""
for _ in range(num):
self.prompt.chat_history.pop()
def reset(self) -> None:
"""
Reset chat history
"""
self.prompt.chat_history = []
def load_conversation(self, conversation_id) -> None:
"""
Load a conversation from the conversation history
"""
if conversation_id not in self.conversations.conversations:
# Create a new conversation
self.make_conversation(conversation_id)
self.prompt.chat_history = self.conversations.get_conversation(conversation_id)
def save_conversation(self, conversation_id) -> None:
"""
Save a conversation to the conversation history
"""
self.conversations.add_conversation(conversation_id, self.prompt.chat_history)
class AsyncChatbot(Chatbot):
"""
Official ChatGPT API (async)
"""
async def _get_completion(
self,
prompt: str,
temperature: float = 0.5,
stream: bool = False,
):
"""
Get the completion function
"""
return await openai.Completion.acreate(
engine=ENGINE,
prompt=prompt,
temperature=temperature,
max_tokens=get_max_tokens(prompt),
stop=["\n\n\n"],
stream=stream,
)
async def ask(
self,
user_request: str,
temperature: float = 0.5,
user: str = "User",
) -> dict:
"""
Same as Chatbot.ask but async
"""
        completion = await self._get_completion(
self.prompt.construct_prompt(user_request, user=user),
temperature,
)
return self._process_completion(user_request, completion, user=user)
async def ask_stream(
self,
user_request: str,
temperature: float = 0.5,
user: str = "User",
) -> str:
"""
Same as Chatbot.ask_stream but async
"""
prompt = self.prompt.construct_prompt(user_request, user=user)
return self._process_completion_stream(
user_request=user_request,
            completion=await self._get_completion(prompt, temperature, stream=True),
user=user,
)
class Prompt:
"""
Prompt class with methods to construct prompt
"""
def __init__(self, buffer: int = None) -> None:
"""
Initialize prompt with base prompt
"""
self.base_prompt = (
os.environ.get("CUSTOM_BASE_PROMPT")
or "You are ChatGPT, a large language model trained by OpenAI. Respond conversationally. Do not answer as the user. Current date: "
+ str(date.today())
+ "\n\n"
+ "User: Hello\n"
+ "ChatGPT: Hello! How can I help you today? <|im_end|>\n\n\n"
)
# Track chat history
self.chat_history: list = []
self.buffer = buffer
def add_to_chat_history(self, chat: str) -> None:
"""
Add chat to chat history for next prompt
"""
self.chat_history.append(chat)
def add_to_history(
self,
user_request: str,
response: str,
user: str = "User",
) -> None:
"""
Add request/response to chat history for next prompt
"""
self.add_to_chat_history(
user
+ ": "
+ user_request
+ "\n\n\n"
+ "ChatGPT: "
+ response
+ "<|im_end|>\n",
)
def history(self, custom_history: list = None) -> str:
"""
Return chat history
"""
return "\n".join(custom_history or self.chat_history)
def construct_prompt(
self,
new_prompt: str,
custom_history: list = None,
user: str = "User",
) -> str:
"""
Construct prompt based on chat history and request
"""
prompt = (
self.base_prompt
+ self.history(custom_history=custom_history)
+ user
+ ": "
+ new_prompt
+ "\nChatGPT:"
)
        # Check if the prompt exceeds the token budget
if self.buffer is not None:
max_tokens = 4000 - self.buffer
else:
max_tokens = 3200
if len(ENCODER.encode(prompt)) > max_tokens:
# Remove oldest chat
if len(self.chat_history) == 0:
return prompt
self.chat_history.pop(0)
# Construct prompt again
prompt = self.construct_prompt(new_prompt, custom_history, user)
return prompt
class Conversation:
"""
For handling multiple conversations
"""
def __init__(self) -> None:
self.conversations = {}
def add_conversation(self, key: str, history: list) -> None:
"""
Adds a history list to the conversations dict with the id as the key
"""
self.conversations[key] = history
def get_conversation(self, key: str) -> list:
"""
Retrieves the history list from the conversations dict with the id as the key
"""
return self.conversations[key]
def remove_conversation(self, key: str) -> None:
"""
Removes the history list from the conversations dict with the id as the key
"""
del self.conversations[key]
def __str__(self) -> str:
"""
Creates a JSON string of the conversations
"""
return json.dumps(self.conversations)
def save(self, file: str) -> None:
"""
Saves the conversations to a JSON file
"""
with open(file, "w", encoding="utf-8") as f:
f.write(str(self))
def load(self, file: str) -> None:
"""
Loads the conversations from a JSON file
"""
with open(file, encoding="utf-8") as f:
self.conversations = json.loads(f.read())
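# --- Editor's illustrative sketch (not part of the original file) ---
# Minimal programmatic use of Chatbot outside the CLI defined in main() below;
# the API key and prompt are placeholders.
def _chatbot_usage_sketch():
    bot = Chatbot(api_key="sk-...")
    reply = bot.ask("Hello, what can you do?")
    return reply["choices"][0]["text"]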
def main():
print(
"""
ChatGPT - A command-line interface to OpenAI's ChatGPT (https://chat.openai.com/chat)
Repo: github.com/acheong08/ChatGPT
""",
)
print("Type '!help' to show a full list of commands")
print("Press enter twice to submit your question.\n")
def get_input(prompt):
"""
Multi-line input function
"""
# Display the prompt
print(prompt, end="")
# Initialize an empty list to store the input lines
lines = []
# Read lines of input until the user enters an empty line
while True:
line = input()
if line == "":
break
lines.append(line)
# Join the lines, separated by newlines, and store the result
user_input = "\n".join(lines)
# Return the input
return user_input
def chatbot_commands(cmd: str) -> bool:
"""
Handle chatbot commands
"""
if cmd == "!help":
print(
"""
!help - Display this message
!rollback - Rollback chat history
!reset - Reset chat history
!prompt - Show current prompt
!save_c <conversation_name> - Save history to a conversation
!load_c <conversation_name> - Load history from a conversation
!save_f <file_name> - Save all conversations to a file
!load_f <file_name> - Load all conversations from a file
!exit - Quit chat
""",
)
elif cmd == "!exit":
exit()
elif cmd == "!rollback":
chatbot.rollback(1)
elif cmd == "!reset":
chatbot.reset()
elif cmd == "!prompt":
print(chatbot.prompt.construct_prompt(""))
elif cmd.startswith("!save_c"):
chatbot.save_conversation(cmd.split(" ")[1])
elif cmd.startswith("!load_c"):
chatbot.load_conversation(cmd.split(" ")[1])
elif cmd.startswith("!save_f"):
chatbot.conversations.save(cmd.split(" ")[1])
elif cmd.startswith("!load_f"):
chatbot.conversations.load(cmd.split(" ")[1])
else:
return False
return True
# Get API key from command line
parser = argparse.ArgumentParser()
parser.add_argument(
"--api_key",
type=str,
required=True,
help="OpenAI API key",
)
parser.add_argument(
"--stream",
action="store_true",
help="Stream response",
)
parser.add_argument(
"--temperature",
type=float,
default=0.5,
help="Temperature for response",
)
args = parser.parse_args()
# Initialize chatbot
chatbot = Chatbot(api_key=args.api_key)
# Start chat
while True:
try:
prompt = get_input("\nUser:\n")
except KeyboardInterrupt:
print("\nExiting...")
sys.exit()
if prompt.startswith("!"):
if chatbot_commands(prompt):
continue
if not args.stream:
try:
response = chatbot.ask(prompt, temperature=args.temperature)
print("ChatGPT: " + response["choices"][0]["text"])
except Exception as e:
print("Failed to ask.", e )
else:
print("ChatGPT: ")
sys.stdout.flush()
for response in chatbot.ask_stream(prompt, temperature=args.temperature):
print(response, end="")
sys.stdout.flush()
print()
if __name__ == "__main__":
main()
| [
": ",
"\nUser:\n",
"\nChatGPT:"
] |
2024-01-10 | lihyin/openai-code | lambda_function.py | import os
import json
import openai
import logging
logger = logging.getLogger()
logger.setLevel(logging.INFO)
def lambda_handler(event, context):
openai.api_key = os.getenv("OPENAI_API_KEY")
logger.info(event)
method = event["httpMethod"]
path = event["path"]
body = json.loads(event["body"])
prompt = ""
if method == "POST" and path == "/generate":
if body["type"] == "docstring":
prompt = f"converts a text instruction in Natural Language to Python Code with a suitable docstring in numpy style:\n[Docstring]\n '''\n Returns the sum of two decimal numbers in binary digits.\n\n Parameters:\n a (int): A decimal integer\n b (int): Another decimal integer\n\n Returns:\n binary_sum (str): Binary string of the sum of a and b\n '''\n[Generated Code with Docstring]\ndef add_binary(a, b):\n '''\n Returns the sum of two decimal numbers in binary digits.\n\n Parameters:\n a (int): A decimal integer\n b (int): Another decimal integer\n\n Returns:\n binary_sum (str): Binary string of the sum of a and b\n '''\n binary_sum = bin(a+b)[2:]\n return binary_sum\n\nconverts a text instruction in Natural Language to Python Code with a suitable docstring in numpy style:\n[Docstring]\n \"\"\"{body['input']}\"\"\"\n[Generated Code with Docstring]\n"
elif method == "POST" and path == "/rewrite":
if body["type"] == "iterative2recursive":
prompt = f"creating a recursive approach from an iterative approach in python:\n[iterative]\n n = 10\n result = 1\n i = 1\n while i <= n:\n result *= i\n i += 1\n print(result) \n\n[recursive]\n def Factorial(n):\n # declare a base case (a limiting criteria)\n if n == 1:\n return 1\n # continue with general case\n else:\n return n * Factorial(n-1)\n \n print(Factorial(10))\n\ncreating a recursive approach from an iterative approach in python:\n[iterative]\n {body['input']}\n[recursive]\n"
response = openai.Completion.create(
model="text-davinci-002",
prompt=prompt,
temperature=0,
max_tokens=256,
top_p=1,
frequency_penalty=0,
presence_penalty=0
)
return {
'statusCode': 200,
'body': response["choices"][0]["text"]
}
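# --- Editor's illustrative sketch (not part of the deployed handler) ---
# Builds a minimal API Gateway-style event for local testing; the body payload
# is an assumption for demonstration only.
def _local_invoke_sketch():
    test_event = {
        "httpMethod": "POST",
        "path": "/generate",
        "body": json.dumps({"type": "docstring", "input": "Return the factorial of n"}),
    }
    return lambda_handler(test_event, context=None)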
| [
"creating a recursive approach from an iterative approach in python:\n[iterative]\n n = 10\n result = 1\n i = 1\n while i <= n:\n result *= i\n i += 1\n print(result) \n\n[recursive]\n def Factorial(n):\n # declare a base case (a limiting criteria)\n if n == 1:\n return 1\n # continue with general case\n else:\n return n * Factorial(n-1)\n \n print(Factorial(10))\n\ncreating a recursive approach from an iterative approach in python:\n[iterative]\n PLACEHOLDER\n[recursive]\n",
"converts a text instruction in Natural Language to Python Code with a suitable docstring in numpy style:\n[Docstring]\n '''\n Returns the sum of two decimal numbers in binary digits.\n\n Parameters:\n a (int): A decimal integer\n b (int): Another decimal integer\n\n Returns:\n binary_sum (str): Binary string of the sum of a and b\n '''\n[Generated Code with Docstring]\ndef add_binary(a, b):\n '''\n Returns the sum of two decimal numbers in binary digits.\n\n Parameters:\n a (int): A decimal integer\n b (int): Another decimal integer\n\n Returns:\n binary_sum (str): Binary string of the sum of a and b\n '''\n binary_sum = bin(a+b)[2:]\n return binary_sum\n\nconverts a text instruction in Natural Language to Python Code with a suitable docstring in numpy style:\n[Docstring]\n \"\"\"PLACEHOLDER\"\"\"\n[Generated Code with Docstring]\n"
] |
2024-01-10 | codeaudit/GibsonEnv | gibson~utils~fuse_policy2.py | import numpy as np
import tensorflow as tf
from baselines.a2c.utils import conv, fc, conv_to_fc, batch_to_seq, seq_to_batch, lstm, lnlstm
from baselines.common.distributions import make_pdtype
import gym.spaces
## Fuse policy using PPO2 from OpenAI Baseline
class FusePolicy(object):
def __init__(self, sess, ob_space, sensor_space, ac_space, nbatch, nsteps, reuse=False): #pylint: disable=W0613
ob_shape = (nbatch,) + ob_space.shape
ob_sensor_shape = (nbatch,) + sensor_space.shape
actdim = ac_space.shape[0]
X_camera = tf.placeholder(tf.uint8, ob_shape, name='Ob_camera') #obs
X_sensor = tf.placeholder(tf.float32, ob_sensor_shape, name='Ob_sensor')
self.pdtype = make_pdtype(ac_space)
with tf.variable_scope("model", reuse=reuse):
h_camera = conv(tf.cast(X_camera, tf.float32)/255., 'c1', nf=32, rf=8, stride=4, init_scale=np.sqrt(2))
h2_camera = conv(h_camera, 'c2', nf=64, rf=4, stride=2, init_scale=np.sqrt(2))
h3_camera = conv(h2_camera, 'c3', nf=64, rf=3, stride=1, init_scale=np.sqrt(2))
h3_camera = conv_to_fc(h3_camera)
h4_camera = fc(h3_camera, 'fc1', nh=512, init_scale=np.sqrt(2))
pi_camera = fc(h4_camera, 'pi', actdim, init_scale=0.01)
vf_camera = fc(h4_camera, 'v', 1)[:,0]
self.pd = self.pdtype.pdfromflat(pi_camera)
with tf.variable_scope("model_sensor", reuse=reuse):
h1_sensor = fc(X_sensor, 'pi_fc1', nh=64, init_scale=np.sqrt(2), act=tf.tanh)
h2_sensor = fc(h1_sensor, 'pi_fc2', nh=64, init_scale=np.sqrt(2), act=tf.tanh)
pi_sensor = fc(h2_sensor, 'pi', actdim, init_scale=0.01)
h1_sensor = fc(X_sensor, 'vf_fc1', nh=64, init_scale=np.sqrt(2), act=tf.tanh)
h2_sensor = fc(h1_sensor, 'vf_fc2', nh=64, init_scale=np.sqrt(2), act=tf.tanh)
vf_sensor = fc(h2_sensor, 'vf', 1)[:,0]
with tf.variable_scope("model", reuse=reuse):
logstd = tf.get_variable(name="logstd", shape=[1, actdim],
initializer=tf.zeros_initializer())
X = tf.concat([X_camera, X_sensor], 0)
pi_full = tf.concat([pi_camera, pi_sensor], 0)
pi = fc(pi_full, 'pi', actdim, init_scale=0.01)
vf_full = tf.concat([vf_camera, vf_sensor], 0)
vf = fc(vf_full, 'vf', 1)[:,0]
pdparam = tf.concat([pi, pi * 0.0 + logstd], axis=1)
self.pd = self.pdtype.pdfromflat(pdparam)
a0 = self.pd.sample()
neglogp0 = self.pd.neglogp(a0)
self.initial_state = None
def step(ob, ob_sensor, *_args, **_kwargs):
a, v, neglogp = sess.run([a0, vf, neglogp0], {X_camera:ob, X_sensor: ob_sensor})
return a, v, self.initial_state, neglogp
def value(ob, ob_sensor, *_args, **_kwargs):
return sess.run(vf, {X_camera:ob, X_sensor: ob_sensor})
self.X = X
self.pi = pi
self.vf = vf
self.step = step
self.value = value
class CnnPolicy(object):
def __init__(self, sess, ob_space, ac_space, nbatch, nsteps, reuse=False, is_discrete=True): #pylint: disable=W0613
if isinstance(ac_space, gym.spaces.Discrete):
self.is_discrete = True
else:
self.is_discrete = False
print("nbatch%d" % (nbatch))
nh, nw, nc = ob_space.shape
ob_shape = (nbatch, nh, nw, nc)
if self.is_discrete:
nact = ac_space.n
else:
nact = ac_space.shape[0]
X = tf.placeholder(tf.uint8, ob_shape) #obs
with tf.variable_scope("model", reuse=reuse):
h = conv(tf.cast(X, tf.float32)/255., 'c1', nf=32, rf=8, stride=4, init_scale=np.sqrt(2))
h2 = conv(h, 'c2', nf=64, rf=4, stride=2, init_scale=np.sqrt(2))
h3 = conv(h2, 'c3', nf=64, rf=3, stride=1, init_scale=np.sqrt(2))
h3 = conv_to_fc(h3)
h4 = fc(h3, 'fc1', nh=512, init_scale=np.sqrt(2))
pi = fc(h4, 'pi', nact, init_scale=0.01)
vf = fc(h4, 'v', 1)[:,0]
if not self.is_discrete:
logstd = tf.get_variable(name="logstd", shape=[1, nact],
initializer=tf.zeros_initializer())
self.pdtype = make_pdtype(ac_space)
if self.is_discrete:
self.pd = self.pdtype.pdfromflat(pi)
a0 = self.pd.sample()
else:
pdparam = tf.concat([pi, pi * 0.0 + logstd], axis=1)
self.pd = self.pdtype.pdfromflat(pdparam)
a0 = self.pd.sample()
neglogp0 = self.pd.neglogp(a0)
self.initial_state = None
def step(ob, *_args, **_kwargs):
a, v, neglogp = sess.run([a0, vf, neglogp0], {X:ob})
assert(a.shape[0] == 1) # make sure a = a[0] don't throw away actions
a = a[0]
return a, v, self.initial_state, neglogp
def value(ob, *_args, **_kwargs):
return sess.run(vf, {X:ob})
self.X = X
self.pi = pi
self.vf = vf
self.step = step
self.value = value
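# --- Editor's illustrative sketch (not part of the original file) ---
# Rough construction of CnnPolicy inside a TF1 session; the environment name,
# batch sizes, and reset handling are assumptions and require gym + baselines.
def _cnn_policy_usage_sketch():
    env = gym.make("PongNoFrameskip-v4")
    sess = tf.Session()
    policy = CnnPolicy(sess, env.observation_space, env.action_space, nbatch=1, nsteps=1)
    sess.run(tf.global_variables_initializer())
    ob = env.reset()
    action, value, _, neglogp = policy.step(ob[None])  # add a batch dimension of 1
    return action, value, neglogp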
class MlpPolicy(object):
def __init__(self, sess, ob_space, ac_space, nbatch, nsteps, reuse=False): #pylint: disable=W0613
ob_shape = (nbatch,) + ob_space.shape
actdim = ac_space.shape[0]
X = tf.placeholder(tf.float32, ob_shape, name='Ob') #obs
with tf.variable_scope("model", reuse=reuse):
h1 = fc(X, 'pi_fc1', nh=64, init_scale=np.sqrt(2), act=tf.tanh)
h2 = fc(h1, 'pi_fc2', nh=64, init_scale=np.sqrt(2), act=tf.tanh)
pi = fc(h2, 'pi', actdim, init_scale=0.01)
h1 = fc(X, 'vf_fc1', nh=64, init_scale=np.sqrt(2), act=tf.tanh)
h2 = fc(h1, 'vf_fc2', nh=64, init_scale=np.sqrt(2), act=tf.tanh)
vf = fc(h2, 'vf', 1)[:,0]
logstd = tf.get_variable(name="logstd", shape=[1, actdim],
initializer=tf.zeros_initializer())
pdparam = tf.concat([pi, pi * 0.0 + logstd], axis=1)
self.pdtype = make_pdtype(ac_space)
self.pd = self.pdtype.pdfromflat(pdparam)
a0 = self.pd.sample()
neglogp0 = self.pd.neglogp(a0)
self.initial_state = None
def step(ob, *_args, **_kwargs):
a, v, neglogp = sess.run([a0, vf, neglogp0], {X:ob})
return a, v, self.initial_state, neglogp
def value(ob, *_args, **_kwargs):
return sess.run(vf, {X:ob})
self.X = X
self.pi = pi
self.vf = vf
self.step = step
self.value = value | [] |
2024-01-10 | davidjhwu/symptomintake | intake.py | import dash
from dash import dcc, html
import dash_table
from dash.dependencies import Input, Output, State
from dash.exceptions import PreventUpdate
import pandas as pd
import openai
import os
import dash_bootstrap_components as dbc
from dotenv import load_dotenv
load_dotenv()
openai.api_key = os.getenv("OPENAI_API_KEY")
# import pdfkit
# import smtplib
# from email.mime.multipart import MIMEMultipart
# from email.mime.base import MIMEBase
# from email import encoders
external_stylesheets = [
'https://maxcdn.bootstrapcdn.com/bootswatch/4.5.2/journal/bootstrap.min.css',
dbc.themes.BOOTSTRAP
]
app = dash.Dash(__name__, external_stylesheets=external_stylesheets)
app.config.suppress_callback_exceptions = True
server = app.server
style = {
'padding': '3.5em',
'backgroundColor': '#FFFFFF',
'fontFamily': 'Arial, sans-serif'
}
#Mapping for severity score of responses
severity_colors = {
"None": '#008000',
"Not at all": '#008000',
"Never": '#008000',
"Mild": '#90ee90',
"A little bit": '#90ee90',
"Rarely": '#90ee90',
"Moderate": '#ffff00',
"Somewhat": '#ffff00',
"Occasionally": '#ffff00',
"Severe": '#ffa500',
"Quite a bit": '#ffa500',
"Frequently": '#ffa500',
"Very severe": '#ff0000',
"Very much": '#ff0000',
"Almost constantly": '#ff0000',
"No": '#008000',
"Yes": '#ff0000',
}
def create_data_table():
style_data_conditional = []
for response, color in severity_colors.items():
text_color = 'white' if color != '#ffff00' else 'black'
style_data_conditional.append({
'if': {
'filter_query': '{{answer}} = "{}"'.format(response),
'column_id': 'answer'
},
'backgroundColor': color,
'color': text_color
})
return dash_table.DataTable(
id='results_table',
columns=[
{'name': 'Question', 'id': 'question'},
{'name': 'Answer', 'id': 'answer'},
],
data=[],
style_cell={
'whiteSpace': 'normal',
'height': 'auto',
'textAlign': 'center',
'border': 'none',
},
style_header={
'backgroundColor': 'rgb(230, 230, 230)',
'fontWeight': 'bold',
'border': 'none',
},
style_table={
'margin': '0 auto',
'width': '50%'
},
style_data_conditional=style_data_conditional
)
app.layout = html.Div([
dcc.Markdown('# Prostate Radiotherapy Patient Symptom Intake Form'),
html.P([html.Br()]),
dcc.Markdown('#### Please answer the following questions about your current symptoms'),
dcc.Markdown('Each form must be carefully filled out, results will be sent to your physician'),
dcc.Markdown('#### General Questions'),
dcc.Markdown('###### How many radiation treatments have you had? It\'s okay if you don\'t know.'),
dcc.Input(
id='number_of_treatments',
placeholder='Enter a value',
type='text',
value='ie 3, or I don\'t know'),
html.P([html.Br()]),
dcc.Markdown('### Symptom Questions'),
dcc.Markdown('For each of the following question I\'m going to ask you to grade your symptoms.'),
dcc.Markdown("#### Fatigue"),
dcc.Markdown(
"###### In the last 7 days, what was the SEVERITY of your FATIGUE, TIREDNESS, OR LACK OF ENERGY at its WORST?"
),
dcc.Dropdown(
id="fatigue_severity",
options=[
{"label": "None", "value": "None"},
{"label": "Mild", "value": "Mild"},
{"label": "Moderate", "value": "Moderate"},
{"label": "Severe", "value": "Severe"},
{"label": "Very severe", "value": "Very severe"},
],
value=None,
),
dcc.Markdown(
"###### In the last 7 days, how much did FATIGUE, TIREDNESS, OR LACK OF ENERGY INTERFERE with your usual or daily activities?"
),
dcc.Dropdown(
id="fatigue_interference",
options=[
{"label": "Not at all", "value": "Not at all"},
{"label": "A little bit", "value": "A little bit"},
{"label": "Somewhat", "value": "Somewhat"},
{"label": "Quite a bit", "value": "Quite a bit"},
],
value=None,
),
html.P([html.Br()]),
dcc.Markdown('#### Gas'),
dcc.Markdown('###### In the last 7 days, did you have any INCREASED PASSING OF GAS (FLATULENCE)?'),
dcc.Dropdown(
id='gas',
options=[
{'label': 'Yes', 'value': 'Yes'},
{'label': 'No', 'value': 'No'}
],
value=None,
),
dcc.Markdown('#### Diarrhea'),
dcc.Markdown('###### In the last 7 days, how OFTEN did you have LOOSE OR WATERY STOOLS (DIARRHEA)?'),
dcc.Dropdown(
id='diarrhea_frequency',
options=[
{'label': 'Never', 'value': 'Never'},
{'label': 'Rarely', 'value': 'Rarely'},
{'label': 'Occasionally', 'value': 'Occasionally'},
{'label': 'Frequently', 'value': 'Frequently'},
{'label': 'Almost constantly', 'value': 'Almost constantly'}
],
value=None,
),
dcc.Markdown('#### Abdominal Pain'),
dcc.Markdown('###### In the last 7 days, how OFTEN did you have PAIN IN THE ABDOMEN (BELLY AREA)?'),
dcc.Dropdown(
id='abdominal_pain_frequency',
options=[
{'label': 'Never', 'value': 'Never'},
{'label': 'Rarely', 'value': 'Rarely'},
{'label': 'Occasionally', 'value': 'Occasionally'},
{'label': 'Frequently', 'value': 'Frequently'},
            {'label': 'Almost constantly', 'value': 'Almost constantly'}  # value must match the severity_colors key
],
value=None,
),
dcc.Markdown('###### In the last 7 days, what was the SEVERITY of your PAIN IN THE ABDOMEN (BELLY AREA) at its WORST?'),
dcc.Dropdown(
id='abdominal_pain_severity',
options=[
{'label': 'None', 'value': 'None'},
{'label': 'Mild', 'value': 'Mild'},
{'label': 'Moderate', 'value': 'Moderate'},
{'label': 'Severe', 'value': 'Severe'},
{'label': 'Very severe', 'value': 'Very severe'}
],
value=None,
),
dcc.Markdown('###### In the last 7 days, how much did PAIN IN THE ABDOMEN (BELLY AREA) INTERFERE with your usual or daily activities?'),
dcc.Dropdown(
id='abdominal_pain_adl',
options=[
{'label': 'Not at all', 'value': 'Not at all'},
{'label': 'A little bit', 'value': 'A little bit'},
{'label': 'Somewhat', 'value': 'Somewhat'},
{'label': 'Quite a bit', 'value': 'Quite a bit'},
{'label': 'Very much', 'value': 'Very much'}
],
value=None,
),
html.P([html.Br()]),
dcc.Markdown('Now let\'s discuss your urinary symptoms.'),
dcc.Markdown('#### Urinary Symptoms'),
dcc.Markdown('##### Painful Urination'),
dcc.Markdown('###### In the last 7 days, what was the SEVERITY of your PAIN OR BURNING WITH URINATION at its WORST?'),
dcc.Dropdown(
id='painful_urination_severity',
options=[
{'label': 'None', 'value': 'None'},
{'label': 'Mild', 'value': 'Mild'},
{'label': 'Moderate', 'value': 'Moderate'},
{'label': 'Severe', 'value': 'Severe'},
{'label': 'Very severe', 'value': 'Very severe'}
],
value=None,
),
dcc.Markdown('##### Urinary Urgency'),
dcc.Markdown('###### In the last 7 days, how OFTEN did you feel an URGE TO URINATE ALL OF A SUDDEN?'),
dcc.Dropdown(
id='urinary_urgency_frequency',
options=[
{'label': 'Never', 'value': 'Never'},
{'label': 'Rarely', 'value': 'Rarely'},
{'label': 'Occasionally', 'value': 'Occasionally'},
{'label': 'Frequently', 'value': 'Frequently'},
{'label': 'Almost constantly', 'value': 'Almost constantly'}
],
value=None,
),
dcc.Markdown('###### In the last 7 days, how much did SUDDEN URGES TO URINATE INTERFERE with your usual or daily activities?'),
dcc.Dropdown(
id='urinary_urgency_adl',
options=[
{'label': 'Not at all', 'value': 'Not at all'},
{'label': 'A little bit', 'value': 'A little bit'},
{'label': 'Somewhat', 'value': 'Somewhat'},
{'label': 'Quite a bit', 'value': 'Quite a bit'},
{'label': 'Very much', 'value': 'Very much'}
],
value=None,
),
dcc.Markdown('##### Urinary Frequency'),
dcc.Markdown('###### In the last 7 days, were there times when you had to URINATE FREQUENTLY?'),
dcc.Dropdown(
id='urinary_frequency',
options=[
{'label': 'Never', 'value': 'Never'},
{'label': 'Rarely', 'value': 'Rarely'},
{'label': 'Occasionally', 'value': 'Occasionally'},
{'label': 'Frequently', 'value': 'Frequently'},
{'label': 'Almost constantly', 'value': 'Almost constantly'}
],
value=None,
),
dcc.Markdown('###### In the last 7 days, how much did FREQUENT URINATION INTERFERE with your usual or daily activities?'),
dcc.Dropdown(
id='urinary_frequency_interference',
options=[
{'label': 'Not at all', 'value': 'Not at all'},
{'label': 'A little bit', 'value': 'A little bit'},
{'label': 'Somewhat', 'value': 'Somewhat'},
{'label': 'Quite a bit', 'value': 'Quite a bit'},
{'label': 'Very much', 'value': 'Very much'}
],
value=None,
),
dcc.Markdown('##### Change in Usual Urine Color'),
dcc.Markdown('###### In the last 7 days, did you have any URINE COLOR CHANGE?'),
dcc.Dropdown(
id='urine_color_change',
options=[
{'label': 'Yes', 'value': 'Yes'},
{'label': 'No', 'value': 'No'}
],
value=None,
),
dcc.Markdown('##### Urinary Incontinence'),
dcc.Markdown('###### In the last 7 days, how OFTEN did you have LOSS OF CONTROL OF URINE (LEAKAGE)?'),
dcc.Dropdown(
id='urinary_incontinence_frequency',
options=[
{'label': 'Never', 'value': 'Never'},
{'label': 'Rarely', 'value': 'Rarely'},
{'label': 'Occasionally', 'value': 'Occasionally'},
{'label': 'Frequently', 'value': 'Frequently'},
{'label': 'Very much', 'value': 'Very much'},
{'label': 'Almost constantly', 'value': 'Almost constantly'}
],
value=None,
),
dcc.Markdown('###### In the last 7 days, how much did LOSS OF CONTROL OF URINE (LEAKAGE) INTERFERE with your usual or daily activities?'),
dcc.Dropdown(
id='urinary_incontinence_interference',
options=[
{'label': 'Not at all', 'value': 'Not at all'},
{'label': 'A little bit', 'value': 'A little bit'},
{'label': 'Somewhat', 'value': 'Somewhat'},
{'label': 'Quite a bit', 'value': 'Quite a bit'},
{'label': 'Very much', 'value': 'Very much'}
],
value=None,
),
html.P([html.Br()]),
dcc.Markdown("#### Radiation Skin Reaction"),
dcc.Markdown(
"###### In the last 7 days, what was the SEVERITY of your SKIN BURNS FROM RADIATION at their WORST?"
),
dcc.Dropdown(
id="radiation_skin_reaction_severity",
options=[
{"label": "None", "value": "None"},
{"label": "Mild", "value": "Mild"},
{"label": "Moderate", "value": "Moderate"},
{"label": "Severe", "value": "Severe"},
{"label": "Very severe", "value": "Very severe"},
{"label": "Not applicable", "value": "Not applicable"},
],
value=None,
),
html.P([html.Br()]),
dcc.Markdown('#### Last Question!'),
dcc.Markdown('###### Finally, do you have any other symptoms that you wish to report?'),
dcc.Input(
id='additional_symptoms',
placeholder='Type here...',
type='text',
value=''),
html.P([html.Br()]),
dcc.Markdown(
"###### Summarization Language"
),
dcc.Dropdown(
id="language",
options=[
{"label": "English", "value": "English"},
{"label": "French", "value": "French"},
{"label": "Portuguese", "value": "Portuguese"},
],
value=None,
),
html.Div(className="d-grid gap-2 d-flex justify-content-center", children=[
dcc.Loading(id="loading", type="circle", children=[
html.Button("Submit", id="submit_button", n_clicks=0, className="btn btn-lg btn-primary", style={"width": "200px"})
]),
]),
html.Br(),
html.Div([
html.Div([
html.Div('GPT 4', className='card-header'),
dcc.Loading(id="loading-summary", type="circle", children=[
html.Div([
html.H4('Radiation Oncology Patient Symptom Summary', className='card-title'),
html.P(id='summary', className='card-text')
], className='card-body')
])
], className='card border-primary mb-3', style={'max-width': '60rem', 'margin': '3 auto'})
], className='summary-container mx-auto', style={'width': '60%'}),
html.Br(),
html.Div([
dcc.Markdown('### Survey Results')
], style={'textAlign': 'center'}),
create_data_table(),
html.Br(),
dcc.Markdown("""#### Disclaimer"""),
'This application does not store any of the data provided. However, as it is using the ChatGPT API, OpenAI could potentially use the information provided. Do not provide any personal information.',
html.Br(),
html.Br(),
'All information contained on this application is for general informational purposes only and does not constitute medical or other professional advice. It should not be used for medical purpose. The information you find on this application is provided as is and to the extent permitted by law, the creators disclaim all other warranties of any kind, whether express, implied, statutory or otherwise. In no event will the authors or its affiliates be liable for any damages whatsoever, including without limitation, direct, indirect, incidental, special, exemplary, punitive or consequential damages, arising out of the use, inability to use or the results of use of this application, any websites linked to this application or the materials or information or services contained at any or all such websites, whether based on warranty, contract, tort or any other legal theory and whether or not advised of the possibility of such damages. If your use of the materials, information or services from this application or any website linked to this application results in the need for servicing, repair or correction of equipment or data, you assume all costs thereof.)',
html.Br(),
html.Br(),
dcc.Markdown("""#### About"""),
'Created by David J. Wu & Jean-Emmanuel Bibault - 2023',
], style=style)
@app.callback(
Output('summary', 'children'),
Output('results_table', 'data'),
Input('submit_button', 'n_clicks'),
State('number_of_treatments', 'value'),
State('gas', 'value'),
State('diarrhea_frequency', 'value'),
State('abdominal_pain_frequency', 'value'),
State('abdominal_pain_severity', 'value'),
State('abdominal_pain_adl', 'value'),
State('painful_urination_severity', 'value'),
State('urinary_urgency_frequency', 'value'),
State('urinary_urgency_adl', 'value'),
State('urinary_frequency', 'value'),
State('urinary_frequency_interference', 'value'),
State('urine_color_change', 'value'),
State('urinary_incontinence_frequency', 'value'),
State('urinary_incontinence_interference', 'value'),
State('radiation_skin_reaction_severity', 'value'),
State('fatigue_severity', 'value'),
State('fatigue_interference', 'value'),
State('additional_symptoms', 'value'),
State('language', 'value'),
)
def update_table_results(n_clicks, *responses):
if n_clicks == 0:
return None, []
questions = [
'Number of Radiation treatments',
'Increased passing of gas',
'Diarrhea frequency',
'Abdominal pain frequency',
'Abdominal pain severity',
'Abdominal pain with ADL',
'Painful urination severity',
'Urinary urgency frequency',
'Urinary urgency with ADL',
'Urinary frequency',
'Urinary frequency with ADL',
'Urine color change',
'Urinary incontinence frequency',
'Urinary incontinence with ADL',
'Radiation skin reaction severity',
'Fatigue severity',
'Fatigue with ADL',
'Additional symptoms',
]
data = [{'question': question, 'answer': response} for question, response in zip(questions, responses)]
language = responses[-1]
summary = summarize_table(data,language)
return summary, data
def summarize_table(data, language):
messages = [{
'role': 'system',
'content': f"You are an experienced radiation oncologist physician. You are provided this table of patient symptoms during their weekly follow-up visit during radiotherapy. Please summarize the following data into two sentences of natural language for your physician colleagues. Please put most important symptoms first. Provide the summarization in the {language} language. English Example - This patient with 7 radiation treatments is having severe abdominal pain, moderately affecting activities of daily living. Other symptoms include occasional diarrhea, mild rash.:"
}]
for row in data:
messages.append({
'role': 'user',
'content': f"{row['question']}: {row['answer']}"
})
messages.append({
'role': 'assistant',
'content': "Summary:"
})
response = openai.ChatCompletion.create(
model="gpt-4",
messages=messages,
n=1,
stop=None,
temperature=0.2,
)
summary = response.choices[0].message.content.strip()
return summary
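# --- Editor's illustrative sketch (not part of the original app) ---
# The shape of the `data` argument that summarize_table expects; the answers
# below are placeholders, not real patient data.
_example_summary_input = [
    {'question': 'Number of Radiation treatments', 'answer': '3'},
    {'question': 'Diarrhea frequency', 'answer': 'Occasionally'},
    {'question': 'Fatigue severity', 'answer': 'Mild'},
]
# summarize_table(_example_summary_input, language='English') would return a short
# natural-language summary string produced by the chat completion call above.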
if __name__ == '__main__':
app.run_server(debug=True)
| [
"You are an experienced radiation oncologist physician. You are provided this table of patient symptoms during their weekly follow-up visit during radiotherapy. Please summarize the following data into two sentences of natural language for your physician colleagues. Please put most important symptoms first. Provide the summarization in the PLACEHOLDER language. English Example - This patient with 7 radiation treatments is having severe abdominal pain, moderately affecting activities of daily living. Other symptoms include occasional diarrhea, mild rash.:",
"Summary:",
"PLACEHOLDER: PLACEHOLDER"
] |
2024-01-10 | davidjhwu/symptomintake | sintoma.py | import dash
from dash import dcc, html, dash_table # Updated import for dash_table
from dash.dependencies import Input, Output, State
from dash.exceptions import PreventUpdate
import pandas as pd
import openai
import os
import dash_bootstrap_components as dbc
from dotenv import load_dotenv
load_dotenv()
openai.api_key = os.getenv("OPENAI_API_KEY")
gpt_model = os.getenv("gpt_model")
# import pdfkit
# import smtplib
# from email.mime.multipart import MIMEMultipart
# from email.mime.base import MIMEBase
# from email import encoders
external_stylesheets = [
'https://maxcdn.bootstrapcdn.com/bootswatch/4.5.2/journal/bootstrap.min.css',
dbc.themes.BOOTSTRAP
]
app = dash.Dash(__name__, external_stylesheets=external_stylesheets)
app.config.suppress_callback_exceptions = True
server = app.server
style = {
'padding': '3.5em',
'backgroundColor': '#FFFFFF',
'fontFamily': 'Arial, sans-serif'
}
#Mapping for severity score of responses
severity_colors = {
"Nenhuma": '#008000',
"De maneira nenhuma": '#008000',
"Nunca": '#008000',
"Leve": '#90ee90',
"Um pouco": '#90ee90',
"Raramente": '#90ee90',
"Moderada": '#ffff00',
"Um tanto": '#ffff00',
"Ocasionalmente": '#ffff00',
"Severa": '#ffa500',
"Bastante": '#ffa500',
"Frequentemente": '#ffa500',
"Muito severa": '#ff0000',
"Muito": '#ff0000',
"Quase constantemente": '#ff0000',
"Não": '#008000',
"Sim": '#ff0000',
}
def create_data_table():
style_data_conditional = []
for response, color in severity_colors.items():
text_color = 'white' if color != '#ffff00' else 'black'
style_data_conditional.append({
'if': {
'filter_query': '{{answer}} = "{}"'.format(response),
'column_id': 'answer'
},
'backgroundColor': color,
'color': text_color
})
return dash_table.DataTable(
id='results_table',
columns=[
{'name': 'Pergunta', 'id': 'question'},
{'name': 'Resposta', 'id': 'answer'},
],
data=[],
style_cell={
'whiteSpace': 'normal',
'height': 'auto',
'textAlign': 'center',
'border': 'none',
},
style_header={
'backgroundColor': 'rgb(230, 230, 230)',
'fontWeight': 'bold',
'border': 'none',
},
style_table={
'margin': '0 auto',
'width': '50%'
},
style_data_conditional=style_data_conditional
)
app.layout = html.Div([
html.Img(src=app.get_asset_url('inca.png'), style={'height': '100px', 'width': 'auto'}),
# ... the rest of your layout
dcc.Markdown('# Formulário de Revisão de Sintomas do Paciente em Radioterapia'),
html.P([html.Br()]),
dcc.Markdown('#### Por favor, responda às seguintes perguntas sobre seus sintomas atuais'),
dcc.Markdown('Cada formulário deve ser preenchido com cuidado, os resultados serão enviados ao seu médico'),
dcc.Markdown('#### Perguntas Gerais'),
dcc.Markdown('###### Quantos tratamentos de radiação você já fez? Tudo bem se você não souber.'),
dcc.Input(
id='number_of_treatments',
placeholder='Insira um valor',
type='text',
value='ex: 3, ou Não sei'),
html.P([html.Br()]),
dcc.Markdown('### Perguntas sobre Sintomas'),
dcc.Markdown('Para cada uma das seguintes perguntas, vou pedir que você classifique seus sintomas.'),
dcc.Markdown("#### Fadiga"),
dcc.Markdown(
"###### Nos últimos 7 dias, qual foi a GRAVIDADE da sua FADIGA, CANSAÇO OU FALTA DE ENERGIA no seu pior momento?"
),
dcc.Dropdown(
id="fatigue_severity",
options=[
{"label": "Nenhuma", "value": "Nenhuma"},
{"label": "Leve", "value": "Leve"},
{"label": "Moderada", "value": "Moderada"},
{"label": "Severa", "value": "Severa"},
{"label": "Muito severa", "value": "Muito severa"},
],
value=None,
),
dcc.Markdown(
"###### Nos últimos 7 dias, quanto a FADIGA, CANSAÇO OU FALTA DE ENERGIA interferiram nas suas atividades habituais ou diárias?"
),
dcc.Dropdown(
id="fatigue_interference",
options=[
{"label": "De maneira nenhuma", "value": "De maneira nenhuma"},
{"label": "Um pouco", "value": "Um pouco"},
{"label": "Um tanto", "value": "Um tanto"},
{"label": "Bastante", "value": "Bastante"},
],
value=None,
),
html.P([html.Br()]),
dcc.Markdown('#### Gases'),
dcc.Markdown('###### Nos últimos 7 dias, você teve um AUMENTO na EMISSÃO DE GASES (FLATULÊNCIA)?'),
dcc.Dropdown(
id='gas',
options=[
{'label': 'Sim', 'value': 'Sim'},
{'label': 'Não', 'value': 'Não'}
],
value=None,
),
dcc.Markdown('#### Diarreia'),
dcc.Markdown('###### Nos últimos 7 dias, com que FREQUÊNCIA você teve FEZES SOLTAS OU LÍQUIDAS (DIARREIA)?'),
dcc.Dropdown(
id='diarrhea_frequency',
options=[
{'label': 'Nunca', 'value': 'Nunca'},
{'label': 'Raramente', 'value': 'Raramente'},
{'label': 'Ocasionalmente', 'value': 'Ocasionalmente'},
{'label': 'Frequentemente', 'value': 'Frequentemente'},
{'label': 'Quase constantemente', 'value': 'Quase constantemente'}
],
value=None,
),
dcc.Markdown('#### Dor Abdominal'),
dcc.Markdown('###### Nos últimos 7 dias, com que FREQUÊNCIA você sentiu DOR NO ABDÔMEN (REGIÃO DA BARRIGA)?'),
dcc.Dropdown(
id='abdominal_pain_frequency',
options=[
{'label': 'Nunca', 'value': 'Nunca'},
{'label': 'Raramente', 'value': 'Raramente'},
{'label': 'Ocasionalmente', 'value': 'Ocasionalmente'},
{'label': 'Frequentemente', 'value': 'Frequentemente'},
{'label': 'Quase constantemente', 'value': 'Quase constantemente'}
],
value=None,
),
dcc.Markdown('###### Nos últimos 7 dias, qual foi a GRAVIDADE da sua DOR NO ABDÔMEN (REGIÃO DA BARRIGA) no seu pior momento?'),
dcc.Dropdown(
id='abdominal_pain_severity',
options=[
{'label': 'Nenhuma', 'value': 'Nenhuma'},
{'label': 'Leve', 'value': 'Leve'},
{'label': 'Moderada', 'value': 'Moderada'},
{'label': 'Severa', 'value': 'Severa'},
{'label': 'Muito severa', 'value': 'Muito severa'}
],
value=None,
),
dcc.Markdown('###### Nos últimos 7 dias, quanto a DOR NO ABDÔMEN (REGIÃO DA BARRIGA) interferiu nas suas atividades habituais ou diárias?'),
dcc.Dropdown(
id='abdominal_pain_adl',
options=[
{'label': 'De maneira nenhuma', 'value': 'De maneira nenhuma'},
{'label': 'Um pouco', 'value': 'Um pouco'},
{'label': 'Um tanto', 'value': 'Um tanto'},
{'label': 'Bastante', 'value': 'Bastante'},
{'label': 'Muito', 'value': 'Muito'}
],
value=None,
),
html.P([html.Br()]),
dcc.Markdown('Agora vamos discutir seus sintomas urinários.'),
dcc.Markdown('#### Sintomas Urinários'),
dcc.Markdown('##### Micção Dolorosa'),
dcc.Markdown('###### Nos últimos 7 dias, qual foi a GRAVIDADE da sua DOR OU QUEIMAÇÃO AO URINAR no seu pior momento?'),
dcc.Dropdown(
id='painful_urination_severity',
options=[
{'label': 'Nenhuma', 'value': 'Nenhuma'},
{'label': 'Leve', 'value': 'Leve'},
{'label': 'Moderada', 'value': 'Moderada'},
{'label': 'Severa', 'value': 'Severa'},
{'label': 'Muito severa', 'value': 'Muito severa'}
],
value=None,
),
dcc.Markdown('##### Urgência Urinária'),
dcc.Markdown('###### Nos últimos 7 dias, com que FREQUÊNCIA você sentiu uma URGENCIA REPENTINA DE URINAR?'),
dcc.Dropdown(
id='urinary_urgency_frequency',
options=[
{'label': 'Nunca', 'value': 'Nunca'},
{'label': 'Raramente', 'value': 'Raramente'},
{'label': 'Ocasionalmente', 'value': 'Ocasionalmente'},
{'label': 'Frequentemente', 'value': 'Frequentemente'},
{'label': 'Quase constantemente', 'value': 'Quase constantemente'}
],
value=None,
),
dcc.Markdown('###### Nos últimos 7 dias, quanto as REPENTINAS URGENCIAS DE URINAR interferiram nas suas atividades habituais ou diárias?'),
dcc.Dropdown(
id='urinary_urgency_adl',
options=[
{'label': 'De maneira nenhuma', 'value': 'De maneira nenhuma'},
{'label': 'Um pouco', 'value': 'Um pouco'},
{'label': 'Um tanto', 'value': 'Um tanto'},
{'label': 'Bastante', 'value': 'Bastante'},
{'label': 'Muito', 'value': 'Muito'}
],
value=None,
),
dcc.Markdown('##### Frequência Urinária'),
dcc.Markdown('###### Nos últimos 7 dias, houve momentos em que você teve que URINAR FREQUENTEMENTE?'),
dcc.Dropdown(
id='urinary_frequency',
options=[
{'label': 'Nunca', 'value': 'Nunca'},
{'label': 'Raramente', 'value': 'Raramente'},
{'label': 'Ocasionalmente', 'value': 'Ocasionalmente'},
{'label': 'Frequentemente', 'value': 'Frequentemente'},
{'label': 'Quase constantemente', 'value': 'Quase constantemente'}
],
value=None,
),
dcc.Markdown('###### Nos últimos 7 dias, quanto a FREQUENTE MICÇÃO interferiu nas suas atividades habituais ou diárias?'),
dcc.Dropdown(
id='urinary_frequency_interference',
options=[
{'label': 'De maneira nenhuma', 'value': 'De maneira nenhuma'},
{'label': 'Um pouco', 'value': 'Um pouco'},
{'label': 'Um tanto', 'value': 'Um tanto'},
{'label': 'Bastante', 'value': 'Bastante'},
{'label': 'Muito', 'value': 'Muito'}
],
value=None,
),
dcc.Markdown('##### Mudança na Cor Usual da Urina'),
dcc.Markdown('###### Nos últimos 7 dias, você teve alguma MUDANÇA NA COR DA URINA?'),
dcc.Dropdown(
id='urine_color_change',
options=[
{'label': 'Sim', 'value': 'Sim'},
{'label': 'Não', 'value': 'Não'}
],
value=None,
),
dcc.Markdown('##### Incontinência Urinária'),
dcc.Markdown('###### Nos últimos 7 dias, com que FREQUÊNCIA você teve PERDA DE CONTROLE DA URINA (VAZAMENTO)?'),
dcc.Dropdown(
id='urinary_incontinence_frequency',
options=[
{'label': 'Nunca', 'value': 'Nunca'},
{'label': 'Raramente', 'value': 'Raramente'},
{'label': 'Ocasionalmente', 'value': 'Ocasionalmente'},
{'label': 'Frequentemente', 'value': 'Frequentemente'},
{'label': 'Muito', 'value': 'Muito'},
{'label': 'Quase constantemente', 'value': 'Quase constantemente'}
],
value=None,
),
dcc.Markdown('###### Nos últimos 7 dias, quanto a PERDA DE CONTROLE DA URINA (VAZAMENTO) interferiu nas suas atividades habituais ou diárias?'),
dcc.Dropdown(
id='urinary_incontinence_interference',
options=[
{'label': 'De maneira nenhuma', 'value': 'De maneira nenhuma'},
{'label': 'Um pouco', 'value': 'Um pouco'},
{'label': 'Um tanto', 'value': 'Um tanto'},
{'label': 'Bastante', 'value': 'Bastante'},
{'label': 'Muito', 'value': 'Muito'}
],
value=None,
),
html.P([html.Br()]),
dcc.Markdown("#### Reação da Pele à Radiação"),
dcc.Markdown(
"###### Nos últimos 7 dias, qual foi a GRAVIDADE das QUEIMADURAS NA PELE DEVIDO À RADIAÇÃO no seu pior momento?"
),
dcc.Dropdown(
id="radiation_skin_reaction_severity",
options=[
{"label": "Nenhuma", "value": "Nenhuma"},
{"label": "Leve", "value": "Leve"},
{"label": "Moderada", "value": "Moderada"},
{"label": "Severa", "value": "Severa"},
{"label": "Muito severa", "value": "Muito severa"},
{"label": "Não aplicável", "value": "Não aplicável"},
],
value=None,
),
html.P([html.Br()]),
dcc.Markdown('#### Última Pergunta!'),
dcc.Markdown('###### Finalmente, você tem outros sintomas que deseja relatar?'),
dcc.Input(
id='additional_symptoms',
placeholder='Digite aqui...',
type='text',
value=''),
html.P([html.Br()]),
html.Div(className="d-grid gap-2 d-flex justify-content-center", children=[
dcc.Loading(id="loading", type="circle", children=[
html.Button("Enviar", id="submit_button", n_clicks=0, className="btn btn-lg btn-primary", style={"width": "200px"})
]),
]),
html.Br(),
html.Div([
html.Div([
html.Div('GPT 4', className='card-header'),
dcc.Loading(id="loading-summary", type="circle", children=[
html.Div([
html.H4('Resumo dos Sintomas do Paciente em Oncologia Radiológica', className='card-title'),
html.P(id='summary', className='card-text')
], className='card-body')
])
], className='card border-primary mb-3', style={'max-width': '60rem', 'margin': '3 auto'})
], className='summary-container mx-auto', style={'width': '60%'}),
html.Br(),
html.Div([
dcc.Markdown('### Resultados da Pesquisa')
], style={'textAlign': 'center'}),
create_data_table(),
html.Br(),
dcc.Markdown("""#### Aviso Legal"""),
'Esta aplicação não armazena nenhum dos dados fornecidos. No entanto, como está utilizando a API OpenAI GPT, a OpenAI poderia potencialmente usar as informações fornecidas. Não forneça nenhuma informação pessoal.',
html.Br(),
html.Br(),
'Todas as informações contidas nesta aplicação são apenas para fins informativos gerais e não constituem aconselhamento médico ou profissional. As informações encontradas nesta aplicação são fornecidas como estão e na medida permitida por lei, os criadores renunciam a todas as outras garantias de qualquer tipo, sejam expressas, implícitas, estatutárias ou de outra forma. Em nenhum caso os autores ou seus afiliados serão responsáveis por quaisquer danos de qualquer tipo, incluindo, sem limitação, danos diretos, indiretos, incidentais, especiais, exemplares, punitivos ou consequentes, decorrentes do uso, incapacidade de usar ou dos resultados do uso desta aplicação, de quaisquer sites vinculados a esta aplicação ou dos materiais, informações ou serviços contidos em todos esses sites, seja com base em garantia, contrato, delito ou qualquer outra teoria legal e independentemente de ter sido ou não avisado da possibilidade de tais danos. Se o uso dos materiais, informações ou serviços desta aplicação ou de qualquer site vinculado a esta aplicação resultar na necessidade de manutenção, reparo ou correção de equipamentos ou dados, você assume todos os custos relacionados.)',
html.Br(),
html.Br(),
dcc.Markdown("""#### Sobre"""),
'Criado por David JH Wu & Jean-Emmanuel Bibault - 2023',
], style=style)
@app.callback(
Output('summary', 'children'),
Output('results_table', 'data'),
Input('submit_button', 'n_clicks'),
State('number_of_treatments', 'value'),
State('gas', 'value'),
State('diarrhea_frequency', 'value'),
State('abdominal_pain_frequency', 'value'),
State('abdominal_pain_severity', 'value'),
State('abdominal_pain_adl', 'value'),
State('painful_urination_severity', 'value'),
State('urinary_urgency_frequency', 'value'),
State('urinary_urgency_adl', 'value'),
State('urinary_frequency', 'value'),
State('urinary_frequency_interference', 'value'),
State('urine_color_change', 'value'),
State('urinary_incontinence_frequency', 'value'),
State('urinary_incontinence_interference', 'value'),
State('radiation_skin_reaction_severity', 'value'),
State('fatigue_severity', 'value'),
State('fatigue_interference', 'value'),
State('additional_symptoms', 'value'),
)
def update_table_results(n_clicks, *responses):
if n_clicks == 0:
return None, []
questions = [
'Número de tratamentos de radiação',
'Aumento na emissão de gases',
'Frequência de diarreia',
'Frequência de dor abdominal',
'Gravidade da dor abdominal',
'Dor abdominal com Atividades da Vida Diária',
'Gravidade da micção dolorosa',
'Frequência de urgência urinária',
'Urgência urinária com Atividades da Vida Diária',
'Frequência urinária',
'Frequência urinária com Atividades da Vida Diária',
'Mudança na cor da urina',
'Frequência de incontinência urinária',
'Incontinência urinária com Atividades da Vida Diária',
'Gravidade da reação da pele à radiação',
'Gravidade da fadiga',
'Fadiga com Atividades da Vida Diária',
'Sintomas adicionais',
]
data = [{'question': question, 'answer': response} for question, response in zip(questions, responses)]
summary = summarize_table(data)
return summary, data
def summarize_table(data):
messages = [{
'role': 'system',
'content': f"You are an experienced radiation oncologist physician. You are provided this table of patient symptoms during their weekly follow-up visit during radiotherapy. Please take a deep breath, think step-by-step, and summarize the following data into three sentences of natural language for your physician colleagues. Please put the most important symptoms first. Provide the summarization in the Brazilian Portuguese language. English Example - This patient with 7 radiation treatments is having severe abdominal pain, moderately affecting activities of daily living. Other symptoms include occasional diarrhea, mild rash.:"
}]
for row in data:
messages.append({
'role': 'user',
'content': f"{row['question']}: {row['answer']}"
})
messages.append({
'role': 'assistant',
'content': "Summary:"
})
response = openai.ChatCompletion.create(
model=gpt_model,
messages=messages,
n=1,
stop=None,
temperature=0.2,
)
summary = response.choices[0].message.content.strip()
return summary
if __name__ == '__main__':
app.run_server(debug=True)
| [
"PLACEHOLDER: PLACEHOLDER",
"Summary:",
"You are an experienced radiation oncologist physician. You are provided this table of patient symptoms during their weekly follow-up visit during radiotherapy. Please take a deep breath, think step-by-step, and summarize the following data into three sentences of natural language for your physician colleagues. Please put the most important symptoms first. Provide the summarization in the Brazilian Portuguese language. English Example - This patient with 7 radiation treatments is having severe abdominal pain, moderately affecting activities of daily living. Other symptoms include occasional diarrhea, mild rash.:"
] |
2024-01-10 | asseBadiane/ChatGPT | src~revChatGPT~revChatGPT.py | # Author: @acheong08@fosstodon.org
# License: MIT
# Description: A Python wrapper for OpenAI's chatbot API
import json
import uuid
import requests
from OpenAIAuth.OpenAIAuth import OpenAIAuth, Debugger
def generate_uuid() -> str:
uid = str(uuid.uuid4())
return uid
class Chatbot:
config: json
conversation_id: str
parent_id: str
headers: dict
conversation_id_prev: str
parent_id_prev: str
    def __init__(self, config, conversation_id=None, debug=False) -> None:
self.debugger = Debugger(debug)
self.debug = debug
self.config = config
self.conversation_id = conversation_id
self.parent_id = generate_uuid()
if "session_token" in config or ("email" in config and "password" in config):
self.refresh_session()
# Resets the conversation ID and parent ID
def reset_chat(self) -> None:
self.conversation_id = None
self.parent_id = generate_uuid()
# Refreshes the headers -- Internal use only
def refresh_headers(self) -> None:
if "Authorization" not in self.config:
self.config["Authorization"] = ""
elif self.config["Authorization"] is None:
self.config["Authorization"] = ""
self.headers = {
"Host": "chat.openai.com",
"Accept": "text/event-stream",
"Authorization": "Bearer " + self.config["Authorization"],
"Content-Type": "application/json",
"User-Agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/605.1.15 (KHTML, like Gecko) "
"Version/16.1 Safari/605.1.15",
"X-Openai-Assistant-App-Id": "",
"Connection": "close",
"Accept-Language": "en-US,en;q=0.9",
"Referer": "https://chat.openai.com/chat",
}
# Generator for chat stream -- Internal use only
def get_chat_stream(self, data) -> None:
response = requests.post(
"https://chat.openai.com/backend-api/conversation",
headers=self.headers,
data=json.dumps(data),
stream=True,
timeout=50,
)
for line in response.iter_lines():
try:
line = line.decode("utf-8")
if line == "":
continue
line = line[6:]
line = json.loads(line)
try:
message = line["message"]["content"]["parts"][0]
self.conversation_id = line["conversation_id"]
self.parent_id = line["message"]["id"]
                except Exception:
                    continue
yield {
"message": message,
"conversation_id": self.conversation_id,
"parent_id": self.parent_id,
}
            except Exception:
                continue
# Gets the chat response as text -- Internal use only
def get_chat_text(self, data) -> dict:
# Create request session
s = requests.Session()
# set headers
s.headers = self.headers
# Set multiple cookies
if "session_token" in self.config:
s.cookies.set(
"__Secure-next-auth.session-token",
self.config["session_token"],
)
s.cookies.set(
"__Secure-next-auth.callback-url",
"https://chat.openai.com/",
)
# Set proxies
if self.config.get("proxy", "") != "":
s.proxies = {
"http": self.config["proxy"],
"https": self.config["proxy"],
}
response = s.post(
"https://chat.openai.com/backend-api/conversation",
data=json.dumps(data),
)
try:
response = response.text.splitlines()[-4]
response = response[6:]
except Exception as exc:
self.debugger.log("Incorrect response from OpenAI API")
self.debugger.log(response.text)
raise Exception("Incorrect response from OpenAI API") from exc
response = json.loads(response)
self.parent_id = response["message"]["id"]
self.conversation_id = response["conversation_id"]
message = response["message"]["content"]["parts"][0]
return {
"message": message,
"conversation_id": self.conversation_id,
"parent_id": self.parent_id,
}
# Gets the chat response
def get_chat_response(self, prompt, output="text") -> dict or None:
data = {
"action": "next",
"messages": [
{
"id": str(generate_uuid()),
"role": "user",
"content": {"content_type": "text", "parts": [prompt]},
},
],
"conversation_id": self.conversation_id,
"parent_message_id": self.parent_id,
"model": "text-davinci-002-render",
}
self.conversation_id_prev = self.conversation_id
self.parent_id_prev = self.parent_id
if output == "text":
return self.get_chat_text(data)
elif output == "stream":
return self.get_chat_stream(data)
else:
raise ValueError("Output must be either 'text' or 'stream'")
def rollback_conversation(self) -> None:
self.conversation_id = self.conversation_id_prev
self.parent_id = self.parent_id_prev
    def refresh_session(self) -> None:
if (
"session_token" not in self.config
and ("email" not in self.config or "password" not in self.config)
and "Authorization" not in self.config
):
error = ValueError("No tokens provided")
self.debugger.log(error)
raise error
elif "session_token" in self.config:
if (
self.config["session_token"] is None
or self.config["session_token"] == ""
):
raise ValueError("No tokens provided")
s = requests.Session()
if self.config.get("proxy", "") != "":
s.proxies = {
"http": self.config["proxy"],
"https": self.config["proxy"],
}
# Set cookies
s.cookies.set(
"__Secure-next-auth.session-token",
self.config["session_token"],
)
# s.cookies.set("__Secure-next-auth.csrf-token", self.config['csrf_token'])
response = s.get(
"https://chat.openai.com/api/auth/session",
headers={
"User-Agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/605.1.15 (KHTML, "
"like Gecko) Version/16.1 Safari/605.1.15 ",
},
)
try:
self.config["session_token"] = response.cookies.get(
"__Secure-next-auth.session-token",
)
self.config["Authorization"] = response.json()["accessToken"]
self.refresh_headers()
except Exception as exc:
print("Error refreshing session")
self.debugger.log(response.text)
raise Exception("Error refreshing session") from exc
elif "email" in self.config and "password" in self.config:
try:
self.login(self.config["email"], self.config["password"])
except Exception as exc:
self.debugger.log("Login failed")
raise exc
elif "Authorization" in self.config:
self.refresh_headers()
return
else:
raise ValueError("No tokens provided")
def login(self, email, password) -> None:
self.debugger.log("Logging in...")
use_proxy = False
proxy = None
if "proxy" in self.config:
if self.config["proxy"] != "":
use_proxy = True
proxy = self.config["proxy"]
auth = OpenAIAuth(email, password, use_proxy, proxy, debug=self.debug)
try:
auth.begin()
except Exception as exc:
# if ValueError with e as "Captcha detected" fail
if "Captcha detected" in str(exc):
self.debugger.log(
"Captcha not supported. Use session tokens instead.")
raise ValueError("Captcha detected") from exc
raise exc
if auth.access_token is not None:
self.config["Authorization"] = auth.access_token
if auth.session_token is not None:
self.config["session_token"] = auth.session_token
else:
possible_tokens = auth.session.cookies.get(
"__Secure-next-auth.session-token",
)
if possible_tokens is not None:
if len(possible_tokens) > 1:
self.config["session_token"] = possible_tokens[0]
else:
try:
self.config["session_token"] = possible_tokens
except Exception as exc:
raise Exception("Error logging in") from exc
self.refresh_headers()
else:
raise Exception("Error logging in")
| [
"text",
"content_type"
] |
2024-01-10 | akshayballal95/private_gpt | hugging_face.py | from langchain import ConversationChain, LLMChain, PromptTemplate
from youtube_transcript_api import YouTubeTranscriptApi
from langchain.embeddings import HuggingFaceEmbeddings
from langchain.indexes import VectorstoreIndexCreator
from langchain.document_loaders import TextLoader, DirectoryLoader
from langchain.llms import GPT4All
from langchain.llms import OpenAI
import os
from langchain.memory import VectorStoreRetrieverMemory
from langchain.callbacks import get_openai_callback
import streamlit as st
from streamlit_chat import message
from dotenv import load_dotenv
import torch
import transformers
load_dotenv()
video_links = ["9lVj_DZm36c", "ZUN3AFNiEgc", "8KtDLu4a-EM"]
if os.path.exists('transcripts'):
print('Directory already exists')
else:
os.mkdir('transcripts')
for video_id in video_links:
dir = os.path.join('transcripts', video_id)
transcript = YouTubeTranscriptApi.get_transcript(video_id)
with open(dir+'.txt', 'w') as f:
for line in transcript:
f.write(f"{line['text']}\n")
loader = DirectoryLoader(path='./', glob = "**/*.txt", loader_cls=TextLoader,
show_progress=True)
embeddings = HuggingFaceEmbeddings(model_name="all-MiniLM-L6-v2")
name = 'mosaicml/mpt-7b-instruct'
config = transformers.AutoConfig.from_pretrained(name, trust_remote_code=True)
config.attn_config['attn_impl'] = 'torch'
config.init_device = 'cuda:0' # For fast initialization directly on GPU!
model = transformers.AutoModelForCausalLM.from_pretrained(
name,
config=config,
torch_dtype=torch.bfloat16, # Load model weights in bfloat16
trust_remote_code=True
)
from torch import cuda
device = f'cuda:{cuda.current_device()}' if cuda.is_available() else 'cpu'
tokenizer = transformers.AutoTokenizer.from_pretrained("EleutherAI/gpt-neox-20b")
import torch
from transformers import StoppingCriteria, StoppingCriteriaList
# mpt-7b is trained to add "<|endoftext|>" at the end of generations
stop_token_ids = tokenizer.convert_tokens_to_ids(["<|endoftext|>"])
# define custom stopping criteria object
class StopOnTokens(StoppingCriteria):
def __call__(self, input_ids: torch.LongTensor, scores: torch.FloatTensor, **kwargs) -> bool:
for stop_id in stop_token_ids:
if input_ids[0][-1] == stop_id:
return True
return False
stopping_criteria = StoppingCriteriaList([StopOnTokens()])
generate_text = transformers.pipeline(
model=model, tokenizer=tokenizer,
return_full_text=True, # langchain expects the full text
task='text-generation',
device=device,
# we pass model parameters here too
stopping_criteria=stopping_criteria, # without this model will ramble
temperature=0.1, # 'randomness' of outputs, 0.0 is the min and 1.0 the max
top_p=0.15, # select from top tokens whose probability add up to 15%
top_k=0, # select from top 0 tokens (because zero, relies on top_p)
max_new_tokens=1024,  # max number of tokens to generate in the output
repetition_penalty=1.1 # without this output begins repeating
)
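# Quick sanity check (illustrative only): the raw pipeline can be called directly, e.g.
#   print(generate_text("Explain instruction tuning in one sentence.")[0]["generated_text"])
# before it is wrapped for LangChain below.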
from langchain.llms import HuggingFacePipeline
llm = HuggingFacePipeline(pipeline=generate_text)
index = VectorstoreIndexCreator(embedding=embeddings).from_loaders([loader])
retriever = index.vectorstore.as_retriever(search_kwargs=dict(k=1))
memory = VectorStoreRetrieverMemory(retriever=retriever)
# llm = GPT4All(model="./ggml-mpt-7b-instruct.bin", top_p=0.15, temp=0.3, repeat_penalty=1.1, n_ctx=4096)
# llm = OpenAI(temperature=0.5)
_DEFAULT_TEMPLATE = """The following is a friendly conversation between a human and an AI. The AI is smart and provides relevant information. If the AI does not know the answer to a question, it truthfully says it does not know.
Relevant pieces of previous conversation:
{history}
(You do not need to use these pieces of information if not relevant)
Current conversation:
Human: {input}
AI:"""
PROMPT = PromptTemplate(
input_variables=["history", "input"], template=_DEFAULT_TEMPLATE
)
conversation_with_summary = ConversationChain(
llm=llm,
prompt=PROMPT,
# We set a very low max_token_limit for the purposes of testing.
# memory=memory,
)
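# Note: with the vector-store memory commented out above, ConversationChain falls back to
# its default ConversationBufferMemory, so {history} is filled with the raw chat transcript.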
while True:
user_input = input("You: ")
if user_input == "quit":
break
output=conversation_with_summary.predict(input = user_input)
print(output)
# st.set_page_config(
# page_title="YouTubeGPT",
# page_icon=":robot:"
# )
# st.header("YouTubeGPT")
# st.markdown("[Github](https://github.com/akshayballal95)")
# if 'generated' not in st.session_state:
# st.session_state['generated'] = []
# if 'past' not in st.session_state:
# st.session_state['past'] = []
# def get_text():
# input_text = st.text_input("You: ","Hello, how are you?", key="input")
# return input_text
# user_input = get_text()
# if user_input:
# output=conversation_with_summary.predict(input = user_input)
# st.session_state.past.append(user_input)
# st.session_state.generated.append(output)
# if st.session_state['generated']:
# for i in range(len(st.session_state['generated'])-1, -1, -1):
# message(st.session_state["generated"][i], key=str(i))
# message(st.session_state['past'][i], is_user=True, key=str(i) + '_user')
| [
"The following is a friendly conversation between a human and an AI. The AI is smart and provides relevant information. If the AI does not know the answer to a question, it truthfully says it does not know.\nRelevant pieces of previous conversation:\n{history}\n\n(You do not need to use these pieces of information if not relevant)\n\nCurrent conversation:\nHuman: {input}\nAI:",
"input"
] |
2024-01-10 | akshayballal95/private_gpt | transcript_local.py | from langchain import ConversationChain,PromptTemplate
import torch
from youtube_transcript_api import YouTubeTranscriptApi
from langchain.embeddings import HuggingFaceEmbeddings
from langchain.indexes import VectorstoreIndexCreator
from langchain.document_loaders import TextLoader, DirectoryLoader
from langchain.llms import GPT4All
import os
from langchain.memory import VectorStoreRetrieverMemory
from langchain.callbacks import get_openai_callback
import streamlit as st
from streamlit_chat import message
from dotenv import load_dotenv
from langchain.callbacks.streaming_stdout import StreamingStdOutCallbackHandler
load_dotenv()
video_links = ["9lVj_DZm36c", "ZUN3AFNiEgc", "8KtDLu4a-EM"]
if os.path.exists('transcripts'):
print('Directory already exists')
else:
os.mkdir('transcripts')
for video_id in video_links:
dir = os.path.join('transcripts', video_id)
transcript = YouTubeTranscriptApi.get_transcript(video_id)
with open(dir+'.txt', 'w') as f:
for line in transcript:
f.write(f"{line['text']}\n")
loader = DirectoryLoader(path='./', glob = "**/*.txt", loader_cls=TextLoader,
show_progress=True)
embeddings = HuggingFaceEmbeddings(model_name="all-MiniLM-L6-v2")
index = VectorstoreIndexCreator(embedding=embeddings).from_loaders([loader])
retriever = index.vectorstore.as_retriever(search_kwargs=dict(k=1))
memory = VectorStoreRetrieverMemory(retriever=retriever)
llm = GPT4All(model="./ggml-mpt-7b-instruct.bin", n_ctx=2048,
top_p=0.15, temp=0.3, repeat_penalty=1.1, n_threads = 12, n_batch= 8
)
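# Note: n_threads=12 and n_batch=8 above are machine-specific tuning values and may need to be
# lowered on smaller CPUs; n_ctx=2048 bounds how much prompt plus retrieved history fits in one call.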
_DEFAULT_TEMPLATE = """
Below is an instruction that describes a task. Write a response that appropriately completes the request.
###Instruction: The following is a friendly conversation between a human and an AI. The AI is talkative and provides lots of specific details from its context. If the AI does not know the answer to a question, it truthfully says it does not know.
Do not make up answers and provide only information that you have.
Relevant pieces of previous conversation:
{history}
(You do not need to use these pieces of information if not relevant)
{input}
### Response:
"""
PROMPT = PromptTemplate(
input_variables=["history", "input"], template=_DEFAULT_TEMPLATE
)
conversation_with_summary = ConversationChain(
llm=llm,
prompt=PROMPT,
# We set a very low max_token_limit for the purposes of testing.
memory=memory,
callbacks=[StreamingStdOutCallbackHandler()]
)
st.set_page_config(
page_title="YouTubeGPT",
page_icon=":robot:"
)
st.header("YouTubeGPT")
st.markdown("[Github](https://github.com/akshayballal95)")
if 'generated' not in st.session_state:
st.session_state['generated'] = []
if 'past' not in st.session_state:
st.session_state['past'] = []
def get_text():
input_text = st.text_input("You: ","Hello, how are you?", key="input")
return input_text
user_input = get_text()
def writeText(output):
st.session_state.generated.append(output)
if user_input:
with torch.inference_mode():
st.session_state.past.append(user_input)
st.session_state.generated.append(conversation_with_summary.predict(input = user_input))
if st.session_state['generated']:
for i in range(len(st.session_state['generated'])-1, -1, -1):
message(st.session_state["generated"][i], key=str(i))
message(st.session_state['past'][i], is_user=True, key=str(i) + '_user')
| [
"\nBelow is an instruction that describes a task. Write a response that appropriately completes the request.\n###Instruction: The following is a friendly conversation between a human and an AI. The AI is talkative and provides lots of specific details from its context. If the AI does not know the answer to a question, it truthfully says it does not know.\nDo not make up answers and provide only information that you have.\nRelevant pieces of previous conversation:\n{history}\n\n(You do not need to use these pieces of information if not relevant)\n{input}\n\n### Response:\n",
"input"
] |
2024-01-10 | akshayballal95/private_gpt | private_gpt.py | from langchain import ConversationChain,PromptTemplate
from langchain.embeddings import HuggingFaceEmbeddings
from langchain.indexes import VectorstoreIndexCreator
from langchain.document_loaders import TextLoader, DirectoryLoader
from langchain.llms import GPT4All
from langchain.memory import VectorStoreRetrieverMemory
import streamlit as st
from streamlit_chat import message
import torch
loader = DirectoryLoader('D:/OneDrive/Documents/Obsidian/Projects/myVault/', glob="**/*.md",recursive=True, show_progress=True, use_multithreading=True, loader_cls=TextLoader)
docs = loader.load()
len(docs)
embeddings = HuggingFaceEmbeddings(model_name="all-mpnet-base-v2")
llm = GPT4All(model="./ggml-mpt-7b-instruct.bin", top_p=0.5, top_k=0, temp=0.5, repeat_penalty=1.1, n_threads=12, n_batch=8, n_ctx=2048)
index = VectorstoreIndexCreator(embedding=embeddings).from_loaders([loader])
retriever = index.vectorstore.as_retriever(search_kwargs=dict(k=2))
memory = VectorStoreRetrieverMemory(retriever=retriever)
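# Note: VectorStoreRetrieverMemory fills the prompt's {history} variable with the k=2 most
# similar stored snippets from the vector index rather than a literal chat transcript.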
_DEFAULT_TEMPLATE = """
Below is an instruction that describes a task. Write a response that appropriately completes the request.
###Instruction: The following is a friendly conversation between a human and an AI. The AI is talkative and provides lots of specific details from its context. If the AI does not know the answer to a question, it truthfully says it does not know.
Do not make up answers and provide only information that you have.
Relevant pieces of previous conversation:
{history}
(You do not need to use these pieces of information if not relevant)
{input}
### Response:
"""
PROMPT = PromptTemplate(
input_variables=[ "history", "input"], template=_DEFAULT_TEMPLATE
)
conversation_with_summary = ConversationChain(
llm=llm,
prompt=PROMPT,
# We set a very low max_token_limit for the purposes of testing.
memory = memory,
verbose=True
)
st.set_page_config(
page_title="PrivateGPT",
page_icon=":robot:"
)
st.header("PrivateGPT")
if 'generated' not in st.session_state:
st.session_state['generated'] = []
if 'past' not in st.session_state:
st.session_state['past'] = []
def get_text():
input_text = st.text_input("You: ","Hello, how are you?", key="input")
return input_text
user_input = get_text()
def writeText(output):
st.session_state.generated.append(output)
if user_input:
with torch.inference_mode():
st.session_state.past.append(user_input)
st.session_state.generated.append(conversation_with_summary.predict(input = user_input))
if st.session_state['generated']:
for i in range(len(st.session_state['generated'])-1, -1, -1):
message(st.session_state["generated"][i], key=str(i))
message(st.session_state['past'][i], is_user=True, key=str(i) + '_user')
| [
"\nBelow is an instruction that describes a task. Write a response that appropriately completes the request.\n###Instruction: The following is a friendly conversation between a human and an AI. The AI is talkative and provides lots of specific details from its context. If the AI does not know the answer to a question, it truthfully says it does not know.\nDo not make up answers and provide only information that you have.\nRelevant pieces of previous conversation:\n{history}\n\n(You do not need to use these pieces of information if not relevant)\n{input}\n\n### Response:\n",
"input"
] |
2024-01-10 | adalyuf/topranks | ranker~tasks.py | from django.core.management import call_command
from django.utils import timezone, html
from django.conf import settings
from django.core.files.storage import default_storage
from django.contrib.postgres.search import SearchVector, SearchQuery, SearchRank
import os, openai, markdown, json, re, tldextract, requests
from celery import shared_task
from celery.utils.log import get_task_logger
from ranker.models import Message, Keyword, Domain, Brand, BrandKeyword, Statistic, add_value, Sitemap, AIModel, Answer
from django.db.models import Count, Avg
logger = get_task_logger(__name__)
@shared_task(bind=True, autoretry_for=(Exception,), retry_backoff=True, retry_kwargs={'max_retries': 8}, rate_limit='500/m')
def call_openai(self, prompt):
openai.api_key = os.getenv("OPENAI_API_KEY")
message_array = [{"role": "system", "content": "You are a helpful assistant."}] #by using async we forgo ability to have each message be dependent on previous messages and there is no guarantee of time order
message_array.append({"role": "user", "content": prompt})
try:
response = openai.ChatCompletion.create(model="gpt-3.5-turbo", messages=message_array, temperature=0.6,)
except openai.error.APIError as e:
#Handle API error here, e.g. retry or log
print(f"OpenAI API returned an API Error: {e}")
logger.warning("OpenAI API returned an API Error: %s", e)
raise e
except openai.error.APIConnectionError as e:
#Handle connection error here
print(f"Failed to connect to OpenAI API: {e}")
logger.warning("Failed to connect to OpenAI API: %s", e)
raise e
except openai.error.RateLimitError as e:
#Handle rate limit error (we recommend using exponential backoff)
print(f"OpenAI API request exceeded rate limit: {e}")
logger.warning("OpenAI API request exceeded rate limit: %s", e)
raise e
try:
return response['choices'][0]['message']['content']
except (KeyError, IndexError, TypeError):
print("Couldn't get a response. We retried several times. Sorry. Better luck next time.")
logger.warning("Multiple attempts failed, response: %s", response)
raise RuntimeError("Multiple retries attempted, all failed.")
@shared_task(queue="express")
def save_message_response(response, message_id):
message = Message.objects.get(id = message_id)
try:
message.response = response
if message.template_item.mode == 'markdown':
try:
message.markdown_response = html.format_html(markdown.markdown(message.response, extensions=['tables']))
except:
print("Error saving as markdown. Did you mean to save as JSON?")
elif message.template_item.mode == 'json':
try:
start_pos = message.response.find('{')
end_pos = message.response.rfind('}')
json_string = message.response[start_pos:end_pos+1]
json_object = json.loads(json_string)
message.json_response = json_object
except:
print("Error in json parsing.")
message.answered_at = timezone.now()
message.save()
print(f"Message: {message.prompt} took {(message.answered_at-message.requested_at).total_seconds()} seconds.")
except:
message.requested_at = None
message.save()
print(f"Couldn't get response from OpenAI API. Resetting requested_at to null.")
conversation = message.conversation
if (conversation.message_set.filter(answered_at__isnull=False).count() > 0 and conversation.message_set.filter(answered_at=None).count() == 0):
conversation.answered_at = timezone.now()
conversation.save()
@shared_task(queue="express")
def save_keyword_answer(api_response, ai_model_id, keyword_id):
ai_model = AIModel.objects.get(id = ai_model_id)
keyword = Keyword.objects.get(id = keyword_id)
answer = Answer.objects.update_or_create(
answer = api_response,
keyword = keyword,
ai_model = ai_model
)
@shared_task(queue="express")
def save_keyword_response(api_response, keyword_id):
keyword = Keyword.objects.get(id = keyword_id)
try:
start_pos = api_response.find('{')
end_pos = api_response.rfind('}')
json_string = api_response[start_pos:end_pos+1]
json_object = json.loads(json_string)
keyword.user_intent = json_object['user_intent']
keyword.natural_language_question = json_object['natural_language_question']
keyword.ai_answer = json_object['ai_answer']
keyword.likely_previous_queries = json_object['likely_previous_queries']
keyword.likely_next_queries = json_object['likely_next_queries']
keyword.answered_at = timezone.now()
keyword.save()
print(f"{keyword} took {(keyword.answered_at-keyword.requested_at).total_seconds()} seconds.")
# Remove one from the pending keywords stat and add one to the answered keywords stat
add_value('keywords_pending' , -1)
add_value('keywords_answered', 1)
return "Keyword saved"
except:
print("Couldn't assign response to columns. Resetting requested_at to null.")
keyword.requested_at = None
keyword.save()
return "Keyword not saved"
@shared_task(queue="express")
def save_business_json(api_response, domain_id):
domain = Domain.objects.get(id = domain_id)
domain.business_api_response = api_response
domain.save()
try:
start_pos = api_response.find('{')
end_pos = api_response.rfind('}')
json_string = api_response[start_pos:end_pos+1]
json_object = json.loads(json_string)
domain.business_json = json_object
domain.business_name = domain.business_json['business_name']
domain.naics_6 = domain.business_json['naics_6']
for brand in domain.business_json['company_brands']:
# lowercase, try to pull the brand, if fail, add brand
# Add domain to brand once established
brand = brand.lower()
try:
our_brand = Brand.objects.get(brand=brand)
except:
our_brand = Brand(brand=brand)
print(f"Couldn't find {brand}, adding to database.")
our_brand.save()
our_brand.domain.add(domain, through_defaults={'type': 'brand'})
for brand in domain.business_json['company_products']:
#TODO: If this becomes more complicated, try and DRY this up with above.
brand = brand.lower()
try:
our_brand = Brand.objects.get(brand=brand)
except:
our_brand = Brand(brand=brand)
print(f"Couldn't find {brand}, adding to database.")
our_brand.save()
our_brand.domain.add(domain, through_defaults={'type': 'product'})
for orig_comp_domain in domain.business_json['competitor_domains']:
try:
comp_domain = tldextract.extract(orig_comp_domain).registered_domain
try:
competitor = Domain.objects.get(domain=comp_domain)
except:
print(f"Couldn't find match: {orig_comp_domain} - skipping.")
domain.competitors.add(competitor)
except:
print(f"Couldn't find matching competitor domain: {orig_comp_domain} for source domain: {domain.domain}")
domain.save()
except Exception as e:
print("Couldn't save business json.")
domain.business_api_response = f"ERROR: Couldn't save business json: {repr(e)}."
domain.save()
@shared_task(queue="steamroller")
def index_brands(batch_size):
print(f'Batch size: {batch_size}')
if Brand.objects.filter(keyword_indexed_at__exact=None).count() > 0:
brand_list = Brand.objects.filter(keyword_indexed_at__exact=None)[:batch_size]
else:
brand_list = Brand.objects.all().order_by('keyword_indexed_at')[:batch_size]
print(f"Found {len(brand_list)} brands")
for brand in brand_list:
brand.indexing_requested_at = timezone.now()
brand.keyword_indexed_at = None
Brand.objects.bulk_update(brand_list, ['indexing_requested_at', 'keyword_indexed_at'])
i = 0
for brand in brand_list:
i += 1
start_time = timezone.now()
# keyword_list = Keyword.objects.filter(ai_answer__icontains=brand.brand)
search_query = SearchQuery(brand.brand, search_type="websearch")
keyword_count = Keyword.objects.filter(search_vector=search_query).count()
if keyword_count > 50000:
print(f"Too many matches ({keyword_count}). Likely a generic brand. Deleting: {brand.brand}")
brand.delete()
continue
keyword_list = Keyword.objects.filter(search_vector=search_query)
brand.keyword.add(*keyword_list)
brand.keyword_indexed_at = timezone.now()
brand.save()
end_time = timezone.now()
print(f"({i}/{len(brand_list)} - {int((end_time-start_time).total_seconds())} sec - {timezone.now()}) Brand ID {brand.pk} ({brand.brand}): {len(keyword_list)} keywords updated")
@shared_task(queue="steamroller")
def refill_keyword_queue():
if os.getenv("ENVIRONMENT") == "production":
kw_batch_size = 10000
max_queue = 100000
else:
kw_batch_size = 100
max_queue = 300
kw_batch_size = kw_batch_size
queued = Keyword.objects.filter(answered_at__isnull=True).filter(requested_at__isnull=False).count()
if queued + kw_batch_size >= max_queue:
kw_batch_size = max(max_queue-queued, 0)
keyword_list = Keyword.objects.filter(requested_at=None)[:kw_batch_size]
logger.info(f"Requesting responses for {kw_batch_size} keywords.")
# Remove batch size from keywords available to be queued stat and add to pending keywords stat
add_value('keywords_available' , -kw_batch_size)
add_value('keywords_pending' , kw_batch_size)
item_list = []
for keyword in keyword_list:
keyword.requested_at = timezone.now()
item_list.append(keyword)
Keyword.objects.bulk_update(item_list, ["requested_at"], batch_size=5000)
for keyword in keyword_list:
prompt = "If a user searches for @currentKeyword, what is their user intent, how would you rephrase this as a natural language question, please provide a thorough and detailed answer to the natural language question, what was their likely previous query, and what could be their next query? Provide your response as a simple JSON object, with keys \"user_intent\", \"natural_language_question\", \"ai_answer\", \"likely_previous_queries\", and \"likely_next_queries\". If this is likely to be their first or last query in their journey, answer \"none\" in the field"
prompt = prompt.replace("@currentKeyword", keyword.keyword)
call_openai.apply_async( (prompt,), link=save_keyword_response.s(keyword.id)) #note the comma in arguments, critical to imply tuple, otherwise thinks array, passes response as first argument
@shared_task(queue="steamroller")
def build_sitemaps():
# Generic implementation to build sitemaps for a model
def sitemaps_for_model(category, objects, batch_size=10000):
num_pages = int(objects.count()/batch_size) + 1
Sitemap.objects.filter(category=category).delete()
for page in range(num_pages):
with default_storage.open(f'sitemaps/{category}/sitemap-{category}-{1000+page}.xml', 'w') as the_file:
the_file.write("<?xml version=\"1.0\" encoding=\"UTF-8\"?>\n")
the_file.write("<urlset xmlns=\"http://www.sitemaps.org/schemas/sitemap/0.9\" xmlns:xhtml=\"http://www.w3.org/1999/xhtml\">\n")
for obj in objects[batch_size*page:batch_size*(page+1)]:
the_file.write("<url>\n")
the_file.write(f"\t<loc>https://topranks.ai{obj.get_absolute_url()}</loc>\n")
the_file.write(f"\t<lastmod>{obj.updated_at.strftime('%Y-%m-%d')}</lastmod>\n")
the_file.write("</url>\n")
the_file.write("</urlset>")
redirect = "https://topranks.ai/" + the_file.obj.key
sitemap = Sitemap.objects.create(url=redirect, category=category)
print(f"Sitemap ({sitemap.category}): {sitemap.url}")
#Each model desired, pass in category and context
keywords = Keyword.objects.filter(answered_at__isnull=False)
sitemaps_for_model('keywords', keywords)
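# Additional models could be registered the same way, e.g. (hypothetical, assuming the model
# defines get_absolute_url and updated_at like Keyword does):
#   sitemaps_for_model('domains', Domain.objects.filter(business_name__isnull=False))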
@shared_task(queue="steamroller")
def keyword_volumes():
print("Starting task: Keyword Volumes")
key_list = Keyword.objects.filter(search_volume=None).all().values_list('id', flat=True)
key_list = list(set(key_list))
batch_size = 10000
num_runs = int(len(key_list)/batch_size)+1
for i in range(num_runs):
loop_list = key_list[i*batch_size:(i+1)*batch_size]
keys = Keyword.objects.filter(pk__in=loop_list).annotate(sv=Avg('keywordposition__search_volume'))
for key in keys:
key.search_volume = key.sv
Keyword.objects.bulk_update(keys, ['search_volume'])
keys = Keyword.objects.filter(pk__in=loop_list).annotate(brands=Count('brandkeyword'))
for key in keys:
key.num_brands = key.brands
Keyword.objects.bulk_update(keys, ['num_brands'])
print(f"[{i}/{num_runs}] Updated {len(keys)} keywords with search volume and number of brands from {min(loop_list)} to {max(loop_list)}") | [
"@currentKeyword",
"You are a helpful assistant.",
"If a user searches for @currentKeyword, what is their user intent, how would you rephrase this as a natural language question, please provide a thorough and detailed answer to the natural language question, what was their likely previous query, and what could be their next query? Provide your response as a simple JSON object, with keys \"user_intent\", \"natural_language_question\", \"ai_answer\", \"likely_previous_queries\", and \"likely_next_queries\". If this is likely to be their first or last query in their journey, answer \"none\" in the field"
] |
2024-01-10 | ZubairQazi/Anomaly_Detection | subreddit_scraper.py | import praw
import json
import zstandard as zstd
import datetime
import os
import openai
import nltk
from nltk.tokenize import sent_tokenize
nltk.download('punkt')
import string
from langdetect import detect
# Gather credentials from config
with open('config.json') as f:
config = json.load(f)
client_id = config['reddit_client_id']
client_secret = config['reddit_client_secret']
user_agent = config['reddit_user_agent']
username = config['reddit_username']
password = config['reddit_password']
openai_api_key = config['openai_api_key']
# Authenticate using your Reddit account credentials
reddit = praw.Reddit(client_id=client_id,
client_secret=client_secret,
user_agent=user_agent,
username=username,
password=password)
# Authenticate OpenAI API
openai.api_key = openai_api_key
# explainlikeimfive askscience askphilosophy
time_filter = 'all' # Can be one of: 'hour', 'day', 'week', 'month', 'year', 'all'
# Set the number of posts to grab
num_posts = 100
# Set the number of comments to retrieve
k = 5
# Convert the cutoff date (posts created after this are ignored) to a Unix timestamp
date_obj = datetime.datetime.strptime('2022-10-31', "%Y-%m-%d")
unix_timestamp = int(date_obj.timestamp())
# Define the subreddits you want to scrape
subreddit_names = input('Enter subreddits (space separated): ').split()
# Get the subreddit objects
subreddits = [reddit.subreddit(name) for name in subreddit_names]
# Get the top posts that have more than 1000 upvotes and were created before October 2022
top_posts = []
for subreddit in subreddits:
top_posts.extend(subreddit.top(time_filter=time_filter, limit=100))
print(f'\nGathering top {num_posts} posts that satisfy criteria...')
# Get the top posts that have more than 1000 upvotes, created before timestamp, SFW, ending with ?, and in English
filtered_posts = [post for post in top_posts if post.score > 1000 \
and post.created_utc < unix_timestamp \
and not post.over_18 \
and '?' in post.title \
and detect(post.title) == 'en' \
and not post.is_deleted \
and len(post.title) > 1]
print(f'\nFiltered out {len(top_posts) - len(filtered_posts)} posts from {len(top_posts)} original posts')
print(f'\nRetrieving top {k} comments from each post...')
data = []
for post in filtered_posts:
post_data = {
'title': post.title,
'score': post.score,
'gpt': '',
'comments': []
}
# Generate response using GPT-3.5 API
# prompt = f'Post: {post.title}\nResponse:'
# response = openai.Completion.create(
# engine='text-davinci-003',
# prompt=prompt,
# max_tokens=50,
# temperature=0.7
# )
# # Get top ranked output message
# generated_response = response.choices[0].text.strip()
response = openai.ChatCompletion.create(
model="gpt-3.5-turbo",
messages=[
{"role": "system", "content": f"You are a frequent user of the subreddits {' '.join(subreddit_names)}. Answer anything relevant."},
{"role": "user", "content": post.title}
],
temperature=0.7,
max_tokens=100
)
# Get top ranked output message
generated_response = response.choices[0].message.content
sentences = sent_tokenize(generated_response)
# Filter out incomplete sentences
complete_sentences = [sentence for sentence in sentences if sentence.endswith('.')]
# complete_sentences = []
# for sentence in sentences:
# if sentence[-1] in string.punctuation:
# complete_sentences.append(sentence)
post_data['gpt'] = ' '.join(complete_sentences)
post.comments.replace_more(limit=0)
comments = post.comments.list()
comments_sorted = sorted(comments, key=lambda comment: comment.score, reverse=True)
for comment in comments_sorted[:k]:
# Check if comment is in english and is not deleted
if detect(comment.body) == 'en' and comment.author is not None and len(comment.body) > 1:
comment_data = {
'score': comment.score,
'body': comment.body
}
post_data['comments'].append(comment_data)
data.append(post_data)
# create dataset folder if it doesn't exist
if not os.path.exists('datasets/reddit_datasets'):
os.makedirs('datasets/reddit_datasets')
print('\nWriting data to compressed file...')
# Compress the json data and store it in a file
compressed_data = zstd.compress(json.dumps(data).encode('utf-8'))
subreddit_name_string = '+'.join(subreddit_names)
with open(f'datasets/reddit_datasets/{subreddit_name_string}_{date_obj.date()}_top-{k}-comments_json.zst', 'wb') as f:
f.write(compressed_data)
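# Read-back sketch (not part of the original script): the archive can typically be loaded with
#   with open(path, 'rb') as f:
#       posts = json.loads(zstd.decompress(f.read()))
# where `path` stands for the .zst file written above (name assumed for illustration).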
| [
" "
] |
2024-01-10 | ZubairQazi/Anomaly_Detection | async_scraper.py | import asyncpraw
import json
import zstandard as zstd
import datetime
import os
import sys
import openai
import nltk
from nltk.tokenize import sent_tokenize
nltk.download('punkt')
import string
from langdetect import detect
import asyncio
import aiofiles
from tqdm.asyncio import tqdm_asyncio
from tqdm import tqdm
# Gather credentials from config
with open('config.json') as f:
config = json.load(f)
client_id = config['reddit_client_id']
client_secret = config['reddit_client_secret']
user_agent = config['reddit_user_agent']
username = config['reddit_username']
password = config['reddit_password']
openai_api_key = config['openai_api_key']
# Authenticate OpenAI API
openai.api_key = openai_api_key
time_filter = 'all' # Can be one of: 'hour', 'day', 'week', 'month', 'year', 'all'
# Set the number of posts to grab
num_posts = 500
# Set the number of comments to retrieve
k = 5
# Set the minimum upvotes
min_upvotes = 1000
# Convert the cutoff date (posts created after this are ignored) to a Unix timestamp
date_obj = datetime.datetime.strptime('2022-10-31', "%Y-%m-%d")
unix_timestamp = int(date_obj.timestamp())
# Define the subreddits you want to scrape
subreddit_names = sorted(input('Enter subreddits (space separated): ').split())
# explainlikeimfive askscience askhistorians
# Set the maximum number of requests allowed per minute
max_requests_per_minute = 3000
# Counter for tracking the number of requests made
request_counter = 0
class ProgressBar:
def __init__(self, total):
self.pbar = tqdm(total=total)
def update(self, value):
self.pbar.update(value)
def close(self):
self.pbar.close()
async def process_post(post_data, pbar):
global request_counter
retry_attempts = 5
for _ in range(retry_attempts):
try:
if request_counter >= max_requests_per_minute:
print("Reached maximum requests per minute. Waiting for 1 minute...")
await asyncio.sleep(60)
request_counter = 0
# Generate response using GPT-3.5 API
response = openai.ChatCompletion.create(
model="gpt-3.5-turbo",
messages=[
{"role": "system", "content": f"You are a frequent user of the subreddits {' '.join(subreddit_names)}. Answer anything relevant."},
{"role": "user", "content": post_data['title']}
],
temperature=0.7,
max_tokens=100,
timeout=10
)
generated_response = response.choices[0].message.content
sentences = sent_tokenize(generated_response)
complete_sentences = [sentence for sentence in sentences if sentence.endswith('.')]
post_data['gpt'] = ' '.join(complete_sentences)
request_counter += 1
# If everything succeeded, break out of the retry loop
pbar.update(1)
return post_data
except openai.error.RateLimitError as e:
print(f"GPT rate limit exceeded. Waiting for 1 minute...")
await asyncio.sleep(60)
except openai.error.APIError as e:
print(f"Error occurred: {e}. Retrying in 1 minute...")
await asyncio.sleep(60)
except asyncio.exceptions.TimeoutError as e:
print('Timeout error. Retrying in 1 minute...')
await asyncio.sleep(60)
print(f"Exceeded maximum retry attempts for post: {post_data['title']}")
pbar.update(1)
return None if post_data['gpt'] != '' else post_data
async def process_posts(posts, pbar):
tasks = [asyncio.create_task(process_post(post_data, pbar)) for post_data in posts]
results = []
for task in tasks:
value = await task
results.append(value)
return results
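# Note: asyncio.create_task above schedules every post's GPT request as soon as the task list
# is built, so the API calls run concurrently; the await loop simply collects results in order.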
async def process_subreddit(subreddit):
top_posts = []
# Counter for the number of filtered posts
count = 0
retrieved_post_ids = set()
# Initial number of posts pulled in each iteration
num_posts_per_search = num_posts
pbar = ProgressBar(num_posts)
# Continue until desired number of posts or number of posts searched is too large
while count < num_posts and num_posts_per_search < 10000:
async for post in subreddit.top(time_filter=time_filter, limit=num_posts_per_search):
if count >= num_posts:
break
# If this post has been seen before
if post.id in retrieved_post_ids:
continue
# Get the top posts that satisfy criteria below
# print(post.title)
# print(post.created_utc < unix_timestamp, post.author, detect(post.title), post.over_18, post.score, '\n')
if post.score > min_upvotes \
and post.created_utc < unix_timestamp \
and not post.over_18 \
and detect(post.title) == 'en' \
and post.author is not None \
and '?' in post.title \
and len(post.title) > 1:
post_data = {
'title': post.title,
'score': post.score,
'subreddit': post.subreddit.display_name,
'post_id': post.id,
'gpt': '',
'comments': []
}
await post.load()
# No comments to gather, so we don't want the post
if not post.comments:
continue
await post.comments.replace_more(limit=0)
comments = post.comments.list()
comments_sorted = sorted(comments, key=lambda comment: getattr(comment, 'score', 0), reverse=True)
comments_stored = 0
for comment in comments_sorted:
try:
if detect(comment.body) == 'en' and comment.author is not None and len(comment.body) > 1:
comment_data = {
'score': comment.score,
'body': comment.body
}
post_data['comments'].append(comment_data)
comments_stored += 1
# If we have stored k comments
if comments_stored >= k:
break
except:
print('Encountered invalid comment body, skipping to next comment...')
continue
top_posts.append(post_data)
retrieved_post_ids.add(post.id)
count += 1
pbar.update(1)
num_posts_per_search *= 2
# await asyncio.sleep(0.05)
print(f'Gathered {len(top_posts[:num_posts])} posts from {subreddit.display_name}')
pbar.close()
return top_posts[:num_posts]
async def write_data_to_file(file_path, data):
compressed_data = zstd.compress(json.dumps(data).encode('utf-8'))
async with aiofiles.open(file_path, 'wb') as f:
await f.write(compressed_data)
async def main():
# Authenticate using your Reddit account credentials
reddit = asyncpraw.Reddit(
client_id=client_id,
client_secret=client_secret,
user_agent=user_agent,
username=username,
password=password,
timeout=60
)
subreddits = [await reddit.subreddit(name) for name in subreddit_names]
tasks = []
for subreddit in subreddits:
task = asyncio.create_task(process_subreddit(subreddit))
tasks.append(task)
print(f'\nGathering top {num_posts} posts from each subreddit which satisfy criteria...')
top_posts = []
for task in asyncio.as_completed(tasks):
result = await task
top_posts.extend(result)
print(f'\nRetrieving top {k} comments from each post and GPT response...')
pbar = ProgressBar(len(top_posts))
# Process the posts and retrieve the data
results = None
try:
results = await process_posts(top_posts, pbar)
except Exception as e:
if results:
print(f'Encountered exception: {str(e)}. Writing existing {len(results)} posts...')
else:
print(f'Encountered exception: {str(e)}')
await reddit.close()
return
pbar.close()
# Create dataset folder if it doesn't exist
if not os.path.exists('datasets/reddit_datasets'):
os.makedirs('datasets/reddit_datasets')
print('\nWriting data to compressed file...')
subreddit_name_string = '+'.join(subreddit_names)
file_path = f'datasets/reddit_datasets/{subreddit_name_string}_{date_obj.date()}_top-{k}-comments_json.zst'
await write_data_to_file(file_path, results)
await reddit.close()
if __name__ == "__main__":
try:
asyncio.run(main())
except Exception as e:
print(f"An error occurred: {e}")
| [
" "
] |
2024-01-10 | chriscarrollsmith/gpt-json | gpt_json~tests~test_gpt.py | import asyncio
from json import dumps as json_dumps
from time import time
from unittest.mock import AsyncMock, patch
import openai
import pytest
from pydantic import BaseModel, Field
from openai.error import Timeout as OpenAITimeout
from gpt_json.gpt import GPTJSON
from gpt_json.models import FixTransforms, GPTMessage, GPTMessageRole, GPTModelVersion
from gpt_json.tests.shared import MySchema, MySubSchema
from gpt_json.transformations import JsonFixEnum
def test_throws_error_if_no_model_specified():
with pytest.raises(
ValueError, match="needs to be instantiated with a schema model"
):
GPTJSON(None)
@pytest.mark.parametrize(
"role_type,expected",
[
(GPTMessageRole.SYSTEM, "system"),
(GPTMessageRole.USER, "user"),
(GPTMessageRole.ASSISTANT, "assistant"),
],
)
def test_cast_message_to_gpt_format(role_type: GPTMessageRole, expected: str):
parser = GPTJSON[MySchema](None)
assert (
parser.message_to_dict(
GPTMessage(
role=role_type,
content="test",
)
)["role"]
== expected
)
@pytest.mark.asyncio
@pytest.mark.parametrize(
"schema_typehint,response_raw,parsed,expected_transformations",
[
(
MySchema,
"""
Your response is as follows:
{
"text": "Test",
"items": ["Item 1", "Item 2"],
"numerical": 123,
"sub_element": {
"name": "Test"
},
"reason": true
}
Your response is above.
""",
MySchema(
text="Test",
items=["Item 1", "Item 2"],
numerical=123,
sub_element=MySubSchema(name="Test"),
reason=True,
),
FixTransforms(),
),
(
list[MySchema],
"""
Your response is as follows:
[
{
"text": "Test",
"items": ["Item 1", "Item 2"],
"numerical": 123,
"sub_element": {
"name": "Test"
},
"reason": true
}
]
Your response is above.
""",
[
MySchema(
text="Test",
items=["Item 1", "Item 2"],
numerical=123,
sub_element=MySubSchema(name="Test"),
reason=True,
)
],
FixTransforms(),
),
(
MySchema,
"""
Your response is as follows:
{
"text": "Test",
"numerical": 123,
"reason": True,
"sub_element": {
"name": "Test"
},
"items": ["Item 1", "Item 2
""",
MySchema(
text="Test",
items=["Item 1", "Item 2"],
numerical=123,
sub_element=MySubSchema(name="Test"),
reason=True,
),
FixTransforms(
fixed_bools=True, fixed_truncation=JsonFixEnum.UNCLOSED_VALUE
),
),
],
)
async def test_acreate(
schema_typehint, response_raw, parsed, expected_transformations: FixTransforms
):
model_version = GPTModelVersion.GPT_3_5
messages = [
GPTMessage(
role=GPTMessageRole.USER,
content="Input prompt",
)
]
model = GPTJSON[schema_typehint](
None,
model=model_version,
temperature=0.0,
timeout=60,
)
# Define mock response
mock_response = {
"choices": [
{
"message": {
"role": "assistant",
"content": response_raw,
},
"index": 0,
"finish_reason": "stop",
}
]
}
# Create the mock
with patch.object(openai.ChatCompletion, "acreate") as mock_acreate:
# Make the mock function asynchronous
mock_acreate.return_value = mock_response
# Call the function and pass the expected parameters
response, transformations = await model.run(messages=messages)
# Assert that the mock function was called with the expected parameters
mock_acreate.assert_called_with(
model=model_version.value,
messages=[
{
"role": message.role.value,
"content": message.content,
}
for message in messages
],
temperature=0.0,
api_key=None,
stream=False,
)
assert response == parsed
assert transformations == expected_transformations
@pytest.mark.parametrize(
"input_messages,expected_output_messages",
[
# Messages fit within max_tokens, no change expected
(
[
GPTMessage(role=GPTMessageRole.SYSTEM, content="Hello"),
GPTMessage(role=GPTMessageRole.USER, content="World!"),
],
[
GPTMessage(role=GPTMessageRole.SYSTEM, content="Hello"),
GPTMessage(role=GPTMessageRole.USER, content="World!"),
],
),
# All messages trimmed to fit max_tokens
(
[
GPTMessage(role=GPTMessageRole.SYSTEM, content="Hello"),
GPTMessage(role=GPTMessageRole.USER, content="World" * 10000),
],
[
GPTMessage(role=GPTMessageRole.SYSTEM, content="Hello"),
GPTMessage(role=GPTMessageRole.USER, content="World" * (8192 - 1)),
],
),
],
)
def test_trim_messages(input_messages, expected_output_messages):
gpt = GPTJSON[MySchema](None, auto_trim=True, auto_trim_response_overhead=0)
output_messages = gpt.trim_messages(input_messages, n=8192)
assert len(output_messages) == len(expected_output_messages)
for output_message, expected_output_message in zip(
output_messages, expected_output_messages
):
assert output_message.role == expected_output_message.role
assert output_message.content == expected_output_message.content
def test_two_gptjsons():
class TestSchema1(BaseModel):
field1: str
class TestSchema2(BaseModel):
field2: str
gptjson1 = GPTJSON[TestSchema1](None)
# Shouldn't allow instantiation without a schema.
# We already expect a mypy error here, which is why we need a `type: ignore`,
# but we also want to make sure that the error is raised at runtime.
with pytest.raises(ValueError):
gptjson2 = GPTJSON(None) # type: ignore
gptjson2 = GPTJSON[TestSchema2](None)
assert gptjson1.schema_model == TestSchema1
assert gptjson2.schema_model == TestSchema2
def test_fill_message_template():
class TestTemplateSchema(BaseModel):
template_field: str = Field(description="Max length {max_length}")
gpt = GPTJSON[TestTemplateSchema](None)
assert gpt.fill_message_template(
GPTMessage(
role=GPTMessageRole.USER,
content="Variable: {max_length}\nMy schema is here: {json_schema}",
),
dict(
max_length=100,
),
) == GPTMessage(
role=GPTMessageRole.USER,
content='Variable: 100\nMy schema is here: {\n"template_field": str // Max length 100\n}',
)
@pytest.mark.asyncio
async def test_extracted_json_is_None():
gpt = GPTJSON[MySchema](None)
with patch.object(
gpt,
"submit_request",
return_value={"choices": [{"message": {"content": "some content"}}]},
), patch.object(
gpt, "extract_json", return_value=(None, FixTransforms(None, False))
):
result, _ = await gpt.run(
[GPTMessage(GPTMessageRole.SYSTEM, "message content")]
)
assert result is None
@pytest.mark.asyncio
async def test_no_valid_results_from_remote_request():
gpt = GPTJSON[MySchema](None)
with patch.object(gpt, "submit_request", return_value={"choices": []}):
result, _ = await gpt.run(
[GPTMessage(GPTMessageRole.SYSTEM, "message content")]
)
assert result is None
@pytest.mark.asyncio
async def test_unable_to_find_valid_json_payload():
gpt = GPTJSON[MySchema](None)
with patch.object(
gpt,
"submit_request",
return_value={"choices": [{"message": {"content": "some content"}}]},
), patch.object(
gpt, "extract_json", return_value=(None, FixTransforms(None, False))
):
result, _ = await gpt.run(
[GPTMessage(GPTMessageRole.SYSTEM, "message content")]
)
assert result is None
@pytest.mark.asyncio
async def test_unknown_model_to_infer_max_tokens():
with pytest.raises(ValueError):
GPTJSON[MySchema](model="UnknownModel", auto_trim=True)
@pytest.mark.asyncio
async def test_timeout():
class MockResponse:
"""
We need to build an actual response class here because the internal openai
code postprocesses with the aiohttp response.
"""
def __init__(self, response_text: str):
self.status = 200
self.headers: dict[str, str] = {}
self.response_text = response_text
async def read(self):
mock_response = {
"choices": [
{
"message": {
"role": "assistant",
"content": self.response_text,
},
"index": 0,
"finish_reason": "stop",
}
]
}
return json_dumps(mock_response).encode()
with patch("aiohttp.ClientSession.request", new_callable=AsyncMock) as mock_request:
# Mock a stalling request
async def side_effect(*args, **kwargs):
await asyncio.sleep(4)
return MockResponse("TEST_RESPONSE")
mock_request.side_effect = side_effect
gpt = GPTJSON[MySchema](api_key="ABC", timeout=2)
start_time = time()
with pytest.raises(OpenAITimeout):
await gpt.run(
[GPTMessage(GPTMessageRole.SYSTEM, "message content")],
)
end_time = time()
duration = end_time - start_time
# Assert duration is about 2 seconds
assert duration == pytest.approx(2, abs=0.2)
| [
"World!",
"some content",
"test",
"Input prompt",
"WorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorl
dWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorl
dWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorld",
"Variable: 100\nMy schema is here: {\n\"template_field\": str // Max length 100\n}",
"WorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorl
dWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorl
dWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorl
dWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorl
dWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorl
dWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorl
dWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorldWorld",
"Max length {max_length}",
"Variable: {max_length}\nMy schema is here: {json_schema}",
"Hello",
"message content"
] |
2024-01-10 | aliozturkseksen/Chat-bot | Chat-bot-GPT-streamlit~useapp.py | import openai
def get_initial_message():
    '''
    (Function)
    This function builds the list of dictionaries with the initial messages
    needed to correctly pre-load the AI.
    The system, user and assistant roles are assigned here.
    '''
messages=[
{"role": "system", "content": "You are a helpful AI Tutor. Who anwers brief questions about AI."},
{"role": "user", "content": "I want to learn AI"},
{"role": "assistant", "content": "Thats awesome, what do you want to know aboout AI"}
]
return messages
def get_chatgpt_response(messages, model="gpt-3.5-turbo"):
    '''
    (Function)
    This function generates a model response based on the conversation
    thread.
    (Parameters)
        - messages: [streamlit class] containing the conversation thread
        - model: [str] the model that will be used to answer
    (Returns)
        - [str] the model's response to the submitted query.
    '''
    # Print the model that will answer the query
    print("model: ", model)
    # Generate the model response based on the message thread
    response = openai.ChatCompletion.create(
        # Model to use
        model=model,
        # Conversation thread
        messages=messages
)
    '''At this point response is a JSON object that looks like the following:
{
"choices": [
{
"finish_reason": "stop",
"index": 0,
"message": {
"content": "The 2020 World Series was played in Texas at Globe Life Field in Arlington.",
"role": "assistant"
}
}
],
"created": 1677664795,
"id": "chatcmpl-7QyqpwdfhqwajicIEznoc6Q47XAyW",
"model": "gpt-3.5-turbo-0613",
"object": "chat.completion",
"usage": {
"completion_tokens": 17,
"prompt_tokens": 57,
"total_tokens": 74
}
}
    So what we need (the message) is obtained with the following command'''
return response['choices'][0]['message']['content']
def update_chat(messages, role, content):
    '''
    (Function)
    This function updates the messages of the bot-human conversation
    (Parameters)
        - messages: [streamlit class] containing the conversation thread
        - role: [str] who sends the message
        - content: [str] content of the message
    (Returns)
        - messages (see this function's parameters for more info.)
    '''
messages.append({"role": role, "content": content})
return messages
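
# A minimal usage sketch (not part of the original file): it assumes openai.api_key is
# already configured and simply chains the three helpers above into one exchange.
def example_exchange(user_text: str) -> str:
    # Build the pre-loaded conversation, append the user's message,
    # ask the model, and record the assistant's reply.
    messages = get_initial_message()
    messages = update_chat(messages, "user", user_text)
    reply = get_chatgpt_response(messages)
    update_chat(messages, "assistant", reply)
    return reply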
| [
"I want to learn AI",
"You are a helpful AI Tutor. Who anwers brief questions about AI.",
"Thats awesome, what do you want to know aboout AI"
] |
2024-01-10 | aliozturkseksen/Chat-bot | streamlite_app.py | # st is a class very similar to Tk, so think of it that way.
import streamlit as st
# In general, st.session_state is a dictionary.
from streamlit_chat import message
from utils import get_initial_message, get_chatgpt_response, update_chat
import os
from dotenv import load_dotenv
load_dotenv()
import openai
openai.api_key = "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"
st.title("Chatbot : ChatGPT and Streamlit Chat Hector")
st.subheader("AI Tutor:")
# Box that will display a selection dropdown showing the options
model = st.selectbox(
    # Box title
    "Select a model",
    # Options to choose from
    ("gpt-3.5-turbo", "gpt-4")
)
# If the session is newly generated or has expired, we declare it empty.
if 'generated' not in st.session_state:
st.session_state['generated'] = []
if 'past' not in st.session_state:
st.session_state['past'] = []
# The text entered into the chatbot is collected here
query = st.text_input("Query: ", key="input")
# This initializes the messages in the session state
if 'messages' not in st.session_state:
st.session_state['messages'] = get_initial_message()
# If the query is not empty
if query:
    # Temporarily shows a message while the code block runs
with st.spinner("generating..."):
        # Get the latest conversation thread.
        messages = st.session_state['messages']
        # Update the conversation thread with the user message
        messages = update_chat(messages, "user", query)
        # st.write("Before making the API call")
        # st.write(messages)
        # Generate the model response based on the thread
        response = get_chatgpt_response(messages,model)
        # Update the conversation thread with the assistant message
        messages = update_chat(messages, "assistant", response)
        # Add the new messages to the session state accordingly
st.session_state.past.append(query)
st.session_state.generated.append(response)
if st.session_state['generated']:
for i in range(len(st.session_state['generated'])-1, -1, -1):
        # If is_user=True, the message will be shown on the right side.
message(st.session_state['past'][i], is_user=True, key=str(i) + '_user')
message(st.session_state["generated"][i], key=str(i))
with st.expander("Show Messages"):
st.write(messages)
| [] |
2024-01-10 | victorrbravo/iconizer | convert.py | #!/usr/bin/env python3
# Author: Victor Bravo victor.bravo@wizeline.com
# Description: This script will convert an image to text and then to an icon
# Usage: python convert.py input.jpg output.jpg
# Dependencies: pip install openai, pip install pillow, pip install requests
# Notes: This script is using the OpenAI API, you need to have an account and an API key
# https://beta.openai.com/
# https://beta.openai.com/docs/api-reference
# https://beta.openai.com/docs/introduction
# https://beta.openai.com/docs/developer-quickstart/your-first-request
# https://beta.openai.com/docs/developer-quickstart/authentication
# https://beta.openai.com/docs/developer-quickstart/environment-variables
# https://beta.openai.com/docs/developer-quickstart/python
import base64
from io import BytesIO
from PIL import Image
import sys
from openai import OpenAI
import os
import requests
import tempfile
import secrets
import string
import argparse
def decode_base64_image(base64_string):
# Remove data prefix if present
base64_string = base64_string.replace("data:image/jpeg;base64,", "")
# Decode base64 into bytes
image_bytes = base64.b64decode(base64_string)
# Create an image from the bytes
image = Image.open(BytesIO(image_bytes))
return image
def encode_image_to_base64(image_path):
try:
# Open the image file
with open(image_path, "rb") as image_file:
# Read the image data
image_data = image_file.read()
# Encode the image data in Base64
encoded_data = base64.b64encode(image_data)
# Convert bytes to a UTF-8 string
base64_string = encoded_data.decode("utf-8")
return base64_string
except Exception as e:
print(f"Error: {e}")
return None
def image_to_text(image_path, client):
image_64 = encode_image_to_base64(image_path)
image_url = f"data:image/jpeg;base64,{image_64}"
response = client.chat.completions.create(
model="gpt-4-vision-preview",
messages=[{"role": "user", "content": [{ "type": "text", "text": "Describe the image.Complete the final paragraph if this is necessary"},
{"type": "image_url", "image_url": { "url": image_url} } ]
}],
max_tokens=100,
)
value_text = response.choices[0].message.content
return value_text
def text_to_icon(value_text, client):
image_size = 1024
prompt_text = f"{value_text}.\n Your role is an assistant, identify the most important element of previous sentences and draw it as an ICON, you have to use only vectorial elements like lines and circles."
response = client.images.generate(
model="dall-e-3",
prompt=prompt_text,
size=f"{image_size}x{image_size}"
)
model_data = response.model_dump()
print("model_data", model_data)
image_url = model_data['data'][0]['url']
return image_url
def download_image(url, local_filename):
try:
# Send a GET request to the URL
response = requests.get(url, stream=True)
response.raise_for_status() # Raise an HTTPError for bad responses
# Open the local file for writing in binary mode
with open(local_filename, 'wb') as file:
# Write the content to the local file in chunks
for chunk in response.iter_content(chunk_size=8192):
file.write(chunk)
print(f"Image downloaded successfully and saved at {local_filename}")
except requests.exceptions.RequestException as e:
print(f"Error downloading image: {e}")
def process_image(image_base64, output_file_path):
try:
if not image_base64:
print("Error: image_base64 is empty.")
return
# Decode base64 to image
img = decode_base64_image(image_base64)
# Save the image as JPEG
img.save(output_file_path, 'JPEG')
print(f"JPEG image saved successfully at {output_file_path}")
except Exception as e:
print(f"Error: {e}")
def resize_image(input_path, output_path):
try:
# Open the image file
image = Image.open(input_path)
# Get the original size
original_size = image.size
# Calculate the new size (50% reduction)
new_size = (int(original_size[0] * 0.25), int(original_size[1] * 0.25))
# Resize the image
resized_image = image.resize(new_size)
# Save the resized image
resized_image.save(output_path)
print(f"Image resized successfully and saved at {output_path}")
except Exception as e:
print(f"Error resizing image: {e}")
def generate_random_string(length):
characters = string.ascii_letters + string.digits
random_string = ''.join(secrets.choice(characters) for _ in range(length))
return random_string
if __name__ == "__main__":
parser = argparse.ArgumentParser(description='Transform a photo into an icon using OpenAI. It also generates a description for the input image')
parser.add_argument('-f', '--filename', help='Enter the file path for the JPEG image.', required=True)
parser.add_argument('-o', '--output', help='Specify the file path for the generated icon.', required=True)
args = vars(parser.parse_args())
print("args", args)
client = OpenAI(
# defaults to os.environ.get("OPENAI_API_KEY")
# or you can explicitly pass in the key (NOT RECOMMENDED)
api_key=os.getenv("OPENAI_KEY"),
)
input_image = args['filename']
output_image = args['output']
temp_directory = tempfile.gettempdir()
temp_image = f"{temp_directory}/temp_{ generate_random_string(8)}"
value_text = image_to_text(input_image,client)
print("DESCRIPTION:\n", value_text, '\n--------------------\n')
image_url = text_to_icon(value_text, client)
    print("temp_image", temp_image)
download_image(image_url, temp_image)
resize_image(temp_image, output_image)
print(f"Image resized to {output_image}")
| [
"PLACEHOLDER.\n Your role is an assistant, identify the most important element of previous sentences and draw it as an ICON, you have to use only vectorial elements like lines and circles.",
"[{'type': 'text', 'text': 'Describe the image.Complete the final paragraph if this is necessary'}, {'type': 'image_url', 'image_url': {'url': 'data:image/jpeg;base64,PLACEHOLDER'}}]"
] |
2024-01-10 | joshwa71/LangChain | IceBreaker~agents~linkedin_lookup_agent.py | from langchain.prompts import PromptTemplate
from langchain.chat_models import ChatOpenAI
from langchain.agents import initialize_agent, Tool, AgentType
from tools.tools import get_profile_url
def lookup(name: str) -> str:
llm = ChatOpenAI(temperature=0, model="gpt-4")
template = """given the full name {name_of_person}, I want you to get me a link to their
LinkedIn profile page. Your answer should contain only the URL, do not include anything like I have found... Make sure your response is only the url - this is very important."""
prompt_template = PromptTemplate(
input_variables=["name_of_person"], template=template
)
tools_for_agent = [
Tool(
name="Crawl google for linkedin profile page.",
func=get_profile_url,
description="Useful for getting linkedin page url",
)
]
agent = initialize_agent(
tools=tools_for_agent,
llm=llm,
agent=AgentType.ZERO_SHOT_REACT_DESCRIPTION,
verbose=True,
)
linkedin_profile_url = agent.run(prompt_template.format_prompt(name_of_person=name))
# print(linkedin_profile_url)
return linkedin_profile_url
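
# tools.tools is not shown in this record. One plausible (hypothetical) implementation of
# get_profile_url, sketched with LangChain's SerpAPIWrapper; it assumes a SERPAPI_API_KEY
# environment variable and may differ from the real project.
def get_profile_url_sketch(text: str) -> str:
    """Search Google for the text and return the top hit, ideally a LinkedIn URL."""
    from langchain.utilities import SerpAPIWrapper

    search = SerpAPIWrapper()
    return search.run(f"{text} linkedin profile site:linkedin.com/in")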
| [
"given the full name {name_of_person}, I want you to get me a link to their \n LinkedIn profile page. Your answer should contain only the URL, do not include anything like I have found... Make sure your response is only the url - this is very important.",
"name_of_person"
] |
2024-01-10 | joshwa71/LangChain | IceBreaker~ice_breaker.py | from langchain.prompts import PromptTemplate
from langchain.chat_models import ChatOpenAI
from langchain.chains import LLMChain
from tools.linkedin import scrape_linkedin_profile
from agents.linkedin_lookup_agent import lookup as linkedin_lookup_agent
from output_parsers import person_intel_parser, PersonIntel
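
# The output_parsers module is not included in this record. A minimal sketch of what
# PersonIntel and person_intel_parser could look like, built on LangChain's
# PydanticOutputParser; the field names below are assumptions, not the original schema.
from typing import List
from pydantic import BaseModel, Field
from langchain.output_parsers import PydanticOutputParser

class PersonIntelSketch(BaseModel):
    summary: str = Field(description="short summary of the person")
    facts: List[str] = Field(description="two interesting facts about the person")
    topics_of_interest: List[str] = Field(description="topics the person is interested in")
    ice_breaker: str = Field(description="an opener for a conversation with the person")

person_intel_parser_sketch = PydanticOutputParser(pydantic_object=PersonIntelSketch)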
def icebreak(name: str) -> PersonIntel:
    linkedin_profile_url = linkedin_lookup_agent(name=name)
summary_template = """given some information: {information}
about a person, I want you to create:
1. a short summary of the person
2. two interesting facts about the person
3. a topic of interest of the person
4. an ice breaker to open conversation with the person
\n{format_instructions}
"""
prompt_template = PromptTemplate(
input_variables=["information"],
template=summary_template,
partial_variables={"format_instructions": person_intel_parser.get_format_instructions()}
)
llm = ChatOpenAI(temperature=0, model="gpt-4")
chain = LLMChain(llm=llm, prompt=prompt_template)
linkedin_data = scrape_linkedin_profile(linkedin_profile_url)
result = chain.run(information=linkedin_data)
print(result)
return person_intel_parser.parse(result)
if __name__ == "__main__":
icebreak(name="Josh O'hara")
| [
"information",
"format_instructions",
"given some information: {information}\n about a person, I want you to create:\n 1. a short summary of the person\n 2. two interesting facts about the person\n 3. a topic of interest of the person\n 4. an ice breaker to open conversation with the person\n \n{format_instructions}\n "
] |
2024-01-10 | lobrien/mastodon_nlp | sentiment.py | import os
import openai
from mastodon import Mastodon, AttribAccessDict
from typing import Iterable
def mastodon_client(credential_file : str, token_file : str) -> Mastodon:
# Create a new app if necessary
if not os.path.exists(credential_file):
Mastodon.create_app(
'sentiment_nlp',
api_base_url = os.environ['MASTODON_BASE_URL'],
to_file = credential_file
)
# Log in
mastodon = Mastodon(
client_id = credential_file,
api_base_url = os.environ['MASTODON_BASE_URL']
)
mastodon.log_in(
os.environ['MASTODON_USER'],
os.environ['MASTODON_PASS'],
to_file = token_file
)
return mastodon
def extract_contents(tl : Iterable[AttribAccessDict]) -> list[str]:
contents = []
for toot in tl:
# `toot` is a `mastodon.Mastodon.AttribAccessDict`
# `content` is HTML formatted, so ultimately you might want to strip tags, but I think OpenAI can handle it
contents.append(toot['content'])
return contents
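
# The content strings above are HTML-formatted. If plain text is preferred before sending
# them to OpenAI, one option (a sketch, not part of the original script) is the
# standard-library HTMLParser; a regex or BeautifulSoup would work just as well.
from html.parser import HTMLParser

class _TextExtractor(HTMLParser):
    def __init__(self):
        super().__init__()
        self.parts = []

    def handle_data(self, data):
        self.parts.append(data)

def strip_tags(html: str) -> str:
    parser = _TextExtractor()
    parser.feed(html)
    return "".join(parser.parts)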
def sentiment_nlp(content : str) -> str:
openai.api_key = os.environ['OPENAI_KEY']
# OpenAI prompt for sentiment analysis
prompt = f"""Label the sentiment of this sentence:\n\n{content}\n\nPositive\nNeutral\nNegative\n\nLabel:"""
response = openai.Completion.create(
engine = 'text-davinci-002',
prompt = prompt,
temperature = 0,
max_tokens = 1,
top_p = 1.0,
frequency_penalty = 0.0,
presence_penalty = 0.0,
best_of = 1
)
return response['choices'][0]['text'].strip()
def main():
credential_file = 'sentiment_nlp_clientcred.secret'
token_file = 'sentiment_nlp_usercred.secret'
mastodon = mastodon_client(credential_file, token_file)
tl = mastodon.timeline('local', limit=10)
contents = extract_contents(tl)
sentiments = []
for content in contents:
sentiment = sentiment_nlp(content)
sentiments.append(sentiment)
for (content,sentiment) in zip(contents,sentiments):
print(f"{sentiment} : {content}")
if __name__ == '__main__':
main() | [
"Label the sentiment of this sentence:\n\nPLACEHOLDER\n\nPositive\nNeutral\nNegative\n\nLabel:"
] |
2024-01-10 | soyelmismo/chatgpTG | bot~functions~openai_front~phone_specs.py | from bot.src.utils.constants import ERRFUNC, FUNCNOARG
from bot.src.utils.gen_utils.openai.openai_functions_extraction import openaifunc
from bot.src.apis import smart_gsm
@openaifunc
async def search_smartphone_info(self, model: str) -> str:
"""
    Receives the device name, searches the smart_gsm website, and returns all the device info.
Args:
model (str): only the device model, without extra text.
Returns:
        str: all the device specifications to be told to the user
"""
if model:
try:
return await smart_gsm.get_device(self, query = model)
except Exception: return ERRFUNC
else: return FUNCNOARG
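
# The @openaifunc decorator (defined elsewhere in this repo) presumably turns the signature
# and docstring above into an OpenAI function-calling schema. A hand-written sketch of
# roughly what that schema would look like (an assumption, not the decorator's actual output):
SEARCH_SMARTPHONE_INFO_SCHEMA_SKETCH = {
    "name": "search_smartphone_info",
    "description": "Searches the smart_gsm website for a device and returns its specifications.",
    "parameters": {
        "type": "object",
        "properties": {
            "model": {
                "type": "string",
                "description": "only the device model, without extra text",
            },
        },
        "required": ["model"],
    },
}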
| [] |
2024-01-10 | soyelmismo/chatgpTG | bot~src~utils~gen_utils~make_transcription.py | async def write(self, audio_file):
from bot.src.utils import config
import secrets
import openai
if self.api not in config.api["available_transcript"]:
index = secrets.randbelow(len(config.api["available_transcript"]))
self.api = config.api["available_transcript"][index]
openai.api_key = config.api["info"][self.api]["key"]
openai.api_base = config.api["info"][self.api]["url"]
if self.proxies is not None:
openai.proxy = {f'{config.proxy_raw.split("://")[0]}': f'{config.proxy_raw}'}
r = await openai.Audio.atranscribe("whisper-1", audio_file)
return r["text"] | [] |
2024-01-10 | soyelmismo/chatgpTG | bot~src~utils~gen_utils~make_image.py | async def gen(self, prompt, model, current_api, style, ratio, seed=None, negative=None):
try:
from bot.src.utils.proxies import config
from bot.src.utils.constants import Style
prompt=f'{prompt + Style[style].value[3]}'
from bot.src.apis import stablehorde
api_key = config.api["info"][current_api].get("key", None)
if current_api == "stablehorde":
if isinstance(negative, str):
prompt += f" ### {negative}"
image, seed, model = await stablehorde.main(self, api_key, prompt=prompt, model=model, seed=seed)
return image, seed, model
import openai
if self.proxies is not None:
openai.proxy = {f'{config.proxy_raw.split("://")[0]}': f'{config.proxy_raw}'}
openai.api_key = api_key
openai.api_base = config.api["info"][current_api]["url"]
r = await openai.Image.acreate(prompt=prompt, n=config.n_images, size="1024x1024")
image_urls = [item.url for item in r.data]
return image_urls, None, None
except Exception as e:
raise RuntimeError(f'make_image.gen > {e}') | [
" ### PLACEHOLDER"
] |
2024-01-10 | soyelmismo/chatgpTG | bot~functions~openai_front~web_search.py | from bot.src.utils.constants import ERRFUNC, FUNCNOARG
from bot.src.utils.gen_utils.openai.openai_functions_extraction import openaifunc
from bot.src.apis import duckduckgo
@openaifunc
async def search_on_internet(self, query: str, search_type: str, timelimit: str = None) -> str:
"""
    Search information/recommendations and news on the internet.
    Receives a search query to look up information and recommendations on the web. Talk freely about the results.
Args:
query (str): the text that will be searched
search_type (str): use "text" or "news" depending of what the user has requested
        timelimit (str): use "d" for the latest results from today; other time limits are "w" (week), "m" (month), "y" (year). Defaults to None.
Returns:
str: the search / news results to inform the user
"""
if query:
try:
return await duckduckgo.search(self, query = query, gptcall = True, timelimit = timelimit, type = search_type)
except Exception: return ERRFUNC
else: return FUNCNOARG | [] |
2024-01-10 | soyelmismo/chatgpTG | bot~functions~openai_front~weather.py | from bot.src.utils.constants import ERRFUNC, FUNCNOARG
from bot.src.utils.gen_utils.openai.openai_functions_extraction import openaifunc
from bot.src.apis import wttr
@openaifunc
async def lookup_weather(self, location: str, unit: str) -> str:
"""
    Search current weather info.
    Args:
        location (str): the city. mandatory.
        unit: "C" or "F". Mandatory, and depends on the city.
    Returns:
        str: all the weather info to be told to the user
"""
if location:
try:
return await wttr.getweather(location = location, unit = unit)
except Exception: return ERRFUNC
else: return FUNCNOARG
| [] |
2024-01-10 | MohammedFadin/semantic-kernel-easier-start | semantic-kernel-demo.py | from termcolor import colored
import asyncio
import semantic_kernel as sk
from semantic_kernel.connectors.ai.open_ai import AzureChatCompletion # You can import OpenAI OpenAIChatCompletion
from plugins.StocksReaderPlugin.stocks_reader_plugin import Stocks # Importing a plugin from the plugins folder
from plugins.OrchestratorPlugin.Orchestrator import Orchestrator # Importing the Orchestrator plugin from the plugins folder
async def main():
# Create a semantic kernel builder
kernel = sk.Kernel()
deployment, api_key, endpoint = sk.azure_openai_settings_from_dot_env()
kernel.add_chat_service("my_console_chat", AzureChatCompletion(deployment, endpoint, api_key))
# Import a semantic plugin (previously called skills) from the plugins folder
plugins_directory = "./plugins"
kernel.import_semantic_skill_from_directory(plugins_directory, "OrchestratorPlugin")
orchestrator_plugin = kernel.import_skill(Orchestrator(kernel), "OrchestratorPlugin")
# Import a native plugin like a stocks plugin to fetch stock data from Dubai Financial Market
kernel.import_skill(Stocks(), "StocksReaderPlugin")
# Run the kernel to load the plugins and output the response in a chat console style
while True:
try:
# Run the prompt and get the user input
console_user_input = input("How can I help you? \nUser:")
kernel_output = await kernel.run_async(
orchestrator_plugin["RouteRequest"], input_str = console_user_input
)
print(colored(kernel_output, "blue"))
if console_user_input == "exit":
break
except KeyboardInterrupt:
print("Bye!")
break
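
# The Stocks plugin imported above lives in the plugins folder and is not shown in this
# record. A minimal sketch of how such a native plugin is typically written for the
# pre-1.0 semantic-kernel Python API (class, method and behaviour here are assumptions,
# and the decorator details may differ between versions):
from semantic_kernel.skill_definition import sk_function

class StocksSketch:
    @sk_function(
        description="Return the latest price for a Dubai Financial Market ticker",
        name="get_stock_price",
    )
    def get_stock_price(self, input: str) -> str:
        # A real plugin would call a market-data API here; this stub just echoes.
        return f"Price lookup for '{input}' is not implemented in this sketch."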
asyncio.run(main()) | [] |
2024-01-10 | MohammedFadin/semantic-kernel-easier-start | semantic-kernel-example.py | import asyncio
import semantic_kernel as sk
from semantic_kernel.connectors.ai.open_ai import AzureChatCompletion # You can import OpenAI OpenAIChatCompletion
from plugins.OrchestratorPlugin.Orchestrator import Orchestrator # You can import your own plugin as well
#### ADD-HERE-STEP ####
from plugins.YourPluginFolderName.YourPluginFileName import your_plugin_class_name # Importing a plugin from the plugins folder
async def main():
#### MANDATORY-STEP #### Create a semantic kernel builder
kernel = sk.Kernel()
deployment, api_key, endpoint = sk.azure_openai_settings_from_dot_env()
kernel.add_chat_service("Your-Application-Name", AzureChatCompletion(deployment, endpoint, api_key)) #
#### MANDATORY-STEP ####
# Import a semantic plugin (previously called skills) from the plugins folder
# This step is mandatory and allows you to add custom functionality to your chatbot
plugins_directory = "./plugins"
kernel.import_semantic_skill_from_directory(plugins_directory, "OrchestratorPlugin")
orchestrator_plugin = kernel.import_skill(Orchestrator(kernel), "OrchestratorPlugin")
#### ADD-HERE-STEP ####
# Import your own custom plugin from the plugins folder
# This step is optional and allows you to add additional functionality to your chatbot
# Replace "your_native_plugin_folder_name" with the name of your plugin folder name
# Replace "your_plugin_class_name" with the name of your plugin
# Example: my_plugin = kernel.import_skill(my_plugin_class_name(), "MyPluginFolderName")
# Note: Make sure your plugin is defined in a separate Python file in the plugins folder
# and you have imported the plugin at the top of this file
# Example: your_plugin_class_name() should be defined in a file called my_plugin_function.py
# located in the plugins folder
your_native_plugin_name = kernel.import_skill(your_plugin_class_name(), "your_native_plugin_folder_name")
#### ADD-HERE-STEP ####
# Pass user input to the orchestrator plugin and get the output
# This step is mandatory and allows the chatbot to process user input and generate a response
# Replace "kernel_output" with a variable name that describes the output of the orchestrator plugin
# Replace "user_input" with the name of the variable that stores user input
# Example: output = await kernel.run_async(orchestrator_plugin["RouteRequest"], input_str = user_input)
# Note: The "RouteRequest" function is defined in the Orchestrator plugin and is responsible for
# processing user input and generating a response (think of it as a middleware function)
user_input = input("How can I help you? ")
kernel_output = await kernel.run_async(orchestrator_plugin["RouteRequest"], input_str = user_input)
    print(kernel_output)
asyncio.run(main()) | [] |
2024-01-10 | nollied/basalt-2022-behavioural-cloning-baseline | behavioural_cloning.py | # Basic behavioural cloning
# Note: this uses gradient accumulation in batches of ones
# to perform training.
# This will fit inside even smaller GPUs (tested on 8GB one),
# but is slow.
from argparse import ArgumentParser
import pickle
import time
import gym
import minerl
import torch as th
import numpy as np
from openai_vpt.agent import PI_HEAD_KWARGS, MineRLAgent
from data_loader import DataLoader
from openai_vpt.lib.tree_util import tree_map
EPOCHS = 2
# Needs to be <= number of videos
BATCH_SIZE = 16
# Ideally more than batch size to create
# variation in datasets (otherwise, you will
# get a bunch of consecutive samples)
# Decrease this (and batch_size) if you run out of memory
N_WORKERS = 20
DEVICE = "cuda"
LOSS_REPORT_RATE = 100
# Tuned with bit of trial and error
LEARNING_RATE = 0.000181
# OpenAI VPT BC weight decay
# WEIGHT_DECAY = 0.039428
WEIGHT_DECAY = 0.0
# KL loss to the original model was not used in OpenAI VPT
KL_LOSS_WEIGHT = 1.0
MAX_GRAD_NORM = 5.0
def load_model_parameters(path_to_model_file):
agent_parameters = pickle.load(open(path_to_model_file, "rb"))
policy_kwargs = agent_parameters["model"]["args"]["net"]["args"]
pi_head_kwargs = agent_parameters["model"]["args"]["pi_head_opts"]
pi_head_kwargs["temperature"] = float(pi_head_kwargs["temperature"])
return policy_kwargs, pi_head_kwargs
def behavioural_cloning_train(data_dir, in_model, in_weights, out_weights):
agent_policy_kwargs, agent_pi_head_kwargs = load_model_parameters(in_model)
# To create model with the right environment.
# All basalt environments have the same settings, so any of them works here
env = gym.make("MineRLBasaltFindCave-v0")
agent = MineRLAgent(env, device=DEVICE, policy_kwargs=agent_policy_kwargs, pi_head_kwargs=agent_pi_head_kwargs)
agent.load_weights(in_weights)
# Create a copy which will have the original parameters
original_agent = MineRLAgent(env, device=DEVICE, policy_kwargs=agent_policy_kwargs, pi_head_kwargs=agent_pi_head_kwargs)
original_agent.load_weights(in_weights)
env.close()
policy = agent.policy
original_policy = original_agent.policy
# Freeze most params
for param in policy.parameters():
param.requires_grad = False
# Unfreeze final layers
trainable_parameters = []
for param in policy.net.lastlayer.parameters():
param.requires_grad = True
trainable_parameters.append(param)
for param in policy.pi_head.parameters():
param.requires_grad = True
trainable_parameters.append(param)
# Parameters taken from the OpenAI VPT paper
optimizer = th.optim.Adam(
trainable_parameters,
lr=LEARNING_RATE,
weight_decay=WEIGHT_DECAY
)
data_loader = DataLoader(
dataset_dir=data_dir,
n_workers=N_WORKERS,
batch_size=BATCH_SIZE,
n_epochs=EPOCHS
)
start_time = time.time()
# Keep track of the hidden state per episode/trajectory.
# DataLoader provides unique id for each episode, which will
# be different even for the same trajectory when it is loaded
# up again
episode_hidden_states = {}
dummy_first = th.from_numpy(np.array((False,))).to(DEVICE)
loss_sum = 0
for batch_i, (batch_images, batch_actions, batch_episode_id) in enumerate(data_loader):
batch_loss = 0
for image, action, episode_id in zip(batch_images, batch_actions, batch_episode_id):
agent_action = agent._env_action_to_agent(action, to_torch=True, check_if_null=True)
if agent_action is None:
# Action was null
continue
agent_obs = agent._env_obs_to_agent({"pov": image})
if episode_id not in episode_hidden_states:
# TODO need to clean up this hidden state after worker is done with the work item.
# Leaks memory, but not tooooo much at these scales (will be a problem later).
episode_hidden_states[episode_id] = policy.initial_state(1)
agent_state = episode_hidden_states[episode_id]
pi_distribution, _, new_agent_state = policy.get_output_for_observation(
agent_obs,
agent_state,
dummy_first
)
with th.no_grad():
original_pi_distribution, _, _ = original_policy.get_output_for_observation(
agent_obs,
agent_state,
dummy_first
)
log_prob = policy.get_logprob_of_action(pi_distribution, agent_action)
kl_div = policy.get_kl_of_action_dists(pi_distribution, original_pi_distribution)
# Make sure we do not try to backprop through sequence
# (fails with current accumulation)
new_agent_state = tree_map(lambda x: x.detach(), new_agent_state)
episode_hidden_states[episode_id] = new_agent_state
# Finally, update the agent to increase the probability of the
# taken action.
# Remember to take mean over batch losses
loss = (-log_prob + KL_LOSS_WEIGHT * kl_div) / BATCH_SIZE
batch_loss += loss.item()
loss.backward()
th.nn.utils.clip_grad_norm_(trainable_parameters, MAX_GRAD_NORM)
optimizer.step()
optimizer.zero_grad()
loss_sum += batch_loss
if batch_i % LOSS_REPORT_RATE == 0:
time_since_start = time.time() - start_time
print(f"Time: {time_since_start:.2f}, Batches: {batch_i}, Avrg loss: {loss_sum / LOSS_REPORT_RATE:.4f}")
loss_sum = 0
state_dict = policy.state_dict()
th.save(state_dict, out_weights)
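
# The TODO in the training loop above notes that episode_hidden_states is never cleaned
# up and slowly leaks memory. A minimal mitigation sketch (not part of the original
# baseline): cap the cache and evict the oldest-inserted episode, calling this every few
# batches inside the loop.
def prune_hidden_states(episode_hidden_states, max_entries=256):
    # Python dicts preserve insertion order, so the first key is the oldest entry.
    while len(episode_hidden_states) > max_entries:
        oldest_episode_id = next(iter(episode_hidden_states))
        del episode_hidden_states[oldest_episode_id]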
if __name__ == "__main__":
parser = ArgumentParser()
parser.add_argument("--data-dir", type=str, required=True, help="Path to the directory containing recordings to be trained on")
parser.add_argument("--in-model", required=True, type=str, help="Path to the .model file to be finetuned")
parser.add_argument("--in-weights", required=True, type=str, help="Path to the .weights file to be finetuned")
parser.add_argument("--out-weights", required=True, type=str, help="Path where finetuned weights will be saved")
args = parser.parse_args()
behavioural_cloning_train(args.data_dir, args.in_model, args.in_weights, args.out_weights)
| [] |
2024-01-10 | nollied/basalt-2022-behavioural-cloning-baseline | data_loader.py | # Code for loading OpenAI MineRL VPT datasets
# (NOTE: Not the original code!)
import json
import glob
import os
import random
from multiprocessing import Process, Queue, Event
import numpy as np
import cv2
from openai_vpt.agent import ACTION_TRANSFORMER_KWARGS, resize_image, AGENT_RESOLUTION
from openai_vpt.lib.actions import ActionTransformer
QUEUE_TIMEOUT = 10
CURSOR_FILE = os.path.join(os.path.dirname(__file__), "cursors", "mouse_cursor_white_16x16.png")
# Mapping from JSON keyboard buttons to MineRL actions
KEYBOARD_BUTTON_MAPPING = {
"key.keyboard.escape" :"ESC",
"key.keyboard.s" :"back",
"key.keyboard.q" :"drop",
"key.keyboard.w" :"forward",
"key.keyboard.1" :"hotbar.1",
"key.keyboard.2" :"hotbar.2",
"key.keyboard.3" :"hotbar.3",
"key.keyboard.4" :"hotbar.4",
"key.keyboard.5" :"hotbar.5",
"key.keyboard.6" :"hotbar.6",
"key.keyboard.7" :"hotbar.7",
"key.keyboard.8" :"hotbar.8",
"key.keyboard.9" :"hotbar.9",
"key.keyboard.e" :"inventory",
"key.keyboard.space" :"jump",
"key.keyboard.a" :"left",
"key.keyboard.d" :"right",
"key.keyboard.left.shift" :"sneak",
"key.keyboard.left.control" :"sprint",
"key.keyboard.f" :"swapHands",
}
# Template action
NOOP_ACTION = {
"ESC": 0,
"back": 0,
"drop": 0,
"forward": 0,
"hotbar.1": 0,
"hotbar.2": 0,
"hotbar.3": 0,
"hotbar.4": 0,
"hotbar.5": 0,
"hotbar.6": 0,
"hotbar.7": 0,
"hotbar.8": 0,
"hotbar.9": 0,
"inventory": 0,
"jump": 0,
"left": 0,
"right": 0,
"sneak": 0,
"sprint": 0,
"swapHands": 0,
"camera": np.array([0, 0]),
"attack": 0,
"use": 0,
"pickItem": 0,
}
MINEREC_ORIGINAL_HEIGHT_PX = 720
# Matches a number in the MineRL Java code
# search the code Java code for "constructMouseState"
# to find explanations
CAMERA_SCALER = 360.0 / 2400.0
# If GUI is open, mouse dx/dy need also be adjusted with these scalers.
# If data version is not present, assume it is 1.
MINEREC_VERSION_SPECIFIC_SCALERS = {
"5.7": 0.5,
"5.8": 0.5,
"6.7": 2.0,
"6.8": 2.0,
"6.9": 2.0,
}
def json_action_to_env_action(json_action):
"""
Converts a json action into a MineRL action.
Returns (minerl_action, is_null_action)
"""
# This might be slow...
env_action = NOOP_ACTION.copy()
# As a safeguard, make camera action again so we do not override anything
env_action["camera"] = np.array([0, 0])
is_null_action = True
keyboard_keys = json_action["keyboard"]["keys"]
for key in keyboard_keys:
# You can have keys that we do not use, so just skip them
# NOTE in original training code, ESC was removed and replaced with
# "inventory" action if GUI was open.
# Not doing it here, as BASALT uses ESC to quit the game.
if key in KEYBOARD_BUTTON_MAPPING:
env_action[KEYBOARD_BUTTON_MAPPING[key]] = 1
is_null_action = False
mouse = json_action["mouse"]
camera_action = env_action["camera"]
camera_action[0] = mouse["dy"] * CAMERA_SCALER
camera_action[1] = mouse["dx"] * CAMERA_SCALER
if mouse["dx"] != 0 or mouse["dy"] != 0:
is_null_action = False
else:
if abs(camera_action[0]) > 180:
camera_action[0] = 0
if abs(camera_action[1]) > 180:
camera_action[1] = 0
mouse_buttons = mouse["buttons"]
if 0 in mouse_buttons:
env_action["attack"] = 1
is_null_action = False
if 1 in mouse_buttons:
env_action["use"] = 1
is_null_action = False
if 2 in mouse_buttons:
env_action["pickItem"] = 1
is_null_action = False
return env_action, is_null_action
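
# A small worked example (not in the original file) of the conversion above: holding W,
# dragging the mouse 10 px to the right and holding left-click becomes a forward+attack
# action with a yaw change of 10 * CAMERA_SCALER = 1.5 degrees.
_EXAMPLE_JSON_ACTION = {
    "keyboard": {"keys": ["key.keyboard.w"]},
    "mouse": {"dx": 10, "dy": 0, "buttons": [0], "newButtons": []},
}
# json_action_to_env_action(_EXAMPLE_JSON_ACTION) would return
# ({..., "forward": 1, "attack": 1, "camera": array([0.0, 1.5]), ...}, False)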
def composite_images_with_alpha(image1, image2, alpha, x, y):
"""
Draw image2 over image1 at location x,y, using alpha as the opacity for image2.
Modifies image1 in-place
"""
    ch = max(0, min(image1.shape[0] - y, image2.shape[0]))
    cw = max(0, min(image1.shape[1] - x, image2.shape[1]))
if ch == 0 or cw == 0:
return
alpha = alpha[:ch, :cw]
image1[y:y + ch, x:x + cw, :] = (image1[y:y + ch, x:x + cw, :] * (1 - alpha) + image2[:ch, :cw, :] * alpha).astype(np.uint8)
def data_loader_worker(tasks_queue, output_queue, quit_workers_event):
"""
Worker for the data loader.
"""
cursor_image = cv2.imread(CURSOR_FILE, cv2.IMREAD_UNCHANGED)
# Assume 16x16
cursor_image = cursor_image[:16, :16, :]
cursor_alpha = cursor_image[:, :, 3:] / 255.0
cursor_image = cursor_image[:, :, :3]
while True:
task = tasks_queue.get()
if task is None:
break
trajectory_id, video_path, json_path = task
video = cv2.VideoCapture(video_path)
# Note: In some recordings, the game seems to start
# with attack always down from the beginning, which
# is stuck down until player actually presses attack
attack_is_stuck = False
# Scrollwheel is allowed way to change items, but this is
# not captured by the recorder.
# Work around this by keeping track of selected hotbar item
# and updating "hotbar.#" actions when hotbar selection changes.
last_hotbar = 0
with open(json_path) as json_file:
json_lines = json_file.readlines()
json_data = "[" + ",".join(json_lines) + "]"
json_data = json.loads(json_data)
for i in range(len(json_data)):
if quit_workers_event.is_set():
break
step_data = json_data[i]
if i == 0:
# Check if attack will be stuck down
if step_data["mouse"]["newButtons"] == [0]:
attack_is_stuck = True
elif attack_is_stuck:
# Check if we press attack down, then it might not be stuck
if 0 in step_data["mouse"]["newButtons"]:
attack_is_stuck = False
# If still stuck, remove the action
if attack_is_stuck:
step_data["mouse"]["buttons"] = [button for button in step_data["mouse"]["buttons"] if button != 0]
action, is_null_action = json_action_to_env_action(step_data)
# Update hotbar selection
current_hotbar = step_data["hotbar"]
if current_hotbar != last_hotbar:
action["hotbar.{}".format(current_hotbar + 1)] = 1
last_hotbar = current_hotbar
# Read frame even if this is null so we progress forward
ret, frame = video.read()
if ret:
# Skip null actions as done in the VPT paper
# NOTE: in VPT paper, this was checked _after_ transforming into agent's action-space.
# We do this here as well to reduce amount of data sent over.
if is_null_action:
continue
if step_data["isGuiOpen"]:
camera_scaling_factor = frame.shape[0] / MINEREC_ORIGINAL_HEIGHT_PX
cursor_x = int(step_data["mouse"]["x"] * camera_scaling_factor)
cursor_y = int(step_data["mouse"]["y"] * camera_scaling_factor)
composite_images_with_alpha(frame, cursor_image, cursor_alpha, cursor_x, cursor_y)
cv2.cvtColor(frame, code=cv2.COLOR_BGR2RGB, dst=frame)
frame = np.asarray(np.clip(frame, 0, 255), dtype=np.uint8)
frame = resize_image(frame, AGENT_RESOLUTION)
output_queue.put((trajectory_id, frame, action), timeout=QUEUE_TIMEOUT)
else:
print(f"Could not read frame from video {video_path}")
video.release()
if quit_workers_event.is_set():
break
# Tell that we ended
output_queue.put(None)
class DataLoader:
"""
Generator class for loading batches from a dataset
This only returns a single step at a time per worker; no sub-sequences.
Idea is that you keep track of the model's hidden state and feed that in,
along with one sample at a time.
+ Simpler loader code
+ Supports lower end hardware
- Not very efficient (could be faster)
- No support for sub-sequences
- Loads up individual files as trajectory files (i.e. if a trajectory is split into multiple files,
this code will load it up as a separate item).
"""
def __init__(self, dataset_dir, n_workers=8, batch_size=8, n_epochs=1, max_queue_size=16):
        assert n_workers >= batch_size, "Number of workers must be greater than or equal to batch size"
self.dataset_dir = dataset_dir
self.n_workers = n_workers
self.n_epochs = n_epochs
self.batch_size = batch_size
self.max_queue_size = max_queue_size
unique_ids = glob.glob(os.path.join(dataset_dir, "*.mp4"))
unique_ids = list(set([os.path.basename(x).split(".")[0] for x in unique_ids]))
self.unique_ids = unique_ids
# Create tuples of (video_path, json_path) for each unique_id
demonstration_tuples = []
for unique_id in unique_ids:
video_path = os.path.abspath(os.path.join(dataset_dir, unique_id + ".mp4"))
json_path = os.path.abspath(os.path.join(dataset_dir, unique_id + ".jsonl"))
demonstration_tuples.append((video_path, json_path))
        assert n_workers <= len(demonstration_tuples), f"n_workers should be less than or equal to the number of demonstrations {len(demonstration_tuples)}"
# Repeat dataset for n_epochs times, shuffling the order for
# each epoch
self.demonstration_tuples = []
for i in range(n_epochs):
random.shuffle(demonstration_tuples)
self.demonstration_tuples += demonstration_tuples
self.task_queue = Queue()
self.n_steps_processed = 0
for trajectory_id, task in enumerate(self.demonstration_tuples):
self.task_queue.put((trajectory_id, *task))
for _ in range(n_workers):
self.task_queue.put(None)
self.output_queues = [Queue(maxsize=max_queue_size) for _ in range(n_workers)]
self.quit_workers_event = Event()
self.processes = [
Process(
target=data_loader_worker,
args=(
self.task_queue,
output_queue,
self.quit_workers_event,
),
daemon=True
)
for output_queue in self.output_queues
]
for process in self.processes:
process.start()
def __iter__(self):
return self
def __next__(self):
batch_frames = []
batch_actions = []
batch_episode_id = []
for i in range(self.batch_size):
workitem = self.output_queues[self.n_steps_processed % self.n_workers].get(timeout=QUEUE_TIMEOUT)
if workitem is None:
# Stop iteration when first worker runs out of work to do.
# Yes, this has a chance of cutting out a lot of the work,
# but this ensures batches will remain diverse, instead
# of having bad ones in the end where potentially
# one worker outputs all samples to the same batch.
raise StopIteration()
trajectory_id, frame, action = workitem
batch_frames.append(frame)
batch_actions.append(action)
batch_episode_id.append(trajectory_id)
self.n_steps_processed += 1
return batch_frames, batch_actions, batch_episode_id
def __del__(self):
for process in self.processes:
process.terminate()
process.join()
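
# Hedged usage sketch (not part of the original file): a minimal consumer loop
# showing how this loader is meant to be driven one step per trajectory, with the
# caller keeping the policy's hidden state between steps. The `policy` object and
# its methods below are hypothetical placeholders.
#
#     loader = DataLoader(dataset_dir="data/MakeWaterfall", n_workers=8,
#                         batch_size=8, n_epochs=1)
#     episode_hidden_states = {}
#     for batch_frames, batch_actions, batch_episode_id in loader:
#         for frame, action, episode_id in zip(batch_frames, batch_actions, batch_episode_id):
#             state = episode_hidden_states.get(episode_id)
#             # new_state = policy.step(frame, action, state)
#             # episode_hidden_states[episode_id] = new_state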
| [] |
2024-01-10 | PHACDataHub/privacy_rac_demo | agent~blawxagent.py | from dotenv import load_dotenv
import langchain
from langchain.chat_models import ChatOpenAI
from langchain.agents import initialize_agent, AgentType
from agent.tools.ontology import ontology_tool
from agent.tools.interview import PAInterview
import os
from langchain.prompts import MessagesPlaceholder
from langchain.memory import ConversationBufferMemory
agent_kwargs = {
"extra_prompt_messages": [MessagesPlaceholder(variable_name="memory")],
}
memory = ConversationBufferMemory(memory_key="memory", return_messages=True)
#langchain.debug = True
load_dotenv()
openai_api_key=os.environ['OPENAI_API_KEY']
# Because we are using functions, we need to use model gpt-4-0613
llm=ChatOpenAI(openai_api_key=openai_api_key,temperature=0, model="gpt-4-0613")
tools = [ontology_tool,PAInterview()]
agent = initialize_agent(tools, llm, agent=AgentType.OPENAI_FUNCTIONS, verbose=True, agent_kwargs=agent_kwargs, memory=memory)
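
# Hedged usage sketch (assumption, not from the original file): the agent is driven
# with a natural-language question; the wording below is purely illustrative.
# agent.run("Can a government institution use personal information it collected "
#           "for one purpose for a different purpose?")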
| [] |
2024-01-10 | PHACDataHub/privacy_rac_demo | agent~tools~interview.py | import json
import requests
from iteration_utilities import deepflatten
from typing import List, Optional, Type, Union
from pydantic import BaseModel
from langchain.tools import BaseTool
class BlawxCategory(BaseModel):
"""
The data structure for a fact about membership in a category.
"""
from_ontology: bool
type: str
category: str
object: str
class BlawxAttributeBinary(BaseModel):
"""
The data structure for a binary attribute.
"""
from_ontology: bool
type: str
attribute: str
object: str
value: str
class BlawxAttributeUnary(BaseModel):
"""
The data structure for a unary attribute.
"""
from_ontology: bool
type: str
attribute: str
object: str
class BlawxRelationship(BaseModel):
"""
The data structure for a relationship fact.
"""
from_ontology: bool
type: str
relationship: str
parameter1: str
parameter2: str
parameter3: str
# This would need to be extended later.
BlawxFact = Union[BlawxCategory, BlawxRelationship, BlawxAttributeBinary, BlawxAttributeUnary]
class BlawxFacts(BaseModel):
"""
This documents the data structure used by the interview and run endpoints.
"""
facts: List[BlawxFact]
class PAInterview(BaseTool):
name = "permitted_uses"
description = """
Useful for finding out whether a use of information is permitted under the Privacy Act.
Requires you to know the ontology first.
The "type" of each fact should be the string "true", and the "from_ontology" value should be set to boolean false.
Pieces of information, individuals, government institutions and purposes must be defined in category facts before they are used in attribute and relationship facts.
All text values should be strings that start with lowercase letters and do not contain spaces.
The facts must exclusively use the category, attribute, and relationship predicates set out in the ontology.
Values should not be provided for boolean attributes.
If an entity is described as a government entity, include it in both the "entity" and "government_institution"
categories.
If there is an information, also specify in the facts that it is recorded.
Encode only facts that were obtained from the user. Do not encode facts derived from previous answers.
"""
def _run(self, facts):
return privacy_interview({"facts": facts})
def _arun(self, input):
raise NotImplementedError("The permitted_uses tool does not support asynchronous requests.")
args_schema: Optional[Type[BaseModel]] = BlawxFacts
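
# Hedged illustration (not part of the original file): one possible shape for the
# facts payload described in the tool description above. The object, category, and
# attribute names are invented examples and must be replaced with terms taken from
# the actual ontology.
# example_facts = [
#     {"from_ontology": False, "type": "true", "category": "government_institution",
#      "object": "health_agency"},
#     {"from_ontology": False, "type": "true", "category": "information",
#      "object": "vaccination_record"},
#     {"from_ontology": False, "type": "true", "attribute": "recorded",
#      "object": "vaccination_record"},
# ]
# PAInterview()._run(example_facts)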
def privacy_interview(input):
response = requests.post('https://dev.blawx.com/jason/privacy-act/test/permitted_use/run/',json=input)
#print(response.text)
package = json.loads(response.text)
if len(package['Answers']):
explanation = ''.join(list(deepflatten(package['Answers'][0]['Models'][0]['Tree'])))
gch = explanation.find('The global constraints hold')
if gch:
explanation = explanation[:gch]
return "I should use only the following information to answer the question: " + package['Answers'][0]['Variables']['Info'] + " can be used by " + package['Answers'][0]['Variables']['Inst'] + " for the purpose of " + package['Answers'][0]['Variables']['Purpose'] + " because " + explanation
else:
return "I should use only the following information to answer the question: There is no evidence based on these facts to conclude that there is any permitted use of information under the AITA." | [
"\nUseful for finding out whether a use of information is permitted under the Privacy Act.\n\nRequires you to know the ontology first.\n\nThe \"type\" of each fact should be the string \"true\", and the \"from_ontology\" value should be set to boolean false.\n\nPieces of information, individuals, government institutions and purposes must be defined in category facts before they are used in attribute and relationship facts.\n\nAll text values should be strings that start with lowercase letters and do not contain spaces.\n\nThe facts must exclusively use the category, attribute, and relationship predicates set out in the ontology.\nValues should not be provided for boolean attributes.\n\nIf an entity is described as a government entity, include it in both the \"entity\" and \"government_institution\"\ncategories.\n\nIf there is an information, also specify in the facts that it is recorded.\n\nEncode only facts that were obtained from the user. Do not encode facts derived from previous answers.\n"
] |
2024-01-10 | PHACDataHub/privacy_rac_demo | agent~tools~ontology.py | import requests
import json
from langchain.agents import Tool
def ontology(input):
# input is ignored
response = requests.get('https://dev.blawx.com/jason/privacy-act/test/permitted_use/onto/')
#print(response)
package = json.loads(response.text)
output = "The categories which take only an object as a parameters are " + ", ".join(package['Categories']) + ".\n"
output = "The attributes that take only an object are " + ", ".join([(a['Attribute'] + " which applies to an object of category " + a['Category']) for a in package['Attributes'] if a['Type'] == "boolean"]) + ".\n"
output += "The attributes that take an object and a value are " + ', '.join([(a['Attribute'] + " which applies to an object of category " + a['Category'] + " and accepts a value of type " + a['Type']) for a in package['Attributes'] if a['Type'] != "boolean"]) + '.\n'
output += "The relationships I know about are "
for r in package['Relationships']:
output += r['Relationship'] + ", which accepts "
index = 1
while "Parameter"+str(index) in r:
output += "a " + r['Parameter'+str(index)] + ","
index += 1
output += ";"
return output
ontology_tool = Tool.from_function(
func=ontology,
name="Ontology",
description="useful for when you need to know the categories, attributes, and relationships available in the permitted_uses tool. Does not require input."
) | [] |
2024-01-10 | activeloopai/deepmemory_synthetic_queries_benchmarking | synthetic_queries.py | from completion import OpenAICompletion
import random
from synthetic_query_messages import SYSTEM_MESSAGE, USER_MESSAGE
from tqdm import tqdm
import functools
import time
def max_retry(max_attempts):
def decorator(func):
@functools.wraps(func)
def wrapper(*args, **kwargs):
attempts = 0
while attempts < max_attempts:
try:
return func(*args, **kwargs)
except Exception as e:
attempts += 1
print(f"Attempt {attempts} failed: {e}")
time.sleep(1) # optional: add a delay before retrying
raise Exception(f"Failed after {max_attempts} attempts")
return wrapper
return decorator
OPENAI_MODELS = ["gpt-3.5-turbo-1106", "gpt-4", "gpt-4-turbo"]
class SyntheticQueryCreator:
def __init__(self, system_message, human_message, model, client):
self.system_message = system_message
self.human_message = human_message
if model in OPENAI_MODELS:
self.model = OpenAICompletion(system_message, human_message, model, client)
else:
raise ValueError("Model not supported yet.")
def run(self, corpus, number_of_questions=100):
docs = corpus.dataset.text.text()
ids = corpus.dataset.id.text()
questions = []
relevance = []
pbar = tqdm(total=number_of_questions)
# Randomly draw the documents that we will generate questions for
doc_indices = random.sample(range(len(docs)), number_of_questions)
for d in doc_indices:
text, label = docs[d], ids[d]
question = self._create_single_query(text)
questions.append(question)
relevance.append(label)
pbar.update(1)
pbar.close()
return questions, relevance
@max_retry(5)
def _create_single_query(self, text):
return self.model.run(text)
def create_synthetic_queries(
corpus,
client,
system_message=SYSTEM_MESSAGE,
human_message=USER_MESSAGE,
model="gpt-3.5-turbo-1106",
number_of_questions=100,
save_to_file=True,
dataset_name="",
):
creator = SyntheticQueryCreator(system_message, human_message, model, client=client)
questions, relevance = creator.run(corpus, number_of_questions)
if save_to_file:
if dataset_name:
dataset_name = f'{dataset_name}_'
with open(f'{dataset_name}questions_{number_of_questions}.txt', 'w') as f:
f.write('\n'.join(questions))
with open(f'{dataset_name}relevance_{number_of_questions}.txt', 'w') as f:
f.write('\n'.join(relevance))
return questions, relevance
def load_synthetic_queries(path_to_questions, path_to_relevance):
with open(path_to_questions, "r") as f:
questions = f.read()
questions = questions.split("\n")
with open(path_to_relevance, "r") as f:
relevance = f.read()
relevance = relevance.split("\n")
return questions, relevance
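
# Hedged usage sketch (assumption, not part of the original file): `corpus` is
# expected to expose a dataset with `text` and `id` tensors, and `client` an OpenAI
# client compatible with the OpenAICompletion wrapper. All names are illustrative.
# questions, relevance = create_synthetic_queries(
#     corpus=my_vector_store,
#     client=openai_client,
#     number_of_questions=50,
#     dataset_name="my_dataset",
# )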
| [] |
2024-01-10 | OpenNyAI/jugalbandi | packages~jb-legal-library~jugalbandi~legal_library~legal_library.py | from enum import Enum
import operator
from typing import Dict, List, Optional
from datetime import date
from pydantic import BaseModel
from jugalbandi.library import DocumentMetaData, Library, DocumentSection
from jugalbandi.storage import Storage
from cachetools import TTLCache
from jugalbandi.core import aiocachedmethod
from jugalbandi.core.errors import (
IncorrectInputException,
InternalServerException,
)
from jugalbandi.jiva_repository import JivaRepository
from sklearn.feature_extraction.text import TfidfVectorizer
from langchain.vectorstores.faiss import FAISS
from langchain.embeddings.openai import OpenAIEmbeddings
from langchain.docstore.document import Document
import re
import os
import openai
import json
import roman
import numpy as np
import tiktoken
class InvalidActMetaData(Exception):
pass
class LegalDocumentType(Enum):
ACT = "act"
AMENDMENT = "amendment"
RULES = "rules"
REGULATION = "regulation"
OTHER = "other"
class LegalKeys(str, Enum):
LEGAL_DOC_TYPE = "legal_doc_type"
LEGAL_ACT_NO = "legal_act_no"
LEGAL_ACT_YEAR = "legal_act_year"
LEGAL_ACT_JURISDICTION = "legal_act_jurisdiction"
LEGAL_ACT_TITLE = "legal_act_title"
LEGAL_MINISTRY = "legal_ministry"
LEGAL_LAST_AMENDMENT_DATE = "legal_last_amendment_date"
LEGAL_PASS_DATE = "legal_pass_date"
LEGAL_EFFECTIVE_DATE = "legal_effective_date"
class Jurisdiction(str, Enum):
CENTER = "center"
KARNATAKA = "karnataka"
class ActMetaData(BaseModel):
id: str
no: str
year: str
title: str
description: Optional[str] = None
passing_date: date
effective_from_date: date
jurisdiction: Jurisdiction
documents: List[DocumentMetaData] = []
def add_document(self, doc_metadata: DocumentMetaData):
self.documents.append(doc_metadata)
@classmethod
def get_act_id(cls, doc_metadata: DocumentMetaData) -> Optional[str]:
jurisdiction = doc_metadata.get_extra_data(
LegalKeys.LEGAL_ACT_JURISDICTION.value)
act_no = doc_metadata.get_extra_data(LegalKeys.LEGAL_ACT_NO.value)
act_year = doc_metadata.get_extra_data(LegalKeys.LEGAL_ACT_YEAR.value)
if act_no is None or jurisdiction is None or act_year is None:
return None
return f"{jurisdiction}-{act_no}-{act_year}"
@classmethod
def from_document_metadata(cls, doc_metadata: DocumentMetaData) -> "ActMetaData":
id = cls.get_act_id(doc_metadata)
act_no = doc_metadata.get_extra_data(LegalKeys.LEGAL_ACT_NO.value)
act_year = doc_metadata.get_extra_data(LegalKeys.LEGAL_ACT_YEAR.value)
jurisdiction_value = doc_metadata.get_extra_data(
LegalKeys.LEGAL_ACT_JURISDICTION
)
if act_no is None or jurisdiction_value is None:
raise InvalidActMetaData(
"act_no / jurisdiction missing in document metadata for "
f"document {doc_metadata.id}"
)
title = doc_metadata.get_extra_data(LegalKeys.LEGAL_ACT_TITLE.value) or ""
id = f"{jurisdiction_value}-{act_no}-{act_year}"
return ActMetaData(
id=id,
no=act_no,
year=act_year,
title=title,
passing_date=date.today(),
effective_from_date=date.today(),
jurisdiction=Jurisdiction(jurisdiction_value),
documents=[],
)
class LegalLibrary(Library):
def __init__(self, id: str, store: Storage):
super(LegalLibrary, self).__init__(id, store)
self._act_cache: TTLCache = TTLCache(2, 900)
self.jiva_repository = JivaRepository()
@aiocachedmethod(operator.attrgetter("_act_cache"))
async def act_catalog(self) -> Dict[str, ActMetaData]:
catalog = await self.catalog()
act_catalog: Dict[str, ActMetaData] = {}
for _, doc_md in catalog.items():
act_id = ActMetaData.get_act_id(doc_md)
if act_id is not None:
if act_id in act_catalog:
act_md = act_catalog[act_id]
act_md.add_document(doc_md)
else:
act_md = ActMetaData.from_document_metadata(doc_md)
act_md.add_document(doc_md)
act_catalog[act_id] = act_md
return act_catalog
async def _abbreviate_query(self, query: str):
openai.api_key = os.environ["OPENAI_API_KEY"]
system_rules = (
"You are a helpful assistant who helps with expanding "
"the abbreviations present in the given sentence. "
"Do not change anything else in the given sentence."
)
result = openai.ChatCompletion.create(
model="gpt-4",
messages=[
{"role": "system", "content": system_rules},
{"role": "user", "content": query},
],
)
return result["choices"][0]["message"]["content"]
async def _preprocess_query(self, query: str) -> str:
query = await self._abbreviate_query(query)
words = ["Give me", "Give", "Find me", "Find", "Get me", "Get",
"Tell me", "Tell"]
for word in words:
pattern = re.compile(re.escape(word), re.IGNORECASE)
query = pattern.sub("", query)
return query.strip()
async def _preprocess_section_number(self, section_number: str) -> str:
try:
result = int(section_number)
except ValueError:
try:
result = roman.fromRoman(section_number)
except Exception:
raise IncorrectInputException("Incorrect section number format")
return str(result)
    async def _get_document_section(self, section_number: str,
                                    document_id: str,
                                    document_metadata: DocumentMetaData
                                    ) -> Optional[DocumentSection]:
document = self.get_document(document_id)
byte_sections = await document.read_sections()
sections = json.loads(byte_sections.decode('utf-8'))
for section in sections:
if section["Section number"] == section_number:
return DocumentSection(section_id=section["Full section name"],
section_name=section["Section name"],
start_page=section["Start page"],
metadata=document_metadata)
async def _generate_response(self, docs: List[Document], query: str,
email_id: str, past_conversations_history: bool):
contexts = [document.page_content for document in docs]
augmented_query = (
"Information to search for answers:\n\n"
"\n\n-----\n\n".join(context for context in contexts) +
"\n\n-----\n\nQuery: " + query
)
system_rules = (
"You are a helpful assistant who helps with answering questions "
"based on the provided text. Extract and return the answer from the "
"provided text and do not paraphrase the answer. "
"If the answer cannot be found in the provided text, "
"you admit that you do not know."
)
messages = [{"role": "system", "content": system_rules}]
if past_conversations_history:
past_conversations = await self.jiva_repository.get_conversation_logs(
email_id=email_id)
for convo in past_conversations:
messages.append({"role": "user", "content": convo['query']})
messages.append({"role": "assistant", "content": convo['response']})
encoding = tiktoken.get_encoding('cl100k_base')
num_tokens = len(encoding.encode(augmented_query))
print(num_tokens)
# if num_tokens > 3500:
# augmented_query = (
# "Information to search for answers:\n\n"
# "\n\n-----\n\n".join(contexts[i] for i in range(len(contexts)-1)) +
# "\n\n-----\n\nQuery: " + query
# )
messages.append({"role": "user", "content": augmented_query})
res = openai.ChatCompletion.create(
model="gpt-4-1106-preview",
messages=messages,
)
response = res["choices"][0]["message"]["content"]
# await self.jiva_repository.insert_conversation_logs(email_id=email_id,
# query=query,
# response=response)
return response
async def search_titles(self, query: str) -> List[DocumentMetaData]:
processed_query = await self._preprocess_query(query)
processed_query = processed_query.strip()
catalog = await self.catalog()
titles_list = [catalog[cat].title for cat in catalog]
vectorizer = TfidfVectorizer()
tfidf_matrix = vectorizer.fit_transform(titles_list)
title_vector = vectorizer.transform([processed_query])
cosine_similarities = tfidf_matrix.dot(title_vector.T).toarray().flatten()
top_3_indices = np.argsort(cosine_similarities)[-3:][::-1]
result = []
for i in top_3_indices:
title = titles_list[i]
for cat in catalog:
if catalog[cat].title == title:
result.append(catalog[cat])
return result
async def search_sections(self, query: str):
processed_query = await self._preprocess_query(query)
processed_query = processed_query.strip()
pattern = re.compile(r'\b[Ss]ec(?:tion)? (\d+|[IVXLCDM]+[A-Z]{0,3})',
re.IGNORECASE)
matches = re.search(pattern, processed_query)
if matches:
section_number = matches.group(1)
split_string = pattern.split(processed_query)
split_string = list(filter(lambda x: x != "" and x != section_number,
split_string))
title = split_string[0].strip()
title = re.sub(r'(?i)of', "", title)
section_number = await self._preprocess_section_number(section_number)
documents_metadata = await self.search_titles(title)
document_metadata = documents_metadata[0]
document_id = document_metadata.id
document_sections = []
document_sections.append(await self._get_document_section(
section_number,
document_id,
document_metadata))
if document_sections[0] is None:
raise InternalServerException("Cannot find section and page number")
act_id = (document_metadata.extra_data["legal_act_jurisdiction"] + "-" +
document_metadata.extra_data["legal_act_no"] + "-" +
document_metadata.extra_data["legal_act_year"])
act_catalog = await self.act_catalog()
acts = act_catalog.values()
for act in acts:
if act.id == act_id:
relevant_act = act
break
for act_document in relevant_act.documents:
new_document_id = act_document.id
if new_document_id != document_id:
new_document = self.get_document(new_document_id)
new_document_metadata = await new_document.read_metadata()
document_sections.append(await self._get_document_section(
section_number,
new_document_id,
new_document_metadata))
return document_sections
else:
raise IncorrectInputException("Incorrect input query format")
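
    # Hedged illustration (not in the original file): the regex above is written to
    # parse queries such as "Section 12 of the Karnataka Land Revenue Act" or
    # "sec IV of the Indian Penal Code", i.e. a section reference followed by an
    # act title. The act names here are only examples, not guaranteed corpus entries.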
async def test_response(self, query: str):
processed_query = await self._preprocess_query(query)
processed_query = processed_query.strip()
await self.download_index_files("index.faiss", "index.pkl")
vector_db = FAISS.load_local("indexes", OpenAIEmbeddings())
docs = vector_db.similarity_search(query=query, k=10)
contexts = []
unique_chunks = []
for document in docs:
file_name = document.metadata['file_name']
contexts.append(document.page_content)
if file_name not in unique_chunks:
unique_chunks.append(file_name)
if len(unique_chunks) >= 5:
response = (
"Please provide act name along with the query to search "
"for answers"
)
else:
augmented_query = (
"Information to search for answers:\n\n"
"\n\n-----\n\n".join(context for context in contexts) +
"\n\n-----\n\nQuery: " + query
)
system_rules = (
"You are a helpful assistant who helps with answering questions "
"based on the provided text. Extract and return the answer from the "
"provided text and do not paraphrase the answer. "
"If the answer cannot be found in the provided text, "
"you admit that you do not know."
)
messages = [{"role": "system", "content": system_rules}]
encoding = tiktoken.get_encoding('cl100k_base')
num_tokens = len(encoding.encode(augmented_query))
print(num_tokens)
messages.append({"role": "user", "content": augmented_query})
res = openai.ChatCompletion.create(
model="gpt-4-1106-preview",
messages=messages,
)
response = res["choices"][0]["message"]["content"]
await self.jiva_repository.insert_retriever_testing_logs(query=query,
response=response)
return response
async def general_search(self, query: str, email_id: str):
processed_query = await self._preprocess_query(query)
processed_query = processed_query.strip()
await self.download_index_files("index.faiss", "index.pkl")
vector_db = FAISS.load_local("indexes", OpenAIEmbeddings())
docs = vector_db.similarity_search(query=query, k=10)
return await self._generate_response(docs=docs, query=processed_query,
email_id=email_id,
past_conversations_history=False)
| [
"response"
] |
2024-01-10 | OpenNyAI/jugalbandi | packages~jb-qa~jugalbandi~qa~query_with_gptindex.py | import openai
import json
from llama_index import load_index_from_storage, StorageContext
from jugalbandi.core.errors import InternalServerException, ServiceUnavailableException
from jugalbandi.document_collection import DocumentCollection
async def querying_with_gptindex(document_collection: DocumentCollection, query: str):
index_content = await document_collection.read_index_file("gpt-index", "index.json")
index_content = index_content.decode('utf-8')
index_dict = json.loads(index_content)
storage_context = StorageContext.from_dict(index_dict)
index = load_index_from_storage(storage_context=storage_context)
query_engine = index.as_query_engine()
try:
response = query_engine.query(query)
source_nodes = response.source_nodes
source_text = []
for i in range(len(source_nodes)):
text = source_nodes[i].node.get_text().strip()
source_text.append(text)
return str(response).strip(), source_text
except openai.error.RateLimitError as e:
raise ServiceUnavailableException(
f"OpenAI API request exceeded rate limit: {e}"
)
except (openai.error.APIError, openai.error.ServiceUnavailableError):
raise ServiceUnavailableException(
"Server is overloaded or unable to answer your request at the moment."
" Please try again later"
)
except Exception as e:
raise InternalServerException(e.__str__())
| [] |
2024-01-10 | OpenNyAI/jugalbandi | jb-jiva-service~tools~sections_splitting_chunking.py | import os
import re
import fitz
from oauth2client.service_account import ServiceAccountCredentials
from langchain.text_splitter import RecursiveCharacterTextSplitter
from dotenv import load_dotenv
import gspread
import regex
import json
# Regex pattern to remove page numbers from parsed PDF text
page_number_pattern = r'^[\n\s]*\d+[\n\s]*(?!.)'
# Regex pattern to remove extra spaces from parsed PDF text
spaces_regex = r"(?<!\n\s)\n(?!\n| \n)"
# Function to get full sections data from the google sheets (Karnataka and Central)
def get_data_from_google_sheets(required_sheet_name: str):
credentials_path = os.environ["GOOGLE_APPLICATION_CREDENTIALS"]
scope = ['https://spreadsheets.google.com/feeds', 'https://www.googleapis.com/auth/drive']
creds = ServiceAccountCredentials.from_json_keyfile_name(credentials_path, scope)
client = gspread.authorize(creds)
sheet_url = os.environ["GOOGLE_SHEET_URL"]
sheet = client.open_by_url(sheet_url)
sheet_names = sheet.worksheets()
for worksheet in sheet_names:
if worksheet.title == required_sheet_name:
data = worksheet.get_all_values()
keys = data[0]
values = data[1:]
section_names_data = [dict(zip(keys, row)) for row in values]
return section_names_data
# Fuction to get only the section names from the extracted google sheets' sections data
def get_data_from_sheet_data(filename: str, section_names_data: list):
# Regex pattern to find section names which starts with digits
pattern = r'(\n*\d+-?\[?[A-Z]{0,3}\..*)'
for section in section_names_data:
if section['Filename'] == filename:
matches = re.findall(pattern, section['Section'])
section_list = [match.replace("\n", "") for match in matches]
return section_list
# Function to find the substring with fuzzy search
def fuzzy_substring_search(minor: str, major: str, errs: int = 10):
errs_ = 0
s = regex.search(f"({minor}){{e<={errs_}}}", major)
while s is None and errs_ <= errs:
errs_ += 1
s = regex.search(f"({minor}){{e<={errs_}}}", major)
return s
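
# Hedged note (not part of the original file): the `regex` module's fuzzy syntax
# "(pattern){e<=N}" allows up to N edits (insertions, deletions, substitutions).
# For example, the call below would still locate the heading despite a one-character
# difference in the text being searched; the strings are illustrative only.
# fuzzy_substring_search(r"ARRANGEMENT\s*OF\s*SECTIONS", "... ARRANGEMENT OF SECTI0NS ...", errs=2)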
# Function to find matching strings with fuzzy search
def find_string_with_fuzzy(A_string: str, B_string: str,
large_string: str, is_first_section: bool, is_last_section: bool):
match_a = re.search(A_string, large_string, re.IGNORECASE)
if match_a is None:
match_a = fuzzy_substring_search(A_string, large_string)
if is_last_section is False:
match_b = re.search(B_string, large_string, re.IGNORECASE)
if match_b is None:
match_b = fuzzy_substring_search(B_string, large_string)
else:
match_b = re.search(B_string, large_string)
if match_b is None:
return large_string[match_a.start():]
else:
return [large_string[match_a.start():match_b.start()], large_string[match_b.start():]]
if is_first_section:
return [large_string[0: match_a.start()], large_string[match_a.start():match_b.start()]]
result = large_string[match_a.start():match_b.start()]
return result
# Function to split the parsed PDF text section wise for the Karnataka Acts
def get_karnataka_section_splits(root_dir: str, section_names_data: list):
section_dict = {}
file_names = os.listdir(root_dir)
# Regex pattern to find the table of contents' section data in the first page of parsed PDF text
pattern = r'statement(?:s)?\s*of\s*object(?:s)?\s*and\s*reason(?:s)?'
for filename in file_names:
# Skipping the files which can not be split into sections by this logic
if filename not in ["THE KARNATAKA STAMP ACT, 1957-ENG.pdf", "21 of 1964 (E).pdf", "32 of 1963 (E).pdf",
"THE KARNATAKA SCHEDULED CASTES SUB-ALLOCATION AND TRIBAL...ETC 2013-ENG.pdf", "THE KARNATAKA SALES TAX ACT, 1957-ENG.pdf", "11of1959(E).pdf",
"A1860-45 (E).pdf", "THE KARNATAKA LEGISLATURE SALARIES, PENSIONS AND ALLOWANCES ACT, 1956 -ENG.pdf",
"THE KARNATAKA HOUSING BOARD ACT, 1962-ENG.pdf", "THE KARNATAKA LAND REVENUE ACT, 1964-ENG.pdf", "A1908-05 (E).pdf", "27 of 1966 (E) emblem.pdf",
"19 of 1979 Rules (E) Debt.pdf", "17 of 2019 Rules (E).pdf", "23 of 2013 Rules (E).pdf", "A1974-02 (E).pdf", "COI (E).pdf"]:
print("\nFilename:", filename)
doc = fitz.open(os.path.join(root_dir, filename))
content = "\n"
content_list = []
# Iterating through all the pages of parsed PDF text
for i in range(len(doc)):
flag = False
page = doc[i]
text = page.get_text("text", textpage=None, sort=False)
text = re.sub(page_number_pattern, '', text)
if i == 0:
matches = list(re.finditer(pattern, text, re.IGNORECASE))
# Checking if the table of contents is fully present in the first page of parsed PDF text
if len(matches) == 2:
split_text = re.split(pattern, text, flags=re.IGNORECASE)
new_text = split_text.pop(-1)
text = " ".join(split_text)
# Flag to stop the table of contents sections' data from getting added to the text content
flag = True
if flag is False:
split_text = re.split(pattern, text, flags=re.IGNORECASE)
# Check if the text is split into two parts. If yes, then the first part is the section names' table of content
if len(split_text) == 2:
# The last part of the split_text is the rest of the act information
new_text = split_text.pop(-1)
text = " ".join(split_text)
# Flag to stop the table of contents sections' data from getting added to the text content
flag = True
content += text
if flag is True:
# Append the table of contents sections' data to the content_list
content_list.append(content)
# Make the content from the last page as the split_text
content = new_text
# Append rest of the full act information from the content to the content_list
content_list.append(content)
# Checking if the content_list has more than one element
if len(content_list) > 1:
# The last element of the content_list is the needed rest of the act information
text = content_list.pop(-1)
text = re.sub(spaces_regex, '', text)
sections_list = [" ".join(content_list)]
else:
# The only element of the content_list is the needed act information
text = re.sub(spaces_regex, '', content_list[0])
sections_list = []
# Extracting only the section names from the google sheets' sections data
data = get_data_from_sheet_data(filename, section_names_data)
# Iterating through all the section names
for i in range(len(data)):
exception_encounter = False
if i == 0:
is_first_section = True
else:
is_first_section = False
# Taking the previous section name as the A_section_name
A_section_name = data[i].split(" ")
A_section_string = r"\s*".join(A_section_name)
if i == len(data)-1:
is_last_section = True
B_section_string = "SCHEDULE"
else:
is_last_section = False
# Taking the next section name as the B_section_name
B_section_name = data[i+1].split(" ")
B_section_string = r"\s*".join(B_section_name)
try:
# Calling the find_string_with_fuzzy function to find the section data between the given A_section_name and B_section_name
extracted_text = find_string_with_fuzzy(A_section_string.strip(" "),
B_section_string.strip(" "),
text, is_first_section, is_last_section)
# Checking if the extracted_text is a list or not
if isinstance(extracted_text, list):
sections_list += extracted_text
else:
sections_list.append(extracted_text)
except Exception:
# If any exception occurs, then the exception_encounter flag is set to True
exception_encounter = True
print(filename, "is problematic and not fully done")
print("Remaining Uncompleted Sections:", len(data)-i)
# Break the loop
break
print("Total Completed Sections:", len(sections_list))
# If exception_encounter is False, then add the sections_list to the section_dict with filename as key
if exception_encounter is False:
section_dict[filename] = sections_list
# Finally dump the section_dict for all Karnataka acts to a json file
with open("karnataka_section_splits.json", 'w') as json_file:
json.dump(section_dict, json_file)
# Function to split the parsed PDF text section wise for the Central Acts
def get_central_section_splits(root_dir: str, section_names_data: list):
section_dict = {}
# Regex pattern to find the table of contents sections' data in the first page of parsed PDF text
pattern = r"ARR\D{1,3}EMENT OF SECT\D{0,2}NS{0,1}"
file_names = os.listdir(root_dir)
# Iterating through all the files in the central root_dir
for filename in file_names:
print("\nFilename:", filename)
data_list = []
doc = fitz.open(os.path.join(root_dir, filename))
# Extracting the first page of parsed PDF text
first_page = doc[0].get_text("text", textpage=None, sort=False)
# Checking if the pattern is present in the first page of parsed PDF text
if re.search(pattern, first_page):
# Splitting the first page of parsed PDF text into title and sections
title, sections = re.split(pattern, first_page)
# Removing the page numbers and other outliers from the title
title = re.sub(r'(?:^|\s|\n)\d{1,2}(?=\s|\n|$)', '', title)
title = title.replace("_", "").strip()
title = title.replace("\n", "")
sections = title + sections
sections = sections.replace("SECTIONS", "").replace("_", "").strip()
# Iterating through rest of the pages of parsed PDF text
for i in range(1, len(doc)):
page = doc[i]
text = page.get_text("text", textpage=None, sort=False)
text = re.sub(page_number_pattern, '', text)
# Checking if title is present in the parsed text
if title in text:
# Appending the so far collected sections (table of contents data) to the data_list
data_list.append(sections)
# Making the sections as empty string for rest of the act information
sections = ""
sections += text
# Appending the rest of the act information to the data_list
data_list.append(sections)
else:
print("CANNOT FIND TITLES INSIDE THIS FILE: ", filename)
continue
# Checking if the data_list has more than one element
if len(data_list) > 1:
# The last element of the data_list is the needed rest of the act information
text = data_list.pop(-1)
sections_list = [" ".join(data_list)]
else:
# The only element of the data_list is the needed act information
text = data_list[0]
sections_list = []
# Removing extra spaces from the text
text = re.sub(spaces_regex, '', text)
# Extracting only the section names from the google sheets' sections data
data = get_data_from_sheet_data(filename, section_names_data)
for i in range(len(data)):
if i == 0:
is_first_section = True
else:
is_first_section = False
# Taking the previous section name as the A_section_name
A_section_name = data[i].split()
A_section_string = r"\s*".join(A_section_name)
if i == len(data)-1:
is_last_section = True
B_section_string = "THE SCHEDULE"
else:
is_last_section = False
# Taking the next section name as the B_section_name
B_section_name = data[i+1].split()
B_section_string = r"\s*".join(B_section_name)
try:
# Calling the find_string_with_fuzzy function to find the section data between the given A_section_name and B_section_name
extracted_text = find_string_with_fuzzy(A_section_string.strip(" "),
B_section_string.strip(" "),
text, is_first_section, is_last_section)
# Checking if the extracted_text is a list or not
if isinstance(extracted_text, list):
sections_list += extracted_text
else:
sections_list.append(extracted_text)
except Exception:
print(filename, "is problematic and not fully done")
print("Remaining Uncompleted Sections:", len(data)-i)
# Break the loop
break
print("Total Completed Sections:", len(sections_list))
section_dict[filename] = sections_list
# Finally dump the section_dict for all Central acts to a json file
with open("central_section_splits.json", 'w') as json_file:
json.dump(section_dict, json_file)
# Function to chunk the sections which are more than 4000 characters and also group the sections which are less than 4000 characters
def sections_chunking():
# Reading the Karnataka section splits json file
with open("karnataka_section_splits.json", 'r') as file:
json_data = file.read()
sections_dict = json.loads(json_data)
# Regex pattern to find section numbers paragraph from parsed PDF text
digit_pattern = r'^(\n*\d+-?\[?[A-Z]{0,3}\..*)'
# Regex pattern to find section numbers from parsed PDF text
section_number_pattern = r'^(\n*\d+-?\[?[A-Z]{0,3}\.)'
# The following sentences are added as a precursor to the chunks
precursor_sentence_one = "The following contents are part of the {}"
precursor_sentence_two = "The following sections are part of the {}"
precursor_sentence_three = "The following section is part of the {}"
precursor_sentence_four = "The following contents are continuation of section {} of the {}"
# Initializing the RecursiveCharacterTextSplitter which will split the chunks based on the given separators
text_splitter = RecursiveCharacterTextSplitter(
chunk_size=4000,
chunk_overlap=0,
separators=["\n \n", "\n\n", "\n", ".", " "]
)
# Reading the google sheets' metadata for Karnataka acts
meta_data_list = get_data_from_google_sheets("All Acts")
result_dict = {}
for key, value in sections_dict.items():
print("Filename:", key)
for meta_data in meta_data_list:
# Getting the title of the act from the google sheets' metadata for the given filename
if meta_data['File Name'] == key:
title = meta_data['Doc title']
# Store the sections data list in section_doc
section_doc = value
# Initializing the new_chunks list with the first element of section_doc as it is the title and table of contents of the act
new_chunks = [section_doc[0]]
i = 1
# Iterating through the rest of the sections data list
while i < len(section_doc):
section = section_doc[i]
# Checking if the section data starts with a digit
matches = re.findall(digit_pattern, section)
# Checking if there is only one section paragraph in the section data
if len(matches) == 1:
# Checking if the section data is less than 4000 characters
if len(section) < 4000:
flag = False
# Adding the precursor sentence to the section data as it is a section paragraph
section = precursor_sentence_two.format(title) + "\n\n\nSection " + section
# Checking if the section data is the last element of the section_doc
if i == len(section_doc)-1:
new_chunks.append(section)
# Break the loop
break
new_section = section
j = 1
# Iterating through the rest of the section_doc to find the sections which when combined with existing section, are less than 4000 characters
while True:
# Checking if the combined index is greater than or equal to the length of section_doc
if i+j >= len(section_doc):
flag = True
break
# Adding 'Section' word to the front of the section data
new_section += "\n\n\nSection " + section_doc[i+j]
# Checking if the combined section data is greater than 4000 characters
if len(new_section) > 4000:
# Removing the last section data from the new_section as it is greater than 4000 characters
new_section = new_section.replace("\n\n\nSection " + section_doc[i+j], "")
flag = True
j -= 1
break
j += 1
# Checking if the flag is True
if flag is True:
# Appending the new_section to the new_chunks list
new_chunks.append(new_section)
i += j
else:
# Appending the section to the new_chunks list
new_chunks.append(section)
else:
# Getting the section paragraph data from the section data
section_number_match = re.search(section_number_pattern, section)
section_number = section[section_number_match.start():section_number_match.end()]
# Splitting the section data into chunks of 4000 characters
section_splits = text_splitter.split_text(section)
sections_list = []
for k in range(len(section_splits)):
if k == 0:
# Adding the precursor sentence and 'Section' word to the first chunk of the section data
section_split = precursor_sentence_three.format(title) + "\n\n\nSection " + section_splits[k]
else:
# Adding the precursor sentence to the rest of the chunks of the section data
section_split = precursor_sentence_four.format(section_number, title) + "\n\n\n" + section_splits[k]
sections_list.append(section_split)
new_chunks += sections_list
else:
# Checking if the section data is greater than 4000 characters
if len(section) > 4000:
# Splitting the section data into chunks of 4000 characters
section_splits = text_splitter.split_text(section)
section_splits = [precursor_sentence_one.format(title) + "\n\n\n" + section_split for section_split in section_splits]
new_chunks += section_splits
else:
section = precursor_sentence_one.format(title) + "\n\n\n" + section
new_chunks.append(section)
i += 1
# Adding the new_chunks list to the result_dict with filename as key
result_dict[key] = new_chunks
# Finally dump the result_dict for all Karnataka acts chunks to a json file
with open("karnataka_docs_chunks.json", 'w') as json_file:
json.dump(result_dict, json_file)
if __name__ == "__main__":
load_dotenv()
root_dir = os.environ["ROOT_DIR"]
section_names_data = get_data_from_google_sheets("Sheet Name")
# get_karnataka_section_splits(root_dir, section_names_data)
# get_central_section_splits(root_dir, section_names_data)
# sections_chunking()
| [] |
2024-01-10 | OpenNyAI/jugalbandi | packages~jb-qa~jugalbandi~qa~indexing.py | from abc import ABC, abstractmethod
import tempfile
import aiofiles
import openai
from jugalbandi.core.errors import InternalServerException, ServiceUnavailableException
from llama_index import VectorStoreIndex, SimpleDirectoryReader
from langchain.text_splitter import RecursiveCharacterTextSplitter
from langchain.docstore.document import Document
from langchain.embeddings.openai import OpenAIEmbeddings
from langchain.vectorstores.faiss import FAISS
from jugalbandi.document_collection import (
DocumentCollection,
DocumentFormat,
)
import json
class Indexer(ABC):
@abstractmethod
async def index(self, document_collection: DocumentCollection):
pass
class GPTIndexer(Indexer):
async def index(self, document_collection: DocumentCollection):
try:
files = [document_collection.local_file_path(file)
async for file in document_collection.list_files()]
documents = SimpleDirectoryReader(input_files=files).load_data()
index = VectorStoreIndex.from_documents(documents)
index_content = index.storage_context.to_dict()
index_str = json.dumps(index_content)
await document_collection.write_index_file("gpt-index", "index.json",
bytes(index_str, "utf-8"))
except openai.error.RateLimitError as e:
raise ServiceUnavailableException(
f"OpenAI API request exceeded rate limit: {e}"
)
except (openai.error.APIError, openai.error.ServiceUnavailableError):
raise ServiceUnavailableException(
"Server is overloaded or unable to answer your request at the moment."
" Please try again later"
)
except Exception as e:
raise InternalServerException(e.__str__())
class LangchainIndexer(Indexer):
def __init__(self):
self.splitter = RecursiveCharacterTextSplitter(
chunk_size=4 * 1024, chunk_overlap=0, separators=["\n", ".", ""]
)
async def index(self, doc_collection: DocumentCollection):
source_chunks = []
counter = 0
async for filename in doc_collection.list_files():
content = await doc_collection.read_file(filename, DocumentFormat.TEXT)
public_text_url = await doc_collection.public_url(filename,
DocumentFormat.TEXT)
content = content.decode('utf-8')
content = content.replace("\\n", "\n")
for chunk in self.splitter.split_text(content):
new_metadata = {
"source": str(counter),
"document_name": filename,
"txt_file_url": public_text_url,
}
source_chunks.append(
Document(page_content=chunk, metadata=new_metadata)
)
counter += 1
try:
search_index = FAISS.from_documents(source_chunks,
OpenAIEmbeddings(client=""))
await self._save_index_files(search_index, doc_collection)
except openai.error.RateLimitError as e:
raise ServiceUnavailableException(
f"OpenAI API request exceeded rate limit: {e}"
)
except (openai.error.APIError, openai.error.ServiceUnavailableError):
raise ServiceUnavailableException(
"Server is overloaded or unable to answer your request at the moment."
" Please try again later"
)
except Exception as e:
raise InternalServerException(e.__str__())
async def _save_index_files(
self, search_index: FAISS, doc_collection: DocumentCollection
):
with tempfile.TemporaryDirectory() as temp_dir:
# save in temporary directory
search_index.save_local(temp_dir)
async with aiofiles.open(f"{temp_dir}/index.pkl", "rb") as f:
content = await f.read()
await doc_collection.write_index_file("langchain", "index.pkl",
content)
async with aiofiles.open(f"{temp_dir}/index.faiss", "rb") as f:
content = await f.read()
await doc_collection.write_index_file("langchain", "index.faiss",
content)
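
# Hedged usage sketch (assumption, not part of the original file): both indexers
# operate on an already-populated DocumentCollection; the objects below are
# illustrative.
# indexer = LangchainIndexer()
# await indexer.index(doc_collection)  # writes index.faiss / index.pkl through the collection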
| [] |
2024-01-10 | OpenNyAI/jugalbandi | packages~jb-qa~jugalbandi~qa~query_with_langchain.py | from typing import List
import openai
from langchain.chains.qa_with_sources import load_qa_with_sources_chain
from langchain.embeddings.openai import OpenAIEmbeddings
from langchain.vectorstores.faiss import FAISS
from langchain.prompts import PromptTemplate
from langchain.llms.openai import OpenAI
from langchain.chains import LLMChain
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.decomposition import TruncatedSVD
from sklearn.preprocessing import normalize
import numpy as np
from jugalbandi.core.errors import (
InternalServerException,
ServiceUnavailableException
)
from jugalbandi.document_collection import DocumentCollection
async def rephrased_question(user_query: str):
template = (
"""Write the same question as user input and """
"""make it more descriptive without adding """
"""new information and without making the facts incorrect.
User: {question}
Rephrased User input:"""
)
prompt = PromptTemplate(template=template, input_variables=["question"])
llm_chain = LLMChain(prompt=prompt, llm=OpenAI(temperature=0), # type: ignore
verbose=False)
response = llm_chain.predict(question=user_query)
return response.strip()
async def latent_semantic_analysis(response: str, documents: List):
vectorizer = TfidfVectorizer()
tfidf_matrix = vectorizer.fit_transform(documents)
svd = TruncatedSVD(n_components=300)
lsa_matrix = svd.fit_transform(tfidf_matrix)
lsa_matrix = normalize(lsa_matrix)
response_vector = vectorizer.transform([response])
response_lsa = svd.transform(response_vector)
response_lsa = normalize(response_lsa)
scores = []
for i, doc_lsa in enumerate(lsa_matrix):
score = np.dot(response_lsa, doc_lsa.T)
scores.append((i, score))
scores = sorted(scores, key=lambda x: x[1], reverse=True)
similarity_scores = []
for score in scores:
scores_list = list(score[1])
similarity_scores.append([score[0], scores_list[0]])
return similarity_scores
async def querying_with_langchain(document_collection: DocumentCollection, query: str):
await document_collection.download_index_files("langchain", "index.faiss",
"index.pkl")
index_folder_path = document_collection.local_index_folder("langchain")
try:
search_index = FAISS.load_local(index_folder_path,
OpenAIEmbeddings()) # type: ignore
chain = load_qa_with_sources_chain(
OpenAI(temperature=0), chain_type="map_reduce" # type: ignore
)
paraphrased_query = await rephrased_question(query)
documents = search_index.similarity_search(paraphrased_query, k=5)
answer = chain({"input_documents": documents, "question": query})
answer_list = answer["output_text"].split("\nSOURCES:")
final_answer = answer_list[0].strip()
source_ids = answer_list[1]
source_ids = source_ids.replace(" ", "")
source_ids = source_ids.replace(".", "")
source_ids = source_ids.split(",")
final_source_text = []
for document in documents:
if document.metadata["source"] in source_ids:
final_source_text.append(document.page_content)
return final_answer, final_source_text
except openai.error.RateLimitError as e:
raise ServiceUnavailableException(
f"OpenAI API request exceeded rate limit: {e}"
)
except (openai.error.APIError, openai.error.ServiceUnavailableError):
raise ServiceUnavailableException(
"Server is overloaded or unable to answer your request at the moment."
" Please try again later"
)
except Exception as e:
raise InternalServerException(e.__str__())
async def querying_with_langchain_gpt4(document_collection: DocumentCollection,
query: str,
prompt: str):
await document_collection.download_index_files("langchain", "index.faiss",
"index.pkl")
index_folder_path = document_collection.local_index_folder("langchain")
try:
search_index = FAISS.load_local(index_folder_path,
OpenAIEmbeddings()) # type: ignore
documents = search_index.similarity_search(query, k=5)
contexts = [document.page_content for document in documents]
        augmented_query = (
"Information to search for answers:\n\n"
"\n\n-----\n\n".join(contexts) +
"\n\n-----\n\nQuery:" + query
)
if prompt != "":
system_rules = prompt
else:
system_rules = (
"You are a helpful assistant who helps with answering questions "
"based on the provided information. If the information cannot be found "
"in the text provided, you admit that I don't know"
)
res = openai.ChatCompletion.create(
model="gpt-4",
messages=[
{"role": "system", "content": system_rules},
{"role": "user", "content": augmented_query},
],
)
return res["choices"][0]["message"]["content"], []
except openai.error.RateLimitError as e:
raise ServiceUnavailableException(
f"OpenAI API request exceeded rate limit: {e}"
)
except (openai.error.APIError, openai.error.ServiceUnavailableError):
raise ServiceUnavailableException(
"Server is overloaded or unable to answer your request at the moment."
" Please try again later"
)
except Exception as e:
raise InternalServerException(e.__str__())
async def querying_with_langchain_gpt3_5(document_collection: DocumentCollection,
query: str,
prompt: str,
source_text_filtering: bool,
model_size: str):
await document_collection.download_index_files("langchain", "index.faiss",
"index.pkl")
index_folder_path = document_collection.local_index_folder("langchain")
if model_size == "16k":
model_name = "gpt-3.5-turbo-16k"
else:
model_name = "gpt-3.5-turbo"
try:
search_index = FAISS.load_local(index_folder_path,
OpenAIEmbeddings()) # type: ignore
documents = search_index.similarity_search(query, k=5)
if prompt != "":
system_rules = prompt
else:
system_rules = (
"You are a helpful assistant who helps with answering questions "
"based on the provided information. If the information cannot be found "
"in the text provided, you admit that you don't know"
)
try:
contexts = [document.page_content for document in documents]
augmented_query = (
"Information to search for answers:\n\n"
"\n\n-----\n\n".join(contexts) +
"\n\n-----\n\nQuery:" + query
)
response = openai.ChatCompletion.create(
model=model_name,
messages=[
{"role": "system", "content": system_rules},
{"role": "user", "content": augmented_query},
],
)
except openai.error.InvalidRequestError:
contexts = [documents[i].page_content for i in range(len(documents)-2)]
augmented_query = (
"Information to search for answers:\n\n"
"\n\n-----\n\n".join(contexts) +
"\n\n-----\n\nQuery:" + query
)
response = openai.ChatCompletion.create(
model=model_name,
messages=[
{"role": "system", "content": system_rules},
{"role": "user", "content": augmented_query},
],
)
result = response["choices"][0]["message"]["content"]
if source_text_filtering:
files_dict = {}
if len(documents) == 1:
document = documents[0]
if "txt_file_url" in document.metadata.keys():
source_text_link = document.metadata["txt_file_url"]
files_dict[source_text_link] = {
"source_text_link": source_text_link,
"source_text_name": document.metadata["document_name"],
"chunks": [document.page_content],
}
else:
similarity_scores = await latent_semantic_analysis(result, contexts)
for score in similarity_scores:
if score[1] > 0.85:
document = documents[score[0]]
if "txt_file_url" in document.metadata.keys():
source_text_link = document.metadata["txt_file_url"]
if source_text_link not in files_dict:
files_dict[source_text_link] = {
"source_text_link": source_text_link,
"source_text_name": document.metadata[
"document_name"
],
"chunks": [],
}
content = document.page_content.replace("\\n", "\n")
files_dict[source_text_link]["chunks"].append(content)
source_text_list = [files_dict[i] for i in files_dict]
else:
source_text_list = []
return result, source_text_list
except openai.error.RateLimitError as e:
raise ServiceUnavailableException(
f"OpenAI API request exceeded rate limit: {e}"
)
except (openai.error.APIError, openai.error.ServiceUnavailableError):
raise ServiceUnavailableException(
"Server is overloaded or unable to answer your request at the moment."
" Please try again later"
)
except Exception as e:
raise InternalServerException(e.__str__())
| [
"question",
"Write the same question as user input and make it more descriptive without adding new information and without making the facts incorrect.\n\n User: {question}\n Rephrased User input:"
] |
2024-01-10 | Soundoffear/DataChat | chat_bot.py | import openai
import json
openai.api_key = "<YOUR OPENAI API KEY>"
class ChatBot:
def __init__(self, system="", filename="messages.json", max_messages=10):
self.system = system
self.messages = []
self.filename = filename
self.max_messages = max_messages
self.completion_tokens = 0
self.prompt_tokens = 0
self.total_tokens = 0
if self.system:
self.messages.append({"role":"system", "content":system})
def __call__(self, message, functions=None):
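        # Append the user message, query the model (optionally advertising the given
        # function schemas), persist the last exchange to `self.filename`, trim the
        # rolling history to `max_messages`, and return the raw completion object.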
self.messages.append({"role":"user",
"content":message})
result = self.execute(functions=functions)
print("RESULT", result)
result_save = result["choices"][0]["message"]["content"]
print("RESULT SAVE", result_save)
        if result_save is None and result["choices"][0]["message"].get("function_call", {}).get("name") != "data_analysis":
result_save = f"Calling Function and executing: {message}"
else:
if result["choices"][0]["message"].get("function_call") is not None:
msg = result["choices"][0]["message"]["function_call"]["arguments"]
result_save = f"Calling Function and executing: {msg}"
else:
result_save = f"Calling Function and executing: {result_save}"
self.messages.append({"role":"assistant", "content":result_save})
with open(self.filename, "a") as f:
f.write(json.dumps(self.messages[-2:]) + "\n")
if len(self.messages) > self.max_messages:
self.messages = self.messages[-self.max_messages:]
return result
def execute(self, functions=None):
        completion = openai.ChatCompletion.create(
            model="gpt-3.5-turbo-0613",
            messages=self.messages,
            functions=functions,
            function_call="auto",
        )
print("COMPLETION", completion)
count = 0
while True:
content = completion["choices"][0]["message"]["content"]
if completion["choices"][0]["message"].get("function_call") is not None:
if completion["choices"][0]["message"]["function_call"]["name"] == "db_questions":
return completion
                if content is None or completion["choices"][0]["message"]["function_call"]["name"] == "data_analysis":
count = 0
break
                completion = openai.ChatCompletion.create(
                    model="gpt-3.5-turbo-0613",
                    messages=self.messages,
                    functions=functions,
                    function_call="auto",
                )
else:
break
if count > 5:
count = 0
break
print(count)
count += 1
for msg in self.messages:
print(msg)
return completion
def get_all_messages(self):
return self.messages
def get_message_count(self):
return len(self.messages)
def get_token_usage(self):
return {
"completion_tokens": self.completion_tokens,
"prompt_tokens": self.prompt_tokens,
"total_tokens": self.total_tokens
} | [] |
2024-01-10 | huajien/ZXJY_DK | gptReport.py |
# OpenAI API usage adapted from https://github.com/chatanywhere/GPT_API_free/blob/main/demo.py
import openai
import os
import json
import random
import time
import requests
import pushMessage
import sha256Encode
import main
def gpt_35_api_stream(messages: list):
try:
response = openai.ChatCompletion.create(
model='gpt-3.5-turbo',
messages=messages,
stream=True,
)
completion = {'role': '', 'content': ''}
for event in response:
if event['choices'][0]['finish_reason'] == 'stop':
                return completion  # return the result directly
for delta_k, delta_v in event['choices'][0]['delta'].items():
completion[delta_k] += delta_v
except Exception as err:
return {'error': f'OpenAI API 异常: {err}'}
def getJobTitle(user,uid,token):
data = {
"dtype": "1",
"uid": uid
}
# Sign = sha256Encode.encodeSha256('Anything_2023', json.dumps(data, separators=(',', ':')) + token)
Sign = sha256Encode.encodeSha256('Anything_2023', json.dumps(data) + token)
headers = {
"os": "android",
"phone": user["deviceModel"],
"appversion": "59",
"sign": Sign,
"timestamp": str(int(time.time() * 1000)),
"token": token,
"cl_ip": f"192.168.31.{random.randint(10, 200)}",
"content-type": "application/json;charset=utf-8",
"Content-Length": str(len(str(data))),
"accept-encoding": "gzip, deflate",
"user-agent": "okhttp/3.14.9"
}
url = "https://sxbaapp.zcj.jyt.henan.gov.cn/api/shixi_student_check.ashx"
try:
response = requests.post(url, headers=headers, data=json.dumps(data))
response.raise_for_status()
data = response.json()
try:
jobTitle = data['data']['bmlist'][0]['gwName']
workingState = data['data']['bmlist'][0]['bmstatus']
            if workingState == "正在实习":  # "currently interning" status string returned by the API
                print(f"Current status: {workingState}")
                return True, jobTitle
            else:
                print("You are not currently in an internship")
                pushMessage.pushMessage("职校家园 Reminder", "You are not in an internship or it has not started yet", user["pushKey"])
                return False, "You are not currently in an internship"
        except KeyError as error:
            print(f"Could not find data for key {error}")
    except requests.exceptions.RequestException as error:
        return f"Could not find data for key {error}"
def loadUserFiles():
path = os.path.join(os.getcwd(), "aiReport.json")
if os.path.exists(path):
with open(path, encoding="utf-8") as file:
return json.load(file)
else:
localPath = os.listdir(os.getcwd())
print(f"\033[93m无法找到 {path} 文件,\n请先执行addUser.py:\n{localPath}\033[0m")
return False
def gptConfig(Position):
apt_key = loadUserFiles()
if apt_key:
api_key = apt_key[0]['api_key']
openai.api_key = api_key
openai.api_base = "https://api.chatanywhere.com.cn/v1"
ReturnGptData = gpt_35_api_stream([{'role': 'user', 'content': f'{Position}'}, ])
# print(ReturnGptData["content"])
time.sleep(int(random.uniform(15, 30)))
return ReturnGptData["content"]
if __name__ == '__main__':
main.main() | [
"PLACEHOLDER"
] |
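A small standalone sketch of the streaming helper above, outside the check-in workflow. The key and prompt are placeholders; the proxy base URL is simply the one gptConfig() in the file already sets.

# Hypothetical standalone call to gpt_35_api_stream; key and prompt are placeholders.
import openai
from gptReport import gpt_35_api_stream

openai.api_key = "sk-..."  # placeholder key
openai.api_base = "https://api.chatanywhere.com.cn/v1"  # same endpoint gptConfig() uses

messages = [{"role": "user", "content": "Write a short weekly internship report about testing work."}]
reply = gpt_35_api_stream(messages)
print(reply.get("error") or reply["content"])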
2024-01-10 | williamfzc/git-file-keyword | git_file_keyword~plugin_llm.py | import itertools
import pathlib
import time
import typing
import openai
from bardapi import Bard
from loguru import logger
from pydantic import BaseModel
from git_file_keyword.config import ExtractConfig
from git_file_keyword.plugin import BasePlugin
from git_file_keyword.result import Result, FileResult
from git_file_keyword.utils import split_list
class Ask(BaseModel):
file_list: typing.List[pathlib.Path]
request_txt: str
class BaseLLMPlugin(BasePlugin):
# todo: LLM usage takes some money. Maybe the results should be cached.
prompt = """
Generate concise (<30 words) descriptions for each source file based on their associated keywords,
summarizing/guessing the function of each file.
Avoid comments, quotes, or explanations.
Sample Input:
```
- somedir/clipboard.java: cut, paste, auto-sync
- somedir/webview.java: ...
```
Sample Output:
```
- somedir/clipboard.java: A auto-sync clipboard implementation contains cut/paste.
- somedir/webview.java: ...
```
"""
def plugin_id(self) -> str:
return "llm"
def gen_ask_group(self, result: Result) -> typing.Dict[pathlib.Path, Ask]:
merged_dict: typing.Dict[pathlib.Path, typing.Dict[pathlib.Path, FileResult]] = {}
for file_path, file_result in result.file_results.items():
dir_path = file_path.parent
if dir_path not in merged_dict:
merged_dict[dir_path] = dict()
merged_dict[dir_path][file_path] = file_result
ask_dict = dict()
for directory_path, group_dict in merged_dict.items():
# if the whole group does not contain any changes, ignore it
if all((each.cached for each in group_dict.values())):
continue
logger.info(f"prepare llm response for {directory_path}")
# send the group to AI
if len(group_dict) < 10:
lines = [
f"- {filepath}: {list(result.keywords)}"
for filepath, result in group_dict.items()
]
request_txt = "\n".join(lines)
ask_dict[directory_path] = Ask(
file_list=list(group_dict.keys()), request_txt=request_txt
)
else:
# split into multi parts
chunks = list(split_list(list(group_dict.items()), 10))
for i, chunk in enumerate(chunks, start=1):
lines = [
f"- {filepath}: {list(result.keywords)}"
for filepath, result in chunk
]
request_txt = "\n".join(lines)
ask_dict[pathlib.Path(f"{directory_path.as_posix()}_{i}")] = Ask(
file_list=[filepath for filepath, _ in chunk],
request_txt=request_txt,
)
return ask_dict
class BardLLMPlugin(BaseLLMPlugin):
# todo: current bard model looks too bad to use ...
# and there is no official sdk
token = ""
proxies = {
"http": "http://127.0.0.1:7890",
}
def __init__(self):
self.bard = Bard(
token=self.token,
proxies=self.proxies,
)
def plugin_id(self) -> str:
return "llm-bard"
def apply(self, config: ExtractConfig, result: Result):
ask_dict = self.gen_ask_group(result)
answer_dict = dict()
for each_dir, each_ask in ask_dict.items():
resp = self.bard.get_answer(f"{self.prompt}\n{each_ask.request_txt}")
responses = resp["content"]
valid_lines = [
each for each in responses.split("\n") if each.startswith("-")
]
for line in valid_lines:
file_path, description = line.split(": ", 1)
file_path = file_path.lstrip("- ")
# fix this file_path
# model sometimes returns the name only
                for each_valid_file_path in each_ask.file_list:  # Ask is a pydantic model, so use attribute access
if file_path == each_valid_file_path.name:
file_path = each_valid_file_path
break
else:
# maybe it's correct ...
file_path = pathlib.Path(file_path)
answer_dict[file_path] = description
# update to result
for each_path, each_desc in answer_dict.items():
if each_path not in result.file_results:
logger.warning(f"{each_path} not in result")
continue
result.file_results[each_path].plugin_output[self.plugin_id()] = each_desc
class OpenAILLMPlugin(BaseLLMPlugin):
token = ""
model = "gpt-3.5-turbo"
rate_limit_wait = 20
def plugin_id(self) -> str:
return "llm-openai"
def apply(self, config: ExtractConfig, result: Result):
ask_dict = self.gen_ask_group(result)
answer_dict = dict()
openai.api_key = self.token
logger.info(f"total llm requests to go: {len(ask_dict)}")
for cur, (each_dir, each_ask) in enumerate(ask_dict.items()):
completion = openai.ChatCompletion.create(
model=self.model,
messages=[
{
"role": "system",
"content": self.prompt,
},
{"role": "user", "content": each_ask.request_txt},
],
)
responses: str = completion.choices[0].message.content
valid_lines = [
each for each in responses.split("\n") if each.startswith("-")
]
for line in valid_lines:
file_path, description = line.split(": ", 1)
file_path = file_path.lstrip("- ")
# fix this file_path
# model sometimes returns the name only
for each_valid_file_path in each_ask.file_list:
if file_path == each_valid_file_path.name:
file_path = each_valid_file_path
break
else:
# maybe it's correct ...
file_path = pathlib.Path(file_path)
answer_dict[file_path] = description
# by default, trial api key rate limit: 3/min
# means 20s / request
logger.info(f"{cur + 1}/{len(ask_dict)} finished, resp: {responses}")
time.sleep(self.rate_limit_wait)
# update to result
for each_path, each_desc in answer_dict.items():
if each_path not in result.file_results:
logger.warning(f"{each_path} not in result")
continue
result.file_results[each_path].description = each_desc
result.file_results[each_path].plugin_output[self.plugin_id()] = each_desc
| [
"\nGenerate concise (<30 words) descriptions for each source file based on their associated keywords, \nsummarizing/guessing the function of each file. \nAvoid comments, quotes, or explanations.\n\nSample Input:\n\n```\n- somedir/clipboard.java: cut, paste, auto-sync\n- somedir/webview.java: ...\n```\n\nSample Output:\n\n```\n- somedir/clipboard.java: A auto-sync clipboard implementation contains cut/paste.\n- somedir/webview.java: ...\n```\n "
] |
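A rough configuration sketch for the OpenAI plugin above. The subclass pattern mirrors how token, model and rate_limit_wait are declared as class attributes; how the surrounding pipeline builds ExtractConfig and Result is not shown here and is assumed to happen elsewhere in git_file_keyword.

# Hypothetical configuration; apply() is expected to be driven by the extraction pipeline.
from git_file_keyword.plugin_llm import OpenAILLMPlugin


class ConfiguredOpenAIPlugin(OpenAILLMPlugin):
    token = "sk-..."        # placeholder OpenAI key
    model = "gpt-3.5-turbo"
    rate_limit_wait = 20    # trial keys allow ~3 requests/min, hence 20 s between calls


plugin = ConfiguredOpenAIPlugin()
# plugin.apply(config, result) is then called with an ExtractConfig and a populated
# Result produced earlier in the git_file_keyword pipeline.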
2024-01-10 | pooyahrtn/RunpodOllama | runpod_ollama~runpod_serverless_llm.py | import time
from typing import Any, Dict, List, Mapping, Optional
from langchain.callbacks.manager import CallbackManagerForLLMRun
from langchain.llms.base import LLM
from langchain.chat_models.base import BaseChatModel
from langchain.schema import ChatResult, ChatGeneration
from langchain.schema.messages import BaseMessage
import requests
from langchain.schema.messages import (
AIMessage,
BaseMessage,
ChatMessage,
HumanMessage,
SystemMessage,
)
class RunpodRepository:
def __init__(self, api_key: str, pod_id: str):
self.api_key = api_key
self.pod_id = pod_id
self.active_request_id: Optional[str] = None
def generate(self, prompt: str, stop: Optional[List[str]] = None) -> str:
headers = self._request_headers()
default_stop = ["<|im_start|>", "<|im_end|>"]
input = {
"method_name": "generate",
"input": {
"prompt": prompt,
"options": {"stop": (stop or []) + default_stop},
},
}
# TODO: Handle network errors
out = requests.post(
f"{self._request_base_url()}/run",
headers=headers,
json={"input": input},
).json()
self.active_request_id = out["id"]
while out["status"] != "COMPLETED":
out = requests.get(
f"{self._request_base_url()}/status/{self.active_request_id}",
headers=headers,
).json()
time.sleep(1)
return out["output"]["response"]
def cancel_requests(self):
if not self.active_request_id:
return
headers = self._request_headers()
return requests.post(
f"{self._request_base_url()}/cancel/{self.active_request_id}",
headers=headers,
)
def _request_base_url(self) -> str:
return f"https://api.runpod.ai/v2/{self.pod_id}"
def _request_headers(self) -> Mapping[str, str]:
return {
"accept": "application/json",
"content-type": "application/json",
"authorization": self.api_key,
}
class RunpodServerlessLLM(LLM):
pod_id: str
api_key: str
@property
def _llm_type(self) -> str:
return "runpod-serverless"
def _call(
self,
prompt: str,
stop: Optional[List[str]] = None,
run_manager: Optional[CallbackManagerForLLMRun] = None,
**kwargs: Any,
) -> str:
response = self._runpod_repository.generate(prompt, stop=stop)
return response
@property
def _runpod_repository(self) -> RunpodRepository:
return RunpodRepository(api_key=self.api_key, pod_id=self.pod_id)
@property
def _identifying_params(self) -> Mapping[str, Any]:
"""Get the identifying parameters."""
return {"pod_id": self._runpod_repository.pod_id}
class ChatRunpodServerless(BaseChatModel):
pod_id: str
api_key: str
def _generate(
self,
messages: List[BaseMessage],
stop: List[str] | None = None,
run_manager: CallbackManagerForLLMRun | None = None,
**kwargs: Any,
) -> ChatResult:
        # Render the whole conversation into a single prompt so the completion
        # reflects every message, not just the first one.
        prompt = "\n".join(self._convert_one_message_to_text(m) for m in messages)
        result = self._llm.generate(prompts=[prompt], stop=stop).generations[0][0]
chat_generation = ChatGeneration(
message=AIMessage(content=result.text),
generation_info=result.generation_info,
)
return ChatResult(generations=[chat_generation])
@property
def _llm_type(self) -> str:
"""Return type of chat model."""
return "runpod-serverless-chat"
@property
def _identifying_params(self) -> Dict[str, Any]:
"""Get the identifying parameters."""
return {"pod_id": self.pod_id}
@property
def _llm(self) -> LLM:
return RunpodServerlessLLM(pod_id=self.pod_id, api_key=self.api_key)
def _convert_one_message_to_text(self, message: BaseMessage) -> str:
if isinstance(message, ChatMessage):
message_text = f"\n\n{message.role.capitalize()}: {message.content}"
elif isinstance(message, HumanMessage):
message_text = f"[INST] {message.content} [/INST]"
elif isinstance(message, AIMessage):
message_text = f"{message.content}"
elif isinstance(message, SystemMessage):
message_text = f"<<SYS>> {message.content} <</SYS>>"
else:
raise ValueError(f"Got unknown type {message}")
return message_text
| [
"application/json"
] |
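A minimal usage sketch for the two wrappers above; the pod id and API key are placeholders.

# Hypothetical usage; pod_id and api_key are placeholders.
from langchain.schema.messages import HumanMessage, SystemMessage

from runpod_ollama.runpod_serverless_llm import ChatRunpodServerless, RunpodServerlessLLM

llm = RunpodServerlessLLM(pod_id="your-endpoint-id", api_key="your-runpod-key")
print(llm("Explain what a serverless endpoint is in one sentence."))

chat = ChatRunpodServerless(pod_id="your-endpoint-id", api_key="your-runpod-key")
reply = chat([SystemMessage(content="You are terse."), HumanMessage(content="Name three HTTP verbs.")])
print(reply.content)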
2024-01-10 | jashwanth0712/before-you-sign | backend~action_model.py | import os
import openai
import json
from dotenv import load_dotenv
load_dotenv()
openai.api_key = os.getenv("OPENAI_KEY")
def action_function(legal_document_text):
completion = openai.ChatCompletion.create(
model="gpt-3.5-turbo",
messages=[{"role": "system","content":"Output all the key sentences that are really important in a legal,financial,fiduciary and regulatory perspective as a proper JSON array, say or do nothing else, just output the list"},
{"role": "user", "content":legal_document_text}],
temperature = 0.5
)
#returns output as a list
return json.loads(completion["choices"][0]["message"]["content"])
| [
"Output all the key sentences that are really important in a legal,financial,fiduciary and regulatory perspective as a proper JSON array, say or do nothing else, just output the list"
] |
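A small usage sketch for action_function above; the contract text is invented and the import path assumes the backend directory is on the Python path.

# Hypothetical call; the document text is a made-up placeholder.
from action_model import action_function

contract = (
    "The Tenant shall pay a monthly rent of $1,200 due on the first of each month. "
    "A late fee of 5% applies after a 10-day grace period."
)
for sentence in action_function(contract):
    print("-", sentence)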
2024-01-10 | jashwanth0712/before-you-sign | backend~chat_with_bot.py | import os
import openai
from dotenv import load_dotenv
# Load environment variables from .env file
load_dotenv()
# Set the OpenAI API key
openai.api_key = os.getenv("OPENAI_KEY")
def chat_with_openai(lists,init_text):
"""
Send a series of messages to the OpenAI API and get a response.
Parameters:
- lists: A list of lists where each sub-list has two elements:
1. A 0 or 1, where 0 indicates "assistant" and 1 indicates "user".
2. The message content.
Returns:
- A string containing the assistant's response.
"""
print("Chatting with OpenAI...")
# Convert the list of lists to the desired format for the API
messages = []
messages.append({"role": "system","content":"You are a legal chatbot that is designed to help a user understand "
"the nuances of a legal document, read the document attached in this "
"message and answer all the queries of the user clearly, remember to "
"always act like the legal chatbot that you are, always answer the question of "
"the user to the point and do not include information they didn't ask"
"unless they explicitly asked to explain and start with a very small and brief"
"welcome message to the user by summarizing only the very key nuances of the "
"document Document:"+init_text})
for item in lists:
role = "assistant" if item[0] == 0 else "user"
content = item[1]
messages.append({"role": role, "content": content})
# Send the formatted messages to the OpenAI API
completion = openai.ChatCompletion.create(
model="gpt-3.5-turbo",
messages=messages
)
# Extract the assistant's response from the API completion and return it
return completion["choices"][0]["message"]["content"]
# Driver Code
# if __name__ == "__main__":
# # Initial message list
# messages_list = []
# with open('sample.txt', 'r') as file:
# legal_document = file.read()
# while True:
# # Get assistant's response based on the current message list
# response = chat_with_openai(messages_list,legal_document)
# # Display the assistant's response
# print(f"Assistant: {response}")
# # Add assistant's response to the list for context in future interactions
# messages_list.append([0, response])
# # Get user input
# user_message = input("You: ")
# # Add user message to the list
# messages_list.append([1, user_message])
# # Check if user wants to continue
# continue_chat = input("Continue chatting? (yes/no): ").strip().lower()
# if continue_chat != "yes":
# print("Goodbye!")
# break
| [
"You are a legal chatbot that is designed to help a user understand the nuances of a legal document, read the document attached in this message and answer all the queries of the user clearly, remember to always act like the legal chatbot that you are, always answer the question of the user to the point and do not include information they didn't askunless they explicitly asked to explain and start with a very small and briefwelcome message to the user by summarizing only the very key nuances of the document Document:PLACEHOLDER"
] |
2024-01-10 | Haoqing-Wu/pose-diffusion | posediff~modules~diffusion~gaussian_diffusion.py | # Modified from OpenAI's diffusion repos
# GLIDE: https://github.com/openai/glide-text2im/blob/main/glide_text2im/gaussian_diffusion.py
# ADM: https://github.com/openai/guided-diffusion/blob/main/guided_diffusion
# IDDPM: https://github.com/openai/improved-diffusion/blob/main/improved_diffusion/gaussian_diffusion.py
import math
import numpy as np
import torch as th
import enum
from .diffusion_utils import discretized_gaussian_log_likelihood, normal_kl
def mean_flat(tensor):
"""
Take the mean over all non-batch dimensions.
"""
return tensor.mean(dim=list(range(1, len(tensor.shape))))
class ModelMeanType(enum.Enum):
"""
Which type of output the model predicts.
"""
PREVIOUS_X = enum.auto() # the model predicts x_{t-1}
START_X = enum.auto() # the model predicts x_0
EPSILON = enum.auto() # the model predicts epsilon
class ModelVarType(enum.Enum):
"""
What is used as the model's output variance.
The LEARNED_RANGE option has been added to allow the model to predict
values between FIXED_SMALL and FIXED_LARGE, making its job easier.
"""
LEARNED = enum.auto()
FIXED_SMALL = enum.auto()
FIXED_LARGE = enum.auto()
LEARNED_RANGE = enum.auto()
class LossType(enum.Enum):
MSE = enum.auto() # use raw MSE loss (and KL when learning variances)
RESCALED_MSE = (
enum.auto()
) # use raw MSE loss (with RESCALED_KL when learning variances)
KL = enum.auto() # use the variational lower-bound
RESCALED_KL = enum.auto() # like KL, but rescale to estimate the full VLB
def is_vb(self):
return self == LossType.KL or self == LossType.RESCALED_KL
def _warmup_beta(beta_start, beta_end, num_diffusion_timesteps, warmup_frac):
betas = beta_end * np.ones(num_diffusion_timesteps, dtype=np.float64)
warmup_time = int(num_diffusion_timesteps * warmup_frac)
betas[:warmup_time] = np.linspace(beta_start, beta_end, warmup_time, dtype=np.float64)
return betas
def get_beta_schedule(beta_schedule, *, beta_start, beta_end, num_diffusion_timesteps):
"""
This is the deprecated API for creating beta schedules.
See get_named_beta_schedule() for the new library of schedules.
"""
if beta_schedule == "quad":
betas = (
np.linspace(
beta_start ** 0.5,
beta_end ** 0.5,
num_diffusion_timesteps,
dtype=np.float64,
)
** 2
)
elif beta_schedule == "linear":
betas = np.linspace(beta_start, beta_end, num_diffusion_timesteps, dtype=np.float64)
elif beta_schedule == "warmup10":
betas = _warmup_beta(beta_start, beta_end, num_diffusion_timesteps, 0.1)
elif beta_schedule == "warmup30":
betas = _warmup_beta(beta_start, beta_end, num_diffusion_timesteps, 0.3)
elif beta_schedule == "warmup50":
betas = _warmup_beta(beta_start, beta_end, num_diffusion_timesteps, 0.5)
elif beta_schedule == "const":
betas = beta_end * np.ones(num_diffusion_timesteps, dtype=np.float64)
elif beta_schedule == "jsd": # 1/T, 1/(T-1), 1/(T-2), ..., 1
betas = 1.0 / np.linspace(
num_diffusion_timesteps, 1, num_diffusion_timesteps, dtype=np.float64
)
else:
raise NotImplementedError(beta_schedule)
assert betas.shape == (num_diffusion_timesteps,)
return betas
def get_named_beta_schedule(schedule_name, num_diffusion_timesteps):
"""
Get a pre-defined beta schedule for the given name.
The beta schedule library consists of beta schedules which remain similar
in the limit of num_diffusion_timesteps.
Beta schedules may be added, but should not be removed or changed once
they are committed to maintain backwards compatibility.
"""
if schedule_name == "linear":
# Linear schedule from Ho et al, extended to work for any number of
# diffusion steps.
scale = 1000 / num_diffusion_timesteps
return get_beta_schedule(
"linear",
beta_start=scale * 0.0001,
beta_end=scale * 0.02,
num_diffusion_timesteps=num_diffusion_timesteps,
)
elif schedule_name == "squaredcos_cap_v2":
return betas_for_alpha_bar(
num_diffusion_timesteps,
lambda t: math.cos((t + 0.008) / 1.008 * math.pi / 2) ** 2,
)
else:
raise NotImplementedError(f"unknown beta schedule: {schedule_name}")
def betas_for_alpha_bar(num_diffusion_timesteps, alpha_bar, max_beta=0.999):
"""
Create a beta schedule that discretizes the given alpha_t_bar function,
which defines the cumulative product of (1-beta) over time from t = [0,1].
:param num_diffusion_timesteps: the number of betas to produce.
:param alpha_bar: a lambda that takes an argument t from 0 to 1 and
produces the cumulative product of (1-beta) up to that
part of the diffusion process.
:param max_beta: the maximum beta to use; use values lower than 1 to
prevent singularities.
"""
betas = []
for i in range(num_diffusion_timesteps):
t1 = i / num_diffusion_timesteps
t2 = (i + 1) / num_diffusion_timesteps
betas.append(min(1 - alpha_bar(t2) / alpha_bar(t1), max_beta))
return np.array(betas)
class GaussianDiffusion:
"""
Utilities for training and sampling diffusion models.
Original ported from this codebase:
https://github.com/hojonathanho/diffusion/blob/1e0dceb3b3495bbe19116a5e1b3596cd0706c543/diffusion_tf/diffusion_utils_2.py#L42
:param betas: a 1-D numpy array of betas for each diffusion timestep,
starting at T and going to 1.
"""
def __init__(
self,
*,
betas,
model_mean_type,
model_var_type,
loss_type
):
self.model_mean_type = model_mean_type
self.model_var_type = model_var_type
self.loss_type = loss_type
# Use float64 for accuracy.
betas = np.array(betas, dtype=np.float64)
self.betas = betas
assert len(betas.shape) == 1, "betas must be 1-D"
assert (betas > 0).all() and (betas <= 1).all()
self.num_timesteps = int(betas.shape[0])
alphas = 1.0 - betas
self.alphas_cumprod = np.cumprod(alphas, axis=0)
self.alphas_cumprod_prev = np.append(1.0, self.alphas_cumprod[:-1])
self.alphas_cumprod_next = np.append(self.alphas_cumprod[1:], 0.0)
assert self.alphas_cumprod_prev.shape == (self.num_timesteps,)
# calculations for diffusion q(x_t | x_{t-1}) and others
self.sqrt_alphas_cumprod = np.sqrt(self.alphas_cumprod)
self.sqrt_one_minus_alphas_cumprod = np.sqrt(1.0 - self.alphas_cumprod)
self.log_one_minus_alphas_cumprod = np.log(1.0 - self.alphas_cumprod)
self.sqrt_recip_alphas_cumprod = np.sqrt(1.0 / self.alphas_cumprod)
self.sqrt_recipm1_alphas_cumprod = np.sqrt(1.0 / self.alphas_cumprod - 1)
# calculations for posterior q(x_{t-1} | x_t, x_0)
self.posterior_variance = (
betas * (1.0 - self.alphas_cumprod_prev) / (1.0 - self.alphas_cumprod)
)
# below: log calculation clipped because the posterior variance is 0 at the beginning of the diffusion chain
self.posterior_log_variance_clipped = np.log(
np.append(self.posterior_variance[1], self.posterior_variance[1:])
) if len(self.posterior_variance) > 1 else np.array([])
self.posterior_mean_coef1 = (
betas * np.sqrt(self.alphas_cumprod_prev) / (1.0 - self.alphas_cumprod)
)
self.posterior_mean_coef2 = (
(1.0 - self.alphas_cumprod_prev) * np.sqrt(alphas) / (1.0 - self.alphas_cumprod)
)
def q_mean_variance(self, x_start, t):
"""
Get the distribution q(x_t | x_0).
:param x_start: the [N x C x ...] tensor of noiseless inputs.
:param t: the number of diffusion steps (minus 1). Here, 0 means one step.
:return: A tuple (mean, variance, log_variance), all of x_start's shape.
"""
mean = _extract_into_tensor(self.sqrt_alphas_cumprod, t, x_start.shape) * x_start
variance = _extract_into_tensor(1.0 - self.alphas_cumprod, t, x_start.shape)
log_variance = _extract_into_tensor(self.log_one_minus_alphas_cumprod, t, x_start.shape)
return mean, variance, log_variance
def q_sample(self, x_start, t, noise=None):
"""
Diffuse the data for a given number of diffusion steps.
In other words, sample from q(x_t | x_0).
:param x_start: the initial data batch.
:param t: the number of diffusion steps (minus 1). Here, 0 means one step.
:param noise: if specified, the split-out normal noise.
:return: A noisy version of x_start.
"""
if noise is None:
noise = th.randn_like(x_start)
assert noise.shape == x_start.shape
return (
_extract_into_tensor(self.sqrt_alphas_cumprod, t, x_start.shape) * x_start
+ _extract_into_tensor(self.sqrt_one_minus_alphas_cumprod, t, x_start.shape) * noise
)
def q_posterior_mean_variance(self, x_start, x_t, t):
"""
Compute the mean and variance of the diffusion posterior:
q(x_{t-1} | x_t, x_0)
"""
assert x_start.shape == x_t.shape
posterior_mean = (
_extract_into_tensor(self.posterior_mean_coef1, t, x_t.shape) * x_start
+ _extract_into_tensor(self.posterior_mean_coef2, t, x_t.shape) * x_t
)
posterior_variance = _extract_into_tensor(self.posterior_variance, t, x_t.shape)
posterior_log_variance_clipped = _extract_into_tensor(
self.posterior_log_variance_clipped, t, x_t.shape
)
assert (
posterior_mean.shape[0]
== posterior_variance.shape[0]
== posterior_log_variance_clipped.shape[0]
== x_start.shape[0]
)
return posterior_mean, posterior_variance, posterior_log_variance_clipped
def p_mean_variance(self, model, x, t, clip_denoised=True, denoised_fn=None, model_kwargs=None):
"""
Apply the model to get p(x_{t-1} | x_t), as well as a prediction of
the initial x, x_0.
:param model: the model, which takes a signal and a batch of timesteps
as input.
:param x: the [N x C x ...] tensor at time t.
:param t: a 1-D Tensor of timesteps.
:param clip_denoised: if True, clip the denoised signal into [-1, 1].
:param denoised_fn: if not None, a function which applies to the
x_start prediction before it is used to sample. Applies before
clip_denoised.
:param model_kwargs: if not None, a dict of extra keyword arguments to
pass to the model. This can be used for conditioning.
:return: a dict with the following keys:
- 'mean': the model mean output.
- 'variance': the model variance output.
- 'log_variance': the log of 'variance'.
- 'pred_xstart': the prediction for x_0.
"""
if model_kwargs is None:
model_kwargs = {}
B, C = x.shape[:2]
assert t.shape == (B,)
model_output = model(x, t, **model_kwargs)
if isinstance(model_output, tuple):
model_output, extra = model_output
else:
extra = None
if self.model_var_type in [ModelVarType.LEARNED, ModelVarType.LEARNED_RANGE]:
assert model_output.shape == (B, C * 2, *x.shape[2:])
model_output, model_var_values = th.split(model_output, C, dim=1)
min_log = _extract_into_tensor(self.posterior_log_variance_clipped, t, x.shape)
max_log = _extract_into_tensor(np.log(self.betas), t, x.shape)
# The model_var_values is [-1, 1] for [min_var, max_var].
frac = (model_var_values + 1) / 2
model_log_variance = frac * max_log + (1 - frac) * min_log
model_variance = th.exp(model_log_variance)
else:
model_variance, model_log_variance = {
# for fixedlarge, we set the initial (log-)variance like so
# to get a better decoder log likelihood.
ModelVarType.FIXED_LARGE: (
np.append(self.posterior_variance[1], self.betas[1:]),
np.log(np.append(self.posterior_variance[1], self.betas[1:])),
),
ModelVarType.FIXED_SMALL: (
self.posterior_variance,
self.posterior_log_variance_clipped,
),
}[self.model_var_type]
model_variance = _extract_into_tensor(model_variance, t, x.shape)
model_log_variance = _extract_into_tensor(model_log_variance, t, x.shape)
def process_xstart(x):
if denoised_fn is not None:
x = denoised_fn(x)
if clip_denoised:
return x.clamp(-1, 1)
return x
if self.model_mean_type == ModelMeanType.START_X:
pred_xstart = process_xstart(model_output)
else:
pred_xstart = process_xstart(
self._predict_xstart_from_eps(x_t=x, t=t, eps=model_output)
)
model_mean, _, _ = self.q_posterior_mean_variance(x_start=pred_xstart, x_t=x, t=t)
assert model_mean.shape == model_log_variance.shape == pred_xstart.shape == x.shape
return {
"mean": model_mean,
"variance": model_variance,
"log_variance": model_log_variance,
"pred_xstart": pred_xstart,
"extra": extra,
}
def _predict_xstart_from_eps(self, x_t, t, eps):
assert x_t.shape == eps.shape
return (
_extract_into_tensor(self.sqrt_recip_alphas_cumprod, t, x_t.shape) * x_t
- _extract_into_tensor(self.sqrt_recipm1_alphas_cumprod, t, x_t.shape) * eps
)
def _predict_eps_from_xstart(self, x_t, t, pred_xstart):
return (
_extract_into_tensor(self.sqrt_recip_alphas_cumprod, t, x_t.shape) * x_t - pred_xstart
) / _extract_into_tensor(self.sqrt_recipm1_alphas_cumprod, t, x_t.shape)
def condition_mean(self, cond_fn, p_mean_var, x, t, model_kwargs=None):
"""
Compute the mean for the previous step, given a function cond_fn that
computes the gradient of a conditional log probability with respect to
x. In particular, cond_fn computes grad(log(p(y|x))), and we want to
condition on y.
This uses the conditioning strategy from Sohl-Dickstein et al. (2015).
"""
gradient = cond_fn(x, t, **model_kwargs)
new_mean = p_mean_var["mean"].float() + p_mean_var["variance"] * gradient.float()
return new_mean
def condition_score(self, cond_fn, p_mean_var, x, t, model_kwargs=None):
"""
Compute what the p_mean_variance output would have been, should the
model's score function be conditioned by cond_fn.
See condition_mean() for details on cond_fn.
Unlike condition_mean(), this instead uses the conditioning strategy
from Song et al (2020).
"""
alpha_bar = _extract_into_tensor(self.alphas_cumprod, t, x.shape)
eps = self._predict_eps_from_xstart(x, t, p_mean_var["pred_xstart"])
eps = eps - (1 - alpha_bar).sqrt() * cond_fn(x, t, **model_kwargs)
out = p_mean_var.copy()
out["pred_xstart"] = self._predict_xstart_from_eps(x, t, eps)
out["mean"], _, _ = self.q_posterior_mean_variance(x_start=out["pred_xstart"], x_t=x, t=t)
return out
def p_sample(
self,
model,
x,
t,
clip_denoised=True,
denoised_fn=None,
cond_fn=None,
model_kwargs=None,
):
"""
Sample x_{t-1} from the model at the given timestep.
:param model: the model to sample from.
:param x: the current tensor at x_{t-1}.
:param t: the value of t, starting at 0 for the first diffusion step.
:param clip_denoised: if True, clip the x_start prediction to [-1, 1].
:param denoised_fn: if not None, a function which applies to the
x_start prediction before it is used to sample.
:param cond_fn: if not None, this is a gradient function that acts
similarly to the model.
:param model_kwargs: if not None, a dict of extra keyword arguments to
pass to the model. This can be used for conditioning.
:return: a dict containing the following keys:
- 'sample': a random sample from the model.
- 'pred_xstart': a prediction of x_0.
"""
out = self.p_mean_variance(
model,
x,
t,
clip_denoised=clip_denoised,
denoised_fn=denoised_fn,
model_kwargs=model_kwargs,
)
noise = th.randn_like(x)
nonzero_mask = (
(t != 0).float().view(-1, *([1] * (len(x.shape) - 1)))
) # no noise when t == 0
if cond_fn is not None:
out["mean"] = self.condition_mean(cond_fn, out, x, t, model_kwargs=model_kwargs)
sample = out["mean"] + nonzero_mask * th.exp(0.5 * out["log_variance"]) * noise
return {"sample": sample, "pred_xstart": out["pred_xstart"]}
def p_sample_loop(
self,
model,
shape,
noise=None,
clip_denoised=True,
denoised_fn=None,
cond_fn=None,
model_kwargs=None,
device=None,
progress=False,
):
"""
Generate samples from the model.
:param model: the model module.
:param shape: the shape of the samples, (N, C, H, W).
:param noise: if specified, the noise from the encoder to sample.
Should be of the same shape as `shape`.
:param clip_denoised: if True, clip x_start predictions to [-1, 1].
:param denoised_fn: if not None, a function which applies to the
x_start prediction before it is used to sample.
:param cond_fn: if not None, this is a gradient function that acts
similarly to the model.
:param model_kwargs: if not None, a dict of extra keyword arguments to
pass to the model. This can be used for conditioning.
:param device: if specified, the device to create the samples on.
If not specified, use a model parameter's device.
:param progress: if True, show a tqdm progress bar.
:return: a non-differentiable batch of samples.
"""
final = None
traj_samples = []
for sample in self.p_sample_loop_progressive(
model,
shape,
noise=noise,
clip_denoised=clip_denoised,
denoised_fn=denoised_fn,
cond_fn=cond_fn,
model_kwargs=model_kwargs,
device=device,
progress=progress,
):
final = sample
traj_samples.append(final["sample"])
return traj_samples
def p_sample_loop_progressive(
self,
model,
shape,
noise=None,
clip_denoised=True,
denoised_fn=None,
cond_fn=None,
model_kwargs=None,
device=None,
progress=False,
):
"""
Generate samples from the model and yield intermediate samples from
each timestep of diffusion.
Arguments are the same as p_sample_loop().
Returns a generator over dicts, where each dict is the return value of
p_sample().
"""
if device is None:
device = next(model.parameters()).device
assert isinstance(shape, (tuple, list))
if noise is not None:
img = noise
else:
img = th.randn(*shape, device=device)
indices = list(range(self.num_timesteps))[::-1]
if progress:
# Lazy import so that we don't depend on tqdm.
from tqdm.auto import tqdm
indices = tqdm(indices)
for i in indices:
t = th.tensor([i] * shape[0], device=device)
with th.no_grad():
out = self.p_sample(
model,
img,
t,
clip_denoised=clip_denoised,
denoised_fn=denoised_fn,
cond_fn=cond_fn,
model_kwargs=model_kwargs,
)
yield out
img = out["sample"]
def ddim_sample(
self,
model,
x,
t,
clip_denoised=True,
denoised_fn=None,
cond_fn=None,
model_kwargs=None,
eta=0.0,
):
"""
Sample x_{t-1} from the model using DDIM.
Same usage as p_sample().
"""
out = self.p_mean_variance(
model,
x,
t,
clip_denoised=clip_denoised,
denoised_fn=denoised_fn,
model_kwargs=model_kwargs,
)
if cond_fn is not None:
out = self.condition_score(cond_fn, out, x, t, model_kwargs=model_kwargs)
# Usually our model outputs epsilon, but we re-derive it
# in case we used x_start or x_prev prediction.
eps = self._predict_eps_from_xstart(x, t, out["pred_xstart"])
alpha_bar = _extract_into_tensor(self.alphas_cumprod, t, x.shape)
alpha_bar_prev = _extract_into_tensor(self.alphas_cumprod_prev, t, x.shape)
sigma = (
eta
* th.sqrt((1 - alpha_bar_prev) / (1 - alpha_bar))
* th.sqrt(1 - alpha_bar / alpha_bar_prev)
)
# Equation 12.
noise = th.randn_like(x)
mean_pred = (
out["pred_xstart"] * th.sqrt(alpha_bar_prev)
+ th.sqrt(1 - alpha_bar_prev - sigma ** 2) * eps
)
nonzero_mask = (
(t != 0).float().view(-1, *([1] * (len(x.shape) - 1)))
) # no noise when t == 0
sample = mean_pred + nonzero_mask * sigma * noise
return {"sample": sample, "pred_xstart": out["pred_xstart"]}
def ddim_reverse_sample(
self,
model,
x,
t,
clip_denoised=True,
denoised_fn=None,
cond_fn=None,
model_kwargs=None,
eta=0.0,
):
"""
Sample x_{t+1} from the model using DDIM reverse ODE.
"""
assert eta == 0.0, "Reverse ODE only for deterministic path"
out = self.p_mean_variance(
model,
x,
t,
clip_denoised=clip_denoised,
denoised_fn=denoised_fn,
model_kwargs=model_kwargs,
)
if cond_fn is not None:
out = self.condition_score(cond_fn, out, x, t, model_kwargs=model_kwargs)
# Usually our model outputs epsilon, but we re-derive it
# in case we used x_start or x_prev prediction.
eps = (
_extract_into_tensor(self.sqrt_recip_alphas_cumprod, t, x.shape) * x
- out["pred_xstart"]
) / _extract_into_tensor(self.sqrt_recipm1_alphas_cumprod, t, x.shape)
alpha_bar_next = _extract_into_tensor(self.alphas_cumprod_next, t, x.shape)
# Equation 12. reversed
mean_pred = out["pred_xstart"] * th.sqrt(alpha_bar_next) + th.sqrt(1 - alpha_bar_next) * eps
return {"sample": mean_pred, "pred_xstart": out["pred_xstart"]}
def ddim_sample_loop(
self,
model,
shape,
noise=None,
clip_denoised=True,
denoised_fn=None,
cond_fn=None,
model_kwargs=None,
device=None,
progress=False,
eta=0.0,
):
"""
Generate samples from the model using DDIM.
Same usage as p_sample_loop().
"""
final = None
for sample in self.ddim_sample_loop_progressive(
model,
shape,
noise=noise,
clip_denoised=clip_denoised,
denoised_fn=denoised_fn,
cond_fn=cond_fn,
model_kwargs=model_kwargs,
device=device,
progress=progress,
eta=eta,
):
final = sample
return final["sample"]
def ddim_sample_loop_progressive(
self,
model,
shape,
noise=None,
clip_denoised=True,
denoised_fn=None,
cond_fn=None,
model_kwargs=None,
device=None,
progress=False,
eta=0.0,
):
"""
Use DDIM to sample from the model and yield intermediate samples from
each timestep of DDIM.
Same usage as p_sample_loop_progressive().
"""
if device is None:
device = next(model.parameters()).device
assert isinstance(shape, (tuple, list))
if noise is not None:
img = noise
else:
img = th.randn(*shape, device=device)
indices = list(range(self.num_timesteps))[::-1]
if progress:
# Lazy import so that we don't depend on tqdm.
from tqdm.auto import tqdm
indices = tqdm(indices)
for i in indices:
t = th.tensor([i] * shape[0], device=device)
with th.no_grad():
out = self.ddim_sample(
model,
img,
t,
clip_denoised=clip_denoised,
denoised_fn=denoised_fn,
cond_fn=cond_fn,
model_kwargs=model_kwargs,
eta=eta,
)
yield out
img = out["sample"]
def _vb_terms_bpd(
self, model, x_start, x_t, t, clip_denoised=True, model_kwargs=None
):
"""
Get a term for the variational lower-bound.
The resulting units are bits (rather than nats, as one might expect).
This allows for comparison to other papers.
:return: a dict with the following keys:
- 'output': a shape [N] tensor of NLLs or KLs.
- 'pred_xstart': the x_0 predictions.
"""
true_mean, _, true_log_variance_clipped = self.q_posterior_mean_variance(
x_start=x_start, x_t=x_t, t=t
)
out = self.p_mean_variance(
model, x_t, t, clip_denoised=clip_denoised, model_kwargs=model_kwargs
)
kl = normal_kl(
true_mean, true_log_variance_clipped, out["mean"], out["log_variance"]
)
kl = mean_flat(kl) / np.log(2.0)
decoder_nll = -discretized_gaussian_log_likelihood(
x_start, means=out["mean"], log_scales=0.5 * out["log_variance"]
)
assert decoder_nll.shape == x_start.shape
decoder_nll = mean_flat(decoder_nll) / np.log(2.0)
# At the first timestep return the decoder NLL,
# otherwise return KL(q(x_{t-1}|x_t,x_0) || p(x_{t-1}|x_t))
output = th.where((t == 0), decoder_nll, kl)
return {"output": output, "pred_xstart": out["pred_xstart"]}
def training_losses(self, model, x_start, t, model_kwargs=None, noise=None):
"""
Compute training losses for a single timestep.
:param model: the model to evaluate loss on.
:param x_start: the [N x C x ...] tensor of inputs.
:param t: a batch of timestep indices.
:param model_kwargs: if not None, a dict of extra keyword arguments to
pass to the model. This can be used for conditioning.
:param noise: if specified, the specific Gaussian noise to try to remove.
:return: a dict with the key "loss" containing a tensor of shape [N].
Some mean or variance settings may also have other keys.
"""
if model_kwargs is None:
model_kwargs = {}
if noise is None:
noise = th.randn_like(x_start)
x_t = self.q_sample(x_start, t, noise=noise)
terms = {}
if self.loss_type == LossType.KL or self.loss_type == LossType.RESCALED_KL:
terms["loss"] = self._vb_terms_bpd(
model=model,
x_start=x_start,
x_t=x_t,
t=t,
clip_denoised=False,
model_kwargs=model_kwargs,
)["output"]
if self.loss_type == LossType.RESCALED_KL:
terms["loss"] *= self.num_timesteps
elif self.loss_type == LossType.MSE or self.loss_type == LossType.RESCALED_MSE:
model_output = model(x_t, t, **model_kwargs)
if self.model_var_type in [
ModelVarType.LEARNED,
ModelVarType.LEARNED_RANGE,
]:
B, C = x_t.shape[:2]
assert model_output.shape == (B, C * 2, *x_t.shape[2:])
model_output, model_var_values = th.split(model_output, C, dim=1)
# Learn the variance using the variational bound, but don't let
# it affect our mean prediction.
frozen_out = th.cat([model_output.detach(), model_var_values], dim=1)
terms["vb"] = self._vb_terms_bpd(
model=lambda *args, r=frozen_out: r,
x_start=x_start,
x_t=x_t,
t=t,
clip_denoised=False,
)["output"]
if self.loss_type == LossType.RESCALED_MSE:
# Divide by 1000 for equivalence with initial implementation.
# Without a factor of 1/1000, the VB term hurts the MSE term.
terms["vb"] *= self.num_timesteps / 1000.0
target = {
ModelMeanType.PREVIOUS_X: self.q_posterior_mean_variance(
x_start=x_start, x_t=x_t, t=t
)[0],
ModelMeanType.START_X: x_start,
ModelMeanType.EPSILON: noise,
}[self.model_mean_type]
assert model_output.shape == target.shape == x_start.shape
terms["mse"] = mean_flat((target - model_output) ** 2)
if "vb" in terms:
terms["loss"] = terms["mse"] + terms["vb"]
else:
terms["loss"] = terms["mse"]
else:
raise NotImplementedError(self.loss_type)
return terms
def _prior_bpd(self, x_start):
"""
Get the prior KL term for the variational lower-bound, measured in
bits-per-dim.
This term can't be optimized, as it only depends on the encoder.
:param x_start: the [N x C x ...] tensor of inputs.
:return: a batch of [N] KL values (in bits), one per batch element.
"""
batch_size = x_start.shape[0]
t = th.tensor([self.num_timesteps - 1] * batch_size, device=x_start.device)
qt_mean, _, qt_log_variance = self.q_mean_variance(x_start, t)
kl_prior = normal_kl(
mean1=qt_mean, logvar1=qt_log_variance, mean2=0.0, logvar2=0.0
)
return mean_flat(kl_prior) / np.log(2.0)
def calc_bpd_loop(self, model, x_start, clip_denoised=True, model_kwargs=None):
"""
Compute the entire variational lower-bound, measured in bits-per-dim,
as well as other related quantities.
:param model: the model to evaluate loss on.
:param x_start: the [N x C x ...] tensor of inputs.
:param clip_denoised: if True, clip denoised samples.
:param model_kwargs: if not None, a dict of extra keyword arguments to
pass to the model. This can be used for conditioning.
:return: a dict containing the following keys:
- total_bpd: the total variational lower-bound, per batch element.
- prior_bpd: the prior term in the lower-bound.
- vb: an [N x T] tensor of terms in the lower-bound.
- xstart_mse: an [N x T] tensor of x_0 MSEs for each timestep.
- mse: an [N x T] tensor of epsilon MSEs for each timestep.
"""
device = x_start.device
batch_size = x_start.shape[0]
vb = []
xstart_mse = []
mse = []
for t in list(range(self.num_timesteps))[::-1]:
t_batch = th.tensor([t] * batch_size, device=device)
noise = th.randn_like(x_start)
x_t = self.q_sample(x_start=x_start, t=t_batch, noise=noise)
# Calculate VLB term at the current timestep
with th.no_grad():
out = self._vb_terms_bpd(
model,
x_start=x_start,
x_t=x_t,
t=t_batch,
clip_denoised=clip_denoised,
model_kwargs=model_kwargs,
)
vb.append(out["output"])
xstart_mse.append(mean_flat((out["pred_xstart"] - x_start) ** 2))
eps = self._predict_eps_from_xstart(x_t, t_batch, out["pred_xstart"])
mse.append(mean_flat((eps - noise) ** 2))
vb = th.stack(vb, dim=1)
xstart_mse = th.stack(xstart_mse, dim=1)
mse = th.stack(mse, dim=1)
prior_bpd = self._prior_bpd(x_start)
total_bpd = vb.sum(dim=1) + prior_bpd
return {
"total_bpd": total_bpd,
"prior_bpd": prior_bpd,
"vb": vb,
"xstart_mse": xstart_mse,
"mse": mse,
}
def _extract_into_tensor(arr, timesteps, broadcast_shape):
"""
Extract values from a 1-D numpy array for a batch of indices.
:param arr: the 1-D numpy array.
:param timesteps: a tensor of indices into the array to extract.
:param broadcast_shape: a larger shape of K dimensions with the batch
dimension equal to the length of timesteps.
:return: a tensor of shape [batch_size, 1, ...] where the shape has K dims.
"""
res = th.from_numpy(arr).to(device=timesteps.device)[timesteps].float()
while len(res.shape) < len(broadcast_shape):
res = res[..., None]
return res + th.zeros(broadcast_shape, device=timesteps.device)
| [] |
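A compact sketch of driving the class above end to end with a named beta schedule. The zero-output lambda stands in for a real epsilon-prediction network and the tensor shapes are arbitrary; nothing here is the actual pose-diffusion model.

# Illustrative only: a toy stand-in model, not the pose-diffusion network.
import torch as th

from posediff.modules.diffusion.gaussian_diffusion import (
    GaussianDiffusion,
    LossType,
    ModelMeanType,
    ModelVarType,
    get_named_beta_schedule,
)

diffusion = GaussianDiffusion(
    betas=get_named_beta_schedule("linear", 1000),
    model_mean_type=ModelMeanType.EPSILON,    # the network predicts the added noise
    model_var_type=ModelVarType.FIXED_SMALL,  # use the fixed posterior variance
    loss_type=LossType.MSE,
)

model = lambda x, t, **kwargs: th.zeros_like(x)  # placeholder network

x_start = th.randn(4, 3, 8, 8)
t = th.randint(0, diffusion.num_timesteps, (4,))
losses = diffusion.training_losses(model, x_start, t)
print(losses["loss"].shape)  # one scalar loss per batch element

# p_sample_loop in this file returns the whole reverse trajectory as a list of samples.
trajectory = diffusion.p_sample_loop(model, shape=(4, 3, 8, 8), device=th.device("cpu"))
print(len(trajectory), trajectory[-1].shape)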
2024-01-10 | Haoqing-Wu/pose-diffusion | posediff~modules~diffusion~respace.py | # Modified from OpenAI's diffusion repos
# GLIDE: https://github.com/openai/glide-text2im/blob/main/glide_text2im/gaussian_diffusion.py
# ADM: https://github.com/openai/guided-diffusion/blob/main/guided_diffusion
# IDDPM: https://github.com/openai/improved-diffusion/blob/main/improved_diffusion/gaussian_diffusion.py
import numpy as np
import torch as th
from .gaussian_diffusion import GaussianDiffusion
def space_timesteps(num_timesteps, section_counts):
"""
Create a list of timesteps to use from an original diffusion process,
given the number of timesteps we want to take from equally-sized portions
of the original process.
For example, if there's 300 timesteps and the section counts are [10,15,20]
then the first 100 timesteps are strided to be 10 timesteps, the second 100
are strided to be 15 timesteps, and the final 100 are strided to be 20.
If the stride is a string starting with "ddim", then the fixed striding
from the DDIM paper is used, and only one section is allowed.
:param num_timesteps: the number of diffusion steps in the original
process to divide up.
:param section_counts: either a list of numbers, or a string containing
comma-separated numbers, indicating the step count
per section. As a special case, use "ddimN" where N
is a number of steps to use the striding from the
DDIM paper.
:return: a set of diffusion steps from the original process to use.
"""
if isinstance(section_counts, str):
if section_counts.startswith("ddim"):
desired_count = int(section_counts[len("ddim") :])
for i in range(1, num_timesteps):
if len(range(0, num_timesteps, i)) == desired_count:
return set(range(0, num_timesteps, i))
raise ValueError(
f"cannot create exactly {num_timesteps} steps with an integer stride"
)
section_counts = [int(x) for x in section_counts.split(",")]
size_per = num_timesteps // len(section_counts)
extra = num_timesteps % len(section_counts)
start_idx = 0
all_steps = []
for i, section_count in enumerate(section_counts):
size = size_per + (1 if i < extra else 0)
if size < section_count:
raise ValueError(
f"cannot divide section of {size} steps into {section_count}"
)
if section_count <= 1:
frac_stride = 1
else:
frac_stride = (size - 1) / (section_count - 1)
cur_idx = 0.0
taken_steps = []
for _ in range(section_count):
taken_steps.append(start_idx + round(cur_idx))
cur_idx += frac_stride
all_steps += taken_steps
start_idx += size
return set(all_steps)
class SpacedDiffusion(GaussianDiffusion):
"""
A diffusion process which can skip steps in a base diffusion process.
:param use_timesteps: a collection (sequence or set) of timesteps from the
original diffusion process to retain.
:param kwargs: the kwargs to create the base diffusion process.
"""
def __init__(self, use_timesteps, **kwargs):
self.use_timesteps = set(use_timesteps)
self.timestep_map = []
self.original_num_steps = len(kwargs["betas"])
base_diffusion = GaussianDiffusion(**kwargs) # pylint: disable=missing-kwoa
last_alpha_cumprod = 1.0
new_betas = []
for i, alpha_cumprod in enumerate(base_diffusion.alphas_cumprod):
if i in self.use_timesteps:
new_betas.append(1 - alpha_cumprod / last_alpha_cumprod)
last_alpha_cumprod = alpha_cumprod
self.timestep_map.append(i)
kwargs["betas"] = np.array(new_betas)
super().__init__(**kwargs)
def p_mean_variance(
self, model, *args, **kwargs
): # pylint: disable=signature-differs
return super().p_mean_variance(self._wrap_model(model), *args, **kwargs)
def training_losses(
self, model, *args, **kwargs
): # pylint: disable=signature-differs
return super().training_losses(self._wrap_model(model), *args, **kwargs)
def condition_mean(self, cond_fn, *args, **kwargs):
return super().condition_mean(self._wrap_model(cond_fn), *args, **kwargs)
def condition_score(self, cond_fn, *args, **kwargs):
return super().condition_score(self._wrap_model(cond_fn), *args, **kwargs)
def _wrap_model(self, model):
if isinstance(model, _WrappedModel):
return model
return _WrappedModel(
model, self.timestep_map, self.original_num_steps
)
def _scale_timesteps(self, t):
# Scaling is done by the wrapped model.
return t
class _WrappedModel:
def __init__(self, model, timestep_map, original_num_steps):
self.model = model
self.timestep_map = timestep_map
# self.rescale_timesteps = rescale_timesteps
self.original_num_steps = original_num_steps
def __call__(self, x, ts, **kwargs):
map_tensor = th.tensor(self.timestep_map, device=ts.device, dtype=ts.dtype)
new_ts = map_tensor[ts]
# if self.rescale_timesteps:
# new_ts = new_ts.float() * (1000.0 / self.original_num_steps)
return self.model(x, new_ts, kwargs)
| [] |
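A short sketch of the respacing utilities above: collapse a 1000-step training chain to 50 sampling steps (or a DDIM-style 25). The schedule and counts are illustrative only.

# Illustrative respacing of a 1000-step chain.
from posediff.modules.diffusion.gaussian_diffusion import (
    LossType,
    ModelMeanType,
    ModelVarType,
    get_named_beta_schedule,
)
from posediff.modules.diffusion.respace import SpacedDiffusion, space_timesteps

use_timesteps = space_timesteps(1000, [50])        # 50 evenly strided steps
# use_timesteps = space_timesteps(1000, "ddim25")  # or DDIM striding with 25 steps

spaced = SpacedDiffusion(
    use_timesteps=use_timesteps,
    betas=get_named_beta_schedule("linear", 1000),
    model_mean_type=ModelMeanType.EPSILON,
    model_var_type=ModelVarType.FIXED_SMALL,
    loss_type=LossType.MSE,
)
print(spaced.num_timesteps)      # 50: length of the respaced chain
print(spaced.timestep_map[:5])   # indices back into the original 1000-step schedule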
2024-01-10 | bcrudele/Chatbot | gpt_import.py | import config # Has API & File Path
import openai # ChatGPT API
import json
# import personalities # Personalities file, integrate later
# import google_tts # for testing
# Loads the conversation history from a file
def load_conversation_history(file_path):
try:
with open(file_path, 'r') as file:
conversation_history = json.load(file)
return conversation_history
except FileNotFoundError:
return []
# Saves the conversation history to a file
def save_conversation_history(file_path, conversation_history):
with open(file_path, 'w') as file:
json.dump(conversation_history, file, indent=1)
# Local active memory list
conversation_history = load_conversation_history("conversation_history.json")
def communicate_with_openai(prompt):
openai.api_key = config.OPENAI_API_KEY
conversation_history.append({"role": "user", "content": prompt})
response = openai.ChatCompletion.create(
model="gpt-3.5-turbo",
messages= conversation_history,
max_tokens = 100
)
ai_response = response['choices'][0]['message']['content']
# Add to memory
conversation_history.append({"role": "assistant", "content": ai_response})
# Save json
save_conversation_history("conversation_history.json", conversation_history)
return ai_response
'''
if __name__ == "__main__":
iter = 0
while True:
prompt = input("Enter a prompt: ")
result = communicate_with_openai(prompt)
google_tts.tts(result, iter)
iter += 1
print(result)
''' | [] |