response
stringlengths 1
33.1k
| instruction
stringlengths 22
582k
|
---|---|
Do subtraction between two numbers.
Args:
a (int): The minuend in subtraction.
b (int): The subtrahend in subtraction.
Returns:
def sub(a: int, b: int) -> int:
    r"""Subtract one integer from another.

    Args:
        a (int): The minuend in subtraction.
        b (int): The subtrahend in subtraction.

    Returns:
        integer: The result of subtracting :obj:`b` from :obj:`a`.
    """
    difference = a - b
    return difference
Multiplies two integers.
Args:
a (int): The multiplier in the multiplication.
b (int): The multiplicand in the multiplication.
Returns:
def mul(a: int, b: int) -> int:
    r"""Multiplies two integers.

    Args:
        a (int): The multiplier in the multiplication.
        b (int): The multiplicand in the multiplication.

    Returns:
        integer: The product of the two numbers.
    """
    product = a * b
    return product
Remove a key from a dictionary recursively. | def _remove_a_key(d: Dict, remove_key: Any) -> None:
r"""Remove a key from a dictionary recursively."""
if isinstance(d, dict):
for key in list(d.keys()):
if key == remove_key:
del d[key]
else:
_remove_a_key(d[key], remove_key) |
Generates a schema dict for an OpenAI function based on its signature.
This function is deprecated and will be replaced by
:obj:`get_openai_tool_schema()` in future versions. It parses the
function's parameters and docstring to construct a JSON schema-like
dictionary.
Args:
func (Callable): The OpenAI function to generate the schema for.
Returns:
Dict[str, Any]: A dictionary representing the JSON schema of the
function, including its name, description, and parameter
def get_openai_function_schema(func: Callable) -> Dict[str, Any]:
    r"""Generates a schema dict for an OpenAI function based on its signature.

    This function is deprecated and will be replaced by
    :obj:`get_openai_tool_schema()` in future versions. It parses the
    function's parameters and docstring to construct a JSON schema-like
    dictionary.

    Args:
        func (Callable): The OpenAI function to generate the schema for.

    Returns:
        Dict[str, Any]: A dictionary representing the JSON schema of the
            function, including its name, description, and parameter
            specifications.
    """
    # Delegate to the tool-schema builder and unwrap the "function" part.
    return get_openai_tool_schema(func)["function"]
Generates an OpenAI JSON schema from a given Python function.
This function creates a schema compatible with OpenAI's API specifications,
based on the provided Python function. It processes the function's
parameters, types, and docstrings, and constructs a schema accordingly.
Note:
- Each parameter in `func` must have a type annotation; otherwise, it's
treated as 'Any'.
- Variable arguments (*args) and keyword arguments (**kwargs) are not
supported and will be ignored.
- A functional description including a brief and detailed explanation
should be provided in the docstring of `func`.
- All parameters of `func` must be described in its docstring.
- Supported docstring styles: ReST, Google, Numpydoc, and Epydoc.
Args:
func (Callable): The Python function to be converted into an OpenAI
JSON schema.
Returns:
Dict[str, Any]: A dictionary representing the OpenAI JSON schema of
the provided function.
See Also:
`OpenAI API Reference
def get_openai_tool_schema(func: Callable) -> Dict[str, Any]:
    r"""Generates an OpenAI JSON schema from a given Python function.

    This function creates a schema compatible with OpenAI's API
    specifications, based on the provided Python function. It processes the
    function's parameters, types, and docstrings, and constructs a schema
    accordingly.

    Note:
        - Each parameter in `func` must have a type annotation; otherwise,
          it's treated as 'Any'.
        - Variable arguments (*args) and keyword arguments (**kwargs) are not
          supported and will be ignored.
        - A functional description including a brief and detailed explanation
          should be provided in the docstring of `func`.
        - All parameters of `func` must be described in its docstring.
        - Supported docstring styles: ReST, Google, Numpydoc, and Epydoc.

    Args:
        func (Callable): The Python function to be converted into an OpenAI
            JSON schema.

    Returns:
        Dict[str, Any]: A dictionary representing the OpenAI JSON schema of
            the provided function.

    See Also:
        `OpenAI API Reference
        <https://platform.openai.com/docs/api-reference/assistants/object>`_
    """
    params: Mapping[str, Parameter] = signature(func).parameters
    fields: Dict[str, Tuple[type, FieldInfo]] = {}
    for name, param in params.items():
        # *args / **kwargs cannot be expressed in the schema; skip them.
        if param.kind in (Parameter.VAR_POSITIONAL, Parameter.VAR_KEYWORD):
            continue
        # Unannotated parameters default to typing.Any.
        annotation = (
            Any if param.annotation is Parameter.empty else param.annotation
        )
        if param.default is Parameter.empty:
            fields[name] = (annotation, FieldInfo())
        else:
            fields[name] = (annotation, FieldInfo(default=param.default))

    # Applying `create_model()` directly will result in a mypy error,
    # create an alias to avoid this.
    def _create_mol(name, field):
        return create_model(name, **field)

    model = _create_mol(to_pascal(func.__name__), fields)
    # NOTE: `.schema()` is deprecated in pydantic v2; `.model_json_schema()`
    # produces an identical result there.
    if PYDANTIC_V2:
        parameters_dict = model.model_json_schema()
    else:
        parameters_dict = model.schema()

    # Strip the "title" entries pydantic generates; they are useless for
    # the OpenAI JSON schema.
    _remove_a_key(parameters_dict, "title")

    # Copy per-parameter descriptions from the docstring into the schema.
    docstring = parse(func.__doc__ or "")
    for param in docstring.params:
        if (name := param.arg_name) in parameters_dict["properties"] and (
            description := param.description
        ):
            parameters_dict["properties"][name]["description"] = description

    short_description = docstring.short_description or ""
    long_description = docstring.long_description or ""
    if long_description:
        func_description = f"{short_description}\n{long_description}"
    else:
        func_description = short_description

    return {
        "type": "function",
        "function": {
            "name": func.__name__,
            "description": func_description,
            "parameters": parameters_dict,
        },
    }
Search the entity in WikiPedia and return the summary of the
required page, containing factual information about the given entity.
Args:
entity (str): The entity to be searched.
Returns:
str: The search result. If the page corresponding to the entity
def search_wiki(entity: str) -> str:
    r"""Search the entity in WikiPedia and return the summary of the
    required page, containing factual information about the given entity.

    Args:
        entity (str): The entity to be searched.

    Returns:
        str: The search result. If the page corresponding to the entity
            exists, return the summary of this entity in a string.
    """
    try:
        import wikipedia
    except ImportError:
        raise ImportError(
            "Please install `wikipedia` first. You can install it by running "
            "`pip install wikipedia`."
        )

    # Shared options for every summary lookup below.
    summary_kwargs = {"sentences": 5, "auto_suggest": False}
    result: str
    try:
        result = wikipedia.summary(entity, **summary_kwargs)
    except wikipedia.exceptions.DisambiguationError as e:
        # Ambiguous entity: fall back to the first suggested option.
        result = wikipedia.summary(e.options[0], **summary_kwargs)
    except wikipedia.exceptions.PageError:
        result = (
            "There is no page in Wikipedia corresponding to entity "
            f"{entity}, please specify another word to describe the"
            " entity to be searched."
        )
    except wikipedia.exceptions.WikipediaException as e:
        result = f"An exception occurred during the search: {e}"
    return result
Use Google search engine to search information for the given query.
Args:
query (str): The query to be searched.
Returns:
List[Dict[str, Any]]: A list of dictionaries where each dictionary
represents a website.
Each dictionary contains the following keys:
- 'result_id': A number in order.
- 'title': The title of the website.
- 'description': A brief description of the website.
- 'long_description': More detail of the website.
- 'url': The URL of the website.
Example:
{
'result_id': 1,
'title': 'OpenAI',
'description': 'An organization focused on ensuring that
artificial general intelligence benefits all of humanity.',
'long_description': 'OpenAI is a non-profit artificial
intelligence research company. Our goal is to advance digital
intelligence in the way that is most likely to benefit humanity
as a whole',
'url': 'https://www.openai.com'
}
def search_google(query: str) -> List[Dict[str, Any]]:
    r"""Use Google search engine to search information for the given query.

    Args:
        query (str): The query to be searched.

    Returns:
        List[Dict[str, Any]]: A list of dictionaries where each dictionary
            represents a website. Each dictionary contains the following
            keys:
            - 'result_id': A number in order.
            - 'title': The title of the website.
            - 'description': A brief description of the website.
            - 'long_description': More detail of the website.
            - 'url': The URL of the website.
            On failure a single-element list with an ``{"error": ...}`` dict
            is returned.
    """
    import requests

    # https://developers.google.com/custom-search/v1/overview
    GOOGLE_API_KEY = os.getenv("GOOGLE_API_KEY")
    # https://cse.google.com/cse/all
    SEARCH_ENGINE_ID = os.getenv("SEARCH_ENGINE_ID")

    # Using the first page
    start_page_idx = 1
    # Different language may get different result
    search_language = "en"
    # How many pages to return
    num_result_pages = 10
    # Constructing the URL
    # Doc: https://developers.google.com/custom-search/v1/using_rest
    url = (
        f"https://www.googleapis.com/customsearch/v1?"
        f"key={GOOGLE_API_KEY}&cx={SEARCH_ENGINE_ID}&q={query}&start="
        f"{start_page_idx}&lr={search_language}&num={num_result_pages}"
    )

    responses = []
    try:
        result = requests.get(url)
        data = result.json()
        if "items" in data:
            for i, search_item in enumerate(data["items"], start=1):
                # Fix: "pagemap"/"metatags" are not guaranteed to exist for
                # every result; the original direct indexing raised KeyError
                # (uncaught, since only RequestException is handled below).
                metatags = (
                    search_item.get("pagemap", {}).get("metatags") or [{}]
                )
                long_description = metatags[0].get("og:description", "N/A")
                responses.append(
                    {
                        "result_id": i,
                        "title": search_item.get("title"),
                        "description": search_item.get("snippet"),
                        "long_description": long_description,
                        "url": search_item.get("link"),
                    }
                )
        else:
            responses.append({"error": "google search failed."})
    except requests.RequestException:
        responses.append({"error": "google search failed."})
    return responses
Get the text information from given url.
Args:
url (str): The website you want to search.
Returns:
def text_extract_from_web(url: str) -> str:
    r"""Get the text information from given url.

    Args:
        url (str): The website you want to search.

    Returns:
        str: All texts extract from the web.
    """
    import requests
    from bs4 import BeautifulSoup

    try:
        # Fetch the page and parse it.
        page_source = requests.get(url).text
        soup = BeautifulSoup(page_source, features="html.parser")
        # Drop non-visible content before extracting text.
        for tag in soup(["script", "style"]):
            tag.extract()
        raw_text = soup.get_text()
        # Normalise whitespace: strip each line, then split runs separated
        # by double spaces into fragments and join non-empty ones with ".".
        stripped_lines = (line.strip() for line in raw_text.splitlines())
        fragments = (
            phrase.strip()
            for line in stripped_lines
            for phrase in line.split("  ")
        )
        text = ".".join(fragment for fragment in fragments if fragment)
    except requests.RequestException:
        text = f"can't access {url}"
    return text
Returns successive n-sized chunks from provided text.
Args:
text (str): The text to be split.
n (int): The max length of a single chunk.
Returns:
def create_chunks(text: str, n: int) -> List[str]:
    r"""Returns successive n-sized chunks from provided text.

    Args:
        text (str): The text to be split.
        n (int): The max length of a single chunk.

    Returns:
        List[str]: A list of split texts.
    """
    chunks: List[str] = []
    start = 0
    length = len(text)
    while start < length:
        # Look for a sentence boundary between 0.8*n and 1.2*n characters
        # past the current position.
        end = min(start + int(1.2 * n), length)
        while end > start + int(0.8 * n):
            candidate = text[start:end]
            if candidate.endswith(".") or candidate.endswith("\n"):
                break
            end -= 1
        # No sentence end found in the window: fall back to exactly n chars.
        if end == start + int(0.8 * n):
            end = min(start + n, length)
        chunks.append(text[start:end])
        start = end
    return chunks
def prompt_single_step_agent(prompt: str) -> str:
    """Prompt a single-step agent to summarize texts or answer a question."""
    # A generic helpful-assistant persona; the real work is in `prompt`.
    agent = ChatAgent(
        BaseMessage.make_assistant_message(
            role_name="Assistant",
            content="You are a helpful assistant.",
        )
    )
    agent.reset()
    response = agent.step(
        BaseMessage.make_user_message(role_name="User", content=prompt)
    )
    if response.msgs is not None:
        return response.msg.content
    return ""
Summarize the information from the text, based on the query if a query is
given.
Args:
text (str): Text to summarize.
query (str): What information you want.
Returns:
def summarize_text(text: str, query: str) -> str:
    r"""Summarize the information from the text, base on the query if query is
    given.

    Args:
        text (str): Text to summarize.
        query (str): What information you want.

    Returns:
        str: Strings with information.
    """
    # Per-chunk prompt: gather relevant facts without answering directly.
    chunk_prompt = TextPrompt(
        '''Gather information from this text that relative to the question, but
do not directly answer the question.\nquestion: {query}\ntext '''
    ).format(query=query)
    # Max length of each chunk
    max_len = 3000
    partial_summaries = ""
    for idx, chunk in enumerate(create_chunks(text, max_len), start=1):
        partial_summaries += (
            prompt_single_step_agent(chunk_prompt + str(idx) + ": " + chunk)
            + "\n"
        )
    # Final pass: combine the per-chunk summaries into one answer.
    final_prompt = TextPrompt(
        '''Here are some summarized texts which split from one text, Using the
information to answer the question: {query}.\n\nText: '''
    ).format(query=query)
    return prompt_single_step_agent(final_prompt + partial_summaries)
Search webs for information. Given a query, this function will use
the Google search engine to search for related information from the
internet, and then return a summarized answer.
Args:
query (str): Question you want to be answered.
Returns:
def search_google_and_summarize(query: str) -> str:
    r"""Search webs for information. Given a query, this function will use
    the Google search engine to search for related information from the
    internet, and then return a summarized answer.

    Args:
        query (str): Question you want to be answered.

    Returns:
        str: Summarized information from webs.
    """
    # Google search will return a list of urls
    for result in search_google(query):
        if "url" not in result:
            continue
        # Extract text from the page and summarize it w.r.t. the query.
        page_text = text_extract_from_web(str(result.get("url")))
        candidate = summarize_text(page_text, query)
        # Ask the agent whether this candidate actually answers the query.
        check_prompt = TextPrompt(
            '''Do you think the answer: {answer} can answer the query:
{query}. Use only 'yes' or 'no' to answer.'''
        ).format(answer=candidate, query=query)
        verdict = prompt_single_step_agent(check_prompt)
        if "yes" in str(verdict).lower():
            return candidate
    return "Failed to find the answer from google search."
Queries Wolfram|Alpha and returns the result. Wolfram|Alpha is an
answer engine developed by Wolfram Research. It is offered as an online
service that answers factual queries by computing answers from externally
sourced data.
Args:
query (str): The query to send to Wolfram Alpha.
is_detailed (bool): Whether to include additional details in the
result.
Returns:
def query_wolfram_alpha(query: str, is_detailed: bool) -> str:
    r"""Queries Wolfram|Alpha and returns the result. Wolfram|Alpha is an
    answer engine developed by Wolfram Research. It is offered as an online
    service that answers factual queries by computing answers from externally
    sourced data.

    Args:
        query (str): The query to send to Wolfram Alpha.
        is_detailed (bool): Whether to include additional details in the
            result.

    Returns:
        str: The result from Wolfram Alpha, formatted as a string.
    """
    try:
        import wolframalpha
    except ImportError:
        raise ImportError(
            "Please install `wolframalpha` first. You can install it by "
            "running `pip install wolframalpha`."
        )

    WOLFRAMALPHA_APP_ID = os.environ.get('WOLFRAMALPHA_APP_ID')
    if not WOLFRAMALPHA_APP_ID:
        raise ValueError(
            "`WOLFRAMALPHA_APP_ID` not found in environment "
            "variables. Get `WOLFRAMALPHA_APP_ID` here: "
            "`https://products.wolframalpha.com/api/`."
        )

    try:
        client = wolframalpha.Client(WOLFRAMALPHA_APP_ID)
        res = client.query(query)
        assumption = next(res.pods).text or "No assumption made."
        answer = next(res.results).text or "No answer found."
    except StopIteration:
        # No pods/results: Wolfram Alpha could not interpret the query.
        # (Fix: the original caught a broad Exception and probed it with
        # isinstance; a dedicated except clause is clearer and narrower.)
        return "Wolfram Alpha wasn't able to answer it"
    except Exception as e:
        # Fix: the original glued two f-strings with no separator, producing
        # "...answer it<error>."; add punctuation and a space.
        return f"Wolfram Alpha wasn't able to answer it. Error: {e!s}."

    result = f"Assumption:\n{assumption}\n\nAnswer:\n{answer}"
    # Add additional details in the result
    if is_detailed:
        result += '\n'
        for pod in res.pods:
            result += '\n' + pod['@title'] + ':\n'
            for sub in pod.subpods:
                result += (sub.plaintext or "None") + '\n'
    return result.rstrip()
Retrieve the Twitter API key and secret from environment variables.
Returns:
Tuple[str, str]: A tuple containing the Twitter API key and secret.
Raises:
ValueError: If the API key or secret is not found in the environment
def get_twitter_api_key() -> Tuple[str, str]:
    r"""Retrieve the Twitter API key and secret from environment variables.

    Returns:
        Tuple[str, str]: A tuple containing the Twitter API key and secret.

    Raises:
        ValueError: If the API key or secret is not found in the environment
            variables.
    """
    # Get `TWITTER_CONSUMER_KEY` and `TWITTER_CONSUMER_SECRET` here:
    # https://developer.twitter.com/en/portal/products/free
    TWITTER_CONSUMER_KEY = os.environ.get("TWITTER_CONSUMER_KEY")
    TWITTER_CONSUMER_SECRET = os.environ.get("TWITTER_CONSUMER_SECRET")

    if not TWITTER_CONSUMER_KEY or not TWITTER_CONSUMER_SECRET:
        # Fix: build the message from the names that are actually missing
        # instead of joining empty strings and relying on strip(", ").
        missing = [
            name
            for name, value in (
                ("TWITTER_CONSUMER_KEY", TWITTER_CONSUMER_KEY),
                ("TWITTER_CONSUMER_SECRET", TWITTER_CONSUMER_SECRET),
            )
            if not value
        ]
        missing_keys = ", ".join(missing)
        raise ValueError(
            f"{missing_keys} not found in environment variables. Get them "
            "here: `https://developer.twitter.com/en/portal/products/free`."
        )
    return TWITTER_CONSUMER_KEY, TWITTER_CONSUMER_SECRET
Initiates an OAuth1Session with Twitter's API and returns it.
The function first fetches a request token, then prompts the user to
authorize the application. After the user has authorized the application
and provided a verifier (PIN), the function fetches an access token.
Finally, a new OAuth1Session is created with the access token and returned.
Raises:
RuntimeError: If an error occurs while fetching the OAuth access token
or the OAuth request token.
Returns:
requests_oauthlib.OAuth1Session: An OAuth1Session object authenticated
with the user's access token.
Reference:
https://github.com/twitterdev/Twitter-API-v2-sample-code/blob/main/Manage-Tweets/create_tweet.py
def get_oauth_session() -> requests.Session:
    r'''Initiates an OAuth1Session with Twitter's API and returns it.

    The function first fetches a request token, then prompts the user to
    authorize the application. After the user has authorized the application
    and provided a verifier (PIN), the function fetches an access token.
    Finally, a new OAuth1Session is created with the access token and
    returned.

    Raises:
        RuntimeError: If an error occurs while fetching the OAuth access
            token or the OAuth request token.

    Returns:
        requests_oauthlib.OAuth1Session: An OAuth1Session object
            authenticated with the user's access token.

    Reference:
        https://github.com/twitterdev/Twitter-API-v2-sample-code/blob/main/Manage-Tweets/create_tweet.py
        https://github.com/twitterdev/Twitter-API-v2-sample-code/blob/main/User-Lookup/get_users_me_user_context.py
    '''
    try:
        from requests_oauthlib import OAuth1Session
    except ImportError:
        raise ImportError(
            "Please install `requests_oauthlib` first. You can "
            "install it by running `pip install "
            "requests_oauthlib`."
        )

    consumer_key, consumer_secret = get_twitter_api_key()

    # Step 1: obtain a request token (out-of-band callback, write access).
    request_token_url = (
        "https://api.twitter.com/oauth/request_token"
        "?oauth_callback=oob&x_auth_access_type=write"
    )
    oauth = OAuth1Session(consumer_key, client_secret=consumer_secret)
    try:
        fetch_response = oauth.fetch_request_token(request_token_url)
    except Exception as e:
        raise RuntimeError(
            f"Error occurred while fetching the OAuth access token: {e}"
        )
    resource_owner_key = fetch_response.get("oauth_token")
    resource_owner_secret = fetch_response.get("oauth_token_secret")

    # Step 2: have the user authorize the app and report back the PIN.
    base_authorization_url = "https://api.twitter.com/oauth/authorize"
    authorization_url = oauth.authorization_url(base_authorization_url)
    print("Please go here and authorize: %s" % authorization_url)
    verifier = input("Paste the PIN here: ")

    # Step 3: trade the verifier for an access token.
    access_token_url = "https://api.twitter.com/oauth/access_token"
    oauth = OAuth1Session(
        consumer_key,
        client_secret=consumer_secret,
        resource_owner_key=resource_owner_key,
        resource_owner_secret=resource_owner_secret,
        verifier=verifier,
    )
    try:
        oauth_tokens = oauth.fetch_access_token(access_token_url)
    except Exception as e:
        raise RuntimeError(
            f"Error occurred while fetching the OAuth request token: {e}"
        )

    # Step 4: a fresh session authenticated with the user's access token.
    return OAuth1Session(
        consumer_key,
        client_secret=consumer_secret,
        resource_owner_key=oauth_tokens["oauth_token"],
        resource_owner_secret=oauth_tokens["oauth_token_secret"],
    )
Handles the HTTP response by checking the status code and returning an
appropriate message if there is an error.
Args:
response (requests.Response): The HTTP response to handle.
Returns:
str: A string describing the error, if any. If there is no error, the
function returns an "Unexpected Exception" message.
Reference:
def handle_http_error(response: "requests.Response") -> str:
    r"""Handles the HTTP response by checking the status code and returning an
    appropriate message if there is an error.

    Args:
        response (requests.Response): The HTTP response to handle. Only
            ``status_code`` is inspected.

    Returns:
        str: A string describing the error, if any. If there is no error, the
            function returns an "Unexpected Exception" message.

    Reference:
        https://github.com/tweepy/tweepy/blob/master/tweepy/client.py#L64
    """
    # NOTE: the annotation is a PEP 484 forward reference so `requests` is
    # not evaluated at definition time (it is only needed for typing here).
    status = response.status_code
    if status in responses:
        # For 5xx server errors, return "Twitter Server Error"
        if 500 <= status < 600:
            return "Twitter Server Error"
        # Known non-5xx status: use its standard reason phrase.
        return responses[status] + " Error"
    if not 200 <= status < 300:
        return "HTTP Exception"
    return "Unexpected Exception"
Creates a new tweet, optionally including a poll or a quote tweet, or
simply a text-only tweet.
This function sends a POST request to the Twitter API to create a new
tweet. The tweet can be a text-only tweet, or optionally include a poll or
be a quote tweet. A confirmation prompt is presented to the user before the
tweet is created.
Args:
text (str): The text of the tweet. The Twitter character limit for a
single tweet is 280 characters.
poll_options (Optional[List[str]]): A list of poll options for a tweet
with a poll.
poll_duration_minutes (Optional[int]): Duration of the poll in minutes
for a tweet with a poll. This is only required if the request
includes poll_options.
quote_tweet_id (Optional[Union[int, str]]): Link to the tweet being
quoted.
Note:
You can only provide either the `quote_tweet_id` parameter or the pair
of `poll_duration_minutes` and `poll_options` parameters, not both.
Returns:
str: A message indicating the success of the tweet creation, including
the tweet ID and text. If the request to the Twitter API is not
successful, the return is an error message.
Reference:
https://developer.twitter.com/en/docs/twitter-api/tweets/manage-tweets/api-reference/post-tweets
def create_tweet(
    *,
    text: str,
    poll_options: Optional[List[str]] = None,
    poll_duration_minutes: Optional[int] = None,
    quote_tweet_id: Optional[Union[int, str]] = None,
) -> str:
    r"""Creates a new tweet, optionally including a poll or a quote tweet, or
    simply a text-only tweet.

    This function sends a POST request to the Twitter API to create a new
    tweet. The tweet can be a text-only tweet, or optionally include a poll
    or be a quote tweet. A confirmation prompt is presented to the user
    before the tweet is created.

    Args:
        text (str): The text of the tweet. The Twitter character limit for a
            single tweet is 280 characters.
        poll_options (Optional[List[str]]): A list of poll options for a
            tweet with a poll.
        poll_duration_minutes (Optional[int]): Duration of the poll in
            minutes for a tweet with a poll. This is only required if the
            request includes poll_options.
        quote_tweet_id (Optional[Union[int, str]]): Link to the tweet being
            quoted.

    Note:
        You can only provide either the `quote_tweet_id` parameter or the
        pair of `poll_duration_minutes` and `poll_options` parameters, not
        both.

    Returns:
        str: A message indicating the success of the tweet creation,
            including the tweet ID and text. If the request to the Twitter
            API is not successful, the return is an error message.

    Reference:
        https://developer.twitter.com/en/docs/twitter-api/tweets/manage-tweets/api-reference/post-tweets
        https://github.com/xdevplatform/Twitter-API-v2-sample-code/blob/main/Manage-Tweets/create_tweet.py
    """
    # validate text
    if text is None:
        return "Text cannot be None"
    if len(text) > TWEET_TEXT_LIMIT:
        return "Text must not exceed 280 characters."
    # Poll options and duration must be given together.
    if (poll_options is None) != (poll_duration_minutes is None):
        return (
            "Error: Both `poll_options` and `poll_duration_minutes` must "
            "be provided together or not at all."
        )
    # A quote tweet and a poll are mutually exclusive.
    if quote_tweet_id is not None and (poll_options or poll_duration_minutes):
        return (
            "Error: Cannot provide both `quote_tweet_id` and "
            "(`poll_options` or `poll_duration_minutes`)."
        )

    # Show the user exactly what will be posted, then ask for confirmation.
    params = {
        "text": text,
        "poll_options": poll_options,
        "poll_duration_minutes": poll_duration_minutes,
        "quote_tweet_id": quote_tweet_id,
    }
    print("You are going to create a tweet with following parameters:")
    for key, value in params.items():
        if value is not None:
            print(f"{key}: {value}")
    confirm = input("Are you sure you want to create this tweet? (yes/no): ")
    if confirm.lower() != "yes":
        return "Execution cancelled by the user."

    oauth = get_oauth_session()

    # Assemble the request body from the provided options.
    payload: Dict[str, Any] = {}
    if poll_options is not None and poll_duration_minutes is not None:
        payload["poll"] = {
            "options": poll_options,
            "duration_minutes": poll_duration_minutes,
        }
    if quote_tweet_id is not None:
        payload["quote_tweet_id"] = str(quote_tweet_id)
    payload["text"] = text

    # Making the request
    response = oauth.post(
        "https://api.twitter.com/2/tweets",
        json=payload,
    )
    if response.status_code != HTTPStatus.CREATED:
        error_type = handle_http_error(response)
        return (
            f"Request returned a(n) {error_type}: "
            f"{response.status_code} {response.text}"
        )

    # Saving the response as JSON
    tweet_data = response.json()["data"]
    return (
        f"Create tweet successful. "
        f"The tweet ID is: {tweet_data['id']}. "
        f"The tweet text is: '{tweet_data['text']}'."
    )
Deletes a tweet with the specified ID for an authorized user.
This function sends a DELETE request to the Twitter API to delete
a tweet with the specified ID. Before sending the request, it
prompts the user to confirm the deletion.
Args:
tweet_id (str): The ID of the tweet to delete.
Returns:
str: A message indicating the result of the deletion. If the
deletion was successful, the message includes the ID of the
deleted tweet. If the deletion was not successful, the message
includes an error message.
Reference:
def delete_tweet(tweet_id: str) -> str:
    r"""Deletes a tweet with the specified ID for an authorized user.

    This function sends a DELETE request to the Twitter API to delete
    a tweet with the specified ID. Before sending the request, it
    prompts the user to confirm the deletion.

    Args:
        tweet_id (str): The ID of the tweet to delete.

    Returns:
        str: A message indicating the result of the deletion. If the
            deletion was successful, the message includes the ID of the
            deleted tweet. If the deletion was not successful, the message
            includes an error message.

    Reference:
        https://developer.twitter.com/en/docs/twitter-api/tweets/manage-tweets/api-reference/delete-tweets-id
    """
    # Show the target tweet, then ask the user to confirm the deletion.
    if tweet_id is not None:
        print(
            f"You are going to delete a tweet with the following "
            f"ID: {tweet_id}"
        )
    confirm = input("Are you sure you want to delete this tweet? (yes/no): ")
    if confirm.lower() != "yes":
        return "Execution cancelled by the user."

    oauth = get_oauth_session()
    response = oauth.delete(
        f"https://api.twitter.com/2/tweets/{tweet_id}",
    )
    if response.status_code != HTTPStatus.OK:
        error_type = handle_http_error(response)
        return (
            f"Request returned a(n) {error_type}: "
            f"{response.status_code} {response.text}"
        )

    # `deleted` may be True or False; default to False when missing.
    deleted_status = response.json().get("data", {}).get("deleted", False)
    return (
        f"Delete tweet successful: {deleted_status}. "
        f"The tweet ID is: {tweet_id}. "
    )
def get_my_user_profile() -> str:
    r"""Retrieves and formats the authenticated user's Twitter profile info.

    This function sends a GET request to the Twitter API to retrieve the
    authenticated user's profile information, including their pinned tweet.
    It then formats this information into a readable report.

    Returns:
        str: A formatted report of the authenticated user's Twitter profile
            information. This includes their ID, name, username, description,
            location, most recent tweet ID, profile image URL, account
            creation date, protection status, verification type, public
            metrics, and pinned tweet information. If the request to the
            Twitter API is not successful, the return is an error message.

    Reference:
        https://developer.twitter.com/en/docs/twitter-api/users/lookup/api-reference/get-users-me
    """
    oauth = get_oauth_session()

    # Fields requested from the API: `tweet_fields` applies to the expanded
    # pinned tweet, `user_fields` to the user object itself.
    tweet_fields = ["created_at", "text"]
    user_fields = [
        "created_at",
        "description",
        "id",
        "location",
        "most_recent_tweet_id",
        "name",
        "pinned_tweet_id",
        "profile_image_url",
        "protected",
        "public_metrics",
        "url",
        "username",
        "verified_type",
    ]
    params = {
        "expansions": "pinned_tweet_id",
        "tweet.fields": ",".join(tweet_fields),
        "user.fields": ",".join(user_fields),
    }

    response = oauth.get("https://api.twitter.com/2/users/me", params=params)
    if response.status_code != HTTPStatus.OK:
        error_type = handle_http_error(response)
        error_message = "Request returned a(n) {}: {} {}".format(
            error_type, response.status_code, response.text
        )
        return error_message

    json_response = response.json()
    user_info = json_response.get('data', {})
    # Only the first expanded tweet (the pinned tweet) is used; defaults to
    # an empty dict so the report code below can probe it safely.
    tweets = json_response.get('includes', {}).get('tweets', [{}])[0]

    user_report = ""
    user_report += f"ID: {user_info['id']}. "
    user_report += f"Name: {user_info['name']}. "
    user_report += f"Username: {user_info['username']}. "

    # Optional fields, rendered only when present and non-empty.
    user_info_keys = [
        'description',
        'location',
        'most_recent_tweet_id',
        'profile_image_url',
    ]
    for key in user_info_keys:
        value = user_info.get(key)
        if user_info.get(key):
            user_report += f"{key.replace('_', ' ').capitalize()}: {value}. "

    if 'created_at' in user_info:
        # Twitter timestamps are ISO 8601 with milliseconds and a literal
        # 'Z' suffix, e.g. "2020-01-01T00:00:00.000Z".
        created_at = datetime.datetime.strptime(
            user_info['created_at'], "%Y-%m-%dT%H:%M:%S.%fZ"
        )
        date_str = created_at.strftime('%B %d, %Y at %H:%M:%S')
        user_report += f"Account created at: {date_str}. "

    # NOTE(review): direct indexing assumes `protected` is always present in
    # the response (it was requested above) — confirm against the API spec.
    protection_status = "private" if user_info['protected'] else "public"
    user_report += f"Protected: This user's Tweets are {protection_status}. "

    # Human-readable explanation for each possible `verified_type` value.
    verification_messages = {
        'blue': (
            "The user has a blue verification, typically reserved for "
            "public figures, celebrities, or global brands. "
        ),
        'business': (
            "The user has a business verification, typically "
            "reserved for businesses and corporations. "
        ),
        'government': (
            "The user has a government verification, typically "
            "reserved for government officials or entities. "
        ),
        'none': "The user is not verified. ",
    }
    verification_type = user_info.get('verified_type', 'none')
    user_report += (
        f"Verified type: {verification_messages.get(verification_type)}"
    )

    if 'public_metrics' in user_info:
        user_report += "Public metrics: "
        metrics = user_info['public_metrics']
        user_report += (
            f"The user has {metrics.get('followers_count', 0)} followers, "
            f"is following {metrics.get('following_count', 0)} users, "
            f"has made {metrics.get('tweet_count', 0)} tweets, "
            f"is listed in {metrics.get('listed_count', 0)} lists, "
            f"and has received {metrics.get('like_count', 0)} likes. "
        )

    if 'pinned_tweet_id' in user_info:
        user_report += f"Pinned tweet ID: {user_info['pinned_tweet_id']}. "

    # Pinned tweet details are only reported when the expansion returned
    # both timestamp and text.
    if 'created_at' in tweets and 'text' in tweets:
        user_report += "\nPinned tweet information: "
        tweet_created_at = datetime.datetime.strptime(
            tweets['created_at'], "%Y-%m-%dT%H:%M:%S.%fZ"
        )
        user_report += (
            f"Pinned tweet created at "
            f"{tweet_created_at.strftime('%B %d, %Y at %H:%M:%S')} "
            f"with text: '{tweets['text']}'."
        )

    return user_report
def get_openweathermap_api_key() -> str:
    r"""Retrieve the OpenWeatherMap API key from environment variables.

    Returns:
        str: The OpenWeatherMap API key.

    Raises:
        ValueError: If the API key is not found in the environment variables.
    """
    # Get `OPENWEATHERMAP_API_KEY` here: https://openweathermap.org
    api_key = os.environ.get('OPENWEATHERMAP_API_KEY')
    if api_key:
        return api_key
    raise ValueError(
        "`OPENWEATHERMAP_API_KEY` not found in environment "
        "variables. Get `OPENWEATHERMAP_API_KEY` here: "
        "`https://openweathermap.org`."
    )
def get_weather_data(
    city: str,
    temp_units: Literal['kelvin', 'celsius', 'fahrenheit'] = 'kelvin',
    wind_units: Literal[
        'meters_sec', 'miles_hour', 'knots', 'beaufort'
    ] = 'meters_sec',
    visibility_units: Literal['meters', 'miles'] = 'meters',
    time_units: Literal['unix', 'iso', 'date'] = 'unix',
) -> str:
    r"""Fetch and return a comprehensive weather report for a given city as a
    string. The report includes current weather conditions, temperature,
    wind details, visibility, and sunrise/sunset times, all formatted as a
    readable string. The function interacts with the OpenWeatherMap API to
    retrieve the data.

    Args:
        city (str): The name of the city for which the weather information
            is desired. Format "City, CountryCode" (e.g., "Paris, FR"
            for Paris, France). If the country code is not provided,
            the API will search for the city in all countries, which
            may yield incorrect results if multiple cities with the
            same name exist.
        temp_units (Literal['kelvin', 'celsius', 'fahrenheit']): Units for
            temperature. (default: :obj:`kelvin`)
        wind_units (Literal['meters_sec', 'miles_hour', 'knots', 'beaufort']):
            Units for wind speed. (default: :obj:`meters_sec`)
        visibility_units (Literal['meters', 'miles']): Units for visibility
            distance. (default: :obj:`meters`)
        time_units (Literal['unix', 'iso', 'date']): Format for sunrise and
            sunset times. (default: :obj:`unix`)

    Returns:
        str: A string containing the fetched weather data, formatted in a
            readable manner. If an error occurs, a message indicating the
            error will be returned instead.

    Example of return string:
        "Weather in Paris, FR: 15°C, feels like 13°C. Max temp: 17°C, Min
        temp: 12°C. Wind: 5 m/s at 270 degrees. Visibility: 10 kilometers.
        Sunrise at 05:46:05 (UTC), Sunset at 18:42:20 (UTC)."

    Note:
        Please ensure that the API key is valid and has permissions to access
        the weather data.
    """
    # NOTE: This tool may not work as expected since the input arguments like
    # `time_units` should be enum types which are not supported yet.
    try:
        import pyowm
    except ImportError:
        raise ImportError(
            "Please install `pyowm` first. You can install it by running "
            "`pip install pyowm`."
        )

    OPENWEATHERMAP_API_KEY = get_openweathermap_api_key()
    owm = pyowm.OWM(OPENWEATHERMAP_API_KEY)
    mgr = owm.weather_manager()

    try:
        observation = mgr.weather_at_place(city)
        weather = observation.weather

        # Temperature: dict with 'temp', 'feels_like', 'temp_max',
        # 'temp_min' keys, in the requested units.
        temperature = weather.temperature(temp_units)

        # Wind
        wind_data = observation.weather.wind(unit=wind_units)
        wind_speed = wind_data.get('speed')
        # 'N/A' if the degree is not available
        wind_deg = wind_data.get('deg', 'N/A')

        # Visibility: raw distance is in meters; ask pyowm to convert
        # when miles were requested.
        visibility_distance = observation.weather.visibility_distance
        visibility = (
            str(visibility_distance)
            if visibility_units == 'meters'
            else str(observation.weather.visibility(unit='miles'))
        )

        # Sunrise and Sunset, formatted per `time_units`.
        sunrise_time = str(weather.sunrise_time(timeformat=time_units))
        sunset_time = str(weather.sunset_time(timeformat=time_units))

        # Compile all the weather details into a report string
        weather_report = (
            f"Weather in {city}: {temperature['temp']}°{temp_units.title()}, "
            f"feels like {temperature['feels_like']}°{temp_units.title()}. "
            f"Max temp: {temperature['temp_max']}°{temp_units.title()}, "
            f"Min temp: {temperature['temp_min']}°{temp_units.title()}. "
            f"Wind: {wind_speed} {wind_units} at {wind_deg} degrees. "
            f"Visibility: {visibility} {visibility_units}. "
            f"Sunrise at {sunrise_time}, Sunset at {sunset_time}."
        )

        return weather_report

    except Exception as e:
        # Deliberate best-effort: any failure (bad city name, network error,
        # API quota) is reported to the caller as a message, not raised.
        error_message = (
            f"An error occurred while fetching weather data for {city}: "
            f"{e!s}."
        )
        return error_message
def strip_consecutive_newlines(text: str) -> str:
    r"""Collapse each newline, along with any whitespace surrounding it,
    into a single newline character.

    Args:
        text (str): The string to strip.

    Returns:
        str: The string with consecutive newlines stripped.
    """
    # Any whitespace run that contains a newline becomes exactly "\n".
    pattern = re.compile(r"\s*\n\s*")
    return pattern.sub("\n", text)
def read_file(file: BytesIO) -> File:
    r"""Reads an uploaded file and returns a File object.

    Args:
        file (BytesIO): A BytesIO object representing the contents of the
            file.

    Returns:
        File: A File object of the concrete type matching the extension.

    Raises:
        NotImplementedError: If the file extension is not supported.
    """
    # Map each supported extension to its concrete File implementation.
    handlers = {
        ".docx": DocxFile,
        ".pdf": PdfFile,
        ".txt": TxtFile,
        ".json": JsonFile,
        ".html": HtmlFile,
    }
    lowered_name = file.name.lower()
    for suffix, handler in handlers.items():
        if lowered_name.endswith(suffix):
            return handler.from_bytes(file)
    raise NotImplementedError(
        f"File type {file.name.split('.')[-1]} not supported"
    )
def return_prompt_wrapper(
    cls: Any,
    func: Callable,
) -> Callable[..., Union[Any, tuple]]:
    r"""Wrap ``func`` so that plain-string results (and plain strings inside
    tuple results) are promoted to instances of ``cls``.

    Args:
        cls (Any): The prompt class to convert string results into.
        func (Callable): The function being wrapped.

    Returns:
        Callable[..., Union[Any, str]]: A wrapper that returns ``cls``
            instances in place of bare strings.
    """

    def _convert(value: Any) -> Any:
        # Promote plain strings only; existing `cls` instances pass through.
        if isinstance(value, str) and not isinstance(value, cls):
            return cls(value)
        return value

    def wrapper(*args: Any, **kwargs: Any) -> Union[Any, str]:
        r"""Call ``func`` and convert its string result(s) to ``cls``.

        Args:
            *args (Any): Variable length argument list.
            **kwargs (Any): Arbitrary keyword arguments.

        Returns:
            Union[Any, str]: The converted return value.
        """
        result = func(*args, **kwargs)
        if isinstance(result, tuple):
            return tuple(_convert(item) for item in result)
        return _convert(result)

    # Preserve the original function's attributes.
    wrapper.__name__ = func.__name__
    wrapper.__doc__ = func.__doc__
    return wrapper
def wrap_prompt_functions(cls: T) -> T:
    r"""Class decorator that wraps the callables of a class inherited from
    :obj:`str` with :obj:`return_prompt_wrapper`, so their string results
    become prompt instances.

    Args:
        cls (type): The class to decorate.

    Returns:
        type: Decorated class with wrapped functions.
    """
    # Construction/representation dunders must keep their native behavior.
    skipped = {'__init__', '__new__', '__str__', '__repr__'}
    for name in dir(cls):
        if name in skipped:
            continue
        member = getattr(cls, name)
        # `isroutine` filters out non-function callables (e.g. classes).
        if callable(member) and inspect.isroutine(member):
            setattr(cls, name, return_prompt_wrapper(cls, member))
    return cls
def api_key_required(func: F) -> F:
    r"""Decorator that verifies the relevant provider API key is present in
    the environment variables before invoking the wrapped method.

    Args:
        func (callable): The function to be wrapped.

    Returns:
        callable: The decorated function.

    Raises:
        ValueError: If the required API key is not found in the environment
            variables, or if the model type is unsupported.
    """

    @wraps(func)
    def wrapper(self, *args, **kwargs):
        # Resolve which provider's key this model type requires.
        if self.model_type.is_openai:
            env_name, provider = 'OPENAI_API_KEY', 'OpenAI'
        elif self.model_type.is_anthropic:
            env_name, provider = 'ANTHROPIC_API_KEY', 'Anthropic'
        else:
            raise ValueError('Unsupported model type.')
        if env_name not in os.environ:
            raise ValueError(f'{provider} API key not found.')
        return func(self, *args, **kwargs)

    return cast(F, wrapper)
def print_text_animated(text, delay: float = 0.02, end: str = ""):
    r"""Prints the given text one character at a time, producing a simple
    typewriter animation.

    Args:
        text (str): The text to print.
        delay (float, optional): Pause, in seconds, between characters.
            (default: :obj:`0.02`)
        end (str, optional): The end character to print after each
            character of text. (default: :obj:`""`)
    """
    for character in text:
        # Flush immediately so each character appears without buffering.
        print(character, end=end, flush=True)
        time.sleep(delay)
    # Finish with a blank line after the animated text.
    print('\n')
def get_prompt_template_key_words(template: str) -> Set[str]:
    r"""Given a string template containing curly braces {}, return a set of
    the words inside the braces.

    Args:
        template (str): A string containing curly braces.

    Returns:
        Set[str]: The distinct names found inside the curly braces.

    Example:
        >>> get_prompt_template_key_words('Hi, {name}! How are you {status}?')
        {'name', 'status'}
    """
    # Capture everything between each `{` and the next `}`.
    return {match.group(1) for match in re.finditer(r'{([^}]*)}', template)}
def get_first_int(string: str) -> Optional[int]:
    r"""Returns the first integer number found in the given string.

    If no integer number is found, returns None.

    Args:
        string (str): The input string.

    Returns:
        int or None: The first integer number found in the string, or None
            if no integer number is found.
    """
    # `\d+` grabs the first maximal run of digits, if any.
    found = re.search(r'\d+', string)
    return int(found.group()) if found else None
def download_tasks(task: TaskType, folder_path: str) -> None:
    r"""Downloads task-related files from a specified URL and extracts them.

    This function downloads a zip file containing tasks based on the
    specified `task` type from a predefined URL, saves it to `folder_path`,
    and then extracts the contents of the zip file into the same folder.
    After extraction, the zip file is deleted.

    Args:
        task (TaskType): An enum representing the type of task to download.
        folder_path (str): The path of the folder where the zip file will be
            downloaded and extracted.

    Raises:
        requests.HTTPError: If the download request fails.
    """
    # Define the path to save the zip file
    zip_file_path = os.path.join(folder_path, "tasks.zip")

    # Download the zip file for the given task type
    response = requests.get(
        "https://huggingface.co/datasets/camel-ai/"
        f"metadata/resolve/main/{task.value}_tasks.zip"
    )
    # Fail fast on HTTP errors instead of writing an error page to disk,
    # which would otherwise surface later as a confusing `BadZipFile`.
    response.raise_for_status()

    # Save the zip file
    with open(zip_file_path, "wb") as f:
        f.write(response.content)

    # Extract the archive into the target folder
    with zipfile.ZipFile(zip_file_path, "r") as zip_ref:
        zip_ref.extractall(folder_path)

    # Delete the zip file
    os.remove(zip_file_path)
def get_task_list(task_response: str) -> List[str]:
    r"""Parse the response of the Agent and return task list.

    Args:
        task_response (str): The string response of the Agent.

    Returns:
        List[str]: A list of the string tasks.
    """
    parsed_tasks = []
    # Each task is expected on its own line, formatted as "<number>. <text>".
    for line in task_response.strip().split('\n'):
        pieces = line.strip().split(".", 1)
        if len(pieces) != 2:
            continue
        numeric_id = ''.join(ch for ch in pieces[0] if ch.isnumeric())
        # Drop punctuation from the task text, keeping word characters,
        # whitespace and underscores.
        name = re.sub(r'[^\w\s_]+', '', pieces[1]).strip()
        if name.strip() and numeric_id.isnumeric():
            parsed_tasks.append(name)
    return parsed_tasks
def check_server_running(server_url: str) -> bool:
    r"""Check whether the port referred to by the URL to the server
    is open.

    Args:
        server_url (str): The URL to the server running LLM inference
            service.

    Returns:
        bool: Whether the port is open for packets (server is running).
    """
    parsed_url = urlparse(server_url)
    url_tuple = (parsed_url.hostname, parsed_url.port)

    # Use the socket as a context manager so it is always closed, even if
    # `connect_ex` raises (e.g. on an unresolvable hostname); the original
    # code leaked the descriptor in that case.
    with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as sock:
        result = sock.connect_ex(url_tuple)

    # `connect_ex` returns 0 when the connection succeeded, i.e. the port
    # is open and the server is accepting connections.
    return result == 0
def get_system_information():
    r"""Gathers information about the operating system.

    Returns:
        dict: A dictionary containing various pieces of OS information.
    """
    # Pair each report label with the callable that produces its value.
    probes = {
        "OS Name": lambda: os.name,
        "System": platform.system,
        "Release": platform.release,
        "Version": platform.version,
        "Machine": platform.machine,
        "Processor": platform.processor,
        "Platform": platform.platform,
    }
    return {label: probe() for label, probe in probes.items()}
def to_pascal(snake: str) -> str:
    """Convert a snake_case string to PascalCase.

    Args:
        snake (str): The snake_case string to be converted.

    Returns:
        str: The converted PascalCase string.
    """
    # Strings that already look like PascalCase are returned untouched.
    if re.match(r'^[A-Z][a-zA-Z0-9]*([A-Z][a-zA-Z0-9]*)*$', snake):
        return snake

    # Trim surrounding underscores and collapse internal runs of them.
    cleaned = re.sub('_+', '_', snake.strip('_'))

    # Title-case the words, then splice out each underscore while
    # upper-casing the character that follows it.
    return re.sub(
        '_([0-9A-Za-z])',
        lambda match: match.group(1).upper(),
        cleaned.title(),
    )
def role_playing_with_function(
    task_prompt: str = (
        "Assume now is 2024 in the Gregorian calendar, "
        "estimate the current age of University of Oxford "
        "and then add 10 more years to this age, "
        "and get the current weather of the city where "
        "the University is located. And tell me what time "
        "zone University of Oxford is in. And use my twitter "
        "account infomation to create a tweet. "
    ),
    function_list: Optional[List] = None,
    model_type=None,
    chat_turn_limit=10,
    assistant_role_name: str = "Searcher",
    user_role_name: str = "Professor",
) -> None:
    r"""Initializes and conducts a `RolePlaying` with `FunctionCallingConfig`
    session. The function creates an interactive and dynamic role-play
    session where the AI Assistant and User engage based on the given task,
    roles, and available functions. It demonstrates the versatility of AI in
    handling diverse tasks and user interactions within a structured
    `RolePlaying` framework.

    Args:
        task_prompt (str): The initial task or scenario description to start
            the `RolePlaying` session. Defaults to a prompt involving the
            estimation of the University of Oxford's age plus related
            weather, time-zone, and tweet-creation sub-tasks.
        function_list (list): A list of functions that the agent can utilize
            during the session. Defaults to the lazily imported functions
            module when not provided.
        model_type (ModelType): The type of chatbot model used for both the
            assistant and the user. Defaults to the lazily imported types
            module when not provided.
        chat_turn_limit (int): The maximum number of turns (exchanges) in
            the chat session. Defaults to 10.
        assistant_role_name (str): The role name assigned to the AI
            Assistant. Defaults to 'Searcher'.
        user_role_name (str): The role name assigned to the User. Defaults
            to 'Professor'.

    Returns:
        None: This function does not return any value but prints out the
            session's dialogues and outputs.
    """
    # Run lazy import: defaults are resolved at call time so importing this
    # module does not pull in the heavy dependencies.
    if function_list is None:
        function_list = get_lazy_imported_functions_module()
    if model_type is None:
        model_type = get_lazy_imported_types_module()

    from colorama import Fore

    from camel.agents.chat_agent import FunctionCallingRecord
    from camel.configs import ChatGPTConfig, FunctionCallingConfig
    from camel.societies import RolePlaying

    task_prompt = task_prompt
    # Deterministic sampling for both agents (temperature 0).
    user_model_config = ChatGPTConfig(temperature=0.0)

    function_list = function_list
    assistant_model_config = FunctionCallingConfig.from_openai_function_list(
        function_list=function_list,
        kwargs=dict(temperature=0.0),
    )

    role_play_session = RolePlaying(
        assistant_role_name=assistant_role_name,
        user_role_name=user_role_name,
        assistant_agent_kwargs=dict(
            model_type=model_type,
            model_config=assistant_model_config,
            function_list=function_list,
        ),
        user_agent_kwargs=dict(
            model_type=model_type,
            model_config=user_model_config,
        ),
        task_prompt=task_prompt,
        with_task_specify=False,
    )

    # Echo the session setup before the conversation starts.
    print(
        Fore.GREEN
        + f"AI Assistant sys message:\n{role_play_session.assistant_sys_msg}\n"
    )
    print(
        Fore.BLUE + f"AI User sys message:\n{role_play_session.user_sys_msg}\n"
    )

    print(Fore.YELLOW + f"Original task prompt:\n{task_prompt}\n")
    print(
        Fore.CYAN
        + f"Specified task prompt:\n{role_play_session.specified_task_prompt}\n"
    )
    print(Fore.RED + f"Final task prompt:\n{role_play_session.task_prompt}\n")

    n = 0
    input_msg = role_play_session.init_chat()
    while n < chat_turn_limit:
        n += 1
        assistant_response, user_response = role_play_session.step(input_msg)

        if assistant_response.terminated:
            print(
                Fore.GREEN
                + (
                    "AI Assistant terminated. Reason: "
                    f"{assistant_response.info['termination_reasons']}."
                )
            )
            break
        if user_response.terminated:
            print(
                Fore.GREEN
                + (
                    "AI User terminated. "
                    f"Reason: {user_response.info['termination_reasons']}."
                )
            )
            break

        # Print output from the user
        print_text_animated(
            Fore.BLUE + f"AI User:\n\n{user_response.msg.content}\n"
        )

        # Print output from the assistant, including any function
        # execution information
        print_text_animated(Fore.GREEN + "AI Assistant:")
        called_functions: List[FunctionCallingRecord] = assistant_response.info[
            'called_functions'
        ]
        for func_record in called_functions:
            print_text_animated(f"{func_record}")
        print_text_animated(f"{assistant_response.msg.content}\n")

        # The user signals completion with this sentinel string.
        if "CAMEL_TASK_DONE" in user_response.msg.content:
            break

        input_msg = assistant_response.msg
def messages_to_prompt(messages: List[OpenAIMessage], model: ModelType) -> str:
    r"""Parse the message list into a single prompt following model-specific
    formats.

    Args:
        messages (List[OpenAIMessage]): Message list with the chat history
            in OpenAI API format.
        model (ModelType): Model type for which messages will be parsed.

    Returns:
        str: A single prompt summarizing all the messages.

    Raises:
        ValueError: If the model type is unsupported, or if a message
            carries non-string (multimodal) content.
    """
    # The first message is assumed to be the system message.
    system_message = messages[0]["content"]

    ret: str
    if model == ModelType.LLAMA_2:
        # reference: https://github.com/facebookresearch/llama/blob/cfc3fc8c1968d390eb830e65c63865e980873a06/llama/generation.py#L212
        # Separators alternate per message index: user turns end with
        # seps[0], assistant turns with seps[1] (the </s><s> boundary).
        seps = [" ", " </s><s>"]
        role_map = {"user": "[INST]", "assistant": "[/INST]"}

        system_prompt = f"[INST] <<SYS>>\n{system_message}\n<</SYS>>\n\n"
        ret = ""
        for i, msg in enumerate(messages[1:]):
            role = role_map[msg["role"]]
            content = msg["content"]
            if content:
                if not isinstance(content, str):
                    raise ValueError(
                        "Currently multimodal context is not "
                        "supported by the token counter."
                    )
                if i == 0:
                    # The system prompt is fused into the first user turn.
                    ret += system_prompt + content
                else:
                    ret += role + " " + content + seps[i % 2]
            else:
                # Empty content: emit the bare role tag (generation cue).
                ret += role
        return ret
    elif model == ModelType.VICUNA or model == ModelType.VICUNA_16K:
        seps = [" ", "</s>"]
        role_map = {"user": "USER", "assistant": "ASSISTANT"}

        system_prompt = f"{system_message}"
        ret = system_prompt + seps[0]
        for i, msg in enumerate(messages[1:]):
            role = role_map[msg["role"]]
            content = msg["content"]
            # Unlike the LLAMA_2 branch, the type check here also applies
            # to empty content.
            if not isinstance(content, str):
                raise ValueError(
                    "Currently multimodal context is not "
                    "supported by the token counter."
                )
            if content:
                ret += role + ": " + content + seps[i % 2]
            else:
                ret += role + ":"
        return ret
    else:
        raise ValueError(f"Invalid model type: {model}")
def get_model_encoding(value_for_tiktoken: str):
    r"""Get model encoding from tiktoken.

    Args:
        value_for_tiktoken: Model value for tiktoken.

    Returns:
        tiktoken.Encoding: Model encoding.
    """
    import tiktoken

    try:
        return tiktoken.encoding_for_model(value_for_tiktoken)
    except KeyError:
        # Unknown model names fall back to the default encoding.
        print("Model not found. Using cl100k_base encoding.")
        return tiktoken.get_encoding("cl100k_base")
def count_tokens_from_image(
    image: Image.Image, detail: OpenAIImageDetailType
) -> int:
    r"""Count image tokens for OpenAI vision model. An :obj:`"auto"`
    resolution model will be treated as :obj:`"high"`. All images with
    :obj:`"low"` detail cost 85 tokens each. Images with :obj:`"high"`
    detail are first scaled down (never up) to fit within a 2048 x 2048
    square, maintaining their aspect ratio. Then, if the shortest side of
    the image is longer than 768px, they are scaled down so that it is
    768px long. Finally, we count how many 512px squares the image
    consists of. Each of those squares costs 170 tokens. Another 85 tokens
    are always added to the final total. For more details please refer to
    `OpenAI vision docs <https://platform.openai.com/docs/guides/vision>`_

    Args:
        image (PIL.Image.Image): Image to count number of tokens.
        detail (OpenAIImageDetailType): Image detail type to count
            number of tokens.

    Returns:
        int: Number of tokens for the image given a detail type.
    """
    if detail == OpenAIImageDetailType.LOW:
        return LOW_DETAIL_TOKENS

    width, height = image.size
    # Shrink to fit inside the 2048 x 2048 square (downscale only).
    if width > FIT_SQUARE_PIXELS or height > FIT_SQUARE_PIXELS:
        scaling_factor = max(width, height) / FIT_SQUARE_PIXELS
        width = int(width / scaling_factor)
        height = int(height / scaling_factor)
    # Fix: only downscale to a 768px shortest side. The previous
    # unconditional division *upscaled* images smaller than 768px and
    # overcharged them; OpenAI only ever scales images down.
    if min(width, height) > SHORTEST_SIDE_PIXELS:
        scaling_factor = min(width, height) / SHORTEST_SIDE_PIXELS
        width = int(width / scaling_factor)
        height = int(height / scaling_factor)
    squares_wide = ceil(width / SQUARE_PIXELS)
    squares_high = ceil(height / SQUARE_PIXELS)
    return EXTRA_TOKENS + SQUARE_TOKENS * squares_high * squares_wide
def flatten_conversation(conversation: Dict) -> str:
    r"""Format a conversation into a string.

    Args:
        conversation (Dict): A dictionary containing
            information about the conversation.

    Returns:
        str: A string containing the specified task and
            all messages in the conversation.

    Raises:
        ValueError: If an unknown role name is encountered, or if the
            conversation has fewer than two messages.

    The conversation is formatted in the following format:
        Task: <specified_task>
        User (<role_1>): <message_1>
        Assistant (<role_2>): <message_2>
        ...

    Example:
        >>> conversation = {
        ...     'num_messages': 2,
        ...     'message_1': {'role_name': 'Engineer', 'content': 'Hello'},
        ...     'message_2': {'role_name': 'Programmer',
        ...                   'content': 'Hi there!'},
        ...     'specified_task': 'Answer a greeting'
        ... }
        >>> flatten_conversation(conversation)
        'Task: Answer a greeting\nUser (Engineer): Hello\nAssistant (Programmer): Hi there!'
    """
    num_messages = conversation['num_messages']
    # Fix: validate with a real exception instead of ``assert``, which is
    # silently stripped under ``python -O``.
    if num_messages < 2:
        raise ValueError("conversation must contain at least two messages")
    role_1 = conversation['message_1']['role_name']
    role_2 = conversation['message_2']['role_name']
    task = conversation['specified_task']
    messages = []
    for i in range(1, num_messages + 1):
        entry = conversation[f'message_{i}']
        if entry['role_name'] == role_1:
            messages.append(f"User ({role_1}): " + entry['content'])
        elif entry['role_name'] == role_2:
            messages.append(f"Assistant ({role_2}): " + entry['content'])
        else:
            raise ValueError(
                "Unknown role name: "
                f"{conversation[f'message_{i}']['role_name']}"
            )
    joined_messages = '\n'.join(messages)
    return f"Task: {task}\n{joined_messages}"
def add_with_doc(a: int, b: int) -> int:
    r"""Adds two numbers.
    Args:
        a (int): The first number to be added.
        b (int): The second number to be added.
    Returns:
        integer: The sum of the two numbers.
    """
    # NOTE(review): presumably a fixture whose docstring is parsed by
    # function-schema tests — keep the docstring wording unchanged.
    total = a + b
    return total
def add_with_wrong_doc(a: int, b: int) -> int:
    r"""Adds two numbers.
    Args:
        a (int): The first number to be added.
    Returns:
        int: The sum of the two numbers.
    """
    # NOTE(review): the docstring deliberately omits ``b`` — presumably a
    # fixture for docstring-mismatch handling; do not "fix" it.
    return b + a
def test_cypher_return_correct_schema() -> None:
    r"""Verify that ``query``/``refresh_schema`` expose the expected schema.

    Builds a tiny three-node graph and checks node properties,
    relationship properties and relationship topology.
    """
    db = Neo4jGraph(
        url=url,
        username=username,
        password=password,
    )
    # Reset the database to a known-empty state.
    db.query("MATCH (n) DETACH DELETE n")
    # Seed: three labelled nodes joined by two REL_TYPE relationships.
    db.query("""
        CREATE (la:LabelA {property_a: 'a'})
        CREATE (lb:LabelB)
        CREATE (lc:LabelC)
        MERGE (la)-[:REL_TYPE]-> (lb)
        MERGE (la)-[:REL_TYPE {rel_prop: 'abc'}]-> (lc)
        """)
    db.refresh_schema()
    params = {"EXCLUDED_LABELS": [BASE_ENTITY_LABEL]}
    node_properties = db.query(NODE_PROPERTY_QUERY, params=params)
    relationships_properties = db.query(REL_PROPERTY_QUERY, params=params)
    relationships = db.query(REL_QUERY, params=params)
    assert node_properties == [
        {
            "output": {
                "properties": [{"property": "property_a", "type": "STRING"}],
                "labels": "LabelA",
            }
        }
    ]
    assert relationships_properties == [
        {
            "output": {
                "type": "REL_TYPE",
                "properties": [{"property": "rel_prop", "type": "STRING"}],
            }
        }
    ]
    # Neo4j does not guarantee row order, so sort before comparing.
    assert sorted(relationships, key=lambda row: row["output"]["end"]) == [
        {"output": {"start": "LabelA", "type": "REL_TYPE", "end": "LabelB"}},
        {"output": {"start": "LabelA", "type": "REL_TYPE", "end": "LabelC"}},
    ]
def test_neo4j_timeout() -> None:
    r"""Verify that a configured transaction timeout is enforced."""
    db = Neo4jGraph(
        url=url, username=username, password=password, timeout=0.1
    )
    try:
        # A write large enough that 0.1s should always be exceeded.
        db.query("UNWIND range(0,100000,1) AS i MERGE (:Foo {id:i})")
    except Exception as exc:
        assert (
            exc.code  # type: ignore[attr-defined]
            == "Neo.ClientError.Transaction."
            "TransactionTimedOutClientConfiguration"
        )
    # NOTE(review): if the query somehow completes in time, the test passes
    # vacuously — presumably intentional best-effort; confirm.
def test_neo4j_truncate_values() -> None:
    r"""Verify that over-long list values are truncated from query output."""
    db = Neo4jGraph(
        url=url, username=username, password=password, truncate=True
    )
    # Reset the database to a known-empty state.
    db.query("MATCH (n) DETACH DELETE n")
    db.query("""
        CREATE (la:LabelA {property_a: 'a'})
        CREATE (lb:LabelB)
        CREATE (lc:LabelC)
        MERGE (la)-[:REL_TYPE]-> (lb)
        MERGE (la)-[:REL_TYPE {rel_prop: 'abc'}]-> (lc)
        """)
    db.refresh_schema()
    rows = db.query("RETURN range(0,130,1) AS result")
    # The 131-element list exceeds the truncation limit, so the value is
    # dropped from the returned record entirely.
    assert rows == [{}]
def test_neo4j_add_data() -> None:
    r"""Verify importing graph elements without source or base label."""
    db = Neo4jGraph(
        url=url, username=username, password=password, truncate=True
    )
    # Reset data and constraints.
    db.query("MATCH (n) DETACH DELETE n")
    db.query("CALL apoc.schema.assert({}, {})")
    db.refresh_schema()
    db.add_graph_elements(test_data)
    rows = db.query(
        "MATCH (n) RETURN labels(n) AS label, count(*) AS count ORDER BY label"
    )
    assert rows == [
        {"label": ["type_obj"], "count": 1},
        {"label": ["type_subj"], "count": 1},
    ]
    # No constraints should have been created.
    assert db.structured_schema["metadata"]["constraint"] == []
def test_neo4j_add_data_source() -> None:
    r"""Verify importing graph elements together with their source element."""
    db = Neo4jGraph(
        url=url, username=username, password=password, truncate=True
    )
    # Reset data and constraints.
    db.query("MATCH (n) DETACH DELETE n")
    db.query("CALL apoc.schema.assert({}, {})")
    db.refresh_schema()
    db.add_graph_elements(test_data, include_source=True)
    rows = db.query(
        "MATCH (n) RETURN labels(n) AS label, count(*) AS count ORDER BY label"
    )
    # An extra ``Element`` node holds the source document.
    assert rows == [
        {"label": ["Element"], "count": 1},
        {"label": ["type_obj"], "count": 1},
        {"label": ["type_subj"], "count": 1},
    ]
    assert db.structured_schema["metadata"]["constraint"] == []
def test_neo4j_add_data_base() -> None:
    r"""Verify importing graph elements with the base entity label."""
    db = Neo4jGraph(
        url=url, username=username, password=password, truncate=True
    )
    # Reset data and constraints.
    db.query("MATCH (n) DETACH DELETE n")
    db.query("CALL apoc.schema.assert({}, {})")
    db.refresh_schema()
    db.add_graph_elements(test_data, base_entity_label=True)
    rows = db.query(
        "MATCH (n) RETURN apoc.coll.sort(labels(n)) AS label, "
        "count(*) AS count ORDER BY label"
    )
    # Every node carries the base label in addition to its own.
    assert rows == [
        {"label": [BASE_ENTITY_LABEL, "type_obj"], "count": 1},
        {"label": [BASE_ENTITY_LABEL, "type_subj"], "count": 1},
    ]
    # The base label brings a uniqueness constraint with it.
    assert db.structured_schema["metadata"]["constraint"] != []
def test_neo4j_add_data_base_source() -> None:
    r"""Verify importing graph elements with base entity label and source."""
    db = Neo4jGraph(
        url=url, username=username, password=password, truncate=True
    )
    # Reset data and constraints.
    db.query("MATCH (n) DETACH DELETE n")
    db.query("CALL apoc.schema.assert({}, {})")
    db.refresh_schema()
    db.add_graph_elements(
        test_data, base_entity_label=True, include_source=True
    )
    rows = db.query(
        "MATCH (n) RETURN apoc.coll.sort(labels(n)) AS label, "
        "count(*) AS count ORDER BY label"
    )
    # Source node plus the two base-labelled entity nodes.
    assert rows == [
        {"label": ["Element"], "count": 1},
        {"label": [BASE_ENTITY_LABEL, "type_obj"], "count": 1},
        {"label": [BASE_ENTITY_LABEL, "type_subj"], "count": 1},
    ]
    assert db.structured_schema["metadata"]["constraint"] != []
def test_neo4j_filtering_labels() -> None:
    r"""Verify that excluded labels are filtered out of the schema."""
    db = Neo4jGraph(
        url=url, username=username, password=password, truncate=True
    )
    # Reset data and constraints.
    db.query("MATCH (n) DETACH DELETE n")
    db.query("CALL apoc.schema.assert({}, {})")
    db.query(
        "CREATE (:`Excluded_Label_A`)-[:`Excluded_Rel_A`]->"
        "(:`Excluded_Label_B`)"
    )
    db.refresh_schema()
    # Both excluded labels and relationships must be absent.
    assert db.structured_schema["node_props"] == {}
    assert db.structured_schema["relationships"] == []
def parse_dist_meta():
    """Extract metadata information from ``$dist/__init__.py``."""
    handlers = {re_meta: _add_default, re_doc: _add_doc}
    here = os.path.abspath(os.path.dirname(__file__))
    distmeta = {}
    with open(os.path.join(here, NAME, '__init__.py')) as meta_fh:
        for raw in meta_fh:
            line = raw.strip()
            # Stop at the explicit end-of-metadata sentinel.
            if line == '# -eof meta-':
                break
            for pattern, handler in handlers.items():
                match = pattern.match(line)
                if match:
                    distmeta.update(handler(match))
    return distmeta
def reqs(*f):
    """Parse requirement file.

    Example:
        reqs('default.txt')          # requirements/default.txt
        reqs('extras', 'redis.txt')  # requirements/extras/redis.txt

    Returns:
        List[str]: list of requirements specified in the file.
    """
    flattened = []
    for subreq in _reqs(*f):
        flattened.extend(subreq)
    return flattened
def extras(*p):
    """Parse a requirement file under the requirements/extras/ directory."""
    return reqs('extras', *p)
def install_requires():
    """Return the requirements needed for a plain installation."""
    return reqs('default.txt')
def extras_require():
    """Return a mapping of extra name -> its requirement list."""
    return {name: extras(name + '.txt') for name in EXTENSIONS}
def EmbeddedService(app, max_interval=None, **kwargs):
    """Return embedded clock service.

    Arguments:
        thread (bool): Run threaded instead of as a separate process.
            Uses :mod:`multiprocessing` by default, if available.
    """
    threaded = kwargs.pop('thread', False)
    if threaded or _Process is None:
        # A short max interval lets the thread be stopped in reasonable time.
        return _Threaded(app, max_interval=1, **kwargs)
    return _Process(app, max_interval=max_interval, **kwargs)
def maybe_unroll_group(group):
    """Unroll group with only one member.

    This allows treating a group of a single task as if it
    was a single task without pre-knowledge.
    """
    # Issue #1656
    try:
        size = len(group.tasks)
    except TypeError:
        # ``tasks`` may be a generator; fall back to its length hint.
        try:
            size = group.tasks.__length_hint__()
        except (AttributeError, TypeError):
            return group
        if size == 1:
            return list(group.tasks)[0]
        return group
    return group.tasks[0] if size == 1 else group
When stamping a sequence of tasks created by a generator,
we use this function to stamp each task in the generator
without exhausting it. | def _stamp_regen_task(task, visitor, append_stamps, **headers):
"""When stamping a sequence of tasks created by a generator,
we use this function to stamp each task in the generator
without exhausting it."""
task.stamp(visitor, append_stamps, **headers)
return task |
Merge two dictionaries recursively into the first one.
Example:
>>> d1 = {'dict': {'a': 1}, 'list': [1, 2], 'tuple': (1, 2)}
>>> d2 = {'dict': {'b': 2}, 'list': [3, 4], 'set': {'a', 'b'}}
>>> _merge_dictionaries(d1, d2)
d1 will be modified to: {
'dict': {'a': 1, 'b': 2},
'list': [1, 2, 3, 4],
'tuple': (1, 2),
'set': {'a', 'b'}
}
Arguments:
d1 (dict): Dictionary to merge into.
d2 (dict): Dictionary to merge from.
aggregate_duplicates (bool):
If True, aggregate duplicated items (by key) into a list of all values in d1 in the same key.
If False, duplicate keys will be taken from d2 and override the value in d1. | def _merge_dictionaries(d1, d2, aggregate_duplicates=True):
"""Merge two dictionaries recursively into the first one.
Example:
>>> d1 = {'dict': {'a': 1}, 'list': [1, 2], 'tuple': (1, 2)}
>>> d2 = {'dict': {'b': 2}, 'list': [3, 4], 'set': {'a', 'b'}}
>>> _merge_dictionaries(d1, d2)
d1 will be modified to: {
'dict': {'a': 1, 'b': 2},
'list': [1, 2, 3, 4],
'tuple': (1, 2),
'set': {'a', 'b'}
}
Arguments:
d1 (dict): Dictionary to merge into.
d2 (dict): Dictionary to merge from.
aggregate_duplicates (bool):
If True, aggregate duplicated items (by key) into a list of all values in d1 in the same key.
If False, duplicate keys will be taken from d2 and override the value in d1.
"""
if not d2:
return
for key, value in d1.items():
if key in d2:
if isinstance(value, dict):
_merge_dictionaries(d1[key], d2[key])
else:
if isinstance(value, (int, float, str)):
d1[key] = [value] if aggregate_duplicates else value
if isinstance(d2[key], list) and isinstance(d1[key], list):
d1[key].extend(d2[key])
elif aggregate_duplicates:
if d1[key] is None:
d1[key] = []
else:
d1[key] = list(d1[key])
d1[key].append(d2[key])
for key, value in d2.items():
if key not in d1:
d1[key] = value |
def signature(varies, *args, **kwargs):
    """Create new signature.

    - if the first argument is a signature already then it's cloned.
    - if the first argument is a dict, then a Signature version is returned.

    Returns:
        Signature: The resulting signature.
    """
    if not isinstance(varies, dict):
        return Signature(varies, *args, **kwargs)
    # A CallableSignature is itself a mapping, so check it first.
    if isinstance(varies, abstract.CallableSignature):
        return varies.clone()
    return Signature.from_dict(varies, app=kwargs.get('app'))
def maybe_signature(d, app=None, clone=False):
    """Ensure obj is a signature, or None.

    Arguments:
        d (Optional[Union[abstract.CallableSignature, Mapping]]):
            Signature or dict-serialized signature.
        app (celery.Celery): App to bind signature to.
        clone (bool): If d is already a signature, it will be cloned
            when this flag is enabled.

    Returns:
        Optional[abstract.CallableSignature]
    """
    if d is None:
        return None
    if isinstance(d, abstract.CallableSignature):
        if clone:
            d = d.clone()
    elif isinstance(d, dict):
        d = signature(d)
    if app is not None:
        d._app = app
    return d
def reraise(tp, value, tb=None):
    """Reraise exception, attaching ``tb`` as its traceback when needed."""
    if value.__traceback__ is tb:
        raise value
    raise value.with_traceback(tb)
def try_import(module, default=None):
    """Try to import and return module.

    Returns ``default`` (:const:`None`) if the module does not exist.
    """
    try:
        imported = import_module(module)
    except ImportError:
        return default
    return imported
def maybe_evaluate(obj):
    """Attempt to evaluate promise, even if obj is not a promise."""
    try:
        return obj.__maybe_evaluate__()
    except AttributeError:
        # Not a promise: hand the object back untouched.
        return obj
def getappattr(path):
    """Get attribute from current_app recursively.

    Example: ``getappattr('amqp.get_task_consumer')``.
    """
    # Imported lazily to avoid a circular import at module load time.
    from celery import current_app
    return current_app._rgetattr(path)
def pyimplementation():
    """Return string identifying the current Python implementation."""
    if hasattr(_platform, 'python_implementation'):
        return _platform.python_implementation()
    if sys.platform.startswith('java'):
        return 'Jython ' + sys.platform
    if hasattr(sys, 'pypy_version_info'):
        version = '.'.join(str(p) for p in sys.pypy_version_info[:3])
        extra = sys.pypy_version_info[3:]
        if extra:
            version += '-' + ''.join(str(p) for p in extra)
        return 'PyPy ' + version
    return 'CPython'
def create_pidlock(pidfile):
    """Create and verify pidfile.

    If the pidfile already exists the program exits with an error message,
    however if the process it refers to isn't running anymore, the pidfile
    is deleted and the program continues.

    This function installs an :mod:`atexit` handler to release the lock at
    exit; call :func:`_create_pidlock` directly to skip that.

    Returns:
        Pidfile: used to manage the lock.

    Example:
        >>> pidlock = create_pidlock('/var/run/app.pid')
    """
    pidlock = _create_pidlock(pidfile)
    # Make sure the lock is released on interpreter shutdown as well.
    atexit.register(pidlock.release)
    return pidlock
def fd_by_path(paths):
    """Return a list of file descriptors.

    This method returns list of file descriptors corresponding to
    file paths passed in the ``paths`` variable.

    Arguments:
        paths: List[str]: List of file paths.

    Returns:
        List[int]: List of file descriptors.

    Example:
        >>> keep = fd_by_path(['/dev/urandom', '/my/precious/'])
    """
    stats = set()
    for path in paths:
        try:
            fd = os.open(path, os.O_RDONLY)
        except OSError:
            continue
        try:
            # (st_ino, st_dev) identifies the file regardless of fd number.
            stats.add(os.fstat(fd)[1:3])
        finally:
            os.close(fd)

    def keep(fd):
        try:
            return os.fstat(fd)[1:3] in stats
        except OSError:
            return False

    return [candidate for candidate in range(get_fdmax(2048))
            if keep(candidate)]
def detached(logfile=None, pidfile=None, uid=None, gid=None, umask=0,
             workdir=None, fake=False, **opts):
    """Detach the current process in the background (daemonize).

    Arguments:
        logfile (str): Optional log file; the ability to write to it is
            verified before the process is detached.
        pidfile (str): Optional pid file. It won't be created here (that is
            the child's responsibility), but the process exits if the pid
            lock exists and the pid written is still running.
        uid (int, str): Optional user id or user name for effective
            privileges.
        gid (int, str): Optional group id or group name for effective
            privileges.
        umask (str, int): Optional umask effective in the child process.
        workdir (str): Optional new working directory.
        fake (bool): Don't actually detach, intended for debugging.
        **opts (Any): Ignored.

    Example:
        >>> from celery.platforms import detached, create_pidlock
        >>> with detached(
        ...     logfile='/var/log/app.log',
        ...     pidfile='/var/run/app.pid',
        ...     uid='nobody'):
        ... # Now in detached child process with effective user set to nobody,
        ... # and we know that our logfile can be written to, and that
        ... # the pidfile isn't locked.
        ... pidlock = create_pidlock('/var/run/app.pid')
        ...
        ... # Run the program
        ... program.run(logfile='/var/log/app.log')
    """
    if not resource:
        raise RuntimeError('This platform does not support detach.')
    if workdir is None:
        workdir = os.getcwd()
    # Make sure SIGCLD is using the default handler.
    signals.reset('SIGCLD')
    maybe_drop_privileges(uid=uid, gid=gid)

    def after_chdir_do():
        # Without stderr any errors are silently suppressed, so verify up
        # front that the logfile can actually be written to.
        if logfile:
            open(logfile, 'a').close()
        # Doesn't create the pidfile, only makes sure it's not stale.
        if pidfile:
            _create_pidlock(pidfile).release()

    return DaemonContext(
        umask=umask, workdir=workdir, fake=fake, after_chdir=after_chdir_do,
    )
def parse_uid(uid):
    """Parse user id.

    Arguments:
        uid (str, int): Actual uid, or the username of a user.

    Returns:
        int: The actual uid.
    """
    try:
        return int(uid)
    except ValueError:
        pass
    # Not numeric: resolve the user name via the password database.
    try:
        return pwd.getpwnam(uid).pw_uid
    except (AttributeError, KeyError):
        raise KeyError(f'User does not exist: {uid}')
def parse_gid(gid):
    """Parse group id.

    Arguments:
        gid (str, int): Actual gid, or the name of a group.

    Returns:
        int: The actual gid of the group.
    """
    try:
        return int(gid)
    except ValueError:
        pass
    # Not numeric: resolve the group name via the group database.
    try:
        return grp.getgrnam(gid).gr_gid
    except (AttributeError, KeyError):
        raise KeyError(f'Group does not exist: {gid}')
def setgroups(groups):
    """Set active groups from a list of group ids."""
    try:
        max_groups = os.sysconf('SC_NGROUPS_MAX')
    except Exception:  # pylint: disable=broad-except
        max_groups = None
    try:
        return _setgroups_hack(groups[:max_groups])
    except OSError as exc:
        if exc.errno != errno.EPERM:
            raise
        if any(group not in groups for group in os.getgroups()):
            # we shouldn't be allowed to change to this group.
            raise
def initgroups(uid, gid):
    """Init process group permissions.

    Compat version of :func:`os.initgroups` that was first
    added to Python 2.7.
    """
    if not pwd:  # pragma: no cover
        return
    username = pwd.getpwuid(uid)[0]
    if hasattr(os, 'initgroups'):  # Python 2.7+
        return os.initgroups(username, gid)
    # Fallback: collect every group listing this user as a member.
    setgroups([gr.gr_gid for gr in grp.getgrall()
               if username in gr.gr_mem])
def setgid(gid):
    """Version of :func:`os.setgid` supporting group names."""
    resolved = parse_gid(gid)
    os.setgid(resolved)
def setuid(uid):
    """Version of :func:`os.setuid` supporting usernames."""
    resolved = parse_uid(uid)
    os.setuid(resolved)
def maybe_drop_privileges(uid=None, gid=None):
    """Change process privileges to new user/group.

    If UID and GID is specified, the real user/group is changed.
    If only UID is specified, the real user is changed, and the group is
    changed to the users primary group.
    If only GID is specified, only the group is changed.
    """
    if sys.platform == 'win32':
        return
    if os.geteuid():
        # No point trying to setuid unless we're root.
        if not os.getuid():
            raise SecurityError('contact support')
    uid = uid and parse_uid(uid)
    gid = gid and parse_gid(gid)
    if uid:
        _setuid(uid, gid)
    elif gid:
        setgid(gid)
    # Double-check the privileges were actually dropped.
    if uid and not os.getuid() and not os.geteuid():
        raise SecurityError('Still root uid after drop privileges!')
    if gid and not os.getgid() and not os.getegid():
        raise SecurityError('Still root gid after drop privileges!')
def signal_name(signum):
    """Return name of signal from signal number."""
    # Strip the leading ``SIG`` from the mapped name.
    return SIGMAP[signum][3:]
def set_pdeathsig(name):
    """Sends signal ``name`` to process when parent process terminates."""
    # Fix: honour the ``name`` argument instead of a hard-coded 'SIGKILL',
    # matching the docstring (callers already pass 'SIGKILL', so behaviour
    # is unchanged for them).
    if signals.supported(name):
        try:
            _set_pdeathsig(signals.signum(name))
        except OSError:
            # We ignore when OS does not support set_pdeathsig
            pass
def set_process_title(progname, info=None):
    """Set the :command:`ps` name for the currently running process.

    Only works if :pypi:`setproctitle` is installed.
    """
    proctitle = f'[{progname}]'
    if info:
        proctitle = f'{proctitle} {info}'
    if _setproctitle:
        _setproctitle.setproctitle(safe_str(proctitle))
    return proctitle
def get_errno_name(n):
    """Get errno for string (e.g., ``ENOENT``)."""
    return getattr(errno, n) if isinstance(n, str) else n
def ignore_errno(*errnos, **kwargs):
    """Context manager to ignore specific POSIX error codes.

    Takes a list of error codes to ignore: this can be either
    the name of the code, or the code integer itself::

        >>> with ignore_errno('ENOENT'):
        ...     with open('foo', 'r') as fh:
        ...         return fh.read()

        >>> with ignore_errno(errno.ENOENT, errno.EPERM):
        ...     pass

    Arguments:
        types (Tuple[Exception]): A tuple of exceptions to ignore
            (when the errno matches).  Defaults to :exc:`Exception`.
    """
    types = kwargs.get('types') or (Exception,)
    wanted = [get_errno_name(code) for code in errnos]
    try:
        yield
    except types as exc:
        # Only swallow exceptions that carry a matching errno.
        if not hasattr(exc, 'errno'):
            raise
        if exc.errno not in wanted:
            raise
def result_from_tuple(r, app=None):
    """Deserialize result from tuple."""
    # Earlier backends may just pickle, so check whether the
    # result is already a prepared Result instance.
    app = app_or_default(app)
    if isinstance(r, ResultBase):
        return r
    res, nodes = r
    if isinstance(res, (list, tuple)):
        id, parent = res
    else:
        id, parent = res, None
    if parent:
        parent = result_from_tuple(parent, app)
    if nodes is not None:
        children = [result_from_tuple(child, app) for child in nodes]
        return app.GroupResult(id, children, parent=parent)
    return app.AsyncResult(id, parent=parent)
def maybe_schedule(
        s: int | float | timedelta | BaseSchedule, relative: bool = False,
        app: Celery | None = None) -> float | timedelta | BaseSchedule:
    """Return schedule from number, timedelta, or actual schedule."""
    if s is None:
        return s
    if isinstance(s, (float, int)):
        s = timedelta(seconds=s)
    if isinstance(s, timedelta):
        return schedule(s, relative, app=app)
    # Already a schedule instance: just bind the app.
    s.app = app
    return s
def precedence(state: str) -> int:
    """Get the precedence index for state.

    Lower index means higher precedence.
    """
    return PRECEDENCE_LOOKUP.get(state, NONE_PRECEDENCE)
def connect_on_app_finalize(callback):
    """Connect callback to be called when any app is finalized.

    Usable as a decorator: the callback is returned unchanged.
    """
    _on_app_finalizers.add(callback)
    return callback
def set_default_app(app):
    """Set default app."""
    # Rebind the module-level default.
    global default_app
    default_app = app
def get_current_task():
    """Return the task currently being executed, if any."""
    return _task_stack.top
def get_current_worker_task():
    """Currently executing task, that was applied by the worker.

    This is used to differentiate between the actual task
    executed by the worker and any task that was called within
    a task (using ``task.__call__`` or ``task.apply``).
    """
    # Walk the task stack outermost-first: the first task not called
    # directly is the one the worker itself applied.
    for task in reversed(_task_stack.stack):
        if not task.request.called_directly:
            return task
    return None
def enable_trace():
    """Enable tracing of app instances."""
    global app_or_default
    # Swap in the tracing variant of the accessor.
    app_or_default = _app_or_default_trace
def disable_trace():
    """Disable tracing of app instances."""
    global app_or_default
    # Restore the plain (non-tracing) accessor.
    app_or_default = _app_or_default
Search argv for options specifying short and longopt alternatives.
Returns:
str: value for option found
Raises:
KeyError: if option not found. | def _find_option_with_arg(argv, short_opts=None, long_opts=None):
"""Search argv for options specifying short and longopt alternatives.
Returns:
str: value for option found
Raises:
KeyError: if option not found.
"""
for i, arg in enumerate(argv):
if arg.startswith('-'):
if long_opts and arg.startswith('--'):
name, sep, val = arg.partition('=')
if name in long_opts:
return val if sep else argv[i + 1]
if short_opts and arg in short_opts:
return argv[i + 1]
raise KeyError('|'.join(short_opts or [] + long_opts or [])) |
def maybe_patch_concurrency(argv=None, short_opts=None,
                            long_opts=None, patches=None):
    """Apply eventlet/gevent monkeypatches.

    With short and long opt alternatives that specify the command line
    option to set the pool, this makes sure that anything that needs
    to be patched is completed as early as possible.
    (e.g., eventlet/gevent monkey patches).
    """
    argv = argv if argv else sys.argv
    short_opts = short_opts if short_opts else ['-P']
    long_opts = long_opts if long_opts else ['--pool']
    patches = patches if patches else {'eventlet': _patch_eventlet,
                                       'gevent': _patch_gevent}
    try:
        pool = _find_option_with_arg(argv, short_opts, long_opts)
    except KeyError:
        # No pool option on the command line; nothing to patch.
        return
    try:
        patcher = patches[pool]
    except KeyError:
        pass
    else:
        patcher()
    # set up eventlet/gevent environments ASAP
    from celery import concurrency
    if pool in concurrency.get_available_pool_names():
        concurrency.get_implementation(pool)
def main() -> None:
    """Entrypoint to the ``celery`` umbrella command."""
    # ``celery multi`` manages its own child processes, so the parent
    # process must not be monkey-patched.
    if 'multi' not in sys.argv:
        maybe_patch_concurrency()
    from celery.bin.celery import main as _main
    sys.exit(_main())
def prepare(annotations):
    """Expand the :setting:`task_annotations` setting.

    Accepts ``None``, a single annotation, or a list/tuple of
    annotations.  Dicts become :class:`MapAnnotation` instances,
    strings are lazily instantiated by name, and anything else is
    passed through unchanged.
    """
    if annotations is None:
        return ()

    def expand_annotation(annotation):
        if isinstance(annotation, dict):
            return MapAnnotation(annotation)
        if isinstance(annotation, str):
            return mlazy(instantiate, annotation)
        return annotation

    if not isinstance(annotations, (list, tuple)):
        annotations = (annotations,)
    return [expand_annotation(item) for item in annotations]
def resolve_all(anno, task):
    """Resolve all pending annotations.

    Yields the non-empty results of matching *task* against the
    annotations: exact match first, then pattern match.
    """
    candidates = (_first_match(anno, task), _first_match_any(anno))
    return (match for match in candidates if match)
def add_autoretry_behaviour(task, **options):
    """Wrap task's `run` method with auto-retry functionality.

    Retry configuration is read from ``**options`` first, falling back
    to attributes already set on the task class.  When
    ``autoretry_for`` is non-empty, ``task.run`` is replaced with a
    wrapper that retries on the configured exception types, optionally
    with exponential backoff and jitter.
    """
    def _opt(name, default):
        # An explicitly passed option wins over a class attribute.
        return options.get(name, getattr(task, name, default))

    autoretry_for = tuple(_opt('autoretry_for', ()))
    dont_autoretry_for = tuple(_opt('dont_autoretry_for', ()))
    retry_kwargs = _opt('retry_kwargs', {})
    retry_backoff = float(_opt('retry_backoff', False))
    retry_backoff_max = int(_opt('retry_backoff_max', 600))
    retry_jitter = _opt('retry_jitter', True)

    if not autoretry_for or hasattr(task, '_orig_run'):
        # Nothing to retry on, or the task is already wrapped.
        return

    @wraps(task.run)
    def run(*args, **kwargs):
        try:
            return task._orig_run(*args, **kwargs)
        except Ignore:
            # If Ignore signal occurs task shouldn't be retried,
            # even if it suits autoretry_for list
            raise
        except Retry:
            raise
        except dont_autoretry_for:
            raise
        except autoretry_for as exc:
            if retry_backoff:
                retry_kwargs['countdown'] = \
                    get_exponential_backoff_interval(
                        factor=int(max(1.0, retry_backoff)),
                        retries=task.request.retries,
                        maximum=retry_backoff_max,
                        full_jitter=retry_jitter)
            # Override max_retries
            if hasattr(task, 'override_max_retries'):
                retry_kwargs['max_retries'] = getattr(
                    task, 'override_max_retries', task.max_retries)
            ret = task.retry(exc=exc, **retry_kwargs)
            # Stop propagation
            if hasattr(task, 'override_max_retries'):
                delattr(task, 'override_max_retries')
            raise ret

    task._orig_run, task.run = task.run, run
def by_name(backend=None, loader=None,
            extension_namespace='celery.result_backends'):
    """Get backend class by name/alias.

    Resolves *backend* (a name, alias, or dotted path) to a backend
    class, consulting loader overrides and installed extensions.

    Raises:
        ImproperlyConfigured: if the name cannot be resolved to a
            backend class.
    """
    backend = backend or 'disabled'
    loader = loader or current_app.loader
    aliases = dict(BACKEND_ALIASES, **loader.override_backends)
    aliases.update(load_extension_class_names(extension_namespace))
    try:
        cls = symbol_by_name(backend, aliases)
    except ValueError as exc:
        # Re-raise with the original traceback so the root cause of
        # the lookup failure stays visible.
        reraise(ImproperlyConfigured, ImproperlyConfigured(
            UNKNOWN_BACKEND.strip().format(backend, exc)), sys.exc_info()[2])
    if isinstance(cls, types.ModuleType):
        raise ImproperlyConfigured(UNKNOWN_BACKEND.strip().format(
            backend, 'is a Python module, not a backend class.'))
    return cls
def by_url(backend=None, loader=None):
    """Get backend class by URL.

    Splits a ``scheme://...`` (or ``name+scheme://...``) backend URL
    into a ``(backend_class, url)`` pair.
    """
    url = None
    if backend and '://' in backend:
        url = backend
        scheme = url.partition('://')[0]
        if '+' in scheme:
            # e.g. `db+postgresql://...` -> backend `db`,
            # url `postgresql://...`.
            backend, url = url.split('+', 1)
        else:
            backend = scheme
    return by_name(backend, loader), url
def app_has_custom(app, attr):
    """Return true if app has customized method `attr`.

    Note:
        This is used for optimizations in cases where we know
        how the default behavior works, but need to account
        for someone using inheritance to override a method/property.
    """
    return mro_lookup(
        app.__class__,
        attr,
        stop={Celery, object},
        monkey_patched=[__name__],
    )
def _unpickle_appattr(reverse_name, args):
    """Unpickle app.

    Given an attribute name and a list of args, gets the attribute
    from the current app and calls it.
    """
    attr = get_current_app()._rgetattr(reverse_name)
    return attr(*args)
def add_backend_cleanup_task(app):
    """Task used to clean up expired results.

    If the configured backend requires periodic cleanup this task is also
    automatically configured to run every day at 4am (requires
    :program:`celery beat` to be running).
    """
    def backend_cleanup():
        app.backend.cleanup()

    # Register with the app; equivalent to using @app.task as a
    # decorator, applied explicitly.
    decorate = app.task(name='celery.backend_cleanup',
                        shared=False, lazy=False)
    return decorate(backend_cleanup)
Subsets and Splits
No saved queries yet
Save your SQL queries to embed, download, and access them later. Queries will appear here once saved.