Adds the optimizer hooks from a DeepSpeed ZeRO-3 model. | def add_hooks(model: "DeepSpeedEngine") -> None:
"""Adds the optimizer hooks from a DeepSpeed ZeRO-3 model."""
if model.optimizer is not None and hasattr(model.optimizer, "parameter_offload"):
optimizer_offload = model.optimizer.parameter_offload
elif model.optimizer is not None:
optimizer_offload = model.optimizer
optimizer_offload._register_hooks_recursively(optimizer_offload.module) |
Context manager to unwrap a model for generation.
For ZeRO-3 models, we gather the weights once to speed up generation. | def unwrap_model_for_generation(
model: Union["DistributedDataParallel", "DeepSpeedEngine"], accelerator: "Accelerator", is_peft_model: bool = False
) -> Union["PreTrainedModelWrapper", "DeepSpeedEngine"]:
"""Context manager to unwrap a model for generation.
For ZeRO-3 models, we gather the weights once to speed up generation.
"""
unwrapped_model = accelerator.unwrap_model(model)
if is_peft_model:
unwrapped_model.pretrained_model.disable_adapter()
if accelerator.state.deepspeed_plugin is not None and accelerator.state.deepspeed_plugin.zero_stage == 3:
with deepspeed.zero.GatheredParameters(model.parameters()):
remove_hooks(model)
yield model
add_hooks(model)
else:
yield unwrapped_model |
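A minimal usage sketch of the helper above; `model`, `accelerator`, and `input_ids` are assumed to come from the surrounding training loop, and the bare `yield` statements imply the function is meant to be wrapped with `@contextlib.contextmanager` and used in a `with` block:
```python
# Hypothetical usage; `model`, `accelerator`, and `input_ids` are assumed
# to exist in the surrounding training loop.
with unwrap_model_for_generation(model, accelerator) as unwrapped_model:
    output_ids = unwrapped_model.generate(input_ids, max_new_tokens=64)
```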
Creates mismatched pairs of prompts and completions for the KL dataset by reversing the order of completions. | def _get_kl_dataset(batch: Dict[str, List[Any]]) -> Dict[str, List[Any]]:
"""Creates mismatched pairs of prompts and completions for the KL dataset by reversing the order of completions."""
batch["answer_input_ids"] = batch["answer_input_ids"][::-1]
batch["answer_attention_mask"] = batch["answer_attention_mask"][::-1]
return batch |
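A toy illustration of the reversal (the token ids are made up): after the call each prompt is paired with a different completion, which is exactly the mismatch the KL dataset needs.
```python
batch = {
    "answer_input_ids": [[11], [22], [33]],
    "answer_attention_mask": [[1], [1], [1]],
}
_get_kl_dataset(batch)
print(batch["answer_input_ids"])  # [[33], [22], [11]] -- completions reversed
```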
Tokenize a batch from a KTO/BCO specific dataset. | def _tokenize(
batch: Dict[str, List[Any]],
tokenizer: "PreTrainedTokenizer",
embedding_tokenizer: Optional["PreTrainedTokenizer"] = None,
) -> Dict[str, List[Any]]:
"""Tokenize a batch from a KTO/BCO specific dataset."""
prompt_tokenized = tokenizer(batch["prompt"], add_special_tokens=False)
prompt_input_ids = prompt_tokenized["input_ids"]
prompt_attention_mask = prompt_tokenized["attention_mask"]
prompt_and_completion = [prompt + completion for prompt, completion in zip(batch["prompt"], batch["completion"])]
full_tokenized = tokenizer(prompt_and_completion, add_special_tokens=False)
full_input_ids = full_tokenized["input_ids"]
full_attention_mask = full_tokenized["attention_mask"]
answer_input_ids = [f[len(p) :] for f, p in zip(full_input_ids, prompt_input_ids)]
answer_attention_mask = [f[len(p) :] for f, p in zip(full_attention_mask, prompt_attention_mask)]
# Concat tokens to form `enc(a) + enc(a + b)[len(enc(a)):]`
full_concat_input_ids = [np.concatenate([p, a]) for p, a in zip(prompt_input_ids, answer_input_ids)]
# Prepare input tokens for token by token comparison
full_input_ids = [np.array(f) for f in full_input_ids]
for full, concat in zip(full_input_ids, full_concat_input_ids):
if len(full) != len(concat):
raise ValueError("Prompt input ids and answer input ids should have the same length.")
# On some tokenizers, like Llama-2 tokenizer, there are occasions where tokens
# can be merged together when tokenizing prompt+answer. This could result
# on the last token from the prompt being different when tokenized on its own
# vs when done as prompt+answer.
response_token_ids_start_idx = [len(p) for p in prompt_input_ids]
# If tokenized prompt is different than both prompt+answer, then it means the
# last token has changed due to merging.
for idx, (p, f, r) in enumerate(zip(prompt_input_ids, full_input_ids, response_token_ids_start_idx)):
if not np.array_equal(p, f[:r]):
response_token_ids_start_idx[idx] -= 1
prompt_input_ids = [f[:r] for f, r in zip(full_input_ids, response_token_ids_start_idx)]
prompt_attention_mask = [f[:r] for f, r in zip(full_attention_mask, response_token_ids_start_idx)]
for p, m in zip(prompt_input_ids, prompt_attention_mask):
if len(p) != len(m):
raise ValueError("Prompt input ids and attention mask should have the same length.")
answer_input_ids = [f[r:] for f, r in zip(full_input_ids, response_token_ids_start_idx)]
answer_attention_mask = [f[r:] for f, r in zip(full_attention_mask, response_token_ids_start_idx)]
output = dict(
prompt_input_ids=prompt_input_ids,
prompt_attention_mask=prompt_attention_mask,
answer_input_ids=answer_input_ids,
answer_attention_mask=answer_attention_mask,
)
if embedding_tokenizer is not None:
embedding_tokenized = embedding_tokenizer(batch["prompt"], truncation=True, add_special_tokens=False)
output.update(
{
"embedding_input_ids": embedding_tokenized["input_ids"],
"embedding_attention_mask": embedding_tokenized["attention_mask"],
}
)
return output |
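A self-contained toy illustration of the merge adjustment above (all token ids are invented): when the last prompt token changes under joint tokenization, the split point moves back by one, as `_tokenize` does.
```python
import numpy as np

prompt_input_ids = [5, 9]               # enc(prompt) on its own
full_input_ids = np.array([5, 12, 7])   # enc(prompt + answer); 9 merged into 12
start = len(prompt_input_ids)
if not np.array_equal(prompt_input_ids, full_input_ids[:start]):
    start -= 1  # re-split one token earlier
print(full_input_ids[:start], full_input_ids[start:])  # [5] [12  7]
```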
Process tokens of a KTO specific dataset.
At this stage, we don't convert to PyTorch tensors yet; we just handle the truncation
in case the prompt plus completion is too long. First
we truncate the prompt; if the result is still too long, we truncate the completion.
We also create the labels for the completion responses, which are of length equal to
the sum of the length of the prompt and the completion response, with
label_pad_token_id for the prompt tokens. | def _process_tokens(example: Dict[str, Any], model: "PreTrainedModel" = None, **kwargs) -> Dict:
"""Process tokens of a KTO specific dataset.
At this stage, we don't convert to PyTorch tensors yet; we just handle the truncation
in case the prompt plus completion is too long. First
we truncate the prompt; if the result is still too long, we truncate the completion.
We also create the labels for the completion responses, which are of length equal to
the sum of the length of the prompt and the completion response, with
label_pad_token_id for the prompt tokens.
"""
prompt = example["prompt"]
completion = example["completion"]
batch = {
f"{kwargs['prefix']}prompt": prompt,
f"{kwargs['prefix']}completion": completion,
f"{kwargs['prefix']}label": example["label"],
}
if not kwargs["is_encoder_decoder"]:
# Check issues below for more details
# 1. https://github.com/huggingface/trl/issues/907
# 2. https://github.com/EleutherAI/lm-evaluation-harness/pull/531#issuecomment-1595586257
# 3. https://github.com/LianjiaTech/BELLE/issues/337
if not isinstance(prompt, str):
raise ValueError(f"prompt should be an str but got {type(prompt)}")
if not isinstance(completion, str):
raise ValueError(f"completion should be an str but got {type(completion)}")
# keys of format prompt_* refers to just the prompt and answer_* refers to just the answer
all_tokens = {
"prompt_input_ids": example["prompt_input_ids"],
"prompt_attention_mask": example["prompt_attention_mask"],
"answer_input_ids": example["answer_input_ids"],
"answer_attention_mask": example["answer_attention_mask"],
}
max_length = kwargs["max_length"] - 2
# if combined sequence is too long (> max_length - 1 for BOS token - 1 for EOS), truncate the prompt
if len(all_tokens["prompt_input_ids"]) + len(all_tokens["answer_input_ids"]) > max_length:
for k in ["prompt_input_ids", "prompt_attention_mask"]:
if kwargs["truncation_mode"] == "keep_start":
all_tokens[k] = all_tokens[k][: kwargs["max_prompt_length"]]
elif kwargs["truncation_mode"] == "keep_end":
all_tokens[k] = all_tokens[k][-kwargs["max_prompt_length"] :]
else:
raise ValueError(f"Unknown truncation mode: {kwargs['truncation_mode']}")
# if that's still too long, truncate the response
if len(all_tokens["prompt_input_ids"]) + len(all_tokens["answer_input_ids"]) > max_length:
for k in ["answer_input_ids", "answer_attention_mask"]:
all_tokens[k] = all_tokens[k][: max_length - kwargs["max_prompt_length"]]
# for legacy reasons, use the completion_* prefix to now refer to the joint sequence
batch[f"{kwargs['prefix']}prompt_input_ids"] = [kwargs["tokenizer"].bos_token_id] + all_tokens[
"prompt_input_ids"
]
batch[f"{kwargs['prefix']}prompt_attention_mask"] = [1] + all_tokens["prompt_attention_mask"]
batch[f"{kwargs['prefix']}completion_input_ids"] = (
[kwargs["tokenizer"].bos_token_id]
+ all_tokens["prompt_input_ids"]
+ all_tokens["answer_input_ids"]
+ [kwargs["tokenizer"].eos_token_id]
)
batch[f"{kwargs['prefix']}completion_attention_mask"] = (
[1] + all_tokens["prompt_attention_mask"] + all_tokens["answer_attention_mask"] + [1]
)
batch[f"{kwargs['prefix']}completion_labels"] = batch[f"{kwargs['prefix']}completion_input_ids"][:]
batch[f"{kwargs['prefix']}completion_labels"][: len(batch[f"{kwargs['prefix']}prompt_input_ids"])] = [
kwargs["label_pad_token_id"]
] * len(batch[f"{kwargs['prefix']}prompt_input_ids"])
else:
completion_tokens = kwargs["tokenizer"](
completion, truncation=True, max_length=kwargs["max_completion_length"], add_special_tokens=True
)
prompt_tokens = kwargs["tokenizer"](
prompt, truncation=True, max_length=kwargs["max_prompt_length"], add_special_tokens=True
)
batch[f"{kwargs['prefix']}prompt_input_ids"] = prompt_tokens["input_ids"]
batch[f"{kwargs['prefix']}prompt_attention_mask"] = prompt_tokens["attention_mask"]
batch[f"{kwargs['prefix']}completion_labels"] = completion_tokens["input_ids"]
batch[f"{kwargs['prefix']}completion_attention_mask"] = completion_tokens["attention_mask"]
if model is not None and hasattr(model, "prepare_decoder_input_ids_from_labels"):
batch[f"{kwargs['prefix']}completion_decoder_input_ids"] = model.prepare_decoder_input_ids_from_labels(
labels=torch.tensor(batch["completion_labels"])
)
return batch |
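A toy sketch of the label-masking step in the decoder-only branch (ids are illustrative; -100 is a common choice for `label_pad_token_id`, not necessarily the one used here):
```python
prompt_ids = [1, 7, 8]                    # BOS + prompt tokens
completion_ids = prompt_ids + [9, 10, 2]  # ... + answer tokens + EOS
labels = completion_ids[:]
labels[: len(prompt_ids)] = [-100] * len(prompt_ids)  # mask the prompt
print(labels)  # [-100, -100, -100, 9, 10, 2]
```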
Computes element-wise mean and variance of the tensor across processes. Reference:
https://github.com/OpenLMLab/MOSS-RLHF/blob/40b91eb2f2b71b16919addede0341d2bef70825d/utils.py#L57C1-L73C75 | def get_global_statistics(accelerator, xs: torch.Tensor, mask=None, device="cpu") -> Tuple[float, float, int]:
"""
Computes element-wise mean and variance of the tensor across processes. Reference:
https://github.com/OpenLMLab/MOSS-RLHF/blob/40b91eb2f2b71b16919addede0341d2bef70825d/utils.py#L57C1-L73C75
"""
xs = xs.to(accelerator.device)
sum_and_count = torch.tensor([xs.sum(), (xs.numel() if mask is None else mask.sum())], device=xs.device)
sum_and_count = accelerator.reduce(sum_and_count)
global_sum, count = sum_and_count
global_mean = global_sum / count
sum_var = torch.sum(((xs - global_mean) ** 2).mul(1 if mask is None else mask))
sum_var = accelerator.reduce(sum_var)
global_var = sum_var / count
return global_mean.to(device), global_var.to(device), count.to(device) |
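A single-process sanity check of the statistics above (no accelerator, no mask): dividing by `count` yields the population mean and variance.
```python
import torch

xs = torch.tensor([1.0, 2.0, 3.0, 4.0])
mean = xs.sum() / xs.numel()
var = ((xs - mean) ** 2).sum() / xs.numel()
print(mean.item(), var.item())  # 2.5 1.25
```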
Implements the NEFTune forward pass for the model using forward hooks. Note this works only for
torch.nn.Embedding layers. This method is slightly adapted from the original source code
that can be found here: https://github.com/neelsjain/NEFTune
Simply add it to your model as follows:
```python
model = ...
model.embed_tokens.neftune_noise_alpha = 0.1
model.embed_tokens.register_forward_hook(neftune_post_forward_hook)
```
Args:
module (`torch.nn.Module`):
The embedding module where the hook is attached. Note that you need to set
`module.neftune_noise_alpha` to the desired noise alpha value.
input (`torch.Tensor`):
The input tensor to the model.
output (`torch.Tensor`):
The output tensor of the model (i.e. the embeddings). | def neftune_post_forward_hook(module, input, output):
"""
Implements the NEFTune forward pass for the model using forward hooks. Note this works only for
torch.nn.Embedding layers. This method is slightly adapted from the original source code
that can be found here: https://github.com/neelsjain/NEFTune
Simply add it to your model as follows:
```python
model = ...
model.embed_tokens.neftune_noise_alpha = 0.1
model.embed_tokens.register_forward_hook(neftune_post_forward_hook)
```
Args:
module (`torch.nn.Module`):
The embedding module where the hook is attached. Note that you need to set
`module.neftune_noise_alpha` to the desired noise alpha value.
input (`torch.Tensor`):
The input tensor to the model.
output (`torch.Tensor`):
The output tensor of the model (i.e. the embeddings).
"""
if module.training:
dims = torch.tensor(output.size(1) * output.size(2))
mag_norm = module.neftune_noise_alpha / torch.sqrt(dims)
output = output + torch.zeros_like(output).uniform_(-mag_norm, mag_norm)
return output |
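A minimal runnable demo of the hook on a bare embedding layer (the sizes and alpha value are arbitrary); noise is only injected while the module is in training mode:
```python
import torch

emb = torch.nn.Embedding(10, 4)
emb.neftune_noise_alpha = 0.1
emb.register_forward_hook(neftune_post_forward_hook)
emb.train()  # in eval mode the hook returns the embeddings unchanged
noisy = emb(torch.tensor([[1, 2, 3]]))
```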
Print errors to stdout
| def error(_error, message):
""" Print errors to stdout
"""
print("[-] {}: {}".format(_error, message))
sys.exit(0) |
Error checking
| def check(args):
""" Error checking
"""
if args.username is not None or args.userlist or args.members_list:
if args.verified:
error("Contradicting Args",
"Please use --verified in combination with -s.")
if args.userid:
error("Contradicting Args",
"--userid and -u cannot be used together.")
if args.all:
error("Contradicting Args",
"--all and -u cannot be used together.")
elif args.search and args.timeline:
error("Contradicting Args",
"--s and --tl cannot be used together.")
elif args.timeline and not args.username:
error("Error", "-tl cannot be used without -u.")
elif args.search is None:
if args.custom_query is not None:
pass
elif (args.geo or args.near) is None and not (args.all or args.userid):
error("Error", "Please use at least -u, -s, -g or --near.")
elif args.all and args.userid:
error("Contradicting Args",
"--all and --userid cannot be used together")
if args.output is None:
if args.csv:
error("Error", "Please specify an output file (Example: -o file.csv).")
elif args.json:
error("Error", "Please specify an output file (Example: -o file.json).")
if args.backoff_exponent <= 0:
error("Error", "Please specify a positive value for backoff_exponent")
if args.min_wait_time < 0:
error("Error", "Please specify a non-negative value for min_wait_time")
Concatenate users
| def loadUserList(ul, _type):
""" Concatenate users
"""
if os.path.exists(os.path.abspath(ul)):
with open(os.path.abspath(ul), "r") as f:
userlist = f.read().splitlines()
else:
userlist = ul.split(",")
if _type == "search":
un = ""
for user in userlist:
un += "%20OR%20from%3A" + user
return un[15:]
return userlist |
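For example, with a hypothetical two-user input:
```python
print(loadUserList("alice,bob", "search"))
# alice%20OR%20from%3Abob  -- i.e. "alice OR from:bob", URL-encoded
```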
Set default values for config from args
| def initialize(args):
""" Set default values for config from args
"""
c = config.Config()
c.Username = args.username
c.User_id = args.userid
c.Search = args.search
c.Geo = args.geo
c.Location = args.location
c.Near = args.near
c.Lang = args.lang
c.Output = args.output
c.Elasticsearch = args.elasticsearch
c.Year = args.year
c.Since = args.since
c.Until = args.until
c.Email = args.email
c.Phone = args.phone
c.Verified = args.verified
c.Store_csv = args.csv
c.Tabs = args.tabs
c.Store_json = args.json
c.Show_hashtags = args.hashtags
c.Show_cashtags = args.cashtags
c.Limit = args.limit
c.Count = args.count
c.Stats = args.stats
c.Database = args.database
c.To = args.to
c.All = args.all
c.Essid = args.essid
c.Format = args.format
c.User_full = args.user_full
# c.Profile_full = args.profile_full
c.Pandas_type = args.pandas_type
c.Index_tweets = args.index_tweets
c.Index_follow = args.index_follow
c.Index_users = args.index_users
c.Debug = args.debug
c.Resume = args.resume
c.Images = args.images
c.Videos = args.videos
c.Media = args.media
c.Replies = args.replies
c.Pandas_clean = args.pandas_clean
c.Proxy_host = args.proxy_host
c.Proxy_port = args.proxy_port
c.Proxy_type = args.proxy_type
c.Tor_control_port = args.tor_control_port
c.Tor_control_password = args.tor_control_password
c.Retweets = args.retweets
c.Custom_query = args.custom_query
c.Popular_tweets = args.popular_tweets
c.Skip_certs = args.skip_certs
c.Hide_output = args.hide_output
c.Native_retweets = args.native_retweets
c.Min_likes = args.min_likes
c.Min_retweets = args.min_retweets
c.Min_replies = args.min_replies
c.Links = args.links
c.Source = args.source
c.Members_list = args.members_list
c.Filter_retweets = args.filter_retweets
c.Translate = args.translate
c.TranslateDest = args.translate_dest
c.Backoff_exponent = args.backoff_exponent
c.Min_wait_time = args.min_wait_time
return c |
Parse arguments
| def options():
""" Parse arguments
"""
ap = argparse.ArgumentParser(prog="twint",
usage="python3 %(prog)s [options]",
description="TWINT - An Advanced Twitter Scraping Tool.")
ap.add_argument("-u", "--username", help="User's Tweets you want to scrape.")
ap.add_argument("-s", "--search", help="Search for Tweets containing this word or phrase.")
ap.add_argument("-g", "--geo", help="Search for geocoded Tweets.")
ap.add_argument("--near", help="Near a specified city.")
ap.add_argument("--location", help="Show user's location (Experimental).", action="store_true")
ap.add_argument("-l", "--lang", help="Search for Tweets in a specific language.")
ap.add_argument("-o", "--output", help="Save output to a file.")
ap.add_argument("-es", "--elasticsearch", help="Index to Elasticsearch.")
ap.add_argument("--year", help="Filter Tweets before specified year.")
ap.add_argument("--since", help="Filter Tweets sent since date (Example: \"2017-12-27 20:30:15\" or 2017-12-27).",
metavar="DATE")
ap.add_argument("--until", help="Filter Tweets sent until date (Example: \"2017-12-27 20:30:15\" or 2017-12-27).",
metavar="DATE")
ap.add_argument("--email", help="Filter Tweets that might have email addresses", action="store_true")
ap.add_argument("--phone", help="Filter Tweets that might have phone numbers", action="store_true")
ap.add_argument("--verified", help="Display Tweets only from verified users (Use with -s).",
action="store_true")
ap.add_argument("--csv", help="Write as .csv file.", action="store_true")
ap.add_argument("--tabs", help="Separate CSV fields with tab characters, not commas.", action="store_true")
ap.add_argument("--json", help="Write as .json file", action="store_true")
ap.add_argument("--hashtags", help="Output hashtags in seperate column.", action="store_true")
ap.add_argument("--cashtags", help="Output cashtags in seperate column.", action="store_true")
ap.add_argument("--userid", help="Twitter user id.")
ap.add_argument("--limit", help="Number of Tweets to pull (Increments of 20).")
ap.add_argument("--count", help="Display number of Tweets scraped at the end of session.",
action="store_true")
ap.add_argument("--stats", help="Show number of replies, retweets, and likes.",
action="store_true")
ap.add_argument("-db", "--database", help="Store Tweets in a sqlite3 database.")
ap.add_argument("--to", help="Search Tweets to a user.", metavar="USERNAME")
ap.add_argument("--all", help="Search all Tweets associated with a user.", metavar="USERNAME")
ap.add_argument("--followers", help="Scrape a person's followers.", action="store_true")
ap.add_argument("--following", help="Scrape a person's follows", action="store_true")
ap.add_argument("--favorites", help="Scrape Tweets a user has liked.", action="store_true")
ap.add_argument("--proxy-type", help="Socks5, HTTP, etc.")
ap.add_argument("--proxy-host", help="Proxy hostname or IP.")
ap.add_argument("--proxy-port", help="The port of the proxy server.")
ap.add_argument("--tor-control-port", help="If proxy-host is set to tor, this is the control port", default=9051)
ap.add_argument("--tor-control-password",
help="If proxy-host is set to tor, this is the password for the control port",
default="my_password")
ap.add_argument("--essid",
help="Elasticsearch Session ID, use this to differentiate scraping sessions.",
nargs="?", default="")
ap.add_argument("--userlist", help="Userlist from list or file.")
ap.add_argument("--retweets",
help="Include user's Retweets (Warning: limited).",
action="store_true")
ap.add_argument("--format", help="Custom output format (See wiki for details).")
ap.add_argument("--user-full",
help="Collect all user information (Use with followers or following only).",
action="store_true")
# I am removing this feature for the time being, because it is no longer required; the default method will do this
# ap.add_argument("--profile-full",
# help="Slow, but effective method of collecting a user's Tweets and RT.",
# action="store_true")
ap.add_argument(
"-tl",
"--timeline",
help="Collects every tweet from a User's Timeline. (Tweets, RTs & Replies)",
action="store_true",
)
ap.add_argument("--translate",
help="Get tweets translated by Google Translate.",
action="store_true")
ap.add_argument("--translate-dest", help="Translate tweet to language (ISO2).",
default="en")
ap.add_argument("--store-pandas", help="Save Tweets in a DataFrame (Pandas) file.")
ap.add_argument("--pandas-type",
help="Specify HDF5 or Pickle (HDF5 as default)", nargs="?", default="HDF5")
ap.add_argument("-it", "--index-tweets",
help="Custom Elasticsearch Index name for Tweets.", nargs="?", default="twinttweets")
ap.add_argument("-if", "--index-follow",
help="Custom Elasticsearch Index name for Follows.",
nargs="?", default="twintgraph")
ap.add_argument("-iu", "--index-users", help="Custom Elasticsearch Index name for Users.",
nargs="?", default="twintuser")
ap.add_argument("--debug",
help="Store information in debug logs", action="store_true")
ap.add_argument("--resume", help="Resume from Tweet ID.", metavar="TWEET_ID")
ap.add_argument("--videos", help="Display only Tweets with videos.", action="store_true")
ap.add_argument("--images", help="Display only Tweets with images.", action="store_true")
ap.add_argument("--media",
help="Display Tweets with only images or videos.", action="store_true")
ap.add_argument("--replies", help="Display replies to a subject.", action="store_true")
ap.add_argument("-pc", "--pandas-clean",
help="Automatically clean Pandas dataframe at every scrape.")
ap.add_argument("-cq", "--custom-query", help="Custom search query.")
ap.add_argument("-pt", "--popular-tweets", help="Scrape popular tweets instead of recent ones.",
action="store_true")
ap.add_argument("-sc", "--skip-certs", help="Skip certs verification, useful for SSC.", action="store_false")
ap.add_argument("-ho", "--hide-output", help="Hide output, no tweets will be displayed.", action="store_true")
ap.add_argument("-nr", "--native-retweets", help="Filter the results for retweets only.", action="store_true")
ap.add_argument("--min-likes", help="Filter the tweets by minimum number of likes.")
ap.add_argument("--min-retweets", help="Filter the tweets by minimum number of retweets.")
ap.add_argument("--min-replies", help="Filter the tweets by minimum number of replies.")
ap.add_argument("--links", help="Include or exclude tweets containing one o more links. If not specified" +
" you will get both tweets that might contain links or not.")
ap.add_argument("--source", help="Filter the tweets for specific source client.")
ap.add_argument("--members-list", help="Filter the tweets sent by users in a given list.")
ap.add_argument("-fr", "--filter-retweets", help="Exclude retweets from the results.", action="store_true")
ap.add_argument("--backoff-exponent", help="Specify a exponent for the polynomial backoff in case of errors.",
type=float, default=3.0)
ap.add_argument("--min-wait-time", type=float, default=15,
help="specifiy a minimum wait time in case of scraping limit error. This value will be adjusted by twint if the value provided does not satisfy the limits constraints")
args = ap.parse_args()
return args |
Main
| def main():
""" Main
"""
args = options()
check(args)
if args.pandas_clean:
storage.panda.clean()
c = initialize(args)
if args.userlist:
c.Query = loadUserList(args.userlist, "search")
if args.pandas_clean:
storage.panda.clean()
if args.favorites:
if args.userlist:
_userlist = loadUserList(args.userlist, "favorites")
for _user in _userlist:
args.username = _user
c = initialize(args)
run.Favorites(c)
else:
run.Favorites(c)
elif args.following:
if args.userlist:
_userlist = loadUserList(args.userlist, "following")
for _user in _userlist:
args.username = _user
c = initialize(args)
run.Following(c)
else:
run.Following(c)
elif args.followers:
if args.userlist:
_userlist = loadUserList(args.userlist, "followers")
for _user in _userlist:
args.username = _user
c = initialize(args)
run.Followers(c)
else:
run.Followers(c)
elif args.retweets: # or args.profile_full:
if args.userlist:
_userlist = loadUserList(args.userlist, "profile")
for _user in _userlist:
args.username = _user
c = initialize(args)
run.Profile(c)
else:
run.Profile(c)
elif args.user_full:
if args.userlist:
_userlist = loadUserList(args.userlist, "userlist")
for _user in _userlist:
args.username = _user
c = initialize(args)
run.Lookup(c)
else:
run.Lookup(c)
elif args.timeline:
run.Profile(c)
else:
run.Search(c) |
Extract mentions from tweet
| def _get_mentions(tw):
"""Extract mentions from tweet
"""
logme.debug(__name__ + ':get_mentions')
try:
mentions = [
{
'screen_name': _mention['screen_name'],
'name': _mention['name'],
'id': _mention['id_str'],
} for _mention in tw['entities']['user_mentions']
if tw['display_text_range'][0] < _mention['indices'][0]
]
except KeyError:
mentions = []
return mentions |
Insert spaces before embedded links and strip newlines from tweet text
| def getText(tw):
"""Insert spaces before embedded links and strip newlines from tweet text
"""
logme.debug(__name__ + ':getText')
text = tw['full_text']
text = text.replace("http", " http")
text = text.replace("pic.twitter", " pic.twitter")
text = text.replace("\n", " ")
return text |
Create Tweet object
| def Tweet(tw, config):
"""Create Tweet object
"""
logme.debug(__name__ + ':Tweet')
t = tweet()
t.id = int(tw['id_str'])
t.id_str = tw["id_str"]
t.conversation_id = tw["conversation_id_str"]
# parsing date to user-friendly format
_dt = tw['created_at']
_dt = datetime.strptime(_dt, '%a %b %d %H:%M:%S %z %Y')
_dt = utc_to_local(_dt)
t.datetime = str(_dt.strftime(Tweet_formats['datetime']))
# datestamp is of the format year-month-day
t.datestamp = _dt.strftime(Tweet_formats['datestamp'])
t.timestamp = _dt.strftime(Tweet_formats['timestamp'])
t.user_id = int(tw["user_id_str"])
t.user_id_str = tw["user_id_str"]
t.username = tw["user_data"]['screen_name']
t.name = tw["user_data"]['name']
t.place = tw['geo'] if 'geo' in tw and tw['geo'] else ""
t.timezone = strftime("%z", localtime())
t.mentions = _get_mentions(tw)
t.reply_to = _get_reply_to(tw)
try:
t.urls = [_url['expanded_url'] for _url in tw['entities']['urls']]
except KeyError:
t.urls = []
try:
t.photos = [_img['media_url_https'] for _img in tw['entities']['media'] if _img['type'] == 'photo' and
_img['expanded_url'].find('/photo/') != -1]
except KeyError:
t.photos = []
try:
t.video = 1 if len(tw['extended_entities']['media']) else 0
except KeyError:
t.video = 0
try:
t.thumbnail = tw['extended_entities']['media'][0]['media_url_https']
except KeyError:
t.thumbnail = ''
t.tweet = getText(tw)
t.lang = tw['lang']
try:
t.hashtags = [hashtag['text'] for hashtag in tw['entities']['hashtags']]
except KeyError:
t.hashtags = []
try:
t.cashtags = [cashtag['text'] for cashtag in tw['entities']['symbols']]
except KeyError:
t.cashtags = []
t.replies_count = tw['reply_count']
t.retweets_count = tw['retweet_count']
t.likes_count = tw['favorite_count']
t.link = f"https://twitter.com/{t.username}/status/{t.id}"
try:
if 'user_rt_id' in tw['retweet_data']:
t.retweet = True
t.retweet_id = tw['retweet_data']['retweet_id']
t.retweet_date = tw['retweet_data']['retweet_date']
t.user_rt = tw['retweet_data']['user_rt']
t.user_rt_id = tw['retweet_data']['user_rt_id']
except KeyError:
t.retweet = False
t.retweet_id = ''
t.retweet_date = ''
t.user_rt = ''
t.user_rt_id = ''
try:
t.quote_url = tw['quoted_status_permalink']['expanded'] if tw['is_quote_status'] else ''
except KeyError:
# means that the quoted tweet has been deleted
t.quote_url = 0
t.near = config.Near if config.Near else ""
t.geo = config.Geo if config.Geo else ""
t.source = config.Source if config.Source else ""
t.translate = ''
t.trans_src = ''
t.trans_dest = ''
if config.Translate:
try:
ts = translator.translate(text=t.tweet, dest=config.TranslateDest)
t.translate = ts.text
t.trans_src = ts.src
t.trans_dest = ts.dest
# ref. https://github.com/SuniTheFish/ChainTranslator/blob/master/ChainTranslator/__main__.py#L31
except ValueError as e:
logme.debug(__name__ + ':Tweet:translator.translate:' + str(e))
raise Exception("Invalid destination language: {} / Tweet: {}".format(config.TranslateDest, t.tweet))
return t |
Benchmark and return information regarding the relative performance of a
protocol which does not use the buffering mixin and a protocol which
does.
@type scale: C{int}
@param scale: A multiplier to the amount of work to perform
@return: A Deferred which will fire with a dictionary mapping each of
the two unicode strings C{u'buffered'} and C{u'unbuffered'} to
dictionaries describing the performance of a protocol of each type.
These value dictionaries will map the unicode strings C{u'connected'}
and C{u'disconnected'} to the times at which each of those events
occurred and C{u'duration'} to the difference between these two values. | def benchmark(scale=1):
"""
Benchmark and return information regarding the relative performance of a
protocol which does not use the buffering mixin and a protocol which
does.
@type scale: C{int}
@param scale: A multiplier to the amount of work to perform
@return: A Deferred which will fire with a dictionary mapping each of
the two unicode strings C{u'buffered'} and C{u'unbuffered'} to
dictionaries describing the performance of a protocol of each type.
These value dictionaries will map the unicode strings C{u'connected'}
and C{u'disconnected'} to the times at which each of those events
occurred and C{u'duration'} to the difference between these two values.
"""
overallResult = {}
byteCount = 1024
bufferedDeferred = _benchmarkBuffered(byteCount * scale)
def didBuffered(bufferedResult):
overallResult["buffered"] = bufferedResult
unbufferedDeferred = _benchmarkUnbuffered(byteCount * scale)
def didUnbuffered(unbufferedResult):
overallResult["unbuffered"] = unbufferedResult
return overallResult
unbufferedDeferred.addCallback(didUnbuffered)
return unbufferedDeferred
bufferedDeferred.addCallback(didBuffered)
return bufferedDeferred |
Perform a single benchmark run, starting and stopping the reactor and
logging system as necessary. | def main(args=None):
"""
Perform a single benchmark run, starting and stopping the reactor and
logging system as necessary.
"""
startLogging(stdout)
options = BufferingBenchmark()
options.parseOptions(args)
d = benchmark(options["scale"])
def cbBenchmark(result):
pprint(result)
def ebBenchmark(err):
print(err.getTraceback())
d.addCallbacks(cbBenchmark, ebBenchmark)
def stopReactor(ign):
reactor.stop()
d.addBoth(stopReactor)
reactor.run() |
A decorator for benchmark functions that measure a single iteration
count. Registers the function with the given iteration count to the global
benchmarkFuncs list | def benchmarkFunc(iter, args=()):
"""
A decorator for benchmark functions that measure a single iteration
count. Registers the function with the given iteration count to the global
benchmarkFuncs list
"""
def decorator(func):
benchmarkFuncs.append((func, args, iter))
return func
return decorator |
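A hypothetical registration using the decorator (this assumes the module-level `benchmarkFuncs` list the docstring refers to):
```python
@benchmarkFunc(10000)
def noop():
    pass
# benchmarkFuncs now contains the entry (noop, (), 10000)
```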
A decorator for benchmark functions that measure multiple iteration
counts. Registers the function with the given iteration count to the global
benchmarkFuncs list. | def benchmarkNFunc(iter, ns):
"""
A decorator for benchmark functions that measure multiple iteration
counts. Registers the function with the given iteration count to the global
benchmarkFuncs list.
"""
def decorator(func):
for n in ns:
benchmarkFuncs.append((func, (n,), iter))
return func
return decorator |
Only create a deferred | def instantiate():
"""
Only create a deferred
"""
d = defer.Deferred() |
Create a deferred and give it a normal result | def instantiateShootCallback():
"""
Create a deferred and give it a normal result
"""
d = defer.Deferred()
d.callback(1) |
Create a deferred and give it an exception result. To avoid Unhandled
Errors, also register an errback that eats the error | def instantiateShootErrback():
"""
Create a deferred and give it an exception result. To avoid Unhandled
Errors, also register an errback that eats the error
"""
d = defer.Deferred()
try:
1 / 0
except BaseException:
d.errback()
d.addErrback(lambda x: None) |
Creates a deferred and adds a trivial callback/errback/both to it the given
number of times. | def instantiateAddCallbacksNoResult(n):
"""
Creates a deferred and adds a trivial callback/errback/both to it the given
number of times.
"""
d = defer.Deferred()
def f(result):
return result
for i in range(n):
d.addCallback(f)
d.addErrback(f)
d.addBoth(f)
d.addCallbacks(f, f) |
Create a deferred and adds a trivial callback/errback/both to it the given
number of times, and then shoots a result through all of the callbacks. | def instantiateAddCallbacksBeforeResult(n):
"""
Create a deferred and adds a trivial callback/errback/both to it the given
number of times, and then shoots a result through all of the callbacks.
"""
d = defer.Deferred()
def f(result):
return result
for i in range(n):
d.addCallback(f)
d.addErrback(f)
d.addBoth(f)
d.addCallbacks(f)
d.callback(1) |
Create a deferred, shoots it and then adds a trivial callback/errback/both
to it the given number of times. The result is processed through the
callbacks as they are added. | def instantiateAddCallbacksAfterResult(n):
"""
Create a deferred, shoots it and then adds a trivial callback/errback/both
to it the given number of times. The result is processed through the
callbacks as they are added.
"""
d = defer.Deferred()
def f(result):
return result
d.callback(1)
for i in range(n):
d.addCallback(f)
d.addErrback(f)
d.addBoth(f)
d.addCallbacks(f) |
Adds the given number of callbacks/errbacks/both to a deferred while it is
paused, and unpauses it, triggering the processing of the value through the
callbacks. | def pauseUnpause(n):
"""
Adds the given number of callbacks/errbacks/both to a deferred while it is
paused, and unpauses it, triggering the processing of the value through the
callbacks.
"""
d = defer.Deferred()
def f(result):
return result
d.callback(1)
d.pause()
for i in range(n):
d.addCallback(f)
d.addErrback(f)
d.addBoth(f)
d.addCallbacks(f)
d.unpause() |
Run all of the benchmarks registered in the benchmarkFuncs list | def benchmark():
"""
Run all of the benchmarks registered in the benchmarkFuncs list
"""
print(defer.Deferred.__module__)
for func, args, iter in benchmarkFuncs:
print(func.__name__, args, timeit(func, iter, *args)) |
L{LoopingCall} should not take long to skip a lot of iterations. | def test_performance():
"""
L{LoopingCall} should not take long to skip a lot of iterations.
"""
clock = task.Clock()
call = task.LoopingCall(lambda: None)
call.clock = clock
call.start(0.1)
clock.advance(1000000) |
timeit(func, iter=1000, *args, **kwargs) -> elapsed time
calls func iter times with args and kwargs, returns time elapsed | def timeit(func, iter=1000, *args, **kwargs):
"""
timeit(func, iter=1000, *args, **kwargs) -> elapsed time
calls func iter times with args and kwargs, returns time elapsed
"""
from time import time as currentTime
r = range(iter)
t = currentTime()
for i in r:
func(*args, **kwargs)
return currentTime() - t |
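For example, timing 10000 no-op calls:
```python
def noop():
    pass

elapsed = timeit(noop, iter=10000)
print(f"{elapsed:.6f} seconds for 10000 calls")
```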
Run a simple echo pb server to test the checker. It defines a custom query
for dealing with sqlite special quoting, but otherwise it's a
straightforward use of the object.
You can test it running C{pbechoclient.py}. | def main():
"""
Run a simple echo pb server to test the checker. It defines a custom query
for dealing with sqlite special quoting, but otherwise it's a
straightforward use of the object.
You can test it running C{pbechoclient.py}.
"""
import sys
from twisted.python import log
log.startLogging(sys.stdout)
import os
if os.path.isfile("testcred"):
os.remove("testcred")
from twisted.enterprise import adbapi
pool = adbapi.ConnectionPool("pysqlite2.dbapi2", "testcred")
# Create the table that will be used
query1 = """CREATE TABLE user (
username string,
password string
)"""
# Insert a test user
query2 = """INSERT INTO user VALUES ('guest', 'guest')"""
def cb(res):
pool.runQuery(query2)
pool.runQuery(query1).addCallback(cb)
checker = DBCredentialsChecker(
pool.runQuery, query="SELECT username, password FROM user WHERE username = ?"
)
import pbecho
from twisted.cred.portal import Portal
from twisted.spread import pb
portal = Portal(pbecho.SimpleRealm())
portal.registerChecker(checker)
reactor.listenTCP(pb.portno, pb.PBServerFactory(portal)) |
This runs the protocol on port 8000 | def main():
"""This runs the protocol on port 8000"""
factory = protocol.ServerFactory()
factory.protocol = Echo
reactor.listenTCP(8000, factory)
reactor.run() |
Return true if user is a valid user, false otherwise | def synchronousIsValidUser(user):
"""
Return true if user is a valid user, false otherwise
"""
return user in ["Alice", "Angus", "Agnes"] |
Catch-all errback which simply logs the failure. This isn't expected to
be invoked in the normal case for this example. | def error(why, msg):
"""
Catch-all errback which simply logs the failure. This isn't expected to
be invoked in the normal case for this example.
"""
err(why, msg) |
Login callback which invokes the remote "foo" method on the perspective
which the server returned. | def connected(perspective):
"""
Login callback which invokes the remote "foo" method on the perspective
which the server returned.
"""
print("got perspective1 ref:", perspective)
print("asking it to foo(13)")
return perspective.callRemote("foo", 13) |
Callback invoked when both logins and method calls have finished to shut
down the reactor so the example exits. | def finished(ignored):
"""
Callback invoked when both logins and method calls have finished to shut
down the reactor so the example exits.
"""
reactor.stop() |
Connect to a PB server running on port 8800 on localhost and log in to
it, both anonymously and using a username/password it will recognize. | def main():
"""
Connect to a PB server running on port 8800 on localhost and log in to
it, both anonymously and using a username/password it will recognize.
"""
startLogging(stdout)
factory = PBClientFactory()
reactor.connectTCP("localhost", 8800, factory)
anonymousLogin = factory.login(Anonymous())
anonymousLogin.addCallback(connected)
anonymousLogin.addErrback(error, "Anonymous login failed")
usernameLogin = factory.login(UsernamePassword("user1", "pass1"))
usernameLogin.addCallback(connected)
usernameLogin.addErrback(error, "Username/password login failed")
bothDeferreds = gatherResults([anonymousLogin, usernameLogin])
bothDeferreds.addCallback(finished)
reactor.run() |
Create a PB server using MyRealm and run it on port 8800. | def main():
"""
Create a PB server using MyRealm and run it on port 8800.
"""
startLogging(stdout)
p = Portal(MyRealm())
# Here the username/password checker is registered.
c1 = InMemoryUsernamePasswordDatabaseDontUse(user1="pass1", user2="pass2")
p.registerChecker(c1)
# Here the anonymous checker is registered.
c2 = AllowAnonymousAccess()
p.registerChecker(c2)
reactor.listenTCP(8800, PBServerFactory(p))
reactor.run() |
Return a service that will be attached to the application. | def makeService(config):
"""Return a service that will be attached to the application."""
if config["file"]: # If I was given a "file" option...
# Read quotes from a file, selecting a random one each time,
quoter = quoters.FortuneQuoter([config["file"]])
else: # otherwise,
# read a single quote from the command line (or use the default).
quoter = quoters.StaticQuoter(config["static"])
port = int(config["port"]) # TCP port to listen on
factory = quoteproto.QOTDFactory(quoter) # here we create a QOTDFactory
# Finally, set up our factory, with its custom quoter, to create QOTD
# protocol instances when events arrive on the specified port.
return internet.TCPServer(port, factory) |
Initial callback - invoked after the server sends us its greet message. | def cbServerGreeting(proto, username, password):
"""
Initial callback - invoked after the server sends us its greet message.
"""
# Hook up stdio
tp = TrivialPrompter()
stdio.StandardIO(tp)
# And make it easily accessible
proto.prompt = tp.prompt
proto.display = tp.display
# Try to authenticate securely
return (
proto.authenticate(password)
.addCallback(cbAuthentication, proto)
.addErrback(ebAuthentication, proto, username, password)
) |
Fallback error-handler. If anything goes wrong, log it and quit. | def ebConnection(reason):
"""
Fallback error-handler. If anything goes wrong, log it and quit.
"""
log.startLogging(sys.stdout)
log.err(reason)
return reason |
Callback after authentication has succeeded.
Lists a bunch of mailboxes. | def cbAuthentication(result, proto):
"""
Callback after authentication has succeeded.
Lists a bunch of mailboxes.
"""
return proto.list("", "*").addCallback(cbMailboxList, proto) |
Errback invoked when authentication fails.
If it failed because no SASL mechanisms match, offer the user the choice
of logging in insecurely.
If you are trying to connect to your Gmail account, you will be here! | def ebAuthentication(failure, proto, username, password):
"""
Errback invoked when authentication fails.
If it failed because no SASL mechanisms match, offer the user the choice
of logging in insecurely.
If you are trying to connect to your Gmail account, you will be here!
"""
failure.trap(imap4.NoSupportedAuthentication)
return proto.prompt(
"No secure authentication available. Login insecurely? (y/N) "
).addCallback(cbInsecureLogin, proto, username, password) |
Callback for "insecure-login" prompt. | def cbInsecureLogin(result, proto, username, password):
"""
Callback for "insecure-login" prompt.
"""
if result.lower() == "y":
# If they said yes, do it.
return proto.login(username, password).addCallback(cbAuthentication, proto)
return defer.fail(Exception("Login failed for security reasons.")) |
Callback invoked when a list of mailboxes has been retrieved. | def cbMailboxList(result, proto):
"""
Callback invoked when a list of mailboxes has been retrieved.
"""
result = [e[2] for e in result]
s = "\n".join(["%d. %s" % (n + 1, m) for (n, m) in zip(range(len(result)), result)])
if not s:
return defer.fail(Exception("No mailboxes exist on server!"))
return proto.prompt(s + "\nWhich mailbox? [1] ").addCallback(
cbPickMailbox, proto, result
) |
When the user selects a mailbox, "examine" it. | def cbPickMailbox(result, proto, mboxes):
"""
When the user selects a mailbox, "examine" it.
"""
mbox = mboxes[int(result or "1") - 1]
return proto.examine(mbox).addCallback(cbExamineMbox, proto) |
Callback invoked when examine command completes.
Retrieve the subject header of every message in the mailbox. | def cbExamineMbox(result, proto):
"""
Callback invoked when examine command completes.
Retrieve the subject header of every message in the mailbox.
"""
return proto.fetchSpecific(
"1:*",
headerType="HEADER.FIELDS",
headerArgs=["SUBJECT"],
).addCallback(cbFetch, proto) |
Finally, display headers. | def cbFetch(result, proto):
"""
Finally, display headers.
"""
if result:
keys = sorted(result)
for k in keys:
proto.display(f"{k} {result[k][0][2]}")
else:
print("Hey, an empty mailbox!")
return proto.logout() |
Close the connection when we finish everything. | def cbClose(result):
"""
Close the connection when we finish everything.
"""
from twisted.internet import reactor
reactor.stop() |
Send email to one or more addresses. | def send(message, subject, sender, recipients, host):
"""
Send email to one or more addresses.
"""
msg = MIMEText(message)
msg["Subject"] = subject
msg["From"] = sender
msg["To"] = ", ".join(recipients)
dfr = sendmail(host, sender, recipients, msg.as_string())
def success(r):
reactor.stop()
def error(e):
print(e)
reactor.stop()
dfr.addCallback(success)
dfr.addErrback(error)
reactor.run() |
@param authenticationUsername: The username with which to authenticate.
@param authenticationSecret: The password with which to authenticate.
@param fromAddress: The SMTP reverse path (ie, MAIL FROM)
@param toAddress: The SMTP forward path (ie, RCPT TO)
@param messageFile: A file-like object containing the headers and body of
the message to send.
@param smtpHost: The MX host to which to connect.
@param smtpPort: The port number to which to connect.
@return: A Deferred which will be called back when the message has been
sent or which will errback if it cannot be sent. | def sendmail(
authenticationUsername,
authenticationSecret,
fromAddress,
toAddress,
messageFile,
smtpHost,
smtpPort=25,
):
"""
@param authenticationUsername: The username with which to authenticate.
@param authenticationSecret: The password with which to authenticate.
@param fromAddress: The SMTP reverse path (ie, MAIL FROM)
@param toAddress: The SMTP forward path (ie, RCPT TO)
@param messageFile: A file-like object containing the headers and body of
the message to send.
@param smtpHost: The MX host to which to connect.
@param smtpPort: The port number to which to connect.
@return: A Deferred which will be called back when the message has been
sent or which will errback if it cannot be sent.
"""
# Create a TLS context factory.
contextFactory = optionsForClientTLS(smtpHost.decode("utf8"))
resultDeferred = Deferred()
senderFactory = ESMTPSenderFactory(
authenticationUsername,
authenticationSecret,
fromAddress,
toAddress,
messageFile,
resultDeferred,
contextFactory=contextFactory,
)
reactor.connectTCP(smtpHost, smtpPort, senderFactory)
return resultDeferred |
Called when the message has been sent.
Report success to the user and then stop the reactor. | def cbSentMessage(result):
"""
Called when the message has been sent.
Report success to the user and then stop the reactor.
"""
print("Message sent")
reactor.stop() |
Called if the message cannot be sent.
Report the failure to the user and then stop the reactor. | def ebSentMessage(err):
"""
Called if the message cannot be sent.
Report the failure to the user and then stop the reactor.
"""
err.printTraceback()
reactor.stop() |
Parse arguments and send an email based on them. | def main(args=None):
"""
Parse arguments and send an email based on them.
"""
o = SendmailOptions()
try:
o.parseOptions(args)
except UsageError as e:
raise SystemExit(e)
else:
from twisted.python import log
log.startLogging(sys.stdout)
result = sendmail(
o["username"],
o["password"],
o["from-address"],
o["to-address"],
o["message"],
o["smtp-host"],
o["smtp-port"],
)
result.addCallbacks(cbSentMessage, ebSentMessage)
reactor.run() |
Print the SRV records for the domainname or an error message if no
SRV records were found. | def printResult(records, domainname):
"""
Print the SRV records for the domainname or an error message if no
SRV records were found.
"""
answers, authority, additional = records
if answers:
sys.stdout.write(
domainname + " IN \n " + "\n ".join(str(x.payload) for x in answers) + "\n"
)
else:
sys.stderr.write(f"ERROR: No SRV records found for name {domainname!r}\n") |
Print a friendly error message if the domainname could not be
resolved. | def printError(failure, domainname):
"""
Print a friendly error message if the domainname could not be
resolved.
"""
failure.trap(error.DNSNameError)
sys.stderr.write(f"ERROR: domain name not found {domainname!r}\n") |
Print the IP address or an error message if an IP address was not
found. | def printResult(address, hostname):
"""
Print the IP address or an error message if an IP address was not
found.
"""
if address:
sys.stdout.write(address + "\n")
else:
sys.stderr.write(f"ERROR: No IP addresses found for name {hostname!r}\n") |
Print a friendly error message if the hostname could not be
resolved. | def printError(failure, hostname):
"""
Print a friendly error message if the hostname could not be
resolved.
"""
failure.trap(error.DNSNameError)
sys.stderr.write(f"ERROR: hostname not found {hostname!r}\n") |
Return a reverse domain name for the given IPv4 address. | def reverseNameFromIPv4Address(address):
"""
Return a reverse domain name for the given IPv4 address.
"""
tokens = list(reversed(address.split("."))) + ["in-addr", "arpa", ""]
return ".".join(tokens) |
Return a reverse domain name for the given IPv6 address. | def reverseNameFromIPv6Address(address):
"""
Return a reverse domain name for the given IPv6 address.
"""
# Expand addresses that are in compressed format eg ::1
fullHex = "".join(
f"{ord(c):02x}" for c in socket.inet_pton(socket.AF_INET6, address)
)
tokens = list(reversed(fullHex)) + ["ip6", "arpa", ""]
return ".".join(tokens) |
Return a reverse domain name for the given IP address. | def reverseNameFromIPAddress(address):
"""
Return a reverse domain name for the given IP address.
"""
try:
socket.inet_pton(socket.AF_INET, address)
except OSError:
return reverseNameFromIPv6Address(address)
else:
return reverseNameFromIPv4Address(address) |
Print a comma separated list of reverse domain names and associated pointer
records. | def printResult(result):
"""
Print a comma separated list of reverse domain names and associated pointer
records.
"""
answers, authority, additional = result
if answers:
sys.stdout.write(
", ".join(f"{a.name.name} IN {a.payload}" for a in answers) + "\n"
) |
Print a summary showing the total number of responses and queries. | def printSummary(results):
"""
Print a summary showing the total number of responses and queries.
"""
statuses = list(zip(*results))[0]  # zip objects are not subscriptable in Python 3
sys.stdout.write(
f"{statuses.count(True)} responses to {len(statuses)} queries" + "\n"
) |
Extract only the answer records and return them as a neatly
formatted string beneath the given heading. | def formatRecords(records, heading):
"""
Extract only the answer records and return them as a neatly
formatted string beneath the given heading.
"""
answers, authority, additional = records
lines = ["# " + heading]
for a in answers:
line = [
a.name,
dns.QUERY_CLASSES.get(a.cls, "UNKNOWN (%d)" % (a.cls,)),
a.payload,
]
lines.append(" ".join(str(word) for word in line))
return "\n".join(line for line in lines) |
Print the formatted results for each DNS record type. | def printResults(results, domainname):
"""
Print the formatted results for each DNS record type.
"""
sys.stdout.write(f"# Domain Summary for {domainname!r}\n")
sys.stdout.write("\n\n".join(results) + "\n") |
Print a friendly error message if the hostname could not be
resolved. | def printError(failure, domainname):
"""
Print a friendly error message if the hostname could not be
resolved.
"""
failure.trap(defer.FirstError)
failure = failure.value.subFailure
failure.trap(error.DNSNameError)
sys.stderr.write(f"ERROR: domain name not found {domainname!r}\n") |
Run the server. | def main():
"""
Run the server.
"""
factory = server.DNSServerFactory(
clients=[DynamicResolver(), client.Resolver(resolv="/etc/resolv.conf")]
)
protocol = dns.DNSDatagramProtocol(controller=factory)
reactor.listenUDP(10053, protocol)
reactor.listenTCP(10053, factory)
reactor.run() |
Run the server. | def main():
"""
Run the server.
"""
factory = server.DNSServerFactory(
clients=[client.Resolver(resolv="/etc/resolv.conf")]
)
protocol = dns.DNSDatagramProtocol(controller=factory)
reactor.listenUDP(10053, protocol)
reactor.listenTCP(10053, factory)
reactor.run() |
We create a custom UserAgent and send a GET request to a web server. | def main(reactor, url):
"""
We create a custom UserAgent and send a GET request to a web server.
"""
url = url.encode("ascii")
userAgent = f"Twisted/{version.short()} (httpclient.py)".encode("ascii")
agent = Agent(reactor)
d = agent.request(b"GET", url, Headers({b"user-agent": [userAgent]}))
def cbResponse(response):
"""
Prints out the response returned by the web server.
"""
pprint(vars(response))
proto = WriteToStdout()
if response.length is not UNKNOWN_LENGTH:
print("The response body will consist of", response.length, "bytes.")
else:
print("The response body length is unknown.")
response.deliverBody(proto)
return proto.onConnLost
d.addCallback(cbResponse)
d.addErrback(log.err)
d.addBoth(lambda ign: reactor.callWhenRunning(reactor.stop))
reactor.run() |
Quit succeeded, shut down the reactor. | def quitServer(ignored):
"""Quit succeeded, shut down the reactor."""
reactor.stop() |
Left the group successfully, quit the server. | def leftGroup(ignored, avatar):
"""Left the group successfully, quit the server."""
q = avatar.quit()
q.addCallback(quitServer)
return q |
Sent the message successfully, leave the group. | def sentMessage(ignored, group, avatar):
"""Sent the message successfully, leave the group."""
l = group.leave()
l.addCallback(leftGroup, avatar)
return l |
Joined the group successfully, send a stupid message. | def joinedGroup(group, avatar):
"""Joined the group successfully, send a stupid message."""
s = group.send({"text": "Hello, monkeys"})
s.addCallback(sentMessage, group, avatar)
return s |
Logged in successfully, join a group. | def loggedIn(avatar, group):
"""Logged in successfully, join a group."""
j = avatar.join(group)
j.addCallback(joinedGroup, avatar)
return j |
Something went awry, log it and shutdown. | def errorOccurred(err):
"""Something went awry, log it and shutdown."""
log.err(err)
try:
reactor.stop()
except RuntimeError:
pass |
Create a mind and factory and set things in motion. | def run(host, port, username, password, group):
"""Create a mind and factory and set things in motion."""
m = DemoMind()
f = pb.PBClientFactory()
f.unsafeTracebacks = True
l = f.login(credentials.UsernamePassword(username, password), m)
l.addCallback(loggedIn, group)
l.addErrback(errorOccurred)
reactor.connectTCP(host, int(port), f) |
Set up logging, have the real main function run, and start the reactor. | def main():
"""
Set up logging, have the real main function run, and start the reactor.
"""
if len(sys.argv) != 6:
raise SystemExit(f"Usage: {sys.argv[0]} host port username password group")
log.startLogging(sys.stdout)
host, port, username, password, group = sys.argv[1:]
port = int(port)
# sys.argv values are already str in Python 3, so no decoding is needed
reactor.callWhenRunning(run, host, port, username, password, group)
reactor.run() |
Connect to the given Jabber ID and return a L{Deferred} which will be
called back when the connection is over.
@param reactor: The reactor to use for the connection.
@param jid: A L{JID} to connect to.
@param secret: A C{str} | def main(reactor, jid, secret):
"""
Connect to the given Jabber ID and return a L{Deferred} which will be
called back when the connection is over.
@param reactor: The reactor to use for the connection.
@param jid: A L{JID} to connect to.
@param secret: A C{str}
"""
return Client(reactor, JID(jid), secret).finished |
Compute all the possible loadable plugins, while loading as few as
possible and hitting the filesystem as little as possible.
@param module: a Python module object. This represents a package to search
for plugins.
@return: a dictionary mapping module names to L{CachedDropin} instances. | def getCache(module):
"""
Compute all the possible loadable plugins, while loading as few as
possible and hitting the filesystem as little as possible.
@param module: a Python module object. This represents a package to search
for plugins.
@return: a dictionary mapping module names to L{CachedDropin} instances.
"""
allCachesCombined = {}
mod = getModule(module.__name__)
# don't want to walk deep, only immediate children.
buckets = {}
# Fill buckets with modules by related entry on the given package's
# __path__. There's an abstraction inversion going on here, because this
# information is already represented internally in twisted.python.modules,
# but it's simple enough that I'm willing to live with it. If anyone else
# wants to fix up this iteration so that it's one path segment at a time,
# be my guest. --glyph
for plugmod in mod.iterModules():
fpp = plugmod.filePath.parent()
if fpp not in buckets:
buckets[fpp] = []
bucket = buckets[fpp]
bucket.append(plugmod)
for pseudoPackagePath, bucket in buckets.items():
dropinPath = pseudoPackagePath.child("dropin.cache")
try:
lastCached = dropinPath.getModificationTime()
with dropinPath.open("r") as f:
dropinDotCache = pickle.load(f)
except BaseException:
dropinDotCache = {}
lastCached = 0
needsWrite = False
existingKeys = {}
for pluginModule in bucket:
pluginKey = pluginModule.name.split(".")[-1]
existingKeys[pluginKey] = True
if (pluginKey not in dropinDotCache) or (
pluginModule.filePath.getModificationTime() >= lastCached
):
needsWrite = True
try:
provider = pluginModule.load()
except BaseException:
# dropinDotCache.pop(pluginKey, None)
log.err()
else:
entry = _generateCacheEntry(provider)
dropinDotCache[pluginKey] = entry
# Make sure that the cache doesn't contain any stale plugins.
for pluginKey in list(dropinDotCache.keys()):
if pluginKey not in existingKeys:
del dropinDotCache[pluginKey]
needsWrite = True
if needsWrite:
try:
dropinPath.setContent(pickle.dumps(dropinDotCache))
except OSError as e:
log.msg(
format=(
"Unable to write to plugin cache %(path)s: error "
"number %(errno)d"
),
path=dropinPath.path,
errno=e.errno,
)
except BaseException:
log.err(None, "Unexpected error while writing cache file")
allCachesCombined.update(dropinDotCache)
return allCachesCombined |
Retrieve all plugins implementing the given interface beneath the given module.
@param interface: An interface class. Only plugins which implement this
interface will be returned.
@param package: A package beneath which plugins are installed. For
most uses, the default value is correct.
@return: An iterator of plugins. | def getPlugins(
interface: Type[_TInterface], package: Optional[types.ModuleType] = None
) -> Iterable[_TInterface]:
"""
Retrieve all plugins implementing the given interface beneath the given module.
@param interface: An interface class. Only plugins which implement this
interface will be returned.
@param package: A package beneath which plugins are installed. For
most uses, the default value is correct.
@return: An iterator of plugins.
"""
if package is None:
package = _pluginsPackage()
allDropins = getCache(package)
for key, dropin in allDropins.items():
for plugin in dropin.plugins:
try:
adapted = interface(plugin, None)
except BaseException:
log.err()
else:
if adapted is not None:
yield adapted |
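For example, enumerating the reactor-installer plugins shipped with Twisted (the same pattern C{getReactorTypes} uses later in this file):

from twisted.plugin import getPlugins
from twisted.application.reactors import IReactorInstaller

for installer in getPlugins(IReactorInstaller):
    print(installer.shortName, installer.description)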
Return a list of additional directories which should be searched for
modules to be included as part of the named plugin package.
@type name: C{str}
@param name: The fully-qualified Python name of a plugin package, eg
C{'twisted.plugins'}.
@rtype: C{list} of C{str}
@return: The absolute paths to other directories which may contain plugin
modules for the named plugin package. | def pluginPackagePaths(name):
"""
Return a list of additional directories which should be searched for
modules to be included as part of the named plugin package.
@type name: C{str}
@param name: The fully-qualified Python name of a plugin package, eg
C{'twisted.plugins'}.
@rtype: C{list} of C{str}
@return: The absolute paths to other directories which may contain plugin
modules for the named plugin package.
"""
package = name.split(".")
# Note that this may include directories which do not exist. It may be
# preferable to remove such directories at this point, rather than allow
# them to be searched later on.
#
# Note as well that only '__init__.py' will be considered to make a
# directory a package (and thus exclude it from this list). This means
# that if you create a master plugin package which has some other kind of
# __init__ (eg, __init__.pyc) it will be incorrectly treated as a
# supplementary plugin directory.
return [
os.path.abspath(os.path.join(x, *package))
for x in sys.path
if not os.path.exists(os.path.join(x, *package + ["__init__.py"]))
] |
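A quick illustration; any C{sys.path} entry that already holds an C{__init__.py} for the package is filtered out:

for path in pluginPackagePaths("twisted.plugins"):
    # Absolute candidate directories for supplementary plugin modules.
    print(path)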
Start the reactor, using profiling if specified by the configuration, and
log any error happening in the process.
@param config: configuration of the twistd application.
@type config: L{ServerOptions}
@param oldstdout: initial value of C{sys.stdout}.
@type oldstdout: C{file}
@param oldstderr: initial value of C{sys.stderr}.
@type oldstderr: C{file}
@param profiler: object used to run the reactor with profiling.
@type profiler: L{AppProfiler}
@param reactor: The reactor to use. If L{None}, the global reactor will
be used. | def runReactorWithLogging(config, oldstdout, oldstderr, profiler=None, reactor=None):
"""
Start the reactor, using profiling if specified by the configuration, and
log any error happening in the process.
@param config: configuration of the twistd application.
@type config: L{ServerOptions}
@param oldstdout: initial value of C{sys.stdout}.
@type oldstdout: C{file}
@param oldstderr: initial value of C{sys.stderr}.
@type oldstderr: C{file}
@param profiler: object used to run the reactor with profiling.
@type profiler: L{AppProfiler}
@param reactor: The reactor to use. If L{None}, the global reactor will
be used.
"""
if reactor is None:
from twisted.internet import reactor
try:
if config["profile"]:
if profiler is not None:
profiler.run(reactor)
elif config["debug"]:
sys.stdout = oldstdout
sys.stderr = oldstderr
if runtime.platformType == "posix":
signal.signal(signal.SIGUSR2, lambda *args: pdb.set_trace())
signal.signal(signal.SIGINT, lambda *args: pdb.set_trace())
fixPdb()
pdb.runcall(reactor.run)
else:
reactor.run()
except BaseException:
close = False
if config["nodaemon"]:
file = oldstdout
else:
file = open("TWISTD-CRASH.log", "a")
close = True
try:
traceback.print_exc(file=file)
file.flush()
finally:
if close:
file.close() |
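A minimal sketch of invoking it directly, assuming a dict-like config carrying the three keys the function reads:

import sys

config = {"profile": False, "debug": False, "nodaemon": True}
# Runs the global reactor; any crash is printed to stdout since nodaemon is set.
runReactorWithLogging(config, sys.stdout, sys.stderr)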
Force the application to terminate with the specified signal by replacing
the signal handler with the default and sending the signal to ourselves.
@param sig: Signal to use to terminate the process with C{os.kill}.
@type sig: C{int} | def _exitWithSignal(sig):
"""
Force the application to terminate with the specified signal by replacing
the signal handler with the default and sending the signal to ourselves.
@param sig: Signal to use to terminate the process with C{os.kill}.
@type sig: C{int}
"""
signal.signal(sig, signal.SIG_DFL)
os.kill(os.getpid(), sig) |
@return: the argument, or the global reactor if the argument is L{None}. | def _maybeGlobalReactor(maybeReactor):
"""
@return: the argument, or the global reactor if the argument is L{None}.
"""
if maybeReactor is None:
from twisted.internet import reactor
return reactor
else:
return maybeReactor |
A timeout policy for L{ClientService} which computes an exponential backoff
interval with configurable parameters.
@since: 16.1.0
@param initialDelay: Delay for the first reconnection attempt (default
1.0s).
@type initialDelay: L{float}
@param maxDelay: Maximum number of seconds between connection attempts
(default 60 seconds, or one minute). Note that this value is before
jitter is applied, so the actual maximum possible delay is this value
plus the maximum possible result of C{jitter()}.
@type maxDelay: L{float}
@param factor: A multiplicative factor by which the delay grows on each
failed reattempt. Default: 1.5.
@type factor: L{float}
@param jitter: A 0-argument callable that introduces noise into the delay.
By default, C{random.random}, i.e. a pseudorandom floating-point value
between zero and one.
@type jitter: 0-argument callable returning L{float}
@return: a 1-argument callable that, given an attempt count, returns a
floating point number; the number of seconds to delay.
@rtype: see L{ClientService.__init__}'s C{retryPolicy} argument. | def backoffPolicy(
initialDelay=1.0, maxDelay=60.0, factor=1.5, jitter=_goodEnoughRandom
):
"""
A timeout policy for L{ClientService} which computes an exponential backoff
interval with configurable parameters.
@since: 16.1.0
@param initialDelay: Delay for the first reconnection attempt (default
1.0s).
@type initialDelay: L{float}
@param maxDelay: Maximum number of seconds between connection attempts
(default 60 seconds, or one minute). Note that this value is before
jitter is applied, so the actual maximum possible delay is this value
plus the maximum possible result of C{jitter()}.
@type maxDelay: L{float}
@param factor: A multiplicative factor by which the delay grows on each
failed reattempt. Default: 1.5.
@type factor: L{float}
@param jitter: A 0-argument callable that introduces noise into the delay.
By default, C{random.random}, i.e. a pseudorandom floating-point value
between zero and one.
@type jitter: 0-argument callable returning L{float}
@return: a 1-argument callable that, given an attempt count, returns a
floating point number; the number of seconds to delay.
@rtype: see L{ClientService.__init__}'s C{retryPolicy} argument.
"""
def policy(attempt):
try:
delay = min(initialDelay * (factor ** min(100, attempt)), maxDelay)
except OverflowError:
delay = maxDelay
return delay + jitter()
return policy |
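Pinning C{jitter} to zero makes the exponential growth easy to verify:

policy = backoffPolicy(initialDelay=1.0, maxDelay=60.0, factor=1.5,
                       jitter=lambda: 0.0)
for attempt in range(1, 6):
    # Prints 1.5, 2.25, 3.375, 5.0625, 7.59375 before capping at 60.0.
    print(attempt, policy(attempt))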
Return the first element of a generator and exhaust it.
C{MethodicalMachine.upon}'s C{collector} argument takes a generator of
output results. If the generator is exhausted, the later outputs aren't
actually run.
@param gen: Generator to extract values from
@return: The first element of the generator. | def _firstResult(gen):
"""
Return the first element of a generator and exhaust it.
C{MethodicalMachine.upon}'s C{collector} argument takes a generator of
output results. If the generator is exhausted, the later outputs aren't
actually run.
@param gen: Generator to extract values from
@return: The first element of the generator.
"""
return list(gen)[0] |
Return an iterator of L{IReactorInstaller} plugins. | def getReactorTypes() -> Iterable[IReactorInstaller]:
"""
Return an iterator of L{IReactorInstaller} plugins.
"""
return getPlugins(IReactorInstaller) |
Install the reactor with the given C{shortName} attribute.
@raise NoSuchReactor: If no reactor is found with a matching C{shortName}.
@raise Exception: Anything that the specified reactor can raise when installed. | def installReactor(shortName: str) -> IReactorCore:
"""
Install the reactor with the given C{shortName} attribute.
@raise NoSuchReactor: If no reactor is found with a matching C{shortName}.
@raise Exception: Anything that the specified reactor can raise when installed.
"""
for installer in getReactorTypes():
if installer.shortName == shortName:
installer.install()
from twisted.internet import reactor
return cast(IReactorCore, reactor)
raise NoSuchReactor(shortName) |
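A minimal sketch; C{"select"} is one of the short names Twisted ships, and this must run before anything else imports the global reactor:

reactor = installReactor("select")
reactor.callWhenRunning(print, "running under", reactor.__class__.__name__)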
Return a compound class.
Return an object supporting the L{IService}, L{IServiceCollection},
L{IProcess} and L{sob.IPersistable} interfaces, with the given
parameters. Always access the return value by explicit casting to
one of the interfaces. | def Application(name, uid=None, gid=None):
"""
Return a compound class.
Return an object supporting the L{IService}, L{IServiceCollection},
L{IProcess} and L{sob.IPersistable} interfaces, with the given
parameters. Always access the return value by explicit casting to
one of the interfaces.
"""
ret = components.Componentized()
availableComponents = [MultiService(), Process(uid, gid), sob.Persistent(ret, name)]
for comp in availableComponents:
ret.addComponent(comp, ignoreClass=1)
IService(ret).setName(name)
return ret |
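A minimal sketch of composing an application and adapting it explicitly, with L{TimerService} standing in for any child service:

from twisted.application.internet import TimerService
from twisted.application.service import IService, IServiceCollection

application = Application("myapp")
# The IServiceCollection facet accepts child services.
TimerService(60, print, "tick").setServiceParent(IServiceCollection(application))
print(IService(application).name)  # "myapp"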
Load Application from a given file.
The serialization format it was saved in should be given as
C{kind}, and is one of C{pickle}, C{source}, C{xml} or C{python}. If
C{passphrase} is given, the application was encrypted with the
given passphrase.
@type filename: C{str}
@type kind: C{str}
@type passphrase: C{str} | def loadApplication(filename, kind, passphrase=None):
"""
Load Application from a given file.
The serialization format it was saved in should be given as
C{kind}, and is one of C{pickle}, C{source}, C{xml} or C{python}. If
C{passphrase} is given, the application was encrypted with the
given passphrase.
@type filename: C{str}
@type kind: C{str}
@type passphrase: C{str}
"""
if kind == "python":
application = sob.loadValueFromFile(filename, "application")
else:
application = sob.load(filename, kind)
return application |
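For instance, loading a C{.tac} file, which is Python source that binds a variable named C{application}:

application = loadApplication("myapp.tac", "python")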
Return the service corresponding to a description.
@param description: The description of the listening port, in the syntax
described by L{twisted.internet.endpoints.serverFromString}.
@type description: C{str}
@param factory: The protocol factory which will build protocols for
connections to this service.
@type factory: L{twisted.internet.interfaces.IProtocolFactory}
@rtype: C{twisted.application.service.IService}
@return: the service corresponding to a description of a reliable stream
server.
@see: L{twisted.internet.endpoints.serverFromString} | def service(
description: str,
factory: interfaces.IProtocolFactory,
reactor: Optional[interfaces.IReactorCore] = None,
) -> StreamServerEndpointService:
"""
Return the service corresponding to a description.
@param description: The description of the listening port, in the syntax
described by L{twisted.internet.endpoints.serverFromString}.
@type description: C{str}
@param factory: The protocol factory which will build protocols for
connections to this service.
@type factory: L{twisted.internet.interfaces.IProtocolFactory}
@rtype: C{twisted.application.service.IService}
@return: the service corresponding to a description of a reliable stream
server.
@see: L{twisted.internet.endpoints.serverFromString}
"""
if reactor is None:
reactor = _getReactor()
svc = StreamServerEndpointService(
endpoints.serverFromString(reactor, description), factory
)
svc._raiseSynchronously = True
return svc |
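For example, wrapping a trivial echo factory in a TCP service built from its string description; the service is typically parented to an application inside a C{.tac} file:

from twisted.internet.protocol import Factory, Protocol

class Echo(Protocol):
    def dataReceived(self, data):
        # Write every received byte straight back to the peer.
        self.transport.write(data)

echoService = service("tcp:8080", Factory.forProtocol(Echo))
# echoService.setServiceParent(application)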
Listen on a port corresponding to a description.
@param description: The description of the listening port, in the syntax
described by L{twisted.internet.endpoints.serverFromString}.
@type description: L{str}
@param factory: The protocol factory which will build protocols on
connection.
@type factory: L{twisted.internet.interfaces.IProtocolFactory}
@rtype: L{twisted.internet.interfaces.IListeningPort}
@return: the port corresponding to a description of a reliable virtual
circuit server.
@see: L{twisted.internet.endpoints.serverFromString} | def listen(
description: str, factory: interfaces.IProtocolFactory
) -> interfaces.IListeningPort:
"""
Listen on a port corresponding to a description.
@param description: The description of the listening port, in the syntax
described by L{twisted.internet.endpoints.serverFromString}.
@type description: L{str}
@param factory: The protocol factory which will build protocols on
connection.
@type factory: L{twisted.internet.interfaces.IProtocolFactory}
@rtype: L{twisted.internet.interfaces.IListeningPort}
@return: the port corresponding to a description of a reliable virtual
circuit server.
@see: L{twisted.internet.endpoints.serverFromString}
"""
from twisted.internet import reactor
name, args, kw = endpoints._parseServer(description, factory)
return cast(
interfaces.IListeningPort, getattr(reactor, "listen" + name)(*args, **kw)
) |
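The same description syntax applies, but the port is bound immediately instead of being wrapped in a service:

from twisted.internet.protocol import Factory, Protocol

port = listen("tcp:0", Factory.forProtocol(Protocol))  # 0 picks a free port
print(port.getHost())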
Exit the python interpreter with the given status and an optional message.
@param status: An exit status. An appropriate value from L{ExitStatus} is
recommended.
@param message: An optional message to print. | def exit(status: Union[int, ExitStatus], message: str = "") -> "typing.NoReturn":
"""
Exit the python interpreter with the given status and an optional message.
@param status: An exit status. An appropriate value from L{ExitStatus} is
recommended.
@param message: An optional message to print.
"""
if message:
if status == ExitStatus.EX_OK:
out = stdout
else:
out = stderr
out.write(message)
out.write("\n")
sysexit(status) |
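The message lands on stdout only for C{EX_OK}; any other status routes it to stderr before the interpreter exits:

exit(ExitStatus.EX_USAGE, "missing required argument: --config")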
Decorator for tests that are not expected to work on all platforms.
Calling L{PIDFile.isRunning} currently raises L{NotImplementedError} on
non-POSIX platforms.
On an unsupported platform, we expect to see any test that calls
L{PIDFile.isRunning} to raise either L{NotImplementedError}, L{SkipTest},
or C{self.failureException}.
(C{self.failureException} may occur in a test that checks for a specific
exception but it gets NotImplementedError instead.)
@param f: The test method to decorate.
@return: The wrapped callable. | def ifPlatformSupported(f: Callable[..., Any]) -> Callable[..., Any]:
"""
Decorator for tests that are not expected to work on all platforms.
Calling L{PIDFile.isRunning} currently raises L{NotImplementedError} on
non-POSIX platforms.
On an unsupported platform, we expect to see any test that calls
L{PIDFile.isRunning} to raise either L{NotImplementedError}, L{SkipTest},
or C{self.failureException}.
(C{self.failureException} may occur in a test that checks for a specific
exception but it gets NotImplementedError instead.)
@param f: The test method to decorate.
@return: The wrapped callable.
"""
@wraps(f)
def wrapper(self: Any, *args: Any, **kwargs: Any) -> Any:
supported = platform.getType() == "posix"
if supported:
return f(self, *args, **kwargs)
else:
e = self.assertRaises(
(NotImplementedError, SkipTest, self.failureException),
f,
self,
*args,
**kwargs,
)
if isinstance(e, NotImplementedError):
self.assertTrue(str(e).startswith("isRunning is not implemented on "))
return wrapper |
A fake target function for testing TimerService which does nothing. | def fakeTargetFunction():
"""
A fake target function for testing TimerService which does nothing.
"""
pass |
Make a sample endpoint for testing.
@param fireImmediately: If true, fire all L{Deferred}s returned from
C{connect} immediately.
@return: a 2-tuple of C{(information, endpoint)}, where C{information} is a
L{ConnectInformation} describing the operations in progress on
C{endpoint}. | def endpointForTesting(fireImmediately=False):
"""
Make a sample endpoint for testing.
@param fireImmediately: If true, fire all L{Deferred}s returned from
C{connect} immediately.
@return: a 2-tuple of C{(information, endpoint)}, where C{information} is a
L{ConnectInformation} describing the operations in progress on
C{endpoint}.
"""
@implementer(IStreamClientEndpoint)
class ClientTestEndpoint:
def connect(self, factory):
result = Deferred()
info.passedFactories.append(factory)
@result.addCallback
def createProtocol(ignored):
protocol = factory.buildProtocol(None)
info.constructedProtocols.append(protocol)
transport = StringTransport()
protocol.makeConnection(transport)
return protocol
info.connectQueue.append(result)
if fireImmediately:
result.callback(None)
return result
info = ConnectInformation()
return info, ClientTestEndpoint() |
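A minimal sketch of wiring the test endpoint to a L{ClientService} under test:

from twisted.application.internet import ClientService
from twisted.internet.protocol import Factory, Protocol

info, endpoint = endpointForTesting()
svc = ClientService(endpoint, Factory.forProtocol(Protocol))
svc.startService()
# Fire the queued attempt to simulate a successful connection.
info.connectQueue[0].callback(None)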
Catch the global log stream.
@param testCase: The test case to add a cleanup to.
@param logPublisher: the log publisher to add and remove observers for.
@return: a 0-argument callable that returns a list of textual log messages
for comparison.
@rtype: 0-argument callable returning L{list} of L{str} | def catchLogs(testCase, logPublisher=globalLogPublisher):
"""
Catch the global log stream.
@param testCase: The test case to add a cleanup to.
@param logPublisher: the log publisher to add and remove observers for.
@return: a 0-argument callable that returns a list of textual log messages
for comparison.
@rtype: 0-argument callable returning L{list} of L{str}
"""
logs = []
logPublisher.addObserver(logs.append)
testCase.addCleanup(lambda: logPublisher.removeObserver(logs.append))
return lambda: [formatEvent(event) for event in logs] |
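Inside a test method the pattern looks like this, with the code under test purely hypothetical:

def test_logsGreeting(self):
    messages = catchLogs(self)
    doSomethingThatLogs()  # hypothetical code under test
    self.assertIn("hello", messages()[0])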
Update the docstring of a method that implements an option.
The string is dedented and the given keyword arguments are substituted. | def _update_doc(opt: Callable[["TwistOptions", str], None], **kwargs: str) -> None:
"""
Update the docstring of a method that implements an option.
The string is dedented and the given keyword arguments are substituted.
"""
opt.__doc__ = dedent(opt.__doc__ or "").format(**kwargs) |
Look up a user by name in a L{pwd}-style database.
@param userdb: The user database.
@param username: Identifying name in bytes. This will be decoded according
to the filesystem encoding, as the L{pwd} module does internally.
@raises KeyError: when the user doesn't exist | def _lookupUser(userdb: UserDB, username: bytes) -> UserRecord:
"""
Look up a user by name in a L{pwd}-style database.
@param userdb: The user database.
@param username: Identifying name in bytes. This will be decoded according
to the filesystem encoding, as the L{pwd} module does internally.
@raises KeyError: when the user doesn't exist
"""
return userdb.getpwnam(username.decode(sys.getfilesystemencoding())) |
Look up a user in the /etc/passwd database using the pwd module. If the
pwd module is not available, return None.
@param username: the username of the user to return the passwd database
information for.
@returns: A L{pwd.struct_passwd}, where field 1 may contain a crypted
password, or L{None} when the L{pwd} database is unavailable.
@raises KeyError: when no such user exists | def _pwdGetByName(username: str) -> Optional[CryptedPasswordRecord]:
"""
Look up a user in the /etc/passwd database using the pwd module. If the
pwd module is not available, return None.
@param username: the username of the user to return the passwd database
information for.
@returns: A L{pwd.struct_passwd}, where field 1 may contain a crypted
password, or L{None} when the L{pwd} database is unavailable.
@raises KeyError: when no such user exists
"""
if pwd is None:
return None
return cast(CryptedPasswordRecord, pwd.getpwnam(username)) |