| identifier (string, 1–155 chars) | parameters (string, 2–6.09k chars) | docstring (string, 11–63.4k chars) | docstring_summary (string, 0–63.4k chars) | function (string, 29–99.8k chars) | function_tokens (sequence) | start_point (sequence) | end_point (sequence) | language (1 class) | docstring_language (string, 2–7 chars) | docstring_language_predictions (string, 18–23 chars) | is_langid_reliable (2 classes) |
---|---|---|---|---|---|---|---|---|---|---|---|
IndentingFormatter.get_message_start | (self, formatted, levelno) |
Return the start of the formatted log message (not counting the
prefix to add to each line).
|
Return the start of the formatted log message (not counting the
prefix to add to each line).
| def get_message_start(self, formatted, levelno):
    # type: (str, int) -> str
    """
    Return the start of the formatted log message (not counting the
    prefix to add to each line).
    """
    if levelno < logging.WARNING:
        return ""
    if formatted.startswith(DEPRECATION_MSG_PREFIX):
        # Then the message already has a prefix. We don't want it to
        # look like "WARNING: DEPRECATION: ...."
        return ""
    if levelno < logging.ERROR:
        return "WARNING: "
    return "ERROR: " | [
"def",
"get_message_start",
"(",
"self",
",",
"formatted",
",",
"levelno",
")",
":",
"# type: (str, int) -> str",
"if",
"levelno",
"<",
"logging",
".",
"WARNING",
":",
"return",
"\"\"",
"if",
"formatted",
".",
"startswith",
"(",
"DEPRECATION_MSG_PREFIX",
")",
":",
"# Then the message already has a prefix. We don't want it to",
"# look like \"WARNING: DEPRECATION: ....\"",
"return",
"\"\"",
"if",
"levelno",
"<",
"logging",
".",
"ERROR",
":",
"return",
"\"WARNING: \"",
"return",
"\"ERROR: \""
] | [
107,
4
] | [
122,
24
] | python | en | ['en', 'error', 'th'] | False |
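The severity-prefix logic in the row above is easy to exercise standalone. A minimal sketch follows, assuming pip's `DEPRECATION_MSG_PREFIX` is the string `"DEPRECATION: "` (an assumption for illustration; the real constant is defined elsewhere in pip):

```python
import logging

DEPRECATION_MSG_PREFIX = "DEPRECATION: "  # assumed value, for illustration only

def message_start(formatted, levelno):
    # Mirrors IndentingFormatter.get_message_start above.
    if levelno < logging.WARNING:
        return ""
    if formatted.startswith(DEPRECATION_MSG_PREFIX):
        return ""  # avoid "WARNING: DEPRECATION: ..."
    if levelno < logging.ERROR:
        return "WARNING: "
    return "ERROR: "

print(repr(message_start("boom", logging.ERROR)))                # 'ERROR: '
print(repr(message_start("DEPRECATION: old", logging.WARNING)))  # ''
print(repr(message_start("fyi", logging.INFO)))                  # ''
```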
IndentingFormatter.format | (self, record) |
Calls the standard formatter, but will indent all of the log message
lines by our current indentation level.
|
Calls the standard formatter, but will indent all of the log message
lines by our current indentation level.
| def format(self, record):
    # type: (logging.LogRecord) -> str
    """
    Calls the standard formatter, but will indent all of the log message
    lines by our current indentation level.
    """
    formatted = super().format(record)
    message_start = self.get_message_start(formatted, record.levelno)
    formatted = message_start + formatted
    prefix = ""
    if self.add_timestamp:
        prefix = f"{self.formatTime(record)} "
    prefix += " " * get_indentation()
    formatted = "".join([prefix + line for line in formatted.splitlines(True)])
    return formatted | [
"def",
"format",
"(",
"self",
",",
"record",
")",
":",
"# type: (logging.LogRecord) -> str",
"formatted",
"=",
"super",
"(",
")",
".",
"format",
"(",
"record",
")",
"message_start",
"=",
"self",
".",
"get_message_start",
"(",
"formatted",
",",
"record",
".",
"levelno",
")",
"formatted",
"=",
"message_start",
"+",
"formatted",
"prefix",
"=",
"\"\"",
"if",
"self",
".",
"add_timestamp",
":",
"prefix",
"=",
"f\"{self.formatTime(record)} \"",
"prefix",
"+=",
"\" \"",
"*",
"get_indentation",
"(",
")",
"formatted",
"=",
"\"\"",
".",
"join",
"(",
"[",
"prefix",
"+",
"line",
"for",
"line",
"in",
"formatted",
".",
"splitlines",
"(",
"True",
")",
"]",
")",
"return",
"formatted"
] | [
124,
4
] | [
139,
24
] | python | en | ['en', 'error', 'th'] | False |
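The per-line prefixing in `format` above hinges on `str.splitlines(True)`: with `keepends=True` the trailing newlines survive the join, so line breaks are preserved. The trick in isolation:

```python
# Each line of a multi-line log message receives the same prefix;
# keepends=True carries the original newlines through the join.
text = "first line\nsecond line\n"
prefix = "    "
indented = "".join(prefix + line for line in text.splitlines(True))
print(indented)
#     first line
#     second line
```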
ColorizedStreamHandler._using_stdout | (self) |
Return whether the handler is using sys.stdout.
|
Return whether the handler is using sys.stdout.
| def _using_stdout(self):
    # type: () -> bool
    """
    Return whether the handler is using sys.stdout.
    """
    if WINDOWS and colorama:
        # Then self.stream is an AnsiToWin32 object.
        stream = cast(colorama.AnsiToWin32, self.stream)
        return stream.wrapped is sys.stdout
    return self.stream is sys.stdout | [
"def",
"_using_stdout",
"(",
"self",
")",
":",
"# type: () -> bool",
"if",
"WINDOWS",
"and",
"colorama",
":",
"# Then self.stream is an AnsiToWin32 object.",
"stream",
"=",
"cast",
"(",
"colorama",
".",
"AnsiToWin32",
",",
"self",
".",
"stream",
")",
"return",
"stream",
".",
"wrapped",
"is",
"sys",
".",
"stdout",
"return",
"self",
".",
"stream",
"is",
"sys",
".",
"stdout"
] | [
171,
4
] | [
181,
40
] | python | en | ['en', 'error', 'th'] | False |
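Note the check relies on object identity (`is`) rather than equality, so a redirected or wrapped stream does not count as stdout. A toy sketch (minus the colorama unwrapping on Windows):

```python
import io
import sys

def using_stdout(stream):
    # Identity check, as in ColorizedStreamHandler._using_stdout above.
    return stream is sys.stdout

print(using_stdout(sys.stdout))     # True
print(using_stdout(io.StringIO()))  # False: a substitute stream is not stdout
```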
RequirementSet.__init__ | (self, check_supported_wheels: bool = True) | Create a RequirementSet.
| Create a RequirementSet.
| def __init__(self, check_supported_wheels: bool = True) -> None:
    """Create a RequirementSet.
    """
    self.requirements: Dict[str, InstallRequirement] = OrderedDict()
    self.check_supported_wheels = check_supported_wheels
    self.unnamed_requirements: List[InstallRequirement] = [] | [
"def",
"__init__",
"(",
"self",
",",
"check_supported_wheels",
":",
"bool",
"=",
"True",
")",
"->",
"None",
":",
"self",
".",
"requirements",
":",
"Dict",
"[",
"str",
",",
"InstallRequirement",
"]",
"=",
"OrderedDict",
"(",
")",
"self",
".",
"check_supported_wheels",
"=",
"check_supported_wheels",
"self",
".",
"unnamed_requirements",
":",
"List",
"[",
"InstallRequirement",
"]",
"=",
"[",
"]"
] | [
16,
4
] | [
23,
64
] | python | en | ['en', 'en', 'en'] | True |
RequirementSet.add_requirement | (
self,
install_req: InstallRequirement,
parent_req_name: Optional[str] = None,
extras_requested: Optional[Iterable[str]] = None
) | Add install_req as a requirement to install.
:param parent_req_name: The name of the requirement that needed this
added. The name is used because when multiple unnamed requirements
resolve to the same name, we could otherwise end up with dependency
links that point outside the Requirements set. parent_req must
already be added. Note that None implies that this is a user
supplied requirement, vs an inferred one.
:param extras_requested: an iterable of extras used to evaluate the
environment markers.
:return: Additional requirements to scan. That is either [] if
the requirement is not applicable, or [install_req] if the
requirement is applicable and has just been added.
| Add install_req as a requirement to install. | def add_requirement(
        self,
        install_req: InstallRequirement,
        parent_req_name: Optional[str] = None,
        extras_requested: Optional[Iterable[str]] = None
    ) -> Tuple[List[InstallRequirement], Optional[InstallRequirement]]:
    """Add install_req as a requirement to install.

    :param parent_req_name: The name of the requirement that needed this
        added. The name is used because when multiple unnamed requirements
        resolve to the same name, we could otherwise end up with dependency
        links that point outside the Requirements set. parent_req must
        already be added. Note that None implies that this is a user
        supplied requirement, vs an inferred one.
    :param extras_requested: an iterable of extras used to evaluate the
        environment markers.
    :return: Additional requirements to scan. That is either [] if
        the requirement is not applicable, or [install_req] if the
        requirement is applicable and has just been added.
    """
    # If the markers do not match, ignore this requirement.
    if not install_req.match_markers(extras_requested):
        logger.info(
            "Ignoring %s: markers '%s' don't match your environment",
            install_req.name, install_req.markers,
        )
        return [], None

    # If the wheel is not supported, raise an error.
    # Should check this after filtering out based on environment markers to
    # allow specifying different wheels based on the environment/OS, in a
    # single requirements file.
    if install_req.link and install_req.link.is_wheel:
        wheel = Wheel(install_req.link.filename)
        tags = compatibility_tags.get_supported()
        if (self.check_supported_wheels and not wheel.supported(tags)):
            raise InstallationError(
                "{} is not a supported wheel on this platform.".format(
                    wheel.filename)
            )

    # This next bit is really a sanity check.
    assert not install_req.user_supplied or parent_req_name is None, (
        "a user supplied req shouldn't have a parent"
    )

    # Unnamed requirements are scanned again and the requirement won't be
    # added as a dependency until after scanning.
    if not install_req.name:
        self.add_unnamed_requirement(install_req)
        return [install_req], None

    try:
        existing_req: Optional[InstallRequirement] = self.get_requirement(
            install_req.name)
    except KeyError:
        existing_req = None

    has_conflicting_requirement = (
        parent_req_name is None and
        existing_req and
        not existing_req.constraint and
        existing_req.extras == install_req.extras and
        existing_req.req and
        install_req.req and
        existing_req.req.specifier != install_req.req.specifier
    )
    if has_conflicting_requirement:
        raise InstallationError(
            "Double requirement given: {} (already in {}, name={!r})"
            .format(install_req, existing_req, install_req.name)
        )

    # When no existing requirement exists, add the requirement as a
    # dependency and it will be scanned again after.
    if not existing_req:
        self.add_named_requirement(install_req)
        # We'd want to rescan this requirement later
        return [install_req], install_req

    # Assume there's no need to scan, and that we've already
    # encountered this for scanning.
    if install_req.constraint or not existing_req.constraint:
        return [], existing_req

    does_not_satisfy_constraint = (
        install_req.link and
        not (
            existing_req.link and
            install_req.link.path == existing_req.link.path
        )
    )
    if does_not_satisfy_constraint:
        raise InstallationError(
            "Could not satisfy constraints for '{}': "
            "installation from path or url cannot be "
            "constrained to a version".format(install_req.name)
        )

    # If we're now installing a constraint, mark the existing
    # object for real installation.
    existing_req.constraint = False
    # If we're now installing a user supplied requirement,
    # mark the existing object as such.
    if install_req.user_supplied:
        existing_req.user_supplied = True
    existing_req.extras = tuple(sorted(
        set(existing_req.extras) | set(install_req.extras)
    ))
    logger.debug(
        "Setting %s extras to: %s",
        existing_req, existing_req.extras,
    )
    # Return the existing requirement for addition to the parent and
    # scanning again.
    return [existing_req], existing_req | [
"def",
"add_requirement",
"(",
"self",
",",
"install_req",
":",
"InstallRequirement",
",",
"parent_req_name",
":",
"Optional",
"[",
"str",
"]",
"=",
"None",
",",
"extras_requested",
":",
"Optional",
"[",
"Iterable",
"[",
"str",
"]",
"]",
"=",
"None",
")",
"->",
"Tuple",
"[",
"List",
"[",
"InstallRequirement",
"]",
",",
"Optional",
"[",
"InstallRequirement",
"]",
"]",
":",
"# If the markers do not match, ignore this requirement.",
"if",
"not",
"install_req",
".",
"match_markers",
"(",
"extras_requested",
")",
":",
"logger",
".",
"info",
"(",
"\"Ignoring %s: markers '%s' don't match your environment\"",
",",
"install_req",
".",
"name",
",",
"install_req",
".",
"markers",
",",
")",
"return",
"[",
"]",
",",
"None",
"# If the wheel is not supported, raise an error.",
"# Should check this after filtering out based on environment markers to",
"# allow specifying different wheels based on the environment/OS, in a",
"# single requirements file.",
"if",
"install_req",
".",
"link",
"and",
"install_req",
".",
"link",
".",
"is_wheel",
":",
"wheel",
"=",
"Wheel",
"(",
"install_req",
".",
"link",
".",
"filename",
")",
"tags",
"=",
"compatibility_tags",
".",
"get_supported",
"(",
")",
"if",
"(",
"self",
".",
"check_supported_wheels",
"and",
"not",
"wheel",
".",
"supported",
"(",
"tags",
")",
")",
":",
"raise",
"InstallationError",
"(",
"\"{} is not a supported wheel on this platform.\"",
".",
"format",
"(",
"wheel",
".",
"filename",
")",
")",
"# This next bit is really a sanity check.",
"assert",
"not",
"install_req",
".",
"user_supplied",
"or",
"parent_req_name",
"is",
"None",
",",
"(",
"\"a user supplied req shouldn't have a parent\"",
")",
"# Unnamed requirements are scanned again and the requirement won't be",
"# added as a dependency until after scanning.",
"if",
"not",
"install_req",
".",
"name",
":",
"self",
".",
"add_unnamed_requirement",
"(",
"install_req",
")",
"return",
"[",
"install_req",
"]",
",",
"None",
"try",
":",
"existing_req",
":",
"Optional",
"[",
"InstallRequirement",
"]",
"=",
"self",
".",
"get_requirement",
"(",
"install_req",
".",
"name",
")",
"except",
"KeyError",
":",
"existing_req",
"=",
"None",
"has_conflicting_requirement",
"=",
"(",
"parent_req_name",
"is",
"None",
"and",
"existing_req",
"and",
"not",
"existing_req",
".",
"constraint",
"and",
"existing_req",
".",
"extras",
"==",
"install_req",
".",
"extras",
"and",
"existing_req",
".",
"req",
"and",
"install_req",
".",
"req",
"and",
"existing_req",
".",
"req",
".",
"specifier",
"!=",
"install_req",
".",
"req",
".",
"specifier",
")",
"if",
"has_conflicting_requirement",
":",
"raise",
"InstallationError",
"(",
"\"Double requirement given: {} (already in {}, name={!r})\"",
".",
"format",
"(",
"install_req",
",",
"existing_req",
",",
"install_req",
".",
"name",
")",
")",
"# When no existing requirement exists, add the requirement as a",
"# dependency and it will be scanned again after.",
"if",
"not",
"existing_req",
":",
"self",
".",
"add_named_requirement",
"(",
"install_req",
")",
"# We'd want to rescan this requirement later",
"return",
"[",
"install_req",
"]",
",",
"install_req",
"# Assume there's no need to scan, and that we've already",
"# encountered this for scanning.",
"if",
"install_req",
".",
"constraint",
"or",
"not",
"existing_req",
".",
"constraint",
":",
"return",
"[",
"]",
",",
"existing_req",
"does_not_satisfy_constraint",
"=",
"(",
"install_req",
".",
"link",
"and",
"not",
"(",
"existing_req",
".",
"link",
"and",
"install_req",
".",
"link",
".",
"path",
"==",
"existing_req",
".",
"link",
".",
"path",
")",
")",
"if",
"does_not_satisfy_constraint",
":",
"raise",
"InstallationError",
"(",
"\"Could not satisfy constraints for '{}': \"",
"\"installation from path or url cannot be \"",
"\"constrained to a version\"",
".",
"format",
"(",
"install_req",
".",
"name",
")",
")",
"# If we're now installing a constraint, mark the existing",
"# object for real installation.",
"existing_req",
".",
"constraint",
"=",
"False",
"# If we're now installing a user supplied requirement,",
"# mark the existing object as such.",
"if",
"install_req",
".",
"user_supplied",
":",
"existing_req",
".",
"user_supplied",
"=",
"True",
"existing_req",
".",
"extras",
"=",
"tuple",
"(",
"sorted",
"(",
"set",
"(",
"existing_req",
".",
"extras",
")",
"|",
"set",
"(",
"install_req",
".",
"extras",
")",
")",
")",
"logger",
".",
"debug",
"(",
"\"Setting %s extras to: %s\"",
",",
"existing_req",
",",
"existing_req",
".",
"extras",
",",
")",
"# Return the existing requirement for addition to the parent and",
"# scanning again.",
"return",
"[",
"existing_req",
"]",
",",
"existing_req"
] | [
55,
4
] | [
169,
43
] | python | en | ['en', 'en', 'en'] | True |
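The "double requirement" branch above reduces to comparing specifiers of two same-named requirements. A hedged sketch using `packaging` (which pip vendors; here assumed importable directly) shows the condition in isolation:

```python
from packaging.requirements import Requirement

existing = Requirement("requests==2.0")
incoming = Requirement("requests==2.1")

# Same name, different specifier: this is the case where add_requirement
# above raises "Double requirement given: ...".
conflict = (existing.name == incoming.name
            and existing.specifier != incoming.specifier)
print(conflict)  # True
```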
MailHandler.get_opt | (self, option, optiontype=str) | Parse an option from config.ini | Parse an option from config.ini | def get_opt(self, option, optiontype=str):
"Parse an option from config.ini"
log.debug("Querying option: {}.".format(option))
section = self.account
if not self.config.has_section(section):
section = "DEFAULT"
log.debug("Section {} not found. Using DEFAULT".format(section))
if optiontype == int:
return self.config.getint(section, option)
elif optiontype == float:
return self.config.getfloat(section, option)
elif optiontype == bool:
return self.config.getboolean(section, option)
elif optiontype == str:
return self.config.get(section, option)
else:
log.error("Invalid option type: {} ({}).".format(option,
optiontype)) | [
"def",
"get_opt",
"(",
"self",
",",
"option",
",",
"optiontype",
"=",
"str",
")",
":",
"log",
".",
"debug",
"(",
"\"Querying option: {}.\"",
".",
"format",
"(",
"option",
")",
")",
"section",
"=",
"self",
".",
"account",
"if",
"not",
"self",
".",
"config",
".",
"has_section",
"(",
"section",
")",
":",
"section",
"=",
"\"DEFAULT\"",
"log",
".",
"debug",
"(",
"\"Section {} not found. Using DEFAULT\"",
".",
"format",
"(",
"section",
")",
")",
"if",
"optiontype",
"==",
"int",
":",
"return",
"self",
".",
"config",
".",
"getint",
"(",
"section",
",",
"option",
")",
"elif",
"optiontype",
"==",
"float",
":",
"return",
"self",
".",
"config",
".",
"getfloat",
"(",
"section",
",",
"option",
")",
"elif",
"optiontype",
"==",
"bool",
":",
"return",
"self",
".",
"config",
".",
"getboolean",
"(",
"section",
",",
"option",
")",
"elif",
"optiontype",
"==",
"str",
":",
"return",
"self",
".",
"config",
".",
"get",
"(",
"section",
",",
"option",
")",
"else",
":",
"log",
".",
"error",
"(",
"\"Invalid option type: {} ({}).\"",
".",
"format",
"(",
"option",
",",
"optiontype",
")",
")"
] | [
67,
4
] | [
84,
73
] | python | en | ['en', 'en', 'en'] | True |
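The type-dispatched lookup with a DEFAULT fallback can be reproduced with the standard library alone. A minimal sketch (the section and option names here are made up):

```python
import configparser

config = configparser.ConfigParser()
config.read_string(
    "[DEFAULT]\n"
    "incport = 995\n"
    "[me@example.com]\n"
    "incserver = pop.example.com\n"
)

def get_opt(section, option, optiontype=str):
    # Fall back to DEFAULT when the account has no section of its own.
    if not config.has_section(section):
        section = "DEFAULT"
    getter = {int: config.getint, float: config.getfloat,
              bool: config.getboolean, str: config.get}[optiontype]
    return getter(section, option)

print(get_opt("me@example.com", "incport", int))       # 995, inherited from DEFAULT
print(get_opt("unknown@example.com", "incport", int))  # 995, via the fallback
```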
MailHandler.print_options | (self) | Print all available options. For debugging purposes. | Print all available options. For debugging purposes. | def print_options(self):
"Print all available options. For debugging purposes."
for i in self.config.options(self.account):
print i + ":", self.config.get(self.account, i) | [
"def",
"print_options",
"(",
"self",
")",
":",
"for",
"i",
"in",
"self",
".",
"config",
".",
"options",
"(",
"self",
".",
"account",
")",
":",
"print",
"i",
"+",
"\":\"",
",",
"self",
".",
"config",
".",
"get",
"(",
"self",
".",
"account",
",",
"i",
")"
] | [
86,
4
] | [
89,
59
] | python | en | ['en', 'en', 'en'] | True |
MailHandler.get_mail | (self) | Get the mail. Uses poplib as GMX Freemail does not allow imap. | Get the mail. Uses poplib as GMX Freemail does not allow imap. | def get_mail(self):
"Get the mail. Uses poplib as GMX Freemail does not allow imap."
log.info("Getting mail for {}".format(self.account))
if not self.username:
self.username = self.account
password = getpass("Password for {}: ".format(self.username))
server = self.get_opt("incserver")
port = self.get_opt("incport", int)
# Unnecessarily check if we'll use SSL.
if self.get_opt("incsecurity") == "SSL":
session = poplib.POP3_SSL(server, port)
else:
session = poplib.POP3(server, port)
# If the loglevel is DEBUG (10), enable verbose logging.
if logging.getLogger().getEffectiveLevel() == 10:
session.set_debuglevel(1)
try:
session.user(self.username)
session.pass_(password)
except poplib.error_proto:
log.error("Authentification for {} failed. Wrong credentials?"
.format(self.account))
sys.exit(1)
messages = [session.retr(i) for i in range(1, len(session.list()[1]))]
messages = ["\n".join(msg[1]) for msg in messages]
messages = [parser.Parser().parsestr(msg) for msg in messages]
# TODO: Make this prettier. Example:
# http://g33k.wordpress.com/2009/02/04/check-gmail-the-python-way/
print "You have {} new messages.".format(len(messages))
for m in messages:
print "{}, [{}], ({})".format(m["From"], m["Subject"], m["Date"])
session.quit() | [
"def",
"get_mail",
"(",
"self",
")",
":",
"log",
".",
"info",
"(",
"\"Getting mail for {}\"",
".",
"format",
"(",
"self",
".",
"account",
")",
")",
"if",
"not",
"self",
".",
"username",
":",
"self",
".",
"username",
"=",
"self",
".",
"account",
"password",
"=",
"getpass",
"(",
"\"Password for {}: \"",
".",
"format",
"(",
"self",
".",
"username",
")",
")",
"server",
"=",
"self",
".",
"get_opt",
"(",
"\"incserver\"",
")",
"port",
"=",
"self",
".",
"get_opt",
"(",
"\"incport\"",
",",
"int",
")",
"# Unnecessarily check if we'll use SSL.",
"if",
"self",
".",
"get_opt",
"(",
"\"incsecurity\"",
")",
"==",
"\"SSL\"",
":",
"session",
"=",
"poplib",
".",
"POP3_SSL",
"(",
"server",
",",
"port",
")",
"else",
":",
"session",
"=",
"poplib",
".",
"POP3",
"(",
"server",
",",
"port",
")",
"# If the loglevel is DEBUG (10), enable verbose logging.",
"if",
"logging",
".",
"getLogger",
"(",
")",
".",
"getEffectiveLevel",
"(",
")",
"==",
"10",
":",
"session",
".",
"set_debuglevel",
"(",
"1",
")",
"try",
":",
"session",
".",
"user",
"(",
"self",
".",
"username",
")",
"session",
".",
"pass_",
"(",
"password",
")",
"except",
"poplib",
".",
"error_proto",
":",
"log",
".",
"error",
"(",
"\"Authentification for {} failed. Wrong credentials?\"",
".",
"format",
"(",
"self",
".",
"account",
")",
")",
"sys",
".",
"exit",
"(",
"1",
")",
"messages",
"=",
"[",
"session",
".",
"retr",
"(",
"i",
")",
"for",
"i",
"in",
"range",
"(",
"1",
",",
"len",
"(",
"session",
".",
"list",
"(",
")",
"[",
"1",
"]",
")",
")",
"]",
"messages",
"=",
"[",
"\"\\n\"",
".",
"join",
"(",
"msg",
"[",
"1",
"]",
")",
"for",
"msg",
"in",
"messages",
"]",
"messages",
"=",
"[",
"parser",
".",
"Parser",
"(",
")",
".",
"parsestr",
"(",
"msg",
")",
"for",
"msg",
"in",
"messages",
"]",
"# TODO: Make this prettier. Example:",
"# http://g33k.wordpress.com/2009/02/04/check-gmail-the-python-way/",
"print",
"\"You have {} new messages.\"",
".",
"format",
"(",
"len",
"(",
"messages",
")",
")",
"for",
"m",
"in",
"messages",
":",
"print",
"\"{}, [{}], ({})\"",
".",
"format",
"(",
"m",
"[",
"\"From\"",
"]",
",",
"m",
"[",
"\"Subject\"",
"]",
",",
"m",
"[",
"\"Date\"",
"]",
")",
"session",
".",
"quit",
"(",
")"
] | [
91,
4
] | [
129,
22
] | python | en | ['en', 'en', 'en'] | True |
MailHandler.send_mail | (self, recipient, header, message,
sign, encrypt, attachkey, dryrun) | Sends a mail via SMTP. | Sends a mail via SMTP. | def send_mail(self, recipient, header, message,
                  sign, encrypt, attachkey, dryrun):
    "Sends a mail via SMTP."
    log.info("Sending mail to {} ({}). Sign/Encrypt/AttachKey: {}/{}/{}."
             .format(recipient, header, sign, encrypt, attachkey))
    recipients = {i for i in recipient.split(",") if "@" in i}
    if not recipients:
        log.error("No valid recipients in {}.".format(recipients))
        return
    # TODO: Hash the password with sha256+salt and only ask once at start-
    # up, if we implement a curse UI.
    if not self.username:
        self.username = self.account
    password = getpass("Password for {}: ".format(self.username))
    server = self.get_opt("outserver")
    port = self.get_opt("outport", int)
    # Split header into CC, BCC and Subject.
    cc, bcc = "", ""
    header = header.split("::")
    if len(header) == 3:
        cc, bcc, subject = header[0], header[1], header[2]
    elif len(header) == 2:
        cc, subject = header[0], header[1]
    else:
        subject = header[0]
    cc = {i for i in cc.split(",") if "@" in i}
    bcc = {i for i in bcc.split(",") if "@" in i}
    # Initialize our message to attach signatures/keyfiles, body etc to.
    msg = MIMEMultipart()
    if sign or encrypt:
        gpg = gnupg.GPG()
        keyid = self.get_opt("keyid")
        keyfp = self.get_opt("keyfp")
        for i in gpg.list_keys():
            if keyid in i["keyid"]:
                break
        else:
            log.error("{} not found in gpg.list_keys().".format(keyid))
            sys.exit(1)
        if sign and encrypt:
            encrypted = str(gpg.encrypt(message, self.FPs["MaxDemian"],
                                        sign=keyfp))
            if encrypted:
                encryptedtext = MIMEText(
                    _text=encrypted,
                    _subtype=self.content_subtype,
                    _charset=self.content_charset
                )
                msg.attach(encryptedtext)
            else:
                log.error("Failed to encrypt the message.")
                sys.exit(1)
        elif sign:
            # message = msg.as_string().replace('\n', '\r\n')
            signed = str(gpg.sign(message, keyid=keyid))
            if signed:
                signedtext = MIMEText(
                    _text=signed,
                    _subtype=self.content_subtype,
                    _charset=self.content_charset
                )
                msg.attach(signedtext)
            else:
                log.error("Failed to sign the message.")
                sys.exit(1)
        elif encrypt:
            encrypted = str(gpg.encrypt(message, self.FPs["MaxDemian"]))
            if encrypted:
                encryptedtext = MIMEText(
                    _text=encrypted,
                    _subtype=self.content_subtype,
                    _charset=self.content_charset
                )
                msg.attach(encryptedtext)
            else:
                log.error("Failed to encrypt the message.")
                sys.exit(1)
        else:
            log.error("No GPG keys found.")
    pubkeyloc = None
    if attachkey:  # Attach GPG Public attachkey.
        pubkeyfile = self.get_opt("keyfile")
        if os.path.isfile(pubkeyfile):
            pubkeyloc = pubkeyfile
        elif os.path.isfile(os.path.join(self.configdir, pubkeyfile)):
            pubkeyloc = os.path.join(self.configdir, pubkeyfile)
        else:
            log.error("Public attachkey '{}' could not be found."
                      .format(pubkeyfile))
        if pubkeyloc:
            ctype, encoding = mimetypes.guess_type(pubkeyloc)
            if ctype is None or encoding is not None:
                ctype = 'application/octet-stream'
            maintype, subtype = ctype.split('/', 1)
            if maintype == 'text':
                with open(pubkeyloc) as f:
                    # keyatt = f.read()
                    keyatt = MIMEText(
                        f.read(),
                        _subtype=subtype,
                        _charset=self.content_charset)
                keyatt.add_header(
                    'Content-Disposition',
                    'attachment',
                    filename=pubkeyfile
                )
                msg.attach(keyatt)
                log.info("Attached public attachkey {} to message."
                         .format(pubkeyfile))
            else:
                log.error("{} is not a textfile. Sure it's a GPG Key?"
                          .format(pubkeyloc))
    # Add Mime infos to the message.
    msg["From"] = self.account
    msg["To"] = ", ".join(recipients)
    if cc:
        msg["Cc"] = ", ".join(cc)
    msg["Date"] = formatdate(time.time())
    msg["User-Agent"] = self.user_agent
    msg["Subject"] = subject
    # If --dryrun is enabled, we exit here.
    if dryrun:
        print msg
        sys.exit()
    session = smtplib.SMTP(server, port)
    # If the loglevel is DEBUG (10), enable verbose logging.
    # if logging.getLogger().getEffectiveLevel() == 10:
    #     session.set_debuglevel(1)
    if self.get_opt("outsecurity"):
        session.ehlo()
        session.starttls()
        session.ehlo()
    # Union of the three sets.
    recipients = recipients | cc | bcc
    try:
        session.login(self.username, password)
    except smtplib.SMTPAuthenticationError:
        log.error("Authentication failed. Wrong credentials?")
        sys.exit(1)
    # TODO: Add footer (with user-agent, timestamp?)
    session.sendmail(self.account, recipients, msg.as_string())
    log.info("Mail sent from {} to {} ({}).".format(self.account,
                                                    recipients, subject))
    session.quit() | [
"def",
"send_mail",
"(",
"self",
",",
"recipient",
",",
"header",
",",
"message",
",",
"sign",
",",
"encrypt",
",",
"attachkey",
",",
"dryrun",
")",
":",
"log",
".",
"info",
"(",
"\"Sending mail to {} ({}). Sign/Encrypt/AttachKey: {}/{}/{}.\"",
".",
"format",
"(",
"recipient",
",",
"header",
",",
"sign",
",",
"encrypt",
",",
"attachkey",
")",
")",
"recipients",
"=",
"{",
"i",
"for",
"i",
"in",
"recipient",
".",
"split",
"(",
"\",\"",
")",
"if",
"\"@\"",
"in",
"i",
"}",
"if",
"not",
"recipients",
":",
"log",
".",
"error",
"(",
"\"No valid recipients in {}.\"",
".",
"format",
"(",
"recipients",
")",
")",
"return",
"# TODO: Hash the password with sha256+salt and only ask once at start-",
"# up, if we implement a curse UI.",
"if",
"not",
"self",
".",
"username",
":",
"self",
".",
"username",
"=",
"self",
".",
"account",
"password",
"=",
"getpass",
"(",
"\"Password for {}: \"",
".",
"format",
"(",
"self",
".",
"username",
")",
")",
"server",
"=",
"self",
".",
"get_opt",
"(",
"\"outserver\"",
")",
"port",
"=",
"self",
".",
"get_opt",
"(",
"\"outport\"",
",",
"int",
")",
"# Split header into CC, BCC and Subject.",
"cc",
",",
"bcc",
"=",
"\"\"",
",",
"\"\"",
"header",
"=",
"header",
".",
"split",
"(",
"\"::\"",
")",
"if",
"len",
"(",
"header",
")",
"==",
"3",
":",
"cc",
",",
"bcc",
",",
"subject",
"=",
"header",
"[",
"0",
"]",
",",
"header",
"[",
"1",
"]",
",",
"header",
"[",
"2",
"]",
"elif",
"len",
"(",
"header",
")",
"==",
"2",
":",
"cc",
",",
"subject",
"=",
"header",
"[",
"0",
"]",
",",
"header",
"[",
"1",
"]",
"else",
":",
"subject",
"=",
"header",
"[",
"0",
"]",
"cc",
"=",
"{",
"i",
"for",
"i",
"in",
"cc",
".",
"split",
"(",
"\",\"",
")",
"if",
"\"@\"",
"in",
"i",
"}",
"bcc",
"=",
"{",
"i",
"for",
"i",
"in",
"bcc",
".",
"split",
"(",
"\",\"",
")",
"if",
"\"@\"",
"in",
"i",
"}",
"# Initialize our message to attach signatures/keyfiles, body etc to.",
"msg",
"=",
"MIMEMultipart",
"(",
")",
"if",
"sign",
"or",
"encrypt",
":",
"gpg",
"=",
"gnupg",
".",
"GPG",
"(",
")",
"keyid",
"=",
"self",
".",
"get_opt",
"(",
"\"keyid\"",
")",
"keyfp",
"=",
"self",
".",
"get_opt",
"(",
"\"keyfp\"",
")",
"for",
"i",
"in",
"gpg",
".",
"list_keys",
"(",
")",
":",
"if",
"keyid",
"in",
"i",
"[",
"\"keyid\"",
"]",
":",
"break",
"else",
":",
"log",
".",
"error",
"(",
"\"{} not found in gpg.list_keys().\"",
".",
"format",
"(",
"keyid",
")",
")",
"sys",
".",
"exit",
"(",
"1",
")",
"if",
"sign",
"and",
"encrypt",
":",
"encrypted",
"=",
"str",
"(",
"gpg",
".",
"encrypt",
"(",
"message",
",",
"self",
".",
"FPs",
"[",
"\"MaxDemian\"",
"]",
",",
"sign",
"=",
"keyfp",
")",
")",
"if",
"encrypted",
":",
"encryptedtext",
"=",
"MIMEText",
"(",
"_text",
"=",
"encrypted",
",",
"_subtype",
"=",
"self",
".",
"content_subtype",
",",
"_charset",
"=",
"self",
".",
"content_charset",
")",
"msg",
".",
"attach",
"(",
"encryptedtext",
")",
"else",
":",
"log",
".",
"error",
"(",
"\"Failed to encrypt the message.\"",
")",
"sys",
".",
"exit",
"(",
"1",
")",
"elif",
"sign",
":",
"# message = msg.as_string().replace('\\n', '\\r\\n')",
"signed",
"=",
"str",
"(",
"gpg",
".",
"sign",
"(",
"message",
",",
"keyid",
"=",
"keyid",
")",
")",
"if",
"signed",
":",
"signedtext",
"=",
"MIMEText",
"(",
"_text",
"=",
"signed",
",",
"_subtype",
"=",
"self",
".",
"content_subtype",
",",
"_charset",
"=",
"self",
".",
"content_charset",
")",
"msg",
".",
"attach",
"(",
"signedtext",
")",
"else",
":",
"log",
".",
"error",
"(",
"\"Failed to sign the message.\"",
")",
"sys",
".",
"exit",
"(",
"1",
")",
"elif",
"encrypt",
":",
"encrypted",
"=",
"str",
"(",
"gpg",
".",
"encrypt",
"(",
"message",
",",
"self",
".",
"FPs",
"[",
"\"MaxDemian\"",
"]",
")",
")",
"if",
"encrypted",
":",
"encryptedtext",
"=",
"MIMEText",
"(",
"_text",
"=",
"encrypted",
",",
"_subtype",
"=",
"self",
".",
"content_subtype",
",",
"_charset",
"=",
"self",
".",
"content_charset",
")",
"msg",
".",
"attach",
"(",
"encryptedtext",
")",
"else",
":",
"log",
".",
"error",
"(",
"\"Failed to encrypt the message.\"",
")",
"sys",
".",
"exit",
"(",
"1",
")",
"else",
":",
"log",
".",
"error",
"(",
"\"No GPG keys found.\"",
")",
"pubkeyloc",
"=",
"None",
"if",
"attachkey",
":",
"# Attach GPG Public attachkey.",
"pubkeyfile",
"=",
"self",
".",
"get_opt",
"(",
"\"keyfile\"",
")",
"if",
"os",
".",
"path",
".",
"isfile",
"(",
"pubkeyfile",
")",
":",
"pubkeyloc",
"=",
"pubkeyfile",
"elif",
"os",
".",
"path",
".",
"isfile",
"(",
"os",
".",
"path",
".",
"join",
"(",
"self",
".",
"configdir",
",",
"pubkeyfile",
")",
")",
":",
"pubkeyloc",
"=",
"os",
".",
"path",
".",
"join",
"(",
"self",
".",
"configdir",
",",
"pubkeyfile",
")",
"else",
":",
"log",
".",
"error",
"(",
"\"Public attachkey '{}' could not be found.\"",
".",
"format",
"(",
"pubkeyfile",
")",
")",
"if",
"pubkeyloc",
":",
"ctype",
",",
"encoding",
"=",
"mimetypes",
".",
"guess_type",
"(",
"pubkeyloc",
")",
"if",
"ctype",
"is",
"None",
"or",
"encoding",
"is",
"not",
"None",
":",
"ctype",
"=",
"'application/octet-stream'",
"maintype",
",",
"subtype",
"=",
"ctype",
".",
"split",
"(",
"'/'",
",",
"1",
")",
"if",
"maintype",
"==",
"'text'",
":",
"with",
"open",
"(",
"pubkeyloc",
")",
"as",
"f",
":",
"# keyatt = f.read()",
"keyatt",
"=",
"MIMEText",
"(",
"f",
".",
"read",
"(",
")",
",",
"_subtype",
"=",
"subtype",
",",
"_charset",
"=",
"self",
".",
"content_charset",
")",
"keyatt",
".",
"add_header",
"(",
"'Content-Disposition'",
",",
"'attachment'",
",",
"filename",
"=",
"pubkeyfile",
")",
"msg",
".",
"attach",
"(",
"keyatt",
")",
"log",
".",
"info",
"(",
"\"Attached public attachkey {} to message.\"",
".",
"format",
"(",
"pubkeyfile",
")",
")",
"else",
":",
"log",
".",
"error",
"(",
"\"{} is not a textfile. Sure it's a GPG Key?\"",
".",
"format",
"(",
"pubkeyloc",
")",
")",
"# Add Mime infos to the message.",
"msg",
"[",
"\"From\"",
"]",
"=",
"self",
".",
"account",
"msg",
"[",
"\"To\"",
"]",
"=",
"\", \"",
".",
"join",
"(",
"recipients",
")",
"if",
"cc",
":",
"msg",
"[",
"\"Cc\"",
"]",
"=",
"\", \"",
".",
"join",
"(",
"cc",
")",
"msg",
"[",
"\"Date\"",
"]",
"=",
"formatdate",
"(",
"time",
".",
"time",
"(",
")",
")",
"msg",
"[",
"\"User-Agent\"",
"]",
"=",
"self",
".",
"user_agent",
"msg",
"[",
"\"Subject\"",
"]",
"=",
"subject",
"# If --dryrun is enabled, we exit here.",
"if",
"dryrun",
":",
"print",
"msg",
"sys",
".",
"exit",
"(",
")",
"session",
"=",
"smtplib",
".",
"SMTP",
"(",
"server",
",",
"port",
")",
"# If the loglevel is DEBUG (10), enable verbose logging.",
"# if logging.getLogger().getEffectiveLevel() == 10:",
"# session.set_debuglevel(1)",
"if",
"self",
".",
"get_opt",
"(",
"\"outsecurity\"",
")",
":",
"session",
".",
"ehlo",
"(",
")",
"session",
".",
"starttls",
"(",
")",
"session",
".",
"ehlo",
"(",
")",
"# Union of the three sets.",
"recipients",
"=",
"recipients",
"|",
"cc",
"|",
"bcc",
"try",
":",
"session",
".",
"login",
"(",
"self",
".",
"username",
",",
"password",
")",
"except",
"smtplib",
".",
"SMTPAuthenticationError",
":",
"log",
".",
"error",
"(",
"\"Authentication failed. Wrong credentials?\"",
")",
"sys",
".",
"exit",
"(",
"1",
")",
"# TODO: Add footer (with user-agent, timestamp?)",
"session",
".",
"sendmail",
"(",
"self",
".",
"account",
",",
"recipients",
",",
"msg",
".",
"as_string",
"(",
")",
")",
"log",
".",
"info",
"(",
"\"Mail sent from {} to {} ({}).\"",
".",
"format",
"(",
"self",
".",
"account",
",",
"recipients",
",",
"subject",
")",
")",
"session",
".",
"quit",
"(",
")"
] | [
132,
4
] | [
289,
22
] | python | en | ['en', 'pt', 'en'] | True |
FallbackStorage._get | (self, *args, **kwargs) |
Get a single list of messages from all storage backends.
|
Get a single list of messages from all storage backends.
| def _get(self, *args, **kwargs):
    """
    Get a single list of messages from all storage backends.
    """
    all_messages = []
    for storage in self.storages:
        messages, all_retrieved = storage._get()
        # If the backend hasn't been used, no more retrieval is necessary.
        if messages is None:
            break
        if messages:
            self._used_storages.add(storage)
        all_messages.extend(messages)
        # If this storage class contained all the messages, no further
        # retrieval is necessary
        if all_retrieved:
            break
    return all_messages, all_retrieved | [
"def",
"_get",
"(",
"self",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"all_messages",
"=",
"[",
"]",
"for",
"storage",
"in",
"self",
".",
"storages",
":",
"messages",
",",
"all_retrieved",
"=",
"storage",
".",
"_get",
"(",
")",
"# If the backend hasn't been used, no more retrieval is necessary.",
"if",
"messages",
"is",
"None",
":",
"break",
"if",
"messages",
":",
"self",
".",
"_used_storages",
".",
"add",
"(",
"storage",
")",
"all_messages",
".",
"extend",
"(",
"messages",
")",
"# If this storage class contained all the messages, no further",
"# retrieval is necessary",
"if",
"all_retrieved",
":",
"break",
"return",
"all_messages",
",",
"all_retrieved"
] | [
18,
4
] | [
35,
42
] | python | en | ['en', 'error', 'th'] | False |
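The control flow of this fallback read is worth seeing in isolation: iteration stops early either when a backend was never used (`messages is None`) or when one reports it held everything. A toy sketch with fake backends:

```python
# Each fake backend yields (messages, all_retrieved), like storage._get().
backends = [
    (["cookie message"], False),   # partial: overflow was stored elsewhere
    (["session message"], True),   # holds the remainder
    (None, False),                 # never reached
]

all_messages = []
for messages, all_retrieved in backends:
    if messages is None:
        break
    all_messages.extend(messages)
    if all_retrieved:
        break

print(all_messages)  # ['cookie message', 'session message']
```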
FallbackStorage._store | (self, messages, response, *args, **kwargs) |
Store the messages and return any unstored messages after trying all
backends.
For each storage backend, any messages not stored are passed on to the
next backend.
|
Store the messages and return any unstored messages after trying all
backends. | def _store(self, messages, response, *args, **kwargs):
"""
Store the messages and return any unstored messages after trying all
backends.
For each storage backend, any messages not stored are passed on to the
next backend.
"""
for storage in self.storages:
if messages:
messages = storage._store(messages, response, remove_oldest=False)
# Even if there are no more messages, continue iterating to ensure
# storages which contained messages are flushed.
elif storage in self._used_storages:
storage._store([], response)
self._used_storages.remove(storage)
return messages | [
"def",
"_store",
"(",
"self",
",",
"messages",
",",
"response",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"for",
"storage",
"in",
"self",
".",
"storages",
":",
"if",
"messages",
":",
"messages",
"=",
"storage",
".",
"_store",
"(",
"messages",
",",
"response",
",",
"remove_oldest",
"=",
"False",
")",
"# Even if there are no more messages, continue iterating to ensure",
"# storages which contained messages are flushed.",
"elif",
"storage",
"in",
"self",
".",
"_used_storages",
":",
"storage",
".",
"_store",
"(",
"[",
"]",
",",
"response",
")",
"self",
".",
"_used_storages",
".",
"remove",
"(",
"storage",
")",
"return",
"messages"
] | [
37,
4
] | [
53,
23
] | python | en | ['en', 'error', 'th'] | False |
check_for_occlusions | (task, user_input, keep_space_around_bodies=True) | Returns true if user_input occludes scene objects. | Returns true if user_input occludes scene objects. | def check_for_occlusions(task, user_input, keep_space_around_bodies=True):
"""Returns true if user_input occludes scene objects."""
if not isinstance(task, bytes):
task = serialize(task)
if isinstance(user_input, scene_if.UserInput):
return simulator_bindings.check_for_occlusions_general(
task, serialize(user_input), keep_space_around_bodies)
else:
points, rectangulars, balls = _prepare_user_input(*user_input)
return simulator_bindings.check_for_occlusions(
task, points, rectangulars, balls, keep_space_around_bodies) | [
"def",
"check_for_occlusions",
"(",
"task",
",",
"user_input",
",",
"keep_space_around_bodies",
"=",
"True",
")",
":",
"if",
"not",
"isinstance",
"(",
"task",
",",
"bytes",
")",
":",
"task",
"=",
"serialize",
"(",
"task",
")",
"if",
"isinstance",
"(",
"user_input",
",",
"scene_if",
".",
"UserInput",
")",
":",
"return",
"simulator_bindings",
".",
"check_for_occlusions_general",
"(",
"task",
",",
"serialize",
"(",
"user_input",
")",
",",
"keep_space_around_bodies",
")",
"else",
":",
"points",
",",
"rectangulars",
",",
"balls",
"=",
"_prepare_user_input",
"(",
"*",
"user_input",
")",
"return",
"simulator_bindings",
".",
"check_for_occlusions",
"(",
"task",
",",
"points",
",",
"rectangulars",
",",
"balls",
",",
"keep_space_around_bodies",
")"
] | [
80,
0
] | [
90,
72
] | python | en | ['it', 'fy', 'en'] | False |
add_user_input_to_scene | (scene,
user_input,
keep_space_around_bodies=True,
allow_occlusions=False) | Converts user input to objects in the scene.
Args:
scene: scene_if.Scene.
user_input: scene_if.UserInput or a triple (points, rectangulars, balls).
keep_space_around_bodies: bool, if True extra empty space will be
enforced around scene bodies.
Returns:
task_simulation: task_if.TaskSimulation.
| Converts user input to objects in the scene. | def add_user_input_to_scene(scene,
                            user_input,
                            keep_space_around_bodies=True,
                            allow_occlusions=False):
    """Converts user input to objects in the scene.

    Args:
        scene: scene_if.Scene.
        user_input: scene_if.UserInput or a triple (points, rectangulars, balls).
        keep_space_around_bodies: bool, if True extra empty space will be
            enforced around scene bodies.

    Returns:
        task_simulation: task_if.TaskSimulation.
    """
    if not isinstance(user_input, scene_if.UserInput):
        user_input = build_user_input(*user_input)
    return deserialize(
        scene_if.Scene(),
        simulator_bindings.add_user_input_to_scene(serialize(scene),
                                                   serialize(user_input),
                                                   keep_space_around_bodies,
                                                   allow_occlusions)) | [
"def",
"add_user_input_to_scene",
"(",
"scene",
",",
"user_input",
",",
"keep_space_around_bodies",
"=",
"True",
",",
"allow_occlusions",
"=",
"False",
")",
":",
"if",
"not",
"isinstance",
"(",
"user_input",
",",
"scene_if",
".",
"UserInput",
")",
":",
"user_input",
"=",
"build_user_input",
"(",
"*",
"user_input",
")",
"return",
"deserialize",
"(",
"scene_if",
".",
"Scene",
"(",
")",
",",
"simulator_bindings",
".",
"add_user_input_to_scene",
"(",
"serialize",
"(",
"scene",
")",
",",
"serialize",
"(",
"user_input",
")",
",",
"keep_space_around_bodies",
",",
"allow_occlusions",
")",
")"
] | [
93,
0
] | [
116,
69
] | python | en | ['en', 'en', 'en'] | True |
simulate_task_with_input | (task,
user_input,
steps=DEFAULT_MAX_STEPS,
stride=DEFAULT_STRIDE,
keep_space_around_bodies=True) | Check a solution for a task and return SimulationResult.
This is un-optimized version of magic_ponies that should be used for
debugging or vizualization purposes only.
| Check a solution for a task and return SimulationResult. | def simulate_task_with_input(task,
                             user_input,
                             steps=DEFAULT_MAX_STEPS,
                             stride=DEFAULT_STRIDE,
                             keep_space_around_bodies=True):
    """Check a solution for a task and return SimulationResult.

    This is un-optimized version of magic_ponies that should be used for
    debugging or vizualization purposes only.
    """
    if not isinstance(user_input, scene_if.UserInput):
        user_input = build_user_input(*user_input)
    # Creating a shallow copy.
    task = copy.copy(task)
    task.scene = add_user_input_to_scene(task.scene, user_input,
                                         keep_space_around_bodies)
    return simulate_task(task, steps, stride) | [
"def",
"simulate_task_with_input",
"(",
"task",
",",
"user_input",
",",
"steps",
"=",
"DEFAULT_MAX_STEPS",
",",
"stride",
"=",
"DEFAULT_STRIDE",
",",
"keep_space_around_bodies",
"=",
"True",
")",
":",
"if",
"not",
"isinstance",
"(",
"user_input",
",",
"scene_if",
".",
"UserInput",
")",
":",
"user_input",
"=",
"build_user_input",
"(",
"*",
"user_input",
")",
"# Creating a shallow copy.",
"task",
"=",
"copy",
".",
"copy",
"(",
"task",
")",
"task",
".",
"scene",
"=",
"add_user_input_to_scene",
"(",
"task",
".",
"scene",
",",
"user_input",
",",
"keep_space_around_bodies",
")",
"return",
"simulate_task",
"(",
"task",
",",
"steps",
",",
"stride",
")"
] | [
119,
0
] | [
136,
45
] | python | en | ['en', 'en', 'en'] | True |
scene_to_raster | (scene) | Convert scene to a integer array height x width containing color codes.
| Convert scene to a integer array height x width containing color codes.
| def scene_to_raster(scene):
    """Convert scene to a integer array height x width containing color codes.
    """
    pixels = simulator_bindings.render(serialize(scene))
    return np.array(pixels).reshape((scene.height, scene.width)) | [
"def",
"scene_to_raster",
"(",
"scene",
")",
":",
"pixels",
"=",
"simulator_bindings",
".",
"render",
"(",
"serialize",
"(",
"scene",
")",
")",
"return",
"np",
".",
"array",
"(",
"pixels",
")",
".",
"reshape",
"(",
"(",
"scene",
".",
"height",
",",
"scene",
".",
"width",
")",
")"
] | [
139,
0
] | [
143,
64
] | python | en | ['en', 'en', 'en'] | True |
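The rasterization itself happens in the C++ bindings; the Python side is only a reshape of a flat color-code buffer into `(height, width)`. The numpy step in isolation:

```python
import numpy as np

# A toy 2x3 "scene": six color codes in row-major order.
pixels = [0, 1, 2, 3, 4, 5]
image = np.array(pixels).reshape((2, 3))
print(image)
# [[0 1 2]
#  [3 4 5]]
```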
scene_to_featurized_objects | (scene) | Convert scene to a FeaturizedObjects containing featurs of size
num_objects x OBJECT_FEATURE_SIZE. | Convert scene to a FeaturizedObjects containing featurs of size
num_objects x OBJECT_FEATURE_SIZE. | def scene_to_featurized_objects(scene):
"""Convert scene to a FeaturizedObjects containing featurs of size
num_objects x OBJECT_FEATURE_SIZE."""
object_vector = simulator_bindings.featurize_scene(serialize(scene))
object_vector = np.array(object_vector, dtype=np.float32).reshape(
(-1, OBJECT_FEATURE_SIZE))
return phyre.simulation.FeaturizedObjects(
phyre.simulation.finalize_featurized_objects(
np.expand_dims(object_vector, axis=0))) | [
"def",
"scene_to_featurized_objects",
"(",
"scene",
")",
":",
"object_vector",
"=",
"simulator_bindings",
".",
"featurize_scene",
"(",
"serialize",
"(",
"scene",
")",
")",
"object_vector",
"=",
"np",
".",
"array",
"(",
"object_vector",
",",
"dtype",
"=",
"np",
".",
"float32",
")",
".",
"reshape",
"(",
"(",
"-",
"1",
",",
"OBJECT_FEATURE_SIZE",
")",
")",
"return",
"phyre",
".",
"simulation",
".",
"FeaturizedObjects",
"(",
"phyre",
".",
"simulation",
".",
"finalize_featurized_objects",
"(",
"np",
".",
"expand_dims",
"(",
"object_vector",
",",
"axis",
"=",
"0",
")",
")",
")"
] | [
146,
0
] | [
154,
51
] | python | en | ['en', 'en', 'en'] | True |
magic_ponies | (task,
user_input,
steps=DEFAULT_MAX_STEPS,
stride=DEFAULT_STRIDE,
keep_space_around_bodies=True,
with_times=False,
need_images=False,
need_featurized_objects=False,
perturb_step=-1,
stop_after_solved=True) | Check a solution for a task and return intermidiate images.
Args:
task: task_if.Task or bytes, in the latter case a serialzed task is
expected.
user_input: scene_if.UserInput or a triple(points, rectangulars, balls)
points: None or a list or an array of points. Should be of shape
(N, 2). In the latter case is assumed to be in
row-major format.
rectangulars: A list of lists of 4 verticies. Each
vertix should be a pair of floats. Vertices must be on positive
order and must form a convex polygon. Otherwise the input
will be deemed invalid.
balls: A list of triples (x, y, radius).
steps: Maximum number of steps to simulate for.
stride: Stride for the returned image array. Negative values will
produce not images.
keep_space_around_bodies: bool, if True extra empty space will be
enforced around scene bodies.
with_times: A boolean flag indicating whether timing info is required.
need_images: A boolean flag indicating whether images should be returned.
need_featurized_objects: A boolean flag indicating whether objects should be returned.
perturb_step: Perturb the simulation at that time step. Set to -1 to
not perturb at all.
stop_after_solved: Stop the simulation once solved. Set to False
to keep it running, for training forward models better.
Returns:
A tuple (is_solved, had_occlusions, images, objects) if with_times is False.
is_solved: bool.
had_occlusions: bool.
images a numpy arrays of shape (num_steps, height, width).
objects is a numpy array of shape (num_steps, num_objects, feature_size).
A tuple (is_solved, had_occlusions, images, scenes, simulation_time, pack_time)
if with_times is set.
simulation_time: time spent inside C++ code to unpack and simulate.
pack_time: time spent inside C++ code to pack the result.
| Check a solution for a task and return intermidiate images. | def magic_ponies(task,
                 user_input,
                 steps=DEFAULT_MAX_STEPS,
                 stride=DEFAULT_STRIDE,
                 keep_space_around_bodies=True,
                 with_times=False,
                 need_images=False,
                 need_featurized_objects=False,
                 perturb_step=-1,
                 stop_after_solved=True):
    """Check a solution for a task and return intermidiate images.

    Args:
        task: task_if.Task or bytes, in the latter case a serialzed task is
            expected.
        user_input: scene_if.UserInput or a triple(points, rectangulars, balls)
            points: None or a list or an array of points. Should be of shape
                (N, 2). In the latter case is assumed to be in
                row-major format.
            rectangulars: A list of lists of 4 verticies. Each
                vertix should be a pair of floats. Vertices must be on positive
                order and must form a convex polygon. Otherwise the input
                will be deemed invalid.
            balls: A list of triples (x, y, radius).
        steps: Maximum number of steps to simulate for.
        stride: Stride for the returned image array. Negative values will
            produce not images.
        keep_space_around_bodies: bool, if True extra empty space will be
            enforced around scene bodies.
        with_times: A boolean flag indicating whether timing info is required.
        need_images: A boolean flag indicating whether images should be returned.
        need_featurized_objects: A boolean flag indicating whether objects should be returned.
        perturb_step: Perturb the simulation at that time step. Set to -1 to
            not perturb at all.
        stop_after_solved: Stop the simulation once solved. Set to False
            to keep it running, for training forward models better.

    Returns:
        A tuple (is_solved, had_occlusions, images, objects) if with_times is False.
            is_solved: bool.
            had_occlusions: bool.
            images a numpy arrays of shape (num_steps, height, width).
            objects is a numpy array of shape (num_steps, num_objects, feature_size).
        A tuple (is_solved, had_occlusions, images, scenes, simulation_time, pack_time)
            if with_times is set.
            simulation_time: time spent inside C++ code to unpack and simulate.
            pack_time: time spent inside C++ code to pack the result.
    """
    if isinstance(task, bytes):
        serialized_task = task
        height, width = creator.SCENE_HEIGHT, creator.SCENE_WIDTH
    else:
        serialized_task = serialize(task)
        height, width = task.scene.height, task.scene.width
    if isinstance(user_input, scene_if.UserInput):
        is_solved, had_occlusions, packed_images, packed_featurized_objects, number_objects, sim_time, pack_time = (
            simulator_bindings.magic_ponies_general(serialized_task,
                                                    serialize(user_input),
                                                    keep_space_around_bodies,
                                                    steps, stride, need_images,
                                                    need_featurized_objects,
                                                    perturb_step,
                                                    stop_after_solved))
    else:
        points, rectangulars, balls = _prepare_user_input(*user_input)
        is_solved, had_occlusions, packed_images, packed_featurized_objects, number_objects, sim_time, pack_time = (
            simulator_bindings.magic_ponies(serialized_task, points,
                                            rectangulars, balls,
                                            keep_space_around_bodies, steps,
                                            stride, need_images,
                                            need_featurized_objects,
                                            perturb_step,
                                            stop_after_solved))
    packed_images = np.array(packed_images, dtype=np.uint8)
    images = packed_images.reshape((-1, height, width))
    packed_featurized_objects = np.array(packed_featurized_objects,
                                         dtype=np.float32)
    packed_featurized_objects = packed_featurized_objects.reshape(
        (-1, number_objects, OBJECT_FEATURE_SIZE))
    packed_featurized_objects = phyre.simulation.finalize_featurized_objects(
        packed_featurized_objects)
    if with_times:
        return is_solved, had_occlusions, images, packed_featurized_objects, sim_time, pack_time
    else:
        return is_solved, had_occlusions, images, packed_featurized_objects | [
"def",
"magic_ponies",
"(",
"task",
",",
"user_input",
",",
"steps",
"=",
"DEFAULT_MAX_STEPS",
",",
"stride",
"=",
"DEFAULT_STRIDE",
",",
"keep_space_around_bodies",
"=",
"True",
",",
"with_times",
"=",
"False",
",",
"need_images",
"=",
"False",
",",
"need_featurized_objects",
"=",
"False",
",",
"perturb_step",
"=",
"-",
"1",
",",
"stop_after_solved",
"=",
"True",
")",
":",
"if",
"isinstance",
"(",
"task",
",",
"bytes",
")",
":",
"serialized_task",
"=",
"task",
"height",
",",
"width",
"=",
"creator",
".",
"SCENE_HEIGHT",
",",
"creator",
".",
"SCENE_WIDTH",
"else",
":",
"serialized_task",
"=",
"serialize",
"(",
"task",
")",
"height",
",",
"width",
"=",
"task",
".",
"scene",
".",
"height",
",",
"task",
".",
"scene",
".",
"width",
"if",
"isinstance",
"(",
"user_input",
",",
"scene_if",
".",
"UserInput",
")",
":",
"is_solved",
",",
"had_occlusions",
",",
"packed_images",
",",
"packed_featurized_objects",
",",
"number_objects",
",",
"sim_time",
",",
"pack_time",
"=",
"(",
"simulator_bindings",
".",
"magic_ponies_general",
"(",
"serialized_task",
",",
"serialize",
"(",
"user_input",
")",
",",
"keep_space_around_bodies",
",",
"steps",
",",
"stride",
",",
"need_images",
",",
"need_featurized_objects",
",",
"perturb_step",
",",
"stop_after_solved",
")",
")",
"else",
":",
"points",
",",
"rectangulars",
",",
"balls",
"=",
"_prepare_user_input",
"(",
"*",
"user_input",
")",
"is_solved",
",",
"had_occlusions",
",",
"packed_images",
",",
"packed_featurized_objects",
",",
"number_objects",
",",
"sim_time",
",",
"pack_time",
"=",
"(",
"simulator_bindings",
".",
"magic_ponies",
"(",
"serialized_task",
",",
"points",
",",
"rectangulars",
",",
"balls",
",",
"keep_space_around_bodies",
",",
"steps",
",",
"stride",
",",
"need_images",
",",
"need_featurized_objects",
",",
"perturb_step",
",",
"stop_after_solved",
")",
")",
"packed_images",
"=",
"np",
".",
"array",
"(",
"packed_images",
",",
"dtype",
"=",
"np",
".",
"uint8",
")",
"images",
"=",
"packed_images",
".",
"reshape",
"(",
"(",
"-",
"1",
",",
"height",
",",
"width",
")",
")",
"packed_featurized_objects",
"=",
"np",
".",
"array",
"(",
"packed_featurized_objects",
",",
"dtype",
"=",
"np",
".",
"float32",
")",
"packed_featurized_objects",
"=",
"packed_featurized_objects",
".",
"reshape",
"(",
"(",
"-",
"1",
",",
"number_objects",
",",
"OBJECT_FEATURE_SIZE",
")",
")",
"packed_featurized_objects",
"=",
"phyre",
".",
"simulation",
".",
"finalize_featurized_objects",
"(",
"packed_featurized_objects",
")",
"if",
"with_times",
":",
"return",
"is_solved",
",",
"had_occlusions",
",",
"images",
",",
"packed_featurized_objects",
",",
"sim_time",
",",
"pack_time",
"else",
":",
"return",
"is_solved",
",",
"had_occlusions",
",",
"images",
",",
"packed_featurized_objects"
] | [
187,
0
] | [
273,
75
] | python | en | ['en', 'en', 'en'] | True |
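On the Python side, the heavy lifting of `magic_ponies` is again unpacking: the flat float buffer returned by the bindings becomes `(num_steps, num_objects, feature_size)`. A sketch with a made-up feature size (the real `OBJECT_FEATURE_SIZE` is a constant defined elsewhere in phyre):

```python
import numpy as np

OBJECT_FEATURE_SIZE = 14   # assumption for illustration only
num_objects = 3
num_steps = 2

flat = np.zeros(num_steps * num_objects * OBJECT_FEATURE_SIZE, dtype=np.float32)
objects = flat.reshape((-1, num_objects, OBJECT_FEATURE_SIZE))
print(objects.shape)  # (2, 3, 14)
```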
_default_key_normalizer | (key_class, request_context) |
Create a pool key out of a request context dictionary.
According to RFC 3986, both the scheme and host are case-insensitive.
Therefore, this function normalizes both before constructing the pool
key for an HTTPS request. If you wish to change this behaviour, provide
alternate callables to ``key_fn_by_scheme``.
:param key_class:
The class to use when constructing the key. This should be a namedtuple
with the ``scheme`` and ``host`` keys at a minimum.
:type key_class: namedtuple
:param request_context:
A dictionary-like object that contain the context for a request.
:type request_context: dict
:return: A namedtuple that can be used as a connection pool key.
:rtype: PoolKey
|
Create a pool key out of a request context dictionary. | def _default_key_normalizer(key_class, request_context):
"""
Create a pool key out of a request context dictionary.
According to RFC 3986, both the scheme and host are case-insensitive.
Therefore, this function normalizes both before constructing the pool
key for an HTTPS request. If you wish to change this behaviour, provide
alternate callables to ``key_fn_by_scheme``.
:param key_class:
The class to use when constructing the key. This should be a namedtuple
with the ``scheme`` and ``host`` keys at a minimum.
:type key_class: namedtuple
:param request_context:
A dictionary-like object that contain the context for a request.
:type request_context: dict
:return: A namedtuple that can be used as a connection pool key.
:rtype: PoolKey
"""
# Since we mutate the dictionary, make a copy first
context = request_context.copy()
context["scheme"] = context["scheme"].lower()
context["host"] = context["host"].lower()
# These are both dictionaries and need to be transformed into frozensets
for key in ("headers", "_proxy_headers", "_socks_options"):
if key in context and context[key] is not None:
context[key] = frozenset(context[key].items())
# The socket_options key may be a list and needs to be transformed into a
# tuple.
socket_opts = context.get("socket_options")
if socket_opts is not None:
context["socket_options"] = tuple(socket_opts)
# Map the kwargs to the names in the namedtuple - this is necessary since
# namedtuples can't have fields starting with '_'.
for key in list(context.keys()):
context["key_" + key] = context.pop(key)
# Default to ``None`` for keys missing from the context
for field in key_class._fields:
if field not in context:
context[field] = None
return key_class(**context) | [
"def",
"_default_key_normalizer",
"(",
"key_class",
",",
"request_context",
")",
":",
"# Since we mutate the dictionary, make a copy first",
"context",
"=",
"request_context",
".",
"copy",
"(",
")",
"context",
"[",
"\"scheme\"",
"]",
"=",
"context",
"[",
"\"scheme\"",
"]",
".",
"lower",
"(",
")",
"context",
"[",
"\"host\"",
"]",
"=",
"context",
"[",
"\"host\"",
"]",
".",
"lower",
"(",
")",
"# These are both dictionaries and need to be transformed into frozensets",
"for",
"key",
"in",
"(",
"\"headers\"",
",",
"\"_proxy_headers\"",
",",
"\"_socks_options\"",
")",
":",
"if",
"key",
"in",
"context",
"and",
"context",
"[",
"key",
"]",
"is",
"not",
"None",
":",
"context",
"[",
"key",
"]",
"=",
"frozenset",
"(",
"context",
"[",
"key",
"]",
".",
"items",
"(",
")",
")",
"# The socket_options key may be a list and needs to be transformed into a",
"# tuple.",
"socket_opts",
"=",
"context",
".",
"get",
"(",
"\"socket_options\"",
")",
"if",
"socket_opts",
"is",
"not",
"None",
":",
"context",
"[",
"\"socket_options\"",
"]",
"=",
"tuple",
"(",
"socket_opts",
")",
"# Map the kwargs to the names in the namedtuple - this is necessary since",
"# namedtuples can't have fields starting with '_'.",
"for",
"key",
"in",
"list",
"(",
"context",
".",
"keys",
"(",
")",
")",
":",
"context",
"[",
"\"key_\"",
"+",
"key",
"]",
"=",
"context",
".",
"pop",
"(",
"key",
")",
"# Default to ``None`` for keys missing from the context",
"for",
"field",
"in",
"key_class",
".",
"_fields",
":",
"if",
"field",
"not",
"in",
"context",
":",
"context",
"[",
"field",
"]",
"=",
"None",
"return",
"key_class",
"(",
"*",
"*",
"context",
")"
] | [
77,
0
] | [
123,
31
] | python | en | ['en', 'error', 'th'] | False |
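The normalization is straightforward to replay on a toy pool key; this sketch mirrors the function above with a reduced field set:

```python
from collections import namedtuple

PoolKey = namedtuple("PoolKey", ["key_scheme", "key_host", "key_port"])

def normalize(key_class, request_context):
    context = request_context.copy()
    context["scheme"] = context["scheme"].lower()
    context["host"] = context["host"].lower()
    # Rename fields: namedtuple fields can't start with '_', hence the prefix.
    for key in list(context):
        context["key_" + key] = context.pop(key)
    # Fields missing from the context default to None.
    for field in key_class._fields:
        context.setdefault(field, None)
    return key_class(**context)

print(normalize(PoolKey, {"scheme": "HTTPS", "host": "Example.COM"}))
# PoolKey(key_scheme='https', key_host='example.com', key_port=None)
```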
PoolManager._new_pool | (self, scheme, host, port, request_context=None) |
Create a new :class:`urllib3.connectionpool.ConnectionPool` based on host, port, scheme, and
any additional pool keyword arguments.
If ``request_context`` is provided, it is provided as keyword arguments
to the pool class used. This method is used to actually create the
connection pools handed out by :meth:`connection_from_url` and
companion methods. It is intended to be overridden for customization.
|
Create a new :class:`urllib3.connectionpool.ConnectionPool` based on host, port, scheme, and
any additional pool keyword arguments. | def _new_pool(self, scheme, host, port, request_context=None):
"""
Create a new :class:`urllib3.connectionpool.ConnectionPool` based on host, port, scheme, and
any additional pool keyword arguments.
If ``request_context`` is provided, it is provided as keyword arguments
to the pool class used. This method is used to actually create the
connection pools handed out by :meth:`connection_from_url` and
companion methods. It is intended to be overridden for customization.
"""
pool_cls = self.pool_classes_by_scheme[scheme]
if request_context is None:
request_context = self.connection_pool_kw.copy()
# Although the context has everything necessary to create the pool,
# this function has historically only used the scheme, host, and port
# in the positional args. When an API change is acceptable these can
# be removed.
for key in ("scheme", "host", "port"):
request_context.pop(key, None)
if scheme == "http":
for kw in SSL_KEYWORDS:
request_context.pop(kw, None)
return pool_cls(host, port, **request_context) | [
"def",
"_new_pool",
"(",
"self",
",",
"scheme",
",",
"host",
",",
"port",
",",
"request_context",
"=",
"None",
")",
":",
"pool_cls",
"=",
"self",
".",
"pool_classes_by_scheme",
"[",
"scheme",
"]",
"if",
"request_context",
"is",
"None",
":",
"request_context",
"=",
"self",
".",
"connection_pool_kw",
".",
"copy",
"(",
")",
"# Although the context has everything necessary to create the pool,",
"# this function has historically only used the scheme, host, and port",
"# in the positional args. When an API change is acceptable these can",
"# be removed.",
"for",
"key",
"in",
"(",
"\"scheme\"",
",",
"\"host\"",
",",
"\"port\"",
")",
":",
"request_context",
".",
"pop",
"(",
"key",
",",
"None",
")",
"if",
"scheme",
"==",
"\"http\"",
":",
"for",
"kw",
"in",
"SSL_KEYWORDS",
":",
"request_context",
".",
"pop",
"(",
"kw",
",",
"None",
")",
"return",
"pool_cls",
"(",
"host",
",",
"port",
",",
"*",
"*",
"request_context",
")"
] | [
187,
4
] | [
212,
54
] | python | en | ['en', 'error', 'th'] | False |
PoolManager.clear | (self) |
Empty our store of pools and direct them all to close.
This will not affect in-flight connections, but they will not be
re-used after completion.
|
Empty our store of pools and direct them all to close. | def clear(self):
"""
Empty our store of pools and direct them all to close.
This will not affect in-flight connections, but they will not be
re-used after completion.
"""
self.pools.clear() | [
"def",
"clear",
"(",
"self",
")",
":",
"self",
".",
"pools",
".",
"clear",
"(",
")"
] | [
214,
4
] | [
221,
26
] | python | en | ['en', 'error', 'th'] | False |
PoolManager.connection_from_host | (self, host, port=None, scheme="http", pool_kwargs=None) |
Get a :class:`urllib3.connectionpool.ConnectionPool` based on the host, port, and scheme.
If ``port`` isn't given, it will be derived from the ``scheme`` using
``urllib3.connectionpool.port_by_scheme``. If ``pool_kwargs`` is
provided, it is merged with the instance's ``connection_pool_kw``
variable and used to create the new connection pool, if one is
needed.
|
Get a :class:`urllib3.connectionpool.ConnectionPool` based on the host, port, and scheme. | def connection_from_host(self, host, port=None, scheme="http", pool_kwargs=None):
"""
Get a :class:`urllib3.connectionpool.ConnectionPool` based on the host, port, and scheme.
If ``port`` isn't given, it will be derived from the ``scheme`` using
``urllib3.connectionpool.port_by_scheme``. If ``pool_kwargs`` is
provided, it is merged with the instance's ``connection_pool_kw``
variable and used to create the new connection pool, if one is
needed.
"""
if not host:
raise LocationValueError("No host specified.")
request_context = self._merge_pool_kwargs(pool_kwargs)
request_context["scheme"] = scheme or "http"
if not port:
port = port_by_scheme.get(request_context["scheme"].lower(), 80)
request_context["port"] = port
request_context["host"] = host
return self.connection_from_context(request_context) | [
"def",
"connection_from_host",
"(",
"self",
",",
"host",
",",
"port",
"=",
"None",
",",
"scheme",
"=",
"\"http\"",
",",
"pool_kwargs",
"=",
"None",
")",
":",
"if",
"not",
"host",
":",
"raise",
"LocationValueError",
"(",
"\"No host specified.\"",
")",
"request_context",
"=",
"self",
".",
"_merge_pool_kwargs",
"(",
"pool_kwargs",
")",
"request_context",
"[",
"\"scheme\"",
"]",
"=",
"scheme",
"or",
"\"http\"",
"if",
"not",
"port",
":",
"port",
"=",
"port_by_scheme",
".",
"get",
"(",
"request_context",
"[",
"\"scheme\"",
"]",
".",
"lower",
"(",
")",
",",
"80",
")",
"request_context",
"[",
"\"port\"",
"]",
"=",
"port",
"request_context",
"[",
"\"host\"",
"]",
"=",
"host",
"return",
"self",
".",
"connection_from_context",
"(",
"request_context",
")"
] | [
223,
4
] | [
244,
60
] | python | en | ['en', 'error', 'th'] | False |
PoolManager.connection_from_context | (self, request_context) |
Get a :class:`urllib3.connectionpool.ConnectionPool` based on the request context.
``request_context`` must at least contain the ``scheme`` key and its
value must be a key in ``key_fn_by_scheme`` instance variable.
|
Get a :class:`urllib3.connectionpool.ConnectionPool` based on the request context. | def connection_from_context(self, request_context):
"""
Get a :class:`urllib3.connectionpool.ConnectionPool` based on the request context.
``request_context`` must at least contain the ``scheme`` key and its
value must be a key in ``key_fn_by_scheme`` instance variable.
"""
scheme = request_context["scheme"].lower()
pool_key_constructor = self.key_fn_by_scheme.get(scheme)
if not pool_key_constructor:
raise URLSchemeUnknown(scheme)
pool_key = pool_key_constructor(request_context)
return self.connection_from_pool_key(pool_key, request_context=request_context) | [
"def",
"connection_from_context",
"(",
"self",
",",
"request_context",
")",
":",
"scheme",
"=",
"request_context",
"[",
"\"scheme\"",
"]",
".",
"lower",
"(",
")",
"pool_key_constructor",
"=",
"self",
".",
"key_fn_by_scheme",
".",
"get",
"(",
"scheme",
")",
"if",
"not",
"pool_key_constructor",
":",
"raise",
"URLSchemeUnknown",
"(",
"scheme",
")",
"pool_key",
"=",
"pool_key_constructor",
"(",
"request_context",
")",
"return",
"self",
".",
"connection_from_pool_key",
"(",
"pool_key",
",",
"request_context",
"=",
"request_context",
")"
] | [
246,
4
] | [
259,
87
] | python | en | ['en', 'error', 'th'] | False |
PoolManager.connection_from_pool_key | (self, pool_key, request_context=None) |
Get a :class:`urllib3.connectionpool.ConnectionPool` based on the provided pool key.
``pool_key`` should be a namedtuple that only contains immutable
objects. At a minimum it must have the ``scheme``, ``host``, and
``port`` fields.
|
Get a :class:`urllib3.connectionpool.ConnectionPool` based on the provided pool key. | def connection_from_pool_key(self, pool_key, request_context=None):
"""
Get a :class:`urllib3.connectionpool.ConnectionPool` based on the provided pool key.
``pool_key`` should be a namedtuple that only contains immutable
objects. At a minimum it must have the ``scheme``, ``host``, and
``port`` fields.
"""
with self.pools.lock:
# If the scheme, host, or port doesn't match existing open
# connections, open a new ConnectionPool.
pool = self.pools.get(pool_key)
if pool:
return pool
# Make a fresh ConnectionPool of the desired type
scheme = request_context["scheme"]
host = request_context["host"]
port = request_context["port"]
pool = self._new_pool(scheme, host, port, request_context=request_context)
self.pools[pool_key] = pool
return pool | [
"def",
"connection_from_pool_key",
"(",
"self",
",",
"pool_key",
",",
"request_context",
"=",
"None",
")",
":",
"with",
"self",
".",
"pools",
".",
"lock",
":",
"# If the scheme, host, or port doesn't match existing open",
"# connections, open a new ConnectionPool.",
"pool",
"=",
"self",
".",
"pools",
".",
"get",
"(",
"pool_key",
")",
"if",
"pool",
":",
"return",
"pool",
"# Make a fresh ConnectionPool of the desired type",
"scheme",
"=",
"request_context",
"[",
"\"scheme\"",
"]",
"host",
"=",
"request_context",
"[",
"\"host\"",
"]",
"port",
"=",
"request_context",
"[",
"\"port\"",
"]",
"pool",
"=",
"self",
".",
"_new_pool",
"(",
"scheme",
",",
"host",
",",
"port",
",",
"request_context",
"=",
"request_context",
")",
"self",
".",
"pools",
"[",
"pool_key",
"]",
"=",
"pool",
"return",
"pool"
] | [
261,
4
] | [
283,
19
] | python | en | ['en', 'error', 'th'] | False |
PoolManager.connection_from_url | (self, url, pool_kwargs=None) |
Similar to :func:`urllib3.connectionpool.connection_from_url`.
If ``pool_kwargs`` is not provided and a new pool needs to be
constructed, ``self.connection_pool_kw`` is used to initialize
the :class:`urllib3.connectionpool.ConnectionPool`. If ``pool_kwargs``
is provided, it is used instead. Note that if a new pool does not
need to be created for the request, the provided ``pool_kwargs`` are
not used.
|
Similar to :func:`urllib3.connectionpool.connection_from_url`. | def connection_from_url(self, url, pool_kwargs=None):
"""
Similar to :func:`urllib3.connectionpool.connection_from_url`.
If ``pool_kwargs`` is not provided and a new pool needs to be
constructed, ``self.connection_pool_kw`` is used to initialize
the :class:`urllib3.connectionpool.ConnectionPool`. If ``pool_kwargs``
is provided, it is used instead. Note that if a new pool does not
need to be created for the request, the provided ``pool_kwargs`` are
not used.
"""
u = parse_url(url)
return self.connection_from_host(
u.host, port=u.port, scheme=u.scheme, pool_kwargs=pool_kwargs
) | [
"def",
"connection_from_url",
"(",
"self",
",",
"url",
",",
"pool_kwargs",
"=",
"None",
")",
":",
"u",
"=",
"parse_url",
"(",
"url",
")",
"return",
"self",
".",
"connection_from_host",
"(",
"u",
".",
"host",
",",
"port",
"=",
"u",
".",
"port",
",",
"scheme",
"=",
"u",
".",
"scheme",
",",
"pool_kwargs",
"=",
"pool_kwargs",
")"
] | [
285,
4
] | [
299,
9
] | python | en | ['en', 'error', 'th'] | False |
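A short sketch of the URL-based variant; the URLs are placeholders. Note that, per the docstring, `pool_kwargs` only takes effect when this call has to construct a new pool.

```python
import urllib3

pm = urllib3.PoolManager()

# Scheme, host, and port are parsed out of the URL; the path plays no role
# in pool selection.
pool = pm.connection_from_url("https://example.org/some/path")

# pool_kwargs is only consulted if a new pool must be created here.
bigger = pm.connection_from_url("https://other.example.org/",
                                pool_kwargs={"maxsize": 16})
```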
PoolManager._merge_pool_kwargs | (self, override) |
Merge a dictionary of override values for self.connection_pool_kw.
This does not modify self.connection_pool_kw and returns a new dict.
Any keys in the override dictionary with a value of ``None`` are
removed from the merged dictionary.
|
Merge a dictionary of override values for self.connection_pool_kw. | def _merge_pool_kwargs(self, override):
"""
Merge a dictionary of override values for self.connection_pool_kw.
This does not modify self.connection_pool_kw and returns a new dict.
Any keys in the override dictionary with a value of ``None`` are
removed from the merged dictionary.
"""
base_pool_kwargs = self.connection_pool_kw.copy()
if override:
for key, value in override.items():
if value is None:
try:
del base_pool_kwargs[key]
except KeyError:
pass
else:
base_pool_kwargs[key] = value
return base_pool_kwargs | [
"def",
"_merge_pool_kwargs",
"(",
"self",
",",
"override",
")",
":",
"base_pool_kwargs",
"=",
"self",
".",
"connection_pool_kw",
".",
"copy",
"(",
")",
"if",
"override",
":",
"for",
"key",
",",
"value",
"in",
"override",
".",
"items",
"(",
")",
":",
"if",
"value",
"is",
"None",
":",
"try",
":",
"del",
"base_pool_kwargs",
"[",
"key",
"]",
"except",
"KeyError",
":",
"pass",
"else",
":",
"base_pool_kwargs",
"[",
"key",
"]",
"=",
"value",
"return",
"base_pool_kwargs"
] | [
301,
4
] | [
319,
31
] | python | en | ['en', 'error', 'th'] | False |
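A standalone restatement of the merge rule above, not a call into the private helper: a `None` value in the override deletes the key, anything else overrides or adds it, and the base dict is never mutated.

```python
base = {"maxsize": 4, "timeout": 5.0}
override = {"timeout": None, "retries": 3}

# Mirrors _merge_pool_kwargs: copy the base, then apply the override.
merged = dict(base)
for key, value in override.items():
    if value is None:
        merged.pop(key, None)   # None removes the key if present
    else:
        merged[key] = value     # anything else overrides or adds

print(merged)  # {'maxsize': 4, 'retries': 3}
```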
PoolManager._proxy_requires_url_absolute_form | (self, parsed_url) |
Indicates if the proxy requires the complete destination URL in the
request. Normally this is only needed when not using an HTTP CONNECT
tunnel.
|
Indicates if the proxy requires the complete destination URL in the
request. Normally this is only needed when not using an HTTP CONNECT
tunnel.
| def _proxy_requires_url_absolute_form(self, parsed_url):
"""
Indicates if the proxy requires the complete destination URL in the
request. Normally this is only needed when not using an HTTP CONNECT
tunnel.
"""
if self.proxy is None:
return False
return not connection_requires_http_tunnel(
self.proxy, self.proxy_config, parsed_url.scheme
) | [
"def",
"_proxy_requires_url_absolute_form",
"(",
"self",
",",
"parsed_url",
")",
":",
"if",
"self",
".",
"proxy",
"is",
"None",
":",
"return",
"False",
"return",
"not",
"connection_requires_http_tunnel",
"(",
"self",
".",
"proxy",
",",
"self",
".",
"proxy_config",
",",
"parsed_url",
".",
"scheme",
")"
] | [
321,
4
] | [
332,
9
] | python | en | ['en', 'error', 'th'] | False |
PoolManager._validate_proxy_scheme_url_selection | (self, url_scheme) |
Validates that we're not attempting to do TLS in TLS connections on
Python 2 or with unsupported SSL implementations.
|
Validates that we're not attempting to do TLS in TLS connections on
Python 2 or with unsupported SSL implementations.
| def _validate_proxy_scheme_url_selection(self, url_scheme):
"""
Validates that we're not attempting to do TLS in TLS connections on
Python 2 or with unsupported SSL implementations.
"""
if self.proxy is None or url_scheme != "https":
return
if self.proxy.scheme != "https":
return
if six.PY2 and not self.proxy_config.use_forwarding_for_https:
raise ProxySchemeUnsupported(
"Contacting HTTPS destinations through HTTPS proxies "
"'via CONNECT tunnels' is not supported in Python 2"
) | [
"def",
"_validate_proxy_scheme_url_selection",
"(",
"self",
",",
"url_scheme",
")",
":",
"if",
"self",
".",
"proxy",
"is",
"None",
"or",
"url_scheme",
"!=",
"\"https\"",
":",
"return",
"if",
"self",
".",
"proxy",
".",
"scheme",
"!=",
"\"https\"",
":",
"return",
"if",
"six",
".",
"PY2",
"and",
"not",
"self",
".",
"proxy_config",
".",
"use_forwarding_for_https",
":",
"raise",
"ProxySchemeUnsupported",
"(",
"\"Contacting HTTPS destinations through HTTPS proxies \"",
"\"'via CONNECT tunnels' is not supported in Python 2\"",
")"
] | [
334,
4
] | [
349,
13
] | python | en | ['en', 'error', 'th'] | False |
PoolManager.urlopen | (self, method, url, redirect=True, **kw) |
Same as :meth:`urllib3.HTTPConnectionPool.urlopen`
with custom cross-host redirect logic and only sends the request-uri
portion of the ``url``.
The given ``url`` parameter must be absolute, such that an appropriate
:class:`urllib3.connectionpool.ConnectionPool` can be chosen for it.
|
Same as :meth:`urllib3.HTTPConnectionPool.urlopen`
with custom cross-host redirect logic and only sends the request-uri
portion of the ``url``. | def urlopen(self, method, url, redirect=True, **kw):
"""
Same as :meth:`urllib3.HTTPConnectionPool.urlopen`
with custom cross-host redirect logic and only sends the request-uri
portion of the ``url``.
The given ``url`` parameter must be absolute, such that an appropriate
:class:`urllib3.connectionpool.ConnectionPool` can be chosen for it.
"""
u = parse_url(url)
self._validate_proxy_scheme_url_selection(u.scheme)
conn = self.connection_from_host(u.host, port=u.port, scheme=u.scheme)
kw["assert_same_host"] = False
kw["redirect"] = False
if "headers" not in kw:
kw["headers"] = self.headers.copy()
if self._proxy_requires_url_absolute_form(u):
response = conn.urlopen(method, url, **kw)
else:
response = conn.urlopen(method, u.request_uri, **kw)
redirect_location = redirect and response.get_redirect_location()
if not redirect_location:
return response
# Support relative URLs for redirecting.
redirect_location = urljoin(url, redirect_location)
# RFC 7231, Section 6.4.4
if response.status == 303:
method = "GET"
retries = kw.get("retries")
if not isinstance(retries, Retry):
retries = Retry.from_int(retries, redirect=redirect)
# Strip headers marked as unsafe to forward to the redirected location.
# Check remove_headers_on_redirect to avoid a potential network call within
# conn.is_same_host() which may use socket.gethostbyname() in the future.
if retries.remove_headers_on_redirect and not conn.is_same_host(
redirect_location
):
headers = list(six.iterkeys(kw["headers"]))
for header in headers:
if header.lower() in retries.remove_headers_on_redirect:
kw["headers"].pop(header, None)
try:
retries = retries.increment(method, url, response=response, _pool=conn)
except MaxRetryError:
if retries.raise_on_redirect:
response.drain_conn()
raise
return response
kw["retries"] = retries
kw["redirect"] = redirect
log.info("Redirecting %s -> %s", url, redirect_location)
response.drain_conn()
return self.urlopen(method, redirect_location, **kw) | [
"def",
"urlopen",
"(",
"self",
",",
"method",
",",
"url",
",",
"redirect",
"=",
"True",
",",
"*",
"*",
"kw",
")",
":",
"u",
"=",
"parse_url",
"(",
"url",
")",
"self",
".",
"_validate_proxy_scheme_url_selection",
"(",
"u",
".",
"scheme",
")",
"conn",
"=",
"self",
".",
"connection_from_host",
"(",
"u",
".",
"host",
",",
"port",
"=",
"u",
".",
"port",
",",
"scheme",
"=",
"u",
".",
"scheme",
")",
"kw",
"[",
"\"assert_same_host\"",
"]",
"=",
"False",
"kw",
"[",
"\"redirect\"",
"]",
"=",
"False",
"if",
"\"headers\"",
"not",
"in",
"kw",
":",
"kw",
"[",
"\"headers\"",
"]",
"=",
"self",
".",
"headers",
".",
"copy",
"(",
")",
"if",
"self",
".",
"_proxy_requires_url_absolute_form",
"(",
"u",
")",
":",
"response",
"=",
"conn",
".",
"urlopen",
"(",
"method",
",",
"url",
",",
"*",
"*",
"kw",
")",
"else",
":",
"response",
"=",
"conn",
".",
"urlopen",
"(",
"method",
",",
"u",
".",
"request_uri",
",",
"*",
"*",
"kw",
")",
"redirect_location",
"=",
"redirect",
"and",
"response",
".",
"get_redirect_location",
"(",
")",
"if",
"not",
"redirect_location",
":",
"return",
"response",
"# Support relative URLs for redirecting.",
"redirect_location",
"=",
"urljoin",
"(",
"url",
",",
"redirect_location",
")",
"# RFC 7231, Section 6.4.4",
"if",
"response",
".",
"status",
"==",
"303",
":",
"method",
"=",
"\"GET\"",
"retries",
"=",
"kw",
".",
"get",
"(",
"\"retries\"",
")",
"if",
"not",
"isinstance",
"(",
"retries",
",",
"Retry",
")",
":",
"retries",
"=",
"Retry",
".",
"from_int",
"(",
"retries",
",",
"redirect",
"=",
"redirect",
")",
"# Strip headers marked as unsafe to forward to the redirected location.",
"# Check remove_headers_on_redirect to avoid a potential network call within",
"# conn.is_same_host() which may use socket.gethostbyname() in the future.",
"if",
"retries",
".",
"remove_headers_on_redirect",
"and",
"not",
"conn",
".",
"is_same_host",
"(",
"redirect_location",
")",
":",
"headers",
"=",
"list",
"(",
"six",
".",
"iterkeys",
"(",
"kw",
"[",
"\"headers\"",
"]",
")",
")",
"for",
"header",
"in",
"headers",
":",
"if",
"header",
".",
"lower",
"(",
")",
"in",
"retries",
".",
"remove_headers_on_redirect",
":",
"kw",
"[",
"\"headers\"",
"]",
".",
"pop",
"(",
"header",
",",
"None",
")",
"try",
":",
"retries",
"=",
"retries",
".",
"increment",
"(",
"method",
",",
"url",
",",
"response",
"=",
"response",
",",
"_pool",
"=",
"conn",
")",
"except",
"MaxRetryError",
":",
"if",
"retries",
".",
"raise_on_redirect",
":",
"response",
".",
"drain_conn",
"(",
")",
"raise",
"return",
"response",
"kw",
"[",
"\"retries\"",
"]",
"=",
"retries",
"kw",
"[",
"\"redirect\"",
"]",
"=",
"redirect",
"log",
".",
"info",
"(",
"\"Redirecting %s -> %s\"",
",",
"url",
",",
"redirect_location",
")",
"response",
".",
"drain_conn",
"(",
")",
"return",
"self",
".",
"urlopen",
"(",
"method",
",",
"redirect_location",
",",
"*",
"*",
"kw",
")"
] | [
351,
4
] | [
416,
60
] | python | en | ['en', 'error', 'th'] | False |
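A minimal sketch of the redirect behaviour described above; the URLs are placeholders and the calls need network access.

```python
import urllib3

pm = urllib3.PoolManager()

# An absolute URL is required; cross-host redirects are followed by recursing
# into urlopen with a fresh pool for the redirect target.
resp = pm.urlopen("GET", "https://example.org/", redirect=True)
print(resp.status)

# With redirect=False, the 3xx response itself is returned to the caller.
resp = pm.urlopen("GET", "https://example.org/old-path", redirect=False)
```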
ProxyManager._set_proxy_headers | (self, url, headers=None) |
Sets headers needed by proxies: specifically, the Accept and Host
headers. Only sets headers not provided by the user.
|
Sets headers needed by proxies: specifically, the Accept and Host
headers. Only sets headers not provided by the user.
| def _set_proxy_headers(self, url, headers=None):
"""
Sets headers needed by proxies: specifically, the Accept and Host
headers. Only sets headers not provided by the user.
"""
headers_ = {"Accept": "*/*"}
netloc = parse_url(url).netloc
if netloc:
headers_["Host"] = netloc
if headers:
headers_.update(headers)
return headers_ | [
"def",
"_set_proxy_headers",
"(",
"self",
",",
"url",
",",
"headers",
"=",
"None",
")",
":",
"headers_",
"=",
"{",
"\"Accept\"",
":",
"\"*/*\"",
"}",
"netloc",
"=",
"parse_url",
"(",
"url",
")",
".",
"netloc",
"if",
"netloc",
":",
"headers_",
"[",
"\"Host\"",
"]",
"=",
"netloc",
"if",
"headers",
":",
"headers_",
".",
"update",
"(",
"headers",
")",
"return",
"headers_"
] | [
506,
4
] | [
519,
23
] | python | en | ['en', 'error', 'th'] | False |
ProxyManager.urlopen | (self, method, url, redirect=True, **kw) | Same as HTTP(S)ConnectionPool.urlopen, ``url`` must be absolute. | Same as HTTP(S)ConnectionPool.urlopen, ``url`` must be absolute. | def urlopen(self, method, url, redirect=True, **kw):
"Same as HTTP(S)ConnectionPool.urlopen, ``url`` must be absolute."
u = parse_url(url)
if not connection_requires_http_tunnel(self.proxy, self.proxy_config, u.scheme):
# For connections using HTTP CONNECT, httplib sets the necessary
# headers on the CONNECT to the proxy. If we're not using CONNECT,
# we'll definitely need to set 'Host' at the very least.
headers = kw.get("headers", self.headers)
kw["headers"] = self._set_proxy_headers(url, headers)
return super(ProxyManager, self).urlopen(method, url, redirect=redirect, **kw) | [
"def",
"urlopen",
"(",
"self",
",",
"method",
",",
"url",
",",
"redirect",
"=",
"True",
",",
"*",
"*",
"kw",
")",
":",
"u",
"=",
"parse_url",
"(",
"url",
")",
"if",
"not",
"connection_requires_http_tunnel",
"(",
"self",
".",
"proxy",
",",
"self",
".",
"proxy_config",
",",
"u",
".",
"scheme",
")",
":",
"# For connections using HTTP CONNECT, httplib sets the necessary",
"# headers on the CONNECT to the proxy. If we're not using CONNECT,",
"# we'll definitely need to set 'Host' at the very least.",
"headers",
"=",
"kw",
".",
"get",
"(",
"\"headers\"",
",",
"self",
".",
"headers",
")",
"kw",
"[",
"\"headers\"",
"]",
"=",
"self",
".",
"_set_proxy_headers",
"(",
"url",
",",
"headers",
")",
"return",
"super",
"(",
"ProxyManager",
",",
"self",
")",
".",
"urlopen",
"(",
"method",
",",
"url",
",",
"redirect",
"=",
"redirect",
",",
"*",
"*",
"kw",
")"
] | [
521,
4
] | [
531,
86
] | python | en | ['en', 'en', 'nl'] | True |
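A sketch of the proxied case; the proxy URL is purely illustrative. For a plain-HTTP destination no CONNECT tunnel is used, so the absolute URL and a `Host` header are sent to the proxy itself, exactly as the branch above arranges.

```python
import urllib3

# Hypothetical proxy address; substitute your own.
pm = urllib3.ProxyManager("http://proxy.example.com:3128/")

# Plain-HTTP destination: absolute-form URL plus Accept/Host headers
# are forwarded to the proxy.
resp = pm.urlopen("GET", "http://example.org/")
print(resp.status)
```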
BaseUserManager.normalize_email | (cls, email) |
Normalize the email address by lowercasing the domain part of it.
|
Normalize the email address by lowercasing the domain part of it.
| def normalize_email(cls, email):
"""
Normalize the email address by lowercasing the domain part of it.
"""
email = email or ''
try:
email_name, domain_part = email.strip().rsplit('@', 1)
except ValueError:
pass
else:
email = email_name + '@' + domain_part.lower()
return email | [
"def",
"normalize_email",
"(",
"cls",
",",
"email",
")",
":",
"email",
"=",
"email",
"or",
"''",
"try",
":",
"email_name",
",",
"domain_part",
"=",
"email",
".",
"strip",
"(",
")",
".",
"rsplit",
"(",
"'@'",
",",
"1",
")",
"except",
"ValueError",
":",
"pass",
"else",
":",
"email",
"=",
"email_name",
"+",
"'@'",
"+",
"domain_part",
".",
"lower",
"(",
")",
"return",
"email"
] | [
19,
4
] | [
30,
20
] | python | en | ['en', 'error', 'th'] | False |
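A quick sketch of the normalization rule, assuming a configured Django project (the import needs `django.setup()` or equivalent). Only the domain part is lowercased; the local part is preserved.

```python
# Requires a configured Django project for the import to succeed.
from django.contrib.auth.models import BaseUserManager

print(BaseUserManager.normalize_email("Jane.Doe@EXAMPLE.COM"))
# -> Jane.Doe@example.com  (local part untouched, domain lowercased)

# Malformed input with no '@' is passed through unchanged rather than raising.
print(BaseUserManager.normalize_email("not-an-email"))  # -> not-an-email
```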
BaseUserManager.make_random_password | (self, length=10,
allowed_chars='abcdefghjkmnpqrstuvwxyz'
'ABCDEFGHJKLMNPQRSTUVWXYZ'
'23456789') |
Generate a random password with the given length and given
allowed_chars. The default value of allowed_chars does not have "I" or
"O" or letters and digits that look similar -- just to avoid confusion.
|
Generate a random password with the given length and given
allowed_chars. The default value of allowed_chars does not have "I" or
"O" or letters and digits that look similar -- just to avoid confusion.
| def make_random_password(self, length=10,
allowed_chars='abcdefghjkmnpqrstuvwxyz'
'ABCDEFGHJKLMNPQRSTUVWXYZ'
'23456789'):
"""
Generate a random password with the given length and given
allowed_chars. The default value of allowed_chars does not have "I" or
"O" or letters and digits that look similar -- just to avoid confusion.
"""
return get_random_string(length, allowed_chars) | [
"def",
"make_random_password",
"(",
"self",
",",
"length",
"=",
"10",
",",
"allowed_chars",
"=",
"'abcdefghjkmnpqrstuvwxyz'",
"'ABCDEFGHJKLMNPQRSTUVWXYZ'",
"'23456789'",
")",
":",
"return",
"get_random_string",
"(",
"length",
",",
"allowed_chars",
")"
] | [
32,
4
] | [
41,
55
] | python | en | ['en', 'error', 'th'] | False |
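A usage sketch, again assuming a configured Django project. The default alphabet deliberately omits look-alike characters such as `i`, `l`, `o`, `I`, `O`, `0`, and `1`; the printed value is illustrative.

```python
from django.contrib.auth.models import BaseUserManager

manager = BaseUserManager()
temp_password = manager.make_random_password(length=12)
print(temp_password)  # e.g. 'mk3T9fWqPz47' -- no ambiguous characters
```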
AbstractBaseUser.get_username | (self) | Return the username for this User. | Return the username for this User. | def get_username(self):
"""Return the username for this User."""
return getattr(self, self.USERNAME_FIELD) | [
"def",
"get_username",
"(",
"self",
")",
":",
"return",
"getattr",
"(",
"self",
",",
"self",
".",
"USERNAME_FIELD",
")"
] | [
71,
4
] | [
73,
49
] | python | en | ['en', 'en', 'en'] | True |
AbstractBaseUser.is_anonymous | (self) |
Always return False. This is a way of comparing User objects to
anonymous users.
|
Always return False. This is a way of comparing User objects to
anonymous users.
| def is_anonymous(self):
"""
Always return False. This is a way of comparing User objects to
anonymous users.
"""
return False | [
"def",
"is_anonymous",
"(",
"self",
")",
":",
"return",
"False"
] | [
82,
4
] | [
87,
20
] | python | en | ['en', 'error', 'th'] | False |
AbstractBaseUser.is_authenticated | (self) |
Always return True. This is a way to tell if the user has been
authenticated in templates.
|
Always return True. This is a way to tell if the user has been
authenticated in templates.
| def is_authenticated(self):
"""
Always return True. This is a way to tell if the user has been
authenticated in templates.
"""
return True | [
"def",
"is_authenticated",
"(",
"self",
")",
":",
"return",
"True"
] | [
90,
4
] | [
95,
19
] | python | en | ['en', 'error', 'th'] | False |
AbstractBaseUser.check_password | (self, raw_password) |
Return a boolean of whether the raw_password was correct. Handles
hashing formats behind the scenes.
|
Return a boolean of whether the raw_password was correct. Handles
hashing formats behind the scenes.
| def check_password(self, raw_password):
"""
Return a boolean of whether the raw_password was correct. Handles
hashing formats behind the scenes.
"""
def setter(raw_password):
self.set_password(raw_password)
# Password hash upgrades shouldn't be considered password changes.
self._password = None
self.save(update_fields=["password"])
return check_password(raw_password, self.password, setter) | [
"def",
"check_password",
"(",
"self",
",",
"raw_password",
")",
":",
"def",
"setter",
"(",
"raw_password",
")",
":",
"self",
".",
"set_password",
"(",
"raw_password",
")",
"# Password hash upgrades shouldn't be considered password changes.",
"self",
".",
"_password",
"=",
"None",
"self",
".",
"save",
"(",
"update_fields",
"=",
"[",
"\"password\"",
"]",
")",
"return",
"check_password",
"(",
"raw_password",
",",
"self",
".",
"password",
",",
"setter",
")"
] | [
101,
4
] | [
111,
66
] | python | en | ['en', 'error', 'th'] | False |
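A sketch of the round trip, assuming a configured Django project with the default user model. `set_password` stores a salted hash, never the raw value, and `check_password` verifies against it.

```python
from django.contrib.auth import get_user_model

User = get_user_model()
user = User(username="alice")        # unsaved instance is enough here
user.set_password("s3cret")          # stores a salted hash on the instance

print(user.check_password("s3cret"))  # True
print(user.check_password("wrong"))   # False
```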
AbstractBaseUser.has_usable_password | (self) |
Return False if set_unusable_password() has been called for this user.
|
Return False if set_unusable_password() has been called for this user.
| def has_usable_password(self):
"""
Return False if set_unusable_password() has been called for this user.
"""
return is_password_usable(self.password) | [
"def",
"has_usable_password",
"(",
"self",
")",
":",
"return",
"is_password_usable",
"(",
"self",
".",
"password",
")"
] | [
117,
4
] | [
121,
48
] | python | en | ['en', 'error', 'th'] | False |
AbstractBaseUser.get_session_auth_hash | (self) |
Return an HMAC of the password field.
|
Return an HMAC of the password field.
| def get_session_auth_hash(self):
"""
Return an HMAC of the password field.
"""
key_salt = "django.contrib.auth.models.AbstractBaseUser.get_session_auth_hash"
return salted_hmac(
key_salt,
self.password,
# RemovedInDjango40Warning: when the deprecation ends, replace
# with:
# algorithm='sha256',
algorithm=settings.DEFAULT_HASHING_ALGORITHM,
).hexdigest() | [
"def",
"get_session_auth_hash",
"(",
"self",
")",
":",
"key_salt",
"=",
"\"django.contrib.auth.models.AbstractBaseUser.get_session_auth_hash\"",
"return",
"salted_hmac",
"(",
"key_salt",
",",
"self",
".",
"password",
",",
"# RemovedInDjango40Warning: when the deprecation ends, replace",
"# with:",
"# algorithm='sha256',",
"algorithm",
"=",
"settings",
".",
"DEFAULT_HASHING_ALGORITHM",
",",
")",
".",
"hexdigest",
"(",
")"
] | [
128,
4
] | [
140,
21
] | python | en | ['en', 'error', 'th'] | False |
gen_ccl_spectra | (cosmo, n_of_zs, l_max=1001) |
Generates the theoretical weak lensing power spectra for a given cosmology.
:param cosmo: 1D array of cosmological parameters ordered as (Om, Ob, h, ns, sigma8, w0)
:param n_of_zs: 3D array of redshift distributions. The first axis enumerates the different distributions, the
second the redshift values and the last the actual number counts
:param l_max: maximum l value to calculate
:return: The theoretical power spectra of the survey and all cross spectra, the ordering is
(11, 12, ..., 1n, 22, .., 2n, ..., nn)
|
Generates the theoretical weak lensing power spectra for a given cosmology.
:param cosmo: 1D array of cosmological parameters ordered as (Om, Ob, h, ns, sigma8, w0)
:param n_of_zs: 3D array of redshift distributions. The first axis enumerates the different distributions, the
second the redshift values and the last the actual number counts
:param l_max: maximum l value to calculate
:return: The theoretical power spectra of the survey and all cross spectra, the ordering is
(11, 12, ..., 1n, 22, .., 2n, ..., nn)
| def gen_ccl_spectra(cosmo, n_of_zs, l_max=1001):
"""
Generates the theoretical weak lensing power spectra for a given cosmology.
:param cosmo: 1D array of cosmological parameters ordered as (Om, Ob, h, ns, sigma8, w0)
:param n_of_zs: 3D array of redshift distributions. The first axis enumerates the different distributions, the
second the redshift values and the last the actual number counts
:param l_max: maximum l value to calculate
:return: The theoretical power spectra of the survey and all cross spectra, the ordering is
(11, 12, ..., 1n, 22, .., 2n, ..., nn)
"""
# cosmo needs to be double
cosmo = cosmo.astype(np.float64)
# get the ccl
cosmo = ccl.Cosmology(Omega_c=cosmo[0] - cosmo[1],
Omega_b=cosmo[1],
h=cosmo[2],
n_s=cosmo[3],
sigma8=cosmo[4],
w0=cosmo[5])
# Create objects to represent tracers of the weak lensing signal with this
# number density (with has_intrinsic_alignment=False)
tracer = []
for i in range(4):
tracer.append(ccl.WeakLensingTracer(cosmo, dndz=(n_of_zs[i][:, 0], n_of_zs[i][:, 1])))
# Calculate the angular cross-spectrum of the two tracers as a function of ell
print("Calculating spectra...", flush=True)
ell = np.arange(2, l_max)
all_cl = []
for i in range(4):
for j in range(4):
if j >= i:
cl = ccl.angular_cl(cosmo, tracer[i], tracer[j], ell)
# append zeros
cl = np.concatenate([np.zeros(2), cl])
# append no binning
all_cl.append(cl)
# stack
cl = np.stack(all_cl, axis=0)
return cl | [
"def",
"gen_ccl_spectra",
"(",
"cosmo",
",",
"n_of_zs",
",",
"l_max",
"=",
"1001",
")",
":",
"# cosmo needs to be double",
"cosmo",
"=",
"cosmo",
".",
"astype",
"(",
"np",
".",
"float64",
")",
"# get the ccl",
"cosmo",
"=",
"ccl",
".",
"Cosmology",
"(",
"Omega_c",
"=",
"cosmo",
"[",
"0",
"]",
"-",
"cosmo",
"[",
"1",
"]",
",",
"Omega_b",
"=",
"cosmo",
"[",
"1",
"]",
",",
"h",
"=",
"cosmo",
"[",
"2",
"]",
",",
"n_s",
"=",
"cosmo",
"[",
"3",
"]",
",",
"sigma8",
"=",
"cosmo",
"[",
"4",
"]",
",",
"w0",
"=",
"cosmo",
"[",
"5",
"]",
")",
"# Create objects to represent tracers of the weak lensing signal with this",
"# number density (with has_intrinsic_alignment=False)",
"tracer",
"=",
"[",
"]",
"for",
"i",
"in",
"range",
"(",
"4",
")",
":",
"tracer",
".",
"append",
"(",
"ccl",
".",
"WeakLensingTracer",
"(",
"cosmo",
",",
"dndz",
"=",
"(",
"n_of_zs",
"[",
"i",
"]",
"[",
":",
",",
"0",
"]",
",",
"n_of_zs",
"[",
"i",
"]",
"[",
":",
",",
"1",
"]",
")",
")",
")",
"# Calculate the angular cross-spectrum of the two tracers as a function of ell",
"print",
"(",
"\"Calculating spectra...\"",
",",
"flush",
"=",
"True",
")",
"ell",
"=",
"np",
".",
"arange",
"(",
"2",
",",
"l_max",
")",
"all_cl",
"=",
"[",
"]",
"for",
"i",
"in",
"range",
"(",
"4",
")",
":",
"for",
"j",
"in",
"range",
"(",
"4",
")",
":",
"if",
"j",
">=",
"i",
":",
"cl",
"=",
"ccl",
".",
"angular_cl",
"(",
"cosmo",
",",
"tracer",
"[",
"i",
"]",
",",
"tracer",
"[",
"j",
"]",
",",
"ell",
")",
"# append zeros",
"cl",
"=",
"np",
".",
"concatenate",
"(",
"[",
"np",
".",
"zeros",
"(",
"2",
")",
",",
"cl",
"]",
")",
"# append no binning",
"all_cl",
".",
"append",
"(",
"cl",
")",
"# stack",
"cl",
"=",
"np",
".",
"stack",
"(",
"all_cl",
",",
"axis",
"=",
"0",
")",
"return",
"cl"
] | [
6,
0
] | [
49,
13
] | python | en | ['en', 'error', 'th'] | False |
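A minimal sketch of calling `gen_ccl_spectra`, assuming `pyccl` (imported as `ccl` above) and numpy are available; the fiducial parameter values and the Gaussian bin placements are illustrative only.

```python
import numpy as np

# Fiducial cosmology ordered as (Om, Ob, h, ns, sigma8, w0); values illustrative.
cosmo = np.array([0.26, 0.0493, 0.6736, 0.9649, 0.84, -1.0])

# Four Gaussian tomographic bins as (z, n(z)) columns, matching the
# n_of_zs[i][:, 0] / n_of_zs[i][:, 1] indexing used in the function.
z = np.linspace(0.01, 3.0, 300)
n_of_zs = np.stack(
    [np.stack([z, np.exp(-0.5 * ((z - mu) / 0.15) ** 2)], axis=1)
     for mu in (0.5, 0.9, 1.3, 1.7)]
)

cl = gen_ccl_spectra(cosmo, n_of_zs, l_max=1001)
print(cl.shape)  # (10, 1001): 4 auto- plus 6 cross-spectra, ell = 0..1000
```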
create_GRF_samples | (spectra, data_mask, data_mask_pad, seed, pixwin=False, fwhm=0.0, lmax=1000,
return_fidu_spec=False, verbose=0) |
Creates a sample of GRF maps of given spectra that can be saved for later training.
:param spectra: A list of power spectra used to generate the GRF samples, if the shape of the spectra is [n_bins, N]
a tomographic sample is generated and the returned samples have the shape [N_pix, n_bins]
:param data_mask: a boolean array (or int) representing the observational mask on the sphere
:param data_mask_pad: a boolean mask of the observation including padding
:param seed: the random seed for ALL samples
:param pixwin: Convolve alm with pixel window function (default: False)
:param fwhm: Full width half maximum of the Gaussian smoothing applied to each sample, defaults to no smoothing
:param lmax: The maximum l used to generate the maps (default: 1000)
:param return_fidu_spec: return the numerically measured Cl of the map generated with spectra[0]
:param verbose: verbosity parameter forwarded to synfast, defaults to no output
:return: A list of maps the same length as spectra, each containing sum(data_mask_pad) entries; if return_fidu_spec is
True, the power and cross spectra of the first entry in spectra will be returned.
|
Creates a sample of GRF maps of given spectra that can be saved for later training.
:param spectra: A list of power spectra used to generate the GRF samples, if the shape of the spectra is [n_bins, N]
a tomographic sample is generated and the returned samples have the shape [N_pix, n_bins]
:param data_mask: a boolean array (or int) representing the observational mask on the sphere
:param data_mask_pad: a boolean mask of the observation including padding
:param seed: the random seed for ALL samples
:param pixwin: Convolve alm with pixel window function (default: False)
:param fwhm: Full width half maximum of the Gaussian smoothing applied to each sample, defaults to no smoothing
:param lmax: The maximum l used to generate the maps (default: 1000)
:param return_fidu_spec: return the numerically measured Cl of the map generated with spectra[0]
:param verbose: verbosity parameter forwarded to synfast, defaults to no output
:return: A list of maps the same length as spectra, each containing sum(data_mask_pad) entries; if return_fidu_spec is
True, the power and cross spectra of the first entry in spectra will be returned.
| def create_GRF_samples(spectra, data_mask, data_mask_pad, seed, pixwin=False, fwhm=0.0, lmax=1000,
return_fidu_spec=False, verbose=0):
"""
Creates a sample of GRF maps of given spectra that can be saved for later training.
:param spectra: A list of power spectra used to generate the GRF samples, if the shape of the spectra is [n_bins, N]
a tomographic sample is generated and the returned samples have the shape [N_pix, n_bins]
:param data_mask: a boolean array (or int) representing the observational mask on the sphere
:param data_mask_pad: a boolean mask of the observation including padding
:param seed: the random seed for ALL samples
:param pixwin: Convolve alm with pixel window function (default: False)
:param fwhm: Full width half maximum of the Gaussian smoothing applied to each sample, defaults to no smoothing
:param lmax: The maximum l used to generate the maps (default: 1000)
:param return_fidu_spec: return the numerically measured Cl of the map generated with spectra[0]
:param verbose: verbosity parameter forwarded to synfast, defaults to no output
:return: A list of maps the same length as spectra, each containing sum(data_mask_pad) entries; if return_fidu_spec is
True, the power and cross spectra of the first entry in spectra will be returned.
"""
# get the nside
nside = hp.npix2nside(len(data_mask))
# invert the data mask
inv_mask = np.logical_not(data_mask)
ext_indices = np.arange(len(inv_mask))[data_mask_pad]
# cycle
maps_out = []
for num, spectrum in enumerate(spectra):
# set seed
np.random.seed(seed)
# non tomographic case
if spectrum.ndim == 1:
# get map
m = hp.synfast(cls=spectrum, fwhm=fwhm, nside=nside, pixwin=pixwin, lmax=lmax, verbose=verbose)
# reorder
m = hp.reorder(map_in=m, r2n=True)
# set surrounding to zero
m[inv_mask] = 0.0
# get the measurement if necessary
if return_fidu_spec and num == 0:
m_ring = hp.reorder(map_in=m, n2r=True)
cl_fidu = hp.anafast(m_ring)
# append only wanted values
maps_out.append(m[ext_indices])
# tomographic case
else:
# get the number of bins
n_bins = (-1 + int(np.sqrt(1 + 8 * len(spectrum)))) // 2
if n_bins * (n_bins + 1) // 2 != len(spectrum):
raise ValueError("The number of spectra does not seem to be valid!")
# generate the maps following Raphael's paper
T_ij_s = []
for i in range(n_bins):
for j in range(n_bins):
if i == j:
index = utils.ij_to_list_index(i, j, n_bins)
T_ij = spectrum[index].copy()
for k in range(j):
index = utils.ij_to_list_index(k, i, n_bins)
T_ij -= T_ij_s[index] ** 2
T_ij_s.append(np.sqrt(T_ij))
elif j > i:
index = utils.ij_to_list_index(i, j, n_bins)
T_ij = spectrum[index].copy()
for k in range(i):
index_1 = utils.ij_to_list_index(k, j, n_bins)
index_2 = utils.ij_to_list_index(k, i, n_bins)
T_ij -= T_ij_s[index_1] * T_ij_s[index_2]
index = utils.ij_to_list_index(i, i, n_bins)
# set division through 0 to 0
T_ij = np.divide(T_ij, T_ij_s[index], out=np.zeros_like(T_ij),
where=T_ij_s[index] != 0)
T_ij_s.append(T_ij)
# now we generate the maps with the right states
T_ij_maps = []
counter = 0
for i in range(n_bins):
current_state = np.random.get_state()
for j in range(n_bins):
if j >= i:
np.random.set_state(current_state)
m = hp.synfast(cls=T_ij_s[counter] ** 2, fwhm=fwhm, nside=nside,
pixwin=pixwin, lmax=lmax, verbose=verbose)
T_ij_maps.append(m)
counter += 1
# list for maps
maps = []
# and now the output maps
for i in range(n_bins):
m = np.zeros(hp.nside2npix(nside=nside))
for j in range(n_bins):
if i >= j:
index = utils.ij_to_list_index(j, i, n_bins)
m += T_ij_maps[index]
# reorder
m = hp.reorder(map_in=m, r2n=True)
# set surrounding to zero
m[inv_mask] = 0.0
# append only wanted values
maps.append(m[ext_indices])
# calculate all spectra and cross spectra
if num == 0 and return_fidu_spec:
# get the alms
alms = []
for i in range(n_bins):
# make the map
m = np.zeros(hp.nside2npix(nside=nside))
m[ext_indices] = maps[i]
# reorder and alm
m = hp.reorder(m, n2r=True)
alms.append(hp.map2alm(m))
# get the cl
cl_fidu = []
for i in range(n_bins):
for j in range(n_bins):
if j >= i:
cl_fidu.append(hp.alm2cl(alms1=alms[i], alms2=alms[j]))
# stack
cl_fidu = np.stack(cl_fidu, axis=-1)
# stack
maps_out.append(np.stack(maps, axis=-1))
# return the maps
if return_fidu_spec:
return maps_out, cl_fidu
else:
return maps_out | [
"def",
"create_GRF_samples",
"(",
"spectra",
",",
"data_mask",
",",
"data_mask_pad",
",",
"seed",
",",
"pixwin",
"=",
"False",
",",
"fwhm",
"=",
"0.0",
",",
"lmax",
"=",
"1000",
",",
"return_fidu_spec",
"=",
"False",
",",
"verbose",
"=",
"0",
")",
":",
"# get the nside",
"nside",
"=",
"hp",
".",
"npix2nside",
"(",
"len",
"(",
"data_mask",
")",
")",
"# invert the data mask",
"inv_mask",
"=",
"np",
".",
"logical_not",
"(",
"data_mask",
")",
"ext_indices",
"=",
"np",
".",
"arange",
"(",
"len",
"(",
"inv_mask",
")",
")",
"[",
"data_mask_pad",
"]",
"# cycle",
"maps_out",
"=",
"[",
"]",
"for",
"num",
",",
"spectrum",
"in",
"enumerate",
"(",
"spectra",
")",
":",
"# set seed",
"np",
".",
"random",
".",
"seed",
"(",
"seed",
")",
"# non tomographic case",
"if",
"spectrum",
".",
"ndim",
"==",
"1",
":",
"# get map",
"m",
"=",
"hp",
".",
"synfast",
"(",
"cls",
"=",
"spectrum",
",",
"fwhm",
"=",
"fwhm",
",",
"nside",
"=",
"nside",
",",
"pixwin",
"=",
"pixwin",
",",
"lmax",
"=",
"lmax",
",",
"verbose",
"=",
"verbose",
")",
"# reorder",
"m",
"=",
"hp",
".",
"reorder",
"(",
"map_in",
"=",
"m",
",",
"r2n",
"=",
"True",
")",
"# set sourrounding to zero",
"m",
"[",
"inv_mask",
"]",
"=",
"0.0",
"# get the measurement if necessary",
"if",
"return_fidu_spec",
"and",
"num",
"==",
"0",
":",
"m_ring",
"=",
"hp",
".",
"reorder",
"(",
"map_in",
"=",
"m",
",",
"n2r",
"=",
"True",
")",
"cl_fidu",
"=",
"hp",
".",
"anafast",
"(",
"m_ring",
")",
"# append only wanted values",
"maps_out",
".",
"append",
"(",
"m",
"[",
"ext_indices",
"]",
")",
"# tomographic case",
"else",
":",
"# get the number of bins",
"n_bins",
"=",
"(",
"-",
"1",
"+",
"int",
"(",
"np",
".",
"sqrt",
"(",
"1",
"+",
"8",
"*",
"len",
"(",
"spectrum",
")",
")",
")",
")",
"//",
"2",
"if",
"n_bins",
"*",
"(",
"n_bins",
"+",
"1",
")",
"//",
"2",
"!=",
"len",
"(",
"spectrum",
")",
":",
"raise",
"ValueError",
"(",
"\"The number of spectra does not seem to be valid!\"",
")",
"# generate the maps following Raphael's paper",
"T_ij_s",
"=",
"[",
"]",
"for",
"i",
"in",
"range",
"(",
"n_bins",
")",
":",
"for",
"j",
"in",
"range",
"(",
"n_bins",
")",
":",
"if",
"i",
"==",
"j",
":",
"index",
"=",
"utils",
".",
"ij_to_list_index",
"(",
"i",
",",
"j",
",",
"n_bins",
")",
"T_ij",
"=",
"spectrum",
"[",
"index",
"]",
".",
"copy",
"(",
")",
"for",
"k",
"in",
"range",
"(",
"j",
")",
":",
"index",
"=",
"utils",
".",
"ij_to_list_index",
"(",
"k",
",",
"i",
",",
"n_bins",
")",
"T_ij",
"-=",
"T_ij_s",
"[",
"index",
"]",
"**",
"2",
"T_ij_s",
".",
"append",
"(",
"np",
".",
"sqrt",
"(",
"T_ij",
")",
")",
"elif",
"j",
">",
"i",
":",
"index",
"=",
"utils",
".",
"ij_to_list_index",
"(",
"i",
",",
"j",
",",
"n_bins",
")",
"T_ij",
"=",
"spectrum",
"[",
"index",
"]",
".",
"copy",
"(",
")",
"for",
"k",
"in",
"range",
"(",
"i",
")",
":",
"index_1",
"=",
"utils",
".",
"ij_to_list_index",
"(",
"k",
",",
"j",
",",
"n_bins",
")",
"index_2",
"=",
"utils",
".",
"ij_to_list_index",
"(",
"k",
",",
"i",
",",
"n_bins",
")",
"T_ij",
"-=",
"T_ij_s",
"[",
"index_1",
"]",
"*",
"T_ij_s",
"[",
"index_2",
"]",
"index",
"=",
"utils",
".",
"ij_to_list_index",
"(",
"i",
",",
"i",
",",
"n_bins",
")",
"# set division through 0 to 0",
"T_ij",
"=",
"np",
".",
"divide",
"(",
"T_ij",
",",
"T_ij_s",
"[",
"index",
"]",
",",
"out",
"=",
"np",
".",
"zeros_like",
"(",
"T_ij",
")",
",",
"where",
"=",
"T_ij_s",
"[",
"index",
"]",
"!=",
"0",
")",
"T_ij_s",
".",
"append",
"(",
"T_ij",
")",
"# now we generate the maps with the right states",
"T_ij_maps",
"=",
"[",
"]",
"counter",
"=",
"0",
"for",
"i",
"in",
"range",
"(",
"n_bins",
")",
":",
"current_state",
"=",
"np",
".",
"random",
".",
"get_state",
"(",
")",
"for",
"j",
"in",
"range",
"(",
"n_bins",
")",
":",
"if",
"j",
">=",
"i",
":",
"np",
".",
"random",
".",
"set_state",
"(",
"current_state",
")",
"m",
"=",
"hp",
".",
"synfast",
"(",
"cls",
"=",
"T_ij_s",
"[",
"counter",
"]",
"**",
"2",
",",
"fwhm",
"=",
"fwhm",
",",
"nside",
"=",
"nside",
",",
"pixwin",
"=",
"pixwin",
",",
"lmax",
"=",
"lmax",
",",
"verbose",
"=",
"verbose",
")",
"T_ij_maps",
".",
"append",
"(",
"m",
")",
"counter",
"+=",
"1",
"# list for maps",
"maps",
"=",
"[",
"]",
"# and now the output maps",
"for",
"i",
"in",
"range",
"(",
"n_bins",
")",
":",
"m",
"=",
"np",
".",
"zeros",
"(",
"hp",
".",
"nside2npix",
"(",
"nside",
"=",
"nside",
")",
")",
"for",
"j",
"in",
"range",
"(",
"n_bins",
")",
":",
"if",
"i",
">=",
"j",
":",
"index",
"=",
"utils",
".",
"ij_to_list_index",
"(",
"j",
",",
"i",
",",
"n_bins",
")",
"m",
"+=",
"T_ij_maps",
"[",
"index",
"]",
"# reorder",
"m",
"=",
"hp",
".",
"reorder",
"(",
"map_in",
"=",
"m",
",",
"r2n",
"=",
"True",
")",
"# set sourrounding to zero",
"m",
"[",
"inv_mask",
"]",
"=",
"0.0",
"# append only wanted values",
"maps",
".",
"append",
"(",
"m",
"[",
"ext_indices",
"]",
")",
"# calculate all spectra and cross spectra",
"if",
"num",
"==",
"0",
"and",
"return_fidu_spec",
":",
"# get the alms",
"alms",
"=",
"[",
"]",
"for",
"i",
"in",
"range",
"(",
"n_bins",
")",
":",
"# make the map",
"m",
"=",
"np",
".",
"zeros",
"(",
"hp",
".",
"nside2npix",
"(",
"nside",
"=",
"nside",
")",
")",
"m",
"[",
"ext_indices",
"]",
"=",
"maps",
"[",
"i",
"]",
"# reorder and alm",
"m",
"=",
"hp",
".",
"reorder",
"(",
"m",
",",
"n2r",
"=",
"True",
")",
"alms",
".",
"append",
"(",
"hp",
".",
"map2alm",
"(",
"m",
")",
")",
"# get the cl",
"cl_fidu",
"=",
"[",
"]",
"for",
"i",
"in",
"range",
"(",
"n_bins",
")",
":",
"for",
"j",
"in",
"range",
"(",
"n_bins",
")",
":",
"if",
"j",
">=",
"i",
":",
"cl_fidu",
".",
"append",
"(",
"hp",
".",
"alm2cl",
"(",
"alms1",
"=",
"alms",
"[",
"i",
"]",
",",
"alms2",
"=",
"alms",
"[",
"j",
"]",
")",
")",
"# stack",
"cl_fidu",
"=",
"np",
".",
"stack",
"(",
"cl_fidu",
",",
"axis",
"=",
"-",
"1",
")",
"# stack",
"maps_out",
".",
"append",
"(",
"np",
".",
"stack",
"(",
"maps",
",",
"axis",
"=",
"-",
"1",
")",
")",
"# return the maps",
"if",
"return_fidu_spec",
":",
"return",
"maps_out",
",",
"cl_fidu",
"else",
":",
"return",
"maps_out"
] | [
51,
0
] | [
193,
23
] | python | en | ['en', 'error', 'th'] | False |
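A sketch of the non-tomographic path (a single 1D spectrum), assuming healpy and numpy are importable as in the function above; the mask geometry and the power-law spectrum are toy choices. The masks must be in NEST ordering, since the function reorders the synthesized RING map to NEST before applying them. The tomographic path additionally needs `utils.ij_to_list_index` from the surrounding module.

```python
import healpy as hp
import numpy as np

nside, lmax = 64, 128

# Toy observation mask (northern cap) built directly in NEST ordering.
npix = hp.nside2npix(nside)
theta, _ = hp.pix2ang(nside, np.arange(npix), nest=True)
data_mask = theta < np.radians(60.0)
data_mask_pad = theta < np.radians(70.0)   # mask plus padding

# A simple, purely illustrative power-law spectrum of length lmax + 1.
ell = np.arange(lmax + 1)
spectrum = 1e-4 / (ell + 10.0) ** 2

maps, cl_fidu = create_GRF_samples([spectrum], data_mask, data_mask_pad,
                                   seed=7, lmax=lmax, return_fidu_spec=True)
print(maps[0].shape, cl_fidu.shape)  # (sum(data_mask_pad),), measured Cl
```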
anti_fleet | (log) |
Logs will print multiple times when calling the Fleet API.
Only display a single log and ignore the others.
|
Logs will print multiple times when calling the Fleet API.
Only display a single log and ignore the others.
| def anti_fleet(log):
"""
Logs will print multiple times when calling the Fleet API.
Only display a single log and ignore the others.
"""
def wrapper(fmt, *args):
if int(os.getenv("PADDLE_TRAINER_ID", 0)) == 0:
log(fmt, *args)
return wrapper | [
"def",
"anti_fleet",
"(",
"log",
")",
":",
"def",
"wrapper",
"(",
"fmt",
",",
"*",
"args",
")",
":",
"if",
"int",
"(",
"os",
".",
"getenv",
"(",
"\"PADDLE_TRAINER_ID\"",
",",
"0",
")",
")",
"==",
"0",
":",
"log",
"(",
"fmt",
",",
"*",
"args",
")",
"return",
"wrapper"
] | [
52,
0
] | [
62,
18
] | python | en | ['en', 'error', 'th'] | False |
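A sketch of wrapping a standard logger with the decorator above; the environment values are set by hand here only to demonstrate both branches, whereas in a real Fleet run PADDLE_TRAINER_ID is set by the launcher.

```python
import logging
import os

logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)

# Wrap the raw logging call so only trainer 0 actually emits the line.
info = anti_fleet(logger.info)

os.environ["PADDLE_TRAINER_ID"] = "3"
info("epoch %d done", 5)   # silently dropped on non-zero ranks

os.environ["PADDLE_TRAINER_ID"] = "0"
info("epoch %d done", 5)   # printed once, on rank 0 only
```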
scaler | (name, value, step, writer) |
This function will draw a scalar curve generated by the visualdl.
Usage: Install visualdl: pip3 install visualdl==2.0.0b4
and then:
visualdl --logdir ./scalar --host 0.0.0.0 --port 8830
to preview the loss curve in real time.
|
This function will draw a scalar curve generated by the visualdl.
Usage: Install visualdl: pip3 install visualdl==2.0.0b4
and then:
visualdl --logdir ./scalar --host 0.0.0.0 --port 8830
to preview the loss curve in real time.
| def scaler(name, value, step, writer):
"""
This function will draw a scalar curve generated by the visualdl.
Usage: Install visualdl: pip3 install visualdl==2.0.0b4
and then:
visualdl --logdir ./scalar --host 0.0.0.0 --port 8830
to preview the loss curve in real time.
"""
writer.add_scalar(name, value, step) | [
"def",
"scaler",
"(",
"name",
",",
"value",
",",
"step",
",",
"writer",
")",
":",
"writer",
".",
"add_scalar",
"(",
"name",
",",
"value",
",",
"step",
")"
] | [
80,
0
] | [
88,
40
] | python | en | ['en', 'error', 'th'] | False |
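A usage sketch assuming visualdl 2.x is installed, per the install note in the docstring; the tag name and synthetic loss values are placeholders.

```python
from visualdl import LogWriter

with LogWriter(logdir="./scalar") as writer:
    for step in range(100):
        loss = 1.0 / (step + 1)            # stand-in for a real training loss
        scaler("train/loss", loss, step, writer)
```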
advertise | () |
Show the advertising message like the following:
===========================================================
== PaddleClas is powered by PaddlePaddle ! ==
===========================================================
== ==
== For more info please go to the following website. ==
== ==
== https://github.com/PaddlePaddle/PaddleClas ==
===========================================================
|
Show the advertising message like the following: | def advertise():
"""
Show the advertising message like the following:
===========================================================
== PaddleClas is powered by PaddlePaddle ! ==
===========================================================
== ==
== For more info please go to the following website. ==
== ==
== https://github.com/PaddlePaddle/PaddleClas ==
===========================================================
"""
copyright = "PaddleClas is powered by PaddlePaddle !"
ad = "For more info please go to the following website."
website = "https://github.com/PaddlePaddle/PaddleClas"
AD_LEN = 6 + len(max([copyright, ad, website], key=len))
info(
coloring("\n{0}\n{1}\n{2}\n{3}\n{4}\n{5}\n{6}\n{7}\n".format(
"=" * (AD_LEN + 4),
"=={}==".format(copyright.center(AD_LEN)),
"=" * (AD_LEN + 4),
"=={}==".format(' ' * AD_LEN),
"=={}==".format(ad.center(AD_LEN)),
"=={}==".format(' ' * AD_LEN),
"=={}==".format(website.center(AD_LEN)),
"=" * (AD_LEN + 4), ), "RED")) | [
"def",
"advertise",
"(",
")",
":",
"copyright",
"=",
"\"PaddleClas is powered by PaddlePaddle !\"",
"ad",
"=",
"\"For more info please go to the following website.\"",
"website",
"=",
"\"https://github.com/PaddlePaddle/PaddleClas\"",
"AD_LEN",
"=",
"6",
"+",
"len",
"(",
"max",
"(",
"[",
"copyright",
",",
"ad",
",",
"website",
"]",
",",
"key",
"=",
"len",
")",
")",
"info",
"(",
"coloring",
"(",
"\"\\n{0}\\n{1}\\n{2}\\n{3}\\n{4}\\n{5}\\n{6}\\n{7}\\n\"",
".",
"format",
"(",
"\"=\"",
"*",
"(",
"AD_LEN",
"+",
"4",
")",
",",
"\"=={}==\"",
".",
"format",
"(",
"copyright",
".",
"center",
"(",
"AD_LEN",
")",
")",
",",
"\"=\"",
"*",
"(",
"AD_LEN",
"+",
"4",
")",
",",
"\"=={}==\"",
".",
"format",
"(",
"' '",
"*",
"AD_LEN",
")",
",",
"\"=={}==\"",
".",
"format",
"(",
"ad",
".",
"center",
"(",
"AD_LEN",
")",
")",
",",
"\"=={}==\"",
".",
"format",
"(",
"' '",
"*",
"AD_LEN",
")",
",",
"\"=={}==\"",
".",
"format",
"(",
"website",
".",
"center",
"(",
"AD_LEN",
")",
")",
",",
"\"=\"",
"*",
"(",
"AD_LEN",
"+",
"4",
")",
",",
")",
",",
"\"RED\"",
")",
")"
] | [
91,
0
] | [
119,
42
] | python | en | ['en', 'error', 'th'] | False |
VGP_Emu.__init__ | (self, objective=None, space=None, N_init=20, X_init=None, Y_init=None, normalize_Y=True, mean_only=False,
alpha=0.01, kern="matern52", num_restarts=10, verbosity=0, max_opt_iter=1000, full_restart=False,
ARD=False, learning_rate=1e-4, parameter_noise_scale=0.1, minimum_variance=1e-3) |
A class that fits a Gaussian process to a given objective function
:param objective: function used for the fitting (needs to estimate the noise as well!)
:param space: a GPy space for the prior
:param N_init: number of initial points
:param X_init: initial points in space (if set N_init is ignored)
:param Y_init: optional; when set together with X_init, the objective is not called on X_init
:param normalize_Y: normalize the Y coordinate to have zero mean and unit variance (standard)
:param mean_only: normalize Y only such that it has zero mean but leave std as is
:param alpha: alpha value in the acquisition function of Raul's paper
:param kern: kernel type, currently only Matern52 or Exponential
:param num_restarts: number of restarts for each GP optimization
:param verbosity: 0 -> print minimal output, higher value = more output
:param max_opt_iter: maximum iteration for a single GP optimization
:param full_restart: ignore the current kernel values for the next optimization
:param ARD: Automatic Relevance Determination, use a lengthscale in the kernel for each dimension of the problem
:param learning_rate: learning rate for the Adam optimizer used for the optimization
:param parameter_noise_scale: noise std that is added to the parameter for optimization
:param minimum_variance: minimum of the allowed variance estimate, choosing this too small leads to numerical
instabilities.
|
A class that fits a Gaussian process to a given objective function
:param objective: function used for the fitting (needs to estimate the noise as well!)
:param space: a GPy space for the prior
:param N_init: number of initial points
:param X_init: initial points in space (if set N_init is ignored)
:param Y_init: optional; when set together with X_init, the objective is not called on X_init
:param normalize_Y: normalize the Y coordinate to have zero mean and unit variance (standard)
:param mean_only: normalize Y only such that it has zero mean but leave std as is
:param alpha: alpha value in the acquisition function of Raul's paper
:param kern: kernel type, currently only Matern52 or Exponential
:param num_restarts: number of restarts for each GP optimization
:param verbosity: 0 -> print minimal output, higher value = more output
:param max_opt_iter: maximum iteration for a single GP optimization
:param full_restart: ignore the current kernel values for the next optimization
:param ARD: Automatic Relevance Determination, use a lengthscale in the kernel for each dimension of the problem
:param learning_rate: learning rate for the Adam optimizer used for the optimization
:param parameter_noise_scale: noise std that is added to the parameter for optimization
:param minimum_variance: minimum of the allowed variance estimate, choosing this too small leads to numerical
instabilities.
| def __init__(self, objective=None, space=None, N_init=20, X_init=None, Y_init=None, normalize_Y=True, mean_only=False,
alpha=0.01, kern="matern52", num_restarts=10, verbosity=0, max_opt_iter=1000, full_restart=False,
ARD=False, learning_rate=1e-4, parameter_noise_scale=0.1, minimum_variance=1e-3):
"""
A class that fits a Gaussian process to a given objective function
:param objective: function used for the fitting (needs to estimate the noise as well!)
:param space: a GPy space for the prior
:param N_init: number of initial points
:param X_init: initial points in space (if set N_init is ignored)
:param Y_init: optional; when set together with X_init, the objective is not called on X_init
:param normalize_Y: normalize the Y coordinate to have zero mean and unit variance (standard)
:param mean_only: normalize Y only such that it has zero mean but leave std as is
:param alpha: alpha value in the acquisition function of Raul's paper
:param kern: kernel type, currently only Matern52 or Exponential
:param num_restarts: number of restarts for each GP optimization
:param verbosity: 0 -> print minimal output, higher value = more output
:param max_opt_iter: maximum iteration for a single GP optimization
:param full_restart: ignore the current kernel values for the next optimization
:param ARD: Automatic Relevance Determination, use a lengthscale in the kernel for each dimension of the problem
:param learning_rate: learning rate for the Adam optimizer used for the optimization
:param parameter_noise_scale: noise std that is added to the parameter for optimization
:param minimum_variance: minimum of the allowed variance estimate, choosing this too small leads to numerical
instabilities.
"""
# some sanity checks
if (objective is None or space is None) and (X_init is None and Y_init is None):
raise ValueError("If there is no initial dataset, one has to provide an objective function and a space!")
self.objective = objective
self.verbosity = verbosity
# how to start
if X_init is None:
initial_design = GPyOpt.experiment_design.initial_design('latin', space, N_init)
initial_Y = objective(initial_design)
elif Y_init is None:
initial_design = X_init
initial_Y = objective(initial_design)
else:
initial_design = X_init
initial_Y = Y_init
# we need to split off the variance estimates
initial_Y, initial_var = np.split(initial_Y, axis=1, indices_or_sections=2)
# tfp prior
self.space = space
if self.space is not None:
a_min = np.asarray(self.space.get_bounds(), dtype=self.default_np_float).T[0]
a_max = np.asarray(self.space.get_bounds(), dtype=self.default_np_float).T[1]
self.tfp_prior = tfp.distributions.Uniform(low=a_min, high=a_max)
# normalize
if normalize_Y:
self.Y_mean = np.mean(initial_Y)
if mean_only:
self.Y_std = 1.0
else:
self.Y_std = np.std(initial_Y)
self.Y_all = (initial_Y - self.Y_mean) / self.Y_std
else:
self.Y_mean = 0.0
self.Y_std = 1.0
self.Y_all = initial_Y
self.normalize_Y = normalize_Y
self.mean_only = mean_only
# now we need to take care of the variance estimates
self.var_estimates = initial_var / self.Y_std ** 2
# normalization
self.params, self.rot_mat, self.rot_mean, self.rot_std = self.normalize_params(initial_design)
# kernel
self.dims = int(X_init.shape[-1])
self.kern_type = kern
if ARD:
lengthscales = [1.0 for _ in range(self.dims)]
else:
lengthscales = 1.0
if kern == "matern52":
self.kern = gpflow.kernels.Matern52(lengthscales=lengthscales)
elif kern == "exponential":
self.kern = gpflow.kernels.Exponential(lengthscales=lengthscales)
else:
raise IOError("Unkown kernel")
self.lengthscale_shape = self.kern.lengthscales.shape
if num_restarts is None or num_restarts < 1:
print("Number of restarts is set to 1!")
self.num_restarts = self.default_np_int(1)
else:
self.num_restarts = self.default_np_int(num_restarts)
# get the likelihood
self.likelihood = HeteroskedasticGaussian()
# model (if you get a matrix inversion error here increase number of initial params)
self.minimum_variance = minimum_variance
data = np.concatenate([self.Y_all, np.maximum(self.var_estimates, self.minimum_variance)], axis=1)
self.model = gpflow.models.VGP((self.params.astype(self.default_np_float),
data.astype(self.default_np_float)),
kernel=self.kern, likelihood=self.likelihood, num_latent_gps=1)
# We turn off training for q as it is trained with natgrad
gpflow.utilities.set_trainable(self.model.q_mu, False)
gpflow.utilities.set_trainable(self.model.q_sqrt, False)
# summary
print_summary(self.model)
# save params
self.learning_rate = learning_rate
self.parameter_noise_scale = parameter_noise_scale
self.max_opt_iter = max_opt_iter
self.full_restart = full_restart
self.optimize_model()
# for acquisition
self.current_transform = lambda x: self.transform_params(x, self.rot_mat, self.rot_mean, self.rot_std)
self.alpha = alpha | [
"def",
"__init__",
"(",
"self",
",",
"objective",
"=",
"None",
",",
"space",
"=",
"None",
",",
"N_init",
"=",
"20",
",",
"X_init",
"=",
"None",
",",
"Y_init",
"=",
"None",
",",
"normalize_Y",
"=",
"True",
",",
"mean_only",
"=",
"False",
",",
"alpha",
"=",
"0.01",
",",
"kern",
"=",
"\"matern52\"",
",",
"num_restarts",
"=",
"10",
",",
"verbosity",
"=",
"0",
",",
"max_opt_iter",
"=",
"1000",
",",
"full_restart",
"=",
"False",
",",
"ARD",
"=",
"False",
",",
"learning_rate",
"=",
"1e-4",
",",
"parameter_noise_scale",
"=",
"0.1",
",",
"minimum_variance",
"=",
"1e-3",
")",
":",
"# some sanity checks",
"if",
"(",
"objective",
"is",
"None",
"or",
"space",
"is",
"None",
")",
"and",
"(",
"X_init",
"is",
"None",
"and",
"Y_init",
"is",
"None",
")",
":",
"raise",
"ValueError",
"(",
"\"If there is no initial dataset, one has to provide an objective function and a space!\"",
")",
"self",
".",
"objective",
"=",
"objective",
"self",
".",
"verbosity",
"=",
"verbosity",
"# how to start",
"if",
"X_init",
"is",
"None",
":",
"initial_design",
"=",
"GPyOpt",
".",
"experiment_design",
".",
"initial_design",
"(",
"'latin'",
",",
"space",
",",
"N_init",
")",
"initial_Y",
"=",
"objective",
"(",
"initial_design",
")",
"elif",
"Y_init",
"is",
"None",
":",
"initial_design",
"=",
"X_init",
"initial_Y",
"=",
"objective",
"(",
"initial_design",
")",
"else",
":",
"initial_design",
"=",
"X_init",
"initial_Y",
"=",
"Y_init",
"# we need to split off the variance estimates",
"initial_Y",
",",
"initial_var",
"=",
"np",
".",
"split",
"(",
"initial_Y",
",",
"axis",
"=",
"1",
",",
"indices_or_sections",
"=",
"2",
")",
"# tfp prior",
"self",
".",
"space",
"=",
"space",
"if",
"self",
".",
"space",
"is",
"not",
"None",
":",
"a_min",
"=",
"np",
".",
"asarray",
"(",
"self",
".",
"space",
".",
"get_bounds",
"(",
")",
",",
"dtype",
"=",
"self",
".",
"default_np_float",
")",
".",
"T",
"[",
"0",
"]",
"a_max",
"=",
"np",
".",
"asarray",
"(",
"self",
".",
"space",
".",
"get_bounds",
"(",
")",
",",
"dtype",
"=",
"self",
".",
"default_np_float",
")",
".",
"T",
"[",
"1",
"]",
"self",
".",
"tfp_prior",
"=",
"tfp",
".",
"distributions",
".",
"Uniform",
"(",
"low",
"=",
"a_min",
",",
"high",
"=",
"a_max",
")",
"# normalize",
"if",
"normalize_Y",
":",
"self",
".",
"Y_mean",
"=",
"np",
".",
"mean",
"(",
"initial_Y",
")",
"if",
"mean_only",
":",
"self",
".",
"Y_std",
"=",
"1.0",
"else",
":",
"self",
".",
"Y_std",
"=",
"np",
".",
"std",
"(",
"initial_Y",
")",
"self",
".",
"Y_all",
"=",
"(",
"initial_Y",
"-",
"self",
".",
"Y_mean",
")",
"/",
"self",
".",
"Y_std",
"else",
":",
"self",
".",
"Y_mean",
"=",
"0.0",
"self",
".",
"Y_std",
"=",
"1.0",
"self",
".",
"Y_all",
"=",
"initial_Y",
"self",
".",
"normalize_Y",
"=",
"normalize_Y",
"self",
".",
"mean_only",
"=",
"mean_only",
"# now we need to take care of the variance estimates",
"self",
".",
"var_estimates",
"=",
"initial_var",
"/",
"self",
".",
"Y_std",
"**",
"2",
"# normalization",
"self",
".",
"params",
",",
"self",
".",
"rot_mat",
",",
"self",
".",
"rot_mean",
",",
"self",
".",
"rot_std",
"=",
"self",
".",
"normalize_params",
"(",
"initial_design",
")",
"# kernel",
"self",
".",
"dims",
"=",
"int",
"(",
"X_init",
".",
"shape",
"[",
"-",
"1",
"]",
")",
"self",
".",
"kern_type",
"=",
"kern",
"if",
"ARD",
":",
"lengthscales",
"=",
"[",
"1.0",
"for",
"_",
"in",
"range",
"(",
"self",
".",
"dims",
")",
"]",
"else",
":",
"lengthscales",
"=",
"1.0",
"if",
"kern",
"==",
"\"matern52\"",
":",
"self",
".",
"kern",
"=",
"gpflow",
".",
"kernels",
".",
"Matern52",
"(",
"lengthscales",
"=",
"lengthscales",
")",
"elif",
"kern",
"==",
"\"exponential\"",
":",
"self",
".",
"kern",
"=",
"gpflow",
".",
"kernels",
".",
"Exponential",
"(",
"lengthscales",
"=",
"lengthscales",
")",
"else",
":",
"raise",
"IOError",
"(",
"\"Unkown kernel\"",
")",
"self",
".",
"lengthscale_shape",
"=",
"self",
".",
"kern",
".",
"lengthscales",
".",
"shape",
"if",
"num_restarts",
"is",
"None",
"or",
"num_restarts",
"<",
"1",
":",
"print",
"(",
"\"Number of restarts is set to 1!\"",
")",
"self",
".",
"num_restarts",
"=",
"self",
".",
"default_np_int",
"(",
"1",
")",
"else",
":",
"self",
".",
"num_restarts",
"=",
"self",
".",
"default_np_int",
"(",
"num_restarts",
")",
"# get the likelihood",
"self",
".",
"likelihood",
"=",
"HeteroskedasticGaussian",
"(",
")",
"# model (if you get a matrix inversion error here increase number of initial params)",
"self",
".",
"minimum_variance",
"=",
"minimum_variance",
"data",
"=",
"np",
".",
"concatenate",
"(",
"[",
"self",
".",
"Y_all",
",",
"np",
".",
"maximum",
"(",
"self",
".",
"var_estimates",
",",
"self",
".",
"minimum_variance",
")",
"]",
",",
"axis",
"=",
"1",
")",
"self",
".",
"model",
"=",
"gpflow",
".",
"models",
".",
"VGP",
"(",
"(",
"self",
".",
"params",
".",
"astype",
"(",
"self",
".",
"default_np_float",
")",
",",
"data",
".",
"astype",
"(",
"self",
".",
"default_np_float",
")",
")",
",",
"kernel",
"=",
"self",
".",
"kern",
",",
"likelihood",
"=",
"self",
".",
"likelihood",
",",
"num_latent_gps",
"=",
"1",
")",
"# We turn off training for q as it is trained with natgrad",
"gpflow",
".",
"utilities",
".",
"set_trainable",
"(",
"self",
".",
"model",
".",
"q_mu",
",",
"False",
")",
"gpflow",
".",
"utilities",
".",
"set_trainable",
"(",
"self",
".",
"model",
".",
"q_sqrt",
",",
"False",
")",
"# summary",
"print_summary",
"(",
"self",
".",
"model",
")",
"# save params",
"self",
".",
"learning_rate",
"=",
"learning_rate",
"self",
".",
"parameter_noise_scale",
"=",
"parameter_noise_scale",
"self",
".",
"max_opt_iter",
"=",
"max_opt_iter",
"self",
".",
"full_restart",
"=",
"full_restart",
"self",
".",
"optimize_model",
"(",
")",
"# for acquisition",
"self",
".",
"current_transform",
"=",
"lambda",
"x",
":",
"self",
".",
"transform_params",
"(",
"x",
",",
"self",
".",
"rot_mat",
",",
"self",
".",
"rot_mean",
",",
"self",
".",
"rot_std",
")",
"self",
".",
"alpha",
"=",
"alpha"
] | [
68,
4
] | [
188,
26
] | python | en | ['en', 'error', 'th'] | False |
VGP_Emu.acquisition_function | (self, x) |
Raul's acquisition function
|
Raul's acquisition function
| def acquisition_function(self, x):
        """
        Raul's acquisition function
        """
        if self.current_transform is not None:
            x = self.current_transform(x)
        mean, var = self.model.predict_f(x)
        if self.normalize_Y:
            mean = mean * self.Y_std + self.Y_mean
            var *= self.Y_std ** 2
        return -(tf.exp(mean) + self.alpha * (tf.exp(var) - 1.0) * tf.exp(2 * mean + var))
"def",
"acquisition_function",
"(",
"self",
",",
"x",
")",
":",
"if",
"self",
".",
"current_transform",
"is",
"not",
"None",
":",
"x",
"=",
"self",
".",
"current_transform",
"(",
"x",
")",
"mean",
",",
"var",
"=",
"self",
".",
"model",
".",
"predict_f",
"(",
"x",
")",
"if",
"self",
".",
"normalize_Y",
":",
"mean",
"=",
"mean",
"*",
"self",
".",
"Y_std",
"+",
"self",
".",
"Y_mean",
"var",
"*=",
"self",
".",
"Y_std",
"**",
"2",
"return",
"-",
"(",
"tf",
".",
"exp",
"(",
"mean",
")",
"+",
"self",
".",
"alpha",
"*",
"(",
"tf",
".",
"exp",
"(",
"var",
")",
"-",
"1.0",
")",
"*",
"tf",
".",
"exp",
"(",
"2",
"*",
"mean",
"+",
"var",
")",
")"
] | [
190,
4
] | [
203,
90
] | python | en | ['en', 'error', 'th'] | False |
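For reference, the return value above is the negative of exp(mean) plus alpha times the variance of a log-normal distribution with log-space parameters (mean, var). A self-contained numpy check of that variance identity (illustrative, not part of the source):

import numpy as np

rng = np.random.default_rng(0)
mu, sigma2 = 0.3, 0.5  # latent GP mean and variance at some input
draws = np.exp(rng.normal(mu, np.sqrt(sigma2), size=1_000_000))

# closed-form log-normal variance, the same expression the acquisition uses
var_closed = (np.exp(sigma2) - 1.0) * np.exp(2 * mu + sigma2)
print(np.isclose(draws.var(), var_closed, rtol=1e-2))  # True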
VGP_Emu.train_model | (self, n_iters) |
Optimizes the model for n_iters iterations
:param n_iters: number of iterations for optimization
:return: a list with the loss at each step (length n_iters)
|
Optimizes the model for n_iters iterations
:param n_iters: number of iterations for optimization
:return: a list with the loss at each step (length n_iters)
| def train_model(self, n_iters):
        """
        Optimizes the model for n_iters iterations
        :param n_iters: number of iterations for optimization
        :return: a list with the loss at each step (length n_iters)
        """

        @tf.function
        def objective_closure():
            return self.model.training_loss()

        natgrad = NaturalGradient(gamma=1.0)
        adam = tf.optimizers.Adam(self.learning_rate)

        print("Training the VGP model params...", flush=True)
        losses = []
        with tqdm(range(n_iters), total=n_iters) as pbar:
            for _ in pbar:
                natgrad.minimize(objective_closure, [(self.model.q_mu, self.model.q_sqrt)])
                adam.minimize(objective_closure, self.model.trainable_variables)
                loss = objective_closure().numpy()
                losses.append(loss)
                pbar.set_postfix(loss_val=loss, refresh=False)
        return losses
"def",
"train_model",
"(",
"self",
",",
"n_iters",
")",
":",
"@",
"tf",
".",
"function",
"def",
"objective_closure",
"(",
")",
":",
"return",
"self",
".",
"model",
".",
"training_loss",
"(",
")",
"natgrad",
"=",
"NaturalGradient",
"(",
"gamma",
"=",
"1.0",
")",
"adam",
"=",
"tf",
".",
"optimizers",
".",
"Adam",
"(",
"self",
".",
"learning_rate",
")",
"print",
"(",
"\"Training the VGP model params...\"",
",",
"flush",
"=",
"True",
")",
"losses",
"=",
"[",
"]",
"with",
"tqdm",
"(",
"range",
"(",
"n_iters",
")",
",",
"total",
"=",
"n_iters",
")",
"as",
"pbar",
":",
"for",
"_",
"in",
"pbar",
":",
"natgrad",
".",
"minimize",
"(",
"objective_closure",
",",
"[",
"(",
"self",
".",
"model",
".",
"q_mu",
",",
"self",
".",
"model",
".",
"q_sqrt",
")",
"]",
")",
"adam",
".",
"minimize",
"(",
"objective_closure",
",",
"self",
".",
"model",
".",
"trainable_variables",
")",
"loss",
"=",
"objective_closure",
"(",
")",
".",
"numpy",
"(",
")",
"losses",
".",
"append",
"(",
"loss",
")",
"pbar",
".",
"set_postfix",
"(",
"loss_val",
"=",
"loss",
",",
"refresh",
"=",
"False",
")",
"return",
"losses"
] | [
205,
4
] | [
229,
21
] | python | en | ['en', 'error', 'th'] | False |
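A hypothetical call pattern for train_model, assuming an already constructed emulator emu (the variable name and iteration count are illustrative, not from the source):

# natgrad updates q_mu/q_sqrt, Adam updates the kernel hyperparameters
losses = emu.train_model(n_iters=500)

import matplotlib.pyplot as plt
plt.plot(losses)
plt.xlabel("iteration")
plt.ylabel("training loss (negative ELBO)")
plt.show()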
VGP_Emu._readout_params | (self) |
Reads out the params of the model and returns them as a tuple of arrays
:return: tuple of params
|
Reads out the params of the model and returns them as a tuple of arrays
:return: tuple of params
| def _readout_params(self):
        """
        Reads out the params of the model and returns them as a tuple of arrays
        :return: tuple of params
        """
        params = (self.model.kernel.variance.numpy(), self.model.kernel.lengthscales.numpy(), self.model.q_mu.numpy(),
                  self.model.q_sqrt.numpy())
        return params
"def",
"_readout_params",
"(",
"self",
")",
":",
"params",
"=",
"(",
"self",
".",
"model",
".",
"kernel",
".",
"variance",
".",
"numpy",
"(",
")",
",",
"self",
".",
"model",
".",
"kernel",
".",
"lengthscales",
".",
"numpy",
"(",
")",
",",
"self",
".",
"model",
".",
"q_mu",
".",
"numpy",
"(",
")",
",",
"self",
".",
"model",
".",
"q_sqrt",
".",
"numpy",
"(",
")",
")",
"return",
"params"
] | [
231,
4
] | [
240,
21
] | python | en | ['en', 'error', 'th'] | False |
VGP_Emu._set_params | (self, params) |
Sets the model params to a given tuple of arrays
:param params: params (tuple of arrays)
|
Sets the model params to a given tuple of arrays
:param params: params (tuple of arrays)
| def _set_params(self, params):
        """
        Sets the model params to a given tuple of arrays
        :param params: params (tuple of arrays)
        """
        self.model.kernel.variance.assign(params[0])
        self.model.kernel.lengthscales.assign(params[1])
        self.model.q_mu.assign(params[2])
        self.model.q_sqrt.assign(params[3])
"def",
"_set_params",
"(",
"self",
",",
"params",
")",
":",
"self",
".",
"model",
".",
"kernel",
".",
"variance",
".",
"assign",
"(",
"params",
"[",
"0",
"]",
")",
"self",
".",
"model",
".",
"kernel",
".",
"lengthscales",
".",
"assign",
"(",
"params",
"[",
"1",
"]",
")",
"self",
".",
"model",
".",
"q_mu",
".",
"assign",
"(",
"params",
"[",
"2",
"]",
")",
"self",
".",
"model",
".",
"q_sqrt",
".",
"assign",
"(",
"params",
"[",
"3",
"]",
")"
] | [
242,
4
] | [
250,
43
] | python | en | ['en', 'error', 'th'] | False |
VGP_Emu.optimize_model | (self, scale=1.0) |
Optimizes the model for a given number of restarts and chooses the best result
:param scale: mean of the normal distribution used to draw new params on a full restart
|
Optimizes the model for a given number of restarts and chooses the best result
:param scale: mean of the normal distribution used to draw new params on a full restart
| def optimize_model(self, scale=1.0):
        """
        Optimizes the model for a given number of restarts and chooses the best result
        :param scale: mean of the normal distribution used to draw new params on a full restart
        """
        func_vals = []
        model_params = []
        # read out the original params
        original_params = self._readout_params()
        for i in range(self.num_restarts):
            # we need to create a new optimizer since Adam has params itself
            self.opt = tf.optimizers.Adam(self.learning_rate)
            try:
                # assign new starting vals
                if self.full_restart:
                    # This is used in GPyOpt if no prior is specified (see model.randomize() defined in the paramz package)
                    self.model.kernel.variance.assign(
                        tf.maximum(tf.random.normal(shape=(), dtype=self.default_tf_float, mean=scale,
                                                    stddev=self.parameter_noise_scale),
                                   tf.constant(0.1, dtype=self.default_tf_float)))
                    self.model.kernel.lengthscales.assign(
                        tf.maximum(tf.random.normal(shape=self.lengthscale_shape,
                                                    dtype=self.default_tf_float, mean=scale,
                                                    stddev=self.parameter_noise_scale),
                                   tf.constant(0.1, dtype=self.default_tf_float)))
                    self.model.q_mu.assign(tf.zeros_like(self.model.q_mu))
                    self.model.q_sqrt.assign(tf.eye(len(original_params[2]), batch_shape=[1],
                                                    dtype=self.default_tf_float))
                else:
                    self.model.kernel.variance.assign(
                        tf.maximum(original_params[0] +
                                   tf.random.normal(shape=(), dtype=self.default_tf_float,
                                                    stddev=self.parameter_noise_scale),
                                   tf.constant(0.1, dtype=self.default_tf_float)))
                    self.model.kernel.lengthscales.assign(
                        tf.maximum(original_params[1] +
                                   tf.random.normal(shape=self.lengthscale_shape,
                                                    dtype=self.default_tf_float,
                                                    stddev=self.parameter_noise_scale),
                                   tf.constant(0.1, dtype=self.default_tf_float)))
                    self.model.q_mu.assign(original_params[2])
                    self.model.q_sqrt.assign(original_params[3])
                # now we optimize
                losses = self.train_model(self.max_opt_iter)
                # we append the final loss value
                func_vals.append(losses[-1])
                model_params.append(self._readout_params())
                if self.verbosity > 0:
                    print("Optimization {}: achieved {} with params {}".format(i, func_vals[-1], model_params[-1]))
            except Exception:
                print("Failed Optimization {}".format(i))
        # set to minimum
        min_index = np.argmin(func_vals)
        self._set_params(model_params[min_index])
"def",
"optimize_model",
"(",
"self",
",",
"scale",
"=",
"1.0",
")",
":",
"func_vals",
"=",
"[",
"]",
"model_params",
"=",
"[",
"]",
"# read out the original params",
"original_params",
"=",
"self",
".",
"_readout_params",
"(",
")",
"for",
"i",
"in",
"range",
"(",
"self",
".",
"num_restarts",
")",
":",
"# we need to create a new optimizer since Adam has params itself",
"self",
".",
"opt",
"=",
"tf",
".",
"optimizers",
".",
"Adam",
"(",
"self",
".",
"learning_rate",
")",
"try",
":",
"# assign new staring vals",
"if",
"self",
".",
"full_restart",
":",
"# This is used in GPy opt if no prior is specified (see model.randomize() defined in paramz pack)",
"self",
".",
"model",
".",
"kernel",
".",
"variance",
".",
"assign",
"(",
"tf",
".",
"maximum",
"(",
"tf",
".",
"random",
".",
"normal",
"(",
"shape",
"=",
"(",
")",
",",
"dtype",
"=",
"self",
".",
"default_tf_float",
",",
"mean",
"=",
"scale",
",",
"stddev",
"=",
"self",
".",
"parameter_noise_scale",
")",
",",
"tf",
".",
"constant",
"(",
"0.1",
",",
"dtype",
"=",
"self",
".",
"default_tf_float",
")",
")",
")",
"self",
".",
"model",
".",
"kernel",
".",
"lengthscales",
".",
"assign",
"(",
"tf",
".",
"maximum",
"(",
"tf",
".",
"random",
".",
"normal",
"(",
"shape",
"=",
"self",
".",
"lengthscale_shape",
",",
"dtype",
"=",
"self",
".",
"default_tf_float",
",",
"mean",
"=",
"scale",
",",
"stddev",
"=",
"self",
".",
"parameter_noise_scale",
")",
",",
"tf",
".",
"constant",
"(",
"0.1",
",",
"dtype",
"=",
"self",
".",
"default_tf_float",
")",
")",
")",
"self",
".",
"model",
".",
"q_mu",
".",
"assign",
"(",
"tf",
".",
"zeros_like",
"(",
"self",
".",
"model",
".",
"q_mu",
")",
")",
"self",
".",
"model",
".",
"q_sqrt",
".",
"assign",
"(",
"tf",
".",
"eye",
"(",
"len",
"(",
"original_params",
"[",
"2",
"]",
")",
",",
"batch_shape",
"=",
"[",
"1",
"]",
",",
"dtype",
"=",
"self",
".",
"default_tf_float",
")",
")",
"else",
":",
"self",
".",
"model",
".",
"kernel",
".",
"variance",
".",
"assign",
"(",
"tf",
".",
"maximum",
"(",
"original_params",
"[",
"0",
"]",
"+",
"tf",
".",
"random",
".",
"normal",
"(",
"shape",
"=",
"(",
")",
",",
"dtype",
"=",
"self",
".",
"default_tf_float",
",",
"stddev",
"=",
"self",
".",
"parameter_noise_scale",
")",
",",
"tf",
".",
"constant",
"(",
"0.1",
",",
"dtype",
"=",
"self",
".",
"default_tf_float",
",",
")",
")",
")",
"self",
".",
"model",
".",
"kernel",
".",
"lengthscales",
".",
"assign",
"(",
"tf",
".",
"maximum",
"(",
"original_params",
"[",
"1",
"]",
"+",
"tf",
".",
"random",
".",
"normal",
"(",
"shape",
"=",
"self",
".",
"lengthscale_shape",
",",
"dtype",
"=",
"self",
".",
"default_tf_float",
",",
"stddev",
"=",
"self",
".",
"parameter_noise_scale",
")",
",",
"tf",
".",
"constant",
"(",
"0.1",
",",
"dtype",
"=",
"self",
".",
"default_tf_float",
")",
")",
")",
"self",
".",
"model",
".",
"q_mu",
".",
"assign",
"(",
"original_params",
"[",
"2",
"]",
")",
"self",
".",
"model",
".",
"q_sqrt",
".",
"assign",
"(",
"original_params",
"[",
"3",
"]",
")",
"# now we optimize",
"losses",
"=",
"self",
".",
"train_model",
"(",
"self",
".",
"max_opt_iter",
")",
"# we append the final loss value",
"func_vals",
".",
"append",
"(",
"losses",
"[",
"-",
"1",
"]",
")",
"model_params",
".",
"append",
"(",
"self",
".",
"_readout_params",
"(",
")",
")",
"if",
"self",
".",
"verbosity",
">",
"0",
":",
"print",
"(",
"\"Optimization {}: achieved {} with params {}\"",
".",
"format",
"(",
"i",
",",
"func_vals",
"[",
"-",
"1",
"]",
",",
"model_params",
"[",
"-",
"1",
"]",
")",
")",
"except",
":",
"print",
"(",
"\"Failed Optimization {}\"",
".",
"format",
"(",
"i",
")",
")",
"# set to minimum",
"min_index",
"=",
"np",
".",
"argmin",
"(",
"func_vals",
")",
"self",
".",
"_set_params",
"(",
"model_params",
"[",
"min_index",
"]",
")"
] | [
252,
4
] | [
312,
49
] | python | en | ['en', 'error', 'th'] | False |
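Stripped of the GP specifics, the loop above is a generic keep-the-best restart pattern; a minimal sketch of that pattern (names are illustrative, not from the source):

import numpy as np

def optimize_with_restarts(perturb_fn, train_fn, read_fn, set_fn, n_restarts):
    """Perturb, retrain, and keep the parameters with the lowest final loss."""
    finals, snapshots = [], []
    for _ in range(n_restarts):
        perturb_fn()                  # re-draw or jitter the parameters
        finals.append(train_fn())     # returns the final loss value
        snapshots.append(read_fn())   # snapshot the trained parameters
    set_fn(snapshots[int(np.argmin(finals))])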
VGP_Emu.optimize | (self, n_draw=5, max_iters=100, rel_tol=0.5, n_convergence=1000, sampler_burn_in=1000,
save_path=None, save_iter=5, **kwargs) |
Optimizes the initiated GP emulator for at most max_iters iterations
:param n_draw: number of draws in each step
:param max_iters: maximum number of iterations
:param rel_tol: relative tolerance of the Bhattacharyya coefficient; the optimization will stop if the relative
change is smaller than rel_tol for 5 consecutive steps
:param n_convergence: number of sample points for the convergence test
:param sampler_burn_in: number of burn-in steps for the MCMC that draws new samples
:param save_path: path where to save the intermediate results
:param save_iter: save intermediate results every save_iter iterations
:param kwargs: additional arguments passed to the sample_new routine (e.g. MCMC type etc.)
|
Optimizes the initiated GP emulator for at most max_iters iterations
:param n_draw: number of draws in each step
:param max_iters: maximum number of iterations
:param rel_tol: relative tolerance of the Bhattacharyya coefficient; the optimization will stop if the relative
change is smaller than rel_tol for 5 consecutive steps
:param n_convergence: number of sample points for the convergence test
:param sampler_burn_in: number of burn-in steps for the MCMC that draws new samples
:param save_path: path where to save the intermediate results
:param save_iter: save intermediate results every save_iter iterations
:param kwargs: additional arguments passed to the sample_new routine (e.g. MCMC type etc.)
| def optimize(self, n_draw=5, max_iters=100, rel_tol=0.5, n_convergence=1000, sampler_burn_in=1000,
             save_path=None, save_iter=5, **kwargs):
        """
        Optimizes the initiated GP emulator for at most max_iters iterations
        :param n_draw: number of draws in each step
        :param max_iters: maximum number of iterations
        :param rel_tol: relative tolerance of the Bhattacharyya coefficient; the optimization will stop if the
                        relative change is smaller than rel_tol for 5 consecutive steps
        :param n_convergence: number of sample points for the convergence test
        :param sampler_burn_in: number of burn-in steps for the MCMC that draws new samples
        :param save_path: path where to save the intermediate results
        :param save_iter: save intermediate results every save_iter iterations
        :param kwargs: additional arguments passed to the sample_new routine (e.g. MCMC type etc.)
        """
        if self.objective is None or self.space is None:
            raise ValueError("Iterative optimization is only possible when the GP emulator was initialized with an "
                             "objective function and a design space!")

        # convergence samples
        convergence_samples = self.tfp_prior.sample(n_convergence)
        new_preds = None
        old_coef = None
        changes = []
        for i in range(max_iters):
            old_preds = new_preds

            # get new samples
            t0 = time()
            new_samples = self.sample_new(n_draw, burn_in=sampler_burn_in, **kwargs)
            t1 = time()
            print("Drawn {} new samples in {} sec...".format(n_draw, t1 - t0))

            # eval objective
            t0 = time()
            Y_new, var_new = np.split(self.objective(new_samples), axis=1, indices_or_sections=2)
            t1 = time()
            print("Objective evaluation took {} sec...".format(t1 - t0))

            # undo the normalization, append the new data, then re-normalize
            if self.normalize_Y:
                self.Y_all = self.Y_all * self.Y_std + self.Y_mean
                self.var_estimates *= self.Y_std ** 2
            self.Y_all = np.concatenate([self.Y_all, Y_new], axis=0)
            self.var_estimates = np.concatenate([self.var_estimates, var_new], axis=0)
            if self.normalize_Y:
                self.Y_mean = np.mean(self.Y_all)
                if not self.mean_only:
                    self.Y_std = np.std(self.Y_all)
                self.Y_all = (self.Y_all - self.Y_mean) / self.Y_std
                self.var_estimates /= self.Y_std ** 2

            # stack
            self.params = self.unnormalize_params(self.params, self.rot_mat, self.rot_mean, self.rot_std)
            self.params = np.concatenate([self.params, new_samples], axis=0)
            self.params, self.rot_mat, self.rot_mean, self.rot_std = self.normalize_params(self.params)

            # model
            print("N params: ", len(self.params))
            data = np.concatenate([self.Y_all, np.maximum(self.var_estimates, self.minimum_variance)], axis=1)
            self.model = gpflow.models.VGP((self.params.astype(self.default_np_float),
                                            data.astype(self.default_np_float)),
                                           kernel=self.kern, likelihood=self.likelihood, num_latent_gps=1)
            t0 = time()
            self.optimize_model()
            t1 = time()
            print("GP optimization took {} sec with {} restarts...".format(t1 - t0, self.num_restarts))

            # new acquisition transform
            self.current_transform = lambda x: self.transform_params(x, self.rot_mat, self.rot_mean, self.rot_std)

            # TESTING
            # =======
            # TODO: implement this better
            current_samples = self.transform_params(convergence_samples, self.rot_mat, self.rot_mean, self.rot_std)
            current_samples = current_samples.astype(self.default_np_float)
            # get new preds (unnormalized)
            new_preds = np.exp(self.model.predict_f(current_samples)[0].numpy().ravel() * self.Y_std + self.Y_mean)
            if old_preds is not None:
                # Bhattacharyya coefficient
                new_coef = np.mean(np.sqrt(old_preds * new_preds))
                print(new_coef)
                if old_coef is not None:
                    rel_change = 100 * np.abs(old_coef - new_coef) / new_coef
                    print("Relative change: ", rel_change)
                    changes.append(rel_change)
                old_coef = new_coef
                if len(changes) > 5 and not np.any(np.asarray(changes)[-5:] > rel_tol):
                    break

            # save stuff
            if save_path is not None and i > 0 and i % save_iter == 0:
                current_path = os.path.join(save_path, "iter_%i" % (i))
                if not os.path.exists(current_path):
                    os.mkdir(current_path)
                    self.save_model(current_path)
                    print("Saved intermediate model at <{}>".format(current_path))
                else:
                    print("Save path <{}> exists, "
                          "skipping save model in order to avoid overwriting...".format(current_path))
        print("Done...")
"def",
"optimize",
"(",
"self",
",",
"n_draw",
"=",
"5",
",",
"max_iters",
"=",
"100",
",",
"rel_tol",
"=",
"0.5",
",",
"n_convergence",
"=",
"1000",
",",
"sampler_burn_in",
"=",
"1000",
",",
"save_path",
"=",
"None",
",",
"save_iter",
"=",
"5",
",",
"*",
"*",
"kwargs",
")",
":",
"if",
"self",
".",
"objective",
"is",
"None",
"or",
"self",
".",
"space",
"is",
"None",
":",
"raise",
"ValueError",
"(",
"\"Iterative optimization is only possible when the GP emulator was initialized with an \"",
"\"objective function and a design space!\"",
")",
"# convergence samples",
"convergence_samples",
"=",
"self",
".",
"tfp_prior",
".",
"sample",
"(",
"n_convergence",
")",
"new_preds",
"=",
"None",
"old_coef",
"=",
"None",
"changes",
"=",
"[",
"]",
"for",
"i",
"in",
"range",
"(",
"max_iters",
")",
":",
"old_preds",
"=",
"new_preds",
"# get new samples",
"t0",
"=",
"time",
"(",
")",
"new_samples",
"=",
"self",
".",
"sample_new",
"(",
"n_draw",
",",
"burn_in",
"=",
"sampler_burn_in",
",",
"*",
"*",
"kwargs",
")",
"t1",
"=",
"time",
"(",
")",
"print",
"(",
"\"Drawn {} new samples in {} sec...\"",
".",
"format",
"(",
"n_draw",
",",
"t1",
"-",
"t0",
")",
")",
"# eval objective",
"t0",
"=",
"time",
"(",
")",
"Y_new",
",",
"var_new",
"=",
"np",
".",
"split",
"(",
"self",
".",
"objective",
"(",
"new_samples",
")",
",",
"axis",
"=",
"1",
",",
"indices_or_sections",
"=",
"2",
")",
"t1",
"=",
"time",
"(",
")",
"print",
"(",
"\"Objective evaluation took {} sec...\"",
".",
"format",
"(",
"t1",
"-",
"t0",
")",
")",
"# normalize",
"if",
"self",
".",
"normalize_Y",
":",
"self",
".",
"Y_all",
"=",
"self",
".",
"Y_all",
"*",
"self",
".",
"Y_std",
"+",
"self",
".",
"Y_mean",
"self",
".",
"var_estimates",
"*=",
"self",
".",
"Y_std",
"**",
"2",
"self",
".",
"Y_all",
"=",
"np",
".",
"concatenate",
"(",
"[",
"self",
".",
"Y_all",
",",
"Y_new",
"]",
",",
"axis",
"=",
"0",
")",
"self",
".",
"var_estimates",
"=",
"np",
".",
"concatenate",
"(",
"[",
"self",
".",
"var_estimates",
",",
"var_new",
"]",
",",
"axis",
"=",
"0",
")",
"if",
"self",
".",
"normalize_Y",
":",
"self",
".",
"Y_mean",
"=",
"np",
".",
"mean",
"(",
"self",
".",
"Y_all",
")",
"if",
"not",
"self",
".",
"mean_only",
":",
"self",
".",
"Y_std",
"=",
"np",
".",
"std",
"(",
"self",
".",
"Y_all",
")",
"self",
".",
"Y_all",
"=",
"(",
"self",
".",
"Y_all",
"-",
"self",
".",
"Y_mean",
")",
"/",
"self",
".",
"Y_std",
"self",
".",
"var_estimates",
"/=",
"self",
".",
"Y_std",
"**",
"2",
"# stack",
"self",
".",
"params",
"=",
"self",
".",
"unnormalize_params",
"(",
"self",
".",
"params",
",",
"self",
".",
"rot_mat",
",",
"self",
".",
"rot_mean",
",",
"self",
".",
"rot_std",
")",
"self",
".",
"params",
"=",
"np",
".",
"concatenate",
"(",
"[",
"self",
".",
"params",
",",
"new_samples",
"]",
",",
"axis",
"=",
"0",
")",
"self",
".",
"params",
",",
"self",
".",
"rot_mat",
",",
"self",
".",
"rot_mean",
",",
"self",
".",
"rot_std",
"=",
"self",
".",
"normalize_params",
"(",
"self",
".",
"params",
")",
"# model",
"print",
"(",
"\"N params: \"",
",",
"len",
"(",
"self",
".",
"params",
")",
")",
"data",
"=",
"np",
".",
"concatenate",
"(",
"[",
"self",
".",
"Y_all",
",",
"np",
".",
"maximum",
"(",
"self",
".",
"var_estimates",
",",
"self",
".",
"minimum_variance",
")",
"]",
",",
"axis",
"=",
"1",
")",
"self",
".",
"model",
"=",
"gpflow",
".",
"models",
".",
"VGP",
"(",
"(",
"self",
".",
"params",
".",
"astype",
"(",
"self",
".",
"default_np_float",
")",
",",
"data",
".",
"astype",
"(",
"self",
".",
"default_np_float",
")",
")",
",",
"kernel",
"=",
"self",
".",
"kern",
",",
"likelihood",
"=",
"self",
".",
"likelihood",
",",
"num_latent_gps",
"=",
"1",
")",
"t0",
"=",
"time",
"(",
")",
"self",
".",
"optimize_model",
"(",
")",
"t1",
"=",
"time",
"(",
")",
"print",
"(",
"\"GP optimization took {} sec with {} restarts...\"",
".",
"format",
"(",
"t1",
"-",
"t0",
",",
"self",
".",
"num_restarts",
")",
")",
"# new acqui",
"self",
".",
"current_transform",
"=",
"lambda",
"x",
":",
"self",
".",
"transform_params",
"(",
"x",
",",
"self",
".",
"rot_mat",
",",
"self",
".",
"rot_mean",
",",
"self",
".",
"rot_std",
")",
"# TESTING",
"# =======",
"# TODO: implement this better",
"current_samples",
"=",
"self",
".",
"transform_params",
"(",
"convergence_samples",
",",
"self",
".",
"rot_mat",
",",
"self",
".",
"rot_mean",
",",
"self",
".",
"rot_std",
")",
"current_samples",
"=",
"current_samples",
".",
"astype",
"(",
"self",
".",
"default_np_float",
")",
"# get new preds (unnorm)",
"new_preds",
"=",
"np",
".",
"exp",
"(",
"self",
".",
"model",
".",
"predict_f",
"(",
"current_samples",
")",
"[",
"0",
"]",
".",
"numpy",
"(",
")",
".",
"ravel",
"(",
")",
"*",
"self",
".",
"Y_std",
"+",
"self",
".",
"Y_mean",
")",
"if",
"old_preds",
"is",
"not",
"None",
":",
"# Bhattacharyya distance",
"new_coef",
"=",
"np",
".",
"mean",
"(",
"np",
".",
"sqrt",
"(",
"old_preds",
"*",
"new_preds",
")",
")",
"print",
"(",
"new_coef",
")",
"if",
"old_coef",
"is",
"not",
"None",
":",
"rel_change",
"=",
"100",
"*",
"np",
".",
"abs",
"(",
"old_coef",
"-",
"new_coef",
")",
"/",
"new_coef",
"print",
"(",
"\"Relavitve change: \"",
",",
"rel_change",
")",
"changes",
".",
"append",
"(",
"rel_change",
")",
"old_coef",
"=",
"new_coef",
"if",
"len",
"(",
"changes",
")",
">",
"5",
"and",
"not",
"np",
".",
"any",
"(",
"np",
".",
"asarray",
"(",
"changes",
")",
"[",
"-",
"5",
":",
"]",
">",
"rel_tol",
")",
":",
"break",
"# Save stuff",
"if",
"save_path",
"is",
"not",
"None",
"and",
"i",
">",
"0",
"and",
"i",
"%",
"save_iter",
"==",
"0",
":",
"current_path",
"=",
"os",
".",
"path",
".",
"join",
"(",
"save_path",
",",
"\"iter_%i\"",
"%",
"(",
"i",
")",
")",
"if",
"not",
"os",
".",
"path",
".",
"exists",
"(",
"current_path",
")",
":",
"os",
".",
"mkdir",
"(",
"current_path",
")",
"self",
".",
"save_model",
"(",
"current_path",
")",
"print",
"(",
"\"Saved intermediate model at <{}>\"",
".",
"format",
"(",
"current_path",
")",
")",
"else",
":",
"print",
"(",
"\"Save path <{}> exists, \"",
"\"skipping save model in order to avoid overwriting...\"",
".",
"format",
"(",
"current_path",
")",
")",
"print",
"(",
"\"Done...\"",
")"
] | [
314,
4
] | [
420,
24
] | python | en | ['en', 'error', 'th'] | False |
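A minimal end-to-end sketch of how the emulator appears meant to be driven, assuming a GPyOpt design space and an objective that returns one (log-probability estimate, variance estimate) pair per input row; my_objective and the domain below are illustrative, not from the source:

import numpy as np
import GPyOpt

def my_objective(x):
    # toy stand-in: column 0 is a noisy log-density, column 1 its variance estimate
    logp = -np.sum(x ** 2, axis=1, keepdims=True)
    var = 0.01 * np.ones_like(logp)
    return np.concatenate([logp, var], axis=1)

space = GPyOpt.Design_space(space=[
    {"name": "x%d" % i, "type": "continuous", "domain": (-3.0, 3.0)} for i in range(2)])

emu = VGP_Emu(objective=my_objective, space=space, N_init=20, num_restarts=3)
emu.optimize(n_draw=5, max_iters=10)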
VGP_Emu.sample_new | (self, n_draw, burn_in=1000, n_leap=10, step_size=0.05, MCMC_type="Hasting",
parallel_iterations=10, start_type="prior", num_results=250, n_chains=None,
replace_post=False, min_dist=1e-3, hasting_scale=0.05) |
Draws new samples from the acquisition function
:param n_draw: number of samples to draw
:param burn_in: number of burn-in steps
:param n_leap: number of leapfrog steps if MCMC_type == "HMC"
:param step_size: step size for the HMC algorithm
:param MCMC_type: type of MCMC (either "HMC" or "Hasting")
:param parallel_iterations: parallel iterations of the sample-chain procedure
:param start_type: select the MCMC initial state by sampling from the prior or from the samples of the objective
function (posterior) weighted by their probability
:param num_results: number of results to draw (has to be large enough such that accepted > n_draw)
:param n_chains: number of parallel chains
:param replace_post: if start_type == "posterior", draw with replacement or not
:param min_dist: minimum distance accepted samples need to be apart to count as "new"
:param hasting_scale: scale of the Gaussian noise used for the Hastings proposals
:return: the new samples as numpy array with default float type
|
Draws new samples from the acquisition function
:param n_draw: number of samples to draw
:param burn_in: number of burn-in steps
:param n_leap: number of leapfrog steps if MCMC_type == "HMC"
:param step_size: step size for the HMC algorithm
:param MCMC_type: type of MCMC (either "HMC" or "Hasting")
:param parallel_iterations: parallel iterations of the sample-chain procedure
:param start_type: select the MCMC initial state by sampling from the prior or from the samples of the objective
function (posterior) weighted by their probability
:param num_results: number of results to draw (has to be large enough such that accepted > n_draw)
:param n_chains: number of parallel chains
:param replace_post: if start_type == "posterior", draw with replacement or not
:param min_dist: minimum distance accepted samples need to be apart to count as "new"
:param hasting_scale: scale of the Gaussian noise used for the Hastings proposals
:return: the new samples as numpy array with default float type
| def sample_new(self, n_draw, burn_in=1000, n_leap=10, step_size=0.05, MCMC_type="Hasting",
               parallel_iterations=10, start_type="prior", num_results=250, n_chains=None,
               replace_post=False, min_dist=1e-3, hasting_scale=0.05):
        """
        Draws new samples from the acquisition function
        :param n_draw: number of samples to draw
        :param burn_in: number of burn-in steps
        :param n_leap: number of leapfrog steps if MCMC_type == "HMC"
        :param step_size: step size for the HMC algorithm
        :param MCMC_type: type of MCMC (either "HMC" or "Hasting")
        :param parallel_iterations: parallel iterations of the sample-chain procedure
        :param start_type: select the MCMC initial state by sampling from the prior or from the samples of the
                           objective function (posterior) weighted by their probability
        :param num_results: number of results to draw (has to be large enough such that accepted > n_draw)
        :param n_chains: number of parallel chains
        :param replace_post: if start_type == "posterior", draw with replacement or not
        :param min_dist: minimum distance accepted samples need to be apart to count as "new"
        :param hasting_scale: scale of the Gaussian noise used for the Hastings proposals
        :return: the new samples as numpy array with default float type
        """
        # set number of chains to at least 2*ndim if not defined
        if n_chains is None:
            n_chains = 2 * self.dims

        # starting points
        if start_type == "prior":
            start = self.tfp_prior.sample(n_chains)
        elif start_type == "posterior":
            probs = np.ravel(self.Y_all * self.Y_std + self.Y_mean)
            probs = probs - np.max(probs)
            probs = np.exp(probs)
            param = self.unnormalize_params(self.params, self.rot_mat, self.rot_mean, self.rot_std)
            choices = np.random.choice(a=len(param), size=n_chains, p=probs / np.sum(probs), replace=replace_post)
            start = tf.convert_to_tensor(param[choices], dtype=self.default_tf_float)
        else:
            raise IOError("Unknown start_type: {}".format(start_type))

        # predictor (with alpha value)
        rot_mat = tf.constant(self.rot_mat, dtype=self.default_tf_float)
        rot_mean = tf.constant(self.rot_mean, dtype=self.default_tf_float)
        rot_std = tf.constant(self.rot_std, dtype=self.default_tf_float)
        Y_mean = tf.constant(self.Y_mean, dtype=self.default_tf_float)
        Y_std = tf.constant(self.Y_std, dtype=self.default_tf_float)

        @tf.function
        def log_prob_no_prior(X):
            rot_params = tf.einsum("ij,aj->ai", rot_mat, X)
            X = (rot_params - rot_mean) / rot_std
            mean, var = self.model.predict_f(X)
            if self.normalize_Y:
                mean = mean * Y_std + Y_mean
                var *= Y_std ** 2
            if self.alpha < 1e-8:
                return mean
            else:
                return tf.math.log(tf.exp(mean) + self.alpha * (tf.exp(var) - 1.0) * tf.exp(2 * mean + var))

        # define the log prob
        @tf.function
        def log_prob(x):
            # log prob returns -inf if not in prior
            condition = tf.reduce_any(self.tfp_prior.log_prob(x) < -1000, axis=-1)
            if_true = tf.ones_like(condition, dtype=self.default_tf_float) * -np.inf
            if_false = tf.squeeze(log_prob_no_prior(x), axis=-1)
            return tf.where(condition, if_true, if_false)

        if MCMC_type == "HMC":
            kernel = tfp.mcmc.HamiltonianMonteCarlo(target_log_prob_fn=log_prob,
                                                    num_leapfrog_steps=n_leap,
                                                    step_size=step_size)
            kernel = tfp.mcmc.SimpleStepSizeAdaptation(inner_kernel=kernel, num_adaptation_steps=int(burn_in * 0.8))

            @tf.function
            def run_chain():
                # Run the chain (with burn-in).
                samples, accepted = tfp.mcmc.sample_chain(
                    num_results=num_results,
                    num_burnin_steps=burn_in,
                    current_state=start,
                    kernel=kernel,
                    parallel_iterations=parallel_iterations,
                    trace_fn=lambda _, pkr: pkr.inner_results.is_accepted)
                return samples, accepted

            samples, accepted = run_chain()
        elif MCMC_type == "Hasting":
            @tf.function
            def run_chain():
                samples, accepted = tfp.mcmc.sample_chain(
                    num_results=num_results,
                    current_state=start,
                    kernel=tfp.mcmc.RandomWalkMetropolis(
                        target_log_prob_fn=log_prob,
                        new_state_fn=tfp.mcmc.random_walk_normal_fn(scale=hasting_scale, name=None)),
                    num_burnin_steps=burn_in,
                    # Thinning.
                    num_steps_between_results=1,
                    parallel_iterations=parallel_iterations,
                    trace_fn=lambda _, pkr: pkr.is_accepted)
                return samples, accepted

            samples, accepted = run_chain()
        else:
            raise IOError("Unknown MCMC_type: {}".format(MCMC_type))

        samples = samples.numpy()
        accepted = accepted.numpy()
        n_accepted = np.sum(accepted)
        if self.verbosity > 0:
            print("Accepted {} samples with burn in {} and num results {}".format(n_accepted, burn_in, num_results))
        if n_accepted < n_draw:
            raise ValueError("Not enough samples were accepted with the given burn-in and num_results; "
                             "try to increase these values to avoid this error.")
        samples = samples.reshape((-1, self.dims))
        np.random.shuffle(samples)

        # we want n_draw samples that are at least min_dist apart
        new_samples = np.zeros((n_draw, self.dims), dtype=self.default_np_float)
        new_samples[0] = samples[0]
        count = 1
        for samp in samples[1:]:
            if np.all((np.linalg.norm(new_samples - samp, axis=1) > min_dist)[:count]):
                new_samples[count] = samp
                count += 1
            if count == n_draw:
                break
        if count != n_draw:
            raise ValueError("Number of samples whose distance is larger than min_dist is less than n_draw. "
                             "Either increase num_results or decrease min_dist!")
        # return n_draw samples
        return new_samples
"def",
"sample_new",
"(",
"self",
",",
"n_draw",
",",
"burn_in",
"=",
"1000",
",",
"n_leap",
"=",
"10",
",",
"step_size",
"=",
"0.05",
",",
"MCMC_type",
"=",
"\"Hasting\"",
",",
"parallel_iterations",
"=",
"10",
",",
"start_type",
"=",
"\"prior\"",
",",
"num_results",
"=",
"250",
",",
"n_chains",
"=",
"None",
",",
"replace_post",
"=",
"False",
",",
"min_dist",
"=",
"1e-3",
",",
"hasting_scale",
"=",
"0.05",
")",
":",
"# set number of chains to at least 2*ndim if not defines",
"if",
"n_chains",
"is",
"None",
":",
"n_chains",
"=",
"2",
"*",
"self",
".",
"dims",
"# starting points",
"if",
"start_type",
"==",
"\"prior\"",
":",
"start",
"=",
"self",
".",
"tfp_prior",
".",
"sample",
"(",
"n_chains",
")",
"elif",
"start_type",
"==",
"\"posterior\"",
":",
"probs",
"=",
"np",
".",
"ravel",
"(",
"self",
".",
"Y_all",
"*",
"self",
".",
"Y_std",
"+",
"self",
".",
"Y_mean",
")",
"probs",
"=",
"probs",
"-",
"np",
".",
"max",
"(",
"probs",
")",
"probs",
"=",
"np",
".",
"exp",
"(",
"probs",
")",
"param",
"=",
"self",
".",
"unnormalize_params",
"(",
"self",
".",
"params",
",",
"self",
".",
"rot_mat",
",",
"self",
".",
"rot_mean",
",",
"self",
".",
"rot_std",
")",
"choices",
"=",
"np",
".",
"random",
".",
"choice",
"(",
"a",
"=",
"len",
"(",
"param",
")",
",",
"size",
"=",
"n_chains",
",",
"p",
"=",
"probs",
"/",
"np",
".",
"sum",
"(",
"probs",
")",
",",
"replace",
"=",
"replace_post",
")",
"start",
"=",
"tf",
".",
"convert_to_tensor",
"(",
"param",
"[",
"choices",
"]",
",",
"dtype",
"=",
"self",
".",
"default_tf_float",
")",
"else",
":",
"raise",
"IOError",
"(",
"\"Unknown start_type: {}\"",
".",
"format",
"(",
"start_type",
")",
")",
"# predictor (with alpha value)",
"rot_mat",
"=",
"tf",
".",
"constant",
"(",
"self",
".",
"rot_mat",
",",
"dtype",
"=",
"self",
".",
"default_tf_float",
")",
"rot_mean",
"=",
"tf",
".",
"constant",
"(",
"self",
".",
"rot_mean",
",",
"dtype",
"=",
"self",
".",
"default_tf_float",
")",
"rot_std",
"=",
"tf",
".",
"constant",
"(",
"self",
".",
"rot_std",
",",
"dtype",
"=",
"self",
".",
"default_tf_float",
")",
"Y_mean",
"=",
"tf",
".",
"constant",
"(",
"self",
".",
"Y_mean",
",",
"dtype",
"=",
"self",
".",
"default_tf_float",
")",
"Y_std",
"=",
"tf",
".",
"constant",
"(",
"self",
".",
"Y_std",
",",
"dtype",
"=",
"self",
".",
"default_tf_float",
")",
"@",
"tf",
".",
"function",
"def",
"log_prob_no_prior",
"(",
"X",
")",
":",
"rot_params",
"=",
"tf",
".",
"einsum",
"(",
"\"ij,aj->ai\"",
",",
"rot_mat",
",",
"X",
")",
"X",
"=",
"(",
"rot_params",
"-",
"rot_mean",
")",
"/",
"rot_std",
"mean",
",",
"var",
"=",
"self",
".",
"model",
".",
"predict_f",
"(",
"X",
")",
"if",
"self",
".",
"normalize_Y",
":",
"mean",
"=",
"mean",
"*",
"Y_std",
"+",
"Y_mean",
"var",
"*=",
"Y_std",
"**",
"2",
"if",
"self",
".",
"alpha",
"<",
"1e-8",
":",
"return",
"mean",
"else",
":",
"return",
"tf",
".",
"math",
".",
"log",
"(",
"tf",
".",
"exp",
"(",
"mean",
")",
"+",
"self",
".",
"alpha",
"*",
"(",
"tf",
".",
"exp",
"(",
"var",
")",
"-",
"1.0",
")",
"*",
"tf",
".",
"exp",
"(",
"2",
"*",
"mean",
"+",
"var",
")",
")",
"# def log prob",
"@",
"tf",
".",
"function",
"def",
"log_prob",
"(",
"x",
")",
":",
"# log prob returns -inf if not in prior",
"condition",
"=",
"tf",
".",
"reduce_any",
"(",
"self",
".",
"tfp_prior",
".",
"log_prob",
"(",
"x",
")",
"<",
"-",
"1000",
",",
"axis",
"=",
"-",
"1",
")",
"if_true",
"=",
"tf",
".",
"ones_like",
"(",
"condition",
",",
"dtype",
"=",
"self",
".",
"default_tf_float",
")",
"*",
"-",
"np",
".",
"inf",
"if_false",
"=",
"tf",
".",
"squeeze",
"(",
"log_prob_no_prior",
"(",
"x",
")",
",",
"axis",
"=",
"-",
"1",
")",
"return",
"tf",
".",
"where",
"(",
"condition",
",",
"if_true",
",",
"if_false",
")",
"if",
"MCMC_type",
"==",
"\"HMC\"",
":",
"kernel",
"=",
"tfp",
".",
"mcmc",
".",
"HamiltonianMonteCarlo",
"(",
"target_log_prob_fn",
"=",
"log_prob",
",",
"num_leapfrog_steps",
"=",
"n_leap",
",",
"step_size",
"=",
"step_size",
")",
"kernel",
"=",
"tfp",
".",
"mcmc",
".",
"SimpleStepSizeAdaptation",
"(",
"inner_kernel",
"=",
"kernel",
",",
"num_adaptation_steps",
"=",
"int",
"(",
"burn_in",
"*",
"0.8",
")",
")",
"# Run the chain (with burn-in).",
"@",
"tf",
".",
"function",
"def",
"run_chain",
"(",
")",
":",
"# Run the chain (with burn-in).",
"samples",
",",
"accepted",
"=",
"tfp",
".",
"mcmc",
".",
"sample_chain",
"(",
"num_results",
"=",
"num_results",
",",
"num_burnin_steps",
"=",
"burn_in",
",",
"current_state",
"=",
"start",
",",
"kernel",
"=",
"kernel",
",",
"parallel_iterations",
"=",
"parallel_iterations",
",",
"trace_fn",
"=",
"lambda",
"_",
",",
"pkr",
":",
"pkr",
".",
"inner_results",
".",
"is_accepted",
")",
"return",
"samples",
",",
"accepted",
"samples",
",",
"accepted",
"=",
"run_chain",
"(",
")",
"elif",
"MCMC_type",
"==",
"\"Hasting\"",
":",
"@",
"tf",
".",
"function",
"def",
"run_chain",
"(",
")",
":",
"samples",
",",
"accepted",
"=",
"tfp",
".",
"mcmc",
".",
"sample_chain",
"(",
"num_results",
"=",
"num_results",
",",
"current_state",
"=",
"start",
",",
"kernel",
"=",
"tfp",
".",
"mcmc",
".",
"RandomWalkMetropolis",
"(",
"target_log_prob_fn",
"=",
"log_prob",
",",
"new_state_fn",
"=",
"tfp",
".",
"mcmc",
".",
"random_walk_normal_fn",
"(",
"scale",
"=",
"hasting_scale",
",",
"name",
"=",
"None",
")",
")",
",",
"num_burnin_steps",
"=",
"burn_in",
",",
"# Thinning.",
"num_steps_between_results",
"=",
"1",
",",
"parallel_iterations",
"=",
"parallel_iterations",
",",
"trace_fn",
"=",
"lambda",
"_",
",",
"pkr",
":",
"pkr",
".",
"is_accepted",
")",
"return",
"samples",
",",
"accepted",
"samples",
",",
"accepted",
"=",
"run_chain",
"(",
")",
"else",
":",
"raise",
"IOError",
"(",
"\"Unkown MCMC_type: {}\"",
".",
"format",
"(",
"MCMC_type",
")",
")",
"samples",
"=",
"samples",
".",
"numpy",
"(",
")",
"accepted",
"=",
"accepted",
".",
"numpy",
"(",
")",
"n_accepted",
"=",
"np",
".",
"sum",
"(",
"accepted",
")",
"if",
"self",
".",
"verbosity",
">",
"0",
":",
"print",
"(",
"\"Accepted {} samples with burn in {} and num results {}\"",
".",
"format",
"(",
"n_accepted",
",",
"burn_in",
",",
"num_results",
")",
")",
"if",
"n_accepted",
"<",
"n_draw",
":",
"raise",
"ValueError",
"(",
"\"Not enough samples where accecpted with the given burn in and num result,\"",
"\"try to increase these values to avoid this error.\"",
")",
"samples",
"=",
"samples",
".",
"reshape",
"(",
"(",
"-",
"1",
",",
"self",
".",
"dims",
")",
")",
"np",
".",
"random",
".",
"shuffle",
"(",
"samples",
")",
"# we want n_draw samples that are at least min_dist apart",
"new_samples",
"=",
"np",
".",
"zeros",
"(",
"(",
"n_draw",
",",
"self",
".",
"dims",
")",
",",
"dtype",
"=",
"self",
".",
"default_np_float",
")",
"new_samples",
"[",
"0",
"]",
"=",
"samples",
"[",
"0",
"]",
"count",
"=",
"1",
"for",
"samp",
"in",
"samples",
"[",
"1",
":",
"]",
":",
"if",
"np",
".",
"all",
"(",
"(",
"np",
".",
"linalg",
".",
"norm",
"(",
"new_samples",
"-",
"samp",
",",
"axis",
"=",
"1",
")",
">",
"min_dist",
")",
"[",
":",
"count",
"]",
")",
":",
"new_samples",
"[",
"count",
"]",
"=",
"samp",
"count",
"+=",
"1",
"if",
"count",
"==",
"n_draw",
":",
"break",
"if",
"count",
"!=",
"n_draw",
":",
"raise",
"ValueError",
"(",
"\"Number of samples whose distance is large than min_dist is less than n_draw. \"",
"\"Either increase num_results or decrease min_dist!\"",
")",
"# return n_draws",
"return",
"new_samples"
] | [
422,
4
] | [
561,
26
] | python | en | ['en', 'error', 'th'] | False |
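The tfp.mcmc.sample_chain pattern used above, reduced to a self-contained sketch that samples a 2-D standard normal with the same random-walk Metropolis kernel (scales and chain counts are arbitrary choices):

import numpy as np
import tensorflow as tf
import tensorflow_probability as tfp

def log_prob(x):
    # unnormalized log-density of a 2-D standard normal
    return -0.5 * tf.reduce_sum(x ** 2, axis=-1)

samples, is_accepted = tfp.mcmc.sample_chain(
    num_results=500,
    num_burnin_steps=500,
    current_state=tf.zeros((8, 2)),  # 8 parallel chains
    kernel=tfp.mcmc.RandomWalkMetropolis(
        target_log_prob_fn=log_prob,
        new_state_fn=tfp.mcmc.random_walk_normal_fn(scale=0.5)),
    trace_fn=lambda _, pkr: pkr.is_accepted)

print(samples.shape)                 # (500, 8, 2)
print(np.mean(is_accepted.numpy()))  # acceptance rate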
VGP_Emu.normalize_params | (self, params) |
Rotates the params into linearly uncorrelated coordinates and normalizes them to zero mean and unit variance
|
Rotates the params into linearly uncorrelated coordinates and normalizes them to zero mean and unit variance
| def normalize_params(self, params):
        """
        Rotates the params into linearly uncorrelated coordinates and normalizes them to zero mean and unit variance
        """
        # make the params linearly uncorrelated
        cov = np.cov(params, rowvar=False)
        # eigenvals and vecs (cov is symmetric, so eigh keeps everything real)
        w, v = np.linalg.eigh(cov)
        # rot mat is v.T
        rot_mat = v.T
        # dot prod
        rot_params = np.einsum("ij,aj->ai", rot_mat, params)
        # mean
        rot_mean = np.mean(rot_params, axis=0, keepdims=True)
        # std (ddof of np.cov for consistency)
        rot_std = np.std(rot_params, axis=0, keepdims=True, ddof=1)
        # normalize
        new_params = (rot_params - rot_mean) / rot_std
        return new_params, rot_mat, rot_mean, rot_std
"def",
"normalize_params",
"(",
"self",
",",
"params",
")",
":",
"# make the params linearly uncorrelated",
"cov",
"=",
"np",
".",
"cov",
"(",
"params",
",",
"rowvar",
"=",
"False",
")",
"# eigenvals and vecs",
"w",
",",
"v",
"=",
"np",
".",
"linalg",
".",
"eig",
"(",
"cov",
")",
"# rot mat is v.T",
"rot_mat",
"=",
"v",
".",
"T",
"# dot prod",
"rot_params",
"=",
"np",
".",
"einsum",
"(",
"\"ij,aj->ai\"",
",",
"rot_mat",
",",
"params",
")",
"# mean",
"rot_mean",
"=",
"np",
".",
"mean",
"(",
"rot_params",
",",
"axis",
"=",
"0",
",",
"keepdims",
"=",
"True",
")",
"# std (ddof of np.cov for consistency)",
"rot_std",
"=",
"np",
".",
"std",
"(",
"rot_params",
",",
"axis",
"=",
"0",
",",
"keepdims",
"=",
"True",
",",
"ddof",
"=",
"1",
")",
"# normalize",
"new_params",
"=",
"(",
"rot_params",
"-",
"rot_mean",
")",
"/",
"rot_std",
"return",
"new_params",
",",
"rot_mat",
",",
"rot_mean",
",",
"rot_std"
] | [
564,
4
] | [
583,
53
] | python | en | ['en', 'error', 'th'] | False |
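normalize_params is a PCA-style whitening; a numpy-only round-trip sketch of the same transform outside the class (illustrative data):

import numpy as np

rng = np.random.default_rng(1)
params = rng.multivariate_normal([1.0, -2.0], [[2.0, 0.8], [0.8, 1.0]], size=500)

cov = np.cov(params, rowvar=False)
w, v = np.linalg.eigh(cov)  # covariance is symmetric, so eigh is safe
rot_mat = v.T
rot = np.einsum("ij,aj->ai", rot_mat, params)
rot_mean = rot.mean(axis=0, keepdims=True)
rot_std = rot.std(axis=0, keepdims=True, ddof=1)
white = (rot - rot_mean) / rot_std

# the whitened columns are decorrelated with unit variance
print(np.round(np.cov(white, rowvar=False), 3))
# rot_mat is orthonormal, so the reverse transform recovers the input
back = np.einsum("ij,aj->ai", rot_mat.T, white * rot_std + rot_mean)
print(np.allclose(back, params))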
VGP_Emu.transform_params | (self, params, rot_mat, rot_mean, rot_std) |
Normalizes params given the rotation, shift and scale
|
Normalizes params given the rotation, shift and scale
| def transform_params(self, params, rot_mat, rot_mean, rot_std):
        """
        Normalizes params given the rotation, shift and scale
        """
        rot_params = np.einsum("ij,aj->ai", rot_mat, params)
        new_params = (rot_params - rot_mean) / rot_std
        return new_params
"def",
"transform_params",
"(",
"self",
",",
"params",
",",
"rot_mat",
",",
"rot_mean",
",",
"rot_std",
")",
":",
"rot_params",
"=",
"np",
".",
"einsum",
"(",
"\"ij,aj->ai\"",
",",
"rot_mat",
",",
"params",
")",
"new_params",
"=",
"(",
"rot_params",
"-",
"rot_mean",
")",
"/",
"rot_std",
"return",
"new_params"
] | [
586,
4
] | [
592,
25
] | python | en | ['en', 'error', 'th'] | False |
VGP_Emu.reverse_transform_params | (self, params, rot_mat, rot_mean, rot_std) |
Applies the transformation in reverse
|
Applies the transformation in reverse
| def reverse_transform_params(self, params, rot_mat, rot_mean, rot_std):
        """
        Applies the transformation in reverse
        """
        new_params = params * rot_std + rot_mean
        new_params = np.einsum("ij,aj->ai", rot_mat.T, new_params)
        return new_params
"def",
"reverse_transform_params",
"(",
"self",
",",
"params",
",",
"rot_mat",
",",
"rot_mean",
",",
"rot_std",
")",
":",
"new_params",
"=",
"params",
"*",
"rot_std",
"+",
"rot_mean",
"new_params",
"=",
"np",
".",
"einsum",
"(",
"\"ij,aj->ai\"",
",",
"rot_mat",
".",
"T",
",",
"new_params",
")",
"return",
"new_params"
] | [
595,
4
] | [
602,
25
] | python | en | ['en', 'error', 'th'] | False |
VGP_Emu.unnormalize_params | (self, params, rot_mat, rot_mean, rot_std) |
Removes normalization
|
Removes normalization
| def unnormalize_params(self, params, rot_mat, rot_mean, rot_std):
        """
        Removes normalization
        """
        new_params = params * rot_std + rot_mean
        # inverse rotation
        new_params = np.einsum("ij,aj->ai", rot_mat.T, new_params)
        return new_params
"def",
"unnormalize_params",
"(",
"self",
",",
"params",
",",
"rot_mat",
",",
"rot_mean",
",",
"rot_std",
")",
":",
"new_params",
"=",
"params",
"*",
"rot_std",
"+",
"rot_mean",
"# inverse rotation",
"new_params",
"=",
"np",
".",
"einsum",
"(",
"\"ij,aj->ai\"",
",",
"rot_mat",
".",
"T",
",",
"new_params",
")",
"return",
"new_params"
] | [
605,
4
] | [
612,
25
] | python | en | ['en', 'error', 'th'] | False |
L_star_fun | (L_star) |
This calculates the function whose zero is the optimal well spacing, L_star.
|
This calculates the function whose zero is the optimal well spacing, L_star.
| def L_star_fun(L_star):
    """
    This calculates the function whose zero is the optimal well spacing, L_star.
    """
    import numpy as np
    # D, rho_w, permeability_, etc. are module-level constants
    return L_star**2.0*np.log(L_star/D) - \
        2.0*np.pi*rho_w*permeability_/viscosity * \
        ((alphaII*rho_r-rho_w)*gravity*reservoir_depth) * \
        Cpw*t_inj/((rho_w*Cpw*porosity+rho_r*Cpr*(1.0-porosity)))
"def",
"L_star_fun",
"(",
"L_star",
")",
":",
"import",
"numpy",
"as",
"np",
"return",
"L_star",
"**",
"2.0",
"*",
"np",
".",
"log",
"(",
"L_star",
"/",
"D",
")",
"-",
"2.0",
"*",
"np",
".",
"pi",
"*",
"rho_w",
"*",
"permeability_",
"/",
"viscosity",
"*",
"(",
"(",
"alphaII",
"*",
"rho_r",
"-",
"rho_w",
")",
"*",
"gravity",
"*",
"reservoir_depth",
")",
"*",
"Cpw",
"*",
"t_inj",
"/",
"(",
"(",
"rho_w",
"*",
"Cpw",
"*",
"porosity",
"+",
"rho_r",
"*",
"Cpr",
"*",
"(",
"1.0",
"-",
"porosity",
")",
")",
")"
] | [
19,
0
] | [
27,
68
] | python | en | ['en', 'error', 'th'] | False |
L_star_fun2 | (L_star) |
This calculates the optimal well spacing, L_star, if the reservoir
constraints imply a flow rate that is higher than the flow rate that
would minimize the LCOH.
capital_cost_internal and CRF must be defined externally
|
This calculates the optimal well spacing, L_star, if the reservoir
constraints imply a flow rate that is higher than the flow rate that
would minimize the LCOH. | def L_star_fun2(L_star):
    """
    This calculates the optimal well spacing, L_star, if the reservoir
    constraints imply a flow rate that is higher than the flow rate that
    would minimize the LCOH.
    capital_cost_internal and CRF must be defined externally
    """
    import numpy as np
    return (capital_cost_internal*CRF*rho_w*rho_w * np.pi * permeability_*b_ /
            (2.0*dollars_per_kWhth*joule_to_kWh*t_inj*viscosity *
             np.log(L_star/D)))**0.5 - \
        (rho_w*Cpw*porosity + rho_r*Cpr*(1.0-porosity)) * \
        (L_star**2 * b_) / (Cpw*t_inj)
"def",
"L_star_fun2",
"(",
"L_star",
")",
":",
"import",
"numpy",
"as",
"np",
"return",
"(",
"capital_cost_internal",
"*",
"CRF",
"*",
"rho_w",
"*",
"rho_w",
"*",
"np",
".",
"pi",
"*",
"permeability_",
"*",
"b_",
"/",
"(",
"2.0",
"*",
"dollars_per_kWhth",
"*",
"joule_to_kWh",
"*",
"t_inj",
"*",
"viscosity",
"*",
"np",
".",
"log",
"(",
"L_star",
"/",
"D",
")",
")",
")",
"**",
"0.5",
"-",
"(",
"rho_w",
"*",
"Cpw",
"*",
"porosity",
"+",
"rho_r",
"*",
"Cpr",
"*",
"(",
"1.0",
"-",
"porosity",
")",
")",
"*",
"(",
"L_star",
"**",
"2",
"*",
"b_",
")",
"/",
"(",
"Cpw",
"*",
"t_inj",
")"
] | [
29,
0
] | [
43,
40
] | python | en | ['en', 'error', 'th'] | False |
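Both L_star functions are residuals intended for a scalar root finder. A hedged sketch with scipy.optimize.brentq; every physical constant below is a placeholder, not a value from the source:

import numpy as np
from scipy.optimize import brentq

# placeholder module-level constants that L_star_fun reads
D, rho_w, rho_r = 0.2, 1000.0, 2650.0
permeability_, viscosity = 1e-13, 3e-4
alphaII, gravity, reservoir_depth = 0.9, 9.81, 2500.0
Cpw, Cpr, porosity = 4200.0, 900.0, 0.1
t_inj = 30 * 365.25 * 24 * 3600.0  # 30 years in seconds

def L_star_fun(L_star):
    return L_star**2.0*np.log(L_star/D) - \
        2.0*np.pi*rho_w*permeability_/viscosity * \
        ((alphaII*rho_r-rho_w)*gravity*reservoir_depth) * \
        Cpw*t_inj/((rho_w*Cpw*porosity+rho_r*Cpr*(1.0-porosity)))

# the bracket [1, 1e4] must straddle the sign change; widen it if brentq complains
L_star = brentq(L_star_fun, 1.0, 1e4)
print(L_star)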
Serializer.prepare_response | (self, request, cached) | Verify our vary headers match and construct a real urllib3
HTTPResponse object.
| Verify our vary headers match and construct a real urllib3
HTTPResponse object.
| def prepare_response(self, request, cached):
        """Verify our vary headers match and construct a real urllib3
        HTTPResponse object.
        """
        # Special case the '*' Vary value as it means we cannot actually
        # determine if the cached response is suitable for this request.
        # This case is also handled in the controller code when creating
        # a cache entry, but is left here for backwards compatibility.
        if "*" in cached.get("vary", {}):
            return

        # Ensure that the Vary headers for the cached response match our
        # request
        for header, value in cached.get("vary", {}).items():
            if request.headers.get(header, None) != value:
                return

        body_raw = cached["response"].pop("body")

        headers = CaseInsensitiveDict(data=cached["response"]["headers"])
        if headers.get("transfer-encoding", "") == "chunked":
            headers.pop("transfer-encoding")

        cached["response"]["headers"] = headers

        try:
            body = io.BytesIO(body_raw)
        except TypeError:
            # This can happen if cachecontrol serialized to v1 format (pickle)
            # using Python 2. A Python 2 str(byte string) will be unpickled as
            # a Python 3 str (unicode string), which will cause the above to
            # fail with:
            #
            #     TypeError: 'str' does not support the buffer interface
            body = io.BytesIO(body_raw.encode("utf8"))

        return HTTPResponse(body=body, preload_content=False, **cached["response"])
"def",
"prepare_response",
"(",
"self",
",",
"request",
",",
"cached",
")",
":",
"# Special case the '*' Vary value as it means we cannot actually",
"# determine if the cached response is suitable for this request.",
"# This case is also handled in the controller code when creating",
"# a cache entry, but is left here for backwards compatibility.",
"if",
"\"*\"",
"in",
"cached",
".",
"get",
"(",
"\"vary\"",
",",
"{",
"}",
")",
":",
"return",
"# Ensure that the Vary headers for the cached response match our",
"# request",
"for",
"header",
",",
"value",
"in",
"cached",
".",
"get",
"(",
"\"vary\"",
",",
"{",
"}",
")",
".",
"items",
"(",
")",
":",
"if",
"request",
".",
"headers",
".",
"get",
"(",
"header",
",",
"None",
")",
"!=",
"value",
":",
"return",
"body_raw",
"=",
"cached",
"[",
"\"response\"",
"]",
".",
"pop",
"(",
"\"body\"",
")",
"headers",
"=",
"CaseInsensitiveDict",
"(",
"data",
"=",
"cached",
"[",
"\"response\"",
"]",
"[",
"\"headers\"",
"]",
")",
"if",
"headers",
".",
"get",
"(",
"\"transfer-encoding\"",
",",
"\"\"",
")",
"==",
"\"chunked\"",
":",
"headers",
".",
"pop",
"(",
"\"transfer-encoding\"",
")",
"cached",
"[",
"\"response\"",
"]",
"[",
"\"headers\"",
"]",
"=",
"headers",
"try",
":",
"body",
"=",
"io",
".",
"BytesIO",
"(",
"body_raw",
")",
"except",
"TypeError",
":",
"# This can happen if cachecontrol serialized to v1 format (pickle)",
"# using Python 2. A Python 2 str(byte string) will be unpickled as",
"# a Python 3 str (unicode string), which will cause the above to",
"# fail with:",
"#",
"# TypeError: 'str' does not support the buffer interface",
"body",
"=",
"io",
".",
"BytesIO",
"(",
"body_raw",
".",
"encode",
"(",
"\"utf8\"",
")",
")",
"return",
"HTTPResponse",
"(",
"body",
"=",
"body",
",",
"preload_content",
"=",
"False",
",",
"*",
"*",
"cached",
"[",
"\"response\"",
"]",
")"
] | [
103,
4
] | [
139,
83
] | python | en | ['en', 'en', 'en'] | True |
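A hedged sketch of the cached-dict shape this method consumes; a real cachecontrol entry carries more fields, and status/version/reason below are illustrative stand-ins:

import io
from urllib3 import HTTPResponse  # the class the method reconstructs

cached = {
    "vary": {"accept-encoding": "gzip"},
    "response": {
        "body": b"hello",
        "headers": {"content-type": "text/plain"},
        "status": 200,
        "version": 11,
        "reason": "OK",
        "decode_content": False,
    },
}

# returns None unless request.headers["accept-encoding"] == "gzip";
# serializer and request are assumed to exist in scope
resp = serializer.prepare_response(request, cached)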
validate_system | (system) |
Ensure build system has the requisite fields.
|
Ensure build system has the requisite fields.
| def validate_system(system):
    """
    Ensure build system has the requisite fields.
    """
    required = {'requires', 'build-backend'}
    if not (required <= set(system)):
        message = "Missing required fields: {missing}".format(
            missing=required-set(system),
        )
        raise ValueError(message)
"def",
"validate_system",
"(",
"system",
")",
":",
"required",
"=",
"{",
"'requires'",
",",
"'build-backend'",
"}",
"if",
"not",
"(",
"required",
"<=",
"set",
"(",
"system",
")",
")",
":",
"message",
"=",
"\"Missing required fields: {missing}\"",
".",
"format",
"(",
"missing",
"=",
"required",
"-",
"set",
"(",
"system",
")",
",",
")",
"raise",
"ValueError",
"(",
"message",
")"
] | [
16,
0
] | [
25,
33
] | python | en | ['en', 'error', 'th'] | False |
load_system | (source_dir) |
Load the build system from a source dir (pyproject.toml).
|
Load the build system from a source dir (pyproject.toml).
| def load_system(source_dir):
    """
    Load the build system from a source dir (pyproject.toml).
    """
    pyproject = os.path.join(source_dir, 'pyproject.toml')
    with io.open(pyproject, encoding="utf-8") as f:
        pyproject_data = toml_load(f)
    return pyproject_data['build-system']
"def",
"load_system",
"(",
"source_dir",
")",
":",
"pyproject",
"=",
"os",
".",
"path",
".",
"join",
"(",
"source_dir",
",",
"'pyproject.toml'",
")",
"with",
"io",
".",
"open",
"(",
"pyproject",
",",
"encoding",
"=",
"\"utf-8\"",
")",
"as",
"f",
":",
"pyproject_data",
"=",
"toml_load",
"(",
"f",
")",
"return",
"pyproject_data",
"[",
"'build-system'",
"]"
] | [
28,
0
] | [
35,
41
] | python | en | ['en', 'error', 'th'] | False |
compat_system | (source_dir) |
Given a source dir, attempt to get a build system backend
and requirements from pyproject.toml. Fall back to
setuptools, but only if the file was not found or a build
system was not indicated.
|
Given a source dir, attempt to get a build system backend
and requirements from pyproject.toml. Fall back to
setuptools, but only if the file was not found or a build
system was not indicated.
| def compat_system(source_dir):
    """
    Given a source dir, attempt to get a build system backend
    and requirements from pyproject.toml. Fall back to
    setuptools, but only if the file was not found or a build
    system was not indicated.
    """
    try:
        system = load_system(source_dir)
    except (FileNotFoundError, KeyError):
        system = {}
    system.setdefault(
        'build-backend',
        'setuptools.build_meta:__legacy__',
    )
    system.setdefault('requires', ['setuptools', 'wheel'])
    return system
"def",
"compat_system",
"(",
"source_dir",
")",
":",
"try",
":",
"system",
"=",
"load_system",
"(",
"source_dir",
")",
"except",
"(",
"FileNotFoundError",
",",
"KeyError",
")",
":",
"system",
"=",
"{",
"}",
"system",
".",
"setdefault",
"(",
"'build-backend'",
",",
"'setuptools.build_meta:__legacy__'",
",",
")",
"system",
".",
"setdefault",
"(",
"'requires'",
",",
"[",
"'setuptools'",
",",
"'wheel'",
"]",
")",
"return",
"system"
] | [
38,
0
] | [
54,
17
] | python | en | ['en', 'error', 'th'] | False |
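A usage sketch: with a pyproject.toml present, compat_system returns what the file declares; without one, the setuptools fallback kicks in (directory layout is illustrative):

import os
import tempfile

src = tempfile.mkdtemp()
with open(os.path.join(src, "pyproject.toml"), "w", encoding="utf-8") as f:
    f.write('[build-system]\nrequires = ["flit_core >=3.2"]\nbuild-backend = "flit_core.buildapi"\n')

system = compat_system(src)
validate_system(system)  # passes: both required fields are present
print(system)            # requires + build-backend as declared in the file

print(compat_system(tempfile.mkdtemp()))
# {'build-backend': 'setuptools.build_meta:__legacy__', 'requires': ['setuptools', 'wheel']}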
_Enhance.enhance | (self, factor) |
Returns an enhanced image.
:param factor: A floating point value controlling the enhancement.
Factor 1.0 always returns a copy of the original image,
lower factors mean less color (brightness, contrast,
etc), and higher values more. There are no restrictions
on this value.
:rtype: :py:class:`~PIL.Image.Image`
|
Returns an enhanced image. | def enhance(self, factor):
"""
Returns an enhanced image.
:param factor: A floating point value controlling the enhancement.
Factor 1.0 always returns a copy of the original image,
lower factors mean less color (brightness, contrast,
etc), and higher values more. There are no restrictions
on this value.
:rtype: :py:class:`~PIL.Image.Image`
"""
return Image.blend(self.degenerate, self.image, factor) | [
"def",
"enhance",
"(",
"self",
",",
"factor",
")",
":",
"return",
"Image",
".",
"blend",
"(",
"self",
".",
"degenerate",
",",
"self",
".",
"image",
",",
"factor",
")"
] | [
24,
4
] | [
35,
63
] | python | en | ['en', 'error', 'th'] | False |
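Typical use goes through one of PIL's concrete ImageEnhance subclasses; the input file name below is hypothetical:

from PIL import Image, ImageEnhance

img = Image.open("photo.jpg")           # hypothetical input file
enhancer = ImageEnhance.Contrast(img)   # Brightness/Color/Sharpness work the same way
low = enhancer.enhance(0.5)             # blend halfway toward the degenerate image
copy = enhancer.enhance(1.0)            # factor 1.0 reproduces the original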
Ghostscript | (tile, size, fp, scale=1) | Render an image using Ghostscript | Render an image using Ghostscript | def Ghostscript(tile, size, fp, scale=1):
"""Render an image using Ghostscript"""
# Unpack decoder tile
decoder, tile, offset, data = tile[0]
length, bbox = data
# Hack to support hi-res rendering
scale = int(scale) or 1
# orig_size = size
# orig_bbox = bbox
size = (size[0] * scale, size[1] * scale)
# resolution is dependent on bbox and size
res = (
72.0 * size[0] / (bbox[2] - bbox[0]),
72.0 * size[1] / (bbox[3] - bbox[1]),
)
out_fd, outfile = tempfile.mkstemp()
os.close(out_fd)
infile_temp = None
if hasattr(fp, "name") and os.path.exists(fp.name):
infile = fp.name
else:
in_fd, infile_temp = tempfile.mkstemp()
os.close(in_fd)
infile = infile_temp
# Ignore length and offset!
# Ghostscript can read it
# Copy whole file to read in Ghostscript
with open(infile_temp, "wb") as f:
# fetch length of fp
fp.seek(0, io.SEEK_END)
fsize = fp.tell()
# ensure start position
# go back
fp.seek(0)
lengthfile = fsize
while lengthfile > 0:
s = fp.read(min(lengthfile, 100 * 1024))
if not s:
break
lengthfile -= len(s)
f.write(s)
# Build Ghostscript command
command = [
"gs",
"-q", # quiet mode
"-g%dx%d" % size, # set output geometry (pixels)
"-r%fx%f" % res, # set input DPI (dots per inch)
"-dBATCH", # exit after processing
"-dNOPAUSE", # don't pause between pages
"-dSAFER", # safe mode
"-sDEVICE=ppmraw", # ppm driver
f"-sOutputFile={outfile}", # output file
# adjust for image origin
"-c",
f"{-bbox[0]} {-bbox[1]} translate",
"-f",
infile, # input file
# showpage (see https://bugs.ghostscript.com/show_bug.cgi?id=698272)
"-c",
"showpage",
]
if gs_windows_binary is not None:
if not gs_windows_binary:
raise OSError("Unable to locate Ghostscript on paths")
command[0] = gs_windows_binary
# push data through Ghostscript
try:
startupinfo = None
if sys.platform.startswith("win"):
startupinfo = subprocess.STARTUPINFO()
startupinfo.dwFlags |= subprocess.STARTF_USESHOWWINDOW
subprocess.check_call(command, startupinfo=startupinfo)
out_im = Image.open(outfile)
out_im.load()
finally:
try:
os.unlink(outfile)
if infile_temp:
os.unlink(infile_temp)
except OSError:
pass
im = out_im.im.copy()
out_im.close()
return im | [
"def",
"Ghostscript",
"(",
"tile",
",",
"size",
",",
"fp",
",",
"scale",
"=",
"1",
")",
":",
"# Unpack decoder tile",
"decoder",
",",
"tile",
",",
"offset",
",",
"data",
"=",
"tile",
"[",
"0",
"]",
"length",
",",
"bbox",
"=",
"data",
"# Hack to support hi-res rendering",
"scale",
"=",
"int",
"(",
"scale",
")",
"or",
"1",
"# orig_size = size",
"# orig_bbox = bbox",
"size",
"=",
"(",
"size",
"[",
"0",
"]",
"*",
"scale",
",",
"size",
"[",
"1",
"]",
"*",
"scale",
")",
"# resolution is dependent on bbox and size",
"res",
"=",
"(",
"72.0",
"*",
"size",
"[",
"0",
"]",
"/",
"(",
"bbox",
"[",
"2",
"]",
"-",
"bbox",
"[",
"0",
"]",
")",
",",
"72.0",
"*",
"size",
"[",
"1",
"]",
"/",
"(",
"bbox",
"[",
"3",
"]",
"-",
"bbox",
"[",
"1",
"]",
")",
",",
")",
"out_fd",
",",
"outfile",
"=",
"tempfile",
".",
"mkstemp",
"(",
")",
"os",
".",
"close",
"(",
"out_fd",
")",
"infile_temp",
"=",
"None",
"if",
"hasattr",
"(",
"fp",
",",
"\"name\"",
")",
"and",
"os",
".",
"path",
".",
"exists",
"(",
"fp",
".",
"name",
")",
":",
"infile",
"=",
"fp",
".",
"name",
"else",
":",
"in_fd",
",",
"infile_temp",
"=",
"tempfile",
".",
"mkstemp",
"(",
")",
"os",
".",
"close",
"(",
"in_fd",
")",
"infile",
"=",
"infile_temp",
"# Ignore length and offset!",
"# Ghostscript can read it",
"# Copy whole file to read in Ghostscript",
"with",
"open",
"(",
"infile_temp",
",",
"\"wb\"",
")",
"as",
"f",
":",
"# fetch length of fp",
"fp",
".",
"seek",
"(",
"0",
",",
"io",
".",
"SEEK_END",
")",
"fsize",
"=",
"fp",
".",
"tell",
"(",
")",
"# ensure start position",
"# go back",
"fp",
".",
"seek",
"(",
"0",
")",
"lengthfile",
"=",
"fsize",
"while",
"lengthfile",
">",
"0",
":",
"s",
"=",
"fp",
".",
"read",
"(",
"min",
"(",
"lengthfile",
",",
"100",
"*",
"1024",
")",
")",
"if",
"not",
"s",
":",
"break",
"lengthfile",
"-=",
"len",
"(",
"s",
")",
"f",
".",
"write",
"(",
"s",
")",
"# Build Ghostscript command",
"command",
"=",
"[",
"\"gs\"",
",",
"\"-q\"",
",",
"# quiet mode",
"\"-g%dx%d\"",
"%",
"size",
",",
"# set output geometry (pixels)",
"\"-r%fx%f\"",
"%",
"res",
",",
"# set input DPI (dots per inch)",
"\"-dBATCH\"",
",",
"# exit after processing",
"\"-dNOPAUSE\"",
",",
"# don't pause between pages",
"\"-dSAFER\"",
",",
"# safe mode",
"\"-sDEVICE=ppmraw\"",
",",
"# ppm driver",
"f\"-sOutputFile={outfile}\"",
",",
"# output file",
"# adjust for image origin",
"\"-c\"",
",",
"f\"{-bbox[0]} {-bbox[1]} translate\"",
",",
"\"-f\"",
",",
"infile",
",",
"# input file",
"# showpage (see https://bugs.ghostscript.com/show_bug.cgi?id=698272)",
"\"-c\"",
",",
"\"showpage\"",
",",
"]",
"if",
"gs_windows_binary",
"is",
"not",
"None",
":",
"if",
"not",
"gs_windows_binary",
":",
"raise",
"OSError",
"(",
"\"Unable to locate Ghostscript on paths\"",
")",
"command",
"[",
"0",
"]",
"=",
"gs_windows_binary",
"# push data through Ghostscript",
"try",
":",
"startupinfo",
"=",
"None",
"if",
"sys",
".",
"platform",
".",
"startswith",
"(",
"\"win\"",
")",
":",
"startupinfo",
"=",
"subprocess",
".",
"STARTUPINFO",
"(",
")",
"startupinfo",
".",
"dwFlags",
"|=",
"subprocess",
".",
"STARTF_USESHOWWINDOW",
"subprocess",
".",
"check_call",
"(",
"command",
",",
"startupinfo",
"=",
"startupinfo",
")",
"out_im",
"=",
"Image",
".",
"open",
"(",
"outfile",
")",
"out_im",
".",
"load",
"(",
")",
"finally",
":",
"try",
":",
"os",
".",
"unlink",
"(",
"outfile",
")",
"if",
"infile_temp",
":",
"os",
".",
"unlink",
"(",
"infile_temp",
")",
"except",
"OSError",
":",
"pass",
"im",
"=",
"out_im",
".",
"im",
".",
"copy",
"(",
")",
"out_im",
".",
"close",
"(",
")",
"return",
"im"
] | [
63,
0
] | [
155,
13
] | python | af | ['de', 'af', 'en'] | False |
_save | (im, fp, filename, eps=1) | EPS Writer for the Python Imaging Library. | EPS Writer for the Python Imaging Library. | def _save(im, fp, filename, eps=1):
"""EPS Writer for the Python Imaging Library."""
#
# make sure image data is available
im.load()
#
# determine PostScript image mode
if im.mode == "L":
operator = (8, 1, b"image")
elif im.mode == "RGB":
operator = (8, 3, b"false 3 colorimage")
elif im.mode == "CMYK":
operator = (8, 4, b"false 4 colorimage")
else:
raise ValueError("image mode is not supported")
if eps:
#
# write EPS header
fp.write(b"%!PS-Adobe-3.0 EPSF-3.0\n")
fp.write(b"%%Creator: PIL 0.1 EpsEncode\n")
# fp.write("%%CreationDate: %s"...)
fp.write(b"%%%%BoundingBox: 0 0 %d %d\n" % im.size)
fp.write(b"%%Pages: 1\n")
fp.write(b"%%EndComments\n")
fp.write(b"%%Page: 1 1\n")
fp.write(b"%%ImageData: %d %d " % im.size)
fp.write(b'%d %d 0 1 1 "%s"\n' % operator)
#
# image header
fp.write(b"gsave\n")
fp.write(b"10 dict begin\n")
fp.write(b"/buf %d string def\n" % (im.size[0] * operator[1]))
fp.write(b"%d %d scale\n" % im.size)
fp.write(b"%d %d 8\n" % im.size) # <= bits
fp.write(b"[%d 0 0 -%d 0 %d]\n" % (im.size[0], im.size[1], im.size[1]))
fp.write(b"{ currentfile buf readhexstring pop } bind\n")
fp.write(operator[2] + b"\n")
if hasattr(fp, "flush"):
fp.flush()
ImageFile._save(im, fp, [("eps", (0, 0) + im.size, 0, None)])
fp.write(b"\n%%%%EndBinary\n")
fp.write(b"grestore end\n")
if hasattr(fp, "flush"):
fp.flush() | [
"def",
"_save",
"(",
"im",
",",
"fp",
",",
"filename",
",",
"eps",
"=",
"1",
")",
":",
"#",
"# make sure image data is available",
"im",
".",
"load",
"(",
")",
"#",
"# determine PostScript image mode",
"if",
"im",
".",
"mode",
"==",
"\"L\"",
":",
"operator",
"=",
"(",
"8",
",",
"1",
",",
"b\"image\"",
")",
"elif",
"im",
".",
"mode",
"==",
"\"RGB\"",
":",
"operator",
"=",
"(",
"8",
",",
"3",
",",
"b\"false 3 colorimage\"",
")",
"elif",
"im",
".",
"mode",
"==",
"\"CMYK\"",
":",
"operator",
"=",
"(",
"8",
",",
"4",
",",
"b\"false 4 colorimage\"",
")",
"else",
":",
"raise",
"ValueError",
"(",
"\"image mode is not supported\"",
")",
"if",
"eps",
":",
"#",
"# write EPS header",
"fp",
".",
"write",
"(",
"b\"%!PS-Adobe-3.0 EPSF-3.0\\n\"",
")",
"fp",
".",
"write",
"(",
"b\"%%Creator: PIL 0.1 EpsEncode\\n\"",
")",
"# fp.write(\"%%CreationDate: %s\"...)",
"fp",
".",
"write",
"(",
"b\"%%%%BoundingBox: 0 0 %d %d\\n\"",
"%",
"im",
".",
"size",
")",
"fp",
".",
"write",
"(",
"b\"%%Pages: 1\\n\"",
")",
"fp",
".",
"write",
"(",
"b\"%%EndComments\\n\"",
")",
"fp",
".",
"write",
"(",
"b\"%%Page: 1 1\\n\"",
")",
"fp",
".",
"write",
"(",
"b\"%%ImageData: %d %d \"",
"%",
"im",
".",
"size",
")",
"fp",
".",
"write",
"(",
"b'%d %d 0 1 1 \"%s\"\\n'",
"%",
"operator",
")",
"#",
"# image header",
"fp",
".",
"write",
"(",
"b\"gsave\\n\"",
")",
"fp",
".",
"write",
"(",
"b\"10 dict begin\\n\"",
")",
"fp",
".",
"write",
"(",
"b\"/buf %d string def\\n\"",
"%",
"(",
"im",
".",
"size",
"[",
"0",
"]",
"*",
"operator",
"[",
"1",
"]",
")",
")",
"fp",
".",
"write",
"(",
"b\"%d %d scale\\n\"",
"%",
"im",
".",
"size",
")",
"fp",
".",
"write",
"(",
"b\"%d %d 8\\n\"",
"%",
"im",
".",
"size",
")",
"# <= bits",
"fp",
".",
"write",
"(",
"b\"[%d 0 0 -%d 0 %d]\\n\"",
"%",
"(",
"im",
".",
"size",
"[",
"0",
"]",
",",
"im",
".",
"size",
"[",
"1",
"]",
",",
"im",
".",
"size",
"[",
"1",
"]",
")",
")",
"fp",
".",
"write",
"(",
"b\"{ currentfile buf readhexstring pop } bind\\n\"",
")",
"fp",
".",
"write",
"(",
"operator",
"[",
"2",
"]",
"+",
"b\"\\n\"",
")",
"if",
"hasattr",
"(",
"fp",
",",
"\"flush\"",
")",
":",
"fp",
".",
"flush",
"(",
")",
"ImageFile",
".",
"_save",
"(",
"im",
",",
"fp",
",",
"[",
"(",
"\"eps\"",
",",
"(",
"0",
",",
"0",
")",
"+",
"im",
".",
"size",
",",
"0",
",",
"None",
")",
"]",
")",
"fp",
".",
"write",
"(",
"b\"\\n%%%%EndBinary\\n\"",
")",
"fp",
".",
"write",
"(",
"b\"grestore end\\n\"",
")",
"if",
"hasattr",
"(",
"fp",
",",
"\"flush\"",
")",
":",
"fp",
".",
"flush",
"(",
")"
] | [
346,
0
] | [
395,
18
] | python | en | ['en', 'en', 'en'] | True |
escape | (text) |
Return the given text with ampersands, quotes and angle brackets encoded
for use in HTML.
Always escape input, even if it's already escaped and marked as such.
This may result in double-escaping. If this is a concern, use
conditional_escape() instead.
|
Return the given text with ampersands, quotes and angle brackets encoded
for use in HTML. | def escape(text):
"""
Return the given text with ampersands, quotes and angle brackets encoded
for use in HTML.
Always escape input, even if it's already escaped and marked as such.
This may result in double-escaping. If this is a concern, use
conditional_escape() instead.
"""
return mark_safe(html.escape(str(text))) | [
"def",
"escape",
"(",
"text",
")",
":",
"return",
"mark_safe",
"(",
"html",
".",
"escape",
"(",
"str",
"(",
"text",
")",
")",
")"
] | [
33,
0
] | [
42,
44
] | python | en | ['en', 'error', 'th'] | False |
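A short sketch contrasting escape with conditional_escape (defined in a later record in this dump):

from django.utils.html import conditional_escape, escape
from django.utils.safestring import mark_safe

print(escape("<b>&</b>"))            # &lt;b&gt;&amp;&lt;/b&gt;
already = mark_safe("&lt;b&gt;")
print(escape(already))               # double-escaped: &amp;lt;b&amp;gt;
print(conditional_escape(already))   # left alone: &lt;b&gt;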
escapejs | (value) | Hex encode characters for use in JavaScript strings. | Hex encode characters for use in JavaScript strings. | def escapejs(value):
"""Hex encode characters for use in JavaScript strings."""
return mark_safe(str(value).translate(_js_escapes)) | [
"def",
"escapejs",
"(",
"value",
")",
":",
"return",
"mark_safe",
"(",
"str",
"(",
"value",
")",
".",
"translate",
"(",
"_js_escapes",
")",
")"
] | [
65,
0
] | [
67,
55
] | python | en | ['en', 'en', 'en'] | True |
json_script | (value, element_id) |
Escape all the HTML/XML special characters with their unicode escapes, so
value is safe to be output anywhere except for inside a tag attribute. Wrap
the escaped JSON in a script tag.
|
Escape all the HTML/XML special characters with their unicode escapes, so
value is safe to be output anywhere except for inside a tag attribute. Wrap
the escaped JSON in a script tag.
| def json_script(value, element_id):
"""
Escape all the HTML/XML special characters with their unicode escapes, so
value is safe to be output anywhere except for inside a tag attribute. Wrap
the escaped JSON in a script tag.
"""
from django.core.serializers.json import DjangoJSONEncoder
json_str = json.dumps(value, cls=DjangoJSONEncoder).translate(_json_script_escapes)
return format_html(
'<script id="{}" type="application/json">{}</script>',
element_id, mark_safe(json_str)
) | [
"def",
"json_script",
"(",
"value",
",",
"element_id",
")",
":",
"from",
"django",
".",
"core",
".",
"serializers",
".",
"json",
"import",
"DjangoJSONEncoder",
"json_str",
"=",
"json",
".",
"dumps",
"(",
"value",
",",
"cls",
"=",
"DjangoJSONEncoder",
")",
".",
"translate",
"(",
"_json_script_escapes",
")",
"return",
"format_html",
"(",
"'<script id=\"{}\" type=\"application/json\">{}</script>'",
",",
"element_id",
",",
"mark_safe",
"(",
"json_str",
")",
")"
] | [
77,
0
] | [
88,
5
] | python | en | ['en', 'error', 'th'] | False |
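A usage sketch; the element id is arbitrary and the exact unicode escapes are shown approximately (real output is one line, wrapped here):

from django.utils.html import json_script

payload = {"q": "</script><b>"}
print(json_script(payload, "init-data"))
# <script id="init-data" type="application/json">
# {"q": "\u003C/script\u003E\u003Cb\u003E"}</script>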
conditional_escape | (text) |
Similar to escape(), except that it doesn't operate on pre-escaped strings.
This function relies on the __html__ convention used both by Django's
SafeData class and by third-party libraries like markupsafe.
|
Similar to escape(), except that it doesn't operate on pre-escaped strings. | def conditional_escape(text):
"""
Similar to escape(), except that it doesn't operate on pre-escaped strings.
This function relies on the __html__ convention used both by Django's
SafeData class and by third-party libraries like markupsafe.
"""
if isinstance(text, Promise):
text = str(text)
if hasattr(text, '__html__'):
return text.__html__()
else:
return escape(text) | [
"def",
"conditional_escape",
"(",
"text",
")",
":",
"if",
"isinstance",
"(",
"text",
",",
"Promise",
")",
":",
"text",
"=",
"str",
"(",
"text",
")",
"if",
"hasattr",
"(",
"text",
",",
"'__html__'",
")",
":",
"return",
"text",
".",
"__html__",
"(",
")",
"else",
":",
"return",
"escape",
"(",
"text",
")"
] | [
91,
0
] | [
103,
27
] | python | en | ['en', 'error', 'th'] | False |
format_html | (format_string, *args, **kwargs) |
Similar to str.format, but pass all arguments through conditional_escape(),
and call mark_safe() on the result. This function should be used instead
of str.format or % interpolation to build up small HTML fragments.
|
Similar to str.format, but pass all arguments through conditional_escape(),
and call mark_safe() on the result. This function should be used instead
of str.format or % interpolation to build up small HTML fragments.
| def format_html(format_string, *args, **kwargs):
"""
Similar to str.format, but pass all arguments through conditional_escape(),
and call mark_safe() on the result. This function should be used instead
of str.format or % interpolation to build up small HTML fragments.
"""
args_safe = map(conditional_escape, args)
kwargs_safe = {k: conditional_escape(v) for (k, v) in kwargs.items()}
return mark_safe(format_string.format(*args_safe, **kwargs_safe)) | [
"def",
"format_html",
"(",
"format_string",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"args_safe",
"=",
"map",
"(",
"conditional_escape",
",",
"args",
")",
"kwargs_safe",
"=",
"{",
"k",
":",
"conditional_escape",
"(",
"v",
")",
"for",
"(",
"k",
",",
"v",
")",
"in",
"kwargs",
".",
"items",
"(",
")",
"}",
"return",
"mark_safe",
"(",
"format_string",
".",
"format",
"(",
"*",
"args_safe",
",",
"*",
"*",
"kwargs_safe",
")",
")"
] | [
106,
0
] | [
114,
69
] | python | en | ['en', 'error', 'th'] | False |
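A usage sketch:

from django.utils.html import format_html

link = format_html('<a href="{}">{}</a>',
                   'https://example.com/?a=1&b=2', '<Jane>')
# '<a href="https://example.com/?a=1&amp;b=2">&lt;Jane&gt;</a>', a SafeString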
format_html_join | (sep, format_string, args_generator) |
A wrapper of format_html, for the common case of a group of arguments that
need to be formatted using the same format string, and then joined using
'sep'. 'sep' is also passed through conditional_escape.
'args_generator' should be an iterator that returns the sequence of 'args'
that will be passed to format_html.
Example:
format_html_join('\n', "<li>{} {}</li>", ((u.first_name, u.last_name)
for u in users))
|
A wrapper of format_html, for the common case of a group of arguments that
need to be formatted using the same format string, and then joined using
'sep'. 'sep' is also passed through conditional_escape. | def format_html_join(sep, format_string, args_generator):
"""
A wrapper of format_html, for the common case of a group of arguments that
need to be formatted using the same format string, and then joined using
'sep'. 'sep' is also passed through conditional_escape.
'args_generator' should be an iterator that returns the sequence of 'args'
that will be passed to format_html.
Example:
format_html_join('\n', "<li>{} {}</li>", ((u.first_name, u.last_name)
for u in users))
"""
return mark_safe(conditional_escape(sep).join(
format_html(format_string, *args)
for args in args_generator
)) | [
"def",
"format_html_join",
"(",
"sep",
",",
"format_string",
",",
"args_generator",
")",
":",
"return",
"mark_safe",
"(",
"conditional_escape",
"(",
"sep",
")",
".",
"join",
"(",
"format_html",
"(",
"format_string",
",",
"*",
"args",
")",
"for",
"args",
"in",
"args_generator",
")",
")"
] | [
117,
0
] | [
134,
6
] | python | en | ['en', 'error', 'th'] | False |
linebreaks | (value, autoescape=False) | Convert newlines into <p> and <br>s. | Convert newlines into <p> and <br>s. | def linebreaks(value, autoescape=False):
"""Convert newlines into <p> and <br>s."""
value = normalize_newlines(value)
paras = re.split('\n{2,}', str(value))
if autoescape:
paras = ['<p>%s</p>' % escape(p).replace('\n', '<br>') for p in paras]
else:
paras = ['<p>%s</p>' % p.replace('\n', '<br>') for p in paras]
return '\n\n'.join(paras) | [
"def",
"linebreaks",
"(",
"value",
",",
"autoescape",
"=",
"False",
")",
":",
"value",
"=",
"normalize_newlines",
"(",
"value",
")",
"paras",
"=",
"re",
".",
"split",
"(",
"'\\n{2,}'",
",",
"str",
"(",
"value",
")",
")",
"if",
"autoescape",
":",
"paras",
"=",
"[",
"'<p>%s</p>'",
"%",
"escape",
"(",
"p",
")",
".",
"replace",
"(",
"'\\n'",
",",
"'<br>'",
")",
"for",
"p",
"in",
"paras",
"]",
"else",
":",
"paras",
"=",
"[",
"'<p>%s</p>'",
"%",
"p",
".",
"replace",
"(",
"'\\n'",
",",
"'<br>'",
")",
"for",
"p",
"in",
"paras",
"]",
"return",
"'\\n\\n'",
".",
"join",
"(",
"paras",
")"
] | [
138,
0
] | [
146,
29
] | python | en | ['en', 'en', 'en'] | True |
_strip_once | (value) |
Internal tag stripping utility used by strip_tags.
|
Internal tag stripping utility used by strip_tags.
| def _strip_once(value):
"""
Internal tag stripping utility used by strip_tags.
"""
s = MLStripper()
s.feed(value)
s.close()
return s.get_data() | [
"def",
"_strip_once",
"(",
"value",
")",
":",
"s",
"=",
"MLStripper",
"(",
")",
"s",
".",
"feed",
"(",
"value",
")",
"s",
".",
"close",
"(",
")",
"return",
"s",
".",
"get_data",
"(",
")"
] | [
168,
0
] | [
175,
23
] | python | en | ['en', 'error', 'th'] | False |
strip_tags | (value) | Return the given HTML with all tags stripped. | Return the given HTML with all tags stripped. | def strip_tags(value):
"""Return the given HTML with all tags stripped."""
# Note: in typical case this loop executes _strip_once once. Loop condition
# is redundant, but helps to reduce number of executions of _strip_once.
value = str(value)
while '<' in value and '>' in value:
new_value = _strip_once(value)
if value.count('<') == new_value.count('<'):
# _strip_once wasn't able to detect more tags.
break
value = new_value
return value | [
"def",
"strip_tags",
"(",
"value",
")",
":",
"# Note: in typical case this loop executes _strip_once once. Loop condition",
"# is redundant, but helps to reduce number of executions of _strip_once.",
"value",
"=",
"str",
"(",
"value",
")",
"while",
"'<'",
"in",
"value",
"and",
"'>'",
"in",
"value",
":",
"new_value",
"=",
"_strip_once",
"(",
"value",
")",
"if",
"value",
".",
"count",
"(",
"'<'",
")",
"==",
"new_value",
".",
"count",
"(",
"'<'",
")",
":",
"# _strip_once wasn't able to detect more tags.",
"break",
"value",
"=",
"new_value",
"return",
"value"
] | [
179,
0
] | [
190,
16
] | python | en | ['en', 'en', 'en'] | True |
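A usage sketch; the loop above re-strips until no further tags are detected, guarding against tags that only appear after one stripping pass:

from django.utils.html import strip_tags

print(strip_tags('<p>Hello <b>world</b>!</p>'))   # Hello world!
print(strip_tags('<<b>script</b>>alert()'))       # stripped repeatedly until stable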
strip_spaces_between_tags | (value) | Return the given HTML with spaces between tags removed. | Return the given HTML with spaces between tags removed. | def strip_spaces_between_tags(value):
"""Return the given HTML with spaces between tags removed."""
return re.sub(r'>\s+<', '><', str(value)) | [
"def",
"strip_spaces_between_tags",
"(",
"value",
")",
":",
"return",
"re",
".",
"sub",
"(",
"r'>\\s+<'",
",",
"'><'",
",",
"str",
"(",
"value",
")",
")"
] | [
194,
0
] | [
196,
45
] | python | en | ['en', 'en', 'en'] | True |
smart_urlquote | (url) | Quote a URL if it isn't already quoted. | Quote a URL if it isn't already quoted. | def smart_urlquote(url):
"""Quote a URL if it isn't already quoted."""
def unquote_quote(segment):
segment = unquote(segment)
# Tilde is part of RFC3986 Unreserved Characters
# https://tools.ietf.org/html/rfc3986#section-2.3
# See also https://bugs.python.org/issue16285
return quote(segment, safe=RFC3986_SUBDELIMS + RFC3986_GENDELIMS + '~')
# Handle IDN before quoting.
try:
scheme, netloc, path, query, fragment = urlsplit(url)
except ValueError:
# invalid IPv6 URL (normally square brackets in hostname part).
return unquote_quote(url)
try:
netloc = punycode(netloc) # IDN -> ACE
except UnicodeError: # invalid domain part
return unquote_quote(url)
if query:
# Separately unquoting key/value, so as to not mix querystring separators
# included in query values. See #22267.
query_parts = [(unquote(q[0]), unquote(q[1]))
for q in parse_qsl(query, keep_blank_values=True)]
# urlencode will take care of quoting
query = urlencode(query_parts)
path = unquote_quote(path)
fragment = unquote_quote(fragment)
return urlunsplit((scheme, netloc, path, query, fragment)) | [
"def",
"smart_urlquote",
"(",
"url",
")",
":",
"def",
"unquote_quote",
"(",
"segment",
")",
":",
"segment",
"=",
"unquote",
"(",
"segment",
")",
"# Tilde is part of RFC3986 Unreserved Characters",
"# https://tools.ietf.org/html/rfc3986#section-2.3",
"# See also https://bugs.python.org/issue16285",
"return",
"quote",
"(",
"segment",
",",
"safe",
"=",
"RFC3986_SUBDELIMS",
"+",
"RFC3986_GENDELIMS",
"+",
"'~'",
")",
"# Handle IDN before quoting.",
"try",
":",
"scheme",
",",
"netloc",
",",
"path",
",",
"query",
",",
"fragment",
"=",
"urlsplit",
"(",
"url",
")",
"except",
"ValueError",
":",
"# invalid IPv6 URL (normally square brackets in hostname part).",
"return",
"unquote_quote",
"(",
"url",
")",
"try",
":",
"netloc",
"=",
"punycode",
"(",
"netloc",
")",
"# IDN -> ACE",
"except",
"UnicodeError",
":",
"# invalid domain part",
"return",
"unquote_quote",
"(",
"url",
")",
"if",
"query",
":",
"# Separately unquoting key/value, so as to not mix querystring separators",
"# included in query values. See #22267.",
"query_parts",
"=",
"[",
"(",
"unquote",
"(",
"q",
"[",
"0",
"]",
")",
",",
"unquote",
"(",
"q",
"[",
"1",
"]",
")",
")",
"for",
"q",
"in",
"parse_qsl",
"(",
"query",
",",
"keep_blank_values",
"=",
"True",
")",
"]",
"# urlencode will take care of quoting",
"query",
"=",
"urlencode",
"(",
"query_parts",
")",
"path",
"=",
"unquote_quote",
"(",
"path",
")",
"fragment",
"=",
"unquote_quote",
"(",
"fragment",
")",
"return",
"urlunsplit",
"(",
"(",
"scheme",
",",
"netloc",
",",
"path",
",",
"query",
",",
"fragment",
")",
")"
] | [
199,
0
] | [
231,
62
] | python | en | ['en', 'en', 'en'] | True |
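A usage sketch (IDN host and expected output shown approximately):

from django.utils.html import smart_urlquote

print(smart_urlquote('http://exämple.com/path with spaces/?q=a b'))
# host is punycoded, path percent-quoted, query re-encoded, roughly:
# http://xn--exmple-cua.com/path%20with%20spaces/?q=a+b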
urlize | (text, trim_url_limit=None, nofollow=False, autoescape=False) |
Convert any URLs in text into clickable links.
Works on http://, https://, www. links, and also on links ending in one of
the original seven gTLDs (.com, .edu, .gov, .int, .mil, .net, and .org).
Links can have trailing punctuation (periods, commas, close-parens) and
leading punctuation (opening parens) and it'll still do the right thing.
If trim_url_limit is not None, truncate the URLs in the link text longer
than this limit to trim_url_limit - 1 characters and append an ellipsis.
If nofollow is True, give the links a rel="nofollow" attribute.
If autoescape is True, autoescape the link text and URLs.
|
Convert any URLs in text into clickable links. | def urlize(text, trim_url_limit=None, nofollow=False, autoescape=False):
"""
Convert any URLs in text into clickable links.
Works on http://, https://, www. links, and also on links ending in one of
the original seven gTLDs (.com, .edu, .gov, .int, .mil, .net, and .org).
Links can have trailing punctuation (periods, commas, close-parens) and
leading punctuation (opening parens) and it'll still do the right thing.
If trim_url_limit is not None, truncate the URLs in the link text longer
than this limit to trim_url_limit - 1 characters and append an ellipsis.
If nofollow is True, give the links a rel="nofollow" attribute.
If autoescape is True, autoescape the link text and URLs.
"""
safe_input = isinstance(text, SafeData)
def trim_url(x, limit=trim_url_limit):
if limit is None or len(x) <= limit:
return x
return '%s…' % x[:max(0, limit - 1)]
def trim_punctuation(lead, middle, trail):
"""
Trim trailing and wrapping punctuation from `middle`. Return the items
of the new state.
"""
# Continue trimming until middle remains unchanged.
trimmed_something = True
while trimmed_something:
trimmed_something = False
# Trim wrapping punctuation.
for opening, closing in WRAPPING_PUNCTUATION:
if middle.startswith(opening):
middle = middle[len(opening):]
lead += opening
trimmed_something = True
# Keep parentheses at the end only if they're balanced.
if (middle.endswith(closing) and
middle.count(closing) == middle.count(opening) + 1):
middle = middle[:-len(closing)]
trail = closing + trail
trimmed_something = True
# Trim trailing punctuation (after trimming wrapping punctuation,
# as encoded entities contain ';'). Unescape entities to avoid
# breaking them by removing ';'.
middle_unescaped = html.unescape(middle)
stripped = middle_unescaped.rstrip(TRAILING_PUNCTUATION_CHARS)
if middle_unescaped != stripped:
trail = middle[len(stripped):] + trail
middle = middle[:len(stripped) - len(middle_unescaped)]
trimmed_something = True
return lead, middle, trail
def is_email_simple(value):
"""Return True if value looks like an email address."""
# An @ must be in the middle of the value.
if '@' not in value or value.startswith('@') or value.endswith('@'):
return False
try:
p1, p2 = value.split('@')
except ValueError:
# value contains more than one @.
return False
# Dot must be in p2 (e.g. example.com)
if '.' not in p2 or p2.startswith('.'):
return False
return True
words = word_split_re.split(str(text))
for i, word in enumerate(words):
if '.' in word or '@' in word or ':' in word:
# lead: Current punctuation trimmed from the beginning of the word.
# middle: Current state of the word.
# trail: Current punctuation trimmed from the end of the word.
lead, middle, trail = '', word, ''
# Deal with punctuation.
lead, middle, trail = trim_punctuation(lead, middle, trail)
# Make URL we want to point to.
url = None
nofollow_attr = ' rel="nofollow"' if nofollow else ''
if simple_url_re.match(middle):
url = smart_urlquote(html.unescape(middle))
elif simple_url_2_re.match(middle):
url = smart_urlquote('http://%s' % html.unescape(middle))
elif ':' not in middle and is_email_simple(middle):
local, domain = middle.rsplit('@', 1)
try:
domain = punycode(domain)
except UnicodeError:
continue
url = 'mailto:%s@%s' % (local, domain)
nofollow_attr = ''
# Make link.
if url:
trimmed = trim_url(middle)
if autoescape and not safe_input:
lead, trail = escape(lead), escape(trail)
trimmed = escape(trimmed)
middle = '<a href="%s"%s>%s</a>' % (escape(url), nofollow_attr, trimmed)
words[i] = mark_safe('%s%s%s' % (lead, middle, trail))
else:
if safe_input:
words[i] = mark_safe(word)
elif autoescape:
words[i] = escape(word)
elif safe_input:
words[i] = mark_safe(word)
elif autoescape:
words[i] = escape(word)
return ''.join(words) | [
"def",
"urlize",
"(",
"text",
",",
"trim_url_limit",
"=",
"None",
",",
"nofollow",
"=",
"False",
",",
"autoescape",
"=",
"False",
")",
":",
"safe_input",
"=",
"isinstance",
"(",
"text",
",",
"SafeData",
")",
"def",
"trim_url",
"(",
"x",
",",
"limit",
"=",
"trim_url_limit",
")",
":",
"if",
"limit",
"is",
"None",
"or",
"len",
"(",
"x",
")",
"<=",
"limit",
":",
"return",
"x",
"return",
"'%s…' %",
"x",
":",
"m",
"a",
"x(0",
",",
" ",
"l",
"mit -",
"1",
"]",
"",
"",
"def",
"trim_punctuation",
"(",
"lead",
",",
"middle",
",",
"trail",
")",
":",
"\"\"\"\n Trim trailing and wrapping punctuation from `middle`. Return the items\n of the new state.\n \"\"\"",
"# Continue trimming until middle remains unchanged.",
"trimmed_something",
"=",
"True",
"while",
"trimmed_something",
":",
"trimmed_something",
"=",
"False",
"# Trim wrapping punctuation.",
"for",
"opening",
",",
"closing",
"in",
"WRAPPING_PUNCTUATION",
":",
"if",
"middle",
".",
"startswith",
"(",
"opening",
")",
":",
"middle",
"=",
"middle",
"[",
"len",
"(",
"opening",
")",
":",
"]",
"lead",
"+=",
"opening",
"trimmed_something",
"=",
"True",
"# Keep parentheses at the end only if they're balanced.",
"if",
"(",
"middle",
".",
"endswith",
"(",
"closing",
")",
"and",
"middle",
".",
"count",
"(",
"closing",
")",
"==",
"middle",
".",
"count",
"(",
"opening",
")",
"+",
"1",
")",
":",
"middle",
"=",
"middle",
"[",
":",
"-",
"len",
"(",
"closing",
")",
"]",
"trail",
"=",
"closing",
"+",
"trail",
"trimmed_something",
"=",
"True",
"# Trim trailing punctuation (after trimming wrapping punctuation,",
"# as encoded entities contain ';'). Unescape entities to avoid",
"# breaking them by removing ';'.",
"middle_unescaped",
"=",
"html",
".",
"unescape",
"(",
"middle",
")",
"stripped",
"=",
"middle_unescaped",
".",
"rstrip",
"(",
"TRAILING_PUNCTUATION_CHARS",
")",
"if",
"middle_unescaped",
"!=",
"stripped",
":",
"trail",
"=",
"middle",
"[",
"len",
"(",
"stripped",
")",
":",
"]",
"+",
"trail",
"middle",
"=",
"middle",
"[",
":",
"len",
"(",
"stripped",
")",
"-",
"len",
"(",
"middle_unescaped",
")",
"]",
"trimmed_something",
"=",
"True",
"return",
"lead",
",",
"middle",
",",
"trail",
"def",
"is_email_simple",
"(",
"value",
")",
":",
"\"\"\"Return True if value looks like an email address.\"\"\"",
"# An @ must be in the middle of the value.",
"if",
"'@'",
"not",
"in",
"value",
"or",
"value",
".",
"startswith",
"(",
"'@'",
")",
"or",
"value",
".",
"endswith",
"(",
"'@'",
")",
":",
"return",
"False",
"try",
":",
"p1",
",",
"p2",
"=",
"value",
".",
"split",
"(",
"'@'",
")",
"except",
"ValueError",
":",
"# value contains more than one @.",
"return",
"False",
"# Dot must be in p2 (e.g. example.com)",
"if",
"'.'",
"not",
"in",
"p2",
"or",
"p2",
".",
"startswith",
"(",
"'.'",
")",
":",
"return",
"False",
"return",
"True",
"words",
"=",
"word_split_re",
".",
"split",
"(",
"str",
"(",
"text",
")",
")",
"for",
"i",
",",
"word",
"in",
"enumerate",
"(",
"words",
")",
":",
"if",
"'.'",
"in",
"word",
"or",
"'@'",
"in",
"word",
"or",
"':'",
"in",
"word",
":",
"# lead: Current punctuation trimmed from the beginning of the word.",
"# middle: Current state of the word.",
"# trail: Current punctuation trimmed from the end of the word.",
"lead",
",",
"middle",
",",
"trail",
"=",
"''",
",",
"word",
",",
"''",
"# Deal with punctuation.",
"lead",
",",
"middle",
",",
"trail",
"=",
"trim_punctuation",
"(",
"lead",
",",
"middle",
",",
"trail",
")",
"# Make URL we want to point to.",
"url",
"=",
"None",
"nofollow_attr",
"=",
"' rel=\"nofollow\"'",
"if",
"nofollow",
"else",
"''",
"if",
"simple_url_re",
".",
"match",
"(",
"middle",
")",
":",
"url",
"=",
"smart_urlquote",
"(",
"html",
".",
"unescape",
"(",
"middle",
")",
")",
"elif",
"simple_url_2_re",
".",
"match",
"(",
"middle",
")",
":",
"url",
"=",
"smart_urlquote",
"(",
"'http://%s'",
"%",
"html",
".",
"unescape",
"(",
"middle",
")",
")",
"elif",
"':'",
"not",
"in",
"middle",
"and",
"is_email_simple",
"(",
"middle",
")",
":",
"local",
",",
"domain",
"=",
"middle",
".",
"rsplit",
"(",
"'@'",
",",
"1",
")",
"try",
":",
"domain",
"=",
"punycode",
"(",
"domain",
")",
"except",
"UnicodeError",
":",
"continue",
"url",
"=",
"'mailto:%s@%s'",
"%",
"(",
"local",
",",
"domain",
")",
"nofollow_attr",
"=",
"''",
"# Make link.",
"if",
"url",
":",
"trimmed",
"=",
"trim_url",
"(",
"middle",
")",
"if",
"autoescape",
"and",
"not",
"safe_input",
":",
"lead",
",",
"trail",
"=",
"escape",
"(",
"lead",
")",
",",
"escape",
"(",
"trail",
")",
"trimmed",
"=",
"escape",
"(",
"trimmed",
")",
"middle",
"=",
"'<a href=\"%s\"%s>%s</a>'",
"%",
"(",
"escape",
"(",
"url",
")",
",",
"nofollow_attr",
",",
"trimmed",
")",
"words",
"[",
"i",
"]",
"=",
"mark_safe",
"(",
"'%s%s%s'",
"%",
"(",
"lead",
",",
"middle",
",",
"trail",
")",
")",
"else",
":",
"if",
"safe_input",
":",
"words",
"[",
"i",
"]",
"=",
"mark_safe",
"(",
"word",
")",
"elif",
"autoescape",
":",
"words",
"[",
"i",
"]",
"=",
"escape",
"(",
"word",
")",
"elif",
"safe_input",
":",
"words",
"[",
"i",
"]",
"=",
"mark_safe",
"(",
"word",
")",
"elif",
"autoescape",
":",
"words",
"[",
"i",
"]",
"=",
"escape",
"(",
"word",
")",
"return",
"''",
".",
"join",
"(",
"words",
")"
] | [
235,
0
] | [
348,
25
] | python | en | ['en', 'error', 'th'] | False |
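A usage sketch of the three keyword knobs (output shown approximately):

from django.utils.html import urlize

text = 'Visit www.djangoproject.com today.'
print(urlize(text, trim_url_limit=20, nofollow=True))
# Visit <a href="http://www.djangoproject.com" rel="nofollow">www.djangoproject.c…</a> today.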
avoid_wrapping | (value) |
Avoid text wrapping in the middle of a phrase by adding non-breaking
spaces where there previously were normal spaces.
|
Avoid text wrapping in the middle of a phrase by adding non-breaking
spaces where there previously were normal spaces.
| def avoid_wrapping(value):
"""
Avoid text wrapping in the middle of a phrase by adding non-breaking
spaces where there previously were normal spaces.
"""
return value.replace(" ", "\xa0") | [
"def",
"avoid_wrapping",
"(",
"value",
")",
":",
"return",
"value",
".",
"replace",
"(",
"\" \"",
",",
"\"\\xa0\"",
")"
] | [
351,
0
] | [
356,
37
] | python | en | ['en', 'error', 'th'] | False |
html_safe | (klass) |
A decorator that defines the __html__ method. This helps non-Django
templates to detect classes whose __str__ methods return SafeString.
|
A decorator that defines the __html__ method. This helps non-Django
templates to detect classes whose __str__ methods return SafeString.
| def html_safe(klass):
"""
A decorator that defines the __html__ method. This helps non-Django
templates to detect classes whose __str__ methods return SafeString.
"""
if '__html__' in klass.__dict__:
raise ValueError(
"can't apply @html_safe to %s because it defines "
"__html__()." % klass.__name__
)
if '__str__' not in klass.__dict__:
raise ValueError(
"can't apply @html_safe to %s because it doesn't "
"define __str__()." % klass.__name__
)
klass_str = klass.__str__
klass.__str__ = lambda self: mark_safe(klass_str(self))
klass.__html__ = lambda self: str(self)
return klass | [
"def",
"html_safe",
"(",
"klass",
")",
":",
"if",
"'__html__'",
"in",
"klass",
".",
"__dict__",
":",
"raise",
"ValueError",
"(",
"\"can't apply @html_safe to %s because it defines \"",
"\"__html__().\"",
"%",
"klass",
".",
"__name__",
")",
"if",
"'__str__'",
"not",
"in",
"klass",
".",
"__dict__",
":",
"raise",
"ValueError",
"(",
"\"can't apply @html_safe to %s because it doesn't \"",
"\"define __str__().\"",
"%",
"klass",
".",
"__name__",
")",
"klass_str",
"=",
"klass",
".",
"__str__",
"klass",
".",
"__str__",
"=",
"lambda",
"self",
":",
"mark_safe",
"(",
"klass_str",
"(",
"self",
")",
")",
"klass",
".",
"__html__",
"=",
"lambda",
"self",
":",
"str",
"(",
"self",
")",
"return",
"klass"
] | [
359,
0
] | [
377,
16
] | python | en | ['en', 'error', 'th'] | False |
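A sketch of the decorator applied to a hypothetical class:

from django.utils.html import html_safe

@html_safe
class Badge:
    def __str__(self):
        return '<span class="badge">new</span>'

b = Badge()
str(b)        # SafeString, so Django templates will not re-escape it
b.__html__()  # same markup via the protocol other template engines check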
parse_config | (cfg_file) | Load a config file into AttrDict | Load a config file into AttrDict | def parse_config(cfg_file):
"""Load a config file into AttrDict"""
with open(cfg_file, 'r') as fopen:
yaml_config = AttrDict(yaml.load(fopen, Loader=yaml.SafeLoader))
create_attr_dict(yaml_config)
return yaml_config | [
"def",
"parse_config",
"(",
"cfg_file",
")",
":",
"with",
"open",
"(",
"cfg_file",
",",
"'r'",
")",
"as",
"fopen",
":",
"yaml_config",
"=",
"AttrDict",
"(",
"yaml",
".",
"load",
"(",
"fopen",
",",
"Loader",
"=",
"yaml",
".",
"SafeLoader",
")",
")",
"create_attr_dict",
"(",
"yaml_config",
")",
"return",
"yaml_config"
] | [
50,
0
] | [
55,
22
] | python | en | ['en', 'es', 'en'] | True |
print_dict | (d, delimiter=0) |
Recursively visualize a dict and
indenting according to the relationship of keys.
|
Recursively visualize a dict and
indenting according to the relationship of keys.
| def print_dict(d, delimiter=0):
"""
Recursively visualize a dict and
indenting according to the relationship of keys.
"""
placeholder = "-" * 60
for k, v in sorted(d.items()):
if isinstance(v, dict):
logger.info("{}{} : ".format(delimiter * " ",
logger.coloring(k, "HEADER")))
print_dict(v, delimiter + 4)
elif isinstance(v, list) and len(v) >= 1 and isinstance(v[0], dict):
logger.info("{}{} : ".format(delimiter * " ",
logger.coloring(str(k), "HEADER")))
for value in v:
print_dict(value, delimiter + 4)
else:
logger.info("{}{} : {}".format(delimiter * " ",
logger.coloring(k, "HEADER"),
logger.coloring(v, "OKGREEN")))
if k.isupper():
logger.info(placeholder) | [
"def",
"print_dict",
"(",
"d",
",",
"delimiter",
"=",
"0",
")",
":",
"placeholder",
"=",
"\"-\"",
"*",
"60",
"for",
"k",
",",
"v",
"in",
"sorted",
"(",
"d",
".",
"items",
"(",
")",
")",
":",
"if",
"isinstance",
"(",
"v",
",",
"dict",
")",
":",
"logger",
".",
"info",
"(",
"\"{}{} : \"",
".",
"format",
"(",
"delimiter",
"*",
"\" \"",
",",
"logger",
".",
"coloring",
"(",
"k",
",",
"\"HEADER\"",
")",
")",
")",
"print_dict",
"(",
"v",
",",
"delimiter",
"+",
"4",
")",
"elif",
"isinstance",
"(",
"v",
",",
"list",
")",
"and",
"len",
"(",
"v",
")",
">=",
"1",
"and",
"isinstance",
"(",
"v",
"[",
"0",
"]",
",",
"dict",
")",
":",
"logger",
".",
"info",
"(",
"\"{}{} : \"",
".",
"format",
"(",
"delimiter",
"*",
"\" \"",
",",
"logger",
".",
"coloring",
"(",
"str",
"(",
"k",
")",
",",
"\"HEADER\"",
")",
")",
")",
"for",
"value",
"in",
"v",
":",
"print_dict",
"(",
"value",
",",
"delimiter",
"+",
"4",
")",
"else",
":",
"logger",
".",
"info",
"(",
"\"{}{} : {}\"",
".",
"format",
"(",
"delimiter",
"*",
"\" \"",
",",
"logger",
".",
"coloring",
"(",
"k",
",",
"\"HEADER\"",
")",
",",
"logger",
".",
"coloring",
"(",
"v",
",",
"\"OKGREEN\"",
")",
")",
")",
"if",
"k",
".",
"isupper",
"(",
")",
":",
"logger",
".",
"info",
"(",
"placeholder",
")"
] | [
58,
0
] | [
80,
36
] | python | en | ['en', 'error', 'th'] | False |
print_config | (config) |
visualize configs
Arguments:
config: configs
|
visualize configs | def print_config(config):
"""
visualize configs
Arguments:
config: configs
"""
logger.advertise()
print_dict(config) | [
"def",
"print_config",
"(",
"config",
")",
":",
"logger",
".",
"advertise",
"(",
")",
"print_dict",
"(",
"config",
")"
] | [
83,
0
] | [
91,
22
] | python | en | ['en', 'error', 'th'] | False |
check_config | (config) |
Check config
|
Check config
| def check_config(config):
"""
Check config
"""
check.check_version()
use_gpu = config.get('use_gpu', True)
if use_gpu:
check.check_gpu()
architecture = config.get('ARCHITECTURE')
check.check_architecture(architecture)
use_mix = config.get('use_mix', False)
check.check_mix(architecture, use_mix)
classes_num = config.get('classes_num')
check.check_classes_num(classes_num)
mode = config.get('mode', 'train')
if mode.lower() == 'train':
check.check_function_params(config, 'LEARNING_RATE')
check.check_function_params(config, 'OPTIMIZER') | [
"def",
"check_config",
"(",
"config",
")",
":",
"check",
".",
"check_version",
"(",
")",
"use_gpu",
"=",
"config",
".",
"get",
"(",
"'use_gpu'",
",",
"True",
")",
"if",
"use_gpu",
":",
"check",
".",
"check_gpu",
"(",
")",
"architecture",
"=",
"config",
".",
"get",
"(",
"'ARCHITECTURE'",
")",
"check",
".",
"check_architecture",
"(",
"architecture",
")",
"use_mix",
"=",
"config",
".",
"get",
"(",
"'use_mix'",
",",
"False",
")",
"check",
".",
"check_mix",
"(",
"architecture",
",",
"use_mix",
")",
"classes_num",
"=",
"config",
".",
"get",
"(",
"'classes_num'",
")",
"check",
".",
"check_classes_num",
"(",
"classes_num",
")",
"mode",
"=",
"config",
".",
"get",
"(",
"'mode'",
",",
"'train'",
")",
"if",
"mode",
".",
"lower",
"(",
")",
"==",
"'train'",
":",
"check",
".",
"check_function_params",
"(",
"config",
",",
"'LEARNING_RATE'",
")",
"check",
".",
"check_function_params",
"(",
"config",
",",
"'OPTIMIZER'",
")"
] | [
94,
0
] | [
116,
56
] | python | en | ['en', 'error', 'th'] | False |
override | (dl, ks, v) |
Recursively replace a value in a nested dict or list
Args:
dl(dict or list): dict or list to be replaced
ks(list): list of keys
v(str): value to be replaced
|
Recursively replace a value in a nested dict or list | def override(dl, ks, v):
"""
Recursively replace a value in a nested dict or list
Args:
dl(dict or list): dict or list to be replaced
ks(list): list of keys
v(str): value to be replaced
"""
def str2num(v):
try:
return eval(v)
except Exception:
return v
assert isinstance(dl, (list, dict)), ("{} should be a list or a dict")
assert len(ks) > 0, ('length of keys should be larger than 0')
if isinstance(dl, list):
k = str2num(ks[0])
if len(ks) == 1:
assert k < len(dl), ('index({}) out of range({})'.format(k, dl))
dl[k] = str2num(v)
else:
override(dl[k], ks[1:], v)
else:
if len(ks) == 1:
# assert ks[0] in dl, ('{} is not exist in {}'.format(ks[0], dl))
if not ks[0] in dl:
logger.warning('A new field ({}) detected!'.format(ks[0], dl))
dl[ks[0]] = str2num(v)
else:
override(dl[ks[0]], ks[1:], v) | [
"def",
"override",
"(",
"dl",
",",
"ks",
",",
"v",
")",
":",
"def",
"str2num",
"(",
"v",
")",
":",
"try",
":",
"return",
"eval",
"(",
"v",
")",
"except",
"Exception",
":",
"return",
"v",
"assert",
"isinstance",
"(",
"dl",
",",
"(",
"list",
",",
"dict",
")",
")",
",",
"(",
"\"{} should be a list or a dict\"",
")",
"assert",
"len",
"(",
"ks",
")",
">",
"0",
",",
"(",
"'lenght of keys should larger than 0'",
")",
"if",
"isinstance",
"(",
"dl",
",",
"list",
")",
":",
"k",
"=",
"str2num",
"(",
"ks",
"[",
"0",
"]",
")",
"if",
"len",
"(",
"ks",
")",
"==",
"1",
":",
"assert",
"k",
"<",
"len",
"(",
"dl",
")",
",",
"(",
"'index({}) out of range({})'",
".",
"format",
"(",
"k",
",",
"dl",
")",
")",
"dl",
"[",
"k",
"]",
"=",
"str2num",
"(",
"v",
")",
"else",
":",
"override",
"(",
"dl",
"[",
"k",
"]",
",",
"ks",
"[",
"1",
":",
"]",
",",
"v",
")",
"else",
":",
"if",
"len",
"(",
"ks",
")",
"==",
"1",
":",
"# assert ks[0] in dl, ('{} is not exist in {}'.format(ks[0], dl))",
"if",
"not",
"ks",
"[",
"0",
"]",
"in",
"dl",
":",
"logger",
".",
"warning",
"(",
"'A new filed ({}) detected!'",
".",
"format",
"(",
"ks",
"[",
"0",
"]",
",",
"dl",
")",
")",
"dl",
"[",
"ks",
"[",
"0",
"]",
"]",
"=",
"str2num",
"(",
"v",
")",
"else",
":",
"override",
"(",
"dl",
"[",
"ks",
"[",
"0",
"]",
"]",
",",
"ks",
"[",
"1",
":",
"]",
",",
"v",
")"
] | [
119,
0
] | [
151,
42
] | python | en | ['en', 'error', 'th'] | False |
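A sketch assuming override as defined in the record above; the config keys are hypothetical:

config = {'TRAIN': {'batch_size': 32,
                    'transforms': [{'ResizeImage': {'resize_short': 256}}]}}
override(config, ['TRAIN', 'batch_size'], '64')          # str2num turns '64' into 64
override(config, ['TRAIN', 'transforms', '0',
                  'ResizeImage', 'resize_short'], '300') # '0' indexes into the list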
override_config | (config, options=None) |
Recursively override the config
Args:
config(dict): dict to be replaced
options(list): list of pairs(key0.key1.idx.key2=value)
such as: [
'topk=2',
'VALID.transforms.1.ResizeImage.resize_short=300'
]
Returns:
config(dict): replaced config
|
Recursively override the config | def override_config(config, options=None):
"""
Recursively override the config
Args:
config(dict): dict to be replaced
options(list): list of pairs(key0.key1.idx.key2=value)
such as: [
'topk=2',
'VALID.transforms.1.ResizeImage.resize_short=300'
]
Returns:
config(dict): replaced config
"""
if options is not None:
for opt in options:
assert isinstance(opt, str), (
"option({}) should be a str".format(opt))
assert "=" in opt, (
"option({}) should contain a ="
"to distinguish between key and value".format(opt))
pair = opt.split('=')
assert len(pair) == 2, ("there can be only a = in the option")
key, value = pair
keys = key.split('.')
override(config, keys, value)
return config | [
"def",
"override_config",
"(",
"config",
",",
"options",
"=",
"None",
")",
":",
"if",
"options",
"is",
"not",
"None",
":",
"for",
"opt",
"in",
"options",
":",
"assert",
"isinstance",
"(",
"opt",
",",
"str",
")",
",",
"(",
"\"option({}) should be a str\"",
".",
"format",
"(",
"opt",
")",
")",
"assert",
"\"=\"",
"in",
"opt",
",",
"(",
"\"option({}) should contain a =\"",
"\"to distinguish between key and value\"",
".",
"format",
"(",
"opt",
")",
")",
"pair",
"=",
"opt",
".",
"split",
"(",
"'='",
")",
"assert",
"len",
"(",
"pair",
")",
"==",
"2",
",",
"(",
"\"there can be only a = in the option\"",
")",
"key",
",",
"value",
"=",
"pair",
"keys",
"=",
"key",
".",
"split",
"(",
"'.'",
")",
"override",
"(",
"config",
",",
"keys",
",",
"value",
")",
"return",
"config"
] | [
154,
0
] | [
182,
17
] | python | en | ['en', 'error', 'th'] | False |
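The same edits expressed as CLI-style 'key.path=value' strings, matching the docstring example above:

config = {'TRAIN': {'batch_size': 32,
                    'transforms': [{'ResizeImage': {'resize_short': 256}}]}}
override_config(config, ['TRAIN.batch_size=64',
                         'TRAIN.transforms.0.ResizeImage.resize_short=300'])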
get_config | (fname, overrides=None, show=True) |
Read config from file
|
Read config from file
| def get_config(fname, overrides=None, show=True):
"""
Read config from file
"""
assert os.path.exists(fname), (
'config file({}) does not exist'.format(fname))
config = parse_config(fname)
override_config(config, overrides)
if show:
print_config(config)
check_config(config)
return config | [
"def",
"get_config",
"(",
"fname",
",",
"overrides",
"=",
"None",
",",
"show",
"=",
"True",
")",
":",
"assert",
"os",
".",
"path",
".",
"exists",
"(",
"fname",
")",
",",
"(",
"'config file({}) is not exist'",
".",
"format",
"(",
"fname",
")",
")",
"config",
"=",
"parse_config",
"(",
"fname",
")",
"override_config",
"(",
"config",
",",
"overrides",
")",
"if",
"show",
":",
"print_config",
"(",
"config",
")",
"check_config",
"(",
"config",
")",
"return",
"config"
] | [
185,
0
] | [
196,
17
] | python | en | ['en', 'error', 'th'] | False |
format | (number, decimal_sep, decimal_pos=None, grouping=0, thousand_sep='',
force_grouping=False, use_l10n=None) |
Get a number (as a number or string), and return it as a string,
using formats defined as arguments:
* decimal_sep: Decimal separator symbol (for example ".")
* decimal_pos: Number of decimal positions
* grouping: Number of digits in every group limited by thousand separator.
For non-uniform digit grouping, it can be a sequence with the number
of digit group sizes following the format used by the Python locale
module in locale.localeconv() LC_NUMERIC grouping (e.g. (3, 2, 0)).
* thousand_sep: Thousand separator symbol (for example ",")
|
Get a number (as a number or string), and return it as a string,
using formats defined as arguments: | def format(number, decimal_sep, decimal_pos=None, grouping=0, thousand_sep='',
force_grouping=False, use_l10n=None):
"""
Get a number (as a number or string), and return it as a string,
using formats defined as arguments:
* decimal_sep: Decimal separator symbol (for example ".")
* decimal_pos: Number of decimal positions
* grouping: Number of digits in every group limited by thousand separator.
For non-uniform digit grouping, it can be a sequence with the number
of digit group sizes following the format used by the Python locale
module in locale.localeconv() LC_NUMERIC grouping (e.g. (3, 2, 0)).
* thousand_sep: Thousand separator symbol (for example ",")
"""
use_grouping = (use_l10n or (use_l10n is None and settings.USE_L10N)) and settings.USE_THOUSAND_SEPARATOR
use_grouping = use_grouping or force_grouping
use_grouping = use_grouping and grouping != 0
# Make the common case fast
if isinstance(number, int) and not use_grouping and not decimal_pos:
return mark_safe(number)
# sign
sign = ''
# Treat potentially very large/small floats as Decimals.
if isinstance(number, float) and 'e' in str(number).lower():
number = Decimal(str(number))
if isinstance(number, Decimal):
if decimal_pos is not None:
# If the provided number is too small to affect any of the visible
# decimal places, consider it equal to '0'.
cutoff = Decimal('0.' + '1'.rjust(decimal_pos, '0'))
if abs(number) < cutoff:
number = Decimal('0')
# Format values with more than 200 digits (an arbitrary cutoff) using
# scientific notation to avoid high memory usage in {:f}'.format().
_, digits, exponent = number.as_tuple()
if abs(exponent) + len(digits) > 200:
number = '{:e}'.format(number)
coefficient, exponent = number.split('e')
# Format the coefficient.
coefficient = format(
coefficient, decimal_sep, decimal_pos, grouping,
thousand_sep, force_grouping, use_l10n,
)
return '{}e{}'.format(coefficient, exponent)
else:
str_number = '{:f}'.format(number)
else:
str_number = str(number)
if str_number[0] == '-':
sign = '-'
str_number = str_number[1:]
# decimal part
if '.' in str_number:
int_part, dec_part = str_number.split('.')
if decimal_pos is not None:
dec_part = dec_part[:decimal_pos]
else:
int_part, dec_part = str_number, ''
if decimal_pos is not None:
dec_part = dec_part + ('0' * (decimal_pos - len(dec_part)))
dec_part = dec_part and decimal_sep + dec_part
# grouping
if use_grouping:
try:
# if grouping is a sequence
intervals = list(grouping)
except TypeError:
# grouping is a single value
intervals = [grouping, 0]
active_interval = intervals.pop(0)
int_part_gd = ''
cnt = 0
for digit in int_part[::-1]:
if cnt and cnt == active_interval:
if intervals:
active_interval = intervals.pop(0) or active_interval
int_part_gd += thousand_sep[::-1]
cnt = 0
int_part_gd += digit
cnt += 1
int_part = int_part_gd[::-1]
return sign + int_part + dec_part | [
"def",
"format",
"(",
"number",
",",
"decimal_sep",
",",
"decimal_pos",
"=",
"None",
",",
"grouping",
"=",
"0",
",",
"thousand_sep",
"=",
"''",
",",
"force_grouping",
"=",
"False",
",",
"use_l10n",
"=",
"None",
")",
":",
"use_grouping",
"=",
"(",
"use_l10n",
"or",
"(",
"use_l10n",
"is",
"None",
"and",
"settings",
".",
"USE_L10N",
")",
")",
"and",
"settings",
".",
"USE_THOUSAND_SEPARATOR",
"use_grouping",
"=",
"use_grouping",
"or",
"force_grouping",
"use_grouping",
"=",
"use_grouping",
"and",
"grouping",
"!=",
"0",
"# Make the common case fast",
"if",
"isinstance",
"(",
"number",
",",
"int",
")",
"and",
"not",
"use_grouping",
"and",
"not",
"decimal_pos",
":",
"return",
"mark_safe",
"(",
"number",
")",
"# sign",
"sign",
"=",
"''",
"# Treat potentially very large/small floats as Decimals.",
"if",
"isinstance",
"(",
"number",
",",
"float",
")",
"and",
"'e'",
"in",
"str",
"(",
"number",
")",
".",
"lower",
"(",
")",
":",
"number",
"=",
"Decimal",
"(",
"str",
"(",
"number",
")",
")",
"if",
"isinstance",
"(",
"number",
",",
"Decimal",
")",
":",
"if",
"decimal_pos",
"is",
"not",
"None",
":",
"# If the provided number is too small to affect any of the visible",
"# decimal places, consider it equal to '0'.",
"cutoff",
"=",
"Decimal",
"(",
"'0.'",
"+",
"'1'",
".",
"rjust",
"(",
"decimal_pos",
",",
"'0'",
")",
")",
"if",
"abs",
"(",
"number",
")",
"<",
"cutoff",
":",
"number",
"=",
"Decimal",
"(",
"'0'",
")",
"# Format values with more than 200 digits (an arbitrary cutoff) using",
"# scientific notation to avoid high memory usage in {:f}'.format().",
"_",
",",
"digits",
",",
"exponent",
"=",
"number",
".",
"as_tuple",
"(",
")",
"if",
"abs",
"(",
"exponent",
")",
"+",
"len",
"(",
"digits",
")",
">",
"200",
":",
"number",
"=",
"'{:e}'",
".",
"format",
"(",
"number",
")",
"coefficient",
",",
"exponent",
"=",
"number",
".",
"split",
"(",
"'e'",
")",
"# Format the coefficient.",
"coefficient",
"=",
"format",
"(",
"coefficient",
",",
"decimal_sep",
",",
"decimal_pos",
",",
"grouping",
",",
"thousand_sep",
",",
"force_grouping",
",",
"use_l10n",
",",
")",
"return",
"'{}e{}'",
".",
"format",
"(",
"coefficient",
",",
"exponent",
")",
"else",
":",
"str_number",
"=",
"'{:f}'",
".",
"format",
"(",
"number",
")",
"else",
":",
"str_number",
"=",
"str",
"(",
"number",
")",
"if",
"str_number",
"[",
"0",
"]",
"==",
"'-'",
":",
"sign",
"=",
"'-'",
"str_number",
"=",
"str_number",
"[",
"1",
":",
"]",
"# decimal part",
"if",
"'.'",
"in",
"str_number",
":",
"int_part",
",",
"dec_part",
"=",
"str_number",
".",
"split",
"(",
"'.'",
")",
"if",
"decimal_pos",
"is",
"not",
"None",
":",
"dec_part",
"=",
"dec_part",
"[",
":",
"decimal_pos",
"]",
"else",
":",
"int_part",
",",
"dec_part",
"=",
"str_number",
",",
"''",
"if",
"decimal_pos",
"is",
"not",
"None",
":",
"dec_part",
"=",
"dec_part",
"+",
"(",
"'0'",
"*",
"(",
"decimal_pos",
"-",
"len",
"(",
"dec_part",
")",
")",
")",
"dec_part",
"=",
"dec_part",
"and",
"decimal_sep",
"+",
"dec_part",
"# grouping",
"if",
"use_grouping",
":",
"try",
":",
"# if grouping is a sequence",
"intervals",
"=",
"list",
"(",
"grouping",
")",
"except",
"TypeError",
":",
"# grouping is a single value",
"intervals",
"=",
"[",
"grouping",
",",
"0",
"]",
"active_interval",
"=",
"intervals",
".",
"pop",
"(",
"0",
")",
"int_part_gd",
"=",
"''",
"cnt",
"=",
"0",
"for",
"digit",
"in",
"int_part",
"[",
":",
":",
"-",
"1",
"]",
":",
"if",
"cnt",
"and",
"cnt",
"==",
"active_interval",
":",
"if",
"intervals",
":",
"active_interval",
"=",
"intervals",
".",
"pop",
"(",
"0",
")",
"or",
"active_interval",
"int_part_gd",
"+=",
"thousand_sep",
"[",
":",
":",
"-",
"1",
"]",
"cnt",
"=",
"0",
"int_part_gd",
"+=",
"digit",
"cnt",
"+=",
"1",
"int_part",
"=",
"int_part_gd",
"[",
":",
":",
"-",
"1",
"]",
"return",
"sign",
"+",
"int_part",
"+",
"dec_part"
] | [
6,
0
] | [
89,
37
] | python | en | ['en', 'error', 'th'] | False |
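A sketch of the grouping behaviour; passing use_l10n=False together with force_grouping keeps the function from touching Django settings:

from django.utils.numberformat import format as number_format

number_format(1234567.891, '.', decimal_pos=2, grouping=3,
              thousand_sep=',', force_grouping=True, use_l10n=False)
# '1,234,567.89'
number_format(98765432, '.', grouping=(3, 2, 0),
              thousand_sep=',', force_grouping=True, use_l10n=False)
# non-uniform (Indian-style) grouping: '9,87,65,432'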
DatabaseCreation._maindb_connection | (self) |
This is analogous to other backends' `_nodb_connection` property,
which allows access to an "administrative" connection which can
be used to manage the test databases.
For Oracle, the only connection that can be used for that purpose
is the main (non-test) connection.
|
This is analogous to other backends' `_nodb_connection` property,
which allows access to an "administrative" connection which can
be used to manage the test databases.
For Oracle, the only connection that can be used for that purpose
is the main (non-test) connection.
| def _maindb_connection(self):
"""
This is analogous to other backends' `_nodb_connection` property,
which allows access to an "administrative" connection which can
be used to manage the test databases.
For Oracle, the only connection that can be used for that purpose
is the main (non-test) connection.
"""
settings_dict = settings.DATABASES[self.connection.alias]
user = settings_dict.get('SAVED_USER') or settings_dict['USER']
password = settings_dict.get('SAVED_PASSWORD') or settings_dict['PASSWORD']
settings_dict = {**settings_dict, 'USER': user, 'PASSWORD': password}
DatabaseWrapper = type(self.connection)
return DatabaseWrapper(settings_dict, alias=self.connection.alias) | [
"def",
"_maindb_connection",
"(",
"self",
")",
":",
"settings_dict",
"=",
"settings",
".",
"DATABASES",
"[",
"self",
".",
"connection",
".",
"alias",
"]",
"user",
"=",
"settings_dict",
".",
"get",
"(",
"'SAVED_USER'",
")",
"or",
"settings_dict",
"[",
"'USER'",
"]",
"password",
"=",
"settings_dict",
".",
"get",
"(",
"'SAVED_PASSWORD'",
")",
"or",
"settings_dict",
"[",
"'PASSWORD'",
"]",
"settings_dict",
"=",
"{",
"*",
"*",
"settings_dict",
",",
"'USER'",
":",
"user",
",",
"'PASSWORD'",
":",
"password",
"}",
"DatabaseWrapper",
"=",
"type",
"(",
"self",
".",
"connection",
")",
"return",
"DatabaseWrapper",
"(",
"settings_dict",
",",
"alias",
"=",
"self",
".",
"connection",
".",
"alias",
")"
] | [
14,
4
] | [
27,
74
] | python | en | ['en', 'error', 'th'] | False |
DatabaseCreation._switch_to_test_user | (self, parameters) |
Switch to the user that's used for creating the test database.
Oracle doesn't have the concept of separate databases under the same
user, so a separate user is used; see _create_test_db(). The main user
is also needed for cleanup when testing is completed, so save its
credentials in the SAVED_USER/SAVED_PASSWORD key in the settings dict.
|
Switch to the user that's used for creating the test database. | def _switch_to_test_user(self, parameters):
"""
Switch to the user that's used for creating the test database.
Oracle doesn't have the concept of separate databases under the same
user, so a separate user is used; see _create_test_db(). The main user
is also needed for cleanup when testing is completed, so save its
credentials in the SAVED_USER/SAVED_PASSWORD key in the settings dict.
"""
real_settings = settings.DATABASES[self.connection.alias]
real_settings['SAVED_USER'] = self.connection.settings_dict['SAVED_USER'] = \
self.connection.settings_dict['USER']
real_settings['SAVED_PASSWORD'] = self.connection.settings_dict['SAVED_PASSWORD'] = \
self.connection.settings_dict['PASSWORD']
real_test_settings = real_settings['TEST']
test_settings = self.connection.settings_dict['TEST']
real_test_settings['USER'] = real_settings['USER'] = test_settings['USER'] = \
self.connection.settings_dict['USER'] = parameters['user']
real_settings['PASSWORD'] = self.connection.settings_dict['PASSWORD'] = parameters['password'] | [
"def",
"_switch_to_test_user",
"(",
"self",
",",
"parameters",
")",
":",
"real_settings",
"=",
"settings",
".",
"DATABASES",
"[",
"self",
".",
"connection",
".",
"alias",
"]",
"real_settings",
"[",
"'SAVED_USER'",
"]",
"=",
"self",
".",
"connection",
".",
"settings_dict",
"[",
"'SAVED_USER'",
"]",
"=",
"self",
".",
"connection",
".",
"settings_dict",
"[",
"'USER'",
"]",
"real_settings",
"[",
"'SAVED_PASSWORD'",
"]",
"=",
"self",
".",
"connection",
".",
"settings_dict",
"[",
"'SAVED_PASSWORD'",
"]",
"=",
"self",
".",
"connection",
".",
"settings_dict",
"[",
"'PASSWORD'",
"]",
"real_test_settings",
"=",
"real_settings",
"[",
"'TEST'",
"]",
"test_settings",
"=",
"self",
".",
"connection",
".",
"settings_dict",
"[",
"'TEST'",
"]",
"real_test_settings",
"[",
"'USER'",
"]",
"=",
"real_settings",
"[",
"'USER'",
"]",
"=",
"test_settings",
"[",
"'USER'",
"]",
"=",
"self",
".",
"connection",
".",
"settings_dict",
"[",
"'USER'",
"]",
"=",
"parameters",
"[",
"'user'",
"]",
"real_settings",
"[",
"'PASSWORD'",
"]",
"=",
"self",
".",
"connection",
".",
"settings_dict",
"[",
"'PASSWORD'",
"]",
"=",
"parameters",
"[",
"'password'",
"]"
] | [
101,
4
] | [
119,
102
] | python | en | ['en', 'error', 'th'] | False |
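The same save-then-swap can be sketched on plain dicts, independent of a live connection; everything here except the key names and the assignment order is invented.

real_settings = {'USER': 'main_user', 'PASSWORD': 'main_pw', 'TEST': {}}
parameters = {'user': 'test_user', 'password': 'test_pw'}
# 1. Preserve the main credentials so teardown can restore them later.
real_settings['SAVED_USER'] = real_settings['USER']
real_settings['SAVED_PASSWORD'] = real_settings['PASSWORD']
# 2. Repoint USER/PASSWORD (and the TEST sub-dict) at the test user.
real_settings['TEST']['USER'] = real_settings['USER'] = parameters['user']
real_settings['PASSWORD'] = parameters['password']
assert (real_settings['USER'], real_settings['SAVED_USER']) == ('test_user', 'main_user')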
DatabaseCreation.set_as_test_mirror | (self, primary_settings_dict) |
Set this database up to be used in testing as a mirror of a primary
database whose settings are given.
|
Set this database up to be used in testing as a mirror of a primary
database whose settings are given.
| def set_as_test_mirror(self, primary_settings_dict):
"""
Set this database up to be used in testing as a mirror of a primary
database whose settings are given.
"""
self.connection.settings_dict['USER'] = primary_settings_dict['USER']
self.connection.settings_dict['PASSWORD'] = primary_settings_dict['PASSWORD'] | [
"def",
"set_as_test_mirror",
"(",
"self",
",",
"primary_settings_dict",
")",
":",
"self",
".",
"connection",
".",
"settings_dict",
"[",
"'USER'",
"]",
"=",
"primary_settings_dict",
"[",
"'USER'",
"]",
"self",
".",
"connection",
".",
"settings_dict",
"[",
"'PASSWORD'",
"]",
"=",
"primary_settings_dict",
"[",
"'PASSWORD'",
"]"
] | [
121,
4
] | [
127,
85
] | python | en | ['en', 'error', 'th'] | False |
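A minimal illustration of the mirroring above, again on bare dicts with invented values: the mirror alias simply adopts the primary's credentials.

primary_settings_dict = {'USER': 'app', 'PASSWORD': 'secret'}
mirror_settings_dict = {'USER': 'stale', 'PASSWORD': 'stale'}
mirror_settings_dict['USER'] = primary_settings_dict['USER']
mirror_settings_dict['PASSWORD'] = primary_settings_dict['PASSWORD']
assert mirror_settings_dict == primary_settings_dict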
DatabaseCreation._destroy_test_db | (self, test_database_name, verbosity=1) |
Destroy the test user and test database tables created during setup,
restoring the connection's saved main credentials first.
 |
Destroy the test user and test database tables created during setup,
restoring the connection's saved main credentials first.
 | def _destroy_test_db(self, test_database_name, verbosity=1):
"""
Destroy the test user and test database tables created during setup,
restoring the connection's saved main credentials first.
"""
self.connection.settings_dict['USER'] = self.connection.settings_dict['SAVED_USER']
self.connection.settings_dict['PASSWORD'] = self.connection.settings_dict['SAVED_PASSWORD']
self.connection.close()
parameters = self._get_test_db_params()
with self._maindb_connection.cursor() as cursor:
if self._test_user_create():
if verbosity >= 1:
self.log('Destroying test user...')
self._destroy_test_user(cursor, parameters, verbosity)
if self._test_database_create():
if verbosity >= 1:
self.log('Destroying test database tables...')
self._execute_test_db_destruction(cursor, parameters, verbosity)
self._maindb_connection.close() | [
"def",
"_destroy_test_db",
"(",
"self",
",",
"test_database_name",
",",
"verbosity",
"=",
"1",
")",
":",
"self",
".",
"connection",
".",
"settings_dict",
"[",
"'USER'",
"]",
"=",
"self",
".",
"connection",
".",
"settings_dict",
"[",
"'SAVED_USER'",
"]",
"self",
".",
"connection",
".",
"settings_dict",
"[",
"'PASSWORD'",
"]",
"=",
"self",
".",
"connection",
".",
"settings_dict",
"[",
"'SAVED_PASSWORD'",
"]",
"self",
".",
"connection",
".",
"close",
"(",
")",
"parameters",
"=",
"self",
".",
"_get_test_db_params",
"(",
")",
"with",
"self",
".",
"_maindb_connection",
".",
"cursor",
"(",
")",
"as",
"cursor",
":",
"if",
"self",
".",
"_test_user_create",
"(",
")",
":",
"if",
"verbosity",
">=",
"1",
":",
"self",
".",
"log",
"(",
"'Destroying test user...'",
")",
"self",
".",
"_destroy_test_user",
"(",
"cursor",
",",
"parameters",
",",
"verbosity",
")",
"if",
"self",
".",
"_test_database_create",
"(",
")",
":",
"if",
"verbosity",
">=",
"1",
":",
"self",
".",
"log",
"(",
"'Destroying test database tables...'",
")",
"self",
".",
"_execute_test_db_destruction",
"(",
"cursor",
",",
"parameters",
",",
"verbosity",
")",
"self",
".",
"_maindb_connection",
".",
"close",
"(",
")"
] | [
166,
4
] | [
184,
39
] | python | en | ['en', 'error', 'th'] | False |
DatabaseCreation._execute_allow_fail_statements | (self, cursor, statements, parameters, verbosity, acceptable_ora_err) |
Execute statements which are allowed to fail silently if the Oracle
error code given by `acceptable_ora_err` is raised. Return True if the
statements execute without an exception, or False otherwise.
|
Execute statements which are allowed to fail silently if the Oracle
error code given by `acceptable_ora_err` is raised. Return True if the
statements execute without an exception, or False otherwise.
| def _execute_allow_fail_statements(self, cursor, statements, parameters, verbosity, acceptable_ora_err):
"""
Execute statements which are allowed to fail silently if the Oracle
error code given by `acceptable_ora_err` is raised. Return True if the
statements execute without an exception, or False otherwise.
"""
try:
# Statement can fail when acceptable_ora_err is not None
allow_quiet_fail = acceptable_ora_err is not None and len(acceptable_ora_err) > 0
self._execute_statements(cursor, statements, parameters, verbosity, allow_quiet_fail=allow_quiet_fail)
return True
except DatabaseError as err:
description = str(err)
if acceptable_ora_err is None or acceptable_ora_err not in description:
raise
return False | [
"def",
"_execute_allow_fail_statements",
"(",
"self",
",",
"cursor",
",",
"statements",
",",
"parameters",
",",
"verbosity",
",",
"acceptable_ora_err",
")",
":",
"try",
":",
"# Statement can fail when acceptable_ora_err is not None",
"allow_quiet_fail",
"=",
"acceptable_ora_err",
"is",
"not",
"None",
"and",
"len",
"(",
"acceptable_ora_err",
")",
">",
"0",
"self",
".",
"_execute_statements",
"(",
"cursor",
",",
"statements",
",",
"parameters",
",",
"verbosity",
",",
"allow_quiet_fail",
"=",
"allow_quiet_fail",
")",
"return",
"True",
"except",
"DatabaseError",
"as",
"err",
":",
"description",
"=",
"str",
"(",
"err",
")",
"if",
"acceptable_ora_err",
"is",
"None",
"or",
"acceptable_ora_err",
"not",
"in",
"description",
":",
"raise",
"return",
"False"
] | [
282,
4
] | [
297,
24
] | python | en | ['en', 'error', 'th'] | False |
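The "allowed to fail" idiom above generalizes to any driver whose errors are matched by substring. A self-contained sketch, with a stand-in exception class and an example Oracle error code (ORA-01940) chosen for illustration:

class DatabaseError(Exception):
    """Stand-in for the backend's DatabaseError."""

def execute_allow_fail(run, acceptable_err=None):
    try:
        run()  # the statement that may fail
        return True
    except DatabaseError as err:
        # Swallow the failure only when it matches the expected code.
        if acceptable_err is None or acceptable_err not in str(err):
            raise
        return False

def failing_drop():
    raise DatabaseError('ORA-01940: cannot drop a user that is currently connected')

assert execute_allow_fail(failing_drop, acceptable_err='ORA-01940') is False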
DatabaseCreation._test_settings_get | (self, key, default=None, prefixed=None) |
Return a value from the test settings dict, or a given default, or a
prefixed entry from the main settings dict.
|
Return a value from the test settings dict, or a given default, or a
prefixed entry from the main settings dict.
| def _test_settings_get(self, key, default=None, prefixed=None):
"""
Return a value from the test settings dict, or a given default, or a
prefixed entry from the main settings dict.
"""
settings_dict = self.connection.settings_dict
val = settings_dict['TEST'].get(key, default)
if val is None and prefixed:
val = TEST_DATABASE_PREFIX + settings_dict[prefixed]
return val | [
"def",
"_test_settings_get",
"(",
"self",
",",
"key",
",",
"default",
"=",
"None",
",",
"prefixed",
"=",
"None",
")",
":",
"settings_dict",
"=",
"self",
".",
"connection",
".",
"settings_dict",
"val",
"=",
"settings_dict",
"[",
"'TEST'",
"]",
".",
"get",
"(",
"key",
",",
"default",
")",
"if",
"val",
"is",
"None",
"and",
"prefixed",
":",
"val",
"=",
"TEST_DATABASE_PREFIX",
"+",
"settings_dict",
"[",
"prefixed",
"]",
"return",
"val"
] | [
316,
4
] | [
325,
18
] | python | en | ['en', 'error', 'th'] | False |
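The lookup order is: TEST entry, then the explicit default, then the prefixed main entry. A dict-only sketch (settings values invented; TEST_DATABASE_PREFIX is Django's real constant name, normally 'test_'):

TEST_DATABASE_PREFIX = 'test_'
settings_dict = {'USER': 'app', 'TEST': {'CHARSET': 'UTF8'}}

def test_settings_get(key, default=None, prefixed=None):
    val = settings_dict['TEST'].get(key, default)
    if val is None and prefixed:
        val = TEST_DATABASE_PREFIX + settings_dict[prefixed]
    return val

assert test_settings_get('CHARSET') == 'UTF8'
assert test_settings_get('USER', prefixed='USER') == 'test_app'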
DatabaseCreation._get_test_db_name | (self) |
Return the 'production' DB name to get the test DB creation machinery
to work. This isn't a great deal in this case because DB names as
handled by Django don't have real counterparts in Oracle.
|
Return the 'production' DB name to get the test DB creation machinery
to work. This isn't a great deal in this case because DB names as
handled by Django don't have real counterparts in Oracle.
| def _get_test_db_name(self):
"""
Return the 'production' DB name to get the test DB creation machinery
to work. This isn't a great deal in this case because DB names as
handled by Django don't have real counterparts in Oracle.
"""
return self.connection.settings_dict['NAME'] | [
"def",
"_get_test_db_name",
"(",
"self",
")",
":",
"return",
"self",
".",
"connection",
".",
"settings_dict",
"[",
"'NAME'",
"]"
] | [
383,
4
] | [
389,
52
] | python | en | ['en', 'error', 'th'] | False |
autocomplete | () | Entry Point for completion of main and subcommand options. | Entry Point for completion of main and subcommand options. | def autocomplete() -> None:
"""Entry Point for completion of main and subcommand options."""
# Don't complete if user hasn't sourced bash_completion file.
if "PIP_AUTO_COMPLETE" not in os.environ:
return
cwords = os.environ["COMP_WORDS"].split()[1:]
cword = int(os.environ["COMP_CWORD"])
try:
current = cwords[cword - 1]
except IndexError:
current = ""
parser = create_main_parser()
subcommands = list(commands_dict)
options = []
# subcommand
subcommand_name: Optional[str] = None
for word in cwords:
if word in subcommands:
subcommand_name = word
break
# subcommand options
if subcommand_name is not None:
# special case: 'help' subcommand has no options
if subcommand_name == "help":
sys.exit(1)
# special case: list locally installed dists for show and uninstall
should_list_installed = not current.startswith("-") and subcommand_name in [
"show",
"uninstall",
]
if should_list_installed:
env = get_default_environment()
lc = current.lower()
installed = [
dist.canonical_name
for dist in env.iter_installed_distributions(local_only=True)
if dist.canonical_name.startswith(lc)
and dist.canonical_name not in cwords[1:]
]
# if there are no dists installed, fall back to option completion
if installed:
for dist in installed:
print(dist)
sys.exit(1)
subcommand = create_command(subcommand_name)
for opt in subcommand.parser.option_list_all:
if opt.help != optparse.SUPPRESS_HELP:
for opt_str in opt._long_opts + opt._short_opts:
options.append((opt_str, opt.nargs))
# filter out previously specified options from available options
prev_opts = [x.split("=")[0] for x in cwords[1 : cword - 1]]
options = [(x, v) for (x, v) in options if x not in prev_opts]
# filter options by current input
options = [(k, v) for k, v in options if k.startswith(current)]
# get completion type given cwords and available subcommand options
completion_type = get_path_completion_type(
cwords,
cword,
subcommand.parser.option_list_all,
)
# get completion files and directories if ``completion_type`` is
# ``<file>``, ``<dir>`` or ``<path>``
if completion_type:
paths = auto_complete_paths(current, completion_type)
options = [(path, 0) for path in paths]
for option in options:
opt_label = option[0]
# append '=' to options which require args
if option[1] and option[0][:2] == "--":
opt_label += "="
print(opt_label)
else:
# show main parser options only when necessary
opts = [i.option_list for i in parser.option_groups]
opts.append(parser.option_list)
flattened_opts = chain.from_iterable(opts)
if current.startswith("-"):
for opt in flattened_opts:
if opt.help != optparse.SUPPRESS_HELP:
subcommands += opt._long_opts + opt._short_opts
else:
# get completion type given cwords and all available options
completion_type = get_path_completion_type(cwords, cword, flattened_opts)
if completion_type:
subcommands = list(auto_complete_paths(current, completion_type))
print(" ".join([x for x in subcommands if x.startswith(current)]))
sys.exit(1) | [
"def",
"autocomplete",
"(",
")",
"->",
"None",
":",
"# Don't complete if user hasn't sourced bash_completion file.",
"if",
"\"PIP_AUTO_COMPLETE\"",
"not",
"in",
"os",
".",
"environ",
":",
"return",
"cwords",
"=",
"os",
".",
"environ",
"[",
"\"COMP_WORDS\"",
"]",
".",
"split",
"(",
")",
"[",
"1",
":",
"]",
"cword",
"=",
"int",
"(",
"os",
".",
"environ",
"[",
"\"COMP_CWORD\"",
"]",
")",
"try",
":",
"current",
"=",
"cwords",
"[",
"cword",
"-",
"1",
"]",
"except",
"IndexError",
":",
"current",
"=",
"\"\"",
"parser",
"=",
"create_main_parser",
"(",
")",
"subcommands",
"=",
"list",
"(",
"commands_dict",
")",
"options",
"=",
"[",
"]",
"# subcommand",
"subcommand_name",
":",
"Optional",
"[",
"str",
"]",
"=",
"None",
"for",
"word",
"in",
"cwords",
":",
"if",
"word",
"in",
"subcommands",
":",
"subcommand_name",
"=",
"word",
"break",
"# subcommand options",
"if",
"subcommand_name",
"is",
"not",
"None",
":",
"# special case: 'help' subcommand has no options",
"if",
"subcommand_name",
"==",
"\"help\"",
":",
"sys",
".",
"exit",
"(",
"1",
")",
"# special case: list locally installed dists for show and uninstall",
"should_list_installed",
"=",
"not",
"current",
".",
"startswith",
"(",
"\"-\"",
")",
"and",
"subcommand_name",
"in",
"[",
"\"show\"",
",",
"\"uninstall\"",
",",
"]",
"if",
"should_list_installed",
":",
"env",
"=",
"get_default_environment",
"(",
")",
"lc",
"=",
"current",
".",
"lower",
"(",
")",
"installed",
"=",
"[",
"dist",
".",
"canonical_name",
"for",
"dist",
"in",
"env",
".",
"iter_installed_distributions",
"(",
"local_only",
"=",
"True",
")",
"if",
"dist",
".",
"canonical_name",
".",
"startswith",
"(",
"lc",
")",
"and",
"dist",
".",
"canonical_name",
"not",
"in",
"cwords",
"[",
"1",
":",
"]",
"]",
"# if there are no dists installed, fall back to option completion",
"if",
"installed",
":",
"for",
"dist",
"in",
"installed",
":",
"print",
"(",
"dist",
")",
"sys",
".",
"exit",
"(",
"1",
")",
"subcommand",
"=",
"create_command",
"(",
"subcommand_name",
")",
"for",
"opt",
"in",
"subcommand",
".",
"parser",
".",
"option_list_all",
":",
"if",
"opt",
".",
"help",
"!=",
"optparse",
".",
"SUPPRESS_HELP",
":",
"for",
"opt_str",
"in",
"opt",
".",
"_long_opts",
"+",
"opt",
".",
"_short_opts",
":",
"options",
".",
"append",
"(",
"(",
"opt_str",
",",
"opt",
".",
"nargs",
")",
")",
"# filter out previously specified options from available options",
"prev_opts",
"=",
"[",
"x",
".",
"split",
"(",
"\"=\"",
")",
"[",
"0",
"]",
"for",
"x",
"in",
"cwords",
"[",
"1",
":",
"cword",
"-",
"1",
"]",
"]",
"options",
"=",
"[",
"(",
"x",
",",
"v",
")",
"for",
"(",
"x",
",",
"v",
")",
"in",
"options",
"if",
"x",
"not",
"in",
"prev_opts",
"]",
"# filter options by current input",
"options",
"=",
"[",
"(",
"k",
",",
"v",
")",
"for",
"k",
",",
"v",
"in",
"options",
"if",
"k",
".",
"startswith",
"(",
"current",
")",
"]",
"# get completion type given cwords and available subcommand options",
"completion_type",
"=",
"get_path_completion_type",
"(",
"cwords",
",",
"cword",
",",
"subcommand",
".",
"parser",
".",
"option_list_all",
",",
")",
"# get completion files and directories if ``completion_type`` is",
"# ``<file>``, ``<dir>`` or ``<path>``",
"if",
"completion_type",
":",
"paths",
"=",
"auto_complete_paths",
"(",
"current",
",",
"completion_type",
")",
"options",
"=",
"[",
"(",
"path",
",",
"0",
")",
"for",
"path",
"in",
"paths",
"]",
"for",
"option",
"in",
"options",
":",
"opt_label",
"=",
"option",
"[",
"0",
"]",
"# append '=' to options which require args",
"if",
"option",
"[",
"1",
"]",
"and",
"option",
"[",
"0",
"]",
"[",
":",
"2",
"]",
"==",
"\"--\"",
":",
"opt_label",
"+=",
"\"=\"",
"print",
"(",
"opt_label",
")",
"else",
":",
"# show main parser options only when necessary",
"opts",
"=",
"[",
"i",
".",
"option_list",
"for",
"i",
"in",
"parser",
".",
"option_groups",
"]",
"opts",
".",
"append",
"(",
"parser",
".",
"option_list",
")",
"flattened_opts",
"=",
"chain",
".",
"from_iterable",
"(",
"opts",
")",
"if",
"current",
".",
"startswith",
"(",
"\"-\"",
")",
":",
"for",
"opt",
"in",
"flattened_opts",
":",
"if",
"opt",
".",
"help",
"!=",
"optparse",
".",
"SUPPRESS_HELP",
":",
"subcommands",
"+=",
"opt",
".",
"_long_opts",
"+",
"opt",
".",
"_short_opts",
"else",
":",
"# get completion type given cwords and all available options",
"completion_type",
"=",
"get_path_completion_type",
"(",
"cwords",
",",
"cword",
",",
"flattened_opts",
")",
"if",
"completion_type",
":",
"subcommands",
"=",
"list",
"(",
"auto_complete_paths",
"(",
"current",
",",
"completion_type",
")",
")",
"print",
"(",
"\" \"",
".",
"join",
"(",
"[",
"x",
"for",
"x",
"in",
"subcommands",
"if",
"x",
".",
"startswith",
"(",
"current",
")",
"]",
")",
")",
"sys",
".",
"exit",
"(",
"1",
")"
] | [
14,
0
] | [
107,
15
] | python | en | ['en', 'en', 'en'] | True |
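The hook is driven entirely through environment variables exported by pip's bash-completion script before re-invoking pip. A rough simulation, assuming a pip new enough to ship this hook is on PATH (the typed command line below is invented):

import os
import subprocess

env = dict(os.environ,
           PIP_AUTO_COMPLETE='1',  # gate checked at the top of autocomplete()
           COMP_WORDS='pip ins',   # the command line typed so far
           COMP_CWORD='1')         # index of the word being completed
result = subprocess.run(['pip'], env=env, capture_output=True, text=True)
print(result.stdout.strip())  # expected: 'install' (pip exits 1 here by design)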
get_path_completion_type | (
cwords: List[str], cword: int, opts: Iterable[Any]
) | Get the type of path completion (``file``, ``dir``, ``path`` or None)
:param cwords: same as the environment variable ``COMP_WORDS``
:param cword: same as the environment variable ``COMP_CWORD``
:param opts: The available options to check
:return: path completion type (``file``, ``dir``, ``path`` or None)
| Get the type of path completion (``file``, ``dir``, ``path`` or None) | def get_path_completion_type(
cwords: List[str], cword: int, opts: Iterable[Any]
) -> Optional[str]:
"""Get the type of path completion (``file``, ``dir``, ``path`` or None)
:param cwords: same as the environment variable ``COMP_WORDS``
:param cword: same as the environment variable ``COMP_CWORD``
:param opts: The available options to check
:return: path completion type (``file``, ``dir``, ``path`` or None)
"""
if cword < 2 or not cwords[cword - 2].startswith("-"):
return None
for opt in opts:
if opt.help == optparse.SUPPRESS_HELP:
continue
for o in str(opt).split("/"):
if cwords[cword - 2].split("=")[0] == o:
if not opt.metavar or any(
x in ("path", "file", "dir") for x in opt.metavar.split("/")
):
return opt.metavar
return None | [
"def",
"get_path_completion_type",
"(",
"cwords",
":",
"List",
"[",
"str",
"]",
",",
"cword",
":",
"int",
",",
"opts",
":",
"Iterable",
"[",
"Any",
"]",
")",
"->",
"Optional",
"[",
"str",
"]",
":",
"if",
"cword",
"<",
"2",
"or",
"not",
"cwords",
"[",
"cword",
"-",
"2",
"]",
".",
"startswith",
"(",
"\"-\"",
")",
":",
"return",
"None",
"for",
"opt",
"in",
"opts",
":",
"if",
"opt",
".",
"help",
"==",
"optparse",
".",
"SUPPRESS_HELP",
":",
"continue",
"for",
"o",
"in",
"str",
"(",
"opt",
")",
".",
"split",
"(",
"\"/\"",
")",
":",
"if",
"cwords",
"[",
"cword",
"-",
"2",
"]",
".",
"split",
"(",
"\"=\"",
")",
"[",
"0",
"]",
"==",
"o",
":",
"if",
"not",
"opt",
".",
"metavar",
"or",
"any",
"(",
"x",
"in",
"(",
"\"path\"",
",",
"\"file\"",
",",
"\"dir\"",
")",
"for",
"x",
"in",
"opt",
".",
"metavar",
".",
"split",
"(",
"\"/\"",
")",
")",
":",
"return",
"opt",
".",
"metavar",
"return",
"None"
] | [
110,
0
] | [
131,
15
] | python | en | ['en', 'en', 'en'] | True |
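A minimal invocation sketch, assuming the function above is in scope; the option definition is invented to mimic pip's -r/--requirement:

import optparse

parser = optparse.OptionParser()
parser.add_option('-r', '--requirement', metavar='file', help='Install from file.')

cwords = ['install', '-r']  # COMP_WORDS with the leading 'pip' stripped
cword = 3                   # bash counts 'pip' too, so '-r' is cwords[cword - 2]
print(get_path_completion_type(cwords, cword, parser.option_list))  # -> file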
auto_complete_paths | (current: str, completion_type: str) | If ``completion_type`` is ``file`` or ``path``, list all regular files
and directories starting with ``current``; otherwise only list directories
starting with ``current``.
:param current: The word to be completed
:param completion_type: path completion type (`file`, `path` or `dir`)
:return: A generator of regular files and/or directories
| If ``completion_type`` is ``file`` or ``path``, list all regular files
and directories starting with ``current``; otherwise only list directories
starting with ``current``. | def auto_complete_paths(current: str, completion_type: str) -> Iterable[str]:
"""If ``completion_type`` is ``file`` or ``path``, list all regular files
and directories starting with ``current``; otherwise only list directories
starting with ``current``.
:param current: The word to be completed
:param completion_type: path completion type (`file`, `path` or `dir`)
:return: A generator of regular files and/or directories
"""
directory, filename = os.path.split(current)
current_path = os.path.abspath(directory)
# Don't complete paths if they can't be accessed
if not os.access(current_path, os.R_OK):
return
filename = os.path.normcase(filename)
# list all files that start with ``filename``
file_list = (
x for x in os.listdir(current_path) if os.path.normcase(x).startswith(filename)
)
for f in file_list:
opt = os.path.join(current_path, f)
comp_file = os.path.normcase(os.path.join(directory, f))
# complete regular files when there is not ``<dir>`` after option
# complete directories when there is ``<file>``, ``<path>`` or
# ``<dir>`` after option
if completion_type != "dir" and os.path.isfile(opt):
yield comp_file
elif os.path.isdir(opt):
yield os.path.join(comp_file, "") | [
"def",
"auto_complete_paths",
"(",
"current",
":",
"str",
",",
"completion_type",
":",
"str",
")",
"->",
"Iterable",
"[",
"str",
"]",
":",
"directory",
",",
"filename",
"=",
"os",
".",
"path",
".",
"split",
"(",
"current",
")",
"current_path",
"=",
"os",
".",
"path",
".",
"abspath",
"(",
"directory",
")",
"# Don't complete paths if they can't be accessed",
"if",
"not",
"os",
".",
"access",
"(",
"current_path",
",",
"os",
".",
"R_OK",
")",
":",
"return",
"filename",
"=",
"os",
".",
"path",
".",
"normcase",
"(",
"filename",
")",
"# list all files that start with ``filename``",
"file_list",
"=",
"(",
"x",
"for",
"x",
"in",
"os",
".",
"listdir",
"(",
"current_path",
")",
"if",
"os",
".",
"path",
".",
"normcase",
"(",
"x",
")",
".",
"startswith",
"(",
"filename",
")",
")",
"for",
"f",
"in",
"file_list",
":",
"opt",
"=",
"os",
".",
"path",
".",
"join",
"(",
"current_path",
",",
"f",
")",
"comp_file",
"=",
"os",
".",
"path",
".",
"normcase",
"(",
"os",
".",
"path",
".",
"join",
"(",
"directory",
",",
"f",
")",
")",
"# complete regular files when there is not ``<dir>`` after option",
"# complete directories when there is ``<file>``, ``<path>`` or",
"# ``<dir>``after option",
"if",
"completion_type",
"!=",
"\"dir\"",
"and",
"os",
".",
"path",
".",
"isfile",
"(",
"opt",
")",
":",
"yield",
"comp_file",
"elif",
"os",
".",
"path",
".",
"isdir",
"(",
"opt",
")",
":",
"yield",
"os",
".",
"path",
".",
"join",
"(",
"comp_file",
",",
"\"\"",
")"
] | [
134,
0
] | [
162,
45
] | python | en | ['en', 'en', 'en'] | True |
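A usage sketch, assuming the generator above is in scope: files matching the partial path are yielded as-is, directories with a trailing separator appended.

import os
import tempfile

tmp = tempfile.mkdtemp()
open(os.path.join(tmp, 'req.txt'), 'w').close()
os.mkdir(os.path.join(tmp, 'reqs'))

partial = os.path.join(tmp, 're')
print(sorted(auto_complete_paths(partial, 'path')))
# e.g. ['.../req.txt', '.../reqs/'] -- the exact prefix depends on the temp dir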
move_to_completion_bucket | (target_bucket, target_infix, **kwargs) | A utility method to move an object to a target location in GCS. | A utility method to move an object to a target location in GCS. | def move_to_completion_bucket(target_bucket, target_infix, **kwargs):
"""A utility method to move an object to a target location in GCS."""
# Here we establish a connection hook to GoogleCloudStorage.
# Google Cloud Composer automatically provides a google_cloud_storage_default
# connection id that is used by this hook.
conn = gcs_hook.GoogleCloudStorageHook()
# The external trigger (Google Cloud Function) that initiates this DAG
# provides a dag_run.conf dictionary with event attributes that specify
# the information about the GCS object that triggered this DAG.
# We extract the bucket and object name from this dictionary.
source_bucket = kwargs['dag_run'].conf['bucket']
source_object = kwargs['dag_run'].conf['name']
completion_ds = kwargs['ds']
target_object = os.path.join(target_infix, completion_ds, source_object)
logging.info('Copying %s to %s',
os.path.join(source_bucket, source_object),
os.path.join(target_bucket, target_object))
conn.copy(source_bucket, source_object, target_bucket, target_object)
logging.info('Deleting %s',
os.path.join(source_bucket, source_object))
conn.delete(source_bucket, source_object) | [
"def",
"move_to_completion_bucket",
"(",
"target_bucket",
",",
"target_infix",
",",
"*",
"*",
"kwargs",
")",
":",
"# Here we establish a connection hook to GoogleCloudStorage.",
"# Google Cloud Composer automatically provides a google_cloud_storage_default",
"# connection id that is used by this hook.",
"conn",
"=",
"gcs_hook",
".",
"GoogleCloudStorageHook",
"(",
")",
"# The external trigger (Google Cloud Function) that initiates this DAG",
"# provides a dag_run.conf dictionary with event attributes that specify",
"# the information about the GCS object that triggered this DAG.",
"# We extract the bucket and object name from this dictionary.",
"source_bucket",
"=",
"kwargs",
"[",
"'dag_run'",
"]",
".",
"conf",
"[",
"'bucket'",
"]",
"source_object",
"=",
"kwargs",
"[",
"'dag_run'",
"]",
".",
"conf",
"[",
"'name'",
"]",
"completion_ds",
"=",
"kwargs",
"[",
"'ds'",
"]",
"target_object",
"=",
"os",
".",
"path",
".",
"join",
"(",
"target_infix",
",",
"completion_ds",
",",
"source_object",
")",
"logging",
".",
"info",
"(",
"'Copying %s to %s'",
",",
"os",
".",
"path",
".",
"join",
"(",
"source_bucket",
",",
"source_object",
")",
",",
"os",
".",
"path",
".",
"join",
"(",
"target_bucket",
",",
"target_object",
")",
")",
"conn",
".",
"copy",
"(",
"source_bucket",
",",
"source_object",
",",
"target_bucket",
",",
"target_object",
")",
"logging",
".",
"info",
"(",
"'Deleting %s'",
",",
"os",
".",
"path",
".",
"join",
"(",
"source_bucket",
",",
"source_object",
")",
")",
"conn",
".",
"delete",
"(",
"source_bucket",
",",
"source_object",
")"
] | [
77,
0
] | [
101,
45
] | python | en | ['en', 'en', 'en'] | True |
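In the externally triggered DAG the comments describe, a callable like this is typically wired up with a PythonOperator. A sketch in Airflow 1.x style (matching the gcs_hook import above); the task id, bucket name, and infix are assumptions, and `dag` is assumed to be defined earlier in the module:

from airflow.operators import python_operator

move_task = python_operator.PythonOperator(
    task_id='move_to_completion_bucket',
    python_callable=move_to_completion_bucket,
    op_args=['my-completion-bucket', 'processed'],  # hypothetical values
    provide_context=True,  # exposes dag_run and ds in **kwargs on Airflow 1.x
    dag=dag,
)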
train_and_deploy | (
project='cloud-training-demos',
bucket='cloud-training-demos-ml',
startYear='2000'
) | Pipeline to train babyweight model | Pipeline to train babyweight model | def train_and_deploy(
project='cloud-training-demos',
bucket='cloud-training-demos-ml',
startYear='2000'
):
"""Pipeline to train babyweight model"""
start_step = 1
# Step 1: create training dataset using Apache Beam on Cloud Dataflow
if start_step <= 1:
preprocess = dsl.ContainerOp(
name='preprocess',
# image needs to be a compile-time string
image='gcr.io/cloud-training-demos/babyweight-pipeline-bqtocsv:latest',
arguments=[
'--project', project,
'--mode', 'cloud',
'--bucket', bucket,
'--start_year', startYear
],
file_outputs={'bucket': '/output.txt'}
)
else:
preprocess = ObjectDict({
'outputs': {
'bucket': bucket
}
})
# Step 2: Do hyperparameter tuning of the model on Cloud ML Engine
if start_step <= 2:
hparam_train = dsl.ContainerOp(
name='hypertrain',
# image needs to be a compile-time string
image='gcr.io/cloud-training-demos/babyweight-pipeline-hypertrain:latest',
arguments=[
preprocess.outputs['bucket']
],
file_outputs={'jobname': '/output.txt'}
)
else:
hparam_train = ObjectDict({
'outputs': {
'jobname': 'babyweight_181008_210829'
}
})
# Step 3: Train the model some more, but on the pipelines cluster itself
if start_step <= 3:
train_tuned = dsl.ContainerOp(
name='traintuned',
# image needs to be a compile-time string
image='gcr.io/cloud-training-demos/babyweight-pipeline-traintuned-trainer:latest',
#image='gcr.io/cloud-training-demos/babyweight-pipeline-traintuned-trainer@sha256:3d73c805430a16d0675aeafa9819d6d2cfbad0f0f34cff5fb9ed4e24493bc9a8',
arguments=[
hparam_train.outputs['jobname'],
bucket
],
file_outputs={'train': '/output.txt'}
)
train_tuned.set_memory_request('2G')
train_tuned.set_cpu_request('1')
else:
train_tuned = ObjectDict({
'outputs': {
'train': 'gs://cloud-training-demos-ml/babyweight/hyperparam/15'
}
})
# Step 4: Deploy the trained model to Cloud ML Engine
if start_step <= 4:
deploy_cmle = dsl.ContainerOp(
name='deploycmle',
# image needs to be a compile-time string
image='gcr.io/cloud-training-demos/babyweight-pipeline-deploycmle:latest',
arguments=[
train_tuned.outputs['train'], # modeldir
'babyweight',
'mlp'
],
file_outputs={
'model': '/model.txt',
'version': '/version.txt'
}
)
else:
deploy_cmle = ObjectDict({
'outputs': {
'model': 'babyweight',
'version': 'mlp'
}
})
# Step 5: Deploy the trained model to AppEngine
if start_step <= 5:
deploy_cmle = dsl.ContainerOp(
name='deployapp',
# image needs to be a compile-time string
image='gcr.io/cloud-training-demos/babyweight-pipeline-deployapp:latest',
arguments=[
deploy_cmle.outputs['model'],
deploy_cmle.outputs['version']
],
file_outputs={
'appurl': '/appurl.txt'
}
)
else:
deploy_cmle = ObjectDict({
'outputs': {
'appurl': 'https://cloud-training-demos.appspot.com/'
}
}) | [
"def",
"train_and_deploy",
"(",
"project",
"=",
"'cloud-training-demos'",
",",
"bucket",
"=",
"'cloud-training-demos-ml'",
",",
"startYear",
"=",
"'2000'",
")",
":",
"start_step",
"=",
"1",
"# Step 1: create training dataset using Apache Beam on Cloud Dataflow",
"if",
"start_step",
"<=",
"1",
":",
"preprocess",
"=",
"dsl",
".",
"ContainerOp",
"(",
"name",
"=",
"'preprocess'",
",",
"# image needs to be a compile-time string",
"image",
"=",
"'gcr.io/cloud-training-demos/babyweight-pipeline-bqtocsv:latest'",
",",
"arguments",
"=",
"[",
"'--project'",
",",
"project",
",",
"'--mode'",
",",
"'cloud'",
",",
"'--bucket'",
",",
"bucket",
",",
"'--start_year'",
",",
"startYear",
"]",
",",
"file_outputs",
"=",
"{",
"'bucket'",
":",
"'/output.txt'",
"}",
")",
"else",
":",
"preprocess",
"=",
"ObjectDict",
"(",
"{",
"'outputs'",
":",
"{",
"'bucket'",
":",
"bucket",
"}",
"}",
")",
"# Step 2: Do hyperparameter tuning of the model on Cloud ML Engine",
"if",
"start_step",
"<=",
"2",
":",
"hparam_train",
"=",
"dsl",
".",
"ContainerOp",
"(",
"name",
"=",
"'hypertrain'",
",",
"# image needs to be a compile-time string",
"image",
"=",
"'gcr.io/cloud-training-demos/babyweight-pipeline-hypertrain:latest'",
",",
"arguments",
"=",
"[",
"preprocess",
".",
"outputs",
"[",
"'bucket'",
"]",
"]",
",",
"file_outputs",
"=",
"{",
"'jobname'",
":",
"'/output.txt'",
"}",
")",
"else",
":",
"hparam_train",
"=",
"ObjectDict",
"(",
"{",
"'outputs'",
":",
"{",
"'jobname'",
":",
"'babyweight_181008_210829'",
"}",
"}",
")",
"# Step 3: Train the model some more, but on the pipelines cluster itself",
"if",
"start_step",
"<=",
"3",
":",
"train_tuned",
"=",
"dsl",
".",
"ContainerOp",
"(",
"name",
"=",
"'traintuned'",
",",
"# image needs to be a compile-time string",
"image",
"=",
"'gcr.io/cloud-training-demos/babyweight-pipeline-traintuned-trainer:latest'",
",",
"#image='gcr.io/cloud-training-demos/babyweight-pipeline-traintuned-trainer@sha256:3d73c805430a16d0675aeafa9819d6d2cfbad0f0f34cff5fb9ed4e24493bc9a8',",
"arguments",
"=",
"[",
"hparam_train",
".",
"outputs",
"[",
"'jobname'",
"]",
",",
"bucket",
"]",
",",
"file_outputs",
"=",
"{",
"'train'",
":",
"'/output.txt'",
"}",
")",
"train_tuned",
".",
"set_memory_request",
"(",
"'2G'",
")",
"train_tuned",
".",
"set_cpu_request",
"(",
"'1'",
")",
"else",
":",
"train_tuned",
"=",
"ObjectDict",
"(",
"{",
"'outputs'",
":",
"{",
"'train'",
":",
"'gs://cloud-training-demos-ml/babyweight/hyperparam/15'",
"}",
"}",
")",
"# Step 4: Deploy the trained model to Cloud ML Engine",
"if",
"start_step",
"<=",
"4",
":",
"deploy_cmle",
"=",
"dsl",
".",
"ContainerOp",
"(",
"name",
"=",
"'deploycmle'",
",",
"# image needs to be a compile-time string",
"image",
"=",
"'gcr.io/cloud-training-demos/babyweight-pipeline-deploycmle:latest'",
",",
"arguments",
"=",
"[",
"train_tuned",
".",
"outputs",
"[",
"'train'",
"]",
",",
"# modeldir",
"'babyweight'",
",",
"'mlp'",
"]",
",",
"file_outputs",
"=",
"{",
"'model'",
":",
"'/model.txt'",
",",
"'version'",
":",
"'/version.txt'",
"}",
")",
"else",
":",
"deploy_cmle",
"=",
"ObjectDict",
"(",
"{",
"'outputs'",
":",
"{",
"'model'",
":",
"'babyweight'",
",",
"'version'",
":",
"'mlp'",
"}",
"}",
")",
"# Step 5: Deploy the trained model to AppEngine",
"if",
"start_step",
"<=",
"5",
":",
"deploy_cmle",
"=",
"dsl",
".",
"ContainerOp",
"(",
"name",
"=",
"'deployapp'",
",",
"# image needs to be a compile-time string",
"image",
"=",
"'gcr.io/cloud-training-demos/babyweight-pipeline-deployapp:latest'",
",",
"arguments",
"=",
"[",
"deploy_cmle",
".",
"outputs",
"[",
"'model'",
"]",
",",
"deploy_cmle",
".",
"outputs",
"[",
"'version'",
"]",
"]",
",",
"file_outputs",
"=",
"{",
"'appurl'",
":",
"'/appurl.txt'",
"}",
")",
"else",
":",
"deploy_cmle",
"=",
"ObjectDict",
"(",
"{",
"'outputs'",
":",
"{",
"'appurl'",
":",
"'https://cloud-training-demos.appspot.com/'",
"}",
"}",
")"
] | [
30,
0
] | [
143,
6
] | python | en | ['en', 'en', 'en'] | True |
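A pipeline function like this is normally compiled into an archive for upload to the Kubeflow Pipelines UI. A sketch, assuming the era's kfp SDK and that train_and_deploy carries the usual @dsl.pipeline decorator:

import kfp.compiler as compiler

compiler.Compiler().compile(train_and_deploy, 'train_and_deploy.tar.gz')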
xclProbe | () |
xclProbe() - Enumerate devices found in the system
:return: count of devices found
|
xclProbe() - Enumerate devices found in the system
:return: count of devices found
| def xclProbe():
"""
xclProbe() - Enumerate devices found in the system
:return: count of devices found
"""
return libc.xclProbe() | [
"def",
"xclProbe",
"(",
")",
":",
"return",
"libc",
".",
"xclProbe",
"(",
")"
] | [
157,
0
] | [
162,
26
] | python | en | ['en', 'error', 'th'] | False |
xclVersion | () |
:return: the version number. 1 => Hal1 ; 2 => Hal2
|
:return: the version number. 1 => Hal1 ; 2 => Hal2
| def xclVersion():
"""
:return: the version number. 1 => Hal1 ; 2 => Hal2
"""
return libc.xclVersion() | [
"def",
"xclVersion",
"(",
")",
":",
"return",
"libc",
".",
"xclVersion",
"(",
")"
] | [
164,
0
] | [
168,
28
] | python | en | ['en', 'error', 'th'] | False |
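Both wrappers assume a module-level `libc` handle opened with ctypes. A plausible setup sketch (the shared-library name follows XRT conventions but is an assumption here):

import ctypes

# Hypothetical load of the XRT core library these bindings call into.
libc = ctypes.CDLL('libxrt_core.so')
libc.xclProbe.restype = ctypes.c_uint
libc.xclVersion.restype = ctypes.c_uint

print(xclProbe(), xclVersion())  # device count, HAL version number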