One row per extracted Python function, with the following columns:

| column | type |
|---|---|
| identifier | string (length 1–155) |
| parameters | string (length 2–6.09k) |
| docstring | string (length 11–63.4k) |
| docstring_summary | string (length 0–63.4k) |
| function | string (length 29–99.8k) |
| function_tokens | sequence (the function split into lexical tokens) |
| start_point | sequence (line, column where the function starts) |
| end_point | sequence (line, column where the function ends) |
| language | string (1 class: python) |
| docstring_language | string (length 2–7) |
| docstring_language_predictions | string (length 18–23) |
| is_langid_reliable | string (2 classes: True/False) |

---

**`Base.from_decimal`** `(self, n, base=10)` — Input: base10 integer. Output: base2-64 string.

```python
def from_decimal(self, n, base=10):
    "Input: base10 integer. Output: base2-64 string."
    try:
        n = int(n)
    except (ValueError, TypeError):
        return "NaN"
    if base < 2 or base > 64:
        return "N/A"
    basecases = "0123456789" + self.alphabet
    if 63 <= base <= 64:
        basecases = self.alphabet + "0123456789+/"
    if n < base:
        return basecases[n]
    encoded = []
    while n:
        remainder, n = n % base, n // base
        encoded.insert(0, basecases[remainder])
    return "".join(encoded)
```

start_point: [54, 4] · end_point: [74, 31] · language: python · docstring_language: da · docstring_language_predictions: ['de', 'da', 'en'] · is_langid_reliable: False

---

**`Base.to_decimal`** `(self, s, base=10)` — Input: base2-64 string. Output: base10 integer.

```python
def to_decimal(self, s, base=10):
    "Input: base2-64 string. Output: base10 integer."
    try:
        s = str(s)
    except (ValueError, TypeError):
        return "NaN"
    if base < 2 or base > 64:
        return "N/A"
    basecases = "0123456789" + self.alphabet
    if 63 <= base <= 64:
        basecases = self.alphabet + "0123456789+/"
    basecases = basecases[:base]
    slen = len(s)
    n, idx = 0, 0
    # Allow lowercase letters for base11-36 (single alphabet).
    if base <= 36:
        s = s.upper()
    for c in s:
        if c not in basecases:
            return "NaN"
        power = slen - (idx + 1)
        n += basecases.index(c) * (base ** power)
        idx += 1
    return n
```

start_point: [76, 4] · end_point: [102, 16] · language: python · docstring_language: da · docstring_language_predictions: ['de', 'da', 'en'] · is_langid_reliable: False
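
The two methods above are inverses over a shared symbol table. `self.alphabet` is not captured in these rows, so the sketch below reimplements the same positional-base arithmetic standalone, assuming the 52 ASCII letters as that alphabet (10 digits + 52 letters gives the 62 symbols the `basecases[:base]` slicing implies).

```python
import string

# Assumed symbol table; Base.alphabet itself is not part of this dump.
DIGITS = "0123456789" + string.ascii_uppercase + string.ascii_lowercase

def to_base(n, base):
    # Same repeated divmod loop as from_decimal above.
    if n < base:
        return DIGITS[n]
    out = []
    while n:
        n, r = divmod(n, base)
        out.append(DIGITS[r])
    return "".join(reversed(out))

def from_base(s, base):
    # Same positional accumulation as to_decimal above.
    n = 0
    for c in s:
        n = n * base + DIGITS.index(c)
    return n

assert to_base(255, 16) == "FF"
assert from_base(to_base(123456, 36), 36) == 123456
```

With base 16 this reproduces ordinary hex; the dataset's methods additionally special-case bases 63–64 to the base64-style ordering `A–Z a–z 0–9 + /` (given the assumed alphabet).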

---

**`Data.kilobytes`** `(self)` — 1000 bytes, kB or KB

```python
def kilobytes(self):
    "1000 bytes, kB or KB"
    return format_num(self._bytes / 1000, self.decplaces)
```

start_point: [129, 4] · end_point: [131, 61] · language: python · docstring_language: en · docstring_language_predictions: ['en', 'hmn', 'en'] · is_langid_reliable: True

---

**`Data.megabytes`** `(self)` — 1000^2 bytes, MB

```python
def megabytes(self):
    "1000^2 bytes, MB"
    return format_num(self._bytes / (1000 ** 2), self.decplaces)
```

start_point: [138, 4] · end_point: [140, 68] · language: python · docstring_language: en · docstring_language_predictions: ['en', 'hu', 'hi'] · is_langid_reliable: False

---

**`Data.gigabytes`** `(self)` — 1000^3 bytes, GB

```python
def gigabytes(self):
    "1000^3 bytes, GB"
    return format_num(self._bytes / (1000 ** 3), self.decplaces)
```

start_point: [147, 4] · end_point: [149, 68] · language: python · docstring_language: en · docstring_language_predictions: ['en', 'uz', 'en'] · is_langid_reliable: True

---

**`Data.terrabytes`** `(self)` — 1000^4 bytes, TB

```python
def terrabytes(self):
    "1000^4 bytes, TB"
    return format_num(self._bytes / (1000 ** 4), self.decplaces)
```

start_point: [156, 4] · end_point: [158, 68] · language: python · docstring_language: en · docstring_language_predictions: ['en', 'hu', 'hi'] · is_langid_reliable: False

---

**`Data.petabytes`** `(self)` — 1000^5 bytes, PB

```python
def petabytes(self):
    "1000^5 bytes, PB"
    return format_num(self._bytes / (1000 ** 5), self.decplaces)
```

start_point: [165, 4] · end_point: [167, 68] · language: python · docstring_language: hi · docstring_language_predictions: ['nl', 'uz', 'hi'] · is_langid_reliable: False

---

**`Data.kibibytes`** `(self)` — 1024 bytes, KiB or KB

```python
def kibibytes(self):
    "1024 bytes, KiB or KB"
    return format_num(self._bytes / 1024, self.decplaces)
```

start_point: [174, 4] · end_point: [176, 61] · language: python · docstring_language: en · docstring_language_predictions: ['en', 'hmn', 'hi'] · is_langid_reliable: False

---

**`Data.mebibytes`** `(self)` — 1024^2 bytes, MiB

```python
def mebibytes(self):
    "1024^2 bytes, MiB"
    return format_num(self._bytes / (1024 ** 2), self.decplaces)
```

start_point: [183, 4] · end_point: [185, 68] · language: python · docstring_language: en · docstring_language_predictions: ['en', 'uz', 'hi'] · is_langid_reliable: False

---

**`Data.gibibytes`** `(self)` — 1024^3 bytes, GiB

```python
def gibibytes(self):
    "1024^3 bytes, GiB"
    return format_num(self._bytes / (1024 ** 3), self.decplaces)
```

start_point: [192, 4] · end_point: [194, 68] · language: python · docstring_language: en · docstring_language_predictions: ['en', 'ru-Latn', 'hi'] · is_langid_reliable: False

---

**`Data.tebibytes`** `(self)` — 1024^4 bytes, TiB

```python
def tebibytes(self):
    "1024^4 bytes, TiB"
    return format_num(self._bytes / (1024 ** 4), self.decplaces)
```

start_point: [201, 4] · end_point: [203, 68] · language: python · docstring_language: en · docstring_language_predictions: ['en', 'uz', 'hi'] · is_langid_reliable: False

---

**`Data.pebibytes`** `(self)` — 1024^5 bytes, PiB

```python
def pebibytes(self):
    "1024^5 bytes, PiB"
    return format_num(self._bytes / (1024 ** 5), self.decplaces)
```

start_point: [210, 4] · end_point: [212, 68] · language: python · docstring_language: en · docstring_language_predictions: ['en', 'uk', 'hi'] · is_langid_reliable: False
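
Every `Data` property above applies one pattern: divide `self._bytes` by a decimal (1000^k) or binary (1024^k) power and format via `format_num`. Neither `format_num`, the constructor, nor any decorators are captured in these rows, so the following is only a sketch; the rounding helper, the constructor signature, and the `@property` decorators are all assumptions.

```python
def format_num(value, decplaces):
    # Assumption: format_num is not shown in this dump; plain rounding
    # to `decplaces` decimal places fits the call sites above.
    return round(value, decplaces)

class Data:
    def __init__(self, nbytes, decplaces=2):  # assumed constructor
        self._bytes = nbytes
        self.decplaces = decplaces

    @property  # assumed; the rows capture only the method bodies
    def megabytes(self):
        "1000^2 bytes, MB"
        return format_num(self._bytes / (1000 ** 2), self.decplaces)

    @property
    def mebibytes(self):
        "1024^2 bytes, MiB"
        return format_num(self._bytes / (1024 ** 2), self.decplaces)

d = Data(5_000_000)
print(d.megabytes, d.mebibytes)  # 5.0 4.77
```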

---

**`init_logger`** `(logpath=None, loglevel=1, quiet=False)` — Initializes the logging module.

```python
def init_logger(logpath=None, loglevel=1, quiet=False):
    "Initializes the logging module."
    logger = logging.getLogger()
    # Set the loglevel.
    if loglevel > 3:
        loglevel = 3  # Cap at 3 to avoid index errors.
    levels = [logging.ERROR, logging.WARN, logging.INFO, logging.DEBUG]
    logger.setLevel(levels[loglevel])
    logformat = "%(asctime)-14s %(levelname)-8s %(name)-8s %(message)s"
    formatter = logging.Formatter(logformat, "%Y-%m-%d %H:%M:%S")
    if not quiet:
        console_handler = logging.StreamHandler(sys.stdout)
        console_handler.setFormatter(formatter)
        logger.addHandler(console_handler)
        log.debug("Added logging console handler.")
        log.info("Loglevel is {}.".format(levels[loglevel]))
    if logpath:
        try:
            logfile = os.path.abspath(logpath)
            file_handler = logging.FileHandler(logfile)
            file_handler.setFormatter(formatter)
            logger.addHandler(file_handler)
            log.debug("Added logging file handler: {}.".format(logfile))
        except IOError:
            log.error("Could not attach file handler.")
```

start_point: [286, 0] · end_point: [314, 55] · language: python · docstring_language: en · docstring_language_predictions: ['en', 'en', 'en'] · is_langid_reliable: True
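
A usage sketch for the function above; it assumes `init_logger` is importable from the module and that `log` is the module-level logger the function writes through (the `log` name is referenced in the dump but never defined in it).

```python
import logging

log = logging.getLogger("blockify")  # assumed module-level logger

# loglevel 3 maps to logging.DEBUG via the levels list above.
init_logger(logpath="/tmp/blockify.log", loglevel=3, quiet=False)
log.debug("Goes to stdout and to /tmp/blockify.log at DEBUG level.")
```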

---

**`main`** `()` — Entry point for the CLI-version of Blockify.

```python
def main():
    "Entry point for the CLI-version of Blockify."
    try:
        args = docopt(__doc__, version="1.0")
        init_logger(args["--log"], args["-v"], args["--quiet"])
    except NameError:
        init_logger(logpath=None, loglevel=2, quiet=False)
        log.error("Please install docopt to use the CLI.")
    blocklist = Blocklist()
    blockify = Blockify(blocklist)
    blockify.bind_signals()
    blockify.toggle_mute()
    while True:
        # Initiate gtk loop to enable the window list for .get_windows().
        while gtk.events_pending():
            gtk.main_iteration(False)
        blockify.update()
        time.sleep(0.5)
```

start_point: [317, 0] · end_point: [337, 23] · language: python · docstring_language: en · docstring_language_predictions: ['en', 'en', 'en'] · is_langid_reliable: True

---

**`Blocklist.append`** `(self, item)` — Overloading list.append to automatically save the list to a file.

```python
def append(self, item):
    "Overloading list.append to automatically save the list to a file."
    # Only allow nonempty strings.
    if item in self or not item or item == " ":
        log.debug("Not adding empty or duplicate item: {}.".format(item))
        return
    log.info("Adding {} to {}.".format(item, self.location))
    super(Blocklist, self).append(item)
    self.save()
```

start_point: [51, 4] · end_point: [59, 19] · language: python · docstring_language: en · docstring_language_predictions: ['en', 'en', 'en'] · is_langid_reliable: True

---

**`Blockify.update`** `(self)` — Main loop. Checks for blocklist match and mutes accordingly.

```python
def update(self):
    "Main loop. Checks for blocklist match and mutes accordingly."
    # It all relies on current_song.
    self.current_song = self.get_current_song()
    if not self.current_song or not self.automute:
        return
    # Check if the blockfile has changed.
    current_timestamp = self.blocklist.get_timestamp()
    if self.blocklist.timestamp != current_timestamp:
        log.info("Blockfile changed. Reloading.")
        self.blocklist.__init__()
    for i in self.blocklist:
        if i in self.current_song:
            self.toggle_mute(True)
            return True  # Return boolean to use as self.found in GUI.
    else:
        self.toggle_mute()
    return False
```

start_point: [119, 4] · end_point: [140, 20] · language: python · docstring_language: en · docstring_language_predictions: ['en', 'en', 'en'] · is_langid_reliable: True

---

**`Blockify.get_windows`** `(self)` — Libwnck list of currently open windows.

```python
def get_windows(self):
    "Libwnck list of currently open windows."
    # Get the current screen.
    screen = wnck.screen_get_default()
    # Object list of windows in screen.
    windows = screen.get_windows()
    # Return the actual list of windows or an empty list.
    return [win.get_icon_name() for win in windows if len(windows)]
```

start_point: [142, 4] · end_point: [151, 71] · language: python · docstring_language: en · docstring_language_predictions: ['en', 'en', 'en'] · is_langid_reliable: True

---

**`Blockify.get_current_song`** `(self)` — Checks if a Spotify window exists and returns the current songname.

```python
def get_current_song(self):
    "Checks if a Spotify window exists and returns the current songname."
    pipe = self.get_windows()
    for line in pipe:
        if line.startswith("Spotify - "):
            # Remove "Spotify - " and return the rest of the songname.
            return " ".join(line.split()[2:])
    # No song playing, so return an empty string.
    return ""
```

start_point: [153, 4] · end_point: [162, 17] · language: python · docstring_language: en · docstring_language_predictions: ['en', 'en', 'en'] · is_langid_reliable: True
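
The `" ".join(line.split()[2:])` step above drops the first two whitespace-delimited fields (`Spotify` and the separating `-`); a quick self-contained check:

```python
line = "Spotify - Artist Name - Song Title"
assert line.startswith("Spotify - ")
print(" ".join(line.split()[2:]))  # Artist Name - Song Title
```

Note that the split/join round trip collapses any run of whitespace inside the title to a single space.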

---

**`Blockify.alsa_mute`** `(self, force)` — Mute method for systems without Pulseaudio. Mutes sound system-wide.

```python
def alsa_mute(self, force):
    "Mute method for systems without Pulseaudio. Mutes sound system-wide."
    state = self.get_state(force)
    if not state:
        return
    for channel in self.channels:
        subprocess.Popen(["amixer", "-q", "set", channel, state])
```

start_point: [210, 4] · end_point: [217, 69] · language: python · docstring_language: en · docstring_language_predictions: ['en', 'en', 'en'] · is_langid_reliable: True

---

**`Blockify.pulse_mute`** `(self, force)` — Used if pulseaudio is installed but no sinks are found. System-wide.

```python
def pulse_mute(self, force):
    "Used if pulseaudio is installed but no sinks are found. System-wide."
    state = self.get_state(force)
    if not state:
        return
    for channel in self.channels:
        subprocess.Popen(["amixer", "-qD", "pulse", "set", channel, state])
```

start_point: [219, 4] · end_point: [226, 79] · language: python · docstring_language: en · docstring_language_predictions: ['en', 'en', 'en'] · is_langid_reliable: True

---

**`Blockify.pulsesink_mute`** `(self, force)` — Finds spotify's audio sink and toggles its mute state.

```python
def pulsesink_mute(self, force):
    "Finds spotify's audio sink and toggles its mute state."
    try:
        pacmd_out = subprocess.check_output(["pacmd", "list-sink-inputs"])
        pidof_out = subprocess.check_output(["pidof", "spotify"])
    except subprocess.CalledProcessError:
        log.error("Sink or process not found. Is Pulse/Spotify running?")
        log.error("Resorting to amixer as mute method.")
        self.mute_mode = "pulse"  # Fall back to amixer mute mode.
        return
    pattern = re.compile(r"(?: index|muted|application\.process\.id).*?(\w+)")
    pids = pidof_out.decode("utf-8").strip().split(" ")
    output = pacmd_out.decode("utf-8")
    # Every third element is a key, the value is the preceding two
    # elements in the form of a tuple - {pid : (index, muted)}
    info = pattern.findall(output)
    idxd = {info[3 * n + 2]: (info[3 * n], info[3 * n + 1])
            for n in range(len(info) // 3)}
    try:
        pid = [k for k in idxd.keys() if k in pids][0]
        index, muted = idxd[pid]
    except IndexError:
        return
    if muted == "no" and force:
        log.info("Muting {}.".format(self.current_song))
        subprocess.call(["pacmd", "set-sink-input-mute", index, "1"])
    elif muted == "yes" and not force:
        log.info("Unmuting.")
        subprocess.call(["pacmd", "set-sink-input-mute", index, "0"])
```

start_point: [235, 4] · end_point: [267, 73] · language: python · docstring_language: en · docstring_language_predictions: ['en', 'en', 'en'] · is_langid_reliable: True
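
The regex above pulls three fields per sink input out of `pacmd list-sink-inputs`, in the order index, muted, process id, which is what the `{info[3n+2]: (info[3n], info[3n+1])}` grouping relies on. A self-contained check against an illustrative fragment (not captured from a real PulseAudio run):

```python
import re

pattern = re.compile(r"(?: index|muted|application\.process\.id).*?(\w+)")
sample = (
    "    index: 7\n"
    "        muted: no\n"
    '        application.process.id = "12345"\n'
)
print(pattern.findall(sample))  # ['7', 'no', '12345']
```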

---

**`Blockify.bind_signals`** `(self)` — Catch SIGINT and SIGTERM to exit cleanly & SIGUSR1 to block a song.

```python
def bind_signals(self):
    "Catch SIGINT and SIGTERM to exit cleanly & SIGUSR1 to block a song."
    signal.signal(signal.SIGUSR1, lambda sig, hdl: self.block_current())
    signal.signal(signal.SIGUSR2, lambda sig, hdl: self.unblock_current())
    signal.signal(signal.SIGTERM, lambda sig, hdl: self.stop())
    signal.signal(signal.SIGINT, lambda sig, hdl: self.stop())
```

start_point: [269, 4] · end_point: [274, 66] · language: python · docstring_language: en · docstring_language_predictions: ['en', 'en', 'en'] · is_langid_reliable: True
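
A minimal POSIX-only sketch of the same binding pattern; the lambdas exist to swallow the `(signum, frame)` pair that the `signal` module passes to every handler.

```python
import os
import signal
import time

signal.signal(signal.SIGUSR1, lambda sig, hdl: print("SIGUSR1: block current song"))
os.kill(os.getpid(), signal.SIGUSR1)  # deliver the signal to this process
time.sleep(0.1)  # give the interpreter a checkpoint to run the handler
```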

---

**`render_value_in_context`** `(value, context)` — Convert any value to a string to become part of a rendered template.

```python
def render_value_in_context(value, context):
    """
    Convert any value to a string to become part of a rendered template. This
    means escaping, if required, and conversion to a string. If value is a
    string, it's expected to already be translated.
    """
    value = template_localtime(value, use_tz=context.use_tz)
    value = localize(value, use_l10n=context.use_l10n)
    if context.autoescape:
        if not issubclass(type(value), str):
            value = str(value)
        return conditional_escape(value)
    else:
        return str(value)
```

start_point: [962, 0] · end_point: [975, 25] · language: python · docstring_language: en · docstring_language_predictions: ['en', 'error', 'th'] · is_langid_reliable: False
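
The autoescape branch hinges on `conditional_escape`, which escapes plain strings but passes `SafeData` through untouched; a short check, assuming Django is installed:

```python
from django.utils.html import conditional_escape
from django.utils.safestring import mark_safe

print(conditional_escape("<b>bold</b>"))             # &lt;b&gt;bold&lt;/b&gt;
print(conditional_escape(mark_safe("<b>bold</b>")))  # <b>bold</b>
```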

---

**`token_kwargs`** `(bits, parser, support_legacy=False)` — Parse token keyword arguments and return a dictionary of the arguments retrieved from the ``bits`` token list.

```python
def token_kwargs(bits, parser, support_legacy=False):
    """
    Parse token keyword arguments and return a dictionary of the arguments
    retrieved from the ``bits`` token list.

    `bits` is a list containing the remainder of the token (split by spaces)
    that is to be checked for arguments. Valid arguments are removed from this
    list.

    `support_legacy` - if True, the legacy format ``1 as foo`` is accepted.
    Otherwise, only the standard ``foo=1`` format is allowed.

    There is no requirement for all remaining token ``bits`` to be keyword
    arguments, so return the dictionary as soon as an invalid argument format
    is reached.
    """
    if not bits:
        return {}
    match = kwarg_re.match(bits[0])
    kwarg_format = match and match[1]
    if not kwarg_format:
        if not support_legacy:
            return {}
        if len(bits) < 3 or bits[1] != 'as':
            return {}
    kwargs = {}
    while bits:
        if kwarg_format:
            match = kwarg_re.match(bits[0])
            if not match or not match[1]:
                return kwargs
            key, value = match.groups()
            del bits[:1]
        else:
            if len(bits) < 3 or bits[1] != 'as':
                return kwargs
            key, value = bits[2], bits[0]
            del bits[:3]
        kwargs[key] = parser.compile_filter(value)
        if bits and not kwarg_format:
            if bits[0] != 'and':
                return kwargs
            del bits[:1]
    return kwargs
```

start_point: [1000, 0] · end_point: [1044, 17] · language: python · docstring_language: en · docstring_language_predictions: ['en', 'error', 'th'] · is_langid_reliable: False
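
`kwarg_re` is referenced above but not captured in this dump. In Django's source it is approximately the pattern below, which makes the `match[1]` test a check for a `name=` prefix:

```python
import re

kwarg_re = re.compile(r"(?:(\w+)=)?(.+)")  # approximation; not from this dump

print(kwarg_re.match("greeting='hi'").groups())  # ('greeting', "'hi'")
print(kwarg_re.match("'hi'").groups())           # (None, "'hi'")
```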

---

**`Template.render`** `(self, context)` — Display stage -- can be called many times

```python
def render(self, context):
    "Display stage -- can be called many times"
    with context.render_context.push_state(self):
        if context.template is None:
            with context.bind_template(self):
                context.template_name = self.name
                return self._render(context)
        else:
            return self._render(context)
```

start_point: [163, 4] · end_point: [171, 44] · language: python · docstring_language: en · docstring_language_predictions: ['en', 'en', 'en'] · is_langid_reliable: True

---

**`Template.compile_nodelist`** `(self)` — Parse and compile the template source into a nodelist.

```python
def compile_nodelist(self):
    """
    Parse and compile the template source into a nodelist. If debug
    is True and an exception occurs during parsing, the exception is
    annotated with contextual line information where it occurred in the
    template source.
    """
    if self.engine.debug:
        lexer = DebugLexer(self.source)
    else:
        lexer = Lexer(self.source)

    tokens = lexer.tokenize()
    parser = Parser(
        tokens, self.engine.template_libraries, self.engine.template_builtins,
        self.origin,
    )

    try:
        return parser.parse()
    except Exception as e:
        if self.engine.debug:
            e.template_debug = self.get_exception_info(e, e.token)
        raise
```

start_point: [173, 4] · end_point: [196, 17] · language: python · docstring_language: en · docstring_language_predictions: ['en', 'error', 'th'] · is_langid_reliable: False

---

**`Template.get_exception_info`** `(self, exception, token)` — Return a dictionary containing contextual line information of where the exception occurred in the template.

```python
def get_exception_info(self, exception, token):
    """
    Return a dictionary containing contextual line information of where
    the exception occurred in the template. The following information is
    provided:

    message
        The message of the exception raised.

    source_lines
        The lines before, after, and including the line the exception
        occurred on.

    line
        The line number the exception occurred on.

    before, during, after
        The line the exception occurred on split into three parts:
        1. The content before the token that raised the error.
        2. The token that raised the error.
        3. The content after the token that raised the error.

    total
        The number of lines in source_lines.

    top
        The line number where source_lines starts.

    bottom
        The line number where source_lines ends.

    start
        The start position of the token in the template source.

    end
        The end position of the token in the template source.
    """
    start, end = token.position
    context_lines = 10
    line = 0
    upto = 0
    source_lines = []
    before = during = after = ""
    for num, next in enumerate(linebreak_iter(self.source)):
        if start >= upto and end <= next:
            line = num
            before = escape(self.source[upto:start])
            during = escape(self.source[start:end])
            after = escape(self.source[end:next])
        source_lines.append((num, escape(self.source[upto:next])))
        upto = next
    total = len(source_lines)

    top = max(1, line - context_lines)
    bottom = min(total, line + 1 + context_lines)

    # In some rare cases exc_value.args can be empty or an invalid
    # string.
    try:
        message = str(exception.args[0])
    except (IndexError, UnicodeDecodeError):
        message = '(Could not get exception message)'

    return {
        'message': message,
        'source_lines': source_lines[top:bottom],
        'before': before,
        'during': during,
        'after': after,
        'top': top,
        'bottom': bottom,
        'total': total,
        'line': line,
        'name': self.origin.name,
        'start': start,
        'end': end,
    }
```

start_point: [198, 4] · end_point: [274, 9] · language: python · docstring_language: en · docstring_language_predictions: ['en', 'error', 'th'] · is_langid_reliable: False

---

**`Token.__init__`** `(self, token_type, contents, position=None, lineno=None)` — A token representing a string from the template.

```python
def __init__(self, token_type, contents, position=None, lineno=None):
    """
    A token representing a string from the template.

    token_type
        A TokenType, either .TEXT, .VAR, .BLOCK, or .COMMENT.

    contents
        The token source string.

    position
        An optional tuple containing the start and end index of the token
        in the template source. This is used for traceback information
        when debug is on.

    lineno
        The line number the token appears on in the template source.
        This is used for traceback information and gettext files.
    """
    self.token_type, self.contents = token_type, contents
    self.lineno = lineno
    self.position = position
```

start_point: [287, 4] · end_point: [308, 32] · language: python · docstring_language: en · docstring_language_predictions: ['en', 'error', 'th'] · is_langid_reliable: False

---

**`Lexer.tokenize`** `(self)` — Return a list of tokens from a given template_string.

```python
def tokenize(self):
    """
    Return a list of tokens from a given template_string.
    """
    in_tag = False
    lineno = 1
    result = []
    for bit in tag_re.split(self.template_string):
        if bit:
            result.append(self.create_token(bit, None, lineno, in_tag))
        in_tag = not in_tag
        lineno += bit.count('\n')
    return result
```

start_point: [336, 4] · end_point: [348, 21] · language: python · docstring_language: en · docstring_language_predictions: ['en', 'error', 'th'] · is_langid_reliable: False
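
A usage sketch, assuming Django is installed. `tag_re.split` interleaves literal text with tag matches, which is why `in_tag` simply flips on every bit:

```python
from django.template.base import Lexer

for token in Lexer("Hello {{ name }}!{# note #}").tokenize():
    print(token.token_type, repr(token.contents))
# TokenType.TEXT 'Hello '
# TokenType.VAR 'name'
# TokenType.TEXT '!'
# TokenType.COMMENT 'note'
```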

---

**`Lexer.create_token`** `(self, token_string, position, lineno, in_tag)` — Convert the given token string into a new Token object and return it.

```python
def create_token(self, token_string, position, lineno, in_tag):
    """
    Convert the given token string into a new Token object and return it.

    If in_tag is True, we are processing something that matched a tag,
    otherwise it should be treated as a literal string.
    """
    if in_tag and token_string.startswith(BLOCK_TAG_START):
        # The [2:-2] ranges below strip off *_TAG_START and *_TAG_END.
        # We could do len(BLOCK_TAG_START) to be more "correct", but we've
        # hard-coded the 2s here for performance. And it's not like
        # the TAG_START values are going to change anytime, anyway.
        block_content = token_string[2:-2].strip()
        if self.verbatim and block_content == self.verbatim:
            self.verbatim = False
    if in_tag and not self.verbatim:
        if token_string.startswith(VARIABLE_TAG_START):
            return Token(TokenType.VAR, token_string[2:-2].strip(), position, lineno)
        elif token_string.startswith(BLOCK_TAG_START):
            if block_content[:9] in ('verbatim', 'verbatim '):
                self.verbatim = 'end%s' % block_content
            return Token(TokenType.BLOCK, block_content, position, lineno)
        elif token_string.startswith(COMMENT_TAG_START):
            content = ''
            if token_string.find(TRANSLATOR_COMMENT_MARK):
                content = token_string[2:-2].strip()
            return Token(TokenType.COMMENT, content, position, lineno)
    else:
        return Token(TokenType.TEXT, token_string, position, lineno)
```

start_point: [350, 4] · end_point: [377, 72] · language: python · docstring_language: en · docstring_language_predictions: ['en', 'error', 'th'] · is_langid_reliable: False

---

**`DebugLexer.tokenize`** `(self)` — Split a template string into tokens and annotates each token with its start and end position in the source.

```python
def tokenize(self):
    """
    Split a template string into tokens and annotates each token with its
    start and end position in the source. This is slower than the default
    lexer so only use it when debug is True.
    """
    lineno = 1
    result = []
    upto = 0
    for match in tag_re.finditer(self.template_string):
        start, end = match.span()
        if start > upto:
            token_string = self.template_string[upto:start]
            result.append(self.create_token(token_string, (upto, start), lineno, in_tag=False))
            lineno += token_string.count('\n')
        token_string = self.template_string[start:end]
        result.append(self.create_token(token_string, (start, end), lineno, in_tag=True))
        lineno += token_string.count('\n')
        upto = end
    last_bit = self.template_string[upto:]
    if last_bit:
        result.append(self.create_token(last_bit, (upto, upto + len(last_bit)), lineno, in_tag=False))
    return result
```

start_point: [381, 4] · end_point: [403, 21] · language: python · docstring_language: en · docstring_language_predictions: ['en', 'error', 'th'] · is_langid_reliable: False

---

**`Parser.parse`** `(self, parse_until=None)` — Iterate through the parser tokens and compiles each one into a node.

```python
def parse(self, parse_until=None):
    """
    Iterate through the parser tokens and compiles each one into a node.

    If parse_until is provided, parsing will stop once one of the
    specified tokens has been reached. This is formatted as a list of
    tokens, e.g. ['elif', 'else', 'endif']. If no matching token is
    reached, raise an exception with the unclosed block tag details.
    """
    if parse_until is None:
        parse_until = []
    nodelist = NodeList()
    while self.tokens:
        token = self.next_token()
        # Use the raw values here for TokenType.* for a tiny performance boost.
        if token.token_type.value == 0:  # TokenType.TEXT
            self.extend_nodelist(nodelist, TextNode(token.contents), token)
        elif token.token_type.value == 1:  # TokenType.VAR
            if not token.contents:
                raise self.error(token, 'Empty variable tag on line %d' % token.lineno)
            try:
                filter_expression = self.compile_filter(token.contents)
            except TemplateSyntaxError as e:
                raise self.error(token, e)
            var_node = VariableNode(filter_expression)
            self.extend_nodelist(nodelist, var_node, token)
        elif token.token_type.value == 2:  # TokenType.BLOCK
            try:
                command = token.contents.split()[0]
            except IndexError:
                raise self.error(token, 'Empty block tag on line %d' % token.lineno)
            if command in parse_until:
                # A matching token has been reached. Return control to
                # the caller. Put the token back on the token list so the
                # caller knows where it terminated.
                self.prepend_token(token)
                return nodelist
            # Add the token to the command stack. This is used for error
            # messages if further parsing fails due to an unclosed block
            # tag.
            self.command_stack.append((command, token))
            # Get the tag callback function from the ones registered with
            # the parser.
            try:
                compile_func = self.tags[command]
            except KeyError:
                self.invalid_block_tag(token, command, parse_until)
            # Compile the callback into a node object and add it to
            # the node list.
            try:
                compiled_result = compile_func(self, token)
            except Exception as e:
                raise self.error(token, e)
            self.extend_nodelist(nodelist, compiled_result, token)
            # Compile success. Remove the token from the command stack.
            self.command_stack.pop()
    if parse_until:
        self.unclosed_block_tag(parse_until)
    return nodelist
```

start_point: [425, 4] · end_point: [483, 23] · language: python · docstring_language: en · docstring_language_predictions: ['en', 'error', 'th'] · is_langid_reliable: False

---

**`Parser.error`** `(self, token, e)` — Return an exception annotated with the originating token.

```python
def error(self, token, e):
    """
    Return an exception annotated with the originating token. Since the
    parser can be called recursively, check if a token is already set. This
    ensures the innermost token is highlighted if an exception occurs,
    e.g. a compile error within the body of an if statement.
    """
    if not isinstance(e, Exception):
        e = TemplateSyntaxError(e)
    if not hasattr(e, 'token'):
        e.token = token
    return e
```

start_point: [506, 4] · end_point: [517, 16] · language: python · docstring_language: en · docstring_language_predictions: ['en', 'error', 'th'] · is_langid_reliable: False

---

**`Parser.compile_filter`** `(self, token)` — Convenient wrapper for FilterExpression

```python
def compile_filter(self, token):
    """
    Convenient wrapper for FilterExpression
    """
    return FilterExpression(token, self)
```

start_point: [558, 4] · end_point: [562, 44] · language: python · docstring_language: en · docstring_language_predictions: ['en', 'error', 'th'] · is_langid_reliable: False

---

**`Variable.resolve`** `(self, context)` — Resolve this variable against a given context.

```python
def resolve(self, context):
    """Resolve this variable against a given context."""
    if self.lookups is not None:
        # We're dealing with a variable that needs to be resolved
        value = self._resolve_lookup(context)
    else:
        # We're dealing with a literal, so it's already been "resolved"
        value = self.literal
    if self.translate:
        is_safe = isinstance(value, SafeData)
        msgid = value.replace('%', '%%')
        msgid = mark_safe(msgid) if is_safe else msgid
        if self.message_context:
            return pgettext_lazy(self.message_context, msgid)
        else:
            return gettext_lazy(msgid)
    return value
```

start_point: [791, 4] · end_point: [807, 20] · language: python · docstring_language: en · docstring_language_predictions: ['en', 'en', 'en'] · is_langid_reliable: True
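
A short resolution example, assuming Django is installed. Dotted lookups try dictionary keys, then attributes, then list indices, in the order implemented by `_resolve_lookup` in the next record:

```python
from django.template import Context
from django.template.base import Variable

ctx = Context({"user": {"name": "Ada"}, "items": ["a", "b"]})
print(Variable("user.name").resolve(ctx))  # Ada
print(Variable("items.1").resolve(ctx))    # b
```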
Variable._resolve_lookup | (self, context) |
Perform resolution of a real variable (i.e. not a literal) against the
given context.
As indicated by the method's name, this method is an implementation
detail and shouldn't be called by external code. Use Variable.resolve()
instead.
|
Perform resolution of a real variable (i.e. not a literal) against the
given context. | def _resolve_lookup(self, context):
"""
Perform resolution of a real variable (i.e. not a literal) against the
given context.
As indicated by the method's name, this method is an implementation
detail and shouldn't be called by external code. Use Variable.resolve()
instead.
"""
current = context
try: # catch-all for silent variable failures
for bit in self.lookups:
try: # dictionary lookup
current = current[bit]
# ValueError/IndexError are for numpy.array lookup on
# numpy < 1.9 and 1.9+ respectively
except (TypeError, AttributeError, KeyError, ValueError, IndexError):
try: # attribute lookup
# Don't return class attributes if the class is the context:
if isinstance(current, BaseContext) and getattr(type(current), bit):
raise AttributeError
current = getattr(current, bit)
except (TypeError, AttributeError):
# Reraise if the exception was raised by a @property
if not isinstance(current, BaseContext) and bit in dir(current):
raise
try: # list-index lookup
current = current[int(bit)]
except (IndexError, # list index out of range
ValueError, # invalid literal for int()
KeyError, # current is a dict without `int(bit)` key
TypeError): # unsubscriptable object
raise VariableDoesNotExist("Failed lookup for key "
"[%s] in %r",
(bit, current)) # missing attribute
if callable(current):
if getattr(current, 'do_not_call_in_templates', False):
pass
elif getattr(current, 'alters_data', False):
current = context.template.engine.string_if_invalid
else:
try: # method call (assuming no args required)
current = current()
except TypeError:
signature = inspect.signature(current)
try:
signature.bind()
except TypeError: # arguments *were* required
current = context.template.engine.string_if_invalid # invalid method call
else:
raise
except Exception as e:
template_name = getattr(context, 'template_name', None) or 'unknown'
logger.debug(
"Exception while resolving variable '%s' in template '%s'.",
bit,
template_name,
exc_info=True,
)
if getattr(e, 'silent_variable_failure', False):
current = context.template.engine.string_if_invalid
else:
raise
return current | [
"def",
"_resolve_lookup",
"(",
"self",
",",
"context",
")",
":",
"current",
"=",
"context",
"try",
":",
"# catch-all for silent variable failures",
"for",
"bit",
"in",
"self",
".",
"lookups",
":",
"try",
":",
"# dictionary lookup",
"current",
"=",
"current",
"[",
"bit",
"]",
"# ValueError/IndexError are for numpy.array lookup on",
"# numpy < 1.9 and 1.9+ respectively",
"except",
"(",
"TypeError",
",",
"AttributeError",
",",
"KeyError",
",",
"ValueError",
",",
"IndexError",
")",
":",
"try",
":",
"# attribute lookup",
"# Don't return class attributes if the class is the context:",
"if",
"isinstance",
"(",
"current",
",",
"BaseContext",
")",
"and",
"getattr",
"(",
"type",
"(",
"current",
")",
",",
"bit",
")",
":",
"raise",
"AttributeError",
"current",
"=",
"getattr",
"(",
"current",
",",
"bit",
")",
"except",
"(",
"TypeError",
",",
"AttributeError",
")",
":",
"# Reraise if the exception was raised by a @property",
"if",
"not",
"isinstance",
"(",
"current",
",",
"BaseContext",
")",
"and",
"bit",
"in",
"dir",
"(",
"current",
")",
":",
"raise",
"try",
":",
"# list-index lookup",
"current",
"=",
"current",
"[",
"int",
"(",
"bit",
")",
"]",
"except",
"(",
"IndexError",
",",
"# list index out of range",
"ValueError",
",",
"# invalid literal for int()",
"KeyError",
",",
"# current is a dict without `int(bit)` key",
"TypeError",
")",
":",
"# unsubscriptable object",
"raise",
"VariableDoesNotExist",
"(",
"\"Failed lookup for key \"",
"\"[%s] in %r\"",
",",
"(",
"bit",
",",
"current",
")",
")",
"# missing attribute",
"if",
"callable",
"(",
"current",
")",
":",
"if",
"getattr",
"(",
"current",
",",
"'do_not_call_in_templates'",
",",
"False",
")",
":",
"pass",
"elif",
"getattr",
"(",
"current",
",",
"'alters_data'",
",",
"False",
")",
":",
"current",
"=",
"context",
".",
"template",
".",
"engine",
".",
"string_if_invalid",
"else",
":",
"try",
":",
"# method call (assuming no args required)",
"current",
"=",
"current",
"(",
")",
"except",
"TypeError",
":",
"signature",
"=",
"inspect",
".",
"signature",
"(",
"current",
")",
"try",
":",
"signature",
".",
"bind",
"(",
")",
"except",
"TypeError",
":",
"# arguments *were* required",
"current",
"=",
"context",
".",
"template",
".",
"engine",
".",
"string_if_invalid",
"# invalid method call",
"else",
":",
"raise",
"except",
"Exception",
"as",
"e",
":",
"template_name",
"=",
"getattr",
"(",
"context",
",",
"'template_name'",
",",
"None",
")",
"or",
"'unknown'",
"logger",
".",
"debug",
"(",
"\"Exception while resolving variable '%s' in template '%s'.\"",
",",
"bit",
",",
"template_name",
",",
"exc_info",
"=",
"True",
",",
")",
"if",
"getattr",
"(",
"e",
",",
"'silent_variable_failure'",
",",
"False",
")",
":",
"current",
"=",
"context",
".",
"template",
".",
"engine",
".",
"string_if_invalid",
"else",
":",
"raise",
"return",
"current"
] | [
815,
4
] | [
880,
22
] | python | en | ['en', 'error', 'th'] | False |
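The record above captures Django's variable-resolution chain: dictionary lookup first, then attribute lookup, then list-index lookup, with safe callables invoked along the way. A minimal standalone sketch of that chain (illustrative only: it raises a plain LookupError instead of Django's VariableDoesNotExist and omits the BaseContext/@property and debug handling):

def resolve_bits(current, lookups):
    # Mirror the dict -> attribute -> index fallback order used above.
    for bit in lookups:
        try:  # dictionary lookup
            current = current[bit]
        except (TypeError, AttributeError, KeyError, ValueError, IndexError):
            try:  # attribute lookup
                current = getattr(current, bit)
            except (TypeError, AttributeError):
                try:  # list-index lookup
                    current = current[int(bit)]
                except (IndexError, ValueError, KeyError, TypeError):
                    raise LookupError('Failed lookup for key [%s] in %r' % (bit, current))
        if callable(current) and not getattr(current, 'alters_data', False):
            current = current()  # method call, assuming no args required
    return current

print(resolve_bits({'user': {'name': 'ada'}}, ['user', 'name']))  # ada
print(resolve_bits({'items': ['a', 'b']}, ['items', '1']))        # b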
Node.render | (self, context) |
Return the node rendered as a string.
|
Return the node rendered as a string.
| def render(self, context):
"""
Return the node rendered as a string.
"""
pass | [
"def",
"render",
"(",
"self",
",",
"context",
")",
":",
"pass"
] | [
890,
4
] | [
894,
12
] | python | en | ['en', 'error', 'th'] | False |
Node.render_annotated | (self, context) |
Render the node. If debug is True and an exception occurs during
rendering, the exception is annotated with contextual line information
where it occurred in the template. For internal usage this method is
preferred over using the render method directly.
|
Render the node. If debug is True and an exception occurs during
rendering, the exception is annotated with contextual line information
where it occurred in the template. For internal usage this method is
preferred over using the render method directly.
| def render_annotated(self, context):
"""
Render the node. If debug is True and an exception occurs during
rendering, the exception is annotated with contextual line information
where it occurred in the template. For internal usage this method is
preferred over using the render method directly.
"""
try:
return self.render(context)
except Exception as e:
if context.template.engine.debug and not hasattr(e, 'template_debug'):
e.template_debug = context.render_context.template.get_exception_info(e, self.token)
raise | [
"def",
"render_annotated",
"(",
"self",
",",
"context",
")",
":",
"try",
":",
"return",
"self",
".",
"render",
"(",
"context",
")",
"except",
"Exception",
"as",
"e",
":",
"if",
"context",
".",
"template",
".",
"engine",
".",
"debug",
"and",
"not",
"hasattr",
"(",
"e",
",",
"'template_debug'",
")",
":",
"e",
".",
"template_debug",
"=",
"context",
".",
"render_context",
".",
"template",
".",
"get_exception_info",
"(",
"e",
",",
"self",
".",
"token",
")",
"raise"
] | [
896,
4
] | [
908,
17
] | python | en | ['en', 'error', 'th'] | False |
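render_annotated attaches debug information only where an exception first surfaces, then re-raises it unchanged so outer nodes do not overwrite the annotation. A small illustrative sketch of the same pattern outside Django:

def run_annotated(fn, debug_info, debug=True):
    try:
        return fn()
    except Exception as e:
        if debug and not hasattr(e, 'template_debug'):
            e.template_debug = debug_info  # annotate once, at the innermost frame
        raise

try:
    run_annotated(lambda: 1 / 0, {'line': 42, 'name': 'page.html'})
except ZeroDivisionError as e:
    print(e.template_debug)  # {'line': 42, 'name': 'page.html'}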
Node.get_nodes_by_type | (self, nodetype) |
Return a list of all nodes (within this node and its nodelist)
of the given type
|
Return a list of all nodes (within this node and its nodelist)
of the given type
| def get_nodes_by_type(self, nodetype):
"""
Return a list of all nodes (within this node and its nodelist)
of the given type
"""
nodes = []
if isinstance(self, nodetype):
nodes.append(self)
for attr in self.child_nodelists:
nodelist = getattr(self, attr, None)
if nodelist:
nodes.extend(nodelist.get_nodes_by_type(nodetype))
return nodes | [
"def",
"get_nodes_by_type",
"(",
"self",
",",
"nodetype",
")",
":",
"nodes",
"=",
"[",
"]",
"if",
"isinstance",
"(",
"self",
",",
"nodetype",
")",
":",
"nodes",
".",
"append",
"(",
"self",
")",
"for",
"attr",
"in",
"self",
".",
"child_nodelists",
":",
"nodelist",
"=",
"getattr",
"(",
"self",
",",
"attr",
",",
"None",
")",
"if",
"nodelist",
":",
"nodes",
".",
"extend",
"(",
"nodelist",
".",
"get_nodes_by_type",
"(",
"nodetype",
")",
")",
"return",
"nodes"
] | [
913,
4
] | [
925,
20
] | python | en | ['en', 'error', 'th'] | False |
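get_nodes_by_type is a plain depth-first walk: a node first matches itself against the requested type, then recurses into every nodelist named in child_nodelists. A self-contained sketch with invented mini classes:

class MiniNode:
    child_nodelists = ('nodelist',)

    def __init__(self, children=()):
        self.nodelist = list(children)

    def get_nodes_by_type(self, nodetype):
        nodes = [self] if isinstance(self, nodetype) else []
        for child in self.nodelist:  # recurse into child nodes
            nodes.extend(child.get_nodes_by_type(nodetype))
        return nodes

class TextNode(MiniNode):
    pass

tree = MiniNode([TextNode(), MiniNode([TextNode()])])
print(len(tree.get_nodes_by_type(TextNode)))  # 2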
NodeList.get_nodes_by_type | (self, nodetype) | Return a list of all nodes of the given type | Return a list of all nodes of the given type | def get_nodes_by_type(self, nodetype):
"Return a list of all nodes of the given type"
nodes = []
for node in self:
nodes.extend(node.get_nodes_by_type(nodetype))
return nodes | [
"def",
"get_nodes_by_type",
"(",
"self",
",",
"nodetype",
")",
":",
"nodes",
"=",
"[",
"]",
"for",
"node",
"in",
"self",
":",
"nodes",
".",
"extend",
"(",
"node",
".",
"get_nodes_by_type",
"(",
"nodetype",
")",
")",
"return",
"nodes"
] | [
943,
4
] | [
948,
20
] | python | en | ['en', 'en', 'en'] | True |
reshape_into_image | (features, params) | reshape features dict containing ref, ltg channels into image.
Args:
features (dict): Looks for ref, ltg entries in dict
params (dict): command-line parameters
Returns:
reshaped tensor with shape [2*train_patch_radius, 2*train_patch_radius, 2]
| reshape features dict containing ref, ltg channels into image. | def reshape_into_image(features, params):
"""reshape features dict containing ref, ltg channels into image.
Args:
features (dict): Looks for ref, ltg entries in dict
params (dict): command-line parameters
Returns:
reshaped tensor with shape [2*train_patch_radius, 2*train_patch_radius, 2]
"""
# stack the inputs to form a 2-channel input
# features['ref'] is [-1, height*width]
# stacked image is [-1, height*width, n_channels]
n_channels = 2
print('shape of ref feature {}'.format(features['ref'].shape))
stacked = tf.concat([features['ref'], features['ltg']], axis=1)
height = width = PATCH_SIZE(params)
print('shape of all features {}, will be reshaped to [{},{},{}]'.format(
stacked.shape, height, width, n_channels))
return tf.reshape(stacked, [height, width, n_channels]) | [
"def",
"reshape_into_image",
"(",
"features",
",",
"params",
")",
":",
"# stack the inputs to form a 2-channel input",
"# features['ref'] is [-1, height*width]",
"# stacked image is [-1, height*width, n_channels]",
"n_channels",
"=",
"2",
"print",
"(",
"'shape of ref feature {}'",
".",
"format",
"(",
"features",
"[",
"'ref'",
"]",
".",
"shape",
")",
")",
"stacked",
"=",
"tf",
".",
"concat",
"(",
"[",
"features",
"[",
"'ref'",
"]",
",",
"features",
"[",
"'ltg'",
"]",
"]",
",",
"axis",
"=",
"1",
")",
"height",
"=",
"width",
"=",
"PATCH_SIZE",
"(",
"params",
")",
"print",
"(",
"'shape of all features {}, will be reshaped to [{},{},{}]'",
".",
"format",
"(",
"stacked",
".",
"shape",
",",
"height",
",",
"width",
",",
"n_channels",
")",
")",
"return",
"tf",
".",
"reshape",
"(",
"stacked",
",",
"[",
"height",
",",
"width",
",",
"n_channels",
"]",
")"
] | [
29,
0
] | [
48,
57
] | python | en | ['en', 'en', 'en'] | True |
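A NumPy analogue of the concat-then-reshape step above, with hypothetical sizes. Note that a row-major reshape of the concatenated vector interleaves values, so neither output channel holds ref alone; stacking along a new last axis is the variant that keeps the channels separate:

import numpy as np

height = width = 4
ref = np.arange(height * width, dtype=np.float32).reshape(1, -1)
ltg = np.zeros((1, height * width), dtype=np.float32)

stacked = np.concatenate([ref, ltg], axis=1)  # [1, 2*height*width]
image = stacked.reshape(height, width, 2)     # same op as the tf.reshape above
print(image.shape)                            # (4, 4, 2)

separate = np.stack([ref.reshape(height, width),
                     ltg.reshape(height, width)], axis=-1)
print(separate.shape)                         # (4, 4, 2), ref kept in channel 0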
make_preprocess_fn | (params) | Make preprocessing function.
Args:
params (dict): command-line parameters
Returns:
function that takes tfexample and returns img, label
| Make preprocessing function. | def make_preprocess_fn(params):
"""Make preprocessing function.
Args:
params (dict): command-line parameters
Returns:
function that takes tfexample and returns img, label
"""
def _sparse_to_dense(data, arrlen):
return tf.expand_dims(
tf.reshape(tf.sparse_tensor_to_dense(data, default_value=0), [arrlen]),
-1)
def read_and_preprocess(example_data):
"""parses tfrecord and returns image, label.
Args:
example_data (str): tfrecord
Returns:
img, label
"""
height = width = PATCH_SIZE(params)
parsed = tf.parse_single_example(
example_data, {
'ref': tf.VarLenFeature(tf.float32),
'ltg': tf.VarLenFeature(tf.float32),
'has_ltg': tf.FixedLenFeature([], tf.int64, 1),
})
parsed['ref'] = _sparse_to_dense(parsed['ref'], height * width)
parsed['ltg'] = _sparse_to_dense(parsed['ltg'], height * width)
# keras wants labels to be float32
label = tf.cast(
tf.reshape(parsed['has_ltg'], shape=[]),
dtype=tf.float32)
print('shape of label {}'.format(label.shape))
img = reshape_into_image(parsed, params)
return img, label
return read_and_preprocess | [
"def",
"make_preprocess_fn",
"(",
"params",
")",
":",
"def",
"_sparse_to_dense",
"(",
"data",
",",
"arrlen",
")",
":",
"return",
"tf",
".",
"expand_dims",
"(",
"tf",
".",
"reshape",
"(",
"tf",
".",
"sparse_tensor_to_dense",
"(",
"data",
",",
"default_value",
"=",
"0",
")",
",",
"[",
"arrlen",
"]",
")",
",",
"-",
"1",
")",
"def",
"read_and_preprocess",
"(",
"example_data",
")",
":",
"\"\"\"parses tfrecord and returns image, label.\n\n Args:\n example_data (str): tfrecord\n Returns:\n img, label\n \"\"\"",
"height",
"=",
"width",
"=",
"PATCH_SIZE",
"(",
"params",
")",
"parsed",
"=",
"tf",
".",
"parse_single_example",
"(",
"example_data",
",",
"{",
"'ref'",
":",
"tf",
".",
"VarLenFeature",
"(",
"tf",
".",
"float32",
")",
",",
"'ltg'",
":",
"tf",
".",
"VarLenFeature",
"(",
"tf",
".",
"float32",
")",
",",
"'has_ltg'",
":",
"tf",
".",
"FixedLenFeature",
"(",
"[",
"]",
",",
"tf",
".",
"int64",
",",
"1",
")",
",",
"}",
")",
"parsed",
"[",
"'ref'",
"]",
"=",
"_sparse_to_dense",
"(",
"parsed",
"[",
"'ref'",
"]",
",",
"height",
"*",
"width",
")",
"parsed",
"[",
"'ltg'",
"]",
"=",
"_sparse_to_dense",
"(",
"parsed",
"[",
"'ltg'",
"]",
",",
"height",
"*",
"width",
")",
"# keras wants labels to be float32",
"label",
"=",
"tf",
".",
"cast",
"(",
"tf",
".",
"reshape",
"(",
"parsed",
"[",
"'has_ltg'",
"]",
",",
"shape",
"=",
"[",
"]",
")",
",",
"dtype",
"=",
"tf",
".",
"float32",
")",
"print",
"(",
"'shape of label {}'",
".",
"format",
"(",
"label",
".",
"shape",
")",
")",
"img",
"=",
"reshape_into_image",
"(",
"parsed",
",",
"params",
")",
"return",
"img",
",",
"label",
"return",
"read_and_preprocess"
] | [
51,
0
] | [
92,
28
] | python | en | ['en', 'jv', 'en'] | True |
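The _sparse_to_dense helper above densifies a variable-length feature into a fixed [arrlen, 1] column. A NumPy analogue for illustration (the TF version operates on a tf.SparseTensor rather than explicit index/value lists):

import numpy as np

def sparse_to_dense(indices, values, arrlen):
    dense = np.zeros(arrlen, dtype=np.float32)
    dense[indices] = values          # scatter stored values, zeros elsewhere
    return dense.reshape(arrlen, 1)  # matches tf.expand_dims(..., -1)

print(sparse_to_dense([1, 3], [0.5, 0.9], 6).ravel())
# [0.  0.5 0.  0.9 0.  0. ]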
make_dataset | (pattern, mode, batch_size, params) | Make training/evaluation dataset.
Args:
pattern (str): filename pattern
mode (int): TRAIN/EVAL/PREDICT
batch_size (int): batch size
params (dict): transpose, num_cores
Returns:
tf.data dataset
| Make training/evaluation dataset. | def make_dataset(pattern, mode, batch_size, params):
"""Make training/evaluation dataset.
Args:
pattern (str): filename pattern
mode (int): TRAIN/EVAL/PREDICT
batch_size (int): batch size
params (dict): transpose, num_cores
Returns:
tf.data dataset
"""
def _set_shapes(batch_size, images, labels):
"""Statically set the batch_size dimension."""
if params['transpose']:
images.set_shape(images.get_shape().merge_with(
tf.TensorShape([None, None, None, batch_size])))
labels.set_shape(labels.get_shape().merge_with(
tf.TensorShape([batch_size])))
else:
images.set_shape(images.get_shape().merge_with(
tf.TensorShape([batch_size, None, None, None])))
labels.set_shape(labels.get_shape().merge_with(
tf.TensorShape([batch_size])))
# keras wants labels to be same shape as logits
labels = tf.expand_dims(labels, -1)
return images, labels
is_training = (mode == tf.estimator.ModeKeys.TRAIN)
# read the dataset
dataset = tf.data.Dataset.list_files(pattern, shuffle=is_training)
def fetch_dataset(filename):
buffer_size = 8 * 1024 * 1024 # 8 MiB per file
dataset = tf.data.TFRecordDataset(filename, buffer_size=buffer_size)
return dataset
dataset = dataset.apply(
tf.contrib.data.parallel_interleave(
fetch_dataset, cycle_length=64, sloppy=True))
dataset = dataset.shuffle(batch_size * 50) # shuffle by a bit
# convert features into images
preprocess_fn = make_preprocess_fn(params)
dataset = dataset.apply(
tf.contrib.data.map_and_batch(
preprocess_fn,
batch_size=batch_size,
num_parallel_batches=params['num_cores'],
drop_remainder=True))
if params['transpose']:
dataset = dataset.map(
lambda images, labels: (tf.transpose(images, [1, 2, 3, 0]), labels),
num_parallel_calls=params['num_cores'])
# assign static shape
dataset = dataset.map(functools.partial(_set_shapes, batch_size))
# prefetch data while training
dataset = dataset.repeat()
dataset = dataset.prefetch(tf.contrib.data.AUTOTUNE)
return dataset | [
"def",
"make_dataset",
"(",
"pattern",
",",
"mode",
",",
"batch_size",
",",
"params",
")",
":",
"def",
"_set_shapes",
"(",
"batch_size",
",",
"images",
",",
"labels",
")",
":",
"\"\"\"Statically set the batch_size dimension.\"\"\"",
"if",
"params",
"[",
"'transpose'",
"]",
":",
"images",
".",
"set_shape",
"(",
"images",
".",
"get_shape",
"(",
")",
".",
"merge_with",
"(",
"tf",
".",
"TensorShape",
"(",
"[",
"None",
",",
"None",
",",
"None",
",",
"batch_size",
"]",
")",
")",
")",
"labels",
".",
"set_shape",
"(",
"labels",
".",
"get_shape",
"(",
")",
".",
"merge_with",
"(",
"tf",
".",
"TensorShape",
"(",
"[",
"batch_size",
"]",
")",
")",
")",
"else",
":",
"images",
".",
"set_shape",
"(",
"images",
".",
"get_shape",
"(",
")",
".",
"merge_with",
"(",
"tf",
".",
"TensorShape",
"(",
"[",
"batch_size",
",",
"None",
",",
"None",
",",
"None",
"]",
")",
")",
")",
"labels",
".",
"set_shape",
"(",
"labels",
".",
"get_shape",
"(",
")",
".",
"merge_with",
"(",
"tf",
".",
"TensorShape",
"(",
"[",
"batch_size",
"]",
")",
")",
")",
"# keras wants labels to be same shape as logits",
"labels",
"=",
"tf",
".",
"expand_dims",
"(",
"labels",
",",
"-",
"1",
")",
"return",
"images",
",",
"labels",
"is_training",
"=",
"(",
"mode",
"==",
"tf",
".",
"estimator",
".",
"ModeKeys",
".",
"TRAIN",
")",
"# read the dataset",
"dataset",
"=",
"tf",
".",
"data",
".",
"Dataset",
".",
"list_files",
"(",
"pattern",
",",
"shuffle",
"=",
"is_training",
")",
"def",
"fetch_dataset",
"(",
"filename",
")",
":",
"buffer_size",
"=",
"8",
"*",
"1024",
"*",
"1024",
"# 8 MiB per file",
"dataset",
"=",
"tf",
".",
"data",
".",
"TFRecordDataset",
"(",
"filename",
",",
"buffer_size",
"=",
"buffer_size",
")",
"return",
"dataset",
"dataset",
"=",
"dataset",
".",
"apply",
"(",
"tf",
".",
"contrib",
".",
"data",
".",
"parallel_interleave",
"(",
"fetch_dataset",
",",
"cycle_length",
"=",
"64",
",",
"sloppy",
"=",
"True",
")",
")",
"dataset",
"=",
"dataset",
".",
"shuffle",
"(",
"batch_size",
"*",
"50",
")",
"# shuffle by a bit",
"# convert features into images",
"preprocess_fn",
"=",
"make_preprocess_fn",
"(",
"params",
")",
"dataset",
"=",
"dataset",
".",
"apply",
"(",
"tf",
".",
"contrib",
".",
"data",
".",
"map_and_batch",
"(",
"preprocess_fn",
",",
"batch_size",
"=",
"batch_size",
",",
"num_parallel_batches",
"=",
"params",
"[",
"'num_cores'",
"]",
",",
"drop_remainder",
"=",
"True",
")",
")",
"if",
"params",
"[",
"'transpose'",
"]",
":",
"dataset",
"=",
"dataset",
".",
"map",
"(",
"lambda",
"images",
",",
"labels",
":",
"(",
"tf",
".",
"transpose",
"(",
"images",
",",
"[",
"1",
",",
"2",
",",
"3",
",",
"0",
"]",
")",
",",
"labels",
")",
",",
"num_parallel_calls",
"=",
"params",
"[",
"'num_cores'",
"]",
")",
"# assign static shape",
"dataset",
"=",
"dataset",
".",
"map",
"(",
"functools",
".",
"partial",
"(",
"_set_shapes",
",",
"batch_size",
")",
")",
"# prefetch data while training",
"dataset",
"=",
"dataset",
".",
"repeat",
"(",
")",
"dataset",
"=",
"dataset",
".",
"prefetch",
"(",
"tf",
".",
"contrib",
".",
"data",
".",
"AUTOTUNE",
")",
"return",
"dataset"
] | [
160,
0
] | [
225,
16
] | python | en | ['fr', 'zu', 'en'] | False |
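The pipeline above targets TF1's tf.contrib.data ops. A hypothetical equivalent using the stable replacements available in later TensorFlow releases (parallel_interleave -> interleave, map_and_batch -> map + batch, AUTOTUNE promoted to tf.data); the transpose and static-shape steps are omitted for brevity:

import tensorflow as tf

def make_dataset_v2(pattern, batch_size, parse_fn, shuffle=True):
    ds = tf.data.Dataset.list_files(pattern, shuffle=shuffle)
    ds = ds.interleave(tf.data.TFRecordDataset, cycle_length=64,
                       num_parallel_calls=tf.data.AUTOTUNE)
    ds = ds.shuffle(batch_size * 50)
    ds = ds.map(parse_fn, num_parallel_calls=tf.data.AUTOTUNE)
    ds = ds.batch(batch_size, drop_remainder=True)
    return ds.repeat().prefetch(tf.data.AUTOTUNE)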
train_and_evaluate | (hparams) | Main train and evaluate loop.
Args:
hparams (dict): Command-line parameters passed in
| Main train and evaluate loop. | def train_and_evaluate(hparams):
"""Main train and evaluate loop.
Args:
hparams (dict): Command-line parameters passed in
"""
output_dir = hparams['job_dir']
max_steps = hparams['train_steps']
# avoid overly frequent evaluation
steps_per_epoch = min(1000, max_steps//10)
num_epochs = max_steps // steps_per_epoch
# eval batch size has to be divisible by num_cores
eval_batch_size = min(hparams['num_eval_records'],
hparams['train_batch_size'])
eval_batch_size = eval_batch_size - eval_batch_size % hparams['num_cores']
eval_steps = hparams['num_eval_records'] // eval_batch_size
tf.logging.info('train_batch_size=%d eval_batch_size=%d'
' train_steps=%d (%d x %d) eval_steps=%d',
hparams['train_batch_size'], eval_batch_size,
max_steps, steps_per_epoch, num_epochs,
eval_steps)
# create model
model = create_combined_model(hparams)
# resolve TPU and rewrite model for TPU if necessary
if hparams['use_tpu'] and hparams['master']:
tpu_cluster_resolver = tf.contrib.cluster_resolver.TPUClusterResolver(
hparams['master'])
trained_model = tf.contrib.tpu.keras_to_tpu_model(
model,
strategy=tf.contrib.tpu.TPUDistributionStrategy(
tpu_cluster_resolver
)
)
# on a TPU, we need to provide a function that returns a dataset
# this is so that the TPU can put the input pipeline on attached VM
train_data = lambda: make_dataset(hparams['train_data_path'],
tf.estimator.ModeKeys.TRAIN,
hparams['train_batch_size'],
hparams)
eval_data = lambda: make_dataset(hparams['eval_data_path'],
tf.estimator.ModeKeys.EVAL,
eval_batch_size,
hparams)
else:
trained_model = model
train_data = make_dataset(hparams['train_data_path'],
tf.estimator.ModeKeys.TRAIN,
hparams['train_batch_size'],
hparams)
eval_data = make_dataset(hparams['eval_data_path'],
tf.estimator.ModeKeys.EVAL,
eval_batch_size,
hparams)
# train and evaluate
start_timestamp = time.time()
history = trained_model.fit(
train_data,
steps_per_epoch=steps_per_epoch,
epochs=num_epochs,
validation_data=eval_data,
validation_steps=eval_steps,
verbose=2 # 1=progress 2=one line per epoch
)
elapsed_time = int(time.time() - start_timestamp)
tf.logging.info('Finished training up to step %d. Elapsed seconds %d.',
max_steps, elapsed_time)
#tf.logging.info(model.summary())
print("if running interactively, graph: {}".format(history.history.keys()))
# write validation accuracy as hyperparameter tuning metric
hpt = hypertune.HyperTune()
hpt.report_hyperparameter_tuning_metric(
hyperparameter_metric_tag='val_acc',
metric_value=history.history['val_acc'][-1], # last one
global_step=0)
# Serve the model via CMLE
export_keras(model, trained_model, output_dir, hparams) | [
"def",
"train_and_evaluate",
"(",
"hparams",
")",
":",
"output_dir",
"=",
"hparams",
"[",
"'job_dir'",
"]",
"max_steps",
"=",
"hparams",
"[",
"'train_steps'",
"]",
"# avoid overly frequent evaluation",
"steps_per_epoch",
"=",
"min",
"(",
"1000",
",",
"max_steps",
"//",
"10",
")",
"num_epochs",
"=",
"max_steps",
"//",
"steps_per_epoch",
"# eval batch size has to be divisible by num_cores",
"eval_batch_size",
"=",
"min",
"(",
"hparams",
"[",
"'num_eval_records'",
"]",
",",
"hparams",
"[",
"'train_batch_size'",
"]",
")",
"eval_batch_size",
"=",
"eval_batch_size",
"-",
"eval_batch_size",
"%",
"hparams",
"[",
"'num_cores'",
"]",
"eval_steps",
"=",
"hparams",
"[",
"'num_eval_records'",
"]",
"//",
"eval_batch_size",
"tf",
".",
"logging",
".",
"info",
"(",
"'train_batch_size=%d eval_batch_size=%d'",
"' train_steps=%d (%d x %d) eval_steps=%d'",
",",
"hparams",
"[",
"'train_batch_size'",
"]",
",",
"eval_batch_size",
",",
"max_steps",
",",
"steps_per_epoch",
",",
"num_epochs",
",",
"eval_steps",
")",
"# create model",
"model",
"=",
"create_combined_model",
"(",
"hparams",
")",
"# resolve TPU and rewrite model for TPU if necessary",
"if",
"hparams",
"[",
"'use_tpu'",
"]",
"and",
"hparams",
"[",
"'master'",
"]",
":",
"tpu_cluster_resolver",
"=",
"tf",
".",
"contrib",
".",
"cluster_resolver",
".",
"TPUClusterResolver",
"(",
"hparams",
"[",
"'master'",
"]",
")",
"trained_model",
"=",
"tf",
".",
"contrib",
".",
"tpu",
".",
"keras_to_tpu_model",
"(",
"model",
",",
"strategy",
"=",
"tf",
".",
"contrib",
".",
"tpu",
".",
"TPUDistributionStrategy",
"(",
"tpu_cluster_resolver",
")",
")",
"# on a TPU, we need to provide a function that returns a dataset",
"# this is so that the TPU can put the input pipeline on attached VM",
"train_data",
"=",
"lambda",
":",
"make_dataset",
"(",
"hparams",
"[",
"'train_data_path'",
"]",
",",
"tf",
".",
"estimator",
".",
"ModeKeys",
".",
"TRAIN",
",",
"hparams",
"[",
"'train_batch_size'",
"]",
",",
"hparams",
")",
"eval_data",
"=",
"lambda",
":",
"make_dataset",
"(",
"hparams",
"[",
"'eval_data_path'",
"]",
",",
"tf",
".",
"estimator",
".",
"ModeKeys",
".",
"EVAL",
",",
"eval_batch_size",
",",
"hparams",
")",
"else",
":",
"trained_model",
"=",
"model",
"train_data",
"=",
"make_dataset",
"(",
"hparams",
"[",
"'train_data_path'",
"]",
",",
"tf",
".",
"estimator",
".",
"ModeKeys",
".",
"TRAIN",
",",
"hparams",
"[",
"'train_batch_size'",
"]",
",",
"hparams",
")",
"eval_data",
"=",
"make_dataset",
"(",
"hparams",
"[",
"'eval_data_path'",
"]",
",",
"tf",
".",
"estimator",
".",
"ModeKeys",
".",
"EVAL",
",",
"eval_batch_size",
",",
"hparams",
")",
"# train and evaluate",
"start_timestamp",
"=",
"time",
".",
"time",
"(",
")",
"history",
"=",
"trained_model",
".",
"fit",
"(",
"train_data",
",",
"steps_per_epoch",
"=",
"steps_per_epoch",
",",
"epochs",
"=",
"num_epochs",
",",
"validation_data",
"=",
"eval_data",
",",
"validation_steps",
"=",
"eval_steps",
",",
"verbose",
"=",
"2",
"# 1=progress 2=one line per epoch",
")",
"elapsed_time",
"=",
"int",
"(",
"time",
".",
"time",
"(",
")",
"-",
"start_timestamp",
")",
"tf",
".",
"logging",
".",
"info",
"(",
"'Finished training up to step %d. Elapsed seconds %d.'",
",",
"max_steps",
",",
"elapsed_time",
")",
"#tf.logging.info(model.summary())",
"print",
"(",
"\"if running interactively, graph: {}\"",
".",
"format",
"(",
"history",
".",
"history",
".",
"keys",
"(",
")",
")",
")",
"# write validation accuracy as hyperparameter tuning metric",
"hpt",
"=",
"hypertune",
".",
"HyperTune",
"(",
")",
"hpt",
".",
"report_hyperparameter_tuning_metric",
"(",
"hyperparameter_metric_tag",
"=",
"'val_acc'",
",",
"metric_value",
"=",
"history",
".",
"history",
"[",
"'val_acc'",
"]",
"[",
"-",
"1",
"]",
",",
"# last one",
"global_step",
"=",
"0",
")",
"# Serve the model via CMLE",
"export_keras",
"(",
"model",
",",
"trained_model",
",",
"output_dir",
",",
"hparams",
")"
] | [
228,
0
] | [
312,
57
] | python | en | ['en', 'id', 'en'] | True |
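The eval-batch arithmetic above in isolation: the batch is capped by the evaluation set size, then rounded down to a multiple of num_cores so every TPU core receives an equal share (numbers are illustrative):

num_eval_records, train_batch_size, num_cores = 1000, 500, 8
eval_batch_size = min(num_eval_records, train_batch_size)  # 500
eval_batch_size -= eval_batch_size % num_cores             # 496, divisible by 8
eval_steps = num_eval_records // eval_batch_size           # 2
print(eval_batch_size, eval_steps)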
normalize | (pattern) | r"""
Given a reg-exp pattern, normalize it to an iterable of forms that
suffice for reverse matching. This does the following:
(1) For any repeating sections, keeps the minimum number of occurrences
permitted (this means zero for optional groups).
(2) If an optional group includes parameters, include one occurrence of
that group (along with the zero occurrence case from step (1)).
(3) Select the first (essentially an arbitrary) element from any character
class. Select an arbitrary character for any unordered class (e.g. '.'
or '\w') in the pattern.
(4) Ignore look-ahead and look-behind assertions.
(5) Raise an error on any disjunctive ('|') constructs.
Django's URLs for forward resolving are either all positional arguments or
all keyword arguments. That is assumed here, as well. Although reverse
resolving can be done using positional args when keyword args are
specified, the two cannot be mixed in the same reverse() call.
| r"""
Given a reg-exp pattern, normalize it to an iterable of forms that
suffice for reverse matching. This does the following: | def normalize(pattern):
r"""
Given a reg-exp pattern, normalize it to an iterable of forms that
suffice for reverse matching. This does the following:
(1) For any repeating sections, keeps the minimum number of occurrences
permitted (this means zero for optional groups).
(2) If an optional group includes parameters, include one occurrence of
that group (along with the zero occurrence case from step (1)).
(3) Select the first (essentially an arbitrary) element from any character
class. Select an arbitrary character for any unordered class (e.g. '.'
or '\w') in the pattern.
(4) Ignore look-ahead and look-behind assertions.
(5) Raise an error on any disjunctive ('|') constructs.
Django's URLs for forward resolving are either all positional arguments or
all keyword arguments. That is assumed here, as well. Although reverse
resolving can be done using positional args when keyword args are
specified, the two cannot be mixed in the same reverse() call.
"""
# Do a linear scan to work out the special features of this pattern. The
# idea is that we scan once here and collect all the information we need to
# make future decisions.
result = []
non_capturing_groups = []
consume_next = True
pattern_iter = next_char(iter(pattern))
num_args = 0
# A "while" loop is used here because later on we need to be able to peek
# at the next character and possibly go around without consuming another
# one at the top of the loop.
try:
ch, escaped = next(pattern_iter)
except StopIteration:
return [('', [])]
try:
while True:
if escaped:
result.append(ch)
elif ch == '.':
# Replace "any character" with an arbitrary representative.
result.append(".")
elif ch == '|':
# FIXME: One day we should do this, but not in 1.0.
raise NotImplementedError('Awaiting Implementation')
elif ch == "^":
pass
elif ch == '$':
break
elif ch == ')':
# This can only be the end of a non-capturing group, since all
# other unescaped parentheses are handled by the grouping
# section later (and the full group is handled there).
#
# We regroup everything inside the capturing group so that it
# can be quantified, if necessary.
start = non_capturing_groups.pop()
inner = NonCapture(result[start:])
result = result[:start] + [inner]
elif ch == '[':
# Replace ranges with the first character in the range.
ch, escaped = next(pattern_iter)
result.append(ch)
ch, escaped = next(pattern_iter)
while escaped or ch != ']':
ch, escaped = next(pattern_iter)
elif ch == '(':
# Some kind of group.
ch, escaped = next(pattern_iter)
if ch != '?' or escaped:
# A positional group
name = "_%d" % num_args
num_args += 1
result.append(Group((("%%(%s)s" % name), name)))
walk_to_end(ch, pattern_iter)
else:
ch, escaped = next(pattern_iter)
if ch in '!=<':
# All of these are ignorable. Walk to the end of the
# group.
walk_to_end(ch, pattern_iter)
elif ch == ':':
# Non-capturing group
non_capturing_groups.append(len(result))
elif ch != 'P':
# Anything else, other than a named group, is something
# we cannot reverse.
raise ValueError("Non-reversible reg-exp portion: '(?%s'" % ch)
else:
ch, escaped = next(pattern_iter)
if ch not in ('<', '='):
raise ValueError("Non-reversible reg-exp portion: '(?P%s'" % ch)
# We are in a named capturing group. Extract the name and
# then skip to the end.
if ch == '<':
terminal_char = '>'
# We are in a named backreference.
else:
terminal_char = ')'
name = []
ch, escaped = next(pattern_iter)
while ch != terminal_char:
name.append(ch)
ch, escaped = next(pattern_iter)
param = ''.join(name)
# Named backreferences have already consumed the
# parenthesis.
if terminal_char != ')':
result.append(Group((("%%(%s)s" % param), param)))
walk_to_end(ch, pattern_iter)
else:
result.append(Group((("%%(%s)s" % param), None)))
elif ch in "*?+{":
# Quantifiers affect the previous item in the result list.
count, ch = get_quantifier(ch, pattern_iter)
if ch:
# We had to look ahead, but it wasn't needed to compute the
# quantifier, so use this character next time around the
# main loop.
consume_next = False
if count == 0:
if contains(result[-1], Group):
# If we are quantifying a capturing group (or
# something containing such a group) and the minimum is
# zero, we must also handle the case of one occurrence
# being present. All the quantifiers (except {0,0},
# which we conveniently ignore) that have a 0 minimum
# also allow a single occurrence.
result[-1] = Choice([None, result[-1]])
else:
result.pop()
elif count > 1:
result.extend([result[-1]] * (count - 1))
else:
# Anything else is a literal.
result.append(ch)
if consume_next:
ch, escaped = next(pattern_iter)
consume_next = True
except StopIteration:
pass
except NotImplementedError:
# A case of using the disjunctive form. No results for you!
return [('', [])]
return list(zip(*flatten_result(result))) | [
"def",
"normalize",
"(",
"pattern",
")",
":",
"# Do a linear scan to work out the special features of this pattern. The",
"# idea is that we scan once here and collect all the information we need to",
"# make future decisions.",
"result",
"=",
"[",
"]",
"non_capturing_groups",
"=",
"[",
"]",
"consume_next",
"=",
"True",
"pattern_iter",
"=",
"next_char",
"(",
"iter",
"(",
"pattern",
")",
")",
"num_args",
"=",
"0",
"# A \"while\" loop is used here because later on we need to be able to peek",
"# at the next character and possibly go around without consuming another",
"# one at the top of the loop.",
"try",
":",
"ch",
",",
"escaped",
"=",
"next",
"(",
"pattern_iter",
")",
"except",
"StopIteration",
":",
"return",
"[",
"(",
"''",
",",
"[",
"]",
")",
"]",
"try",
":",
"while",
"True",
":",
"if",
"escaped",
":",
"result",
".",
"append",
"(",
"ch",
")",
"elif",
"ch",
"==",
"'.'",
":",
"# Replace \"any character\" with an arbitrary representative.",
"result",
".",
"append",
"(",
"\".\"",
")",
"elif",
"ch",
"==",
"'|'",
":",
"# FIXME: One day we'll should do this, but not in 1.0.",
"raise",
"NotImplementedError",
"(",
"'Awaiting Implementation'",
")",
"elif",
"ch",
"==",
"\"^\"",
":",
"pass",
"elif",
"ch",
"==",
"'$'",
":",
"break",
"elif",
"ch",
"==",
"')'",
":",
"# This can only be the end of a non-capturing group, since all",
"# other unescaped parentheses are handled by the grouping",
"# section later (and the full group is handled there).",
"#",
"# We regroup everything inside the capturing group so that it",
"# can be quantified, if necessary.",
"start",
"=",
"non_capturing_groups",
".",
"pop",
"(",
")",
"inner",
"=",
"NonCapture",
"(",
"result",
"[",
"start",
":",
"]",
")",
"result",
"=",
"result",
"[",
":",
"start",
"]",
"+",
"[",
"inner",
"]",
"elif",
"ch",
"==",
"'['",
":",
"# Replace ranges with the first character in the range.",
"ch",
",",
"escaped",
"=",
"next",
"(",
"pattern_iter",
")",
"result",
".",
"append",
"(",
"ch",
")",
"ch",
",",
"escaped",
"=",
"next",
"(",
"pattern_iter",
")",
"while",
"escaped",
"or",
"ch",
"!=",
"']'",
":",
"ch",
",",
"escaped",
"=",
"next",
"(",
"pattern_iter",
")",
"elif",
"ch",
"==",
"'('",
":",
"# Some kind of group.",
"ch",
",",
"escaped",
"=",
"next",
"(",
"pattern_iter",
")",
"if",
"ch",
"!=",
"'?'",
"or",
"escaped",
":",
"# A positional group",
"name",
"=",
"\"_%d\"",
"%",
"num_args",
"num_args",
"+=",
"1",
"result",
".",
"append",
"(",
"Group",
"(",
"(",
"(",
"\"%%(%s)s\"",
"%",
"name",
")",
",",
"name",
")",
")",
")",
"walk_to_end",
"(",
"ch",
",",
"pattern_iter",
")",
"else",
":",
"ch",
",",
"escaped",
"=",
"next",
"(",
"pattern_iter",
")",
"if",
"ch",
"in",
"'!=<'",
":",
"# All of these are ignorable. Walk to the end of the",
"# group.",
"walk_to_end",
"(",
"ch",
",",
"pattern_iter",
")",
"elif",
"ch",
"==",
"':'",
":",
"# Non-capturing group",
"non_capturing_groups",
".",
"append",
"(",
"len",
"(",
"result",
")",
")",
"elif",
"ch",
"!=",
"'P'",
":",
"# Anything else, other than a named group, is something",
"# we cannot reverse.",
"raise",
"ValueError",
"(",
"\"Non-reversible reg-exp portion: '(?%s'\"",
"%",
"ch",
")",
"else",
":",
"ch",
",",
"escaped",
"=",
"next",
"(",
"pattern_iter",
")",
"if",
"ch",
"not",
"in",
"(",
"'<'",
",",
"'='",
")",
":",
"raise",
"ValueError",
"(",
"\"Non-reversible reg-exp portion: '(?P%s'\"",
"%",
"ch",
")",
"# We are in a named capturing group. Extra the name and",
"# then skip to the end.",
"if",
"ch",
"==",
"'<'",
":",
"terminal_char",
"=",
"'>'",
"# We are in a named backreference.",
"else",
":",
"terminal_char",
"=",
"')'",
"name",
"=",
"[",
"]",
"ch",
",",
"escaped",
"=",
"next",
"(",
"pattern_iter",
")",
"while",
"ch",
"!=",
"terminal_char",
":",
"name",
".",
"append",
"(",
"ch",
")",
"ch",
",",
"escaped",
"=",
"next",
"(",
"pattern_iter",
")",
"param",
"=",
"''",
".",
"join",
"(",
"name",
")",
"# Named backreferences have already consumed the",
"# parenthesis.",
"if",
"terminal_char",
"!=",
"')'",
":",
"result",
".",
"append",
"(",
"Group",
"(",
"(",
"(",
"\"%%(%s)s\"",
"%",
"param",
")",
",",
"param",
")",
")",
")",
"walk_to_end",
"(",
"ch",
",",
"pattern_iter",
")",
"else",
":",
"result",
".",
"append",
"(",
"Group",
"(",
"(",
"(",
"\"%%(%s)s\"",
"%",
"param",
")",
",",
"None",
")",
")",
")",
"elif",
"ch",
"in",
"\"*?+{\"",
":",
"# Quantifiers affect the previous item in the result list.",
"count",
",",
"ch",
"=",
"get_quantifier",
"(",
"ch",
",",
"pattern_iter",
")",
"if",
"ch",
":",
"# We had to look ahead, but it wasn't need to compute the",
"# quantifier, so use this character next time around the",
"# main loop.",
"consume_next",
"=",
"False",
"if",
"count",
"==",
"0",
":",
"if",
"contains",
"(",
"result",
"[",
"-",
"1",
"]",
",",
"Group",
")",
":",
"# If we are quantifying a capturing group (or",
"# something containing such a group) and the minimum is",
"# zero, we must also handle the case of one occurrence",
"# being present. All the quantifiers (except {0,0},",
"# which we conveniently ignore) that have a 0 minimum",
"# also allow a single occurrence.",
"result",
"[",
"-",
"1",
"]",
"=",
"Choice",
"(",
"[",
"None",
",",
"result",
"[",
"-",
"1",
"]",
"]",
")",
"else",
":",
"result",
".",
"pop",
"(",
")",
"elif",
"count",
">",
"1",
":",
"result",
".",
"extend",
"(",
"[",
"result",
"[",
"-",
"1",
"]",
"]",
"*",
"(",
"count",
"-",
"1",
")",
")",
"else",
":",
"# Anything else is a literal.",
"result",
".",
"append",
"(",
"ch",
")",
"if",
"consume_next",
":",
"ch",
",",
"escaped",
"=",
"next",
"(",
"pattern_iter",
")",
"consume_next",
"=",
"True",
"except",
"StopIteration",
":",
"pass",
"except",
"NotImplementedError",
":",
"# A case of using the disjunctive form. No results for you!",
"return",
"[",
"(",
"''",
",",
"[",
"]",
")",
"]",
"return",
"list",
"(",
"zip",
"(",
"*",
"flatten_result",
"(",
"result",
")",
")",
")"
] | [
40,
0
] | [
189,
45
] | python | cy | ['en', 'cy', 'hi'] | False |
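Illustrative outputs of the helper, assuming Django is installed: a named group becomes a %(name)s placeholder, and a zero-minimum quantifier over a group yields both the with-group and without-group candidates:

from django.utils.regex_helper import normalize

print(normalize(r'^articles/(?P<year>[0-9]{4})/$'))
# [('articles/%(year)s/', ['year'])]
print(normalize(r'^blog/(?:page-(?P<num>\d+)/)?$'))
# [('blog/', []), ('blog/page-%(num)s/', ['num'])]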
next_char | (input_iter) | r"""
An iterator that yields the next character from "input_iter", respecting
escape sequences. An escaped character is replaced by a representative of
its class (e.g. \w -> "x"). If the escaped character is one that is
skipped, it is not returned (the next character is returned instead).
Yield the next character, along with a boolean indicating whether it is a
raw (unescaped) character or not.
| r"""
An iterator that yields the next character from "input_iter", respecting
escape sequences. An escaped character is replaced by a representative of
its class (e.g. \w -> "x"). If the escaped character is one that is
skipped, it is not returned (the next character is returned instead). | def next_char(input_iter):
r"""
An iterator that yields the next character from "input_iter", respecting
escape sequences. An escaped character is replaced by a representative of
its class (e.g. \w -> "x"). If the escaped character is one that is
skipped, it is not returned (the next character is returned instead).
Yield the next character, along with a boolean indicating whether it is a
raw (unescaped) character or not.
"""
for ch in input_iter:
if ch != '\\':
yield ch, False
continue
ch = next(input_iter)
representative = ESCAPE_MAPPINGS.get(ch, ch)
if representative is None:
continue
yield representative, True | [
"def",
"next_char",
"(",
"input_iter",
")",
":",
"for",
"ch",
"in",
"input_iter",
":",
"if",
"ch",
"!=",
"'\\\\'",
":",
"yield",
"ch",
",",
"False",
"continue",
"ch",
"=",
"next",
"(",
"input_iter",
")",
"representative",
"=",
"ESCAPE_MAPPINGS",
".",
"get",
"(",
"ch",
",",
"ch",
")",
"if",
"representative",
"is",
"None",
":",
"continue",
"yield",
"representative",
",",
"True"
] | [
192,
0
] | [
210,
34
] | python | cy | ['en', 'cy', 'hi'] | False |
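Sample behaviour, assuming Django is installed: literal characters come back with escaped=False, while an escaped class such as \w is replaced by its representative ('x' in ESCAPE_MAPPINGS) and flagged as escaped:

from django.utils.regex_helper import next_char

print(list(next_char(iter(r'a\wb'))))
# [('a', False), ('x', True), ('b', False)]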
walk_to_end | (ch, input_iter) |
The iterator is currently inside a capturing group. Walk to the close of
this group, skipping over any nested groups and handling escaped
parentheses correctly.
|
The iterator is currently inside a capturing group. Walk to the close of
this group, skipping over any nested groups and handling escaped
parentheses correctly.
| def walk_to_end(ch, input_iter):
"""
The iterator is currently inside a capturing group. Walk to the close of
this group, skipping over any nested groups and handling escaped
parentheses correctly.
"""
if ch == '(':
nesting = 1
else:
nesting = 0
for ch, escaped in input_iter:
if escaped:
continue
elif ch == '(':
nesting += 1
elif ch == ')':
if not nesting:
return
nesting -= 1 | [
"def",
"walk_to_end",
"(",
"ch",
",",
"input_iter",
")",
":",
"if",
"ch",
"==",
"'('",
":",
"nesting",
"=",
"1",
"else",
":",
"nesting",
"=",
"0",
"for",
"ch",
",",
"escaped",
"in",
"input_iter",
":",
"if",
"escaped",
":",
"continue",
"elif",
"ch",
"==",
"'('",
":",
"nesting",
"+=",
"1",
"elif",
"ch",
"==",
"')'",
":",
"if",
"not",
"nesting",
":",
"return",
"nesting",
"-=",
"1"
] | [
213,
0
] | [
231,
24
] | python | en | ['en', 'error', 'th'] | False |
get_quantifier | (ch, input_iter) |
Parse a quantifier from the input, where "ch" is the first character in the
quantifier.
Return the minimum number of occurrences permitted by the quantifier and
either None or the next character from the input_iter if the next character
is not part of the quantifier.
|
Parse a quantifier from the input, where "ch" is the first character in the
quantifier. | def get_quantifier(ch, input_iter):
"""
Parse a quantifier from the input, where "ch" is the first character in the
quantifier.
Return the minimum number of occurrences permitted by the quantifier and
either None or the next character from the input_iter if the next character
is not part of the quantifier.
"""
if ch in '*?+':
try:
ch2, escaped = next(input_iter)
except StopIteration:
ch2 = None
if ch2 == '?':
ch2 = None
if ch == '+':
return 1, ch2
return 0, ch2
quant = []
while ch != '}':
ch, escaped = next(input_iter)
quant.append(ch)
quant = quant[:-1]
values = ''.join(quant).split(',')
# Consume the trailing '?', if necessary.
try:
ch, escaped = next(input_iter)
except StopIteration:
ch = None
if ch == '?':
ch = None
return int(values[0]), ch | [
"def",
"get_quantifier",
"(",
"ch",
",",
"input_iter",
")",
":",
"if",
"ch",
"in",
"'*?+'",
":",
"try",
":",
"ch2",
",",
"escaped",
"=",
"next",
"(",
"input_iter",
")",
"except",
"StopIteration",
":",
"ch2",
"=",
"None",
"if",
"ch2",
"==",
"'?'",
":",
"ch2",
"=",
"None",
"if",
"ch",
"==",
"'+'",
":",
"return",
"1",
",",
"ch2",
"return",
"0",
",",
"ch2",
"quant",
"=",
"[",
"]",
"while",
"ch",
"!=",
"'}'",
":",
"ch",
",",
"escaped",
"=",
"next",
"(",
"input_iter",
")",
"quant",
".",
"append",
"(",
"ch",
")",
"quant",
"=",
"quant",
"[",
":",
"-",
"1",
"]",
"values",
"=",
"''",
".",
"join",
"(",
"quant",
")",
".",
"split",
"(",
"','",
")",
"# Consume the trailing '?', if necessary.",
"try",
":",
"ch",
",",
"escaped",
"=",
"next",
"(",
"input_iter",
")",
"except",
"StopIteration",
":",
"ch",
"=",
"None",
"if",
"ch",
"==",
"'?'",
":",
"ch",
"=",
"None",
"return",
"int",
"(",
"values",
"[",
"0",
"]",
")",
",",
"ch"
] | [
234,
0
] | [
268,
29
] | python | en | ['en', 'error', 'th'] | False |
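Worked examples, assuming Django is installed: for '{2,7}' the minimum count 2 is returned; '+' maps to a minimum of 1 and '*'/'?' to 0. The second element is a looked-ahead character that turned out not to belong to the quantifier, or None:

from django.utils.regex_helper import get_quantifier, next_char

print(get_quantifier('{', next_char(iter('2,7}'))))  # (2, None)
print(get_quantifier('+', next_char(iter('abc'))))   # (1, 'a')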
contains | (source, inst) |
Return True if the "source" contains an instance of "inst". False,
otherwise.
|
Return True if the "source" contains an instance of "inst". False,
otherwise.
| def contains(source, inst):
"""
Return True if the "source" contains an instance of "inst". False,
otherwise.
"""
if isinstance(source, inst):
return True
if isinstance(source, NonCapture):
for elt in source:
if contains(elt, inst):
return True
return False | [
"def",
"contains",
"(",
"source",
",",
"inst",
")",
":",
"if",
"isinstance",
"(",
"source",
",",
"inst",
")",
":",
"return",
"True",
"if",
"isinstance",
"(",
"source",
",",
"NonCapture",
")",
":",
"for",
"elt",
"in",
"source",
":",
"if",
"contains",
"(",
"elt",
",",
"inst",
")",
":",
"return",
"True",
"return",
"False"
] | [
271,
0
] | [
282,
16
] | python | en | ['en', 'error', 'th'] | False |
flatten_result | (source) |
Turn the given source sequence into a list of reg-exp possibilities and
their arguments. Return a list of strings and a list of argument lists.
Each of the two lists will be of the same length.
|
Turn the given source sequence into a list of reg-exp possibilities and
their arguments. Return a list of strings and a list of argument lists.
Each of the two lists will be of the same length.
| def flatten_result(source):
"""
Turn the given source sequence into a list of reg-exp possibilities and
their arguments. Return a list of strings and a list of argument lists.
Each of the two lists will be of the same length.
"""
if source is None:
return [''], [[]]
if isinstance(source, Group):
if source[1] is None:
params = []
else:
params = [source[1]]
return [source[0]], [params]
result = ['']
result_args = [[]]
pos = last = 0
for pos, elt in enumerate(source):
if isinstance(elt, str):
continue
piece = ''.join(source[last:pos])
if isinstance(elt, Group):
piece += elt[0]
param = elt[1]
else:
param = None
last = pos + 1
for i in range(len(result)):
result[i] += piece
if param:
result_args[i].append(param)
if isinstance(elt, (Choice, NonCapture)):
if isinstance(elt, NonCapture):
elt = [elt]
inner_result, inner_args = [], []
for item in elt:
res, args = flatten_result(item)
inner_result.extend(res)
inner_args.extend(args)
new_result = []
new_args = []
for item, args in zip(result, result_args):
for i_item, i_args in zip(inner_result, inner_args):
new_result.append(item + i_item)
new_args.append(args[:] + i_args)
result = new_result
result_args = new_args
if pos >= last:
piece = ''.join(source[last:])
for i in range(len(result)):
result[i] += piece
return result, result_args | [
"def",
"flatten_result",
"(",
"source",
")",
":",
"if",
"source",
"is",
"None",
":",
"return",
"[",
"''",
"]",
",",
"[",
"[",
"]",
"]",
"if",
"isinstance",
"(",
"source",
",",
"Group",
")",
":",
"if",
"source",
"[",
"1",
"]",
"is",
"None",
":",
"params",
"=",
"[",
"]",
"else",
":",
"params",
"=",
"[",
"source",
"[",
"1",
"]",
"]",
"return",
"[",
"source",
"[",
"0",
"]",
"]",
",",
"[",
"params",
"]",
"result",
"=",
"[",
"''",
"]",
"result_args",
"=",
"[",
"[",
"]",
"]",
"pos",
"=",
"last",
"=",
"0",
"for",
"pos",
",",
"elt",
"in",
"enumerate",
"(",
"source",
")",
":",
"if",
"isinstance",
"(",
"elt",
",",
"str",
")",
":",
"continue",
"piece",
"=",
"''",
".",
"join",
"(",
"source",
"[",
"last",
":",
"pos",
"]",
")",
"if",
"isinstance",
"(",
"elt",
",",
"Group",
")",
":",
"piece",
"+=",
"elt",
"[",
"0",
"]",
"param",
"=",
"elt",
"[",
"1",
"]",
"else",
":",
"param",
"=",
"None",
"last",
"=",
"pos",
"+",
"1",
"for",
"i",
"in",
"range",
"(",
"len",
"(",
"result",
")",
")",
":",
"result",
"[",
"i",
"]",
"+=",
"piece",
"if",
"param",
":",
"result_args",
"[",
"i",
"]",
".",
"append",
"(",
"param",
")",
"if",
"isinstance",
"(",
"elt",
",",
"(",
"Choice",
",",
"NonCapture",
")",
")",
":",
"if",
"isinstance",
"(",
"elt",
",",
"NonCapture",
")",
":",
"elt",
"=",
"[",
"elt",
"]",
"inner_result",
",",
"inner_args",
"=",
"[",
"]",
",",
"[",
"]",
"for",
"item",
"in",
"elt",
":",
"res",
",",
"args",
"=",
"flatten_result",
"(",
"item",
")",
"inner_result",
".",
"extend",
"(",
"res",
")",
"inner_args",
".",
"extend",
"(",
"args",
")",
"new_result",
"=",
"[",
"]",
"new_args",
"=",
"[",
"]",
"for",
"item",
",",
"args",
"in",
"zip",
"(",
"result",
",",
"result_args",
")",
":",
"for",
"i_item",
",",
"i_args",
"in",
"zip",
"(",
"inner_result",
",",
"inner_args",
")",
":",
"new_result",
".",
"append",
"(",
"item",
"+",
"i_item",
")",
"new_args",
".",
"append",
"(",
"args",
"[",
":",
"]",
"+",
"i_args",
")",
"result",
"=",
"new_result",
"result_args",
"=",
"new_args",
"if",
"pos",
">=",
"last",
":",
"piece",
"=",
"''",
".",
"join",
"(",
"source",
"[",
"last",
":",
"]",
")",
"for",
"i",
"in",
"range",
"(",
"len",
"(",
"result",
")",
")",
":",
"result",
"[",
"i",
"]",
"+=",
"piece",
"return",
"result",
",",
"result_args"
] | [
285,
0
] | [
336,
30
] | python | en | ['en', 'error', 'th'] | False |
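Illustrative expansion, assuming Django is installed: a Choice, as produced by normalize for a zero-minimum quantifier, doubles the candidate patterns, once without the group and once with it:

from django.utils.regex_helper import Choice, Group, flatten_result

source = ['a', Choice([None, Group(('%(x)s', 'x'))])]
print(flatten_result(source))
# (['a', 'a%(x)s'], [[], ['x']])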
_lazy_re_compile | (regex, flags=0) | Lazily compile a regex with flags. | Lazily compile a regex with flags. | def _lazy_re_compile(regex, flags=0):
"""Lazily compile a regex with flags."""
def _compile():
# Compile the regex if it was not passed pre-compiled.
if isinstance(regex, (str, bytes)):
return re.compile(regex, flags)
else:
assert not flags, (
'flags must be empty if regex is passed pre-compiled'
)
return regex
return SimpleLazyObject(_compile) | [
"def",
"_lazy_re_compile",
"(",
"regex",
",",
"flags",
"=",
"0",
")",
":",
"def",
"_compile",
"(",
")",
":",
"# Compile the regex if it was not passed pre-compiled.",
"if",
"isinstance",
"(",
"regex",
",",
"(",
"str",
",",
"bytes",
")",
")",
":",
"return",
"re",
".",
"compile",
"(",
"regex",
",",
"flags",
")",
"else",
":",
"assert",
"not",
"flags",
",",
"(",
"'flags must be empty if regex is passed pre-compiled'",
")",
"return",
"regex",
"return",
"SimpleLazyObject",
"(",
"_compile",
")"
] | [
339,
0
] | [
350,
37
] | python | en | ['en', 'en', 'en'] | True |
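A sketch of the laziness, assuming Django is installed: SimpleLazyObject defers the re.compile call until the first attribute access on the wrapped pattern:

import re
from django.utils.functional import SimpleLazyObject

pattern = SimpleLazyObject(lambda: re.compile(r'\d+'))
# Nothing is compiled yet; the first use below triggers compilation.
print(pattern.findall('a1b22'))  # ['1', '22']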
BaseRatingPrediction.__init__ | (self, train_file, test_file, output_file=None, similarity_metric='cosine', sep='\t',
output_sep='\t') |
This class is base for all rating prediction algorithms. Inherits the class Recommender
and implements / adds common methods and attributes for rating prediction approaches.
:param train_file: File which contains the train set. This file needs to have at least 3 columns
(user item feedback_value).
:type train_file: str
:param test_file: File which contains the test set. This file needs to have at least 3 columns
(user item feedback_value).
:type test_file: str, default None
:param output_file: Path of the file to write the final predictions
:type output_file: str, default None
:param similarity_metric: Pairwise metric used to compute user/item similarity (any metric accepted by scipy's pdist)
:type similarity_metric: str, default cosine
:param sep: Delimiter for input files
:type sep: str, default '\t'
:param output_sep: Delimiter for output file
:type output_sep: str, default '\t'
|
This class is base for all rating prediction algorithms. Inherits the class Recommender
and implements / adds common methods and attributes for rating prediction approaches. | def __init__(self, train_file, test_file, output_file=None, similarity_metric='cosine', sep='\t',
output_sep='\t'):
"""
This class is base for all rating prediction algorithms. Inherits the class Recommender
and implements / adds common methods and attributes for rating prediction approaches.
:param train_file: File which contains the train set. This file needs to have at least 3 columns
(user item feedback_value).
:type train_file: str
:param test_file: File which contains the test set. This file needs to have at least 3 columns
(user item feedback_value).
:type test_file: str, default None
:param output_file: Path of the file to write the final predictions
:type output_file: str, default None
:param similarity_metric: Pairwise metric used to compute user/item similarity (any metric accepted by scipy's pdist)
:type similarity_metric: str, default cosine
:param sep: Delimiter for input files
:type sep: str, default '\t'
:param output_sep: Delimiter for output file
:type output_sep: str, default '\t'
"""
self.train_file = train_file
self.test_file = test_file
self.similarity_metric = similarity_metric
self.output_file = output_file
self.sep = sep
self.output_sep = output_sep
# internal vars
self.item_to_item_id = {}
self.item_id_to_item = {}
self.user_to_user_id = {}
self.user_id_to_user = {}
self.train_set = None
self.test_set = None
self.users = None
self.items = None
self.matrix = None
self.evaluation_results = None
self.recommender_name = None
self.extra_info_header = None
self.predictions = [] | [
"def",
"__init__",
"(",
"self",
",",
"train_file",
",",
"test_file",
",",
"output_file",
"=",
"None",
",",
"similarity_metric",
"=",
"'cosine'",
",",
"sep",
"=",
"'\\t'",
",",
"output_sep",
"=",
"'\\t'",
")",
":",
"self",
".",
"train_file",
"=",
"train_file",
"self",
".",
"test_file",
"=",
"test_file",
"self",
".",
"similarity_metric",
"=",
"similarity_metric",
"self",
".",
"output_file",
"=",
"output_file",
"self",
".",
"sep",
"=",
"sep",
"self",
".",
"output_sep",
"=",
"output_sep",
"# internal vars",
"self",
".",
"item_to_item_id",
"=",
"{",
"}",
"self",
".",
"item_id_to_item",
"=",
"{",
"}",
"self",
".",
"user_to_user_id",
"=",
"{",
"}",
"self",
".",
"user_id_to_user",
"=",
"{",
"}",
"self",
".",
"train_set",
"=",
"None",
"self",
".",
"test_set",
"=",
"None",
"self",
".",
"users",
"=",
"None",
"self",
".",
"items",
"=",
"None",
"self",
".",
"matrix",
"=",
"None",
"self",
".",
"evaluation_results",
"=",
"None",
"self",
".",
"recommender_name",
"=",
"None",
"self",
".",
"extra_info_header",
"=",
"None",
"self",
".",
"predictions",
"=",
"[",
"]"
] | [
20,
4
] | [
68,
29
] | python | en | ['en', 'error', 'th'] | False |
BaseRatingPrediction.read_files | (self) |
Method to initialize recommender algorithm.
|
Method to initialize recommender algorithm. | def read_files(self):
"""
Method to initialize recommender algorithm.
"""
self.train_set = ReadFile(self.train_file, sep=self.sep).read()
if self.test_file is not None:
self.test_set = ReadFile(self.test_file).read()
self.users = sorted(set(list(self.train_set['users']) + list(self.test_set['users'])))
self.items = sorted(set(list(self.train_set['items']) + list(self.test_set['items'])))
else:
self.users = self.train_set['users']
self.items = self.train_set['items']
for i, item in enumerate(self.items):
self.item_to_item_id.update({item: i})
self.item_id_to_item.update({i: item})
for u, user in enumerate(self.users):
self.user_to_user_id.update({user: u})
self.user_id_to_user.update({u: user}) | [
"def",
"read_files",
"(",
"self",
")",
":",
"self",
".",
"train_set",
"=",
"ReadFile",
"(",
"self",
".",
"train_file",
",",
"sep",
"=",
"self",
".",
"sep",
")",
".",
"read",
"(",
")",
"if",
"self",
".",
"test_file",
"is",
"not",
"None",
":",
"self",
".",
"test_set",
"=",
"ReadFile",
"(",
"self",
".",
"test_file",
")",
".",
"read",
"(",
")",
"self",
".",
"users",
"=",
"sorted",
"(",
"set",
"(",
"list",
"(",
"self",
".",
"train_set",
"[",
"'users'",
"]",
")",
"+",
"list",
"(",
"self",
".",
"test_set",
"[",
"'users'",
"]",
")",
")",
")",
"self",
".",
"items",
"=",
"sorted",
"(",
"set",
"(",
"list",
"(",
"self",
".",
"train_set",
"[",
"'items'",
"]",
")",
"+",
"list",
"(",
"self",
".",
"test_set",
"[",
"'items'",
"]",
")",
")",
")",
"else",
":",
"self",
".",
"users",
"=",
"self",
".",
"train_set",
"[",
"'users'",
"]",
"self",
".",
"items",
"=",
"self",
".",
"train_set",
"[",
"'items'",
"]",
"for",
"i",
",",
"item",
"in",
"enumerate",
"(",
"self",
".",
"items",
")",
":",
"self",
".",
"item_to_item_id",
".",
"update",
"(",
"{",
"item",
":",
"i",
"}",
")",
"self",
".",
"item_id_to_item",
".",
"update",
"(",
"{",
"i",
":",
"item",
"}",
")",
"for",
"u",
",",
"user",
"in",
"enumerate",
"(",
"self",
".",
"users",
")",
":",
"self",
".",
"user_to_user_id",
".",
"update",
"(",
"{",
"user",
":",
"u",
"}",
")",
"self",
".",
"user_id_to_user",
".",
"update",
"(",
"{",
"u",
":",
"user",
"}",
")"
] | [
70,
4
] | [
91,
50
] | python | en | ['en', 'error', 'th'] | False |
BaseRatingPrediction.create_matrix | (self) |
Method to create a feedback matrix
|
Method to create a feedback matrix | def create_matrix(self):
"""
Method to create a feedback matrix
"""
self.matrix = np.zeros((len(self.users), len(self.items)))
for user in self.train_set['users']:
for item in self.train_set['feedback'][user]:
self.matrix[self.user_to_user_id[user]][self.item_to_item_id[item]] = \
self.train_set['feedback'][user][item] | [
"def",
"create_matrix",
"(",
"self",
")",
":",
"self",
".",
"matrix",
"=",
"np",
".",
"zeros",
"(",
"(",
"len",
"(",
"self",
".",
"users",
")",
",",
"len",
"(",
"self",
".",
"items",
")",
")",
")",
"for",
"user",
"in",
"self",
".",
"train_set",
"[",
"'users'",
"]",
":",
"for",
"item",
"in",
"self",
".",
"train_set",
"[",
"'feedback'",
"]",
"[",
"user",
"]",
":",
"self",
".",
"matrix",
"[",
"self",
".",
"user_to_user_id",
"[",
"user",
"]",
"]",
"[",
"self",
".",
"item_to_item_id",
"[",
"item",
"]",
"]",
"=",
"self",
".",
"train_set",
"[",
"'feedback'",
"]",
"[",
"user",
"]",
"[",
"item",
"]"
] | [
93,
4
] | [
104,
58
] | python | en | ['en', 'error', 'th'] | False |
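A toy version of the dense user-item matrix built above, with invented ids and ratings:

import numpy as np

users, items = ['u1', 'u2'], ['i1', 'i2', 'i3']
feedback = {'u1': {'i1': 5.0, 'i3': 3.0}, 'u2': {'i2': 4.0}}
user_id = {u: idx for idx, u in enumerate(users)}
item_id = {it: idx for idx, it in enumerate(items)}

matrix = np.zeros((len(users), len(items)))
for user, ratings in feedback.items():
    for item, value in ratings.items():
        matrix[user_id[user]][item_id[item]] = value
print(matrix)
# [[5. 0. 3.]
#  [0. 4. 0.]]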
BaseRatingPrediction.compute_similarity | (self, transpose=False) |
Method to compute a similarity matrix from original df_matrix
:param transpose: If True, calculate the similarity in a transpose matrix
:type transpose: bool, default False
|
Method to compute a similarity matrix from original df_matrix | def compute_similarity(self, transpose=False):
"""
Method to compute a similarity matrix from original df_matrix
:param transpose: If True, calculate the similarity in a transpose matrix
:type transpose: bool, default False
"""
# Calculate distance matrix
if transpose:
similarity_matrix = np.float32(squareform(pdist(self.matrix.T, self.similarity_metric)))
else:
similarity_matrix = np.float32(squareform(pdist(self.matrix, self.similarity_metric)))
# Remove NaNs
similarity_matrix[np.isnan(similarity_matrix)] = 1.0
# transform distances into similarities. Values in the matrix range from 0-1
similarity_matrix = (similarity_matrix.max() - similarity_matrix) / similarity_matrix.max()
return similarity_matrix | [
"def",
"compute_similarity",
"(",
"self",
",",
"transpose",
"=",
"False",
")",
":",
"# Calculate distance matrix",
"if",
"transpose",
":",
"similarity_matrix",
"=",
"np",
".",
"float32",
"(",
"squareform",
"(",
"pdist",
"(",
"self",
".",
"matrix",
".",
"T",
",",
"self",
".",
"similarity_metric",
")",
")",
")",
"else",
":",
"similarity_matrix",
"=",
"np",
".",
"float32",
"(",
"squareform",
"(",
"pdist",
"(",
"self",
".",
"matrix",
",",
"self",
".",
"similarity_metric",
")",
")",
")",
"# Remove NaNs",
"similarity_matrix",
"[",
"np",
".",
"isnan",
"(",
"similarity_matrix",
")",
"]",
"=",
"1.0",
"# transform distances in similarities. Values in matrix range from 0-1",
"similarity_matrix",
"=",
"(",
"similarity_matrix",
".",
"max",
"(",
")",
"-",
"similarity_matrix",
")",
"/",
"similarity_matrix",
".",
"max",
"(",
")",
"return",
"similarity_matrix"
] | [
106,
4
] | [
126,
32
] | python | en | ['en', 'error', 'th'] | False |
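The distance-to-similarity conversion above in isolation, on illustrative data: pdist returns condensed pairwise distances, squareform expands them to a square matrix, and rescaling by the maximum maps distance 0 to similarity 1:

import numpy as np
from scipy.spatial.distance import pdist, squareform

matrix = np.array([[1.0, 0.0], [1.0, 0.0], [0.0, 1.0]])
dist = squareform(pdist(matrix, 'cosine'))
dist[np.isnan(dist)] = 1.0              # guard against all-zero rows
sim = (dist.max() - dist) / dist.max()
print(np.round(sim, 2))                 # identical rows get similarity 1.0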
BaseRatingPrediction.evaluate | (self, metrics, verbose=True, as_table=False, table_sep='\t') |
Method to evaluate the final ranking
:param metrics: List of evaluation metrics
:type metrics: list, default ('MAE', 'RMSE')
:param verbose: Print the evaluation results
:type verbose: bool, default True
:param as_table: Print the evaluation results as table
:type as_table: bool, default False
:param table_sep: Delimiter for print results (only works with verbose=True and as_table=True)
:type table_sep: str, default '\t'
|
Method to evaluate the final ranking | def evaluate(self, metrics, verbose=True, as_table=False, table_sep='\t'):
"""
Method to evaluate the final ranking
:param metrics: List of evaluation metrics
:type metrics: list, default ('MAE', 'RMSE')
:param verbose: Print the evaluation results
:type verbose: bool, default True
:param as_table: Print the evaluation results as table
:type as_table: bool, default False
:param table_sep: Delimiter for print results (only works with verbose=True and as_table=True)
:type table_sep: str, default '\t'
"""
self.evaluation_results = {}
if metrics is None:
metrics = list(['MAE', 'RMSE'])
results = RatingPredictionEvaluation(verbose=verbose, as_table=as_table, table_sep=table_sep, metrics=metrics
).evaluate_recommender(predictions=self.predictions,
test_set=self.test_set)
for metric in metrics:
self.evaluation_results[metric.upper()] = results[metric.upper()] | [
"def",
"evaluate",
"(",
"self",
",",
"metrics",
",",
"verbose",
"=",
"True",
",",
"as_table",
"=",
"False",
",",
"table_sep",
"=",
"'\\t'",
")",
":",
"self",
".",
"evaluation_results",
"=",
"{",
"}",
"if",
"metrics",
"is",
"None",
":",
"metrics",
"=",
"list",
"(",
"[",
"'MAE'",
",",
"'RMSE'",
"]",
")",
"results",
"=",
"RatingPredictionEvaluation",
"(",
"verbose",
"=",
"verbose",
",",
"as_table",
"=",
"as_table",
",",
"table_sep",
"=",
"table_sep",
",",
"metrics",
"=",
"metrics",
")",
".",
"evaluate_recommender",
"(",
"predictions",
"=",
"self",
".",
"predictions",
",",
"test_set",
"=",
"self",
".",
"test_set",
")",
"for",
"metric",
"in",
"metrics",
":",
"self",
".",
"evaluation_results",
"[",
"metric",
".",
"upper",
"(",
")",
"]",
"=",
"results",
"[",
"metric",
".",
"upper",
"(",
")",
"]"
] | [
128,
4
] | [
156,
77
] | python | en | ['en', 'error', 'th'] | False |
BaseRatingPrediction.write_predictions | (self) |
Method to write final ranking
|
Method to write final ranking | def write_predictions(self):
"""
Method to write final ranking
"""
if self.output_file is not None:
WriteFile(self.output_file, data=self.predictions, sep=self.sep).write() | [
"def",
"write_predictions",
"(",
"self",
")",
":",
"if",
"self",
".",
"output_file",
"is",
"not",
"None",
":",
"WriteFile",
"(",
"self",
".",
"output_file",
",",
"data",
"=",
"self",
".",
"predictions",
",",
"sep",
"=",
"self",
".",
"sep",
")",
".",
"write",
"(",
")"
] | [
158,
4
] | [
165,
84
] | python | en | ['en', 'error', 'th'] | False |
BaseRatingPrediction.compute | (self, verbose=True) |
Method to run the recommender algorithm
:param verbose: Print the information about the recommender
:type verbose: bool, default True
|
Method to run the recommender algorithm | def compute(self, verbose=True):
"""
Method to run the recommender algorithm
:param verbose: Print the information about the recommender
:type verbose: bool, default True
"""
# read files
self.read_files()
# initialize empty predictions (Don't remove: important for Cross Validation)
self.predictions = []
if verbose:
test_info = None
main_info = {
'title': 'Rating Prediction > ' + self.recommender_name,
'n_users': len(self.train_set['users']),
'n_items': len(self.train_set['items']),
'n_interactions': self.train_set['number_interactions'],
'sparsity': self.train_set['sparsity']
}
if self.test_file is not None:
test_info = {
'n_users': len(self.test_set['users']),
'n_items': len(self.test_set['items']),
'n_interactions': self.test_set['number_interactions'],
'sparsity': self.test_set['sparsity']
}
print_header(main_info, test_info) | [
"def",
"compute",
"(",
"self",
",",
"verbose",
"=",
"True",
")",
":",
"# read files",
"self",
".",
"read_files",
"(",
")",
"# initialize empty predictions (Don't remove: important to Cross Validation)",
"self",
".",
"predictions",
"=",
"[",
"]",
"if",
"verbose",
":",
"test_info",
"=",
"None",
"main_info",
"=",
"{",
"'title'",
":",
"'Rating Prediction > '",
"+",
"self",
".",
"recommender_name",
",",
"'n_users'",
":",
"len",
"(",
"self",
".",
"train_set",
"[",
"'users'",
"]",
")",
",",
"'n_items'",
":",
"len",
"(",
"self",
".",
"train_set",
"[",
"'items'",
"]",
")",
",",
"'n_interactions'",
":",
"self",
".",
"train_set",
"[",
"'number_interactions'",
"]",
",",
"'sparsity'",
":",
"self",
".",
"train_set",
"[",
"'sparsity'",
"]",
"}",
"if",
"self",
".",
"test_file",
"is",
"not",
"None",
":",
"test_info",
"=",
"{",
"'n_users'",
":",
"len",
"(",
"self",
".",
"test_set",
"[",
"'users'",
"]",
")",
",",
"'n_items'",
":",
"len",
"(",
"self",
".",
"test_set",
"[",
"'items'",
"]",
")",
",",
"'n_interactions'",
":",
"self",
".",
"test_set",
"[",
"'number_interactions'",
"]",
",",
"'sparsity'",
":",
"self",
".",
"test_set",
"[",
"'sparsity'",
"]",
"}",
"print_header",
"(",
"main_info",
",",
"test_info",
")"
] | [
167,
4
] | [
201,
46
] | python | en | ['en', 'error', 'th'] | False |
is_password_usable | (encoded) |
Return True if this password wasn't generated by
User.set_unusable_password(), i.e. make_password(None).
|
Return True if this password wasn't generated by
User.set_unusable_password(), i.e. make_password(None).
| def is_password_usable(encoded):
"""
Return True if this password wasn't generated by
User.set_unusable_password(), i.e. make_password(None).
"""
return encoded is None or not encoded.startswith(UNUSABLE_PASSWORD_PREFIX) | [
"def",
"is_password_usable",
"(",
"encoded",
")",
":",
"return",
"encoded",
"is",
"None",
"or",
"not",
"encoded",
".",
"startswith",
"(",
"UNUSABLE_PASSWORD_PREFIX",
")"
] | [
22,
0
] | [
27,
78
] | python | en | ['en', 'error', 'th'] | False |
check_password | (password, encoded, setter=None, preferred='default') |
Return a boolean of whether the raw password matches the three
part encoded digest.
If setter is specified, it'll be called when you need to
regenerate the password.
|
Return a boolean of whether the raw password matches the three
part encoded digest. | def check_password(password, encoded, setter=None, preferred='default'):
"""
Return a boolean of whether the raw password matches the three
part encoded digest.
If setter is specified, it'll be called when you need to
regenerate the password.
"""
if password is None or not is_password_usable(encoded):
return False
preferred = get_hasher(preferred)
try:
hasher = identify_hasher(encoded)
except ValueError:
# encoded is gibberish or uses a hasher that's no longer installed.
return False
hasher_changed = hasher.algorithm != preferred.algorithm
must_update = hasher_changed or preferred.must_update(encoded)
is_correct = hasher.verify(password, encoded)
# If the hasher didn't change (we don't protect against enumeration if it
# does) and the password should get updated, try to close the timing gap
# between the work factor of the current encoded password and the default
# work factor.
if not is_correct and not hasher_changed and must_update:
hasher.harden_runtime(password, encoded)
if setter and is_correct and must_update:
setter(password)
return is_correct | [
"def",
"check_password",
"(",
"password",
",",
"encoded",
",",
"setter",
"=",
"None",
",",
"preferred",
"=",
"'default'",
")",
":",
"if",
"password",
"is",
"None",
"or",
"not",
"is_password_usable",
"(",
"encoded",
")",
":",
"return",
"False",
"preferred",
"=",
"get_hasher",
"(",
"preferred",
")",
"try",
":",
"hasher",
"=",
"identify_hasher",
"(",
"encoded",
")",
"except",
"ValueError",
":",
"# encoded is gibberish or uses a hasher that's no longer installed.",
"return",
"False",
"hasher_changed",
"=",
"hasher",
".",
"algorithm",
"!=",
"preferred",
".",
"algorithm",
"must_update",
"=",
"hasher_changed",
"or",
"preferred",
".",
"must_update",
"(",
"encoded",
")",
"is_correct",
"=",
"hasher",
".",
"verify",
"(",
"password",
",",
"encoded",
")",
"# If the hasher didn't change (we don't protect against enumeration if it",
"# does) and the password should get updated, try to close the timing gap",
"# between the work factor of the current encoded password and the default",
"# work factor.",
"if",
"not",
"is_correct",
"and",
"not",
"hasher_changed",
"and",
"must_update",
":",
"hasher",
".",
"harden_runtime",
"(",
"password",
",",
"encoded",
")",
"if",
"setter",
"and",
"is_correct",
"and",
"must_update",
":",
"setter",
"(",
"password",
")",
"return",
"is_correct"
] | [
30,
0
] | [
61,
21
] | python | en | ['en', 'error', 'th'] | False |
make_password | (password, salt=None, hasher='default') |
Turn a plain-text password into a hash for database storage
Same as encode() but generate a new random salt. If password is None then
return a concatenation of UNUSABLE_PASSWORD_PREFIX and a random string,
which disallows logins. Additional random string reduces chances of gaining
access to staff or superuser accounts. See ticket #20079 for more info.
|
Turn a plain-text password into a hash for database storage | def make_password(password, salt=None, hasher='default'):
"""
Turn a plain-text password into a hash for database storage
Same as encode() but generate a new random salt. If password is None then
return a concatenation of UNUSABLE_PASSWORD_PREFIX and a random string,
which disallows logins. Additional random string reduces chances of gaining
access to staff or superuser accounts. See ticket #20079 for more info.
"""
if password is None:
return UNUSABLE_PASSWORD_PREFIX + get_random_string(UNUSABLE_PASSWORD_SUFFIX_LENGTH)
if not isinstance(password, (bytes, str)):
raise TypeError(
'Password must be a string or bytes, got %s.'
% type(password).__qualname__
)
hasher = get_hasher(hasher)
salt = salt or hasher.salt()
return hasher.encode(password, salt) | [
"def",
"make_password",
"(",
"password",
",",
"salt",
"=",
"None",
",",
"hasher",
"=",
"'default'",
")",
":",
"if",
"password",
"is",
"None",
":",
"return",
"UNUSABLE_PASSWORD_PREFIX",
"+",
"get_random_string",
"(",
"UNUSABLE_PASSWORD_SUFFIX_LENGTH",
")",
"if",
"not",
"isinstance",
"(",
"password",
",",
"(",
"bytes",
",",
"str",
")",
")",
":",
"raise",
"TypeError",
"(",
"'Password must be a string or bytes, got %s.'",
"%",
"type",
"(",
"password",
")",
".",
"__qualname__",
")",
"hasher",
"=",
"get_hasher",
"(",
"hasher",
")",
"salt",
"=",
"salt",
"or",
"hasher",
".",
"salt",
"(",
")",
"return",
"hasher",
".",
"encode",
"(",
"password",
",",
"salt",
")"
] | [
64,
0
] | [
82,
40
] | python | en | ['en', 'error', 'th'] | False |
get_hasher | (algorithm='default') |
Return an instance of a loaded password hasher.
If algorithm is 'default', return the default hasher. Lazily import hashers
specified in the project's settings file if needed.
|
Return an instance of a loaded password hasher. | def get_hasher(algorithm='default'):
"""
Return an instance of a loaded password hasher.
If algorithm is 'default', return the default hasher. Lazily import hashers
specified in the project's settings file if needed.
"""
if hasattr(algorithm, 'algorithm'):
return algorithm
elif algorithm == 'default':
return get_hashers()[0]
else:
hashers = get_hashers_by_algorithm()
try:
return hashers[algorithm]
except KeyError:
raise ValueError("Unknown password hashing algorithm '%s'. "
"Did you specify it in the PASSWORD_HASHERS "
"setting?" % algorithm) | [
"def",
"get_hasher",
"(",
"algorithm",
"=",
"'default'",
")",
":",
"if",
"hasattr",
"(",
"algorithm",
",",
"'algorithm'",
")",
":",
"return",
"algorithm",
"elif",
"algorithm",
"==",
"'default'",
":",
"return",
"get_hashers",
"(",
")",
"[",
"0",
"]",
"else",
":",
"hashers",
"=",
"get_hashers_by_algorithm",
"(",
")",
"try",
":",
"return",
"hashers",
"[",
"algorithm",
"]",
"except",
"KeyError",
":",
"raise",
"ValueError",
"(",
"\"Unknown password hashing algorithm '%s'. \"",
"\"Did you specify it in the PASSWORD_HASHERS \"",
"\"setting?\"",
"%",
"algorithm",
")"
] | [
110,
0
] | [
130,
52
] | python | en | ['en', 'error', 'th'] | False |
identify_hasher | (encoded) |
Return an instance of a loaded password hasher.
Identify hasher algorithm by examining encoded hash, and call
get_hasher() to return hasher. Raise ValueError if
algorithm cannot be identified, or if hasher is not loaded.
|
Return an instance of a loaded password hasher. | def identify_hasher(encoded):
"""
Return an instance of a loaded password hasher.
Identify hasher algorithm by examining encoded hash, and call
get_hasher() to return hasher. Raise ValueError if
algorithm cannot be identified, or if hasher is not loaded.
"""
# Ancient versions of Django created plain MD5 passwords and accepted
# MD5 passwords with an empty salt.
if ((len(encoded) == 32 and '$' not in encoded) or
(len(encoded) == 37 and encoded.startswith('md5$$'))):
algorithm = 'unsalted_md5'
# Ancient versions of Django accepted SHA1 passwords with an empty salt.
elif len(encoded) == 46 and encoded.startswith('sha1$$'):
algorithm = 'unsalted_sha1'
else:
algorithm = encoded.split('$', 1)[0]
return get_hasher(algorithm) | [
"def",
"identify_hasher",
"(",
"encoded",
")",
":",
"# Ancient versions of Django created plain MD5 passwords and accepted",
"# MD5 passwords with an empty salt.",
"if",
"(",
"(",
"len",
"(",
"encoded",
")",
"==",
"32",
"and",
"'$'",
"not",
"in",
"encoded",
")",
"or",
"(",
"len",
"(",
"encoded",
")",
"==",
"37",
"and",
"encoded",
".",
"startswith",
"(",
"'md5$$'",
")",
")",
")",
":",
"algorithm",
"=",
"'unsalted_md5'",
"# Ancient versions of Django accepted SHA1 passwords with an empty salt.",
"elif",
"len",
"(",
"encoded",
")",
"==",
"46",
"and",
"encoded",
".",
"startswith",
"(",
"'sha1$$'",
")",
":",
"algorithm",
"=",
"'unsalted_sha1'",
"else",
":",
"algorithm",
"=",
"encoded",
".",
"split",
"(",
"'$'",
",",
"1",
")",
"[",
"0",
"]",
"return",
"get_hasher",
"(",
"algorithm",
")"
] | [
133,
0
] | [
151,
32
] | python | en | ['en', 'error', 'th'] | False |
mask_hash | (hash, show=6, char="*") |
Return the given hash, with only the first ``show`` characters shown. The
rest are masked with ``char`` for security reasons.
|
Return the given hash, with only the first ``show`` characters shown. The
rest are masked with ``char`` for security reasons.
| def mask_hash(hash, show=6, char="*"):
"""
Return the given hash, with only the first ``show`` characters shown. The
rest are masked with ``char`` for security reasons.
"""
masked = hash[:show]
masked += char * len(hash[show:])
return masked | [
"def",
"mask_hash",
"(",
"hash",
",",
"show",
"=",
"6",
",",
"char",
"=",
"\"*\"",
")",
":",
"masked",
"=",
"hash",
"[",
":",
"show",
"]",
"masked",
"+=",
"char",
"*",
"len",
"(",
"hash",
"[",
"show",
":",
"]",
")",
"return",
"masked"
] | [
154,
0
] | [
161,
17
] | python | en | ['en', 'error', 'th'] | False |
BasePasswordHasher.salt | (self) |
Generate a cryptographically secure nonce salt in ASCII with an entropy
of at least `salt_entropy` bits.
|
Generate a cryptographically secure nonce salt in ASCII with an entropy
of at least `salt_entropy` bits.
| def salt(self):
"""
Generate a cryptographically secure nonce salt in ASCII with an entropy
of at least `salt_entropy` bits.
"""
# Each character in the salt provides
# log_2(len(alphabet)) bits of entropy.
char_count = math.ceil(self.salt_entropy / math.log2(len(RANDOM_STRING_CHARS)))
return get_random_string(char_count, allowed_chars=RANDOM_STRING_CHARS) | [
"def",
"salt",
"(",
"self",
")",
":",
"# Each character in the salt provides",
"# log_2(len(alphabet)) bits of entropy.",
"char_count",
"=",
"math",
".",
"ceil",
"(",
"self",
".",
"salt_entropy",
"/",
"math",
".",
"log2",
"(",
"len",
"(",
"RANDOM_STRING_CHARS",
")",
")",
")",
"return",
"get_random_string",
"(",
"char_count",
",",
"allowed_chars",
"=",
"RANDOM_STRING_CHARS",
")"
] | [
197,
4
] | [
205,
79
] | python | en | ['en', 'error', 'th'] | False |
BasePasswordHasher.verify | (self, password, encoded) | Check if the given password is correct. | Check if the given password is correct. | def verify(self, password, encoded):
"""Check if the given password is correct."""
raise NotImplementedError('subclasses of BasePasswordHasher must provide a verify() method') | [
"def",
"verify",
"(",
"self",
",",
"password",
",",
"encoded",
")",
":",
"raise",
"NotImplementedError",
"(",
"'subclasses of BasePasswordHasher must provide a verify() method'",
")"
] | [
207,
4
] | [
209,
100
] | python | en | ['en', 'en', 'en'] | True |
BasePasswordHasher.encode | (self, password, salt) |
Create an encoded database value.
The result is normally formatted as "algorithm$salt$hash" and
must be fewer than 128 characters.
|
Create an encoded database value. | def encode(self, password, salt):
"""
Create an encoded database value.
The result is normally formatted as "algorithm$salt$hash" and
must be fewer than 128 characters.
"""
raise NotImplementedError('subclasses of BasePasswordHasher must provide an encode() method') | [
"def",
"encode",
"(",
"self",
",",
"password",
",",
"salt",
")",
":",
"raise",
"NotImplementedError",
"(",
"'subclasses of BasePasswordHasher must provide an encode() method'",
")"
] | [
211,
4
] | [
218,
101
] | python | en | ['en', 'error', 'th'] | False |
BasePasswordHasher.decode | (self, encoded) |
Return a decoded database value.
The result is a dictionary and should contain `algorithm`, `hash`, and
`salt`. Extra keys can be algorithm specific like `iterations` or
`work_factor`.
|
Return a decoded database value. | def decode(self, encoded):
"""
Return a decoded database value.
The result is a dictionary and should contain `algorithm`, `hash`, and
`salt`. Extra keys can be algorithm specific like `iterations` or
`work_factor`.
"""
raise NotImplementedError(
'subclasses of BasePasswordHasher must provide a decode() method.'
) | [
"def",
"decode",
"(",
"self",
",",
"encoded",
")",
":",
"raise",
"NotImplementedError",
"(",
"'subclasses of BasePasswordHasher must provide a decode() method.'",
")"
] | [
220,
4
] | [
230,
9
] | python | en | ['en', 'error', 'th'] | False |
BasePasswordHasher.safe_summary | (self, encoded) |
Return a summary of safe values.
The result is a dictionary and will be used where the password field
must be displayed to construct a safe representation of the password.
|
Return a summary of safe values. | def safe_summary(self, encoded):
"""
Return a summary of safe values.
The result is a dictionary and will be used where the password field
must be displayed to construct a safe representation of the password.
"""
raise NotImplementedError('subclasses of BasePasswordHasher must provide a safe_summary() method') | [
"def",
"safe_summary",
"(",
"self",
",",
"encoded",
")",
":",
"raise",
"NotImplementedError",
"(",
"'subclasses of BasePasswordHasher must provide a safe_summary() method'",
")"
] | [
232,
4
] | [
239,
106
] | python | en | ['en', 'error', 'th'] | False |
BasePasswordHasher.harden_runtime | (self, password, encoded) |
Bridge the runtime gap between the work factor supplied in `encoded`
and the work factor suggested by this hasher.
Taking PBKDF2 as an example, if `encoded` contains 20000 iterations and
`self.iterations` is 30000, this method should run password through
another 10000 iterations of PBKDF2. Similar approaches should exist
for any hasher that has a work factor. If not, this method should be
defined as a no-op to silence the warning.
|
Bridge the runtime gap between the work factor supplied in `encoded`
and the work factor suggested by this hasher. | def harden_runtime(self, password, encoded):
"""
Bridge the runtime gap between the work factor supplied in `encoded`
and the work factor suggested by this hasher.
Taking PBKDF2 as an example, if `encoded` contains 20000 iterations and
`self.iterations` is 30000, this method should run password through
another 10000 iterations of PBKDF2. Similar approaches should exist
for any hasher that has a work factor. If not, this method should be
defined as a no-op to silence the warning.
"""
warnings.warn('subclasses of BasePasswordHasher should provide a harden_runtime() method') | [
"def",
"harden_runtime",
"(",
"self",
",",
"password",
",",
"encoded",
")",
":",
"warnings",
".",
"warn",
"(",
"'subclasses of BasePasswordHasher should provide a harden_runtime() method'",
")"
] | [
244,
4
] | [
255,
98
] | python | en | ['en', 'error', 'th'] | False |
_suggest_semantic_version | (s) |
Try to suggest a semantic form for a version for which
_suggest_normalized_version couldn't come up with anything.
|
Try to suggest a semantic form for a version for which
_suggest_normalized_version couldn't come up with anything.
| def _suggest_semantic_version(s):
"""
Try to suggest a semantic form for a version for which
_suggest_normalized_version couldn't come up with anything.
"""
result = s.strip().lower()
for pat, repl in _REPLACEMENTS:
result = pat.sub(repl, result)
if not result:
result = '0.0.0'
# Now look for numeric prefix, and separate it out from
# the rest.
#import pdb; pdb.set_trace()
m = _NUMERIC_PREFIX.match(result)
if not m:
prefix = '0.0.0'
suffix = result
else:
prefix = m.groups()[0].split('.')
prefix = [int(i) for i in prefix]
while len(prefix) < 3:
prefix.append(0)
if len(prefix) == 3:
suffix = result[m.end():]
else:
suffix = '.'.join([str(i) for i in prefix[3:]]) + result[m.end():]
prefix = prefix[:3]
prefix = '.'.join([str(i) for i in prefix])
suffix = suffix.strip()
if suffix:
#import pdb; pdb.set_trace()
# massage the suffix.
for pat, repl in _SUFFIX_REPLACEMENTS:
suffix = pat.sub(repl, suffix)
if not suffix:
result = prefix
else:
sep = '-' if 'dev' in suffix else '+'
result = prefix + sep + suffix
if not is_semver(result):
result = None
return result | [
"def",
"_suggest_semantic_version",
"(",
"s",
")",
":",
"result",
"=",
"s",
".",
"strip",
"(",
")",
".",
"lower",
"(",
")",
"for",
"pat",
",",
"repl",
"in",
"_REPLACEMENTS",
":",
"result",
"=",
"pat",
".",
"sub",
"(",
"repl",
",",
"result",
")",
"if",
"not",
"result",
":",
"result",
"=",
"'0.0.0'",
"# Now look for numeric prefix, and separate it out from",
"# the rest.",
"#import pdb; pdb.set_trace()",
"m",
"=",
"_NUMERIC_PREFIX",
".",
"match",
"(",
"result",
")",
"if",
"not",
"m",
":",
"prefix",
"=",
"'0.0.0'",
"suffix",
"=",
"result",
"else",
":",
"prefix",
"=",
"m",
".",
"groups",
"(",
")",
"[",
"0",
"]",
".",
"split",
"(",
"'.'",
")",
"prefix",
"=",
"[",
"int",
"(",
"i",
")",
"for",
"i",
"in",
"prefix",
"]",
"while",
"len",
"(",
"prefix",
")",
"<",
"3",
":",
"prefix",
".",
"append",
"(",
"0",
")",
"if",
"len",
"(",
"prefix",
")",
"==",
"3",
":",
"suffix",
"=",
"result",
"[",
"m",
".",
"end",
"(",
")",
":",
"]",
"else",
":",
"suffix",
"=",
"'.'",
".",
"join",
"(",
"[",
"str",
"(",
"i",
")",
"for",
"i",
"in",
"prefix",
"[",
"3",
":",
"]",
"]",
")",
"+",
"result",
"[",
"m",
".",
"end",
"(",
")",
":",
"]",
"prefix",
"=",
"prefix",
"[",
":",
"3",
"]",
"prefix",
"=",
"'.'",
".",
"join",
"(",
"[",
"str",
"(",
"i",
")",
"for",
"i",
"in",
"prefix",
"]",
")",
"suffix",
"=",
"suffix",
".",
"strip",
"(",
")",
"if",
"suffix",
":",
"#import pdb; pdb.set_trace()",
"# massage the suffix.",
"for",
"pat",
",",
"repl",
"in",
"_SUFFIX_REPLACEMENTS",
":",
"suffix",
"=",
"pat",
".",
"sub",
"(",
"repl",
",",
"suffix",
")",
"if",
"not",
"suffix",
":",
"result",
"=",
"prefix",
"else",
":",
"sep",
"=",
"'-'",
"if",
"'dev'",
"in",
"suffix",
"else",
"'+'",
"result",
"=",
"prefix",
"+",
"sep",
"+",
"suffix",
"if",
"not",
"is_semver",
"(",
"result",
")",
":",
"result",
"=",
"None",
"return",
"result"
] | [
405,
0
] | [
448,
17
] | python | en | ['en', 'error', 'th'] | False |
_suggest_normalized_version | (s) | Suggest a normalized version close to the given version string.
If you have a version string that isn't rational (i.e. NormalizedVersion
doesn't like it) then you might be able to get an equivalent (or close)
rational version from this function.
This does a number of simple normalizations to the given string, based
on observation of versions currently in use on PyPI. Given a dump of
those versions during PyCon 2009, 4287 of them:
- 2312 (53.93%) match NormalizedVersion without change
with the automatic suggestion
- 3474 (81.04%) match when using this suggestion method
@param s {str} An irrational version string.
@returns A rational version string, or None, if couldn't determine one.
| Suggest a normalized version close to the given version string. | def _suggest_normalized_version(s):
"""Suggest a normalized version close to the given version string.
If you have a version string that isn't rational (i.e. NormalizedVersion
doesn't like it) then you might be able to get an equivalent (or close)
rational version from this function.
This does a number of simple normalizations to the given string, based
on observation of versions currently in use on PyPI. Given a dump of
those versions during PyCon 2009, 4287 of them:
- 2312 (53.93%) match NormalizedVersion without change
with the automatic suggestion
- 3474 (81.04%) match when using this suggestion method
@param s {str} An irrational version string.
@returns A rational version string, or None, if couldn't determine one.
"""
try:
_normalized_key(s)
return s # already rational
except UnsupportedVersionError:
pass
rs = s.lower()
# part of this could use maketrans
for orig, repl in (('-alpha', 'a'), ('-beta', 'b'), ('alpha', 'a'),
('beta', 'b'), ('rc', 'c'), ('-final', ''),
('-pre', 'c'),
('-release', ''), ('.release', ''), ('-stable', ''),
('+', '.'), ('_', '.'), (' ', ''), ('.final', ''),
('final', '')):
rs = rs.replace(orig, repl)
# if something ends with dev or pre, we add a 0
rs = re.sub(r"pre$", r"pre0", rs)
rs = re.sub(r"dev$", r"dev0", rs)
# if we have something like "b-2" or "a.2" at the end of the
# version, that is probably beta, alpha, etc
# let's remove the dash or dot
rs = re.sub(r"([abc]|rc)[\-\.](\d+)$", r"\1\2", rs)
# 1.0-dev-r371 -> 1.0.dev371
# 0.1-dev-r79 -> 0.1.dev79
rs = re.sub(r"[\-\.](dev)[\-\.]?r?(\d+)$", r".\1\2", rs)
# Clean: 2.0.a.3, 2.0.b1, 0.9.0~c1
rs = re.sub(r"[.~]?([abc])\.?", r"\1", rs)
# Clean: v0.3, v1.0
if rs.startswith('v'):
rs = rs[1:]
# Clean leading '0's on numbers.
#TODO: unintended side-effect on, e.g., "2003.05.09"
# PyPI stats: 77 (~2%) better
rs = re.sub(r"\b0+(\d+)(?!\d)", r"\1", rs)
# Clean a/b/c with no version. E.g. "1.0a" -> "1.0a0". Setuptools infers
# zero.
# PyPI stats: 245 (7.56%) better
rs = re.sub(r"(\d+[abc])$", r"\g<1>0", rs)
# the 'dev-rNNN' tag is a dev tag
rs = re.sub(r"\.?(dev-r|dev\.r)\.?(\d+)$", r".dev\2", rs)
# clean the - when used as a pre delimiter
rs = re.sub(r"-(a|b|c)(\d+)$", r"\1\2", rs)
# a terminal "dev" or "devel" can be changed into ".dev0"
rs = re.sub(r"[\.\-](dev|devel)$", r".dev0", rs)
# a terminal "dev" can be changed into ".dev0"
rs = re.sub(r"(?![\.\-])dev$", r".dev0", rs)
# a terminal "final" or "stable" can be removed
rs = re.sub(r"(final|stable)$", "", rs)
# The 'r' and the '-' tags are post release tags
# 0.4a1.r10 -> 0.4a1.post10
# 0.9.33-17222 -> 0.9.33.post17222
# 0.9.33-r17222 -> 0.9.33.post17222
rs = re.sub(r"\.?(r|-|-r)\.?(\d+)$", r".post\2", rs)
# Clean 'r' instead of 'dev' usage:
# 0.9.33+r17222 -> 0.9.33.dev17222
# 1.0dev123 -> 1.0.dev123
# 1.0.git123 -> 1.0.dev123
# 1.0.bzr123 -> 1.0.dev123
# 0.1a0dev.123 -> 0.1a0.dev123
# PyPI stats: ~150 (~4%) better
rs = re.sub(r"\.?(dev|git|bzr)\.?(\d+)$", r".dev\2", rs)
# Clean '.pre' (normalized from '-pre' above) instead of 'c' usage:
# 0.2.pre1 -> 0.2c1
# 0.2-c1 -> 0.2c1
# 1.0preview123 -> 1.0c123
# PyPI stats: ~21 (0.62%) better
rs = re.sub(r"\.?(pre|preview|-c)(\d+)$", r"c\g<2>", rs)
# Tcl/Tk uses "px" for their post release markers
rs = re.sub(r"p(\d+)$", r".post\1", rs)
try:
_normalized_key(rs)
except UnsupportedVersionError:
rs = None
return rs | [
"def",
"_suggest_normalized_version",
"(",
"s",
")",
":",
"try",
":",
"_normalized_key",
"(",
"s",
")",
"return",
"s",
"# already rational",
"except",
"UnsupportedVersionError",
":",
"pass",
"rs",
"=",
"s",
".",
"lower",
"(",
")",
"# part of this could use maketrans",
"for",
"orig",
",",
"repl",
"in",
"(",
"(",
"'-alpha'",
",",
"'a'",
")",
",",
"(",
"'-beta'",
",",
"'b'",
")",
",",
"(",
"'alpha'",
",",
"'a'",
")",
",",
"(",
"'beta'",
",",
"'b'",
")",
",",
"(",
"'rc'",
",",
"'c'",
")",
",",
"(",
"'-final'",
",",
"''",
")",
",",
"(",
"'-pre'",
",",
"'c'",
")",
",",
"(",
"'-release'",
",",
"''",
")",
",",
"(",
"'.release'",
",",
"''",
")",
",",
"(",
"'-stable'",
",",
"''",
")",
",",
"(",
"'+'",
",",
"'.'",
")",
",",
"(",
"'_'",
",",
"'.'",
")",
",",
"(",
"' '",
",",
"''",
")",
",",
"(",
"'.final'",
",",
"''",
")",
",",
"(",
"'final'",
",",
"''",
")",
")",
":",
"rs",
"=",
"rs",
".",
"replace",
"(",
"orig",
",",
"repl",
")",
"# if something ends with dev or pre, we add a 0",
"rs",
"=",
"re",
".",
"sub",
"(",
"r\"pre$\"",
",",
"r\"pre0\"",
",",
"rs",
")",
"rs",
"=",
"re",
".",
"sub",
"(",
"r\"dev$\"",
",",
"r\"dev0\"",
",",
"rs",
")",
"# if we have something like \"b-2\" or \"a.2\" at the end of the",
"# version, that is probably beta, alpha, etc",
"# let's remove the dash or dot",
"rs",
"=",
"re",
".",
"sub",
"(",
"r\"([abc]|rc)[\\-\\.](\\d+)$\"",
",",
"r\"\\1\\2\"",
",",
"rs",
")",
"# 1.0-dev-r371 -> 1.0.dev371",
"# 0.1-dev-r79 -> 0.1.dev79",
"rs",
"=",
"re",
".",
"sub",
"(",
"r\"[\\-\\.](dev)[\\-\\.]?r?(\\d+)$\"",
",",
"r\".\\1\\2\"",
",",
"rs",
")",
"# Clean: 2.0.a.3, 2.0.b1, 0.9.0~c1",
"rs",
"=",
"re",
".",
"sub",
"(",
"r\"[.~]?([abc])\\.?\"",
",",
"r\"\\1\"",
",",
"rs",
")",
"# Clean: v0.3, v1.0",
"if",
"rs",
".",
"startswith",
"(",
"'v'",
")",
":",
"rs",
"=",
"rs",
"[",
"1",
":",
"]",
"# Clean leading '0's on numbers.",
"#TODO: unintended side-effect on, e.g., \"2003.05.09\"",
"# PyPI stats: 77 (~2%) better",
"rs",
"=",
"re",
".",
"sub",
"(",
"r\"\\b0+(\\d+)(?!\\d)\"",
",",
"r\"\\1\"",
",",
"rs",
")",
"# Clean a/b/c with no version. E.g. \"1.0a\" -> \"1.0a0\". Setuptools infers",
"# zero.",
"# PyPI stats: 245 (7.56%) better",
"rs",
"=",
"re",
".",
"sub",
"(",
"r\"(\\d+[abc])$\"",
",",
"r\"\\g<1>0\"",
",",
"rs",
")",
"# the 'dev-rNNN' tag is a dev tag",
"rs",
"=",
"re",
".",
"sub",
"(",
"r\"\\.?(dev-r|dev\\.r)\\.?(\\d+)$\"",
",",
"r\".dev\\2\"",
",",
"rs",
")",
"# clean the - when used as a pre delimiter",
"rs",
"=",
"re",
".",
"sub",
"(",
"r\"-(a|b|c)(\\d+)$\"",
",",
"r\"\\1\\2\"",
",",
"rs",
")",
"# a terminal \"dev\" or \"devel\" can be changed into \".dev0\"",
"rs",
"=",
"re",
".",
"sub",
"(",
"r\"[\\.\\-](dev|devel)$\"",
",",
"r\".dev0\"",
",",
"rs",
")",
"# a terminal \"dev\" can be changed into \".dev0\"",
"rs",
"=",
"re",
".",
"sub",
"(",
"r\"(?![\\.\\-])dev$\"",
",",
"r\".dev0\"",
",",
"rs",
")",
"# a terminal \"final\" or \"stable\" can be removed",
"rs",
"=",
"re",
".",
"sub",
"(",
"r\"(final|stable)$\"",
",",
"\"\"",
",",
"rs",
")",
"# The 'r' and the '-' tags are post release tags",
"# 0.4a1.r10 -> 0.4a1.post10",
"# 0.9.33-17222 -> 0.9.33.post17222",
"# 0.9.33-r17222 -> 0.9.33.post17222",
"rs",
"=",
"re",
".",
"sub",
"(",
"r\"\\.?(r|-|-r)\\.?(\\d+)$\"",
",",
"r\".post\\2\"",
",",
"rs",
")",
"# Clean 'r' instead of 'dev' usage:",
"# 0.9.33+r17222 -> 0.9.33.dev17222",
"# 1.0dev123 -> 1.0.dev123",
"# 1.0.git123 -> 1.0.dev123",
"# 1.0.bzr123 -> 1.0.dev123",
"# 0.1a0dev.123 -> 0.1a0.dev123",
"# PyPI stats: ~150 (~4%) better",
"rs",
"=",
"re",
".",
"sub",
"(",
"r\"\\.?(dev|git|bzr)\\.?(\\d+)$\"",
",",
"r\".dev\\2\"",
",",
"rs",
")",
"# Clean '.pre' (normalized from '-pre' above) instead of 'c' usage:",
"# 0.2.pre1 -> 0.2c1",
"# 0.2-c1 -> 0.2c1",
"# 1.0preview123 -> 1.0c123",
"# PyPI stats: ~21 (0.62%) better",
"rs",
"=",
"re",
".",
"sub",
"(",
"r\"\\.?(pre|preview|-c)(\\d+)$\"",
",",
"r\"c\\g<2>\"",
",",
"rs",
")",
"# Tcl/Tk uses \"px\" for their post release markers",
"rs",
"=",
"re",
".",
"sub",
"(",
"r\"p(\\d+)$\"",
",",
"r\".post\\1\"",
",",
"rs",
")",
"try",
":",
"_normalized_key",
"(",
"rs",
")",
"except",
"UnsupportedVersionError",
":",
"rs",
"=",
"None",
"return",
"rs"
] | [
451,
0
] | [
559,
13
] | python | en | ['en', 'en', 'en'] | True |
Matcher.match | (self, version) |
Check if the provided version matches the constraints.
:param version: The version to match against this instance.
:type version: String or :class:`Version` instance.
|
Check if the provided version matches the constraints. | def match(self, version):
"""
Check if the provided version matches the constraints.
:param version: The version to match against this instance.
:type version: String or :class:`Version` instance.
"""
if isinstance(version, string_types):
version = self.version_class(version)
for operator, constraint, prefix in self._parts:
f = self._operators.get(operator)
if isinstance(f, string_types):
f = getattr(self, f)
if not f:
msg = ('%r not implemented '
'for %s' % (operator, self.__class__.__name__))
raise NotImplementedError(msg)
if not f(version, constraint, prefix):
return False
return True | [
"def",
"match",
"(",
"self",
",",
"version",
")",
":",
"if",
"isinstance",
"(",
"version",
",",
"string_types",
")",
":",
"version",
"=",
"self",
".",
"version_class",
"(",
"version",
")",
"for",
"operator",
",",
"constraint",
",",
"prefix",
"in",
"self",
".",
"_parts",
":",
"f",
"=",
"self",
".",
"_operators",
".",
"get",
"(",
"operator",
")",
"if",
"isinstance",
"(",
"f",
",",
"string_types",
")",
":",
"f",
"=",
"getattr",
"(",
"self",
",",
"f",
")",
"if",
"not",
"f",
":",
"msg",
"=",
"(",
"'%r not implemented '",
"'for %s'",
"%",
"(",
"operator",
",",
"self",
".",
"__class__",
".",
"__name__",
")",
")",
"raise",
"NotImplementedError",
"(",
"msg",
")",
"if",
"not",
"f",
"(",
"version",
",",
"constraint",
",",
"prefix",
")",
":",
"return",
"False",
"return",
"True"
] | [
128,
4
] | [
147,
19
] | python | en | ['en', 'error', 'th'] | False |
VersionScheme.is_valid_constraint_list | (self, s) |
Used for processing some metadata fields
|
Used for processing some metadata fields
| def is_valid_constraint_list(self, s):
"""
Used for processing some metadata fields
"""
# See issue #140. Be tolerant of a single trailing comma.
if s.endswith(','):
s = s[:-1]
return self.is_valid_matcher('dummy_name (%s)' % s) | [
"def",
"is_valid_constraint_list",
"(",
"self",
",",
"s",
")",
":",
"# See issue #140. Be tolerant of a single trailing comma.",
"if",
"s",
".",
"endswith",
"(",
"','",
")",
":",
"s",
"=",
"s",
"[",
":",
"-",
"1",
"]",
"return",
"self",
".",
"is_valid_matcher",
"(",
"'dummy_name (%s)'",
"%",
"s",
")"
] | [
708,
4
] | [
715,
59
] | python | en | ['en', 'error', 'th'] | False |
partition | (
predicate: Callable[[Any], bool],
iterator: Sequence[Any]
) | A stable, out-of-place partition. | A stable, out-of-place partition. | def partition(
predicate: Callable[[Any], bool],
iterator: Sequence[Any]
) -> Tuple[List[Any], List[Any]]:
"""A stable, out-of-place partition."""
results = ([], [])
for i in iterator:
results[int(predicate(i))].append(i)
# Returns trueList, falseList
return results[1], results[0] | [
"def",
"partition",
"(",
"predicate",
":",
"Callable",
"[",
"[",
"Any",
"]",
",",
"bool",
"]",
",",
"iterator",
":",
"Sequence",
"[",
"Any",
"]",
")",
"->",
"Tuple",
"[",
"List",
"[",
"Any",
"]",
",",
"List",
"[",
"Any",
"]",
"]",
":",
"results",
"=",
"(",
"[",
"]",
",",
"[",
"]",
")",
"for",
"i",
"in",
"iterator",
":",
"results",
"[",
"int",
"(",
"predicate",
"(",
"i",
")",
")",
"]",
".",
"append",
"(",
"i",
")",
"# Returns trueList, falseList",
"return",
"results",
"[",
"1",
"]",
",",
"results",
"[",
"0",
"]"
] | [
24,
0
] | [
35,
33
] | python | en | ['en', 'en', 'en'] | True |
fix_files | (
in_dir: pathlib.Path,
out_dir: pathlib.Path,
*,
transformer=dashboardCallTransformer(),
) | Duplicate the input dir to the output dir, fixing file method calls.
Preconditions:
* in_dir is a real directory
* out_dir is a real, empty directory
| Duplicate the input dir to the output dir, fixing file method calls. | def fix_files(
in_dir: pathlib.Path,
out_dir: pathlib.Path,
*,
transformer=dashboardCallTransformer(),
):
"""Duplicate the input dir to the output dir, fixing file method calls.
Preconditions:
* in_dir is a real directory
* out_dir is a real, empty directory
"""
pyfile_gen = (
pathlib.Path(os.path.join(root, f))
for root, _, files in os.walk(in_dir)
for f in files if os.path.splitext(f)[1] == ".py"
)
for fpath in pyfile_gen:
with open(fpath, 'r') as f:
src = f.read()
# Parse the code and insert method call fixes.
tree = cst.parse_module(src)
updated = tree.visit(transformer)
# Create the path and directory structure for the new file.
updated_path = out_dir.joinpath(fpath.relative_to(in_dir))
updated_path.parent.mkdir(parents=True, exist_ok=True)
# Generate the updated source file at the corresponding path.
with open(updated_path, 'w') as f:
f.write(updated.code) | [
"def",
"fix_files",
"(",
"in_dir",
":",
"pathlib",
".",
"Path",
",",
"out_dir",
":",
"pathlib",
".",
"Path",
",",
"*",
",",
"transformer",
"=",
"dashboardCallTransformer",
"(",
")",
",",
")",
":",
"pyfile_gen",
"=",
"(",
"pathlib",
".",
"Path",
"(",
"os",
".",
"path",
".",
"join",
"(",
"root",
",",
"f",
")",
")",
"for",
"root",
",",
"_",
",",
"files",
"in",
"os",
".",
"walk",
"(",
"in_dir",
")",
"for",
"f",
"in",
"files",
"if",
"os",
".",
"path",
".",
"splitext",
"(",
"f",
")",
"[",
"1",
"]",
"==",
"\".py\"",
")",
"for",
"fpath",
"in",
"pyfile_gen",
":",
"with",
"open",
"(",
"fpath",
",",
"'r'",
")",
"as",
"f",
":",
"src",
"=",
"f",
".",
"read",
"(",
")",
"# Parse the code and insert method call fixes.",
"tree",
"=",
"cst",
".",
"parse_module",
"(",
"src",
")",
"updated",
"=",
"tree",
".",
"visit",
"(",
"transformer",
")",
"# Create the path and directory structure for the new file.",
"updated_path",
"=",
"out_dir",
".",
"joinpath",
"(",
"fpath",
".",
"relative_to",
"(",
"in_dir",
")",
")",
"updated_path",
".",
"parent",
".",
"mkdir",
"(",
"parents",
"=",
"True",
",",
"exist_ok",
"=",
"True",
")",
"# Generate the updated source file at the corresponding path.",
"with",
"open",
"(",
"updated_path",
",",
"'w'",
")",
"as",
"f",
":",
"f",
".",
"write",
"(",
"updated",
".",
"code",
")"
] | [
90,
0
] | [
122,
33
] | python | en | ['en', 'su', 'en'] | True |
get_major_minor_version | () |
Return the major-minor version of the current Python as a string, e.g.
"3.7" or "3.10".
|
Return the major-minor version of the current Python as a string, e.g.
"3.7" or "3.10".
| def get_major_minor_version() -> str:
"""
Return the major-minor version of the current Python as a string, e.g.
"3.7" or "3.10".
"""
return "{}.{}".format(*sys.version_info) | [
"def",
"get_major_minor_version",
"(",
")",
"->",
"str",
":",
"return",
"\"{}.{}\"",
".",
"format",
"(",
"*",
"sys",
".",
"version_info",
")"
] | [
17,
0
] | [
22,
44
] | python | en | ['en', 'error', 'th'] | False |
MultiValueDict.__getitem__ | (self, key) |
Return the last data value for this key, or [] if it's an empty list;
raise KeyError if not found.
|
Return the last data value for this key, or [] if it's an empty list;
raise KeyError if not found.
| def __getitem__(self, key):
"""
Return the last data value for this key, or [] if it's an empty list;
raise KeyError if not found.
"""
try:
list_ = super().__getitem__(key)
except KeyError:
raise MultiValueDictKeyError(key)
try:
return list_[-1]
except IndexError:
return [] | [
"def",
"__getitem__",
"(",
"self",
",",
"key",
")",
":",
"try",
":",
"list_",
"=",
"super",
"(",
")",
".",
"__getitem__",
"(",
"key",
")",
"except",
"KeyError",
":",
"raise",
"MultiValueDictKeyError",
"(",
"key",
")",
"try",
":",
"return",
"list_",
"[",
"-",
"1",
"]",
"except",
"IndexError",
":",
"return",
"[",
"]"
] | [
69,
4
] | [
81,
21
] | python | en | ['en', 'error', 'th'] | False |
MultiValueDict.get | (self, key, default=None) |
Return the last data value for the passed key. If key doesn't exist
or value is an empty list, return `default`.
|
Return the last data value for the passed key. If key doesn't exist
or value is an empty list, return `default`.
| def get(self, key, default=None):
"""
Return the last data value for the passed key. If key doesn't exist
or value is an empty list, return `default`.
"""
try:
val = self[key]
except KeyError:
return default
if val == []:
return default
return val | [
"def",
"get",
"(",
"self",
",",
"key",
",",
"default",
"=",
"None",
")",
":",
"try",
":",
"val",
"=",
"self",
"[",
"key",
"]",
"except",
"KeyError",
":",
"return",
"default",
"if",
"val",
"==",
"[",
"]",
":",
"return",
"default",
"return",
"val"
] | [
109,
4
] | [
120,
18
] | python | en | ['en', 'error', 'th'] | False |
MultiValueDict._getlist | (self, key, default=None, force_list=False) |
Return a list of values for the key.
Used internally to manipulate values list. If force_list is True,
return a new copy of values.
|
Return a list of values for the key. | def _getlist(self, key, default=None, force_list=False):
"""
Return a list of values for the key.
Used internally to manipulate values list. If force_list is True,
return a new copy of values.
"""
try:
values = super().__getitem__(key)
except KeyError:
if default is None:
return []
return default
else:
if force_list:
values = list(values) if values is not None else None
return values | [
"def",
"_getlist",
"(",
"self",
",",
"key",
",",
"default",
"=",
"None",
",",
"force_list",
"=",
"False",
")",
":",
"try",
":",
"values",
"=",
"super",
"(",
")",
".",
"__getitem__",
"(",
"key",
")",
"except",
"KeyError",
":",
"if",
"default",
"is",
"None",
":",
"return",
"[",
"]",
"return",
"default",
"else",
":",
"if",
"force_list",
":",
"values",
"=",
"list",
"(",
"values",
")",
"if",
"values",
"is",
"not",
"None",
"else",
"None",
"return",
"values"
] | [
122,
4
] | [
138,
25
] | python | en | ['en', 'error', 'th'] | False |
MultiValueDict.getlist | (self, key, default=None) |
Return the list of values for the key. If key doesn't exist, return a
default value.
|
Return the list of values for the key. If key doesn't exist, return a
default value.
| def getlist(self, key, default=None):
"""
Return the list of values for the key. If key doesn't exist, return a
default value.
"""
return self._getlist(key, default, force_list=True) | [
"def",
"getlist",
"(",
"self",
",",
"key",
",",
"default",
"=",
"None",
")",
":",
"return",
"self",
".",
"_getlist",
"(",
"key",
",",
"default",
",",
"force_list",
"=",
"True",
")"
] | [
140,
4
] | [
145,
59
] | python | en | ['en', 'error', 'th'] | False |
MultiValueDict.appendlist | (self, key, value) | Append an item to the internal list associated with key. | Append an item to the internal list associated with key. | def appendlist(self, key, value):
"""Append an item to the internal list associated with key."""
self.setlistdefault(key).append(value) | [
"def",
"appendlist",
"(",
"self",
",",
"key",
",",
"value",
")",
":",
"self",
".",
"setlistdefault",
"(",
"key",
")",
".",
"append",
"(",
"value",
")"
] | [
166,
4
] | [
168,
46
] | python | en | ['en', 'en', 'en'] | True |
MultiValueDict.items | (self) |
Yield (key, value) pairs, where value is the last item in the list
associated with the key.
|
Yield (key, value) pairs, where value is the last item in the list
associated with the key.
| def items(self):
"""
Yield (key, value) pairs, where value is the last item in the list
associated with the key.
"""
for key in self:
yield key, self[key] | [
"def",
"items",
"(",
"self",
")",
":",
"for",
"key",
"in",
"self",
":",
"yield",
"key",
",",
"self",
"[",
"key",
"]"
] | [
170,
4
] | [
176,
32
] | python | en | ['en', 'error', 'th'] | False |
MultiValueDict.lists | (self) | Yield (key, list) pairs. | Yield (key, list) pairs. | def lists(self):
"""Yield (key, list) pairs."""
return iter(super().items()) | [
"def",
"lists",
"(",
"self",
")",
":",
"return",
"iter",
"(",
"super",
"(",
")",
".",
"items",
"(",
")",
")"
] | [
178,
4
] | [
180,
36
] | python | en | ['en', 'hmn', 'en'] | True |
MultiValueDict.values | (self) | Yield the last value on every key list. | Yield the last value on every key list. | def values(self):
"""Yield the last value on every key list."""
for key in self:
yield self[key] | [
"def",
"values",
"(",
"self",
")",
":",
"for",
"key",
"in",
"self",
":",
"yield",
"self",
"[",
"key",
"]"
] | [
182,
4
] | [
185,
27
] | python | en | ['en', 'en', 'en'] | True |
MultiValueDict.copy | (self) | Return a shallow copy of this object. | Return a shallow copy of this object. | def copy(self):
"""Return a shallow copy of this object."""
return copy.copy(self) | [
"def",
"copy",
"(",
"self",
")",
":",
"return",
"copy",
".",
"copy",
"(",
"self",
")"
] | [
187,
4
] | [
189,
30
] | python | en | ['en', 'en', 'en'] | True |
MultiValueDict.update | (self, *args, **kwargs) | Extend rather than replace existing key lists. | Extend rather than replace existing key lists. | def update(self, *args, **kwargs):
"""Extend rather than replace existing key lists."""
if len(args) > 1:
raise TypeError("update expected at most 1 argument, got %d" % len(args))
if args:
arg = args[0]
if isinstance(arg, MultiValueDict):
for key, value_list in arg.lists():
self.setlistdefault(key).extend(value_list)
else:
if isinstance(arg, Mapping):
arg = arg.items()
for key, value in arg:
self.setlistdefault(key).append(value)
for key, value in kwargs.items():
self.setlistdefault(key).append(value) | [
"def",
"update",
"(",
"self",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"if",
"len",
"(",
"args",
")",
">",
"1",
":",
"raise",
"TypeError",
"(",
"\"update expected at most 1 argument, got %d\"",
"%",
"len",
"(",
"args",
")",
")",
"if",
"args",
":",
"arg",
"=",
"args",
"[",
"0",
"]",
"if",
"isinstance",
"(",
"arg",
",",
"MultiValueDict",
")",
":",
"for",
"key",
",",
"value_list",
"in",
"arg",
".",
"lists",
"(",
")",
":",
"self",
".",
"setlistdefault",
"(",
"key",
")",
".",
"extend",
"(",
"value_list",
")",
"else",
":",
"if",
"isinstance",
"(",
"arg",
",",
"Mapping",
")",
":",
"arg",
"=",
"arg",
".",
"items",
"(",
")",
"for",
"key",
",",
"value",
"in",
"arg",
":",
"self",
".",
"setlistdefault",
"(",
"key",
")",
".",
"append",
"(",
"value",
")",
"for",
"key",
",",
"value",
"in",
"kwargs",
".",
"items",
"(",
")",
":",
"self",
".",
"setlistdefault",
"(",
"key",
")",
".",
"append",
"(",
"value",
")"
] | [
191,
4
] | [
206,
50
] | python | en | ['en', 'en', 'en'] | True |
MultiValueDict.dict | (self) | Return current object as a dict with singular values. | Return current object as a dict with singular values. | def dict(self):
"""Return current object as a dict with singular values."""
return {key: self[key] for key in self} | [
"def",
"dict",
"(",
"self",
")",
":",
"return",
"{",
"key",
":",
"self",
"[",
"key",
"]",
"for",
"key",
"in",
"self",
"}"
] | [
208,
4
] | [
210,
47
] | python | en | ['en', 'en', 'en'] | True |
DictWrapper.__getitem__ | (self, key) |
Retrieve the real value after stripping the prefix string (if
present). If the prefix is present, pass the value through self.func
before returning, otherwise return the raw value.
|
Retrieve the real value after stripping the prefix string (if
present). If the prefix is present, pass the value through self.func
before returning, otherwise return the raw value.
| def __getitem__(self, key):
"""
Retrieve the real value after stripping the prefix string (if
present). If the prefix is present, pass the value through self.func
before returning, otherwise return the raw value.
"""
use_func = key.startswith(self.prefix)
if use_func:
key = key[len(self.prefix):]
value = super().__getitem__(key)
if use_func:
return self.func(value)
return value | [
"def",
"__getitem__",
"(",
"self",
",",
"key",
")",
":",
"use_func",
"=",
"key",
".",
"startswith",
"(",
"self",
".",
"prefix",
")",
"if",
"use_func",
":",
"key",
"=",
"key",
"[",
"len",
"(",
"self",
".",
"prefix",
")",
":",
"]",
"value",
"=",
"super",
"(",
")",
".",
"__getitem__",
"(",
"key",
")",
"if",
"use_func",
":",
"return",
"self",
".",
"func",
"(",
"value",
")",
"return",
"value"
] | [
264,
4
] | [
276,
20
] | python | en | ['en', 'error', 'th'] | False |
create_reverse_many_to_one_manager | (superclass, rel) |
Create a manager for the reverse side of a many-to-one relation.
This manager subclasses another manager, generally the default manager of
the related model, and adds behaviors specific to many-to-one relations.
|
Create a manager for the reverse side of a many-to-one relation. | def create_reverse_many_to_one_manager(superclass, rel):
"""
Create a manager for the reverse side of a many-to-one relation.
This manager subclasses another manager, generally the default manager of
the related model, and adds behaviors specific to many-to-one relations.
"""
class RelatedManager(superclass):
def __init__(self, instance):
super().__init__()
self.instance = instance
self.model = rel.related_model
self.field = rel.field
self.core_filters = {self.field.name: instance}
def __call__(self, *, manager):
manager = getattr(self.model, manager)
manager_class = create_reverse_many_to_one_manager(manager.__class__, rel)
return manager_class(self.instance)
do_not_call_in_templates = True
def _apply_rel_filters(self, queryset):
"""
Filter the queryset for the instance this manager is bound to.
"""
db = self._db or router.db_for_read(self.model, instance=self.instance)
empty_strings_as_null = connections[db].features.interprets_empty_strings_as_nulls
queryset._add_hints(instance=self.instance)
if self._db:
queryset = queryset.using(self._db)
queryset._defer_next_filter = True
queryset = queryset.filter(**self.core_filters)
for field in self.field.foreign_related_fields:
val = getattr(self.instance, field.attname)
if val is None or (val == '' and empty_strings_as_null):
return queryset.none()
if self.field.many_to_one:
# Guard against field-like objects such as GenericRelation
# that abuse create_reverse_many_to_one_manager() with reverse
# one-to-many relationships instead and break known related
# objects assignment.
try:
target_field = self.field.target_field
except FieldError:
# The relationship has multiple target fields. Use a tuple
# for related object id.
rel_obj_id = tuple([
getattr(self.instance, target_field.attname)
for target_field in self.field.get_path_info()[-1].target_fields
])
else:
rel_obj_id = getattr(self.instance, target_field.attname)
queryset._known_related_objects = {self.field: {rel_obj_id: self.instance}}
return queryset
def _remove_prefetched_objects(self):
try:
self.instance._prefetched_objects_cache.pop(self.field.remote_field.get_cache_name())
except (AttributeError, KeyError):
pass # nothing to clear from cache
def get_queryset(self):
try:
return self.instance._prefetched_objects_cache[self.field.remote_field.get_cache_name()]
except (AttributeError, KeyError):
queryset = super().get_queryset()
return self._apply_rel_filters(queryset)
def get_prefetch_queryset(self, instances, queryset=None):
if queryset is None:
queryset = super().get_queryset()
queryset._add_hints(instance=instances[0])
queryset = queryset.using(queryset._db or self._db)
rel_obj_attr = self.field.get_local_related_value
instance_attr = self.field.get_foreign_related_value
instances_dict = {instance_attr(inst): inst for inst in instances}
query = {'%s__in' % self.field.name: instances}
queryset = queryset.filter(**query)
# Since we just bypassed this class' get_queryset(), we must manage
# the reverse relation manually.
for rel_obj in queryset:
instance = instances_dict[rel_obj_attr(rel_obj)]
setattr(rel_obj, self.field.name, instance)
cache_name = self.field.remote_field.get_cache_name()
return queryset, rel_obj_attr, instance_attr, False, cache_name, False
def add(self, *objs, bulk=True):
self._remove_prefetched_objects()
db = router.db_for_write(self.model, instance=self.instance)
def check_and_update_obj(obj):
if not isinstance(obj, self.model):
raise TypeError("'%s' instance expected, got %r" % (
self.model._meta.object_name, obj,
))
setattr(obj, self.field.name, self.instance)
if bulk:
pks = []
for obj in objs:
check_and_update_obj(obj)
if obj._state.adding or obj._state.db != db:
raise ValueError(
"%r instance isn't saved. Use bulk=False or save "
"the object first." % obj
)
pks.append(obj.pk)
self.model._base_manager.using(db).filter(pk__in=pks).update(**{
self.field.name: self.instance,
})
else:
with transaction.atomic(using=db, savepoint=False):
for obj in objs:
check_and_update_obj(obj)
obj.save()
add.alters_data = True
def create(self, **kwargs):
kwargs[self.field.name] = self.instance
db = router.db_for_write(self.model, instance=self.instance)
return super(RelatedManager, self.db_manager(db)).create(**kwargs)
create.alters_data = True
def get_or_create(self, **kwargs):
kwargs[self.field.name] = self.instance
db = router.db_for_write(self.model, instance=self.instance)
return super(RelatedManager, self.db_manager(db)).get_or_create(**kwargs)
get_or_create.alters_data = True
def update_or_create(self, **kwargs):
kwargs[self.field.name] = self.instance
db = router.db_for_write(self.model, instance=self.instance)
return super(RelatedManager, self.db_manager(db)).update_or_create(**kwargs)
update_or_create.alters_data = True
# remove() and clear() are only provided if the ForeignKey can have a value of null.
if rel.field.null:
def remove(self, *objs, bulk=True):
if not objs:
return
val = self.field.get_foreign_related_value(self.instance)
old_ids = set()
for obj in objs:
if not isinstance(obj, self.model):
raise TypeError("'%s' instance expected, got %r" % (
self.model._meta.object_name, obj,
))
# Is obj actually part of this descriptor set?
if self.field.get_local_related_value(obj) == val:
old_ids.add(obj.pk)
else:
raise self.field.remote_field.model.DoesNotExist(
"%r is not related to %r." % (obj, self.instance)
)
self._clear(self.filter(pk__in=old_ids), bulk)
remove.alters_data = True
def clear(self, *, bulk=True):
self._clear(self, bulk)
clear.alters_data = True
def _clear(self, queryset, bulk):
self._remove_prefetched_objects()
db = router.db_for_write(self.model, instance=self.instance)
queryset = queryset.using(db)
if bulk:
# `QuerySet.update()` is intrinsically atomic.
queryset.update(**{self.field.name: None})
else:
with transaction.atomic(using=db, savepoint=False):
for obj in queryset:
setattr(obj, self.field.name, None)
obj.save(update_fields=[self.field.name])
_clear.alters_data = True
def set(self, objs, *, bulk=True, clear=False):
# Force evaluation of `objs` in case it's a queryset whose value
# could be affected by `manager.clear()`. Refs #19816.
objs = tuple(objs)
if self.field.null:
db = router.db_for_write(self.model, instance=self.instance)
with transaction.atomic(using=db, savepoint=False):
if clear:
self.clear(bulk=bulk)
self.add(*objs, bulk=bulk)
else:
old_objs = set(self.using(db).all())
new_objs = []
for obj in objs:
if obj in old_objs:
old_objs.remove(obj)
else:
new_objs.append(obj)
self.remove(*old_objs, bulk=bulk)
self.add(*new_objs, bulk=bulk)
else:
self.add(*objs, bulk=bulk)
set.alters_data = True
return RelatedManager | [
"def",
"create_reverse_many_to_one_manager",
"(",
"superclass",
",",
"rel",
")",
":",
"class",
"RelatedManager",
"(",
"superclass",
")",
":",
"def",
"__init__",
"(",
"self",
",",
"instance",
")",
":",
"super",
"(",
")",
".",
"__init__",
"(",
")",
"self",
".",
"instance",
"=",
"instance",
"self",
".",
"model",
"=",
"rel",
".",
"related_model",
"self",
".",
"field",
"=",
"rel",
".",
"field",
"self",
".",
"core_filters",
"=",
"{",
"self",
".",
"field",
".",
"name",
":",
"instance",
"}",
"def",
"__call__",
"(",
"self",
",",
"*",
",",
"manager",
")",
":",
"manager",
"=",
"getattr",
"(",
"self",
".",
"model",
",",
"manager",
")",
"manager_class",
"=",
"create_reverse_many_to_one_manager",
"(",
"manager",
".",
"__class__",
",",
"rel",
")",
"return",
"manager_class",
"(",
"self",
".",
"instance",
")",
"do_not_call_in_templates",
"=",
"True",
"def",
"_apply_rel_filters",
"(",
"self",
",",
"queryset",
")",
":",
"\"\"\"\n Filter the queryset for the instance this manager is bound to.\n \"\"\"",
"db",
"=",
"self",
".",
"_db",
"or",
"router",
".",
"db_for_read",
"(",
"self",
".",
"model",
",",
"instance",
"=",
"self",
".",
"instance",
")",
"empty_strings_as_null",
"=",
"connections",
"[",
"db",
"]",
".",
"features",
".",
"interprets_empty_strings_as_nulls",
"queryset",
".",
"_add_hints",
"(",
"instance",
"=",
"self",
".",
"instance",
")",
"if",
"self",
".",
"_db",
":",
"queryset",
"=",
"queryset",
".",
"using",
"(",
"self",
".",
"_db",
")",
"queryset",
".",
"_defer_next_filter",
"=",
"True",
"queryset",
"=",
"queryset",
".",
"filter",
"(",
"*",
"*",
"self",
".",
"core_filters",
")",
"for",
"field",
"in",
"self",
".",
"field",
".",
"foreign_related_fields",
":",
"val",
"=",
"getattr",
"(",
"self",
".",
"instance",
",",
"field",
".",
"attname",
")",
"if",
"val",
"is",
"None",
"or",
"(",
"val",
"==",
"''",
"and",
"empty_strings_as_null",
")",
":",
"return",
"queryset",
".",
"none",
"(",
")",
"if",
"self",
".",
"field",
".",
"many_to_one",
":",
"# Guard against field-like objects such as GenericRelation",
"# that abuse create_reverse_many_to_one_manager() with reverse",
"# one-to-many relationships instead and break known related",
"# objects assignment.",
"try",
":",
"target_field",
"=",
"self",
".",
"field",
".",
"target_field",
"except",
"FieldError",
":",
"# The relationship has multiple target fields. Use a tuple",
"# for related object id.",
"rel_obj_id",
"=",
"tuple",
"(",
"[",
"getattr",
"(",
"self",
".",
"instance",
",",
"target_field",
".",
"attname",
")",
"for",
"target_field",
"in",
"self",
".",
"field",
".",
"get_path_info",
"(",
")",
"[",
"-",
"1",
"]",
".",
"target_fields",
"]",
")",
"else",
":",
"rel_obj_id",
"=",
"getattr",
"(",
"self",
".",
"instance",
",",
"target_field",
".",
"attname",
")",
"queryset",
".",
"_known_related_objects",
"=",
"{",
"self",
".",
"field",
":",
"{",
"rel_obj_id",
":",
"self",
".",
"instance",
"}",
"}",
"return",
"queryset",
"def",
"_remove_prefetched_objects",
"(",
"self",
")",
":",
"try",
":",
"self",
".",
"instance",
".",
"_prefetched_objects_cache",
".",
"pop",
"(",
"self",
".",
"field",
".",
"remote_field",
".",
"get_cache_name",
"(",
")",
")",
"except",
"(",
"AttributeError",
",",
"KeyError",
")",
":",
"pass",
"# nothing to clear from cache",
"def",
"get_queryset",
"(",
"self",
")",
":",
"try",
":",
"return",
"self",
".",
"instance",
".",
"_prefetched_objects_cache",
"[",
"self",
".",
"field",
".",
"remote_field",
".",
"get_cache_name",
"(",
")",
"]",
"except",
"(",
"AttributeError",
",",
"KeyError",
")",
":",
"queryset",
"=",
"super",
"(",
")",
".",
"get_queryset",
"(",
")",
"return",
"self",
".",
"_apply_rel_filters",
"(",
"queryset",
")",
"def",
"get_prefetch_queryset",
"(",
"self",
",",
"instances",
",",
"queryset",
"=",
"None",
")",
":",
"if",
"queryset",
"is",
"None",
":",
"queryset",
"=",
"super",
"(",
")",
".",
"get_queryset",
"(",
")",
"queryset",
".",
"_add_hints",
"(",
"instance",
"=",
"instances",
"[",
"0",
"]",
")",
"queryset",
"=",
"queryset",
".",
"using",
"(",
"queryset",
".",
"_db",
"or",
"self",
".",
"_db",
")",
"rel_obj_attr",
"=",
"self",
".",
"field",
".",
"get_local_related_value",
"instance_attr",
"=",
"self",
".",
"field",
".",
"get_foreign_related_value",
"instances_dict",
"=",
"{",
"instance_attr",
"(",
"inst",
")",
":",
"inst",
"for",
"inst",
"in",
"instances",
"}",
"query",
"=",
"{",
"'%s__in'",
"%",
"self",
".",
"field",
".",
"name",
":",
"instances",
"}",
"queryset",
"=",
"queryset",
".",
"filter",
"(",
"*",
"*",
"query",
")",
"# Since we just bypassed this class' get_queryset(), we must manage",
"# the reverse relation manually.",
"for",
"rel_obj",
"in",
"queryset",
":",
"instance",
"=",
"instances_dict",
"[",
"rel_obj_attr",
"(",
"rel_obj",
")",
"]",
"setattr",
"(",
"rel_obj",
",",
"self",
".",
"field",
".",
"name",
",",
"instance",
")",
"cache_name",
"=",
"self",
".",
"field",
".",
"remote_field",
".",
"get_cache_name",
"(",
")",
"return",
"queryset",
",",
"rel_obj_attr",
",",
"instance_attr",
",",
"False",
",",
"cache_name",
",",
"False",
"def",
"add",
"(",
"self",
",",
"*",
"objs",
",",
"bulk",
"=",
"True",
")",
":",
"self",
".",
"_remove_prefetched_objects",
"(",
")",
"db",
"=",
"router",
".",
"db_for_write",
"(",
"self",
".",
"model",
",",
"instance",
"=",
"self",
".",
"instance",
")",
"def",
"check_and_update_obj",
"(",
"obj",
")",
":",
"if",
"not",
"isinstance",
"(",
"obj",
",",
"self",
".",
"model",
")",
":",
"raise",
"TypeError",
"(",
"\"'%s' instance expected, got %r\"",
"%",
"(",
"self",
".",
"model",
".",
"_meta",
".",
"object_name",
",",
"obj",
",",
")",
")",
"setattr",
"(",
"obj",
",",
"self",
".",
"field",
".",
"name",
",",
"self",
".",
"instance",
")",
"if",
"bulk",
":",
"pks",
"=",
"[",
"]",
"for",
"obj",
"in",
"objs",
":",
"check_and_update_obj",
"(",
"obj",
")",
"if",
"obj",
".",
"_state",
".",
"adding",
"or",
"obj",
".",
"_state",
".",
"db",
"!=",
"db",
":",
"raise",
"ValueError",
"(",
"\"%r instance isn't saved. Use bulk=False or save \"",
"\"the object first.\"",
"%",
"obj",
")",
"pks",
".",
"append",
"(",
"obj",
".",
"pk",
")",
"self",
".",
"model",
".",
"_base_manager",
".",
"using",
"(",
"db",
")",
".",
"filter",
"(",
"pk__in",
"=",
"pks",
")",
".",
"update",
"(",
"*",
"*",
"{",
"self",
".",
"field",
".",
"name",
":",
"self",
".",
"instance",
",",
"}",
")",
"else",
":",
"with",
"transaction",
".",
"atomic",
"(",
"using",
"=",
"db",
",",
"savepoint",
"=",
"False",
")",
":",
"for",
"obj",
"in",
"objs",
":",
"check_and_update_obj",
"(",
"obj",
")",
"obj",
".",
"save",
"(",
")",
"add",
".",
"alters_data",
"=",
"True",
"def",
"create",
"(",
"self",
",",
"*",
"*",
"kwargs",
")",
":",
"kwargs",
"[",
"self",
".",
"field",
".",
"name",
"]",
"=",
"self",
".",
"instance",
"db",
"=",
"router",
".",
"db_for_write",
"(",
"self",
".",
"model",
",",
"instance",
"=",
"self",
".",
"instance",
")",
"return",
"super",
"(",
"RelatedManager",
",",
"self",
".",
"db_manager",
"(",
"db",
")",
")",
".",
"create",
"(",
"*",
"*",
"kwargs",
")",
"create",
".",
"alters_data",
"=",
"True",
"def",
"get_or_create",
"(",
"self",
",",
"*",
"*",
"kwargs",
")",
":",
"kwargs",
"[",
"self",
".",
"field",
".",
"name",
"]",
"=",
"self",
".",
"instance",
"db",
"=",
"router",
".",
"db_for_write",
"(",
"self",
".",
"model",
",",
"instance",
"=",
"self",
".",
"instance",
")",
"return",
"super",
"(",
"RelatedManager",
",",
"self",
".",
"db_manager",
"(",
"db",
")",
")",
".",
"get_or_create",
"(",
"*",
"*",
"kwargs",
")",
"get_or_create",
".",
"alters_data",
"=",
"True",
"def",
"update_or_create",
"(",
"self",
",",
"*",
"*",
"kwargs",
")",
":",
"kwargs",
"[",
"self",
".",
"field",
".",
"name",
"]",
"=",
"self",
".",
"instance",
"db",
"=",
"router",
".",
"db_for_write",
"(",
"self",
".",
"model",
",",
"instance",
"=",
"self",
".",
"instance",
")",
"return",
"super",
"(",
"RelatedManager",
",",
"self",
".",
"db_manager",
"(",
"db",
")",
")",
".",
"update_or_create",
"(",
"*",
"*",
"kwargs",
")",
"update_or_create",
".",
"alters_data",
"=",
"True",
"# remove() and clear() are only provided if the ForeignKey can have a value of null.",
"if",
"rel",
".",
"field",
".",
"null",
":",
"def",
"remove",
"(",
"self",
",",
"*",
"objs",
",",
"bulk",
"=",
"True",
")",
":",
"if",
"not",
"objs",
":",
"return",
"val",
"=",
"self",
".",
"field",
".",
"get_foreign_related_value",
"(",
"self",
".",
"instance",
")",
"old_ids",
"=",
"set",
"(",
")",
"for",
"obj",
"in",
"objs",
":",
"if",
"not",
"isinstance",
"(",
"obj",
",",
"self",
".",
"model",
")",
":",
"raise",
"TypeError",
"(",
"\"'%s' instance expected, got %r\"",
"%",
"(",
"self",
".",
"model",
".",
"_meta",
".",
"object_name",
",",
"obj",
",",
")",
")",
"# Is obj actually part of this descriptor set?",
"if",
"self",
".",
"field",
".",
"get_local_related_value",
"(",
"obj",
")",
"==",
"val",
":",
"old_ids",
".",
"add",
"(",
"obj",
".",
"pk",
")",
"else",
":",
"raise",
"self",
".",
"field",
".",
"remote_field",
".",
"model",
".",
"DoesNotExist",
"(",
"\"%r is not related to %r.\"",
"%",
"(",
"obj",
",",
"self",
".",
"instance",
")",
")",
"self",
".",
"_clear",
"(",
"self",
".",
"filter",
"(",
"pk__in",
"=",
"old_ids",
")",
",",
"bulk",
")",
"remove",
".",
"alters_data",
"=",
"True",
"def",
"clear",
"(",
"self",
",",
"*",
",",
"bulk",
"=",
"True",
")",
":",
"self",
".",
"_clear",
"(",
"self",
",",
"bulk",
")",
"clear",
".",
"alters_data",
"=",
"True",
"def",
"_clear",
"(",
"self",
",",
"queryset",
",",
"bulk",
")",
":",
"self",
".",
"_remove_prefetched_objects",
"(",
")",
"db",
"=",
"router",
".",
"db_for_write",
"(",
"self",
".",
"model",
",",
"instance",
"=",
"self",
".",
"instance",
")",
"queryset",
"=",
"queryset",
".",
"using",
"(",
"db",
")",
"if",
"bulk",
":",
"# `QuerySet.update()` is intrinsically atomic.",
"queryset",
".",
"update",
"(",
"*",
"*",
"{",
"self",
".",
"field",
".",
"name",
":",
"None",
"}",
")",
"else",
":",
"with",
"transaction",
".",
"atomic",
"(",
"using",
"=",
"db",
",",
"savepoint",
"=",
"False",
")",
":",
"for",
"obj",
"in",
"queryset",
":",
"setattr",
"(",
"obj",
",",
"self",
".",
"field",
".",
"name",
",",
"None",
")",
"obj",
".",
"save",
"(",
"update_fields",
"=",
"[",
"self",
".",
"field",
".",
"name",
"]",
")",
"_clear",
".",
"alters_data",
"=",
"True",
"def",
"set",
"(",
"self",
",",
"objs",
",",
"*",
",",
"bulk",
"=",
"True",
",",
"clear",
"=",
"False",
")",
":",
"# Force evaluation of `objs` in case it's a queryset whose value",
"# could be affected by `manager.clear()`. Refs #19816.",
"objs",
"=",
"tuple",
"(",
"objs",
")",
"if",
"self",
".",
"field",
".",
"null",
":",
"db",
"=",
"router",
".",
"db_for_write",
"(",
"self",
".",
"model",
",",
"instance",
"=",
"self",
".",
"instance",
")",
"with",
"transaction",
".",
"atomic",
"(",
"using",
"=",
"db",
",",
"savepoint",
"=",
"False",
")",
":",
"if",
"clear",
":",
"self",
".",
"clear",
"(",
"bulk",
"=",
"bulk",
")",
"self",
".",
"add",
"(",
"*",
"objs",
",",
"bulk",
"=",
"bulk",
")",
"else",
":",
"old_objs",
"=",
"set",
"(",
"self",
".",
"using",
"(",
"db",
")",
".",
"all",
"(",
")",
")",
"new_objs",
"=",
"[",
"]",
"for",
"obj",
"in",
"objs",
":",
"if",
"obj",
"in",
"old_objs",
":",
"old_objs",
".",
"remove",
"(",
"obj",
")",
"else",
":",
"new_objs",
".",
"append",
"(",
"obj",
")",
"self",
".",
"remove",
"(",
"*",
"old_objs",
",",
"bulk",
"=",
"bulk",
")",
"self",
".",
"add",
"(",
"*",
"new_objs",
",",
"bulk",
"=",
"bulk",
")",
"else",
":",
"self",
".",
"add",
"(",
"*",
"objs",
",",
"bulk",
"=",
"bulk",
")",
"set",
".",
"alters_data",
"=",
"True",
"return",
"RelatedManager"
] | [
550,
0
] | [
757,
25
] | python | en | ['en', 'error', 'th'] | False |
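A minimal usage sketch of the reverse many-to-one RelatedManager whose record closes above. Blog and Entry are assumed example models (not part of this dataset); Entry.blog is taken to be a nullable ForeignKey(Blog), so remove() and clear() are generated as well.

# Hedged sketch: Blog/Entry and their fields are hypothetical.
blog = Blog.objects.create(name="example")
entry = Entry.objects.create(blog=None)
blog.entry_set.add(entry)                # bulk=True: a single UPDATE over saved rows
blog.entry_set.create()                  # INSERT with entry.blog preset to blog
blog.entry_set.remove(entry, bulk=True)  # sets entry.blog_id = NULL (field is null=True)
blog.entry_set.set([entry])              # diffs old vs. new, then remove()/add()
# add() on an unsaved instance raises ValueError: use bulk=False or save it first.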
create_forward_many_to_many_manager | (superclass, rel, reverse) |
Create a manager for either side of a many-to-many relation.
This manager subclasses another manager, generally the default manager of
the related model, and adds behaviors specific to many-to-many relations.
|
Create a manager for either side of a many-to-many relation. | def create_forward_many_to_many_manager(superclass, rel, reverse):
"""
Create a manager for either side of a many-to-many relation.
This manager subclasses another manager, generally the default manager of
the related model, and adds behaviors specific to many-to-many relations.
"""
class ManyRelatedManager(superclass):
def __init__(self, instance=None):
super().__init__()
self.instance = instance
if not reverse:
self.model = rel.model
self.query_field_name = rel.field.related_query_name()
self.prefetch_cache_name = rel.field.name
self.source_field_name = rel.field.m2m_field_name()
self.target_field_name = rel.field.m2m_reverse_field_name()
self.symmetrical = rel.symmetrical
else:
self.model = rel.related_model
self.query_field_name = rel.field.name
self.prefetch_cache_name = rel.field.related_query_name()
self.source_field_name = rel.field.m2m_reverse_field_name()
self.target_field_name = rel.field.m2m_field_name()
self.symmetrical = False
self.through = rel.through
self.reverse = reverse
self.source_field = self.through._meta.get_field(self.source_field_name)
self.target_field = self.through._meta.get_field(self.target_field_name)
self.core_filters = {}
self.pk_field_names = {}
for lh_field, rh_field in self.source_field.related_fields:
core_filter_key = '%s__%s' % (self.query_field_name, rh_field.name)
self.core_filters[core_filter_key] = getattr(instance, rh_field.attname)
self.pk_field_names[lh_field.name] = rh_field.name
self.related_val = self.source_field.get_foreign_related_value(instance)
if None in self.related_val:
raise ValueError('"%r" needs to have a value for field "%s" before '
'this many-to-many relationship can be used.' %
(instance, self.pk_field_names[self.source_field_name]))
# Even if this relation is not to the pk, we still require a pk value.
# The expectation is that the instance has already been saved to the DB,
# although having a pk value isn't a guarantee of that.
if instance.pk is None:
raise ValueError("%r instance needs to have a primary key value before "
"a many-to-many relationship can be used." %
instance.__class__.__name__)
def __call__(self, *, manager):
manager = getattr(self.model, manager)
manager_class = create_forward_many_to_many_manager(manager.__class__, rel, reverse)
return manager_class(instance=self.instance)
do_not_call_in_templates = True
def _build_remove_filters(self, removed_vals):
filters = Q(**{self.source_field_name: self.related_val})
# No need to add a subquery condition if removed_vals is a QuerySet without
# filters.
removed_vals_filters = (not isinstance(removed_vals, QuerySet) or
removed_vals._has_filters())
if removed_vals_filters:
filters &= Q(**{'%s__in' % self.target_field_name: removed_vals})
if self.symmetrical:
symmetrical_filters = Q(**{self.target_field_name: self.related_val})
if removed_vals_filters:
symmetrical_filters &= Q(
**{'%s__in' % self.source_field_name: removed_vals})
filters |= symmetrical_filters
return filters
def _apply_rel_filters(self, queryset):
"""
Filter the queryset for the instance this manager is bound to.
"""
queryset._add_hints(instance=self.instance)
if self._db:
queryset = queryset.using(self._db)
queryset._defer_next_filter = True
return queryset._next_is_sticky().filter(**self.core_filters)
def _remove_prefetched_objects(self):
try:
self.instance._prefetched_objects_cache.pop(self.prefetch_cache_name)
except (AttributeError, KeyError):
pass # nothing to clear from cache
def get_queryset(self):
try:
return self.instance._prefetched_objects_cache[self.prefetch_cache_name]
except (AttributeError, KeyError):
queryset = super().get_queryset()
return self._apply_rel_filters(queryset)
def get_prefetch_queryset(self, instances, queryset=None):
if queryset is None:
queryset = super().get_queryset()
queryset._add_hints(instance=instances[0])
queryset = queryset.using(queryset._db or self._db)
query = {'%s__in' % self.query_field_name: instances}
queryset = queryset._next_is_sticky().filter(**query)
# M2M: need to annotate the query in order to get the primary model
# that the secondary model was actually related to. We know that
# there will already be a join on the join table, so we can just add
# the select.
# For non-autocreated 'through' models, can't assume we are
# dealing with PK values.
fk = self.through._meta.get_field(self.source_field_name)
join_table = fk.model._meta.db_table
connection = connections[queryset.db]
qn = connection.ops.quote_name
queryset = queryset.extra(select={
'_prefetch_related_val_%s' % f.attname:
'%s.%s' % (qn(join_table), qn(f.column)) for f in fk.local_related_fields})
return (
queryset,
lambda result: tuple(
getattr(result, '_prefetch_related_val_%s' % f.attname)
for f in fk.local_related_fields
),
lambda inst: tuple(
f.get_db_prep_value(getattr(inst, f.attname), connection)
for f in fk.foreign_related_fields
),
False,
self.prefetch_cache_name,
False,
)
def add(self, *objs, through_defaults=None):
self._remove_prefetched_objects()
db = router.db_for_write(self.through, instance=self.instance)
with transaction.atomic(using=db, savepoint=False):
self._add_items(
self.source_field_name, self.target_field_name, *objs,
through_defaults=through_defaults,
)
# If this is a symmetrical m2m relation to self, add the mirror
# entry in the m2m table.
if self.symmetrical:
self._add_items(
self.target_field_name,
self.source_field_name,
*objs,
through_defaults=through_defaults,
)
add.alters_data = True
def remove(self, *objs):
self._remove_prefetched_objects()
self._remove_items(self.source_field_name, self.target_field_name, *objs)
remove.alters_data = True
def clear(self):
db = router.db_for_write(self.through, instance=self.instance)
with transaction.atomic(using=db, savepoint=False):
signals.m2m_changed.send(
sender=self.through, action="pre_clear",
instance=self.instance, reverse=self.reverse,
model=self.model, pk_set=None, using=db,
)
self._remove_prefetched_objects()
filters = self._build_remove_filters(super().get_queryset().using(db))
self.through._default_manager.using(db).filter(filters).delete()
signals.m2m_changed.send(
sender=self.through, action="post_clear",
instance=self.instance, reverse=self.reverse,
model=self.model, pk_set=None, using=db,
)
clear.alters_data = True
def set(self, objs, *, clear=False, through_defaults=None):
# Force evaluation of `objs` in case it's a queryset whose value
# could be affected by `manager.clear()`. Refs #19816.
objs = tuple(objs)
db = router.db_for_write(self.through, instance=self.instance)
with transaction.atomic(using=db, savepoint=False):
if clear:
self.clear()
self.add(*objs, through_defaults=through_defaults)
else:
old_ids = set(self.using(db).values_list(self.target_field.target_field.attname, flat=True))
new_objs = []
for obj in objs:
fk_val = (
self.target_field.get_foreign_related_value(obj)[0]
if isinstance(obj, self.model)
else self.target_field.get_prep_value(obj)
)
if fk_val in old_ids:
old_ids.remove(fk_val)
else:
new_objs.append(obj)
self.remove(*old_ids)
self.add(*new_objs, through_defaults=through_defaults)
set.alters_data = True
def create(self, *, through_defaults=None, **kwargs):
db = router.db_for_write(self.instance.__class__, instance=self.instance)
new_obj = super(ManyRelatedManager, self.db_manager(db)).create(**kwargs)
self.add(new_obj, through_defaults=through_defaults)
return new_obj
create.alters_data = True
def get_or_create(self, *, through_defaults=None, **kwargs):
db = router.db_for_write(self.instance.__class__, instance=self.instance)
obj, created = super(ManyRelatedManager, self.db_manager(db)).get_or_create(**kwargs)
# We only need to add() if created because if we got an object back
# from get() then the relationship already exists.
if created:
self.add(obj, through_defaults=through_defaults)
return obj, created
get_or_create.alters_data = True
def update_or_create(self, *, through_defaults=None, **kwargs):
db = router.db_for_write(self.instance.__class__, instance=self.instance)
obj, created = super(ManyRelatedManager, self.db_manager(db)).update_or_create(**kwargs)
# We only need to add() if created because if we got an object back
# from get() then the relationship already exists.
if created:
self.add(obj, through_defaults=through_defaults)
return obj, created
update_or_create.alters_data = True
def _get_target_ids(self, target_field_name, objs):
"""
Return the set of ids of `objs` that the target field references.
"""
from django.db.models import Model
target_ids = set()
target_field = self.through._meta.get_field(target_field_name)
for obj in objs:
if isinstance(obj, self.model):
if not router.allow_relation(obj, self.instance):
raise ValueError(
'Cannot add "%r": instance is on database "%s", '
'value is on database "%s"' %
(obj, self.instance._state.db, obj._state.db)
)
target_id = target_field.get_foreign_related_value(obj)[0]
if target_id is None:
raise ValueError(
'Cannot add "%r": the value for field "%s" is None' %
(obj, target_field_name)
)
target_ids.add(target_id)
elif isinstance(obj, Model):
raise TypeError(
"'%s' instance expected, got %r" %
(self.model._meta.object_name, obj)
)
else:
target_ids.add(target_field.get_prep_value(obj))
return target_ids
def _get_missing_target_ids(self, source_field_name, target_field_name, db, target_ids):
"""
Return the subset of ids of `objs` that aren't already assigned to
this relationship.
"""
vals = self.through._default_manager.using(db).values_list(
target_field_name, flat=True
).filter(**{
source_field_name: self.related_val[0],
'%s__in' % target_field_name: target_ids,
})
return target_ids.difference(vals)
def _get_add_plan(self, db, source_field_name):
"""
Return a boolean triple describing how the add should be performed.
The first element is whether or not bulk_create(ignore_conflicts)
can be used, the second whether or not signals must be sent, and
the third element is whether or not the immediate bulk insertion
with conflicts ignored can be performed.
"""
# Conflicts can be ignored when the intermediary model is
# auto-created as the only possible collision is on the
# (source_id, target_id) tuple. The same assertion doesn't hold for
# user-defined intermediary models as they could have other fields
# causing conflicts which must be surfaced.
can_ignore_conflicts = (
connections[db].features.supports_ignore_conflicts and
self.through._meta.auto_created is not False
)
# Don't send the signal when inserting duplicate data rows
# for symmetrical reverse entries.
must_send_signals = (self.reverse or source_field_name == self.source_field_name) and (
signals.m2m_changed.has_listeners(self.through)
)
# Fast addition through bulk insertion can only be performed
# if no m2m_changed listeners are connected for self.through
# as they require the added set of ids to be provided via
# pk_set.
return can_ignore_conflicts, must_send_signals, (can_ignore_conflicts and not must_send_signals)
def _add_items(self, source_field_name, target_field_name, *objs, through_defaults=None):
# source_field_name: the PK fieldname in join table for the source object
# target_field_name: the PK fieldname in join table for the target object
# *objs - objects to add. Either object instances, or primary keys of object instances.
if not objs:
return
through_defaults = dict(resolve_callables(through_defaults or {}))
target_ids = self._get_target_ids(target_field_name, objs)
db = router.db_for_write(self.through, instance=self.instance)
can_ignore_conflicts, must_send_signals, can_fast_add = self._get_add_plan(db, source_field_name)
if can_fast_add:
self.through._default_manager.using(db).bulk_create([
self.through(**{
'%s_id' % source_field_name: self.related_val[0],
'%s_id' % target_field_name: target_id,
})
for target_id in target_ids
], ignore_conflicts=True)
return
missing_target_ids = self._get_missing_target_ids(
source_field_name, target_field_name, db, target_ids
)
with transaction.atomic(using=db, savepoint=False):
if must_send_signals:
signals.m2m_changed.send(
sender=self.through, action='pre_add',
instance=self.instance, reverse=self.reverse,
model=self.model, pk_set=missing_target_ids, using=db,
)
# Add the ones that aren't there already.
self.through._default_manager.using(db).bulk_create([
self.through(**through_defaults, **{
'%s_id' % source_field_name: self.related_val[0],
'%s_id' % target_field_name: target_id,
})
for target_id in missing_target_ids
], ignore_conflicts=can_ignore_conflicts)
if must_send_signals:
signals.m2m_changed.send(
sender=self.through, action='post_add',
instance=self.instance, reverse=self.reverse,
model=self.model, pk_set=missing_target_ids, using=db,
)
def _remove_items(self, source_field_name, target_field_name, *objs):
# source_field_name: the PK colname in join table for the source object
# target_field_name: the PK colname in join table for the target object
# *objs - objects to remove. Either object instances, or primary
# keys of object instances.
if not objs:
return
# Check that all the objects are of the right type
old_ids = set()
for obj in objs:
if isinstance(obj, self.model):
fk_val = self.target_field.get_foreign_related_value(obj)[0]
old_ids.add(fk_val)
else:
old_ids.add(obj)
db = router.db_for_write(self.through, instance=self.instance)
with transaction.atomic(using=db, savepoint=False):
# Send a signal to the other end if need be.
signals.m2m_changed.send(
sender=self.through, action="pre_remove",
instance=self.instance, reverse=self.reverse,
model=self.model, pk_set=old_ids, using=db,
)
target_model_qs = super().get_queryset()
if target_model_qs._has_filters():
old_vals = target_model_qs.using(db).filter(**{
'%s__in' % self.target_field.target_field.attname: old_ids})
else:
old_vals = old_ids
filters = self._build_remove_filters(old_vals)
self.through._default_manager.using(db).filter(filters).delete()
signals.m2m_changed.send(
sender=self.through, action="post_remove",
instance=self.instance, reverse=self.reverse,
model=self.model, pk_set=old_ids, using=db,
)
return ManyRelatedManager | [
"def",
"create_forward_many_to_many_manager",
"(",
"superclass",
",",
"rel",
",",
"reverse",
")",
":",
"class",
"ManyRelatedManager",
"(",
"superclass",
")",
":",
"def",
"__init__",
"(",
"self",
",",
"instance",
"=",
"None",
")",
":",
"super",
"(",
")",
".",
"__init__",
"(",
")",
"self",
".",
"instance",
"=",
"instance",
"if",
"not",
"reverse",
":",
"self",
".",
"model",
"=",
"rel",
".",
"model",
"self",
".",
"query_field_name",
"=",
"rel",
".",
"field",
".",
"related_query_name",
"(",
")",
"self",
".",
"prefetch_cache_name",
"=",
"rel",
".",
"field",
".",
"name",
"self",
".",
"source_field_name",
"=",
"rel",
".",
"field",
".",
"m2m_field_name",
"(",
")",
"self",
".",
"target_field_name",
"=",
"rel",
".",
"field",
".",
"m2m_reverse_field_name",
"(",
")",
"self",
".",
"symmetrical",
"=",
"rel",
".",
"symmetrical",
"else",
":",
"self",
".",
"model",
"=",
"rel",
".",
"related_model",
"self",
".",
"query_field_name",
"=",
"rel",
".",
"field",
".",
"name",
"self",
".",
"prefetch_cache_name",
"=",
"rel",
".",
"field",
".",
"related_query_name",
"(",
")",
"self",
".",
"source_field_name",
"=",
"rel",
".",
"field",
".",
"m2m_reverse_field_name",
"(",
")",
"self",
".",
"target_field_name",
"=",
"rel",
".",
"field",
".",
"m2m_field_name",
"(",
")",
"self",
".",
"symmetrical",
"=",
"False",
"self",
".",
"through",
"=",
"rel",
".",
"through",
"self",
".",
"reverse",
"=",
"reverse",
"self",
".",
"source_field",
"=",
"self",
".",
"through",
".",
"_meta",
".",
"get_field",
"(",
"self",
".",
"source_field_name",
")",
"self",
".",
"target_field",
"=",
"self",
".",
"through",
".",
"_meta",
".",
"get_field",
"(",
"self",
".",
"target_field_name",
")",
"self",
".",
"core_filters",
"=",
"{",
"}",
"self",
".",
"pk_field_names",
"=",
"{",
"}",
"for",
"lh_field",
",",
"rh_field",
"in",
"self",
".",
"source_field",
".",
"related_fields",
":",
"core_filter_key",
"=",
"'%s__%s'",
"%",
"(",
"self",
".",
"query_field_name",
",",
"rh_field",
".",
"name",
")",
"self",
".",
"core_filters",
"[",
"core_filter_key",
"]",
"=",
"getattr",
"(",
"instance",
",",
"rh_field",
".",
"attname",
")",
"self",
".",
"pk_field_names",
"[",
"lh_field",
".",
"name",
"]",
"=",
"rh_field",
".",
"name",
"self",
".",
"related_val",
"=",
"self",
".",
"source_field",
".",
"get_foreign_related_value",
"(",
"instance",
")",
"if",
"None",
"in",
"self",
".",
"related_val",
":",
"raise",
"ValueError",
"(",
"'\"%r\" needs to have a value for field \"%s\" before '",
"'this many-to-many relationship can be used.'",
"%",
"(",
"instance",
",",
"self",
".",
"pk_field_names",
"[",
"self",
".",
"source_field_name",
"]",
")",
")",
"# Even if this relation is not to pk, we require still pk value.",
"# The wish is that the instance has been already saved to DB,",
"# although having a pk value isn't a guarantee of that.",
"if",
"instance",
".",
"pk",
"is",
"None",
":",
"raise",
"ValueError",
"(",
"\"%r instance needs to have a primary key value before \"",
"\"a many-to-many relationship can be used.\"",
"%",
"instance",
".",
"__class__",
".",
"__name__",
")",
"def",
"__call__",
"(",
"self",
",",
"*",
",",
"manager",
")",
":",
"manager",
"=",
"getattr",
"(",
"self",
".",
"model",
",",
"manager",
")",
"manager_class",
"=",
"create_forward_many_to_many_manager",
"(",
"manager",
".",
"__class__",
",",
"rel",
",",
"reverse",
")",
"return",
"manager_class",
"(",
"instance",
"=",
"self",
".",
"instance",
")",
"do_not_call_in_templates",
"=",
"True",
"def",
"_build_remove_filters",
"(",
"self",
",",
"removed_vals",
")",
":",
"filters",
"=",
"Q",
"(",
"*",
"*",
"{",
"self",
".",
"source_field_name",
":",
"self",
".",
"related_val",
"}",
")",
"# No need to add a subquery condition if removed_vals is a QuerySet without",
"# filters.",
"removed_vals_filters",
"=",
"(",
"not",
"isinstance",
"(",
"removed_vals",
",",
"QuerySet",
")",
"or",
"removed_vals",
".",
"_has_filters",
"(",
")",
")",
"if",
"removed_vals_filters",
":",
"filters",
"&=",
"Q",
"(",
"*",
"*",
"{",
"'%s__in'",
"%",
"self",
".",
"target_field_name",
":",
"removed_vals",
"}",
")",
"if",
"self",
".",
"symmetrical",
":",
"symmetrical_filters",
"=",
"Q",
"(",
"*",
"*",
"{",
"self",
".",
"target_field_name",
":",
"self",
".",
"related_val",
"}",
")",
"if",
"removed_vals_filters",
":",
"symmetrical_filters",
"&=",
"Q",
"(",
"*",
"*",
"{",
"'%s__in'",
"%",
"self",
".",
"source_field_name",
":",
"removed_vals",
"}",
")",
"filters",
"|=",
"symmetrical_filters",
"return",
"filters",
"def",
"_apply_rel_filters",
"(",
"self",
",",
"queryset",
")",
":",
"\"\"\"\n Filter the queryset for the instance this manager is bound to.\n \"\"\"",
"queryset",
".",
"_add_hints",
"(",
"instance",
"=",
"self",
".",
"instance",
")",
"if",
"self",
".",
"_db",
":",
"queryset",
"=",
"queryset",
".",
"using",
"(",
"self",
".",
"_db",
")",
"queryset",
".",
"_defer_next_filter",
"=",
"True",
"return",
"queryset",
".",
"_next_is_sticky",
"(",
")",
".",
"filter",
"(",
"*",
"*",
"self",
".",
"core_filters",
")",
"def",
"_remove_prefetched_objects",
"(",
"self",
")",
":",
"try",
":",
"self",
".",
"instance",
".",
"_prefetched_objects_cache",
".",
"pop",
"(",
"self",
".",
"prefetch_cache_name",
")",
"except",
"(",
"AttributeError",
",",
"KeyError",
")",
":",
"pass",
"# nothing to clear from cache",
"def",
"get_queryset",
"(",
"self",
")",
":",
"try",
":",
"return",
"self",
".",
"instance",
".",
"_prefetched_objects_cache",
"[",
"self",
".",
"prefetch_cache_name",
"]",
"except",
"(",
"AttributeError",
",",
"KeyError",
")",
":",
"queryset",
"=",
"super",
"(",
")",
".",
"get_queryset",
"(",
")",
"return",
"self",
".",
"_apply_rel_filters",
"(",
"queryset",
")",
"def",
"get_prefetch_queryset",
"(",
"self",
",",
"instances",
",",
"queryset",
"=",
"None",
")",
":",
"if",
"queryset",
"is",
"None",
":",
"queryset",
"=",
"super",
"(",
")",
".",
"get_queryset",
"(",
")",
"queryset",
".",
"_add_hints",
"(",
"instance",
"=",
"instances",
"[",
"0",
"]",
")",
"queryset",
"=",
"queryset",
".",
"using",
"(",
"queryset",
".",
"_db",
"or",
"self",
".",
"_db",
")",
"query",
"=",
"{",
"'%s__in'",
"%",
"self",
".",
"query_field_name",
":",
"instances",
"}",
"queryset",
"=",
"queryset",
".",
"_next_is_sticky",
"(",
")",
".",
"filter",
"(",
"*",
"*",
"query",
")",
"# M2M: need to annotate the query in order to get the primary model",
"# that the secondary model was actually related to. We know that",
"# there will already be a join on the join table, so we can just add",
"# the select.",
"# For non-autocreated 'through' models, can't assume we are",
"# dealing with PK values.",
"fk",
"=",
"self",
".",
"through",
".",
"_meta",
".",
"get_field",
"(",
"self",
".",
"source_field_name",
")",
"join_table",
"=",
"fk",
".",
"model",
".",
"_meta",
".",
"db_table",
"connection",
"=",
"connections",
"[",
"queryset",
".",
"db",
"]",
"qn",
"=",
"connection",
".",
"ops",
".",
"quote_name",
"queryset",
"=",
"queryset",
".",
"extra",
"(",
"select",
"=",
"{",
"'_prefetch_related_val_%s'",
"%",
"f",
".",
"attname",
":",
"'%s.%s'",
"%",
"(",
"qn",
"(",
"join_table",
")",
",",
"qn",
"(",
"f",
".",
"column",
")",
")",
"for",
"f",
"in",
"fk",
".",
"local_related_fields",
"}",
")",
"return",
"(",
"queryset",
",",
"lambda",
"result",
":",
"tuple",
"(",
"getattr",
"(",
"result",
",",
"'_prefetch_related_val_%s'",
"%",
"f",
".",
"attname",
")",
"for",
"f",
"in",
"fk",
".",
"local_related_fields",
")",
",",
"lambda",
"inst",
":",
"tuple",
"(",
"f",
".",
"get_db_prep_value",
"(",
"getattr",
"(",
"inst",
",",
"f",
".",
"attname",
")",
",",
"connection",
")",
"for",
"f",
"in",
"fk",
".",
"foreign_related_fields",
")",
",",
"False",
",",
"self",
".",
"prefetch_cache_name",
",",
"False",
",",
")",
"def",
"add",
"(",
"self",
",",
"*",
"objs",
",",
"through_defaults",
"=",
"None",
")",
":",
"self",
".",
"_remove_prefetched_objects",
"(",
")",
"db",
"=",
"router",
".",
"db_for_write",
"(",
"self",
".",
"through",
",",
"instance",
"=",
"self",
".",
"instance",
")",
"with",
"transaction",
".",
"atomic",
"(",
"using",
"=",
"db",
",",
"savepoint",
"=",
"False",
")",
":",
"self",
".",
"_add_items",
"(",
"self",
".",
"source_field_name",
",",
"self",
".",
"target_field_name",
",",
"*",
"objs",
",",
"through_defaults",
"=",
"through_defaults",
",",
")",
"# If this is a symmetrical m2m relation to self, add the mirror",
"# entry in the m2m table.",
"if",
"self",
".",
"symmetrical",
":",
"self",
".",
"_add_items",
"(",
"self",
".",
"target_field_name",
",",
"self",
".",
"source_field_name",
",",
"*",
"objs",
",",
"through_defaults",
"=",
"through_defaults",
",",
")",
"add",
".",
"alters_data",
"=",
"True",
"def",
"remove",
"(",
"self",
",",
"*",
"objs",
")",
":",
"self",
".",
"_remove_prefetched_objects",
"(",
")",
"self",
".",
"_remove_items",
"(",
"self",
".",
"source_field_name",
",",
"self",
".",
"target_field_name",
",",
"*",
"objs",
")",
"remove",
".",
"alters_data",
"=",
"True",
"def",
"clear",
"(",
"self",
")",
":",
"db",
"=",
"router",
".",
"db_for_write",
"(",
"self",
".",
"through",
",",
"instance",
"=",
"self",
".",
"instance",
")",
"with",
"transaction",
".",
"atomic",
"(",
"using",
"=",
"db",
",",
"savepoint",
"=",
"False",
")",
":",
"signals",
".",
"m2m_changed",
".",
"send",
"(",
"sender",
"=",
"self",
".",
"through",
",",
"action",
"=",
"\"pre_clear\"",
",",
"instance",
"=",
"self",
".",
"instance",
",",
"reverse",
"=",
"self",
".",
"reverse",
",",
"model",
"=",
"self",
".",
"model",
",",
"pk_set",
"=",
"None",
",",
"using",
"=",
"db",
",",
")",
"self",
".",
"_remove_prefetched_objects",
"(",
")",
"filters",
"=",
"self",
".",
"_build_remove_filters",
"(",
"super",
"(",
")",
".",
"get_queryset",
"(",
")",
".",
"using",
"(",
"db",
")",
")",
"self",
".",
"through",
".",
"_default_manager",
".",
"using",
"(",
"db",
")",
".",
"filter",
"(",
"filters",
")",
".",
"delete",
"(",
")",
"signals",
".",
"m2m_changed",
".",
"send",
"(",
"sender",
"=",
"self",
".",
"through",
",",
"action",
"=",
"\"post_clear\"",
",",
"instance",
"=",
"self",
".",
"instance",
",",
"reverse",
"=",
"self",
".",
"reverse",
",",
"model",
"=",
"self",
".",
"model",
",",
"pk_set",
"=",
"None",
",",
"using",
"=",
"db",
",",
")",
"clear",
".",
"alters_data",
"=",
"True",
"def",
"set",
"(",
"self",
",",
"objs",
",",
"*",
",",
"clear",
"=",
"False",
",",
"through_defaults",
"=",
"None",
")",
":",
"# Force evaluation of `objs` in case it's a queryset whose value",
"# could be affected by `manager.clear()`. Refs #19816.",
"objs",
"=",
"tuple",
"(",
"objs",
")",
"db",
"=",
"router",
".",
"db_for_write",
"(",
"self",
".",
"through",
",",
"instance",
"=",
"self",
".",
"instance",
")",
"with",
"transaction",
".",
"atomic",
"(",
"using",
"=",
"db",
",",
"savepoint",
"=",
"False",
")",
":",
"if",
"clear",
":",
"self",
".",
"clear",
"(",
")",
"self",
".",
"add",
"(",
"*",
"objs",
",",
"through_defaults",
"=",
"through_defaults",
")",
"else",
":",
"old_ids",
"=",
"set",
"(",
"self",
".",
"using",
"(",
"db",
")",
".",
"values_list",
"(",
"self",
".",
"target_field",
".",
"target_field",
".",
"attname",
",",
"flat",
"=",
"True",
")",
")",
"new_objs",
"=",
"[",
"]",
"for",
"obj",
"in",
"objs",
":",
"fk_val",
"=",
"(",
"self",
".",
"target_field",
".",
"get_foreign_related_value",
"(",
"obj",
")",
"[",
"0",
"]",
"if",
"isinstance",
"(",
"obj",
",",
"self",
".",
"model",
")",
"else",
"self",
".",
"target_field",
".",
"get_prep_value",
"(",
"obj",
")",
")",
"if",
"fk_val",
"in",
"old_ids",
":",
"old_ids",
".",
"remove",
"(",
"fk_val",
")",
"else",
":",
"new_objs",
".",
"append",
"(",
"obj",
")",
"self",
".",
"remove",
"(",
"*",
"old_ids",
")",
"self",
".",
"add",
"(",
"*",
"new_objs",
",",
"through_defaults",
"=",
"through_defaults",
")",
"set",
".",
"alters_data",
"=",
"True",
"def",
"create",
"(",
"self",
",",
"*",
",",
"through_defaults",
"=",
"None",
",",
"*",
"*",
"kwargs",
")",
":",
"db",
"=",
"router",
".",
"db_for_write",
"(",
"self",
".",
"instance",
".",
"__class__",
",",
"instance",
"=",
"self",
".",
"instance",
")",
"new_obj",
"=",
"super",
"(",
"ManyRelatedManager",
",",
"self",
".",
"db_manager",
"(",
"db",
")",
")",
".",
"create",
"(",
"*",
"*",
"kwargs",
")",
"self",
".",
"add",
"(",
"new_obj",
",",
"through_defaults",
"=",
"through_defaults",
")",
"return",
"new_obj",
"create",
".",
"alters_data",
"=",
"True",
"def",
"get_or_create",
"(",
"self",
",",
"*",
",",
"through_defaults",
"=",
"None",
",",
"*",
"*",
"kwargs",
")",
":",
"db",
"=",
"router",
".",
"db_for_write",
"(",
"self",
".",
"instance",
".",
"__class__",
",",
"instance",
"=",
"self",
".",
"instance",
")",
"obj",
",",
"created",
"=",
"super",
"(",
"ManyRelatedManager",
",",
"self",
".",
"db_manager",
"(",
"db",
")",
")",
".",
"get_or_create",
"(",
"*",
"*",
"kwargs",
")",
"# We only need to add() if created because if we got an object back",
"# from get() then the relationship already exists.",
"if",
"created",
":",
"self",
".",
"add",
"(",
"obj",
",",
"through_defaults",
"=",
"through_defaults",
")",
"return",
"obj",
",",
"created",
"get_or_create",
".",
"alters_data",
"=",
"True",
"def",
"update_or_create",
"(",
"self",
",",
"*",
",",
"through_defaults",
"=",
"None",
",",
"*",
"*",
"kwargs",
")",
":",
"db",
"=",
"router",
".",
"db_for_write",
"(",
"self",
".",
"instance",
".",
"__class__",
",",
"instance",
"=",
"self",
".",
"instance",
")",
"obj",
",",
"created",
"=",
"super",
"(",
"ManyRelatedManager",
",",
"self",
".",
"db_manager",
"(",
"db",
")",
")",
".",
"update_or_create",
"(",
"*",
"*",
"kwargs",
")",
"# We only need to add() if created because if we got an object back",
"# from get() then the relationship already exists.",
"if",
"created",
":",
"self",
".",
"add",
"(",
"obj",
",",
"through_defaults",
"=",
"through_defaults",
")",
"return",
"obj",
",",
"created",
"update_or_create",
".",
"alters_data",
"=",
"True",
"def",
"_get_target_ids",
"(",
"self",
",",
"target_field_name",
",",
"objs",
")",
":",
"\"\"\"\n Return the set of ids of `objs` that the target field references.\n \"\"\"",
"from",
"django",
".",
"db",
".",
"models",
"import",
"Model",
"target_ids",
"=",
"set",
"(",
")",
"target_field",
"=",
"self",
".",
"through",
".",
"_meta",
".",
"get_field",
"(",
"target_field_name",
")",
"for",
"obj",
"in",
"objs",
":",
"if",
"isinstance",
"(",
"obj",
",",
"self",
".",
"model",
")",
":",
"if",
"not",
"router",
".",
"allow_relation",
"(",
"obj",
",",
"self",
".",
"instance",
")",
":",
"raise",
"ValueError",
"(",
"'Cannot add \"%r\": instance is on database \"%s\", '",
"'value is on database \"%s\"'",
"%",
"(",
"obj",
",",
"self",
".",
"instance",
".",
"_state",
".",
"db",
",",
"obj",
".",
"_state",
".",
"db",
")",
")",
"target_id",
"=",
"target_field",
".",
"get_foreign_related_value",
"(",
"obj",
")",
"[",
"0",
"]",
"if",
"target_id",
"is",
"None",
":",
"raise",
"ValueError",
"(",
"'Cannot add \"%r\": the value for field \"%s\" is None'",
"%",
"(",
"obj",
",",
"target_field_name",
")",
")",
"target_ids",
".",
"add",
"(",
"target_id",
")",
"elif",
"isinstance",
"(",
"obj",
",",
"Model",
")",
":",
"raise",
"TypeError",
"(",
"\"'%s' instance expected, got %r\"",
"%",
"(",
"self",
".",
"model",
".",
"_meta",
".",
"object_name",
",",
"obj",
")",
")",
"else",
":",
"target_ids",
".",
"add",
"(",
"target_field",
".",
"get_prep_value",
"(",
"obj",
")",
")",
"return",
"target_ids",
"def",
"_get_missing_target_ids",
"(",
"self",
",",
"source_field_name",
",",
"target_field_name",
",",
"db",
",",
"target_ids",
")",
":",
"\"\"\"\n Return the subset of ids of `objs` that aren't already assigned to\n this relationship.\n \"\"\"",
"vals",
"=",
"self",
".",
"through",
".",
"_default_manager",
".",
"using",
"(",
"db",
")",
".",
"values_list",
"(",
"target_field_name",
",",
"flat",
"=",
"True",
")",
".",
"filter",
"(",
"*",
"*",
"{",
"source_field_name",
":",
"self",
".",
"related_val",
"[",
"0",
"]",
",",
"'%s__in'",
"%",
"target_field_name",
":",
"target_ids",
",",
"}",
")",
"return",
"target_ids",
".",
"difference",
"(",
"vals",
")",
"def",
"_get_add_plan",
"(",
"self",
",",
"db",
",",
"source_field_name",
")",
":",
"\"\"\"\n Return a boolean triple of the way the add should be performed.\n\n The first element is whether or not bulk_create(ignore_conflicts)\n can be used, the second whether or not signals must be sent, and\n the third element is whether or not the immediate bulk insertion\n with conflicts ignored can be performed.\n \"\"\"",
"# Conflicts can be ignored when the intermediary model is",
"# auto-created as the only possible collision is on the",
"# (source_id, target_id) tuple. The same assertion doesn't hold for",
"# user-defined intermediary models as they could have other fields",
"# causing conflicts which must be surfaced.",
"can_ignore_conflicts",
"=",
"(",
"connections",
"[",
"db",
"]",
".",
"features",
".",
"supports_ignore_conflicts",
"and",
"self",
".",
"through",
".",
"_meta",
".",
"auto_created",
"is",
"not",
"False",
")",
"# Don't send the signal when inserting duplicate data row",
"# for symmetrical reverse entries.",
"must_send_signals",
"=",
"(",
"self",
".",
"reverse",
"or",
"source_field_name",
"==",
"self",
".",
"source_field_name",
")",
"and",
"(",
"signals",
".",
"m2m_changed",
".",
"has_listeners",
"(",
"self",
".",
"through",
")",
")",
"# Fast addition through bulk insertion can only be performed",
"# if no m2m_changed listeners are connected for self.through",
"# as they require the added set of ids to be provided via",
"# pk_set.",
"return",
"can_ignore_conflicts",
",",
"must_send_signals",
",",
"(",
"can_ignore_conflicts",
"and",
"not",
"must_send_signals",
")",
"def",
"_add_items",
"(",
"self",
",",
"source_field_name",
",",
"target_field_name",
",",
"*",
"objs",
",",
"through_defaults",
"=",
"None",
")",
":",
"# source_field_name: the PK fieldname in join table for the source object",
"# target_field_name: the PK fieldname in join table for the target object",
"# *objs - objects to add. Either object instances, or primary keys of object instances.",
"if",
"not",
"objs",
":",
"return",
"through_defaults",
"=",
"dict",
"(",
"resolve_callables",
"(",
"through_defaults",
"or",
"{",
"}",
")",
")",
"target_ids",
"=",
"self",
".",
"_get_target_ids",
"(",
"target_field_name",
",",
"objs",
")",
"db",
"=",
"router",
".",
"db_for_write",
"(",
"self",
".",
"through",
",",
"instance",
"=",
"self",
".",
"instance",
")",
"can_ignore_conflicts",
",",
"must_send_signals",
",",
"can_fast_add",
"=",
"self",
".",
"_get_add_plan",
"(",
"db",
",",
"source_field_name",
")",
"if",
"can_fast_add",
":",
"self",
".",
"through",
".",
"_default_manager",
".",
"using",
"(",
"db",
")",
".",
"bulk_create",
"(",
"[",
"self",
".",
"through",
"(",
"*",
"*",
"{",
"'%s_id'",
"%",
"source_field_name",
":",
"self",
".",
"related_val",
"[",
"0",
"]",
",",
"'%s_id'",
"%",
"target_field_name",
":",
"target_id",
",",
"}",
")",
"for",
"target_id",
"in",
"target_ids",
"]",
",",
"ignore_conflicts",
"=",
"True",
")",
"return",
"missing_target_ids",
"=",
"self",
".",
"_get_missing_target_ids",
"(",
"source_field_name",
",",
"target_field_name",
",",
"db",
",",
"target_ids",
")",
"with",
"transaction",
".",
"atomic",
"(",
"using",
"=",
"db",
",",
"savepoint",
"=",
"False",
")",
":",
"if",
"must_send_signals",
":",
"signals",
".",
"m2m_changed",
".",
"send",
"(",
"sender",
"=",
"self",
".",
"through",
",",
"action",
"=",
"'pre_add'",
",",
"instance",
"=",
"self",
".",
"instance",
",",
"reverse",
"=",
"self",
".",
"reverse",
",",
"model",
"=",
"self",
".",
"model",
",",
"pk_set",
"=",
"missing_target_ids",
",",
"using",
"=",
"db",
",",
")",
"# Add the ones that aren't there already.",
"self",
".",
"through",
".",
"_default_manager",
".",
"using",
"(",
"db",
")",
".",
"bulk_create",
"(",
"[",
"self",
".",
"through",
"(",
"*",
"*",
"through_defaults",
",",
"*",
"*",
"{",
"'%s_id'",
"%",
"source_field_name",
":",
"self",
".",
"related_val",
"[",
"0",
"]",
",",
"'%s_id'",
"%",
"target_field_name",
":",
"target_id",
",",
"}",
")",
"for",
"target_id",
"in",
"missing_target_ids",
"]",
",",
"ignore_conflicts",
"=",
"can_ignore_conflicts",
")",
"if",
"must_send_signals",
":",
"signals",
".",
"m2m_changed",
".",
"send",
"(",
"sender",
"=",
"self",
".",
"through",
",",
"action",
"=",
"'post_add'",
",",
"instance",
"=",
"self",
".",
"instance",
",",
"reverse",
"=",
"self",
".",
"reverse",
",",
"model",
"=",
"self",
".",
"model",
",",
"pk_set",
"=",
"missing_target_ids",
",",
"using",
"=",
"db",
",",
")",
"def",
"_remove_items",
"(",
"self",
",",
"source_field_name",
",",
"target_field_name",
",",
"*",
"objs",
")",
":",
"# source_field_name: the PK colname in join table for the source object",
"# target_field_name: the PK colname in join table for the target object",
"# *objs - objects to remove. Either object instances, or primary",
"# keys of object instances.",
"if",
"not",
"objs",
":",
"return",
"# Check that all the objects are of the right type",
"old_ids",
"=",
"set",
"(",
")",
"for",
"obj",
"in",
"objs",
":",
"if",
"isinstance",
"(",
"obj",
",",
"self",
".",
"model",
")",
":",
"fk_val",
"=",
"self",
".",
"target_field",
".",
"get_foreign_related_value",
"(",
"obj",
")",
"[",
"0",
"]",
"old_ids",
".",
"add",
"(",
"fk_val",
")",
"else",
":",
"old_ids",
".",
"add",
"(",
"obj",
")",
"db",
"=",
"router",
".",
"db_for_write",
"(",
"self",
".",
"through",
",",
"instance",
"=",
"self",
".",
"instance",
")",
"with",
"transaction",
".",
"atomic",
"(",
"using",
"=",
"db",
",",
"savepoint",
"=",
"False",
")",
":",
"# Send a signal to the other end if need be.",
"signals",
".",
"m2m_changed",
".",
"send",
"(",
"sender",
"=",
"self",
".",
"through",
",",
"action",
"=",
"\"pre_remove\"",
",",
"instance",
"=",
"self",
".",
"instance",
",",
"reverse",
"=",
"self",
".",
"reverse",
",",
"model",
"=",
"self",
".",
"model",
",",
"pk_set",
"=",
"old_ids",
",",
"using",
"=",
"db",
",",
")",
"target_model_qs",
"=",
"super",
"(",
")",
".",
"get_queryset",
"(",
")",
"if",
"target_model_qs",
".",
"_has_filters",
"(",
")",
":",
"old_vals",
"=",
"target_model_qs",
".",
"using",
"(",
"db",
")",
".",
"filter",
"(",
"*",
"*",
"{",
"'%s__in'",
"%",
"self",
".",
"target_field",
".",
"target_field",
".",
"attname",
":",
"old_ids",
"}",
")",
"else",
":",
"old_vals",
"=",
"old_ids",
"filters",
"=",
"self",
".",
"_build_remove_filters",
"(",
"old_vals",
")",
"self",
".",
"through",
".",
"_default_manager",
".",
"using",
"(",
"db",
")",
".",
"filter",
"(",
"filters",
")",
".",
"delete",
"(",
")",
"signals",
".",
"m2m_changed",
".",
"send",
"(",
"sender",
"=",
"self",
".",
"through",
",",
"action",
"=",
"\"post_remove\"",
",",
"instance",
"=",
"self",
".",
"instance",
",",
"reverse",
"=",
"self",
".",
"reverse",
",",
"model",
"=",
"self",
".",
"model",
",",
"pk_set",
"=",
"old_ids",
",",
"using",
"=",
"db",
",",
")",
"return",
"ManyRelatedManager"
] | [
806,
0
] | [
1204,
29
] | python | en | ['en', 'error', 'th'] | False |
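A short sketch of the ManyRelatedManager surface defined in the record above. Pizza/Topping and Band/Member are assumed example models: the former with an auto-created through table (where the fast bulk insert with ignore_conflicts can apply), the latter with a custom through model whose extra columns are filled from through_defaults.

# Hedged sketch: every model, field, and instance name here is hypothetical.
pizza.toppings.add(cheese, tomato)  # inserts only the join rows that are missing
pizza.toppings.set([cheese])        # id-diffs against existing rows; removes tomato
pizza.toppings.clear()              # deletes join rows; sends pre_clear/post_clear
band.members.add(alice, through_defaults={"joined": "2020-01-01"})
bea = band.members.create(name="Bea", through_defaults={"joined": "2021-06-01"})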
ForwardManyToOneDescriptor.__get__ | (self, instance, cls=None) |
Get the related instance through the forward relation.
With the example above, when getting ``child.parent``:
- ``self`` is the descriptor managing the ``parent`` attribute
- ``instance`` is the ``child`` instance
- ``cls`` is the ``Child`` class (we don't need it)
|
Get the related instance through the forward relation. | def __get__(self, instance, cls=None):
"""
Get the related instance through the forward relation.
With the example above, when getting ``child.parent``:
- ``self`` is the descriptor managing the ``parent`` attribute
- ``instance`` is the ``child`` instance
- ``cls`` is the ``Child`` class (we don't need it)
"""
if instance is None:
return self
# The related instance is loaded from the database and then cached
# by the field on the model instance state. It can also be pre-cached
# by the reverse accessor (ReverseOneToOneDescriptor).
try:
rel_obj = self.field.get_cached_value(instance)
except KeyError:
has_value = None not in self.field.get_local_related_value(instance)
ancestor_link = instance._meta.get_ancestor_link(self.field.model) if has_value else None
if ancestor_link and ancestor_link.is_cached(instance):
# An ancestor link will exist if this field is defined on a
# multi-table inheritance parent of the instance's class.
ancestor = ancestor_link.get_cached_value(instance)
# The value might be cached on an ancestor if the instance
# originated from walking down the inheritance chain.
rel_obj = self.field.get_cached_value(ancestor, default=None)
else:
rel_obj = None
if rel_obj is None and has_value:
rel_obj = self.get_object(instance)
remote_field = self.field.remote_field
# If this is a one-to-one relation, set the reverse accessor
# cache on the related object to the current instance to avoid
# an extra SQL query if it's accessed later on.
if not remote_field.multiple:
remote_field.set_cached_value(rel_obj, instance)
self.field.set_cached_value(instance, rel_obj)
if rel_obj is None and not self.field.null:
raise self.RelatedObjectDoesNotExist(
"%s has no %s." % (self.field.model.__name__, self.field.name)
)
else:
return rel_obj | [
"def",
"__get__",
"(",
"self",
",",
"instance",
",",
"cls",
"=",
"None",
")",
":",
"if",
"instance",
"is",
"None",
":",
"return",
"self",
"# The related instance is loaded from the database and then cached",
"# by the field on the model instance state. It can also be pre-cached",
"# by the reverse accessor (ReverseOneToOneDescriptor).",
"try",
":",
"rel_obj",
"=",
"self",
".",
"field",
".",
"get_cached_value",
"(",
"instance",
")",
"except",
"KeyError",
":",
"has_value",
"=",
"None",
"not",
"in",
"self",
".",
"field",
".",
"get_local_related_value",
"(",
"instance",
")",
"ancestor_link",
"=",
"instance",
".",
"_meta",
".",
"get_ancestor_link",
"(",
"self",
".",
"field",
".",
"model",
")",
"if",
"has_value",
"else",
"None",
"if",
"ancestor_link",
"and",
"ancestor_link",
".",
"is_cached",
"(",
"instance",
")",
":",
"# An ancestor link will exist if this field is defined on a",
"# multi-table inheritance parent of the instance's class.",
"ancestor",
"=",
"ancestor_link",
".",
"get_cached_value",
"(",
"instance",
")",
"# The value might be cached on an ancestor if the instance",
"# originated from walking down the inheritance chain.",
"rel_obj",
"=",
"self",
".",
"field",
".",
"get_cached_value",
"(",
"ancestor",
",",
"default",
"=",
"None",
")",
"else",
":",
"rel_obj",
"=",
"None",
"if",
"rel_obj",
"is",
"None",
"and",
"has_value",
":",
"rel_obj",
"=",
"self",
".",
"get_object",
"(",
"instance",
")",
"remote_field",
"=",
"self",
".",
"field",
".",
"remote_field",
"# If this is a one-to-one relation, set the reverse accessor",
"# cache on the related object to the current instance to avoid",
"# an extra SQL query if it's accessed later on.",
"if",
"not",
"remote_field",
".",
"multiple",
":",
"remote_field",
".",
"set_cached_value",
"(",
"rel_obj",
",",
"instance",
")",
"self",
".",
"field",
".",
"set_cached_value",
"(",
"instance",
",",
"rel_obj",
")",
"if",
"rel_obj",
"is",
"None",
"and",
"not",
"self",
".",
"field",
".",
"null",
":",
"raise",
"self",
".",
"RelatedObjectDoesNotExist",
"(",
"\"%s has no %s.\"",
"%",
"(",
"self",
".",
"field",
".",
"model",
".",
"__name__",
",",
"self",
".",
"field",
".",
"name",
")",
")",
"else",
":",
"return",
"rel_obj"
] | [
155,
4
] | [
200,
26
] | python | en | ['en', 'error', 'th'] | False |
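The caching contract of ForwardManyToOneDescriptor.__get__ above, sketched with the docstring's Child/Parent example names (hypothetical models):

child = Child.objects.get(pk=1)
p1 = child.parent  # first access: one SELECT via get_object(), result cached on the field
p2 = child.parent  # second access: no query, served from the per-instance cache
assert p1 is p2
# If child.parent_id is None and the field is null=False,
# RelatedObjectDoesNotExist is raised instead of returning None.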
ForwardManyToOneDescriptor.__set__ | (self, instance, value) |
Set the related instance through the forward relation.
With the example above, when setting ``child.parent = parent``:
- ``self`` is the descriptor managing the ``parent`` attribute
- ``instance`` is the ``child`` instance
- ``value`` is the ``parent`` instance on the right of the equal sign
|
Set the related instance through the forward relation. | def __set__(self, instance, value):
"""
Set the related instance through the forward relation.
With the example above, when setting ``child.parent = parent``:
- ``self`` is the descriptor managing the ``parent`` attribute
- ``instance`` is the ``child`` instance
- ``value`` is the ``parent`` instance on the right of the equal sign
"""
# An object must be an instance of the related class.
if value is not None and not isinstance(value, self.field.remote_field.model._meta.concrete_model):
raise ValueError(
'Cannot assign "%r": "%s.%s" must be a "%s" instance.' % (
value,
instance._meta.object_name,
self.field.name,
self.field.remote_field.model._meta.object_name,
)
)
elif value is not None:
if instance._state.db is None:
instance._state.db = router.db_for_write(instance.__class__, instance=value)
if value._state.db is None:
value._state.db = router.db_for_write(value.__class__, instance=instance)
if not router.allow_relation(value, instance):
raise ValueError('Cannot assign "%r": the current database router prevents this relation.' % value)
remote_field = self.field.remote_field
# If we're setting the value of a OneToOneField to None, we need to clear
# out the cache on any old related object. Otherwise, deleting the
# previously-related object will also cause this object to be deleted,
# which is wrong.
if value is None:
# Look up the previously-related object, which may still be available
# since we've not yet cleared out the related field.
# Use the cache directly, instead of the accessor; if we haven't
# populated the cache, then we don't care - we're only accessing
# the object to invalidate the accessor cache, so there's no
# need to populate the cache just to expire it again.
related = self.field.get_cached_value(instance, default=None)
# If we've got an old related object, we need to clear out its
# cache. This cache also might not exist if the related object
# hasn't been accessed yet.
if related is not None:
remote_field.set_cached_value(related, None)
for lh_field, rh_field in self.field.related_fields:
setattr(instance, lh_field.attname, None)
# Set the values of the related field.
else:
for lh_field, rh_field in self.field.related_fields:
setattr(instance, lh_field.attname, getattr(value, rh_field.attname))
# Set the related instance cache used by __get__ to avoid an SQL query
# when accessing the attribute we just set.
self.field.set_cached_value(instance, value)
# If this is a one-to-one relation, set the reverse accessor cache on
# the related object to the current instance to avoid an extra SQL
# query if it's accessed later on.
if value is not None and not remote_field.multiple:
remote_field.set_cached_value(value, instance) | [
"def",
"__set__",
"(",
"self",
",",
"instance",
",",
"value",
")",
":",
"# An object must be an instance of the related class.",
"if",
"value",
"is",
"not",
"None",
"and",
"not",
"isinstance",
"(",
"value",
",",
"self",
".",
"field",
".",
"remote_field",
".",
"model",
".",
"_meta",
".",
"concrete_model",
")",
":",
"raise",
"ValueError",
"(",
"'Cannot assign \"%r\": \"%s.%s\" must be a \"%s\" instance.'",
"%",
"(",
"value",
",",
"instance",
".",
"_meta",
".",
"object_name",
",",
"self",
".",
"field",
".",
"name",
",",
"self",
".",
"field",
".",
"remote_field",
".",
"model",
".",
"_meta",
".",
"object_name",
",",
")",
")",
"elif",
"value",
"is",
"not",
"None",
":",
"if",
"instance",
".",
"_state",
".",
"db",
"is",
"None",
":",
"instance",
".",
"_state",
".",
"db",
"=",
"router",
".",
"db_for_write",
"(",
"instance",
".",
"__class__",
",",
"instance",
"=",
"value",
")",
"if",
"value",
".",
"_state",
".",
"db",
"is",
"None",
":",
"value",
".",
"_state",
".",
"db",
"=",
"router",
".",
"db_for_write",
"(",
"value",
".",
"__class__",
",",
"instance",
"=",
"instance",
")",
"if",
"not",
"router",
".",
"allow_relation",
"(",
"value",
",",
"instance",
")",
":",
"raise",
"ValueError",
"(",
"'Cannot assign \"%r\": the current database router prevents this relation.'",
"%",
"value",
")",
"remote_field",
"=",
"self",
".",
"field",
".",
"remote_field",
"# If we're setting the value of a OneToOneField to None, we need to clear",
"# out the cache on any old related object. Otherwise, deleting the",
"# previously-related object will also cause this object to be deleted,",
"# which is wrong.",
"if",
"value",
"is",
"None",
":",
"# Look up the previously-related object, which may still be available",
"# since we've not yet cleared out the related field.",
"# Use the cache directly, instead of the accessor; if we haven't",
"# populated the cache, then we don't care - we're only accessing",
"# the object to invalidate the accessor cache, so there's no",
"# need to populate the cache just to expire it again.",
"related",
"=",
"self",
".",
"field",
".",
"get_cached_value",
"(",
"instance",
",",
"default",
"=",
"None",
")",
"# If we've got an old related object, we need to clear out its",
"# cache. This cache also might not exist if the related object",
"# hasn't been accessed yet.",
"if",
"related",
"is",
"not",
"None",
":",
"remote_field",
".",
"set_cached_value",
"(",
"related",
",",
"None",
")",
"for",
"lh_field",
",",
"rh_field",
"in",
"self",
".",
"field",
".",
"related_fields",
":",
"setattr",
"(",
"instance",
",",
"lh_field",
".",
"attname",
",",
"None",
")",
"# Set the values of the related field.",
"else",
":",
"for",
"lh_field",
",",
"rh_field",
"in",
"self",
".",
"field",
".",
"related_fields",
":",
"setattr",
"(",
"instance",
",",
"lh_field",
".",
"attname",
",",
"getattr",
"(",
"value",
",",
"rh_field",
".",
"attname",
")",
")",
"# Set the related instance cache used by __get__ to avoid an SQL query",
"# when accessing the attribute we just set.",
"self",
".",
"field",
".",
"set_cached_value",
"(",
"instance",
",",
"value",
")",
"# If this is a one-to-one relation, set the reverse accessor cache on",
"# the related object to the current instance to avoid an extra SQL",
"# query if it's accessed later on.",
"if",
"value",
"is",
"not",
"None",
"and",
"not",
"remote_field",
".",
"multiple",
":",
"remote_field",
".",
"set_cached_value",
"(",
"value",
",",
"instance",
")"
] | [
202,
4
] | [
266,
58
] | python | en | ['en', 'error', 'th'] | False |
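Assignment through the descriptor (__set__ above), again with the hypothetical Child/Parent names:

child.parent = parent              # copies parent.pk into child.parent_id and caches the instance
assert child.parent_id == parent.pk
child.parent = None                # also clears the stale reverse cache on the old related object
try:
    child.parent = "not-a-parent"  # not a Parent instance
except ValueError:
    pass                           # 'Cannot assign ...: must be a "Parent" instance.'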
ForwardManyToOneDescriptor.__reduce__ | (self) |
Pickling should return the instance attached by self.field on the
model, not a new copy of that descriptor. Use getattr() to retrieve
the instance directly from the model.
|
Pickling should return the instance attached by self.field on the
model, not a new copy of that descriptor. Use getattr() to retrieve
the instance directly from the model.
| def __reduce__(self):
"""
Pickling should return the instance attached by self.field on the
model, not a new copy of that descriptor. Use getattr() to retrieve
the instance directly from the model.
"""
return getattr, (self.field.model, self.field.name) | [
"def",
"__reduce__",
"(",
"self",
")",
":",
"return",
"getattr",
",",
"(",
"self",
".",
"field",
".",
"model",
",",
"self",
".",
"field",
".",
"name",
")"
] | [
268,
4
] | [
274,
59
] | python | en | ['en', 'error', 'th'] | False |
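Why __reduce__ above returns (getattr, (model, name)): pickling the descriptor resolves back to the attribute already attached to the model class rather than producing a copy. A sketch with the hypothetical Child model:

import pickle

desc = Child.__dict__["parent"]              # the ForwardManyToOneDescriptor object
restored = pickle.loads(pickle.dumps(desc))
assert restored is desc                      # getattr(Child, "parent") returns the
                                             # descriptor itself (instance is None in __get__)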
ReverseOneToOneDescriptor.__get__ | (self, instance, cls=None) |
Get the related instance through the reverse relation.
With the example above, when getting ``place.restaurant``:
- ``self`` is the descriptor managing the ``restaurant`` attribute
- ``instance`` is the ``place`` instance
- ``cls`` is the ``Place`` class (unused)
Keep in mind that ``Restaurant`` holds the foreign key to ``Place``.
|
Get the related instance through the reverse relation. | def __get__(self, instance, cls=None):
"""
Get the related instance through the reverse relation.
With the example above, when getting ``place.restaurant``:
- ``self`` is the descriptor managing the ``restaurant`` attribute
- ``instance`` is the ``place`` instance
- ``cls`` is the ``Place`` class (unused)
Keep in mind that ``Restaurant`` holds the foreign key to ``Place``.
"""
if instance is None:
return self
# The related instance is loaded from the database and then cached
# by the field on the model instance state. It can also be pre-cached
# by the forward accessor (ForwardManyToOneDescriptor).
try:
rel_obj = self.related.get_cached_value(instance)
except KeyError:
related_pk = instance.pk
if related_pk is None:
rel_obj = None
else:
filter_args = self.related.field.get_forward_related_filter(instance)
try:
rel_obj = self.get_queryset(instance=instance).get(**filter_args)
except self.related.related_model.DoesNotExist:
rel_obj = None
else:
# Set the forward accessor cache on the related object to
# the current instance to avoid an extra SQL query if it's
# accessed later on.
self.related.field.set_cached_value(rel_obj, instance)
self.related.set_cached_value(instance, rel_obj)
if rel_obj is None:
raise self.RelatedObjectDoesNotExist(
"%s has no %s." % (
instance.__class__.__name__,
self.related.get_accessor_name()
)
)
else:
return rel_obj | [
"def",
"__get__",
"(",
"self",
",",
"instance",
",",
"cls",
"=",
"None",
")",
":",
"if",
"instance",
"is",
"None",
":",
"return",
"self",
"# The related instance is loaded from the database and then cached",
"# by the field on the model instance state. It can also be pre-cached",
"# by the forward accessor (ForwardManyToOneDescriptor).",
"try",
":",
"rel_obj",
"=",
"self",
".",
"related",
".",
"get_cached_value",
"(",
"instance",
")",
"except",
"KeyError",
":",
"related_pk",
"=",
"instance",
".",
"pk",
"if",
"related_pk",
"is",
"None",
":",
"rel_obj",
"=",
"None",
"else",
":",
"filter_args",
"=",
"self",
".",
"related",
".",
"field",
".",
"get_forward_related_filter",
"(",
"instance",
")",
"try",
":",
"rel_obj",
"=",
"self",
".",
"get_queryset",
"(",
"instance",
"=",
"instance",
")",
".",
"get",
"(",
"*",
"*",
"filter_args",
")",
"except",
"self",
".",
"related",
".",
"related_model",
".",
"DoesNotExist",
":",
"rel_obj",
"=",
"None",
"else",
":",
"# Set the forward accessor cache on the related object to",
"# the current instance to avoid an extra SQL query if it's",
"# accessed later on.",
"self",
".",
"related",
".",
"field",
".",
"set_cached_value",
"(",
"rel_obj",
",",
"instance",
")",
"self",
".",
"related",
".",
"set_cached_value",
"(",
"instance",
",",
"rel_obj",
")",
"if",
"rel_obj",
"is",
"None",
":",
"raise",
"self",
".",
"RelatedObjectDoesNotExist",
"(",
"\"%s has no %s.\"",
"%",
"(",
"instance",
".",
"__class__",
".",
"__name__",
",",
"self",
".",
"related",
".",
"get_accessor_name",
"(",
")",
")",
")",
"else",
":",
"return",
"rel_obj"
] | [
382,
4
] | [
427,
26
] | python | en | ['en', 'error', 'th'] | False |
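The docstring's own Place/Restaurant pairing makes the lookup path concrete: the first access runs one query (or raises), and the result is then memoized on the instance, with a found object's forward cache primed as well. A sketch assuming these hypothetical models are migrated and a Place row with pk=1 exists:

from django.db import models

class Place(models.Model):
    class Meta:
        app_label = "demo"

class Restaurant(models.Model):
    place = models.OneToOneField(Place, on_delete=models.CASCADE)

    class Meta:
        app_label = "demo"

place = Place.objects.get(pk=1)
try:
    restaurant = place.restaurant    # one query; the result is cached on `place`
except Restaurant.DoesNotExist:      # RelatedObjectDoesNotExist subclasses this
    restaurant = None                # the accessor raises rather than returning None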
ReverseOneToOneDescriptor.__set__ | (self, instance, value) |
Set the related instance through the reverse relation.
With the example above, when setting ``place.restaurant = restaurant``:
- ``self`` is the descriptor managing the ``restaurant`` attribute
- ``instance`` is the ``place`` instance
- ``value`` is the ``restaurant`` instance on the right of the equal sign
Keep in mind that ``Restaurant`` holds the foreign key to ``Place``.
|
Set the related instance through the reverse relation. | def __set__(self, instance, value):
"""
Set the related instance through the reverse relation.
With the example above, when setting ``place.restaurant = restaurant``:
- ``self`` is the descriptor managing the ``restaurant`` attribute
- ``instance`` is the ``place`` instance
- ``value`` is the ``restaurant`` instance on the right of the equal sign
Keep in mind that ``Restaurant`` holds the foreign key to ``Place``.
"""
# The similarity of the code below to the code in
# ForwardManyToOneDescriptor is annoying, but there's a bunch
# of small differences that would make a common base class convoluted.
if value is None:
# Update the cached related instance (if any) & clear the cache.
# Following the example above, this would be the cached
# ``restaurant`` instance (if any).
rel_obj = self.related.get_cached_value(instance, default=None)
if rel_obj is not None:
# Remove the ``restaurant`` instance from the ``place``
# instance cache.
self.related.delete_cached_value(instance)
# Set the ``place`` field on the ``restaurant``
# instance to None.
setattr(rel_obj, self.related.field.name, None)
elif not isinstance(value, self.related.related_model):
# An object must be an instance of the related class.
raise ValueError(
'Cannot assign "%r": "%s.%s" must be a "%s" instance.' % (
value,
instance._meta.object_name,
self.related.get_accessor_name(),
self.related.related_model._meta.object_name,
)
)
else:
if instance._state.db is None:
instance._state.db = router.db_for_write(instance.__class__, instance=value)
if value._state.db is None:
value._state.db = router.db_for_write(value.__class__, instance=instance)
if not router.allow_relation(value, instance):
raise ValueError('Cannot assign "%r": the current database router prevents this relation.' % value)
related_pk = tuple(getattr(instance, field.attname) for field in self.related.field.foreign_related_fields)
# Set the value of the related field to the value of the related object's related field
for index, field in enumerate(self.related.field.local_related_fields):
setattr(value, field.attname, related_pk[index])
# Set the related instance cache used by __get__ to avoid an SQL query
# when accessing the attribute we just set.
self.related.set_cached_value(instance, value)
# Set the forward accessor cache on the related object to the current
# instance to avoid an extra SQL query if it's accessed later on.
self.related.field.set_cached_value(value, instance) | [
"def",
"__set__",
"(",
"self",
",",
"instance",
",",
"value",
")",
":",
"# The similarity of the code below to the code in",
"# ForwardManyToOneDescriptor is annoying, but there's a bunch",
"# of small differences that would make a common base class convoluted.",
"if",
"value",
"is",
"None",
":",
"# Update the cached related instance (if any) & clear the cache.",
"# Following the example above, this would be the cached",
"# ``restaurant`` instance (if any).",
"rel_obj",
"=",
"self",
".",
"related",
".",
"get_cached_value",
"(",
"instance",
",",
"default",
"=",
"None",
")",
"if",
"rel_obj",
"is",
"not",
"None",
":",
"# Remove the ``restaurant`` instance from the ``place``",
"# instance cache.",
"self",
".",
"related",
".",
"delete_cached_value",
"(",
"instance",
")",
"# Set the ``place`` field on the ``restaurant``",
"# instance to None.",
"setattr",
"(",
"rel_obj",
",",
"self",
".",
"related",
".",
"field",
".",
"name",
",",
"None",
")",
"elif",
"not",
"isinstance",
"(",
"value",
",",
"self",
".",
"related",
".",
"related_model",
")",
":",
"# An object must be an instance of the related class.",
"raise",
"ValueError",
"(",
"'Cannot assign \"%r\": \"%s.%s\" must be a \"%s\" instance.'",
"%",
"(",
"value",
",",
"instance",
".",
"_meta",
".",
"object_name",
",",
"self",
".",
"related",
".",
"get_accessor_name",
"(",
")",
",",
"self",
".",
"related",
".",
"related_model",
".",
"_meta",
".",
"object_name",
",",
")",
")",
"else",
":",
"if",
"instance",
".",
"_state",
".",
"db",
"is",
"None",
":",
"instance",
".",
"_state",
".",
"db",
"=",
"router",
".",
"db_for_write",
"(",
"instance",
".",
"__class__",
",",
"instance",
"=",
"value",
")",
"if",
"value",
".",
"_state",
".",
"db",
"is",
"None",
":",
"value",
".",
"_state",
".",
"db",
"=",
"router",
".",
"db_for_write",
"(",
"value",
".",
"__class__",
",",
"instance",
"=",
"instance",
")",
"if",
"not",
"router",
".",
"allow_relation",
"(",
"value",
",",
"instance",
")",
":",
"raise",
"ValueError",
"(",
"'Cannot assign \"%r\": the current database router prevents this relation.'",
"%",
"value",
")",
"related_pk",
"=",
"tuple",
"(",
"getattr",
"(",
"instance",
",",
"field",
".",
"attname",
")",
"for",
"field",
"in",
"self",
".",
"related",
".",
"field",
".",
"foreign_related_fields",
")",
"# Set the value of the related field to the value of the related object's related field",
"for",
"index",
",",
"field",
"in",
"enumerate",
"(",
"self",
".",
"related",
".",
"field",
".",
"local_related_fields",
")",
":",
"setattr",
"(",
"value",
",",
"field",
".",
"attname",
",",
"related_pk",
"[",
"index",
"]",
")",
"# Set the related instance cache used by __get__ to avoid an SQL query",
"# when accessing the attribute we just set.",
"self",
".",
"related",
".",
"set_cached_value",
"(",
"instance",
",",
"value",
")",
"# Set the forward accessor cache on the related object to the current",
"# instance to avoid an extra SQL query if it's accessed later on.",
"self",
".",
"related",
".",
"field",
".",
"set_cached_value",
"(",
"value",
",",
"instance",
")"
] | [
429,
4
] | [
486,
64
] | python | en | ['en', 'error', 'th'] | False |
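Reverse one-to-one assignment mutates the related object, not the instance it is assigned on: the foreign key columns are written onto `value`, and both accessor caches are primed. A sketch with the same hypothetical Place/Restaurant models; note that the assignment alone never saves anything:

place = Place.objects.get(pk=1)
restaurant = Restaurant.objects.get(pk=2)
place.restaurant = restaurant            # writes restaurant.place_id = place.pk
assert restaurant.place_id == place.pk
assert restaurant.place is place         # forward cache primed: no query here
restaurant.save()                        # persist the FK change explicitly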
ReverseManyToOneDescriptor.__get__ | (self, instance, cls=None) |
Get the related objects through the reverse relation.
With the example above, when getting ``parent.children``:
- ``self`` is the descriptor managing the ``children`` attribute
- ``instance`` is the ``parent`` instance
- ``cls`` is the ``Parent`` class (unused)
|
Get the related objects through the reverse relation. | def __get__(self, instance, cls=None):
"""
Get the related objects through the reverse relation.
With the example above, when getting ``parent.children``:
- ``self`` is the descriptor managing the ``children`` attribute
- ``instance`` is the ``parent`` instance
- ``cls`` is the ``Parent`` class (unused)
"""
if instance is None:
return self
return self.related_manager_cls(instance) | [
"def",
"__get__",
"(",
"self",
",",
"instance",
",",
"cls",
"=",
"None",
")",
":",
"if",
"instance",
"is",
"None",
":",
"return",
"self",
"return",
"self",
".",
"related_manager_cls",
"(",
"instance",
")"
] | [
522,
4
] | [
535,
49
] | python | en | ['en', 'error', 'th'] | False |
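Unlike the one-to-one accessors, this reverse accessor caches nothing itself: every attribute read builds a fresh related manager scoped to the instance. A sketch with hypothetical Parent/Child models matching the docstring's parent.children example (the related_name="children" is an assumption):

from django.db import models

class Parent(models.Model):
    class Meta:
        app_label = "demo"

class Child(models.Model):
    parent = models.ForeignKey(Parent, related_name="children",
                               on_delete=models.CASCADE)

    class Meta:
        app_label = "demo"

parent = Parent.objects.get(pk=1)
print(parent.children.count())    # queryset filtered to parent_id == parent.pk
child = parent.children.create()  # created with child.parent set to parent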
is_appengine_sandbox | () | Reports if the app is running in the first generation sandbox.
The second generation runtimes are technically still in a sandbox, but it
is much less restrictive, so generally you shouldn't need to check for it.
see https://cloud.google.com/appengine/docs/standard/runtimes
| Reports if the app is running in the first generation sandbox. | def is_appengine_sandbox():
"""Reports if the app is running in the first generation sandbox.
The second generation runtimes are technically still in a sandbox, but it
is much less restrictive, so generally you shouldn't need to check for it.
see https://cloud.google.com/appengine/docs/standard/runtimes
"""
return is_appengine() and os.environ["APPENGINE_RUNTIME"] == "python27" | [
"def",
"is_appengine_sandbox",
"(",
")",
":",
"return",
"is_appengine",
"(",
")",
"and",
"os",
".",
"environ",
"[",
"\"APPENGINE_RUNTIME\"",
"]",
"==",
"\"python27\""
] | [
11,
0
] | [
18,
75
] | python | en | ['en', 'en', 'en'] | True |
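In recent urllib3 releases the is_appengine() helper this builds on simply tests for the presence of the environment variable, so the sandbox check reduces to a string comparison; the definition below is quoted from memory and may differ in older versions:

import os

def is_appengine():
    return "APPENGINE_RUNTIME" in os.environ

# True only on the first-generation standard runtime, where the
# variable's value is exactly "python27":
in_sandbox = is_appengine() and os.environ["APPENGINE_RUNTIME"] == "python27"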
is_prod_appengine_mvms | () | Deprecated. | Deprecated. | def is_prod_appengine_mvms():
"""Deprecated."""
return False | [
"def",
"is_prod_appengine_mvms",
"(",
")",
":",
"return",
"False"
] | [
33,
0
] | [
35,
16
] | python | en | ['en', 'la', 'it'] | False |
default_credentials | () | Returns Application Default Credentials. | Returns Application Default Credentials. | def default_credentials():
"""Returns Application Default Credentials."""
if HAS_GOOGLE_AUTH:
credentials, _ = google.auth.default()
return credentials
elif HAS_OAUTH2CLIENT:
return oauth2client.client.GoogleCredentials.get_application_default()
else:
raise EnvironmentError(
'No authentication library is available. Please install either '
'google-auth or oauth2client.') | [
"def",
"default_credentials",
"(",
")",
":",
"if",
"HAS_GOOGLE_AUTH",
":",
"credentials",
",",
"_",
"=",
"google",
".",
"auth",
".",
"default",
"(",
")",
"return",
"credentials",
"elif",
"HAS_OAUTH2CLIENT",
":",
"return",
"oauth2client",
".",
"client",
".",
"GoogleCredentials",
".",
"get_application_default",
"(",
")",
"else",
":",
"raise",
"EnvironmentError",
"(",
"'No authentication library is available. Please install either '",
"'google-auth or oauth2client.'",
")"
] | [
34,
0
] | [
44,
43
] | python | en | ['en', 'ca', 'en'] | True |
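The HAS_GOOGLE_AUTH / HAS_OAUTH2CLIENT flags consumed above are presumably set by guarded imports at module top, along these lines; the call site below shows the helper hiding which library resolved the credentials:

try:
    import google.auth
    HAS_GOOGLE_AUTH = True
except ImportError:
    HAS_GOOGLE_AUTH = False

try:
    import oauth2client.client
    HAS_OAUTH2CLIENT = True
except ImportError:
    HAS_OAUTH2CLIENT = False

try:
    creds = default_credentials()
except EnvironmentError:
    creds = None  # neither auth library is installed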