column              type      lengths / values
nwo                 string    5-106 chars
sha                 string    40 chars (fixed)
path                string    4-174 chars
language            string    1 class
identifier          string    1-140 chars
parameters          string    0-87.7k chars
argument_list       string    1 class
return_statement    string    0-426k chars
docstring           string    0-64.3k chars
docstring_summary   string    0-26.3k chars
docstring_tokens    list
function            string    18-4.83M chars
function_tokens     list
url                 string    83-304 chars
FederatedAI/FATE
32540492623568ecd1afcb367360133616e02fa3
python/federatedml/secureprotol/spdz/utils/random_utils.py
python
_MixRand._inc
(self)
[]
def _inc(self):
    self._caches.append(random.SystemRandom().randint(self._lower, self._upper))
[ "def", "_inc", "(", "self", ")", ":", "self", ".", "_caches", ".", "append", "(", "random", ".", "SystemRandom", "(", ")", ".", "randint", "(", "self", ".", "_lower", ",", "self", ".", "_upper", ")", ")" ]
https://github.com/FederatedAI/FATE/blob/32540492623568ecd1afcb367360133616e02fa3/python/federatedml/secureprotol/spdz/utils/random_utils.py#L54-L55
openatx/facebook-wda
0b01d58cfc4a1971896fc116885e27baf9ffcaf1
wda/__init__.py
python
Alert.click_exists
(self, buttons: Optional[Union[str, list]] = None)
Args: - buttons: the name of the button of list of names Returns: button_name clicked or None
Args: - buttons: the name of the button of list of names
[ "Args", ":", "-", "buttons", ":", "the", "name", "of", "the", "button", "of", "list", "of", "names" ]
def click_exists(self, buttons: Optional[Union[str, list]] = None):
    """
    Args:
        - buttons: the name of the button of list of names

    Returns:
        button_name clicked or None
    """
    try:
        return self.click(buttons)
    except (ValueError, WDARequestError):
        return None
[ "def", "click_exists", "(", "self", ",", "buttons", ":", "Optional", "[", "Union", "[", "str", ",", "list", "]", "]", "=", "None", ")", ":", "try", ":", "return", "self", ".", "click", "(", "buttons", ")", "except", "(", "ValueError", ",", "WDARequestError", ")", ":", "return", "None" ]
https://github.com/openatx/facebook-wda/blob/0b01d58cfc4a1971896fc116885e27baf9ffcaf1/wda/__init__.py#L1211-L1222
mautrix/telegram
9f48eca5a6654bc38012cb761ecaaaf416aabdd0
mautrix_telegram/commands/telegram/misc.py
python
sync
(evt: CommandEvent)
return await evt.reply("Synchronization complete.")
[]
async def sync(evt: CommandEvent) -> EventID:
    if len(evt.args) > 0:
        sync_only = evt.args[0]
        if sync_only not in ("chats", "contacts", "me"):
            return await evt.reply("**Usage:** `$cmdprefix+sp sync [chats|contacts|me]`")
    else:
        sync_only = None
    if not sync_only or sync_only == "chats":
        await evt.reply("Synchronizing chats...")
        await evt.sender.sync_dialogs()
    if not sync_only or sync_only == "contacts":
        await evt.reply("Synchronizing contacts...")
        await evt.sender.sync_contacts()
    if not sync_only or sync_only == "me":
        await evt.sender.update_info()
    return await evt.reply("Synchronization complete.")
[ "async", "def", "sync", "(", "evt", ":", "CommandEvent", ")", "->", "EventID", ":", "if", "len", "(", "evt", ".", "args", ")", ">", "0", ":", "sync_only", "=", "evt", ".", "args", "[", "0", "]", "if", "sync_only", "not", "in", "(", "\"chats\"", ",", "\"contacts\"", ",", "\"me\"", ")", ":", "return", "await", "evt", ".", "reply", "(", "\"**Usage:** `$cmdprefix+sp sync [chats|contacts|me]`\"", ")", "else", ":", "sync_only", "=", "None", "if", "not", "sync_only", "or", "sync_only", "==", "\"chats\"", ":", "await", "evt", ".", "reply", "(", "\"Synchronizing chats...\"", ")", "await", "evt", ".", "sender", ".", "sync_dialogs", "(", ")", "if", "not", "sync_only", "or", "sync_only", "==", "\"contacts\"", ":", "await", "evt", ".", "reply", "(", "\"Synchronizing contacts...\"", ")", "await", "evt", ".", "sender", ".", "sync_contacts", "(", ")", "if", "not", "sync_only", "or", "sync_only", "==", "\"me\"", ":", "await", "evt", ".", "sender", ".", "update_info", "(", ")", "return", "await", "evt", ".", "reply", "(", "\"Synchronization complete.\"", ")" ]
https://github.com/mautrix/telegram/blob/9f48eca5a6654bc38012cb761ecaaaf416aabdd0/mautrix_telegram/commands/telegram/misc.py#L239-L255
andresriancho/w3af
cd22e5252243a87aaa6d0ddea47cf58dacfe00a9
w3af/plugins/attack/db/sqlmap/plugins/dbms/sqlite/syntax.py
python
Syntax.__init__
(self)
[]
def __init__(self):
    GenericSyntax.__init__(self)
[ "def", "__init__", "(", "self", ")", ":", "GenericSyntax", ".", "__init__", "(", "self", ")" ]
https://github.com/andresriancho/w3af/blob/cd22e5252243a87aaa6d0ddea47cf58dacfe00a9/w3af/plugins/attack/db/sqlmap/plugins/dbms/sqlite/syntax.py#L15-L16
MichaelGrupp/evo
c65af3b69188aaadbbd7b5f99ac7973d74343d65
evo/core/trajectory.py
python
PoseTrajectory3D.get_infos
(self)
return infos
:return: dictionary with some infos about the trajectory
:return: dictionary with some infos about the trajectory
[ ":", "return", ":", "dictionary", "with", "some", "infos", "about", "the", "trajectory" ]
def get_infos(self) -> dict:
    """
    :return: dictionary with some infos about the trajectory
    """
    infos = super(PoseTrajectory3D, self).get_infos()
    infos["duration (s)"] = self.timestamps[-1] - self.timestamps[0]
    infos["t_start (s)"] = self.timestamps[0]
    infos["t_end (s)"] = self.timestamps[-1]
    return infos
[ "def", "get_infos", "(", "self", ")", "->", "dict", ":", "infos", "=", "super", "(", "PoseTrajectory3D", ",", "self", ")", ".", "get_infos", "(", ")", "infos", "[", "\"duration (s)\"", "]", "=", "self", ".", "timestamps", "[", "-", "1", "]", "-", "self", ".", "timestamps", "[", "0", "]", "infos", "[", "\"t_start (s)\"", "]", "=", "self", ".", "timestamps", "[", "0", "]", "infos", "[", "\"t_end (s)\"", "]", "=", "self", ".", "timestamps", "[", "-", "1", "]", "return", "infos" ]
https://github.com/MichaelGrupp/evo/blob/c65af3b69188aaadbbd7b5f99ac7973d74343d65/evo/core/trajectory.py#L382-L390
redhat-performance/tuned
77f534993633e8e1c236ce9b05094aa3947a81ed
tuned-gui.py
python
Base.data_for_listbox_summary_of_active_profile
(self)
This add rows to object listbox_summary_of_active_profile. Row consist of grid. Inside grid on first possition is label, second possition is vertical grid. label = name of plugin verical grid consist of labels where are stored values for plugin option and value. This method is emited after change profile and on startup of app.
This add rows to object listbox_summary_of_active_profile. Row consist of grid. Inside grid on first possition is label, second possition is vertical grid. label = name of plugin verical grid consist of labels where are stored values for plugin option and value.
[ "This", "add", "rows", "to", "object", "listbox_summary_of_active_profile", ".", "Row", "consist", "of", "grid", ".", "Inside", "grid", "on", "first", "possition", "is", "label", "second", "possition", "is", "vertical", "grid", ".", "label", "=", "name", "of", "plugin", "verical", "grid", "consist", "of", "labels", "where", "are", "stored", "values", "for", "plugin", "option", "and", "value", "." ]
def data_for_listbox_summary_of_active_profile(self):
    """
    This add rows to object listbox_summary_of_active_profile.
    Row consist of grid. Inside grid on first possition is label,
    second possition is vertical grid.
    label = name of plugin
    verical grid consist of labels where are stored values for plugin option and value.
    This method is emited after change profile and on startup of app.
    """
    for row in self._gobj('listboxSummaryOfActiveProfile'):
        self._gobj('listboxSummaryOfActiveProfile').remove(row)
    if self.is_tuned_connection_ok():
        self.active_profile = \
            self.manager.get_profile(self.controller.active_profile())
    else:
        self.active_profile = None
    self._gobj('summaryProfileName').set_text(self.active_profile.name)
    try:
        self._gobj('summaryIncludedProfileName').set_text(
            self.active_profile.options['include'])
    except:  # keyerror probably
        self._gobj('summaryIncludedProfileName').set_text('None')
    row = Gtk.ListBoxRow()
    box = Gtk.Box(orientation=Gtk.Orientation.HORIZONTAL, spacing=0)
    plugin_name = Gtk.Label()
    plugin_name.set_markup('<b>Plugin Name</b>')
    plugin_option = Gtk.Label()
    plugin_option.set_markup('<b>Plugin Options</b>')
    box.pack_start(plugin_name, True, True, 0)
    box.pack_start(plugin_option, True, True, 0)
    row.add(box)
    self._gobj('listboxSummaryOfActiveProfile').add(row)
    sep = Gtk.Separator.new(Gtk.Orientation.HORIZONTAL)
    self._gobj('listboxSummaryOfActiveProfile').add(sep)
    sep.show()
    for u in self.active_profile.units:
        row = Gtk.ListBoxRow()
        hbox = Gtk.Box(orientation=Gtk.Orientation.HORIZONTAL, spacing=0)
        hbox.set_homogeneous(True)
        row.add(hbox)
        label = Gtk.Label()
        label.set_markup(u)
        label.set_justify(Gtk.Justification.LEFT)
        hbox.pack_start(label, False, True, 1)
        grid = Gtk.Box(orientation=Gtk.Orientation.VERTICAL, spacing=0)
        grid.set_homogeneous(True)
        for o in self.active_profile.units[u].options:
            label_option = Gtk.Label()
            label_option.set_markup(o + ' = ' + '<b>'
                + self.active_profile.units[u].options[o] + '</b>')
            grid.pack_start(label_option, False, True, 0)
        hbox.pack_start(grid, False, True, 0)
        self._gobj('listboxSummaryOfActiveProfile').add(row)
        separator = Gtk.Separator.new(Gtk.Orientation.HORIZONTAL)
        self._gobj('listboxSummaryOfActiveProfile').add(separator)
        separator.show()
    self._gobj('listboxSummaryOfActiveProfile').show_all()
[ "def", "data_for_listbox_summary_of_active_profile", "(", "self", ")", ":", "for", "row", "in", "self", ".", "_gobj", "(", "'listboxSummaryOfActiveProfile'", ")", ":", "self", ".", "_gobj", "(", "'listboxSummaryOfActiveProfile'", ")", ".", "remove", "(", "row", ")", "if", "self", ".", "is_tuned_connection_ok", "(", ")", ":", "self", ".", "active_profile", "=", "self", ".", "manager", ".", "get_profile", "(", "self", ".", "controller", ".", "active_profile", "(", ")", ")", "else", ":", "self", ".", "active_profile", "=", "None", "self", ".", "_gobj", "(", "'summaryProfileName'", ")", ".", "set_text", "(", "self", ".", "active_profile", ".", "name", ")", "try", ":", "self", ".", "_gobj", "(", "'summaryIncludedProfileName'", ")", ".", "set_text", "(", "self", ".", "active_profile", ".", "options", "[", "'include'", "]", ")", "except", ":", "# keyerror probably", "self", ".", "_gobj", "(", "'summaryIncludedProfileName'", ")", ".", "set_text", "(", "'None'", ")", "row", "=", "Gtk", ".", "ListBoxRow", "(", ")", "box", "=", "Gtk", ".", "Box", "(", "orientation", "=", "Gtk", ".", "Orientation", ".", "HORIZONTAL", ",", "spacing", "=", "0", ")", "plugin_name", "=", "Gtk", ".", "Label", "(", ")", "plugin_name", ".", "set_markup", "(", "'<b>Plugin Name</b>'", ")", "plugin_option", "=", "Gtk", ".", "Label", "(", ")", "plugin_option", ".", "set_markup", "(", "'<b>Plugin Options</b>'", ")", "box", ".", "pack_start", "(", "plugin_name", ",", "True", ",", "True", ",", "0", ")", "box", ".", "pack_start", "(", "plugin_option", ",", "True", ",", "True", ",", "0", ")", "row", ".", "add", "(", "box", ")", "self", ".", "_gobj", "(", "'listboxSummaryOfActiveProfile'", ")", ".", "add", "(", "row", ")", "sep", "=", "Gtk", ".", "Separator", ".", "new", "(", "Gtk", ".", "Orientation", ".", "HORIZONTAL", ")", "self", ".", "_gobj", "(", "'listboxSummaryOfActiveProfile'", ")", ".", "add", "(", "sep", ")", "sep", ".", "show", "(", ")", "for", "u", "in", "self", ".", "active_profile", ".", "units", ":", "row", "=", "Gtk", ".", "ListBoxRow", "(", ")", "hbox", "=", "Gtk", ".", "Box", "(", "orientation", "=", "Gtk", ".", "Orientation", ".", "HORIZONTAL", ",", "spacing", "=", "0", ")", "hbox", ".", "set_homogeneous", "(", "True", ")", "row", ".", "add", "(", "hbox", ")", "label", "=", "Gtk", ".", "Label", "(", ")", "label", ".", "set_markup", "(", "u", ")", "label", ".", "set_justify", "(", "Gtk", ".", "Justification", ".", "LEFT", ")", "hbox", ".", "pack_start", "(", "label", ",", "False", ",", "True", ",", "1", ")", "grid", "=", "Gtk", ".", "Box", "(", "orientation", "=", "Gtk", ".", "Orientation", ".", "VERTICAL", ",", "spacing", "=", "0", ")", "grid", ".", "set_homogeneous", "(", "True", ")", "for", "o", "in", "self", ".", "active_profile", ".", "units", "[", "u", "]", ".", "options", ":", "label_option", "=", "Gtk", ".", "Label", "(", ")", "label_option", ".", "set_markup", "(", "o", "+", "' = '", "+", "'<b>'", "+", "self", ".", "active_profile", ".", "units", "[", "u", "]", ".", "options", "[", "o", "]", "+", "'</b>'", ")", "grid", ".", "pack_start", "(", "label_option", ",", "False", ",", "True", ",", "0", ")", "hbox", ".", "pack_start", "(", "grid", ",", "False", ",", "True", ",", "0", ")", "self", ".", "_gobj", "(", "'listboxSummaryOfActiveProfile'", ")", ".", "add", "(", "row", ")", "separator", "=", "Gtk", ".", "Separator", ".", "new", "(", "Gtk", ".", "Orientation", ".", "HORIZONTAL", ")", "self", ".", "_gobj", "(", "'listboxSummaryOfActiveProfile'", ")", ".", "add", "(", "separator", ")", "separator", ".", 
"show", "(", ")", "self", ".", "_gobj", "(", "'listboxSummaryOfActiveProfile'", ")", ".", "show_all", "(", ")" ]
https://github.com/redhat-performance/tuned/blob/77f534993633e8e1c236ce9b05094aa3947a81ed/tuned-gui.py#L234-L305
castorini/castor
fa2f59535c71a0fb4586afbe543b81ba812c8630
datasets/trecqa.py
python
TRECQA.iters
(cls, path, vectors_name, vectors_dir, batch_size=64, shuffle=True, device=0, pt_file=False, vectors=None, unk_init=torch.Tensor.zero_)
return BucketIterator.splits((train, validation, test), batch_size=batch_size, repeat=False, shuffle=shuffle, sort_within_batch=True, device=device)
:param path: directory containing train, test, dev files :param vectors_name: name of word vectors file :param vectors_dir: directory containing word vectors file :param batch_size: batch size :param device: GPU device :param vectors: custom vectors - either predefined torchtext vectors or your own custom Vector classes :param unk_init: function used to generate vector for OOV words :return:
:param path: directory containing train, test, dev files :param vectors_name: name of word vectors file :param vectors_dir: directory containing word vectors file :param batch_size: batch size :param device: GPU device :param vectors: custom vectors - either predefined torchtext vectors or your own custom Vector classes :param unk_init: function used to generate vector for OOV words :return:
[ ":", "param", "path", ":", "directory", "containing", "train", "test", "dev", "files", ":", "param", "vectors_name", ":", "name", "of", "word", "vectors", "file", ":", "param", "vectors_dir", ":", "directory", "containing", "word", "vectors", "file", ":", "param", "batch_size", ":", "batch", "size", ":", "param", "device", ":", "GPU", "device", ":", "param", "vectors", ":", "custom", "vectors", "-", "either", "predefined", "torchtext", "vectors", "or", "your", "own", "custom", "Vector", "classes", ":", "param", "unk_init", ":", "function", "used", "to", "generate", "vector", "for", "OOV", "words", ":", "return", ":" ]
def iters(cls, path, vectors_name, vectors_dir, batch_size=64, shuffle=True,
          device=0, pt_file=False, vectors=None, unk_init=torch.Tensor.zero_):
    """
    :param path: directory containing train, test, dev files
    :param vectors_name: name of word vectors file
    :param vectors_dir: directory containing word vectors file
    :param batch_size: batch size
    :param device: GPU device
    :param vectors: custom vectors - either predefined torchtext vectors or your own custom Vector classes
    :param unk_init: function used to generate vector for OOV words
    :return:
    """
    train, validation, test = cls.splits(path)
    if not pt_file:
        if vectors is None:
            vectors = Vectors(name=vectors_name, cache=vectors_dir, unk_init=unk_init)
        cls.TEXT_FIELD.build_vocab(train, validation, test, vectors=vectors)
    else:
        cls.TEXT_FIELD.build_vocab(train, validation, test)
        cls.TEXT_FIELD = cls.set_vectors(cls.TEXT_FIELD, os.path.join(vectors_dir, vectors_name))
    cls.LABEL_FIELD.build_vocab(train, validation, test)
    cls.VOCAB_SIZE = len(cls.TEXT_FIELD.vocab)
    return BucketIterator.splits((train, validation, test), batch_size=batch_size,
                                 repeat=False, shuffle=shuffle,
                                 sort_within_batch=True, device=device)
[ "def", "iters", "(", "cls", ",", "path", ",", "vectors_name", ",", "vectors_dir", ",", "batch_size", "=", "64", ",", "shuffle", "=", "True", ",", "device", "=", "0", ",", "pt_file", "=", "False", ",", "vectors", "=", "None", ",", "unk_init", "=", "torch", ".", "Tensor", ".", "zero_", ")", ":", "train", ",", "validation", ",", "test", "=", "cls", ".", "splits", "(", "path", ")", "if", "not", "pt_file", ":", "if", "vectors", "is", "None", ":", "vectors", "=", "Vectors", "(", "name", "=", "vectors_name", ",", "cache", "=", "vectors_dir", ",", "unk_init", "=", "unk_init", ")", "cls", ".", "TEXT_FIELD", ".", "build_vocab", "(", "train", ",", "validation", ",", "test", ",", "vectors", "=", "vectors", ")", "else", ":", "cls", ".", "TEXT_FIELD", ".", "build_vocab", "(", "train", ",", "validation", ",", "test", ")", "cls", ".", "TEXT_FIELD", "=", "cls", ".", "set_vectors", "(", "cls", ".", "TEXT_FIELD", ",", "os", ".", "path", ".", "join", "(", "vectors_dir", ",", "vectors_name", ")", ")", "cls", ".", "LABEL_FIELD", ".", "build_vocab", "(", "train", ",", "validation", ",", "test", ")", "cls", ".", "VOCAB_SIZE", "=", "len", "(", "cls", ".", "TEXT_FIELD", ".", "vocab", ")", "return", "BucketIterator", ".", "splits", "(", "(", "train", ",", "validation", ",", "test", ")", ",", "batch_size", "=", "batch_size", ",", "repeat", "=", "False", ",", "shuffle", "=", "shuffle", ",", "sort_within_batch", "=", "True", ",", "device", "=", "device", ")" ]
https://github.com/castorini/castor/blob/fa2f59535c71a0fb4586afbe543b81ba812c8630/datasets/trecqa.py#L37-L62
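A hedged usage sketch for the row above; the directory and file names are hypothetical, and iters is assumed to be a classmethod (it is called on cls):

train_iter, dev_iter, test_iter = TRECQA.iters(
    'data/TrecQA',          # directory with train/dev/test splits (hypothetical)
    'word2vec.trecqa.txt',  # word vectors file name (hypothetical)
    'data/embeddings',      # directory holding the vectors file (hypothetical)
    batch_size=32,
)
# Each returned object is a torchtext BucketIterator over one split.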
ctxis/CAPE
dae9fa6a254ecdbabeb7eb0d2389fa63722c1e82
modules/processing/parsers/mwcp/utils/pefileutils.py
python
obtain_physical_offset_x64
(rel_loc, inst_end_raw, pe=None, file_data=None)
For a 64-bit PE file, pointers to data elements are relative to the end of the assembly instruction. Therefore, given a location (rel_loc) relative to the end of an instruction (inst_end_raw), convert the end instruction address to a memory offset, add that value to the relative location of the data, and convert that to a raw offset. :param rel_loc: Location of data element relative to the end of the instruction address in inst_end_raw :param inst_end_raw: End of an instruction address referencing the data for rel_loc :param pe: pefile.PE object :param file_data: Input file data :return: Raw offset for the data, or None.
For a 64-bit PE file, pointers to data elements are relative to the end of the assembly instruction. Therefore, given a location (rel_loc) relative to the end of an instruction (inst_end_raw), convert the end instruction address to a memory offset, add that value to the relative location of the data, and convert that to a raw offset.
[ "For", "a", "64", "-", "bit", "PE", "file", "pointers", "to", "data", "elements", "are", "relative", "to", "the", "end", "of", "the", "assembly", "instruction", ".", "Therefore", "given", "a", "location", "(", "rel_loc", ")", "relative", "to", "the", "end", "of", "an", "instruction", "(", "inst_end_raw", ")", "convert", "the", "end", "instruction", "address", "to", "a", "memory", "offset", "add", "that", "value", "to", "the", "relative", "location", "of", "the", "data", "and", "convert", "that", "to", "a", "raw", "offset", "." ]
def obtain_physical_offset_x64(rel_loc, inst_end_raw, pe=None, file_data=None):
    """
    For a 64-bit PE file, pointers to data elements are relative to the end of the
    assembly instruction. Therefore, given a location (rel_loc) relative to the end
    of an instruction (inst_end_raw), convert the end instruction address to a memory
    offset, add that value to the relative location of the data, and convert that to
    a raw offset.

    :param rel_loc: Location of data element relative to the end of the instruction address in inst_end_raw
    :param inst_end_raw: End of an instruction address referencing the data for rel_loc
    :param pe: pefile.PE object
    :param file_data: Input file data
    :return: Raw offset for the data, or None.
    """
    if file_data:
        pe = obtain_pe(file_data)
    if pe:
        inst_end_mem = obtain_memory_offset(inst_end_raw, pe=pe)
        # Obtain the memory location of the data and convert it to a physical offset
        mem_loc = rel_loc + inst_end_mem
        return obtain_physical_offset(mem_loc, pe=pe)
    else:
        return None
[ "def", "obtain_physical_offset_x64", "(", "rel_loc", ",", "inst_end_raw", ",", "pe", "=", "None", ",", "file_data", "=", "None", ")", ":", "if", "file_data", ":", "pe", "=", "obtain_pe", "(", "file_data", ")", "if", "pe", ":", "inst_end_mem", "=", "obtain_memory_offset", "(", "inst_end_raw", ",", "pe", "=", "pe", ")", "# Obtain the memory location of the data and convert it to a physical offset", "mem_loc", "=", "rel_loc", "+", "inst_end_mem", "return", "obtain_physical_offset", "(", "mem_loc", ",", "pe", "=", "pe", ")", "else", ":", "return", "None" ]
https://github.com/ctxis/CAPE/blob/dae9fa6a254ecdbabeb7eb0d2389fa63722c1e82/modules/processing/parsers/mwcp/utils/pefileutils.py#L134-L156
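A hedged illustration of the arithmetic the docstring describes; the obtain_* helpers come from the same module, and every number below is made up for illustration only:

inst_end_raw = 0x1400            # hypothetical raw offset of the instruction's end
inst_end_mem = 0x140002000       # hypothetical virtual address after mapping it
rel_loc = 0x80                   # hypothetical RIP-relative displacement
mem_loc = rel_loc + inst_end_mem # 0x140002080, the data's virtual address
# obtain_physical_offset(mem_loc, pe=pe) would then map 0x140002080 back to a raw file offset.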
theotherp/nzbhydra
4b03d7f769384b97dfc60dade4806c0fc987514e
libs/bs4/dammit.py
python
EntitySubstitution.substitute_xml_containing_entities
( cls, value, make_quoted_attribute=False)
return value
Substitute XML entities for special XML characters. :param value: A string to be substituted. The less-than sign will become &lt;, the greater-than sign will become &gt;, and any ampersands that are not part of an entity defition will become &amp;. :param make_quoted_attribute: If True, then the string will be quoted, as befits an attribute value.
Substitute XML entities for special XML characters.
[ "Substitute", "XML", "entities", "for", "special", "XML", "characters", "." ]
def substitute_xml_containing_entities(
        cls, value, make_quoted_attribute=False):
    """Substitute XML entities for special XML characters.

    :param value: A string to be substituted. The less-than sign will
      become &lt;, the greater-than sign will become &gt;, and any
      ampersands that are not part of an entity defition will
      become &amp;.

    :param make_quoted_attribute: If True, then the string will be
     quoted, as befits an attribute value.
    """
    # Escape angle brackets, and ampersands that aren't part of
    # entities.
    value = cls.BARE_AMPERSAND_OR_BRACKET.sub(
        cls._substitute_xml_entity, value)
    if make_quoted_attribute:
        value = cls.quoted_attribute_value(value)
    return value
[ "def", "substitute_xml_containing_entities", "(", "cls", ",", "value", ",", "make_quoted_attribute", "=", "False", ")", ":", "# Escape angle brackets, and ampersands that aren't part of", "# entities.", "value", "=", "cls", ".", "BARE_AMPERSAND_OR_BRACKET", ".", "sub", "(", "cls", ".", "_substitute_xml_entity", ",", "value", ")", "if", "make_quoted_attribute", ":", "value", "=", "cls", ".", "quoted_attribute_value", "(", "value", ")", "return", "value" ]
https://github.com/theotherp/nzbhydra/blob/4b03d7f769384b97dfc60dade4806c0fc987514e/libs/bs4/dammit.py#L161-L180
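A hedged sketch of the behavior the docstring above describes; the input string is hypothetical:

EntitySubstitution.substitute_xml_containing_entities('1 < 2 &amp; 3 & 4')
# -> '1 &lt; 2 &amp; 3 &amp; 4'
# The existing &amp; entity is preserved; only the bare ampersand and the
# angle bracket are escaped.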
IronLanguages/ironpython3
7a7bb2a872eeab0d1009fc8a6e24dca43f65b693
Src/StdLib/Lib/lib2to3/pytree.py
python
BasePattern.match
(self, node, results=None)
return True
Does this pattern exactly match a node? Returns True if it matches, False if not. If results is not None, it must be a dict which will be updated with the nodes matching named subpatterns. Default implementation for non-wildcard patterns.
Does this pattern exactly match a node?
[ "Does", "this", "pattern", "exactly", "match", "a", "node?" ]
def match(self, node, results=None):
    """
    Does this pattern exactly match a node?

    Returns True if it matches, False if not.

    If results is not None, it must be a dict which will be
    updated with the nodes matching named subpatterns.

    Default implementation for non-wildcard patterns.
    """
    if self.type is not None and node.type != self.type:
        return False
    if self.content is not None:
        r = None
        if results is not None:
            r = {}
        if not self._submatch(node, r):
            return False
        if r:
            results.update(r)
    if results is not None and self.name:
        results[self.name] = node
    return True
[ "def", "match", "(", "self", ",", "node", ",", "results", "=", "None", ")", ":", "if", "self", ".", "type", "is", "not", "None", "and", "node", ".", "type", "!=", "self", ".", "type", ":", "return", "False", "if", "self", ".", "content", "is", "not", "None", ":", "r", "=", "None", "if", "results", "is", "not", "None", ":", "r", "=", "{", "}", "if", "not", "self", ".", "_submatch", "(", "node", ",", "r", ")", ":", "return", "False", "if", "r", ":", "results", ".", "update", "(", "r", ")", "if", "results", "is", "not", "None", "and", "self", ".", "name", ":", "results", "[", "self", ".", "name", "]", "=", "node", "return", "True" ]
https://github.com/IronLanguages/ironpython3/blob/7a7bb2a872eeab0d1009fc8a6e24dca43f65b693/Src/StdLib/Lib/lib2to3/pytree.py#L455-L478
conansherry/detectron2
72c935d9aad8935406b1038af408aa06077d950a
detectron2/modeling/backbone/build.py
python
build_backbone
(cfg, input_shape=None)
return backbone
Build a backbone from `cfg.MODEL.BACKBONE.NAME`. Returns: an instance of :class:`Backbone`
Build a backbone from `cfg.MODEL.BACKBONE.NAME`.
[ "Build", "a", "backbone", "from", "cfg", ".", "MODEL", ".", "BACKBONE", ".", "NAME", "." ]
def build_backbone(cfg, input_shape=None):
    """
    Build a backbone from `cfg.MODEL.BACKBONE.NAME`.

    Returns:
        an instance of :class:`Backbone`
    """
    if input_shape is None:
        input_shape = ShapeSpec(channels=len(cfg.MODEL.PIXEL_MEAN))

    backbone_name = cfg.MODEL.BACKBONE.NAME
    backbone = BACKBONE_REGISTRY.get(backbone_name)(cfg, input_shape)
    assert isinstance(backbone, Backbone)
    return backbone
[ "def", "build_backbone", "(", "cfg", ",", "input_shape", "=", "None", ")", ":", "if", "input_shape", "is", "None", ":", "input_shape", "=", "ShapeSpec", "(", "channels", "=", "len", "(", "cfg", ".", "MODEL", ".", "PIXEL_MEAN", ")", ")", "backbone_name", "=", "cfg", ".", "MODEL", ".", "BACKBONE", ".", "NAME", "backbone", "=", "BACKBONE_REGISTRY", ".", "get", "(", "backbone_name", ")", "(", "cfg", ",", "input_shape", ")", "assert", "isinstance", "(", "backbone", ",", "Backbone", ")", "return", "backbone" ]
https://github.com/conansherry/detectron2/blob/72c935d9aad8935406b1038af408aa06077d950a/detectron2/modeling/backbone/build.py#L20-L33
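A hedged usage sketch following detectron2's usual config conventions (the default backbone factory chosen by get_cfg is an assumption, not stated by the row):

from detectron2.config import get_cfg

cfg = get_cfg()                 # default config; cfg.MODEL.BACKBONE.NAME selects the factory
backbone = build_backbone(cfg)  # input_shape inferred from len(cfg.MODEL.PIXEL_MEAN)
shapes = backbone.output_shape()  # mapping of feature names to ShapeSpec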
zhl2008/awd-platform
0416b31abea29743387b10b3914581fbe8e7da5e
web_flaskbb/lib/python2.7/site-packages/whoosh/searching.py
python
Hit.itervalues
(self)
return itervalues(self.fields())
[]
def itervalues(self):
    return itervalues(self.fields())
[ "def", "itervalues", "(", "self", ")", ":", "return", "itervalues", "(", "self", ".", "fields", "(", ")", ")" ]
https://github.com/zhl2008/awd-platform/blob/0416b31abea29743387b10b3914581fbe8e7da5e/web_flaskbb/lib/python2.7/site-packages/whoosh/searching.py#L1534-L1535
aws/aws-sam-cli
2aa7bf01b2e0b0864ef63b1898a8b30577443acc
samcli/lib/sync/sync_flow.py
python
SyncFlow.__init__
( self, build_context: "BuildContext", deploy_context: "DeployContext", physical_id_mapping: Dict[str, str], log_name: str, stacks: Optional[List[Stack]] = None, )
Parameters ---------- build_context : BuildContext BuildContext used for build related parameters deploy_context : BuildContext DeployContext used for this deploy related parameters physical_id_mapping : Dict[str, str] Mapping between resource logical identifier and physical identifier log_name : str Name to be used for logging purposes stacks : List[Stack], optional List of stacks containing a root stack and optional nested stacks
Parameters ---------- build_context : BuildContext BuildContext used for build related parameters deploy_context : BuildContext DeployContext used for this deploy related parameters physical_id_mapping : Dict[str, str] Mapping between resource logical identifier and physical identifier log_name : str Name to be used for logging purposes stacks : List[Stack], optional List of stacks containing a root stack and optional nested stacks
[ "Parameters", "----------", "build_context", ":", "BuildContext", "BuildContext", "used", "for", "build", "related", "parameters", "deploy_context", ":", "BuildContext", "DeployContext", "used", "for", "this", "deploy", "related", "parameters", "physical_id_mapping", ":", "Dict", "[", "str", "str", "]", "Mapping", "between", "resource", "logical", "identifier", "and", "physical", "identifier", "log_name", ":", "str", "Name", "to", "be", "used", "for", "logging", "purposes", "stacks", ":", "List", "[", "Stack", "]", "optional", "List", "of", "stacks", "containing", "a", "root", "stack", "and", "optional", "nested", "stacks" ]
def __init__(
    self,
    build_context: "BuildContext",
    deploy_context: "DeployContext",
    physical_id_mapping: Dict[str, str],
    log_name: str,
    stacks: Optional[List[Stack]] = None,
):
    """
    Parameters
    ----------
    build_context : BuildContext
        BuildContext used for build related parameters
    deploy_context : BuildContext
        DeployContext used for this deploy related parameters
    physical_id_mapping : Dict[str, str]
        Mapping between resource logical identifier and physical identifier
    log_name : str
        Name to be used for logging purposes
    stacks : List[Stack], optional
        List of stacks containing a root stack and optional nested stacks
    """
    self._build_context = build_context
    self._deploy_context = deploy_context
    self._log_name = log_name
    self._stacks = stacks
    self._session = None
    self._physical_id_mapping = physical_id_mapping
    self._locks = None
[ "def", "__init__", "(", "self", ",", "build_context", ":", "\"BuildContext\"", ",", "deploy_context", ":", "\"DeployContext\"", ",", "physical_id_mapping", ":", "Dict", "[", "str", ",", "str", "]", ",", "log_name", ":", "str", ",", "stacks", ":", "Optional", "[", "List", "[", "Stack", "]", "]", "=", "None", ",", ")", ":", "self", ".", "_build_context", "=", "build_context", "self", ".", "_deploy_context", "=", "deploy_context", "self", ".", "_log_name", "=", "log_name", "self", ".", "_stacks", "=", "stacks", "self", ".", "_session", "=", "None", "self", ".", "_physical_id_mapping", "=", "physical_id_mapping", "self", ".", "_locks", "=", "None" ]
https://github.com/aws/aws-sam-cli/blob/2aa7bf01b2e0b0864ef63b1898a8b30577443acc/samcli/lib/sync/sync_flow.py#L43-L71
glitchdotcom/WebPutty
4f5da5eb2b4668cbf3c15cf002feacd1d95d2ef7
ziplibs/BeautifulSoup.py
python
Tag.__nonzero__
(self)
return True
A tag is non-None even if it has no contents.
A tag is non-None even if it has no contents.
[ "A", "tag", "is", "non", "-", "None", "even", "if", "it", "has", "no", "contents", "." ]
def __nonzero__(self):
    "A tag is non-None even if it has no contents."
    return True
[ "def", "__nonzero__", "(", "self", ")", ":", "return", "True" ]
https://github.com/glitchdotcom/WebPutty/blob/4f5da5eb2b4668cbf3c15cf002feacd1d95d2ef7/ziplibs/BeautifulSoup.py#L614-L616
HumanDynamics/openPDS
20d9e0669b0f120399055765b61f0f1fa298a1ff
openpds/connectors/funf/views.py
python
clean_keys
(d)
return new
replace all "." with "-" and force keys to lowercase
replace all "." with "-" and force keys to lowercase
[ "replace", "all", ".", "with", "-", "and", "force", "keys", "to", "lowercase" ]
def clean_keys(d):
    '''replace all "." with "-" and force keys to lowercase'''
    new = {}
    for k, v in d.iteritems():
        if isinstance(v, dict):
            v = clean_keys(v)
        if isinstance(v, list):
            for idx, i in enumerate(v):
                if isinstance(i, dict):
                    v[idx] = clean_keys(i)
        new[k.replace('.', '-').lower()] = v
    return new
[ "def", "clean_keys", "(", "d", ")", ":", "new", "=", "{", "}", "for", "k", ",", "v", "in", "d", ".", "iteritems", "(", ")", ":", "if", "isinstance", "(", "v", ",", "dict", ")", ":", "v", "=", "clean_keys", "(", "v", ")", "if", "isinstance", "(", "v", ",", "list", ")", ":", "for", "idx", ",", "i", "in", "enumerate", "(", "v", ")", ":", "if", "isinstance", "(", "i", ",", "dict", ")", ":", "v", "[", "idx", "]", "=", "clean_keys", "(", "i", ")", "new", "[", "k", ".", "replace", "(", "'.'", ",", "'-'", ")", ".", "lower", "(", ")", "]", "=", "v", "return", "new" ]
https://github.com/HumanDynamics/openPDS/blob/20d9e0669b0f120399055765b61f0f1fa298a1ff/openpds/connectors/funf/views.py#L146-L157
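A hedged usage sketch (Python 2, to match the row's iteritems); the input dict is hypothetical:

d = {'Funf.Probe': {'Wifi.Scan': 1}, 'Items': [{'A.B': 2}]}
print clean_keys(d)
# -> {'funf-probe': {'wifi-scan': 1}, 'items': [{'a-b': 2}]}
# Nested dicts are cleaned recursively, including dicts inside lists.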
chrysn/aiocoap
1f03d4ceb969b2b443c288c312d44c3b7c3e2031
aiocoap/pipe.py
python
Pipe.poke
(self)
Ask the responder for a life sign. It is up to the responder to ignore this (eg. because the responder is the library/application and can't be just gone), to issue a generic transport-dependent 'ping' to see whether the connection is still alive, or to retransmit the request if it is an observation over an unreliable channel. In any case, no status is reported directly to the poke, but if whatever the responder does fails, it will send an appropriate error message as a response.
Ask the responder for a life sign. It is up to the responder to ignore this (eg. because the responder is the library/application and can't be just gone), to issue a generic transport-dependent 'ping' to see whether the connection is still alive, or to retransmit the request if it is an observation over an unreliable channel.
[ "Ask", "the", "responder", "for", "a", "life", "sign", ".", "It", "is", "up", "to", "the", "responder", "to", "ignore", "this", "(", "eg", ".", "because", "the", "responder", "is", "the", "library", "/", "application", "and", "can", "t", "be", "just", "gone", ")", "to", "issue", "a", "generic", "transport", "-", "dependent", "ping", "to", "see", "whether", "the", "connection", "is", "still", "alive", "or", "to", "retransmit", "the", "request", "if", "it", "is", "an", "observation", "over", "an", "unreliable", "channel", "." ]
def poke(self):
    """Ask the responder for a life sign. It is up to the responder to
    ignore this (eg. because the responder is the library/application
    and can't be just gone), to issue a generic transport-dependent
    'ping' to see whether the connection is still alive, or to
    retransmit the request if it is an observation over an unreliable
    channel.

    In any case, no status is reported directly to the poke, but if
    whatever the responder does fails, it will send an appropriate
    error message as a response."""
    raise NotImplementedError()
[ "def", "poke", "(", "self", ")", ":", "raise", "NotImplementedError", "(", ")" ]
https://github.com/chrysn/aiocoap/blob/1f03d4ceb969b2b443c288c312d44c3b7c3e2031/aiocoap/pipe.py#L90-L100
vertical-knowledge/ripozo
e648db2967c4fbf4315860c21a8418f384db4454
ripozo/adapters/jsonapi.py
python
JSONAPIAdapter._construct_links
(self, resource)
return links
Constructs the links object according to `this section <http://jsonapi.org/format/#document-links>`_ :param ResourceBase resource: The links from this resource instance will be used to construct the links object. :return: A dictionary representing the links object :rtype: dict
Constructs the links object according to `this section <http://jsonapi.org/format/#document-links>`_
[ "Constructs", "the", "links", "object", "according", "to", "this", "section", "<http", ":", "//", "jsonapi", ".", "org", "/", "format", "/", "#document", "-", "links", ">", "_" ]
def _construct_links(self, resource):
    """
    Constructs the links object according to
    `this section <http://jsonapi.org/format/#document-links>`_

    :param ResourceBase resource: The links from this resource
        instance will be used to construct the links object.
    :return: A dictionary representing the links object
    :rtype: dict
    """
    self_url = self.combine_base_url_with_resource_url(resource.url)
    links = {'self': self_url}
    for link, name, embedded in resource.linked_resources:
        links[name] = self.combine_base_url_with_resource_url(link.url)
    return links
[ "def", "_construct_links", "(", "self", ",", "resource", ")", ":", "self_url", "=", "self", ".", "combine_base_url_with_resource_url", "(", "resource", ".", "url", ")", "links", "=", "{", "'self'", ":", "self_url", "}", "for", "link", ",", "name", ",", "embedded", "in", "resource", ".", "linked_resources", ":", "links", "[", "name", "]", "=", "self", ".", "combine_base_url_with_resource_url", "(", "link", ".", "url", ")", "return", "links" ]
https://github.com/vertical-knowledge/ripozo/blob/e648db2967c4fbf4315860c21a8418f384db4454/ripozo/adapters/jsonapi.py#L67-L81
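A hedged illustration of the dictionary shape this returns; the URLs and link names below are hypothetical:

# {
#     'self': 'http://example.com/api/widgets/1',
#     'owner': 'http://example.com/api/users/42',
# }
# 'self' is always present; one entry is added per linked resource.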
MartinThoma/algorithms
6199cfa3446e1056c7b4d75ca6e306e9e56fd95b
bib/scrape.py
python
add_data
(path_to_db, data, table)
Add the data to the database.
Add the data to the database.
[ "Add", "the", "data", "to", "the", "database", "." ]
def add_data(path_to_db, data, table):
    """Add the data to the database."""
    con = sqlite3.connect(path_to_db)
    cur = con.cursor()
    for el in data:
        if table in ['seatestimate', 'manualcount']:
            sql = ("INSERT INTO `{table}` "
                   "(`date_str`, `timezone_type`, `timezone`, "
                   "`location_name`, "
                   "`free_seats`, `occupied_seats`) "
                   "VALUES ('{date}', '{timezone_type}', "
                   "'{timezone}', '{location_name}',"
                   "'{free_seats}', '{occupied_seats}');"
                   "").format(table=table,
                              date=el['timestamp']['date'],
                              timezone_type=el['timestamp']['timezone_type'],
                              timezone=el['timestamp']['timezone'],
                              location_name=el['location_name'],
                              occupied_seats=el['occupied_seats'],
                              free_seats=el['free_seats'])
        else:
            row_counter = 'number_of_ports'
            if table == 'wlanclients':
                row_counter = 'number_of_clients'
            sql = ("INSERT INTO `{table}` "
                   "(`date_str`, `timezone_type`, `timezone`, "
                   "`location_name`, `{row_counter}`) "
                   "VALUES ('{date}', '{timezone_type}', "
                   "'{timezone}', '{location_name}',"
                   "'{counter}');"
                   "").format(table=table,
                              row_counter=row_counter,
                              date=el['timestamp']['date'],
                              timezone_type=el['timestamp']['timezone_type'],
                              timezone=el['timestamp']['timezone'],
                              location_name=el['location_name'],
                              counter=el[row_counter])
        try:
            cur.execute(sql)
        except sqlite3.IntegrityError:
            pass  # This data is already in the DB. Just ignore it.
    con.commit()
    con.close()
[ "def", "add_data", "(", "path_to_db", ",", "data", ",", "table", ")", ":", "con", "=", "sqlite3", ".", "connect", "(", "path_to_db", ")", "cur", "=", "con", ".", "cursor", "(", ")", "for", "el", "in", "data", ":", "if", "table", "in", "[", "'seatestimate'", ",", "'manualcount'", "]", ":", "sql", "=", "(", "\"INSERT INTO `{table}` \"", "\"(`date_str`, `timezone_type`, `timezone`, \"", "\"`location_name`, \"", "\"`free_seats`, `occupied_seats`) \"", "\"VALUES ('{date}', '{timezone_type}', \"", "\"'{timezone}', '{location_name}',\"", "\"'{free_seats}', '{occupied_seats}');\"", "\"\"", ")", ".", "format", "(", "table", "=", "table", ",", "date", "=", "el", "[", "'timestamp'", "]", "[", "'date'", "]", ",", "timezone_type", "=", "el", "[", "'timestamp'", "]", "[", "'timezone_type'", "]", ",", "timezone", "=", "el", "[", "'timestamp'", "]", "[", "'timezone'", "]", ",", "location_name", "=", "el", "[", "'location_name'", "]", ",", "occupied_seats", "=", "el", "[", "'occupied_seats'", "]", ",", "free_seats", "=", "el", "[", "'free_seats'", "]", ")", "else", ":", "row_counter", "=", "'number_of_ports'", "if", "table", "==", "'wlanclients'", ":", "row_counter", "=", "'number_of_clients'", "sql", "=", "(", "\"INSERT INTO `{table}` \"", "\"(`date_str`, `timezone_type`, `timezone`, \"", "\"`location_name`, `{row_counter}`) \"", "\"VALUES ('{date}', '{timezone_type}', \"", "\"'{timezone}', '{location_name}',\"", "\"'{counter}');\"", "\"\"", ")", ".", "format", "(", "table", "=", "table", ",", "row_counter", "=", "row_counter", ",", "date", "=", "el", "[", "'timestamp'", "]", "[", "'date'", "]", ",", "timezone_type", "=", "el", "[", "'timestamp'", "]", "[", "'timezone_type'", "]", ",", "timezone", "=", "el", "[", "'timestamp'", "]", "[", "'timezone'", "]", ",", "location_name", "=", "el", "[", "'location_name'", "]", ",", "counter", "=", "el", "[", "row_counter", "]", ")", "try", ":", "cur", ".", "execute", "(", "sql", ")", "except", "sqlite3", ".", "IntegrityError", ":", "pass", "# This data is already in the DB. Just ignore it.", "con", ".", "commit", "(", ")", "con", ".", "close", "(", ")" ]
https://github.com/MartinThoma/algorithms/blob/6199cfa3446e1056c7b4d75ca6e306e9e56fd95b/bib/scrape.py#L111-L154
chrisconlan/algorithmic-trading-with-python
ebe01087c7d9172db72bc3c9adc1eee5e882ac49
src/pypm/portfolio.py
python
Position.__init__
(self, symbol: Symbol, entry_date: pd.Timestamp, entry_price: Dollars, shares: int)
Equivalent to buying a certain number of shares of the asset
Equivalent to buying a certain number of shares of the asset
[ "Equivalent", "to", "buying", "a", "certain", "number", "of", "shares", "of", "the", "asset" ]
def __init__(self, symbol: Symbol, entry_date: pd.Timestamp,
        entry_price: Dollars, shares: int):
    """
    Equivalent to buying a certain number of shares of the asset
    """
    # Recorded on initialization
    self.entry_date = entry_date

    assert entry_price > 0, 'Cannot buy asset with zero or negative price.'
    self.entry_price = entry_price

    assert shares > 0, 'Cannot buy zero or negative shares.'
    self.shares = shares

    self.symbol = symbol

    # Recorded on position exit
    self.exit_date: pd.Timestamp = None
    self.exit_price: Dollars = None

    # For easily getting current portfolio value
    self.last_date: pd.Timestamp = None
    self.last_price: Dollars = None

    # Updated intermediately
    self._dict_series: Dict[pd.Timestamp, Dollars] = OrderedDict()
    self.record_price_update(entry_date, entry_price)

    # Cache control for pd.Series representation
    self._price_series: pd.Series = None
    self._needs_update_pd_series: bool = True
[ "def", "__init__", "(", "self", ",", "symbol", ":", "Symbol", ",", "entry_date", ":", "pd", ".", "Timestamp", ",", "entry_price", ":", "Dollars", ",", "shares", ":", "int", ")", ":", "# Recorded on initialization", "self", ".", "entry_date", "=", "entry_date", "assert", "entry_price", ">", "0", ",", "'Cannot buy asset with zero or negative price.'", "self", ".", "entry_price", "=", "entry_price", "assert", "shares", ">", "0", ",", "'Cannot buy zero or negative shares.'", "self", ".", "shares", "=", "shares", "self", ".", "symbol", "=", "symbol", "# Recorded on position exit", "self", ".", "exit_date", ":", "pd", ".", "Timestamp", "=", "None", "self", ".", "exit_price", ":", "Dollars", "=", "None", "# For easily getting current portfolio value", "self", ".", "last_date", ":", "pd", ".", "Timestamp", "=", "None", "self", ".", "last_price", ":", "Dollars", "=", "None", "# Updated intermediately", "self", ".", "_dict_series", ":", "Dict", "[", "pd", ".", "Timestamp", ",", "Dollars", "]", "=", "OrderedDict", "(", ")", "self", ".", "record_price_update", "(", "entry_date", ",", "entry_price", ")", "# Cache control for pd.Series representation", "self", ".", "_price_series", ":", "pd", ".", "Series", "=", "None", "self", ".", "_needs_update_pd_series", ":", "bool", "=", "True" ]
https://github.com/chrisconlan/algorithmic-trading-with-python/blob/ebe01087c7d9172db72bc3c9adc1eee5e882ac49/src/pypm/portfolio.py#L29-L60
Coalfire-Research/Slackor
aa32a7f9250bd8b107d48fd573f26176b527b2a5
impacket/impacket/dcerpc/v5/enum.py
python
_is_sunder
(name)
return (name[0] == name[-1] == '_' and name[1:2] != '_' and name[-2:-1] != '_' and len(name) > 2)
Returns True if a _sunder_ name, False otherwise.
Returns True if a _sunder_ name, False otherwise.
[ "Returns", "True", "if", "a", "_sunder_", "name", "False", "otherwise", "." ]
def _is_sunder(name):
    """Returns True if a _sunder_ name, False otherwise."""
    return (name[0] == name[-1] == '_' and
            name[1:2] != '_' and
            name[-2:-1] != '_' and
            len(name) > 2)
[ "def", "_is_sunder", "(", "name", ")", ":", "return", "(", "name", "[", "0", "]", "==", "name", "[", "-", "1", "]", "==", "'_'", "and", "name", "[", "1", ":", "2", "]", "!=", "'_'", "and", "name", "[", "-", "2", ":", "-", "1", "]", "!=", "'_'", "and", "len", "(", "name", ")", ">", "2", ")" ]
https://github.com/Coalfire-Research/Slackor/blob/aa32a7f9250bd8b107d48fd573f26176b527b2a5/impacket/impacket/dcerpc/v5/enum.py#L59-L64
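A few hedged sanity checks on the rule above; the input names are illustrative:

assert _is_sunder('_order_') is True    # exactly one leading and one trailing underscore
assert _is_sunder('__init__') is False  # dunder, not sunder (second char is '_')
assert _is_sunder('_') is False         # too short: len(name) > 2 fails
assert _is_sunder('order') is False     # no underscores at all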
cheshirekow/cmake_format
eff5df1f41c665ea7cac799396042e4f406ef09a
cmakelang/format/formatter.py
python
StackContext.push_node
(self, node)
Push `node` onto the `node_path` and yield a context manager. Pop `node` off of `node_path` when the context manager `__exit__()s`
Push `node` onto the `node_path` and yield a context manager. Pop `node` off of `node_path` when the context manager `__exit__()s`
[ "Push", "node", "onto", "the", "node_path", "and", "yield", "a", "context", "manager", ".", "Pop", "node", "off", "of", "node_path", "when", "the", "context", "manager", "__exit__", "()", "s" ]
def push_node(self, node):
    """Push `node` onto the `node_path` and yield a context manager. Pop
    `node` off of `node_path` when the context manager `__exit__()s`
    """
    self.node_path.append(node)
    yield None
    self.node_path.pop(-1)
[ "def", "push_node", "(", "self", ",", "node", ")", ":", "self", ".", "node_path", ".", "append", "(", "node", ")", "yield", "None", "self", ".", "node_path", ".", "pop", "(", "-", "1", ")" ]
https://github.com/cheshirekow/cmake_format/blob/eff5df1f41c665ea7cac799396042e4f406ef09a/cmakelang/format/formatter.py#L288-L295
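Since the body yields, the method is presumably wrapped with contextlib.contextmanager in the source; a hedged usage sketch under that assumption, with hypothetical names:

with stack_context.push_node(node):   # stack_context and node are hypothetical
    pass  # here, node is the last element of stack_context.node_path
# On exit, node has been popped off node_path again.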
frePPLe/frepple
57aa612030b4fcd03cb9c613f83a7dac4f0e8d6d
freppledb/common/report.py
python
getCurrency
()
[]
def getCurrency():
    try:
        cur = Parameter.getValue("currency").split(",")
        if len(cur) < 2:
            return ("", " %s" % escape(cur[0]))
        else:
            return ("%s " % escape(cur[0]), " %s" % escape(cur[1]))
    except Exception:
        return ("", " $")
[ "def", "getCurrency", "(", ")", ":", "try", ":", "cur", "=", "Parameter", ".", "getValue", "(", "\"currency\"", ")", ".", "split", "(", "\",\"", ")", "if", "len", "(", "cur", ")", "<", "2", ":", "return", "(", "\"\"", ",", "\" %s\"", "%", "escape", "(", "cur", "[", "0", "]", ")", ")", "else", ":", "return", "(", "\"%s \"", "%", "escape", "(", "cur", "[", "0", "]", ")", ",", "\" %s\"", "%", "escape", "(", "cur", "[", "1", "]", ")", ")", "except", "Exception", ":", "return", "(", "\"\"", ",", "\" $\"", ")" ]
https://github.com/frePPLe/frepple/blob/57aa612030b4fcd03cb9c613f83a7dac4f0e8d6d/freppledb/common/report.py#L455-L463
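A hedged illustration of the (prefix, suffix) pairs this returns; the parameter values are hypothetical:

# currency parameter "$"      -> ("", " $")       one value: suffix only
# currency parameter "EUR,€"  -> ("EUR ", " €")   two values: prefix and suffix
# parameter missing or broken -> ("", " $")       fallback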
jupyter-widgets/ipyleaflet
3970dc74861d17eca56824eb5bf93bafe156319e
ipyleaflet/leaflet.py
python
Marker.on_move
(self, callback, remove=False)
Add a move event listener. Parameters ---------- callback : callable Callback function that will be called on move event. remove: boolean Whether to remove this callback or not. Defaults to False.
Add a move event listener.
[ "Add", "a", "move", "event", "listener", "." ]
def on_move(self, callback, remove=False):
    """Add a move event listener.

    Parameters
    ----------
    callback : callable
        Callback function that will be called on move event.
    remove: boolean
        Whether to remove this callback or not. Defaults to False.
    """
    self._move_callbacks.register_callback(callback, remove=remove)
[ "def", "on_move", "(", "self", ",", "callback", ",", "remove", "=", "False", ")", ":", "self", ".", "_move_callbacks", ".", "register_callback", "(", "callback", ",", "remove", "=", "remove", ")" ]
https://github.com/jupyter-widgets/ipyleaflet/blob/3970dc74861d17eca56824eb5bf93bafe156319e/ipyleaflet/leaflet.py#L434-L444
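A hedged usage sketch; the marker setup follows ipyleaflet's public API, but the exact keyword arguments delivered to the callback are an assumption:

from ipyleaflet import Map, Marker

m = Map(center=(52.2, 21.0), zoom=10)
marker = Marker(location=(52.2, 21.0), draggable=True)
m.add_layer(marker)

def handle_move(**kwargs):
    print('marker moved:', kwargs.get('location'))  # 'location' key assumed

marker.on_move(handle_move)               # register the listener
marker.on_move(handle_move, remove=True)  # deregister the same callback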
home-assistant/core
265ebd17a3f17ed8dc1e9bdede03ac8e323f1ab1
homeassistant/components/elkm1/alarm_control_panel.py
python
ElkArea.changed_by
(self)
return self._changed_by
Last change triggered by.
Last change triggered by.
[ "Last", "change", "triggered", "by", "." ]
def changed_by(self):
    """Last change triggered by."""
    return self._changed_by
[ "def", "changed_by", "(", "self", ")", ":", "return", "self", ".", "_changed_by" ]
https://github.com/home-assistant/core/blob/265ebd17a3f17ed8dc1e9bdede03ac8e323f1ab1/homeassistant/components/elkm1/alarm_control_panel.py#L198-L200
naszilla/bananas
e2c12ade9e290e309e206fc3c8f97a373a136a81
bo/pp/pp_gp_my_distmat.py
python
MyGpDistmatPP.set_data
(self, data)
Set self.data
Set self.data
[ "Set", "self", ".", "data" ]
def set_data(self, data):
    """ Set self.data """
    if data is None:
        pass  # TODO
    self.data_init = copy.deepcopy(data)
    self.data = copy.deepcopy(self.data_init)
[ "def", "set_data", "(", "self", ",", "data", ")", ":", "if", "data", "is", "None", ":", "pass", "#TODO", "self", ".", "data_init", "=", "copy", ".", "deepcopy", "(", "data", ")", "self", ".", "data", "=", "copy", ".", "deepcopy", "(", "self", ".", "data_init", ")" ]
https://github.com/naszilla/bananas/blob/e2c12ade9e290e309e206fc3c8f97a373a136a81/bo/pp/pp_gp_my_distmat.py#L36-L41
Chaffelson/nipyapi
d3b186fd701ce308c2812746d98af9120955e810
nipyapi/nifi/models/drop_request_dto.py
python
DropRequestDTO.submission_time
(self, submission_time)
Sets the submission_time of this DropRequestDTO. The timestamp when the query was submitted. :param submission_time: The submission_time of this DropRequestDTO. :type: str
Sets the submission_time of this DropRequestDTO. The timestamp when the query was submitted.
[ "Sets", "the", "submission_time", "of", "this", "DropRequestDTO", ".", "The", "timestamp", "when", "the", "query", "was", "submitted", "." ]
def submission_time(self, submission_time):
    """
    Sets the submission_time of this DropRequestDTO.
    The timestamp when the query was submitted.

    :param submission_time: The submission_time of this DropRequestDTO.
    :type: str
    """
    self._submission_time = submission_time
[ "def", "submission_time", "(", "self", ",", "submission_time", ")", ":", "self", ".", "_submission_time", "=", "submission_time" ]
https://github.com/Chaffelson/nipyapi/blob/d3b186fd701ce308c2812746d98af9120955e810/nipyapi/nifi/models/drop_request_dto.py#L189-L198
meduza-corp/interstellar
40a801ccd7856491726f5a126621d9318cabe2e1
gsutil/gslib/copy_helper.py
python
_AppendComponentTrackerToParallelUploadTrackerFile
(tracker_file, component, tracker_file_lock)
Appends info about the uploaded component to an existing tracker file. Follows the format described in _CreateParallelUploadTrackerFile. Args: tracker_file: Tracker file to append to. component: Component that was uploaded. tracker_file_lock: Thread and process-safe Lock for the tracker file.
Appends info about the uploaded component to an existing tracker file.
[ "Appends", "info", "about", "the", "uploaded", "component", "to", "an", "existing", "tracker", "file", "." ]
def _AppendComponentTrackerToParallelUploadTrackerFile(tracker_file, component,
                                                       tracker_file_lock):
    """Appends info about the uploaded component to an existing tracker file.

    Follows the format described in _CreateParallelUploadTrackerFile.

    Args:
        tracker_file: Tracker file to append to.
        component: Component that was uploaded.
        tracker_file_lock: Thread and process-safe Lock for the tracker file.
    """
    lines = _GetParallelUploadTrackerFileLinesForComponents([component])
    lines = [line + '\n' for line in lines]
    with tracker_file_lock:
        with open(tracker_file, 'a') as f:
            f.writelines(lines)
[ "def", "_AppendComponentTrackerToParallelUploadTrackerFile", "(", "tracker_file", ",", "component", ",", "tracker_file_lock", ")", ":", "lines", "=", "_GetParallelUploadTrackerFileLinesForComponents", "(", "[", "component", "]", ")", "lines", "=", "[", "line", "+", "'\\n'", "for", "line", "in", "lines", "]", "with", "tracker_file_lock", ":", "with", "open", "(", "tracker_file", ",", "'a'", ")", "as", "f", ":", "f", ".", "writelines", "(", "lines", ")" ]
https://github.com/meduza-corp/interstellar/blob/40a801ccd7856491726f5a126621d9318cabe2e1/gsutil/gslib/copy_helper.py#L2630-L2645
uber-research/PPLM
e236b8989322128360182d29a79944627957ad47
paper_code/pytorch_pretrained_bert/optimization_openai.py
python
warmup_constant
(x, warmup=0.002)
return 1.0
Linearly increases learning rate over `warmup`*`t_total` (as provided to OpenAIAdam) training steps. Learning rate is 1. afterwards.
Linearly increases learning rate over `warmup`*`t_total` (as provided to OpenAIAdam) training steps. Learning rate is 1. afterwards.
[ "Linearly", "increases", "learning", "rate", "over", "warmup", "*", "t_total", "(", "as", "provided", "to", "OpenAIAdam", ")", "training", "steps", ".", "Learning", "rate", "is", "1", ".", "afterwards", "." ]
def warmup_constant(x, warmup=0.002):
    """ Linearly increases learning rate over `warmup`*`t_total` (as provided to OpenAIAdam) training steps.
        Learning rate is 1. afterwards. """
    if x < warmup:
        return x/warmup
    return 1.0
[ "def", "warmup_constant", "(", "x", ",", "warmup", "=", "0.002", ")", ":", "if", "x", "<", "warmup", ":", "return", "x", "/", "warmup", "return", "1.0" ]
https://github.com/uber-research/PPLM/blob/e236b8989322128360182d29a79944627957ad47/paper_code/pytorch_pretrained_bert/optimization_openai.py#L32-L37
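A quick sampling of the schedule above to show its ramp-then-plateau shape:

for x in (0.0, 0.001, 0.002, 0.5, 1.0):
    print(x, warmup_constant(x, warmup=0.002))
# 0.0   -> 0.0   start of the linear ramp
# 0.001 -> 0.5   halfway through warmup
# 0.002 -> 1.0   warmup done (x < warmup is False)
# 0.5   -> 1.0   constant afterwards
# 1.0   -> 1.0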
algorhythms/LeetCode
3fb14aeea62a960442e47dfde9f964c7ffce32be
394 Decode String.py
python
SolutionError.decodeString
(self, s)
return ''.join(ret)
:type s: str :rtype: str
:type s: str :rtype: str
[ ":", "type", "s", ":", "str", ":", "rtype", ":", "str" ]
def decodeString(self, s):
    """
    :type s: str
    :rtype: str
    """
    stk = []
    i = 0
    ret = []
    while i < len(s):
        if s[i].isdigit():
            j = i + 1
            while s[j] != '[':
                j += 1
            prev = stk[-1] if stk else 1
            stk.append(prev * int(s[i:j]))
            i = j + 1
        elif s[i].islower():
            repeat = stk[-1] if stk else 1
            for _ in xrange(repeat):
                ret.append(s[i])
            i += 1
        elif s[i] == ']':
            stk.pop()
            i += 1
    return ''.join(ret)
[ "def", "decodeString", "(", "self", ",", "s", ")", ":", "stk", "=", "[", "]", "i", "=", "0", "ret", "=", "[", "]", "while", "i", "<", "len", "(", "s", ")", ":", "if", "s", "[", "i", "]", ".", "isdigit", "(", ")", ":", "j", "=", "i", "+", "1", "while", "s", "[", "j", "]", "!=", "'['", ":", "j", "+=", "1", "prev", "=", "stk", "[", "-", "1", "]", "if", "stk", "else", "1", "stk", ".", "append", "(", "prev", "*", "int", "(", "s", "[", "i", ":", "j", "]", ")", ")", "i", "=", "j", "+", "1", "elif", "s", "[", "i", "]", ".", "islower", "(", ")", ":", "repeat", "=", "stk", "[", "-", "1", "]", "if", "stk", "else", "1", "for", "_", "in", "xrange", "(", "repeat", ")", ":", "ret", ".", "append", "(", "s", "[", "i", "]", ")", "i", "+=", "1", "elif", "s", "[", "i", "]", "==", "']'", ":", "stk", ".", "pop", "(", ")", "i", "+=", "1", "return", "''", ".", "join", "(", "ret", ")" ]
https://github.com/algorhythms/LeetCode/blob/3fb14aeea62a960442e47dfde9f964c7ffce32be/394 Decode String.py#L88-L111
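A hedged trace of this approach (Python 2, matching the row's xrange). Note the identifier SolutionError: repeating each character in place gets counts right but can scramble ordering, which is presumably why the source file labels this solution an error:

s = SolutionError()
print s.decodeString('3[a]')      # 'aaa'        (correct)
print s.decodeString('2[bc]')     # 'bbcc'       (expected 'bcbc')
print s.decodeString('3[a2[c]]')  # 'aaacccccc'  (expected 'accaccacc')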
giantbranch/python-hacker-code
addbc8c73e7e6fb9e4fcadcec022fa1d3da4b96d
我手敲的代码(中文注释)/chapter11/volatility-2.3/volatility/commands.py
python
Command.table_header
(self, outfd, title_format_list = None)
Table header renders the title row of a table This also stores the header types to ensure everything is formatted appropriately. It must be a list of tuples rather than a dict for ordering purposes.
Table header renders the title row of a table
[ "Table", "header", "renders", "the", "title", "row", "of", "a", "table" ]
def table_header(self, outfd, title_format_list = None):
    """Table header renders the title row of a table

    This also stores the header types to ensure everything is formatted
    appropriately. It must be a list of tuples rather than a dict for
    ordering purposes.
    """
    titles = []
    rules = []
    self._formatlist = []
    profile = addrspace.BufferAddressSpace(self._config).profile
    for (k, v) in title_format_list:
        spec = fmtspec.FormatSpec(self._formatlookup(profile, v))
        # If spec.minwidth = -1, this field is unbounded length
        if spec.minwidth != -1:
            spec.minwidth = max(spec.minwidth, len(k))
        # Get the title specification to follow the alignment of the field
        titlespec = fmtspec.FormatSpec(formtype = 's',
                                       minwidth = max(spec.minwidth, len(k)))
        titlespec.align = spec.align if spec.align in "<>^" else "<"
        # Add this to the titles, rules, and formatspecs lists
        titles.append(("{0:" + titlespec.to_string() + "}").format(k))
        rules.append("-" * titlespec.minwidth)
        self._formatlist.append(spec)
    # Write out the titles and line rules
    if outfd:
        outfd.write(self.tablesep.join(titles) + "\n")
        outfd.write(self.tablesep.join(rules) + "\n")
[ "def", "table_header", "(", "self", ",", "outfd", ",", "title_format_list", "=", "None", ")", ":", "titles", "=", "[", "]", "rules", "=", "[", "]", "self", ".", "_formatlist", "=", "[", "]", "profile", "=", "addrspace", ".", "BufferAddressSpace", "(", "self", ".", "_config", ")", ".", "profile", "for", "(", "k", ",", "v", ")", "in", "title_format_list", ":", "spec", "=", "fmtspec", ".", "FormatSpec", "(", "self", ".", "_formatlookup", "(", "profile", ",", "v", ")", ")", "# If spec.minwidth = -1, this field is unbounded length", "if", "spec", ".", "minwidth", "!=", "-", "1", ":", "spec", ".", "minwidth", "=", "max", "(", "spec", ".", "minwidth", ",", "len", "(", "k", ")", ")", "# Get the title specification to follow the alignment of the field", "titlespec", "=", "fmtspec", ".", "FormatSpec", "(", "formtype", "=", "'s'", ",", "minwidth", "=", "max", "(", "spec", ".", "minwidth", ",", "len", "(", "k", ")", ")", ")", "titlespec", ".", "align", "=", "spec", ".", "align", "if", "spec", ".", "align", "in", "\"<>^\"", "else", "\"<\"", "# Add this to the titles, rules, and formatspecs lists", "titles", ".", "append", "(", "(", "\"{0:\"", "+", "titlespec", ".", "to_string", "(", ")", "+", "\"}\"", ")", ".", "format", "(", "k", ")", ")", "rules", ".", "append", "(", "\"-\"", "*", "titlespec", ".", "minwidth", ")", "self", ".", "_formatlist", ".", "append", "(", "spec", ")", "# Write out the titles and line rules", "if", "outfd", ":", "outfd", ".", "write", "(", "self", ".", "tablesep", ".", "join", "(", "titles", ")", "+", "\"\\n\"", ")", "outfd", ".", "write", "(", "self", ".", "tablesep", ".", "join", "(", "rules", ")", "+", "\"\\n\"", ")" ]
https://github.com/giantbranch/python-hacker-code/blob/addbc8c73e7e6fb9e4fcadcec022fa1d3da4b96d/我手敲的代码(中文注释)/chapter11/volatility-2.3/volatility/commands.py#L164-L194
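Setting aside the volatility-specific pieces (fmtspec.FormatSpec, the profile lookup), the header logic reduces to widening each column to fit its title, reusing the field's alignment, and emitting a dashed rule line. A standalone sketch of that idea using plain format specs, with illustrative column definitions:

def render_header(columns, sep=" "):
    # columns: list of (title, minwidth, align) with align in "<>^"
    titles, rules = [], []
    for title, minwidth, align in columns:
        width = max(minwidth, len(title))
        titles.append(format(title, align + str(width)))  # pad title to field width
        rules.append("-" * width)                         # dashed rule under it
    return sep.join(titles) + "\n" + sep.join(rules) + "\n"

print(render_header([("Offset", 10, "<"), ("Name", 20, "<"), ("PID", 6, ">")]))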
peter-u-diehl/stdp-mnist
d527ca3ee579d4f156d25ff160c0551a5ab82cf1
Diehl&Cook_MNIST_random_conn_generator.py
python
create_weights
()
[]
def create_weights(): nInput = 784 nE = 400 nI = nE dataPath = './random/' weight = {} weight['ee_input'] = 0.3 weight['ei_input'] = 0.2 weight['ee'] = 0.1 weight['ei'] = 10.4 weight['ie'] = 17.0 weight['ii'] = 0.4 pConn = {} pConn['ee_input'] = 1.0 pConn['ei_input'] = 0.1 pConn['ee'] = 1.0 pConn['ei'] = 0.0025 pConn['ie'] = 0.9 pConn['ii'] = 0.1 print 'create random connection matrices' connNameList = ['XeAe'] for name in connNameList: weightMatrix = np.random.random((nInput, nE)) + 0.01 weightMatrix *= weight['ee_input'] if pConn['ee_input'] < 1.0: weightMatrix, weightList = sparsenMatrix(weightMatrix, pConn['ee_input']) else: weightList = [(i, j, weightMatrix[i,j]) for j in xrange(nE) for i in xrange(nInput)] np.save(dataPath+name, weightList) print 'create connection matrices from E->I which are purely random' connNameList = ['XeAi'] for name in connNameList: weightMatrix = np.random.random((nInput, nI)) weightMatrix *= weight['ei_input'] weightMatrix, weightList = sparsenMatrix(weightMatrix, pConn['ei_input']) print 'save connection matrix', name np.save(dataPath+name, weightList) print 'create connection matrices from E->I which are purely random' connNameList = ['AeAi'] for name in connNameList: if nE == nI: weightList = [(i, i, weight['ei']) for i in xrange(nE)] else: weightMatrix = np.random.random((nE, nI)) weightMatrix *= weight['ei'] weightMatrix, weightList = sparsenMatrix(weightMatrix, pConn['ei']) print 'save connection matrix', name np.save(dataPath+name, weightList) print 'create connection matrices from I->E which are purely random' connNameList = ['AiAe'] for name in connNameList: if nE == nI: weightMatrix = np.ones((nI, nE)) weightMatrix *= weight['ie'] for i in xrange(nI): weightMatrix[i,i] = 0 weightList = [(i, j, weightMatrix[i,j]) for i in xrange(nI) for j in xrange(nE)] else: weightMatrix = np.random.random((nI, nE)) weightMatrix *= weight['ie'] weightMatrix, weightList = sparsenMatrix(weightMatrix, pConn['ie']) print 'save connection matrix', name np.save(dataPath+name, weightList)
[ "def", "create_weights", "(", ")", ":", "nInput", "=", "784", "nE", "=", "400", "nI", "=", "nE", "dataPath", "=", "'./random/'", "weight", "=", "{", "}", "weight", "[", "'ee_input'", "]", "=", "0.3", "weight", "[", "'ei_input'", "]", "=", "0.2", "weight", "[", "'ee'", "]", "=", "0.1", "weight", "[", "'ei'", "]", "=", "10.4", "weight", "[", "'ie'", "]", "=", "17.0", "weight", "[", "'ii'", "]", "=", "0.4", "pConn", "=", "{", "}", "pConn", "[", "'ee_input'", "]", "=", "1.0", "pConn", "[", "'ei_input'", "]", "=", "0.1", "pConn", "[", "'ee'", "]", "=", "1.0", "pConn", "[", "'ei'", "]", "=", "0.0025", "pConn", "[", "'ie'", "]", "=", "0.9", "pConn", "[", "'ii'", "]", "=", "0.1", "print", "'create random connection matrices'", "connNameList", "=", "[", "'XeAe'", "]", "for", "name", "in", "connNameList", ":", "weightMatrix", "=", "np", ".", "random", ".", "random", "(", "(", "nInput", ",", "nE", ")", ")", "+", "0.01", "weightMatrix", "*=", "weight", "[", "'ee_input'", "]", "if", "pConn", "[", "'ee_input'", "]", "<", "1.0", ":", "weightMatrix", ",", "weightList", "=", "sparsenMatrix", "(", "weightMatrix", ",", "pConn", "[", "'ee_input'", "]", ")", "else", ":", "weightList", "=", "[", "(", "i", ",", "j", ",", "weightMatrix", "[", "i", ",", "j", "]", ")", "for", "j", "in", "xrange", "(", "nE", ")", "for", "i", "in", "xrange", "(", "nInput", ")", "]", "np", ".", "save", "(", "dataPath", "+", "name", ",", "weightList", ")", "print", "'create connection matrices from E->I which are purely random'", "connNameList", "=", "[", "'XeAi'", "]", "for", "name", "in", "connNameList", ":", "weightMatrix", "=", "np", ".", "random", ".", "random", "(", "(", "nInput", ",", "nI", ")", ")", "weightMatrix", "*=", "weight", "[", "'ei_input'", "]", "weightMatrix", ",", "weightList", "=", "sparsenMatrix", "(", "weightMatrix", ",", "pConn", "[", "'ei_input'", "]", ")", "print", "'save connection matrix'", ",", "name", "np", ".", "save", "(", "dataPath", "+", "name", ",", "weightList", ")", "print", "'create connection matrices from E->I which are purely random'", "connNameList", "=", "[", "'AeAi'", "]", "for", "name", "in", "connNameList", ":", "if", "nE", "==", "nI", ":", "weightList", "=", "[", "(", "i", ",", "i", ",", "weight", "[", "'ei'", "]", ")", "for", "i", "in", "xrange", "(", "nE", ")", "]", "else", ":", "weightMatrix", "=", "np", ".", "random", ".", "random", "(", "(", "nE", ",", "nI", ")", ")", "weightMatrix", "*=", "weight", "[", "'ei'", "]", "weightMatrix", ",", "weightList", "=", "sparsenMatrix", "(", "weightMatrix", ",", "pConn", "[", "'ei'", "]", ")", "print", "'save connection matrix'", ",", "name", "np", ".", "save", "(", "dataPath", "+", "name", ",", "weightList", ")", "print", "'create connection matrices from I->E which are purely random'", "connNameList", "=", "[", "'AiAe'", "]", "for", "name", "in", "connNameList", ":", "if", "nE", "==", "nI", ":", "weightMatrix", "=", "np", ".", "ones", "(", "(", "nI", ",", "nE", ")", ")", "weightMatrix", "*=", "weight", "[", "'ie'", "]", "for", "i", "in", "xrange", "(", "nI", ")", ":", "weightMatrix", "[", "i", ",", "i", "]", "=", "0", "weightList", "=", "[", "(", "i", ",", "j", ",", "weightMatrix", "[", "i", ",", "j", "]", ")", "for", "i", "in", "xrange", "(", "nI", ")", "for", "j", "in", "xrange", "(", "nE", ")", "]", "else", ":", "weightMatrix", "=", "np", ".", "random", ".", "random", "(", "(", "nI", ",", "nE", ")", ")", "weightMatrix", "*=", "weight", "[", "'ie'", "]", "weightMatrix", ",", "weightList", "=", "sparsenMatrix", "(", "weightMatrix", ",", 
"pConn", "[", "'ie'", "]", ")", "print", "'save connection matrix'", ",", "name", "np", ".", "save", "(", "dataPath", "+", "name", ",", "weightList", ")" ]
https://github.com/peter-u-diehl/stdp-mnist/blob/d527ca3ee579d4f156d25ff160c0551a5ab82cf1/Diehl&Cook_MNIST_random_conn_generator.py#L37-L111
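sparsenMatrix is defined elsewhere in the script; the following is a plausible reconstruction (an assumption, not the original) that keeps roughly a pConn fraction of the entries and returns both the thinned matrix and the (i, j, weight) list that np.save consumes:

import numpy as np

def sparsenMatrix(baseMatrix, pConn):
    # Keep roughly a pConn fraction of entries, chosen without replacement.
    weightMatrix = np.zeros(baseMatrix.shape)
    numTarget = int(baseMatrix.size * pConn)
    chosen = np.random.choice(baseMatrix.size, numTarget, replace=False)
    weightList = []
    for idx in chosen:
        i, j = np.unravel_index(idx, baseMatrix.shape)
        weightMatrix[i, j] = baseMatrix[i, j]
        weightList.append((i, j, baseMatrix[i, j]))
    return weightMatrix, weightList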
matplotlib/trendvis
ec52af70d1ffba62edeeb15f4b6c181307ae2647
trendvis/xgrid_ystack.py
python
XGrid.reverse_xaxis
(self, reverse_x='all', adjust_bar_frame=True)
Reverse all or any x axis. Parameters ---------- reverse_x : string or list of ints Default 'all'. Accepts 'all' or a list of indices of the x axes to be reversed. If unsure of the index of a twin y axis in ``self.axes``, find it using ``self.get_twin_rownum()`` adjust_bar_frame : Boolean Default True. Realign ``matplotlib Rectangle patches`` made via ``self.draw_bar`` and ``self.draw_frame``.
Reverse all or any x axis.
[ "Reverse", "all", "or", "any", "x", "axis", "." ]
def reverse_xaxis(self, reverse_x='all', adjust_bar_frame=True): """ Reverse all or any x axis. Parameters ---------- reverse_x : string or list of ints Default 'all'. Accepts 'all' or a list of indices of the x axes to be reversed. If unsure of the index of a twin y axis in ``self.axes``, find it using ``self.get_twin_rownum()`` adjust_bar_frame : Boolean Default True. Realign ``matplotlib Rectangle patches`` made via ``self.draw_bar`` and ``self.draw_frame``. """ if reverse_x == 'all': reverse_x = range(0, self.mainax_dim) # Invert x axis of each axis in first row for r in reverse_x: self.axes[0][r].invert_xaxis() if adjust_bar_frame: self.adjust_bar_frame()
[ "def", "reverse_xaxis", "(", "self", ",", "reverse_x", "=", "'all'", ",", "adjust_bar_frame", "=", "True", ")", ":", "if", "reverse_x", "==", "'all'", ":", "reverse_x", "=", "range", "(", "0", ",", "self", ".", "mainax_dim", ")", "# Invert x axis of each axis in first row", "for", "r", "in", "reverse_x", ":", "self", ".", "axes", "[", "0", "]", "[", "r", "]", ".", "invert_xaxis", "(", ")", "if", "adjust_bar_frame", ":", "self", ".", "adjust_bar_frame", "(", ")" ]
https://github.com/matplotlib/trendvis/blob/ec52af70d1ffba62edeeb15f4b6c181307ae2647/trendvis/xgrid_ystack.py#L409-L433
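A minimal usage sketch; the XGrid constructor arguments here are illustrative, not taken from the record:

import trendvis

grid = trendvis.XGrid([2, 1], figsize=(6, 4))  # two y-stacked rows, one x column
grid.reverse_xaxis()                  # flip every x axis
grid.reverse_xaxis(reverse_x=[0])     # or only the x axis at index 0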
dreadatour/Flake8Lint
8703c5633ac3fab4bd116da82aae7ff7bbc795c0
Flake8Lint.py
python
LintReport.get_gutter_mark
(self)
Return gutter mark icon or empty string if marks are disabled.
Return gutter mark icon or empty string if marks are disabled.
[ "Return", "gutter", "mark", "icon", "or", "empty", "string", "if", "marks", "are", "disabled", "." ]
def get_gutter_mark(self): """Return gutter mark icon or empty string if marks are disabled.""" # ST does not expect platform specific paths here, but only # forward-slash separated paths relative to "Packages" self.gutter_mark_success = '/'.join( [settings.mark_themes_dir, 'success'] ) if int(sublime.version()) >= 3014: self.gutter_mark_success += '.png' self.gutter_mark = '' mark_type = settings.gutter_marks if mark_type in ('dot', 'circle', 'bookmark', 'cross'): self.gutter_mark = mark_type elif mark_type.startswith('theme-'): theme = mark_type[6:] if theme not in ('alpha', 'bright', 'dark', 'hard', 'simple'): log("unknown gutter mark theme: '{0}'".format(mark_type)) return # ST does not expect platform specific paths here, but only # forward-slash separated paths relative to "Packages" self.gutter_mark = '/'.join( [settings.mark_themes_dir, '{0}-{{0}}'.format(theme)] ) if int(sublime.version()) >= 3014: self.gutter_mark += '.png'
[ "def", "get_gutter_mark", "(", "self", ")", ":", "# ST does not expect platform specific paths here, but only", "# forward-slash separated paths relative to \"Packages\"", "self", ".", "gutter_mark_success", "=", "'/'", ".", "join", "(", "[", "settings", ".", "mark_themes_dir", ",", "'success'", "]", ")", "if", "int", "(", "sublime", ".", "version", "(", ")", ")", ">=", "3014", ":", "self", ".", "gutter_mark_success", "+=", "'.png'", "self", ".", "gutter_mark", "=", "''", "mark_type", "=", "settings", ".", "gutter_marks", "if", "mark_type", "in", "(", "'dot'", ",", "'circle'", ",", "'bookmark'", ",", "'cross'", ")", ":", "self", ".", "gutter_mark", "=", "mark_type", "elif", "mark_type", ".", "startswith", "(", "'theme-'", ")", ":", "theme", "=", "mark_type", "[", "6", ":", "]", "if", "theme", "not", "in", "(", "'alpha'", ",", "'bright'", ",", "'dark'", ",", "'hard'", ",", "'simple'", ")", ":", "log", "(", "\"unknown gutter mark theme: '{0}'\"", ".", "format", "(", "mark_type", ")", ")", "return", "# ST does not expect platform specific paths here, but only", "# forward-slash separated paths relative to \"Packages\"", "self", ".", "gutter_mark", "=", "'/'", ".", "join", "(", "[", "settings", ".", "mark_themes_dir", ",", "'{0}-{{0}}'", ".", "format", "(", "theme", ")", "]", ")", "if", "int", "(", "sublime", ".", "version", "(", ")", ")", ">=", "3014", ":", "self", ".", "gutter_mark", "+=", "'.png'" ]
https://github.com/dreadatour/Flake8Lint/blob/8703c5633ac3fab4bd116da82aae7ff7bbc795c0/Flake8Lint.py#L463-L490
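The '{0}-{{0}}'.format(theme) expression is the standard brace-escaping trick: the doubled braces survive the first format call as a fresh placeholder, presumably filled in later with the mark kind. For example:

template = '{0}-{{0}}'.format('alpha')   # -> 'alpha-{0}'
print(template.format('dot'))            # -> 'alpha-dot'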
omnilib/aiomultiprocess
3daaa8a22e129dec4298b2d3e4ff6fea742a8f3c
aiomultiprocess/core.py
python
Process.terminate
(self)
return self.aio_process.terminate()
Send SIGTERM to child process.
Send SIGTERM to child process.
[ "Send", "SIGTERM", "to", "child", "process", "." ]
def terminate(self) -> None: """Send SIGTERM to child process.""" return self.aio_process.terminate()
[ "def", "terminate", "(", "self", ")", "->", "None", ":", "return", "self", ".", "aio_process", ".", "terminate", "(", ")" ]
https://github.com/omnilib/aiomultiprocess/blob/3daaa8a22e129dec4298b2d3e4ff6fea742a8f3c/aiomultiprocess/core.py#L195-L197
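A minimal usage sketch for the method above, assuming the documented aiomultiprocess API (Process(target=...), start(), awaitable join()):

import asyncio
from aiomultiprocess import Process

async def worker():
    await asyncio.sleep(3600)  # stand-in for long-running work

async def main():
    p = Process(target=worker)
    p.start()
    await asyncio.sleep(0.5)
    p.terminate()      # sends SIGTERM to the child
    await p.join()     # reap the terminated child

asyncio.run(main())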
BYU-PCCL/holodeck
521c1c828f47abe7fc24b027fb907eaf26495aed
src/holodeck/packagemanager.py
python
get_scenario
(scenario_name)
return load_scenario_file(config_path)
Gets the scenario configuration associated with the given name Args: scenario_name (:obj:`str`): name of the configuration to load - e.g. "UrbanCity-Follow". Must be an exact match. Name must be unique among all installed packages Returns: :obj:`dict`: A dictionary containing the configuration file
Gets the scenario configuration associated with the given name
[ "Gets", "the", "scenario", "configuration", "associated", "with", "the", "given", "name" ]
def get_scenario(scenario_name): """Gets the scenario configuration associated with the given name Args: scenario_name (:obj:`str`): name of the configuration to load - eg "UrbanCity-Follow" Must be an exact match. Name must be unique among all installed packages Returns: :obj:`dict`: A dictionary containing the configuration file """ config_path = _find_file_in_worlds_dir(scenario_name + ".json") if config_path == "": raise FileNotFoundError( "The file `{file}.json` could not be found in {path}. " "Make sure the package that contains {file} " "is installed.".format(file=scenario_name, path=util.get_holodeck_path()) ) return load_scenario_file(config_path)
[ "def", "get_scenario", "(", "scenario_name", ")", ":", "config_path", "=", "_find_file_in_worlds_dir", "(", "scenario_name", "+", "\".json\"", ")", "if", "config_path", "==", "\"\"", ":", "raise", "FileNotFoundError", "(", "\"The file `{file}.json` could not be found in {path}. \"", "\"Make sure the package that contains {file} \"", "\"is installed.\"", ".", "format", "(", "file", "=", "scenario_name", ",", "path", "=", "util", ".", "get_holodeck_path", "(", ")", ")", ")", "return", "load_scenario_file", "(", "config_path", ")" ]
https://github.com/BYU-PCCL/holodeck/blob/521c1c828f47abe7fc24b027fb907eaf26495aed/src/holodeck/packagemanager.py#L301-L321
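A usage sketch; the scenario name comes from the docstring's own example, and the package containing it must already be installed:

from holodeck import packagemanager

config = packagemanager.get_scenario("UrbanCity-Follow")
print(sorted(config.keys()))   # a plain dict parsed from the scenario JSON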
zhaoolee/StarsAndClown
b2d4039cad2f9232b691e5976f787b49a0a2c113
node_modules/npmi/node_modules/npm/node_modules/node-gyp/gyp/pylib/gyp/input.py
python
GetIncludedBuildFiles
(build_file_path, aux_data, included=None)
return included
Return a list of all build files included into build_file_path. The returned list will contain build_file_path as well as all other files that it included, either directly or indirectly. Note that the list may contain files that were included into a conditional section that evaluated to false and was not merged into build_file_path's dict. aux_data is a dict containing a key for each build file or included build file. Those keys provide access to dicts whose "included" keys contain lists of all other files included by the build file. included should be left at its default None value by external callers. It is used for recursion. The returned list will not contain any duplicate entries. Each build file in the list will be relative to the current directory.
Return a list of all build files included into build_file_path.
[ "Return", "a", "list", "of", "all", "build", "files", "included", "into", "build_file_path", "." ]
def GetIncludedBuildFiles(build_file_path, aux_data, included=None): """Return a list of all build files included into build_file_path. The returned list will contain build_file_path as well as all other files that it included, either directly or indirectly. Note that the list may contain files that were included into a conditional section that evaluated to false and was not merged into build_file_path's dict. aux_data is a dict containing a key for each build file or included build file. Those keys provide access to dicts whose "included" keys contain lists of all other files included by the build file. included should be left at its default None value by external callers. It is used for recursion. The returned list will not contain any duplicate entries. Each build file in the list will be relative to the current directory. """ if included == None: included = [] if build_file_path in included: return included included.append(build_file_path) for included_build_file in aux_data[build_file_path].get('included', []): GetIncludedBuildFiles(included_build_file, aux_data, included) return included
[ "def", "GetIncludedBuildFiles", "(", "build_file_path", ",", "aux_data", ",", "included", "=", "None", ")", ":", "if", "included", "==", "None", ":", "included", "=", "[", "]", "if", "build_file_path", "in", "included", ":", "return", "included", "included", ".", "append", "(", "build_file_path", ")", "for", "included_build_file", "in", "aux_data", "[", "build_file_path", "]", ".", "get", "(", "'included'", ",", "[", "]", ")", ":", "GetIncludedBuildFiles", "(", "included_build_file", ",", "aux_data", ",", "included", ")", "return", "included" ]
https://github.com/zhaoolee/StarsAndClown/blob/b2d4039cad2f9232b691e5976f787b49a0a2c113/node_modules/npmi/node_modules/npm/node_modules/node-gyp/gyp/pylib/gyp/input.py#L143-L173
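A toy walk-through of the recursion, with the function above in scope and aux_data shaped as the docstring describes (each build file maps to a dict whose 'included' key lists its direct includes):

aux_data = {
    'app.gyp':     {'included': ['common.gypi', 'deps.gypi']},
    'common.gypi': {'included': []},
    'deps.gypi':   {'included': ['common.gypi']},   # duplicate, skipped on revisit
}
print(GetIncludedBuildFiles('app.gyp', aux_data))
# -> ['app.gyp', 'common.gypi', 'deps.gypi']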
golismero/golismero
7d605b937e241f51c1ca4f47b20f755eeefb9d76
thirdparty_libs/openvas_lib/common.py
python
ConnectionManager._get_protocol_version
(self)
Get OMP protocol version. If host is 'dummy', return 'dummy' version. :return: version of protocol :rtype: str :raises: ServerError, RemoteVersionError
Get OMP protocol version. If host is 'dummy', return 'dummy' version.
[ "Get", "OMP", "protocol", "version", ".", "If", "host", "is", "dummy", "return", "dummy", "version", "." ]
def _get_protocol_version(self): """ Get OMP protocol version. If host is 'dummy', return 'dummy' version. :return: version of protocol :rtype: str :raises: ServerError, RemoteVersionError """ if self.__host == "dummy": return "dummy" response = self.make_xml_request('<get_version/>', xml_result=True) v = response.find("version").text if not v: raise RemoteVersionError("Unknown remote server version") else: return v
[ "def", "_get_protocol_version", "(", "self", ")", ":", "if", "self", ".", "__host", "==", "\"dummy\"", ":", "return", "\"dummy\"", "response", "=", "self", ".", "make_xml_request", "(", "'<get_version/>'", ",", "xml_result", "=", "True", ")", "v", "=", "response", ".", "find", "(", "\"version\"", ")", ".", "text", "if", "not", "v", ":", "raise", "RemoteVersionError", "(", "\"Unknown remote server version\"", ")", "else", ":", "return", "v" ]
https://github.com/golismero/golismero/blob/7d605b937e241f51c1ca4f47b20f755eeefb9d76/thirdparty_libs/openvas_lib/common.py#L233-L253
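The XML handling behind the helper, sketched with ElementTree; make_xml_request does the socket I/O and parsing in the real class, so the literal reply here is only illustrative:

import xml.etree.ElementTree as ET

reply = '<get_version_response status="200"><version>4.0</version></get_version_response>'
root = ET.fromstring(reply)
version = root.find("version").text   # -> '4.0'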
oilshell/oil
94388e7d44a9ad879b12615f6203b38596b5a2d3
opy/compiler2/transformer.py
python
Transformer.atom_lpar
(self, nodelist)
return self.com_node(nodelist[1])
[]
def atom_lpar(self, nodelist): if nodelist[1][0] == token.RPAR: return Tuple((), lineno=nodelist[0][2]) return self.com_node(nodelist[1])
[ "def", "atom_lpar", "(", "self", ",", "nodelist", ")", ":", "if", "nodelist", "[", "1", "]", "[", "0", "]", "==", "token", ".", "RPAR", ":", "return", "Tuple", "(", "(", ")", ",", "lineno", "=", "nodelist", "[", "0", "]", "[", "2", "]", ")", "return", "self", ".", "com_node", "(", "nodelist", "[", "1", "]", ")" ]
https://github.com/oilshell/oil/blob/94388e7d44a9ad879b12615f6203b38596b5a2d3/opy/compiler2/transformer.py#L825-L828
python-discord/bot
26c5587ac13e5414361bb6e7ada42983b81014d2
bot/exts/info/python_news.py
python
PythonNews.start_tasks
(self)
Start the tasks for fetching new PEPs and mailing list messages.
Start the tasks for fetching new PEPs and mailing list messages.
[ "Start", "the", "tasks", "for", "fetching", "new", "PEPs", "and", "mailing", "list", "messages", "." ]
async def start_tasks(self) -> None: """Start the tasks for fetching new PEPs and mailing list messages.""" self.fetch_new_media.start()
[ "async", "def", "start_tasks", "(", "self", ")", "->", "None", ":", "self", ".", "fetch_new_media", ".", "start", "(", ")" ]
https://github.com/python-discord/bot/blob/26c5587ac13e5414361bb6e7ada42983b81014d2/bot/exts/info/python_news.py#L48-L50
pandas-dev/pandas
5ba7d714014ae8feaccc0dd4a98890828cf2832d
pandas/io/clipboard/__init__.py
python
set_clipboard
(clipboard)
Explicitly sets the clipboard mechanism. The "clipboard mechanism" is how the copy() and paste() functions interact with the operating system to implement the copy/paste feature. The clipboard parameter must be one of: - pbcopy - pyobjc (default on Mac OS X) - qt - xclip - xsel - klipper - windows (default on Windows) - no (this is what is set when no clipboard mechanism can be found)
Explicitly sets the clipboard mechanism. The "clipboard mechanism" is how the copy() and paste() functions interact with the operating system to implement the copy/paste feature. The clipboard parameter must be one of: - pbcopy - pyobjc (default on Mac OS X) - qt - xclip - xsel - klipper - windows (default on Windows) - no (this is what is set when no clipboard mechanism can be found)
[ "Explicitly", "sets", "the", "clipboard", "mechanism", ".", "The", "clipboard", "mechanism", "is", "how", "the", "copy", "()", "and", "paste", "()", "functions", "interact", "with", "the", "operating", "system", "to", "implement", "the", "copy", "/", "paste", "feature", ".", "The", "clipboard", "parameter", "must", "be", "one", "of", ":", "-", "pbcopy", "-", "pyobjc", "(", "default", "on", "Mac", "OS", "X", ")", "-", "qt", "-", "xclip", "-", "xsel", "-", "klipper", "-", "windows", "(", "default", "on", "Windows", ")", "-", "no", "(", "this", "is", "what", "is", "set", "when", "no", "clipboard", "mechanism", "can", "be", "found", ")" ]
def set_clipboard(clipboard): """ Explicitly sets the clipboard mechanism. The "clipboard mechanism" is how the copy() and paste() functions interact with the operating system to implement the copy/paste feature. The clipboard parameter must be one of: - pbcopy - pyobjc (default on Mac OS X) - qt - xclip - xsel - klipper - windows (default on Windows) - no (this is what is set when no clipboard mechanism can be found) """ global copy, paste clipboard_types = { "pbcopy": init_osx_pbcopy_clipboard, "pyobjc": init_osx_pyobjc_clipboard, "qt": init_qt_clipboard, # TODO - split this into 'qtpy', 'pyqt4', and 'pyqt5' "xclip": init_xclip_clipboard, "xsel": init_xsel_clipboard, "klipper": init_klipper_clipboard, "windows": init_windows_clipboard, "no": init_no_clipboard, } if clipboard not in clipboard_types: allowed_clipboard_types = [repr(_) for _ in clipboard_types.keys()] raise ValueError( f"Argument must be one of {', '.join(allowed_clipboard_types)}" ) # Sets pyperclip's copy() and paste() functions: copy, paste = clipboard_types[clipboard]()
[ "def", "set_clipboard", "(", "clipboard", ")", ":", "global", "copy", ",", "paste", "clipboard_types", "=", "{", "\"pbcopy\"", ":", "init_osx_pbcopy_clipboard", ",", "\"pyobjc\"", ":", "init_osx_pyobjc_clipboard", ",", "\"qt\"", ":", "init_qt_clipboard", ",", "# TODO - split this into 'qtpy', 'pyqt4', and 'pyqt5'", "\"xclip\"", ":", "init_xclip_clipboard", ",", "\"xsel\"", ":", "init_xsel_clipboard", ",", "\"klipper\"", ":", "init_klipper_clipboard", ",", "\"windows\"", ":", "init_windows_clipboard", ",", "\"no\"", ":", "init_no_clipboard", ",", "}", "if", "clipboard", "not", "in", "clipboard_types", ":", "allowed_clipboard_types", "=", "[", "repr", "(", "_", ")", "for", "_", "in", "clipboard_types", ".", "keys", "(", ")", "]", "raise", "ValueError", "(", "f\"Argument must be one of {', '.join(allowed_clipboard_types)}\"", ")", "# Sets pyperclip's copy() and paste() functions:", "copy", ",", "paste", "=", "clipboard_types", "[", "clipboard", "]", "(", ")" ]
https://github.com/pandas-dev/pandas/blob/5ba7d714014ae8feaccc0dd4a98890828cf2832d/pandas/io/clipboard/__init__.py#L581-L615
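A usage sketch: force one backend and round-trip a string (this assumes xclip and a display are available; otherwise copy/paste raise at call time):

import pandas.io.clipboard as clip

clip.set_clipboard("xclip")      # rebind module-level copy/paste to xclip
clip.copy("hello")
assert clip.paste() == "hello"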
facebookresearch/ReAgent
52f666670a7fa03206812ef48949f6b934d400f7
reagent/samplers/frechet.py
python
FrechetSort.log_prob
( self, scores: torch.Tensor, action: torch.Tensor, equiv_len_override: Optional[torch.Tensor] = None, )
return log_prob
What is the probability of a given set of scores producing the given list of permutations only considering the top `equiv_len` ranks? We may want to override the default equiv_len here when we know that having a larger action space doesn't matter, e.g. in Reels.
What is the probability of a given set of scores producing the given list of permutations only considering the top `equiv_len` ranks?
[ "What", "is", "the", "probability", "of", "a", "given", "set", "of", "scores", "producing", "the", "given", "list", "of", "permutations", "only", "considering", "the", "top", "equiv_len", "ranks?" ]
def log_prob( self, scores: torch.Tensor, action: torch.Tensor, equiv_len_override: Optional[torch.Tensor] = None, ) -> torch.Tensor: """ What is the probability of a given set of scores producing the given list of permutations only considering the top `equiv_len` ranks? We may want to override the default equiv_len here when we know the having larger action space doesn't matter. i.e. in Reels """ upto = self.upto if equiv_len_override is not None: assert equiv_len_override.shape == ( scores.shape[0], ), f"Invalid shape {equiv_len_override.shape}, compared to scores {scores.shape}. equiv_len_override {equiv_len_override}" upto = equiv_len_override.long() if self.topk is not None and torch.any(equiv_len_override > self.topk): raise ValueError( f"Override {equiv_len_override} cannot exceed topk={self.topk}." ) squeeze = False if len(scores.shape) == 1: squeeze = True scores = scores.unsqueeze(0) action = action.unsqueeze(0) assert len(action.shape) == len(scores.shape) == 2, "scores should be batch" if action.shape[1] > scores.shape[1]: raise ValueError( f"action cardinality ({action.shape[1]}) is larger than the number of scores ({scores.shape[1]})" ) elif action.shape[1] < scores.shape[1]: raise NotImplementedError( f"This semantic is ambiguous. If you have shorter slate, pad it with scores.shape[1] ({scores.shape[1]})" ) log_scores = scores if self.log_scores else torch.log(scores) n = log_scores.shape[-1] # Add scores for the padding value log_scores = torch.cat( [ log_scores, torch.full( (log_scores.shape[0], 1), -math.inf, device=log_scores.device ), ], dim=1, ) log_scores = torch.gather(log_scores, 1, action) * self.shape p = upto if upto is not None else n # We should unsqueeze here if isinstance(p, int): log_prob = sum( torch.nan_to_num( F.log_softmax(log_scores[:, i:], dim=1)[:, 0], neginf=0.0 ) for i in range(p) ) elif isinstance(p, torch.Tensor): # do masked sum log_prob = sum( torch.nan_to_num( F.log_softmax(log_scores[:, i:], dim=1)[:, 0], neginf=0.0 ) * (i < p).float() for i in range(n) ) else: raise RuntimeError(f"p is {p}") assert not torch.any(log_prob.isnan()), f"Nan in {log_prob}" return log_prob
[ "def", "log_prob", "(", "self", ",", "scores", ":", "torch", ".", "Tensor", ",", "action", ":", "torch", ".", "Tensor", ",", "equiv_len_override", ":", "Optional", "[", "torch", ".", "Tensor", "]", "=", "None", ",", ")", "->", "torch", ".", "Tensor", ":", "upto", "=", "self", ".", "upto", "if", "equiv_len_override", "is", "not", "None", ":", "assert", "equiv_len_override", ".", "shape", "==", "(", "scores", ".", "shape", "[", "0", "]", ",", ")", ",", "f\"Invalid shape {equiv_len_override.shape}, compared to scores {scores.shape}. equiv_len_override {equiv_len_override}\"", "upto", "=", "equiv_len_override", ".", "long", "(", ")", "if", "self", ".", "topk", "is", "not", "None", "and", "torch", ".", "any", "(", "equiv_len_override", ">", "self", ".", "topk", ")", ":", "raise", "ValueError", "(", "f\"Override {equiv_len_override} cannot exceed topk={self.topk}.\"", ")", "squeeze", "=", "False", "if", "len", "(", "scores", ".", "shape", ")", "==", "1", ":", "squeeze", "=", "True", "scores", "=", "scores", ".", "unsqueeze", "(", "0", ")", "action", "=", "action", ".", "unsqueeze", "(", "0", ")", "assert", "len", "(", "action", ".", "shape", ")", "==", "len", "(", "scores", ".", "shape", ")", "==", "2", ",", "\"scores should be batch\"", "if", "action", ".", "shape", "[", "1", "]", ">", "scores", ".", "shape", "[", "1", "]", ":", "raise", "ValueError", "(", "f\"action cardinality ({action.shape[1]}) is larger than the number of scores ({scores.shape[1]})\"", ")", "elif", "action", ".", "shape", "[", "1", "]", "<", "scores", ".", "shape", "[", "1", "]", ":", "raise", "NotImplementedError", "(", "f\"This semantic is ambiguous. If you have shorter slate, pad it with scores.shape[1] ({scores.shape[1]})\"", ")", "log_scores", "=", "scores", "if", "self", ".", "log_scores", "else", "torch", ".", "log", "(", "scores", ")", "n", "=", "log_scores", ".", "shape", "[", "-", "1", "]", "# Add scores for the padding value", "log_scores", "=", "torch", ".", "cat", "(", "[", "log_scores", ",", "torch", ".", "full", "(", "(", "log_scores", ".", "shape", "[", "0", "]", ",", "1", ")", ",", "-", "math", ".", "inf", ",", "device", "=", "log_scores", ".", "device", ")", ",", "]", ",", "dim", "=", "1", ",", ")", "log_scores", "=", "torch", ".", "gather", "(", "log_scores", ",", "1", ",", "action", ")", "*", "self", ".", "shape", "p", "=", "upto", "if", "upto", "is", "not", "None", "else", "n", "# We should unsqueeze here", "if", "isinstance", "(", "p", ",", "int", ")", ":", "log_prob", "=", "sum", "(", "torch", ".", "nan_to_num", "(", "F", ".", "log_softmax", "(", "log_scores", "[", ":", ",", "i", ":", "]", ",", "dim", "=", "1", ")", "[", ":", ",", "0", "]", ",", "neginf", "=", "0.0", ")", "for", "i", "in", "range", "(", "p", ")", ")", "elif", "isinstance", "(", "p", ",", "torch", ".", "Tensor", ")", ":", "# do masked sum", "log_prob", "=", "sum", "(", "torch", ".", "nan_to_num", "(", "F", ".", "log_softmax", "(", "log_scores", "[", ":", ",", "i", ":", "]", ",", "dim", "=", "1", ")", "[", ":", ",", "0", "]", ",", "neginf", "=", "0.0", ")", "*", "(", "i", "<", "p", ")", ".", "float", "(", ")", "for", "i", "in", "range", "(", "n", ")", ")", "else", ":", "raise", "RuntimeError", "(", "f\"p is {p}\"", ")", "assert", "not", "torch", ".", "any", "(", "log_prob", ".", "isnan", "(", ")", ")", ",", "f\"Nan in {log_prob}\"", "return", "log_prob" ]
https://github.com/facebookresearch/ReAgent/blob/52f666670a7fa03206812ef48949f6b934d400f7/reagent/samplers/frechet.py#L85-L161
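Stripped of padding, masking, and shape checks, the integer-upto branch is a top-p Plackett-Luce log-likelihood: at each rank the chosen item's shape-scaled log-score is log-softmaxed against everything not yet placed. A standalone sketch of that core (shape scaling omitted; log_scores are assumed already gathered into slate order):

import torch
import torch.nn.functional as F

def plackett_luce_log_prob(log_scores, p):
    # log_scores: (batch, slate); p: number of top ranks that matter.
    total = torch.zeros(log_scores.shape[0])
    for i in range(p):
        # log-probability that position i's item wins among the remainder
        total = total + F.log_softmax(log_scores[:, i:], dim=1)[:, 0]
    return total

lp = plackett_luce_log_prob(torch.log(torch.tensor([[0.5, 0.3, 0.2]])), p=2)
print(lp.exp())   # 0.5 * (0.3 / 0.5) = 0.3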
IronLanguages/main
a949455434b1fda8c783289e897e78a9a0caabb5
External.LCA_RESTRICTED/Languages/IronPython/27/Lib/site-packages/win32/scripts/regsetup.py
python
FindRegisterPackage
(packageName, knownFile, searchPaths, registryAppName = None)
Find and Register a package. Assumes the core registry is set up correctly. In addition, if the location located by the package is already in the **core** path, then an entry is registered, but no path is added (no other paths are checked, as the application whose path was used may later be uninstalled; this should not happen with the core).
Find and Register a package.
[ "Find", "and", "Register", "a", "package", "." ]
def FindRegisterPackage(packageName, knownFile, searchPaths, registryAppName = None): """Find and Register a package. Assumes the core registry setup correctly. In addition, if the location located by the package is already in the **core** path, then an entry is registered, but no path. (no other paths are checked, as the application whose path was used may later be uninstalled. This should not happen with the core) """ import regutil, string if not packageName: raise error("A package name must be supplied") corePaths = regutil.GetRegisteredNamedPath(None).split(";") if not searchPaths: searchPaths = corePaths registryAppName = registryAppName or packageName try: pathLook, pathAdd = FindPackagePath(packageName, knownFile, searchPaths) if pathAdd is not None: if pathAdd in corePaths: pathAdd = "" regutil.RegisterNamedPath(registryAppName, pathAdd) return pathLook except error, details: print "*** The %s package could not be registered - %s" % (packageName, details) print "*** Please ensure you have passed the correct paths on the command line." print "*** - For packages, you should pass a path to the packages parent directory," print "*** - and not the package directory itself..."
[ "def", "FindRegisterPackage", "(", "packageName", ",", "knownFile", ",", "searchPaths", ",", "registryAppName", "=", "None", ")", ":", "import", "regutil", ",", "string", "if", "not", "packageName", ":", "raise", "error", "(", "\"A package name must be supplied\"", ")", "corePaths", "=", "regutil", ".", "GetRegisteredNamedPath", "(", "None", ")", ".", "split", "(", "\";\"", ")", "if", "not", "searchPaths", ":", "searchPaths", "=", "corePaths", "registryAppName", "=", "registryAppName", "or", "packageName", "try", ":", "pathLook", ",", "pathAdd", "=", "FindPackagePath", "(", "packageName", ",", "knownFile", ",", "searchPaths", ")", "if", "pathAdd", "is", "not", "None", ":", "if", "pathAdd", "in", "corePaths", ":", "pathAdd", "=", "\"\"", "regutil", ".", "RegisterNamedPath", "(", "registryAppName", ",", "pathAdd", ")", "return", "pathLook", "except", "error", ",", "details", ":", "print", "\"*** The %s package could not be registered - %s\"", "%", "(", "packageName", ",", "details", ")", "print", "\"*** Please ensure you have passed the correct paths on the command line.\"", "print", "\"*** - For packages, you should pass a path to the packages parent directory,\"", "print", "\"*** - and not the package directory itself...\"" ]
https://github.com/IronLanguages/main/blob/a949455434b1fda8c783289e897e78a9a0caabb5/External.LCA_RESTRICTED/Languages/IronPython/27/Lib/site-packages/win32/scripts/regsetup.py#L236-L262
Azure/azure-devops-cli-extension
11334cd55806bef0b99c3bee5a438eed71e44037
azure-devops/azext_devops/devops_sdk/v5_1/core/core_client.py
python
CoreClient.get_project_properties
(self, project_id, keys=None)
return self._deserialize('[ProjectProperty]', self._unwrap_collection(response))
GetProjectProperties. [Preview API] Get a collection of team project properties. :param str project_id: The team project ID. :param [str] keys: A comma-delimited string of team project property names. Wildcard characters ("?" and "*") are supported. If no key is specified, all properties will be returned. :rtype: [ProjectProperty]
GetProjectProperties. [Preview API] Get a collection of team project properties. :param str project_id: The team project ID. :param [str] keys: A comma-delimited string of team project property names. Wildcard characters ("?" and "*") are supported. If no key is specified, all properties will be returned. :rtype: [ProjectProperty]
[ "GetProjectProperties", ".", "[", "Preview", "API", "]", "Get", "a", "collection", "of", "team", "project", "properties", ".", ":", "param", "str", "project_id", ":", "The", "team", "project", "ID", ".", ":", "param", "[", "str", "]", "keys", ":", "A", "comma", "-", "delimited", "string", "of", "team", "project", "property", "names", ".", "Wildcard", "characters", "(", "?", "and", "*", ")", "are", "supported", ".", "If", "no", "key", "is", "specified", "all", "properties", "will", "be", "returned", ".", ":", "rtype", ":", "[", "ProjectProperty", "]" ]
def get_project_properties(self, project_id, keys=None): """GetProjectProperties. [Preview API] Get a collection of team project properties. :param str project_id: The team project ID. :param [str] keys: A comma-delimited string of team project property names. Wildcard characters ("?" and "*") are supported. If no key is specified, all properties will be returned. :rtype: [ProjectProperty] """ route_values = {} if project_id is not None: route_values['projectId'] = self._serialize.url('project_id', project_id, 'str') query_parameters = {} if keys is not None: keys = ",".join(keys) query_parameters['keys'] = self._serialize.query('keys', keys, 'str') response = self._send(http_method='GET', location_id='4976a71a-4487-49aa-8aab-a1eda469037a', version='5.1-preview.1', route_values=route_values, query_parameters=query_parameters) return self._deserialize('[ProjectProperty]', self._unwrap_collection(response))
[ "def", "get_project_properties", "(", "self", ",", "project_id", ",", "keys", "=", "None", ")", ":", "route_values", "=", "{", "}", "if", "project_id", "is", "not", "None", ":", "route_values", "[", "'projectId'", "]", "=", "self", ".", "_serialize", ".", "url", "(", "'project_id'", ",", "project_id", ",", "'str'", ")", "query_parameters", "=", "{", "}", "if", "keys", "is", "not", "None", ":", "keys", "=", "\",\"", ".", "join", "(", "keys", ")", "query_parameters", "[", "'keys'", "]", "=", "self", ".", "_serialize", ".", "query", "(", "'keys'", ",", "keys", ",", "'str'", ")", "response", "=", "self", ".", "_send", "(", "http_method", "=", "'GET'", ",", "location_id", "=", "'4976a71a-4487-49aa-8aab-a1eda469037a'", ",", "version", "=", "'5.1-preview.1'", ",", "route_values", "=", "route_values", ",", "query_parameters", "=", "query_parameters", ")", "return", "self", ".", "_deserialize", "(", "'[ProjectProperty]'", ",", "self", ".", "_unwrap_collection", "(", "response", ")", ")" ]
https://github.com/Azure/azure-devops-cli-extension/blob/11334cd55806bef0b99c3bee5a438eed71e44037/azure-devops/azext_devops/devops_sdk/v5_1/core/core_client.py#L308-L327
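A usage sketch; obtaining core_client (via an azure.devops Connection) is assumed boilerplate, and the ProjectProperty attribute names (name, value) are inferred from the model name:

props = core_client.get_project_properties(
    project_id="00000000-0000-0000-0000-000000000000",
    keys=["System.Process*"],    # wildcard, per the docstring
)
for prop in props:
    print(prop.name, prop.value)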
pyglet/pyglet
2833c1df902ca81aeeffa786c12e7e87d402434b
pyglet/canvas/base.py
python
Canvas.__init__
(self, display)
:parameters: `display` : `Display` :attr:`display`
:parameters: `display` : `Display` :attr:`display`
[ ":", "parameters", ":", "display", ":", "Display", ":", "attr", ":", "display" ]
def __init__(self, display): """ :parameters: `display` : `Display` :attr:`display` """ self.display = display """Display this canvas was created on."""
[ "def", "__init__", "(", "self", ",", "display", ")", ":", "self", ".", "display", "=", "display", "\"\"\"Display this canvas was created on.\"\"\"" ]
https://github.com/pyglet/pyglet/blob/2833c1df902ca81aeeffa786c12e7e87d402434b/pyglet/canvas/base.py#L357-L366
gkrizek/bash-lambda-layer
703b0ade8174022d44779d823172ab7ac33a5505
bin/botocore/httpsession.py
python
ProxyConfiguration.proxy_url_for
(self, url)
return proxy
Retrieves the corresponding proxy url for a given url.
Retrieves the corresponding proxy url for a given url.
[ "Retrieves", "the", "corresponding", "proxy", "url", "for", "a", "given", "url", "." ]
def proxy_url_for(self, url): """Retrieves the corresponding proxy url for a given url. """ parsed_url = urlparse(url) proxy = self._proxies.get(parsed_url.scheme) if proxy: proxy = self._fix_proxy_url(proxy) return proxy
[ "def", "proxy_url_for", "(", "self", ",", "url", ")", ":", "parsed_url", "=", "urlparse", "(", "url", ")", "proxy", "=", "self", ".", "_proxies", ".", "get", "(", "parsed_url", ".", "scheme", ")", "if", "proxy", ":", "proxy", "=", "self", ".", "_fix_proxy_url", "(", "proxy", ")", "return", "proxy" ]
https://github.com/gkrizek/bash-lambda-layer/blob/703b0ade8174022d44779d823172ab7ac33a5505/bin/botocore/httpsession.py#L98-L104
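The lookup keys on the target URL's scheme; a self-contained sketch of the same behavior (ProxyConfiguration construction details vary by botocore version, so they are left out):

from urllib.parse import urlparse

proxies = {"https": "http://proxy.example.com:8080"}

def proxy_url_for(url):
    # Pick the proxy registered for this URL's scheme, if any.
    return proxies.get(urlparse(url).scheme)

print(proxy_url_for("https://s3.amazonaws.com/bucket"))  # the proxy url
print(proxy_url_for("http://example.com"))               # None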
Abjad/abjad
d0646dfbe83db3dc5ab268f76a0950712b87b7fd
abjad/contextmanagers.py
python
RedirectedStreams.__enter__
(self)
return self
Enters redirected streams context manager. Returns the context manager.
Enters redirected streams context manager.
[ "Enters", "redirected", "streams", "context", "manager", "." ]
def __enter__(self): """ Enters redirected streams context manager. Returns the context manager. """ self._old_stdout, self._old_stderr = sys.stdout, sys.stderr self._old_stdout.flush() self._old_stderr.flush() sys.stdout, sys.stderr = self._stdout, self._stderr return self
[ "def", "__enter__", "(", "self", ")", ":", "self", ".", "_old_stdout", ",", "self", ".", "_old_stderr", "=", "sys", ".", "stdout", ",", "sys", ".", "stderr", "self", ".", "_old_stdout", ".", "flush", "(", ")", "self", ".", "_old_stderr", ".", "flush", "(", ")", "sys", ".", "stdout", ",", "sys", ".", "stderr", "=", "self", ".", "_stdout", ",", "self", ".", "_stderr", "return", "self" ]
https://github.com/Abjad/abjad/blob/d0646dfbe83db3dc5ab268f76a0950712b87b7fd/abjad/contextmanagers.py#L472-L482
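A usage sketch, assuming the usual construction with replacement streams passed as stdout/stderr keyword arguments:

import io
from abjad.contextmanagers import RedirectedStreams

buffer = io.StringIO()
with RedirectedStreams(stdout=buffer):
    print("captured")          # goes to the buffer, not the terminal
assert buffer.getvalue() == "captured\n"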
rail-berkeley/rlkit
c81509d982b4d52a6239e7bfe7d2540e3d3cd986
rlkit/torch/smac/smac.py
python
SmacTrainer.train_from_torch
(self, batch)
Policy and Alpha Loss
Policy and Alpha Loss
[ "Policy", "and", "Alpha", "Loss" ]
def train_from_torch(self, batch): rewards = batch['rewards'] terminals = batch['terminals'] obs = batch['observations'] actions = batch['actions'] next_obs = batch['next_observations'] context = batch['context'] if self.reward_transform: rewards = self.reward_transform(rewards) if self.terminal_transform: terminals = self.terminal_transform(terminals) """ Policy and Alpha Loss """ dist, p_z, task_z_with_grad = self.agent( obs, context, return_latent_posterior_and_task_z=True, ) task_z_detached = task_z_with_grad.detach() new_obs_actions, log_pi = dist.rsample_and_logprob() log_pi = log_pi.unsqueeze(1) next_dist = self.agent(next_obs, context) if self._debug_ignore_context: task_z_with_grad = task_z_with_grad * 0 # flattens out the task dimension t, b, _ = obs.size() obs = obs.view(t * b, -1) actions = actions.view(t * b, -1) next_obs = next_obs.view(t * b, -1) unscaled_rewards_flat = rewards.view(t * b, 1) rewards_flat = unscaled_rewards_flat * self.reward_scale terms_flat = terminals.view(t * b, 1) if self.use_automatic_entropy_tuning: alpha_loss = -(self.log_alpha * ( log_pi + self.target_entropy).detach()).mean() self.alpha_optimizer.zero_grad() alpha_loss.backward() self.alpha_optimizer.step() alpha = self.log_alpha.exp() else: alpha_loss = 0 alpha = self.alpha """ QF Loss """ if self.backprop_q_loss_into_encoder: q1_pred = self.qf1(obs, actions, task_z_with_grad) q2_pred = self.qf2(obs, actions, task_z_with_grad) else: q1_pred = self.qf1(obs, actions, task_z_detached) q2_pred = self.qf2(obs, actions, task_z_detached) # Make sure policy accounts for squashing functions like tanh correctly! new_next_actions, new_log_pi = next_dist.rsample_and_logprob() new_log_pi = new_log_pi.unsqueeze(1) with torch.no_grad(): target_q_values = torch.min( self.target_qf1(next_obs, new_next_actions, task_z_detached), self.target_qf2(next_obs, new_next_actions, task_z_detached), ) - alpha * new_log_pi q_target = rewards_flat + ( 1. 
- terms_flat) * self.discount * target_q_values qf1_loss = self.qf_criterion(q1_pred, q_target.detach()) qf2_loss = self.qf_criterion(q2_pred, q_target.detach()) """ Context Encoder Loss """ if self._debug_use_ground_truth_context: kl_div = kl_loss = ptu.zeros(0) else: kl_div = kl_divergence(p_z, self.agent.latent_prior).mean(dim=0).sum() kl_loss = self.kl_lambda * kl_div if self.train_context_decoder: # TODO: change to use a distribution reward_pred = self.context_decoder(obs, actions, task_z_with_grad) reward_prediction_loss = ((reward_pred - unscaled_rewards_flat)**2).mean() context_loss = kl_loss + reward_prediction_loss else: context_loss = kl_loss reward_prediction_loss = ptu.zeros(1) """ Policy Loss """ qf1_new_actions = self.qf1(obs, new_obs_actions, task_z_detached) qf2_new_actions = self.qf2(obs, new_obs_actions, task_z_detached) q_new_actions = torch.min( qf1_new_actions, qf2_new_actions, ) # Advantage-weighted regression if self.vf_K > 1: vs = [] for i in range(self.vf_K): u = dist.sample() q1 = self.qf1(obs, u, task_z_detached) q2 = self.qf2(obs, u, task_z_detached) v = torch.min(q1, q2) # v = q1 vs.append(v) v_pi = torch.cat(vs, 1).mean(dim=1) else: # v_pi = self.qf1(obs, new_obs_actions) v1_pi = self.qf1(obs, new_obs_actions, task_z_detached) v2_pi = self.qf2(obs, new_obs_actions, task_z_detached) v_pi = torch.min(v1_pi, v2_pi) u = actions if self.awr_min_q: q_adv = torch.min(q1_pred, q2_pred) else: q_adv = q1_pred policy_logpp = dist.log_prob(u) if self.use_automatic_beta_tuning: buffer_dist = self.buffer_policy(obs) beta = self.log_beta.exp() kldiv = torch.distributions.kl.kl_divergence(dist, buffer_dist) beta_loss = -1 * ( beta * (kldiv - self.beta_epsilon).detach()).mean() self.beta_optimizer.zero_grad() beta_loss.backward() self.beta_optimizer.step() else: beta = self.beta_schedule.get_value(self._n_train_steps_total) beta_loss = ptu.zeros(1) score = q_adv - v_pi if self.mask_positive_advantage: score = torch.sign(score) if self.clip_score is not None: score = torch.clamp(score, max=self.clip_score) weights = batch.get('weights', None) if self.weight_loss and weights is None: if self.normalize_over_batch == True: weights = F.softmax(score / beta, dim=0) elif self.normalize_over_batch == "whiten": adv_mean = torch.mean(score) adv_std = torch.std(score) + 1e-5 normalized_score = (score - adv_mean) / adv_std weights = torch.exp(normalized_score / beta) elif self.normalize_over_batch == "exp": weights = torch.exp(score / beta) elif self.normalize_over_batch == "step_fn": weights = (score > 0).float() elif self.normalize_over_batch == False: weights = score elif self.normalize_over_batch == 'uniform': weights = F.softmax(ptu.ones_like(score) / beta, dim=0) else: raise ValueError(self.normalize_over_batch) weights = weights[:, 0] policy_loss = alpha * log_pi.mean() if self.use_awr_update and self.weight_loss: policy_loss = policy_loss + self.awr_weight * ( -policy_logpp * len(weights) * weights.detach()).mean() elif self.use_awr_update: policy_loss = policy_loss + self.awr_weight * (-policy_logpp).mean() if self.use_reparam_update: policy_loss = policy_loss + self.train_reparam_weight * ( -q_new_actions).mean() policy_loss = self.rl_weight * policy_loss """ Update networks """ if self._n_train_steps_total % self.q_update_period == 0: if self.train_encoder_decoder: self.context_optimizer.zero_grad() if self.train_agent: self.qf1_optimizer.zero_grad() self.qf2_optimizer.zero_grad() context_loss.backward(retain_graph=True) # retain graph because the encoder is trained by both QF 
losses qf1_loss.backward(retain_graph=True) qf2_loss.backward() if self.train_agent: self.qf1_optimizer.step() self.qf2_optimizer.step() if self.train_encoder_decoder: self.context_optimizer.step() if self.train_agent: if self._n_train_steps_total % self.policy_update_period == 0 and self.update_policy: self.policy_optimizer.zero_grad() policy_loss.backward() self.policy_optimizer.step() self._num_gradient_steps += 1 """ Soft Updates """ if self._n_train_steps_total % self.target_update_period == 0: ptu.soft_update_from_to( self.qf1, self.target_qf1, self.soft_target_tau ) ptu.soft_update_from_to( self.qf2, self.target_qf2, self.soft_target_tau ) """ Save some statistics for eval """ if self._need_to_update_eval_statistics: self._need_to_update_eval_statistics = False """ Eval should set this to None. This way, these statistics are only computed for one batch. """ policy_loss = (log_pi - q_new_actions).mean() self.eval_statistics['QF1 Loss'] = np.mean(ptu.get_numpy(qf1_loss)) self.eval_statistics['QF2 Loss'] = np.mean(ptu.get_numpy(qf2_loss)) self.eval_statistics['Policy Loss'] = np.mean(ptu.get_numpy( policy_loss )) self.eval_statistics.update(create_stats_ordered_dict( 'Q1 Predictions', ptu.get_numpy(q1_pred), )) self.eval_statistics.update(create_stats_ordered_dict( 'Q2 Predictions', ptu.get_numpy(q2_pred), )) self.eval_statistics.update(create_stats_ordered_dict( 'Q Targets', ptu.get_numpy(q_target), )) self.eval_statistics['task_embedding/kl_divergence'] = ( ptu.get_numpy(kl_div) ) self.eval_statistics['task_embedding/kl_loss'] = ( ptu.get_numpy(kl_loss) ) self.eval_statistics['task_embedding/reward_prediction_loss'] = ( ptu.get_numpy(reward_prediction_loss) ) self.eval_statistics['task_embedding/context_loss'] = ( ptu.get_numpy(context_loss) ) self.eval_statistics.update(create_stats_ordered_dict( 'Log Pis', ptu.get_numpy(log_pi), )) self.eval_statistics.update(create_stats_ordered_dict( 'rewards', ptu.get_numpy(rewards), )) self.eval_statistics.update(create_stats_ordered_dict( 'terminals', ptu.get_numpy(terminals), )) policy_statistics = add_prefix(dist.get_diagnostics(), "policy/") self.eval_statistics.update(policy_statistics) self.eval_statistics.update(create_stats_ordered_dict( 'Advantage Weights', ptu.get_numpy(weights), )) self.eval_statistics.update(create_stats_ordered_dict( 'Advantage Score', ptu.get_numpy(score), )) self.eval_statistics['reparam_weight'] = self.train_reparam_weight self.eval_statistics['num_gradient_steps'] = ( self._num_gradient_steps ) if self.use_automatic_entropy_tuning: self.eval_statistics['Alpha'] = alpha.item() self.eval_statistics['Alpha Loss'] = alpha_loss.item() if self.use_automatic_beta_tuning: self.eval_statistics.update({ "adaptive_beta/beta": ptu.get_numpy(beta.mean()), "adaptive_beta/beta loss": ptu.get_numpy(beta_loss.mean()), }) self._n_train_steps_total += 1
[ "def", "train_from_torch", "(", "self", ",", "batch", ")", ":", "rewards", "=", "batch", "[", "'rewards'", "]", "terminals", "=", "batch", "[", "'terminals'", "]", "obs", "=", "batch", "[", "'observations'", "]", "actions", "=", "batch", "[", "'actions'", "]", "next_obs", "=", "batch", "[", "'next_observations'", "]", "context", "=", "batch", "[", "'context'", "]", "if", "self", ".", "reward_transform", ":", "rewards", "=", "self", ".", "reward_transform", "(", "rewards", ")", "if", "self", ".", "terminal_transform", ":", "terminals", "=", "self", ".", "terminal_transform", "(", "terminals", ")", "dist", ",", "p_z", ",", "task_z_with_grad", "=", "self", ".", "agent", "(", "obs", ",", "context", ",", "return_latent_posterior_and_task_z", "=", "True", ",", ")", "task_z_detached", "=", "task_z_with_grad", ".", "detach", "(", ")", "new_obs_actions", ",", "log_pi", "=", "dist", ".", "rsample_and_logprob", "(", ")", "log_pi", "=", "log_pi", ".", "unsqueeze", "(", "1", ")", "next_dist", "=", "self", ".", "agent", "(", "next_obs", ",", "context", ")", "if", "self", ".", "_debug_ignore_context", ":", "task_z_with_grad", "=", "task_z_with_grad", "*", "0", "# flattens out the task dimension", "t", ",", "b", ",", "_", "=", "obs", ".", "size", "(", ")", "obs", "=", "obs", ".", "view", "(", "t", "*", "b", ",", "-", "1", ")", "actions", "=", "actions", ".", "view", "(", "t", "*", "b", ",", "-", "1", ")", "next_obs", "=", "next_obs", ".", "view", "(", "t", "*", "b", ",", "-", "1", ")", "unscaled_rewards_flat", "=", "rewards", ".", "view", "(", "t", "*", "b", ",", "1", ")", "rewards_flat", "=", "unscaled_rewards_flat", "*", "self", ".", "reward_scale", "terms_flat", "=", "terminals", ".", "view", "(", "t", "*", "b", ",", "1", ")", "if", "self", ".", "use_automatic_entropy_tuning", ":", "alpha_loss", "=", "-", "(", "self", ".", "log_alpha", "*", "(", "log_pi", "+", "self", ".", "target_entropy", ")", ".", "detach", "(", ")", ")", ".", "mean", "(", ")", "self", ".", "alpha_optimizer", ".", "zero_grad", "(", ")", "alpha_loss", ".", "backward", "(", ")", "self", ".", "alpha_optimizer", ".", "step", "(", ")", "alpha", "=", "self", ".", "log_alpha", ".", "exp", "(", ")", "else", ":", "alpha_loss", "=", "0", "alpha", "=", "self", ".", "alpha", "\"\"\"\n QF Loss\n \"\"\"", "if", "self", ".", "backprop_q_loss_into_encoder", ":", "q1_pred", "=", "self", ".", "qf1", "(", "obs", ",", "actions", ",", "task_z_with_grad", ")", "q2_pred", "=", "self", ".", "qf2", "(", "obs", ",", "actions", ",", "task_z_with_grad", ")", "else", ":", "q1_pred", "=", "self", ".", "qf1", "(", "obs", ",", "actions", ",", "task_z_detached", ")", "q2_pred", "=", "self", ".", "qf2", "(", "obs", ",", "actions", ",", "task_z_detached", ")", "# Make sure policy accounts for squashing functions like tanh correctly!", "new_next_actions", ",", "new_log_pi", "=", "next_dist", ".", "rsample_and_logprob", "(", ")", "new_log_pi", "=", "new_log_pi", ".", "unsqueeze", "(", "1", ")", "with", "torch", ".", "no_grad", "(", ")", ":", "target_q_values", "=", "torch", ".", "min", "(", "self", ".", "target_qf1", "(", "next_obs", ",", "new_next_actions", ",", "task_z_detached", ")", ",", "self", ".", "target_qf2", "(", "next_obs", ",", "new_next_actions", ",", "task_z_detached", ")", ",", ")", "-", "alpha", "*", "new_log_pi", "q_target", "=", "rewards_flat", "+", "(", "1.", "-", "terms_flat", ")", "*", "self", ".", "discount", "*", "target_q_values", "qf1_loss", "=", "self", ".", "qf_criterion", "(", "q1_pred", ",", "q_target", ".", "detach", "(", ")", ")", 
"qf2_loss", "=", "self", ".", "qf_criterion", "(", "q2_pred", ",", "q_target", ".", "detach", "(", ")", ")", "\"\"\"\n Context Encoder Loss\n \"\"\"", "if", "self", ".", "_debug_use_ground_truth_context", ":", "kl_div", "=", "kl_loss", "=", "ptu", ".", "zeros", "(", "0", ")", "else", ":", "kl_div", "=", "kl_divergence", "(", "p_z", ",", "self", ".", "agent", ".", "latent_prior", ")", ".", "mean", "(", "dim", "=", "0", ")", ".", "sum", "(", ")", "kl_loss", "=", "self", ".", "kl_lambda", "*", "kl_div", "if", "self", ".", "train_context_decoder", ":", "# TODO: change to use a distribution", "reward_pred", "=", "self", ".", "context_decoder", "(", "obs", ",", "actions", ",", "task_z_with_grad", ")", "reward_prediction_loss", "=", "(", "(", "reward_pred", "-", "unscaled_rewards_flat", ")", "**", "2", ")", ".", "mean", "(", ")", "context_loss", "=", "kl_loss", "+", "reward_prediction_loss", "else", ":", "context_loss", "=", "kl_loss", "reward_prediction_loss", "=", "ptu", ".", "zeros", "(", "1", ")", "\"\"\"\n Policy Loss\n \"\"\"", "qf1_new_actions", "=", "self", ".", "qf1", "(", "obs", ",", "new_obs_actions", ",", "task_z_detached", ")", "qf2_new_actions", "=", "self", ".", "qf2", "(", "obs", ",", "new_obs_actions", ",", "task_z_detached", ")", "q_new_actions", "=", "torch", ".", "min", "(", "qf1_new_actions", ",", "qf2_new_actions", ",", ")", "# Advantage-weighted regression", "if", "self", ".", "vf_K", ">", "1", ":", "vs", "=", "[", "]", "for", "i", "in", "range", "(", "self", ".", "vf_K", ")", ":", "u", "=", "dist", ".", "sample", "(", ")", "q1", "=", "self", ".", "qf1", "(", "obs", ",", "u", ",", "task_z_detached", ")", "q2", "=", "self", ".", "qf2", "(", "obs", ",", "u", ",", "task_z_detached", ")", "v", "=", "torch", ".", "min", "(", "q1", ",", "q2", ")", "# v = q1", "vs", ".", "append", "(", "v", ")", "v_pi", "=", "torch", ".", "cat", "(", "vs", ",", "1", ")", ".", "mean", "(", "dim", "=", "1", ")", "else", ":", "# v_pi = self.qf1(obs, new_obs_actions)", "v1_pi", "=", "self", ".", "qf1", "(", "obs", ",", "new_obs_actions", ",", "task_z_detached", ")", "v2_pi", "=", "self", ".", "qf2", "(", "obs", ",", "new_obs_actions", ",", "task_z_detached", ")", "v_pi", "=", "torch", ".", "min", "(", "v1_pi", ",", "v2_pi", ")", "u", "=", "actions", "if", "self", ".", "awr_min_q", ":", "q_adv", "=", "torch", ".", "min", "(", "q1_pred", ",", "q2_pred", ")", "else", ":", "q_adv", "=", "q1_pred", "policy_logpp", "=", "dist", ".", "log_prob", "(", "u", ")", "if", "self", ".", "use_automatic_beta_tuning", ":", "buffer_dist", "=", "self", ".", "buffer_policy", "(", "obs", ")", "beta", "=", "self", ".", "log_beta", ".", "exp", "(", ")", "kldiv", "=", "torch", ".", "distributions", ".", "kl", ".", "kl_divergence", "(", "dist", ",", "buffer_dist", ")", "beta_loss", "=", "-", "1", "*", "(", "beta", "*", "(", "kldiv", "-", "self", ".", "beta_epsilon", ")", ".", "detach", "(", ")", ")", ".", "mean", "(", ")", "self", ".", "beta_optimizer", ".", "zero_grad", "(", ")", "beta_loss", ".", "backward", "(", ")", "self", ".", "beta_optimizer", ".", "step", "(", ")", "else", ":", "beta", "=", "self", ".", "beta_schedule", ".", "get_value", "(", "self", ".", "_n_train_steps_total", ")", "beta_loss", "=", "ptu", ".", "zeros", "(", "1", ")", "score", "=", "q_adv", "-", "v_pi", "if", "self", ".", "mask_positive_advantage", ":", "score", "=", "torch", ".", "sign", "(", "score", ")", "if", "self", ".", "clip_score", "is", "not", "None", ":", "score", "=", "torch", ".", "clamp", "(", "score", ",", "max", "=", "self", ".", 
"clip_score", ")", "weights", "=", "batch", ".", "get", "(", "'weights'", ",", "None", ")", "if", "self", ".", "weight_loss", "and", "weights", "is", "None", ":", "if", "self", ".", "normalize_over_batch", "==", "True", ":", "weights", "=", "F", ".", "softmax", "(", "score", "/", "beta", ",", "dim", "=", "0", ")", "elif", "self", ".", "normalize_over_batch", "==", "\"whiten\"", ":", "adv_mean", "=", "torch", ".", "mean", "(", "score", ")", "adv_std", "=", "torch", ".", "std", "(", "score", ")", "+", "1e-5", "normalized_score", "=", "(", "score", "-", "adv_mean", ")", "/", "adv_std", "weights", "=", "torch", ".", "exp", "(", "normalized_score", "/", "beta", ")", "elif", "self", ".", "normalize_over_batch", "==", "\"exp\"", ":", "weights", "=", "torch", ".", "exp", "(", "score", "/", "beta", ")", "elif", "self", ".", "normalize_over_batch", "==", "\"step_fn\"", ":", "weights", "=", "(", "score", ">", "0", ")", ".", "float", "(", ")", "elif", "self", ".", "normalize_over_batch", "==", "False", ":", "weights", "=", "score", "elif", "self", ".", "normalize_over_batch", "==", "'uniform'", ":", "weights", "=", "F", ".", "softmax", "(", "ptu", ".", "ones_like", "(", "score", ")", "/", "beta", ",", "dim", "=", "0", ")", "else", ":", "raise", "ValueError", "(", "self", ".", "normalize_over_batch", ")", "weights", "=", "weights", "[", ":", ",", "0", "]", "policy_loss", "=", "alpha", "*", "log_pi", ".", "mean", "(", ")", "if", "self", ".", "use_awr_update", "and", "self", ".", "weight_loss", ":", "policy_loss", "=", "policy_loss", "+", "self", ".", "awr_weight", "*", "(", "-", "policy_logpp", "*", "len", "(", "weights", ")", "*", "weights", ".", "detach", "(", ")", ")", ".", "mean", "(", ")", "elif", "self", ".", "use_awr_update", ":", "policy_loss", "=", "policy_loss", "+", "self", ".", "awr_weight", "*", "(", "-", "policy_logpp", ")", ".", "mean", "(", ")", "if", "self", ".", "use_reparam_update", ":", "policy_loss", "=", "policy_loss", "+", "self", ".", "train_reparam_weight", "*", "(", "-", "q_new_actions", ")", ".", "mean", "(", ")", "policy_loss", "=", "self", ".", "rl_weight", "*", "policy_loss", "\"\"\"\n Update networks\n \"\"\"", "if", "self", ".", "_n_train_steps_total", "%", "self", ".", "q_update_period", "==", "0", ":", "if", "self", ".", "train_encoder_decoder", ":", "self", ".", "context_optimizer", ".", "zero_grad", "(", ")", "if", "self", ".", "train_agent", ":", "self", ".", "qf1_optimizer", ".", "zero_grad", "(", ")", "self", ".", "qf2_optimizer", ".", "zero_grad", "(", ")", "context_loss", ".", "backward", "(", "retain_graph", "=", "True", ")", "# retain graph because the encoder is trained by both QF losses", "qf1_loss", ".", "backward", "(", "retain_graph", "=", "True", ")", "qf2_loss", ".", "backward", "(", ")", "if", "self", ".", "train_agent", ":", "self", ".", "qf1_optimizer", ".", "step", "(", ")", "self", ".", "qf2_optimizer", ".", "step", "(", ")", "if", "self", ".", "train_encoder_decoder", ":", "self", ".", "context_optimizer", ".", "step", "(", ")", "if", "self", ".", "train_agent", ":", "if", "self", ".", "_n_train_steps_total", "%", "self", ".", "policy_update_period", "==", "0", "and", "self", ".", "update_policy", ":", "self", ".", "policy_optimizer", ".", "zero_grad", "(", ")", "policy_loss", ".", "backward", "(", ")", "self", ".", "policy_optimizer", ".", "step", "(", ")", "self", ".", "_num_gradient_steps", "+=", "1", "\"\"\"\n Soft Updates\n \"\"\"", "if", "self", ".", "_n_train_steps_total", "%", "self", ".", "target_update_period", "==", "0", ":", "ptu", ".", 
"soft_update_from_to", "(", "self", ".", "qf1", ",", "self", ".", "target_qf1", ",", "self", ".", "soft_target_tau", ")", "ptu", ".", "soft_update_from_to", "(", "self", ".", "qf2", ",", "self", ".", "target_qf2", ",", "self", ".", "soft_target_tau", ")", "\"\"\"\n Save some statistics for eval\n \"\"\"", "if", "self", ".", "_need_to_update_eval_statistics", ":", "self", ".", "_need_to_update_eval_statistics", "=", "False", "\"\"\"\n Eval should set this to None.\n This way, these statistics are only computed for one batch.\n \"\"\"", "policy_loss", "=", "(", "log_pi", "-", "q_new_actions", ")", ".", "mean", "(", ")", "self", ".", "eval_statistics", "[", "'QF1 Loss'", "]", "=", "np", ".", "mean", "(", "ptu", ".", "get_numpy", "(", "qf1_loss", ")", ")", "self", ".", "eval_statistics", "[", "'QF2 Loss'", "]", "=", "np", ".", "mean", "(", "ptu", ".", "get_numpy", "(", "qf2_loss", ")", ")", "self", ".", "eval_statistics", "[", "'Policy Loss'", "]", "=", "np", ".", "mean", "(", "ptu", ".", "get_numpy", "(", "policy_loss", ")", ")", "self", ".", "eval_statistics", ".", "update", "(", "create_stats_ordered_dict", "(", "'Q1 Predictions'", ",", "ptu", ".", "get_numpy", "(", "q1_pred", ")", ",", ")", ")", "self", ".", "eval_statistics", ".", "update", "(", "create_stats_ordered_dict", "(", "'Q2 Predictions'", ",", "ptu", ".", "get_numpy", "(", "q2_pred", ")", ",", ")", ")", "self", ".", "eval_statistics", ".", "update", "(", "create_stats_ordered_dict", "(", "'Q Targets'", ",", "ptu", ".", "get_numpy", "(", "q_target", ")", ",", ")", ")", "self", ".", "eval_statistics", "[", "'task_embedding/kl_divergence'", "]", "=", "(", "ptu", ".", "get_numpy", "(", "kl_div", ")", ")", "self", ".", "eval_statistics", "[", "'task_embedding/kl_loss'", "]", "=", "(", "ptu", ".", "get_numpy", "(", "kl_loss", ")", ")", "self", ".", "eval_statistics", "[", "'task_embedding/reward_prediction_loss'", "]", "=", "(", "ptu", ".", "get_numpy", "(", "reward_prediction_loss", ")", ")", "self", ".", "eval_statistics", "[", "'task_embedding/context_loss'", "]", "=", "(", "ptu", ".", "get_numpy", "(", "context_loss", ")", ")", "self", ".", "eval_statistics", ".", "update", "(", "create_stats_ordered_dict", "(", "'Log Pis'", ",", "ptu", ".", "get_numpy", "(", "log_pi", ")", ",", ")", ")", "self", ".", "eval_statistics", ".", "update", "(", "create_stats_ordered_dict", "(", "'rewards'", ",", "ptu", ".", "get_numpy", "(", "rewards", ")", ",", ")", ")", "self", ".", "eval_statistics", ".", "update", "(", "create_stats_ordered_dict", "(", "'terminals'", ",", "ptu", ".", "get_numpy", "(", "terminals", ")", ",", ")", ")", "policy_statistics", "=", "add_prefix", "(", "dist", ".", "get_diagnostics", "(", ")", ",", "\"policy/\"", ")", "self", ".", "eval_statistics", ".", "update", "(", "policy_statistics", ")", "self", ".", "eval_statistics", ".", "update", "(", "create_stats_ordered_dict", "(", "'Advantage Weights'", ",", "ptu", ".", "get_numpy", "(", "weights", ")", ",", ")", ")", "self", ".", "eval_statistics", ".", "update", "(", "create_stats_ordered_dict", "(", "'Advantage Score'", ",", "ptu", ".", "get_numpy", "(", "score", ")", ",", ")", ")", "self", ".", "eval_statistics", "[", "'reparam_weight'", "]", "=", "self", ".", "train_reparam_weight", "self", ".", "eval_statistics", "[", "'num_gradient_steps'", "]", "=", "(", "self", ".", "_num_gradient_steps", ")", "if", "self", ".", "use_automatic_entropy_tuning", ":", "self", ".", "eval_statistics", "[", "'Alpha'", "]", "=", "alpha", ".", "item", "(", ")", "self", ".", 
"eval_statistics", "[", "'Alpha Loss'", "]", "=", "alpha_loss", ".", "item", "(", ")", "if", "self", ".", "use_automatic_beta_tuning", ":", "self", ".", "eval_statistics", ".", "update", "(", "{", "\"adaptive_beta/beta\"", ":", "ptu", ".", "get_numpy", "(", "beta", ".", "mean", "(", ")", ")", ",", "\"adaptive_beta/beta loss\"", ":", "ptu", ".", "get_numpy", "(", "beta_loss", ".", "mean", "(", ")", ")", ",", "}", ")", "self", ".", "_n_train_steps_total", "+=", "1" ]
https://github.com/rail-berkeley/rlkit/blob/c81509d982b4d52a6239e7bfe7d2540e3d3cd986/rlkit/torch/smac/smac.py#L324-L617
mesalock-linux/mesapy
ed546d59a21b36feb93e2309d5c6b75aa0ad95c9
lib-python/2.7/string.py
python
capwords
(s, sep=None)
return (sep or ' ').join(x.capitalize() for x in s.split(sep))
capwords(s [,sep]) -> string Split the argument into words using split, capitalize each word using capitalize, and join the capitalized words using join. If the optional second argument sep is absent or None, runs of whitespace characters are replaced by a single space and leading and trailing whitespace are removed, otherwise sep is used to split and join the words.
capwords(s [,sep]) -> string
[ "capwords", "(", "s", "[", "sep", "]", ")", "-", ">", "string" ]
def capwords(s, sep=None): """capwords(s [,sep]) -> string Split the argument into words using split, capitalize each word using capitalize, and join the capitalized words using join. If the optional second argument sep is absent or None, runs of whitespace characters are replaced by a single space and leading and trailing whitespace are removed, otherwise sep is used to split and join the words. """ return (sep or ' ').join(x.capitalize() for x in s.split(sep))
[ "def", "capwords", "(", "s", ",", "sep", "=", "None", ")", ":", "return", "(", "sep", "or", "' '", ")", ".", "join", "(", "x", ".", "capitalize", "(", ")", "for", "x", "in", "s", ".", "split", "(", "sep", ")", ")" ]
https://github.com/mesalock-linux/mesapy/blob/ed546d59a21b36feb93e2309d5c6b75aa0ad95c9/lib-python/2.7/string.py#L45-L56
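A quick usage sketch for the capwords record above (plain stdlib, nothing assumed beyond the function itself):

from string import capwords

# With sep=None, runs of whitespace collapse to single spaces and the ends are stripped.
print(capwords("  hello   world  "))    # 'Hello World'
# With an explicit separator, it is used verbatim for both split and join.
print(capwords("foo-bar-baz", "-"))     # 'Foo-Bar-Baz'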
beched/libpywebhack
40dd01a68956d35e7a7b6b811b69ef0f688645c6
libpywebhack.py
python
WebHack.dobrute
(self, a, b)
A worker-method for WebHack.brutesubs() :param a: beginning of interval :param b: end of interval
A worker-method for WebHack.brutesubs() :param a: beginning of interval :param b: end of interval
[ "A", "worker", "-", "method", "for", "WebHack", ".", "brutesubs", "()", ":", "param", "a", ":", "beginning", "of", "interval", ":", "param", "b", ":", "end", "of", "interval" ]
def dobrute(self, a, b): """ A worker-method for WebHack.brutesubs() :param a: beginning of interval :param b: end of interval """ for sub in self.subs[a: b]: if self.checked_subs % 1000 == 0 and self.checked_subs != 0: self.rep_log('%s names proceeded' % self.checked_subs) try: conn = httplib.HTTPConnection('%s.%s' % (sub, self.host), timeout=5) if self.ban_regex != '': conn.request('GET', '/') else: conn.request('HEAD', '/') res = conn.getresponse() self.cnt_reqs += 1 if (str(res.status) not in self.ban_codes) and not ( self.ban_regex != None and re.search(self.ban_regex, res.read())): domain = '%s.%s' % (sub, self.host) self.known_subs.append(domain) self.rep_log('Found: %s' % domain) conn.close() except (socket.gaierror, socket.herror): pass except (socket.timeout, socket.error): self.rep_log('Found: %s.%s' % (sub, self.host)) self.checked_subs += 1
[ "def", "dobrute", "(", "self", ",", "a", ",", "b", ")", ":", "for", "sub", "in", "self", ".", "subs", "[", "a", ":", "b", "]", ":", "if", "self", ".", "checked_subs", "%", "1000", "==", "0", "and", "self", ".", "checked_subs", "!=", "0", ":", "self", ".", "rep_log", "(", "'%s names proceeded'", "%", "self", ".", "checked_subs", ")", "try", ":", "conn", "=", "httplib", ".", "HTTPConnection", "(", "'%s.%s'", "%", "(", "sub", ",", "self", ".", "host", ")", ",", "timeout", "=", "5", ")", "if", "self", ".", "ban_regex", "!=", "''", ":", "conn", ".", "request", "(", "'GET'", ",", "'/'", ")", "else", ":", "conn", ".", "request", "(", "'HEAD'", ",", "'/'", ")", "res", "=", "conn", ".", "getresponse", "(", ")", "self", ".", "cnt_reqs", "+=", "1", "if", "(", "str", "(", "res", ".", "status", ")", "not", "in", "self", ".", "ban_codes", ")", "and", "not", "(", "self", ".", "ban_regex", "!=", "None", "and", "re", ".", "search", "(", "self", ".", "ban_regex", ",", "res", ".", "read", "(", ")", ")", ")", ":", "domain", "=", "'%s.%s'", "%", "(", "sub", ",", "self", ".", "host", ")", "self", ".", "known_subs", ".", "append", "(", "domain", ")", "self", ".", "rep_log", "(", "'Found: %s'", "%", "domain", ")", "conn", ".", "close", "(", ")", "except", "(", "socket", ".", "gaierror", ",", "socket", ".", "herror", ")", ":", "pass", "except", "(", "socket", ".", "timeout", ",", "socket", ".", "error", ")", ":", "self", ".", "rep_log", "(", "'Found: %s.%s'", "%", "(", "sub", ",", "self", ".", "host", ")", ")", "self", ".", "checked_subs", "+=", "1" ]
https://github.com/beched/libpywebhack/blob/40dd01a68956d35e7a7b6b811b69ef0f688645c6/libpywebhack.py#L438-L465
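The dobrute record above works on a half-open slice self.subs[a:b], which suggests the caller partitions the wordlist into per-worker intervals. A minimal sketch of such a partitioner (the helper name and worker count are ours, not libpywebhack's):

def split_intervals(n_items, n_workers):
    # Yield (a, b) slice bounds that cover range(n_items) without overlap.
    step = -(-n_items // n_workers)  # ceiling division
    for a in range(0, n_items, step):
        yield a, min(a + step, n_items)

# Each pair could then seed a worker, e.g. Thread(target=hack.dobrute, args=(a, b)).
print(list(split_intervals(10, 3)))  # [(0, 4), (4, 8), (8, 10)]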
facelessuser/pymdown-extensions
7a9d548ed3aa921e77fbedd202947ba884cca04c
pymdownx/b64.py
python
B64Extension.__init__
(self, *args, **kwargs)
Initialize.
Initialize.
[ "Initialize", "." ]
def __init__(self, *args, **kwargs): """Initialize.""" self.config = { 'base_path': [".", "Base path for b64 to use to resolve paths - Default: \".\""] } super(B64Extension, self).__init__(*args, **kwargs)
[ "def", "__init__", "(", "self", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "self", ".", "config", "=", "{", "'base_path'", ":", "[", "\".\"", ",", "\"Base path for b64 to use to resolve paths - Default: \\\".\\\"\"", "]", "}", "super", "(", "B64Extension", ",", "self", ")", ".", "__init__", "(", "*", "args", ",", "*", "*", "kwargs", ")" ]
https://github.com/facelessuser/pymdown-extensions/blob/7a9d548ed3aa921e77fbedd202947ba884cca04c/pymdownx/b64.py#L124-L131
aws/aws-sam-cli
2aa7bf01b2e0b0864ef63b1898a8b30577443acc
samcli/lib/sync/flows/layer_sync_flow.py
python
AbstractLayerSyncFlow._get_latest_layer_version
(self)
return layer_versions[0].get("Version")
Fetches all layer versions from remote and returns the latest one
Fetches all layer versions from remote and returns the latest one
[ "Fetches", "all", "layer", "versions", "from", "remote", "and", "returns", "the", "latest", "one" ]
def _get_latest_layer_version(self): """Fetches all layer versions from remote and returns the latest one""" layer_versions = self._lambda_client.list_layer_versions(LayerName=self._layer_arn).get("LayerVersions", []) if not layer_versions: raise NoLayerVersionsFoundError(self._layer_arn) return layer_versions[0].get("Version")
[ "def", "_get_latest_layer_version", "(", "self", ")", ":", "layer_versions", "=", "self", ".", "_lambda_client", ".", "list_layer_versions", "(", "LayerName", "=", "self", ".", "_layer_arn", ")", ".", "get", "(", "\"LayerVersions\"", ",", "[", "]", ")", "if", "not", "layer_versions", ":", "raise", "NoLayerVersionsFoundError", "(", "self", ".", "_layer_arn", ")", "return", "layer_versions", "[", "0", "]", ".", "get", "(", "\"Version\"", ")" ]
https://github.com/aws/aws-sam-cli/blob/2aa7bf01b2e0b0864ef63b1898a8b30577443acc/samcli/lib/sync/flows/layer_sync_flow.py#L76-L81
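A hedged boto3 sketch of the call pattern in the _get_latest_layer_version record ("my-layer" is a placeholder and configured AWS credentials are assumed):

import boto3

client = boto3.client("lambda")
versions = client.list_layer_versions(LayerName="my-layer").get("LayerVersions", [])
if not versions:
    raise RuntimeError("no versions published for this layer")
# The service lists versions newest-first, which is why index 0 is the latest.
print(versions[0].get("Version"))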
MeanEYE/Sunflower
1024bbdde3b8e202ddad3553b321a7b6230bffc9
sunflower/gui/operation_dialog.py
python
OperationDialog.increment_current_count
(self, value)
Increment current file count by value
Increment current file count by value
[ "Increment", "current", "file", "count", "by", "value" ]
def increment_current_count(self, value): """Increment current file count by value""" self._current_count += value self._update_total_count()
[ "def", "increment_current_count", "(", "self", ",", "value", ")", ":", "self", ".", "_current_count", "+=", "value", "self", ".", "_update_total_count", "(", ")" ]
https://github.com/MeanEYE/Sunflower/blob/1024bbdde3b8e202ddad3553b321a7b6230bffc9/sunflower/gui/operation_dialog.py#L415-L418
msracver/Deformable-ConvNets
6aeda878a95bcb55eadffbe125804e730574de8d
deeplab/core/DataParallelExecutorGroup.py
python
DataParallelExecutorGroup.set_params
(self, arg_params, aux_params)
Assign, i.e. copy parameters to all the executors. Parameters ---------- arg_params : dict A dictionary of name to `NDArray` parameter mapping. aux_params : dict A dictionary of name to `NDArray` auxiliary variable mapping.
Assign, i.e. copy parameters to all the executors.
[ "Assign", "i", ".", "e", ".", "copy", "parameters", "to", "all", "the", "executors", "." ]
def set_params(self, arg_params, aux_params): """Assign, i.e. copy parameters to all the executors. Parameters ---------- arg_params : dict A dictionary of name to `NDArray` parameter mapping. aux_params : dict A dictionary of name to `NDArray` auxiliary variable mapping. """ for exec_ in self.execs: exec_.copy_params_from(arg_params, aux_params)
[ "def", "set_params", "(", "self", ",", "arg_params", ",", "aux_params", ")", ":", "for", "exec_", "in", "self", ".", "execs", ":", "exec_", ".", "copy_params_from", "(", "arg_params", ",", "aux_params", ")" ]
https://github.com/msracver/Deformable-ConvNets/blob/6aeda878a95bcb55eadffbe125804e730574de8d/deeplab/core/DataParallelExecutorGroup.py#L313-L324
ProjectQ-Framework/ProjectQ
0d32c1610ba4e9aefd7f19eb52dadb4fbe5f9005
projectq/backends/_sim/_pysim.py
python
Simulator.emulate_time_evolution
(self, terms_dict, time, ids, ctrlids)
Apply exp(-i*time*H) to the wave function, i.e., evolves under the Hamiltonian H for a given time. The terms in the Hamiltonian are not required to commute. This function computes the action of the matrix exponential using ideas from Al-Mohy and Higham, 2011. TODO: Implement better estimates for s. Args: terms_dict (dict): Operator dictionary (see QubitOperator.terms) defining the Hamiltonian. time (scalar): Time to evolve for ids (list): A list of qubit IDs to which to apply the evolution. ctrlids (list): A list of control qubit IDs.
Apply exp(-i*time*H) to the wave function, i.e., evolves under the Hamiltonian H for a given time.
[ "Apply", "exp", "(", "-", "i", "*", "time", "*", "H", ")", "to", "the", "wave", "function", "i", ".", "e", ".", "evolves", "under", "the", "Hamiltonian", "H", "for", "a", "given", "time", "." ]
def emulate_time_evolution(self, terms_dict, time, ids, ctrlids): # pylint: disable=too-many-locals """ Apply exp(-i*time*H) to the wave function, i.e., evolves under the Hamiltonian H for a given time. The terms in the Hamiltonian are not required to commute. This function computes the action of the matrix exponential using ideas from Al-Mohy and Higham, 2011. TODO: Implement better estimates for s. Args: terms_dict (dict): Operator dictionary (see QubitOperator.terms) defining the Hamiltonian. time (scalar): Time to evolve for ids (list): A list of qubit IDs to which to apply the evolution. ctrlids (list): A list of control qubit IDs. """ # Determine the (normalized) trace, which is nonzero only for identity terms: trace = sum([c for (t, c) in terms_dict if len(t) == 0]) terms_dict = [(t, c) for (t, c) in terms_dict if len(t) > 0] op_nrm = abs(time) * sum([abs(c) for (_, c) in terms_dict]) # rescale the operator by s: scale = int(op_nrm + 1.0) correction = _np.exp(-1j * time * trace / float(scale)) output_state = _np.copy(self._state) mask = self._get_control_mask(ctrlids) for _ in range(scale): j = 0 nrm_change = 1.0 while nrm_change > 1.0e-12: coeff = (-time * 1j) / float(scale * (j + 1)) current_state = _np.copy(self._state) update = 0j for term, tcoeff in terms_dict: self._apply_term(term, ids) self._state *= tcoeff update += self._state self._state = _np.copy(current_state) update *= coeff self._state = update for k, _update in enumerate(update): if (k & mask) == mask: output_state[k] += _update nrm_change = _np.linalg.norm(update) j += 1 for k in range(len(update)): if (k & mask) == mask: output_state[k] *= correction self._state = _np.copy(output_state)
[ "def", "emulate_time_evolution", "(", "self", ",", "terms_dict", ",", "time", ",", "ids", ",", "ctrlids", ")", ":", "# pylint: disable=too-many-locals", "# Determine the (normalized) trace, which is nonzero only for identity terms:", "trace", "=", "sum", "(", "[", "c", "for", "(", "t", ",", "c", ")", "in", "terms_dict", "if", "len", "(", "t", ")", "==", "0", "]", ")", "terms_dict", "=", "[", "(", "t", ",", "c", ")", "for", "(", "t", ",", "c", ")", "in", "terms_dict", "if", "len", "(", "t", ")", ">", "0", "]", "op_nrm", "=", "abs", "(", "time", ")", "*", "sum", "(", "[", "abs", "(", "c", ")", "for", "(", "_", ",", "c", ")", "in", "terms_dict", "]", ")", "# rescale the operator by s:", "scale", "=", "int", "(", "op_nrm", "+", "1.0", ")", "correction", "=", "_np", ".", "exp", "(", "-", "1j", "*", "time", "*", "trace", "/", "float", "(", "scale", ")", ")", "output_state", "=", "_np", ".", "copy", "(", "self", ".", "_state", ")", "mask", "=", "self", ".", "_get_control_mask", "(", "ctrlids", ")", "for", "_", "in", "range", "(", "scale", ")", ":", "j", "=", "0", "nrm_change", "=", "1.0", "while", "nrm_change", ">", "1.0e-12", ":", "coeff", "=", "(", "-", "time", "*", "1j", ")", "/", "float", "(", "scale", "*", "(", "j", "+", "1", ")", ")", "current_state", "=", "_np", ".", "copy", "(", "self", ".", "_state", ")", "update", "=", "0j", "for", "term", ",", "tcoeff", "in", "terms_dict", ":", "self", ".", "_apply_term", "(", "term", ",", "ids", ")", "self", ".", "_state", "*=", "tcoeff", "update", "+=", "self", ".", "_state", "self", ".", "_state", "=", "_np", ".", "copy", "(", "current_state", ")", "update", "*=", "coeff", "self", ".", "_state", "=", "update", "for", "k", ",", "_update", "in", "enumerate", "(", "update", ")", ":", "if", "(", "k", "&", "mask", ")", "==", "mask", ":", "output_state", "[", "k", "]", "+=", "_update", "nrm_change", "=", "_np", ".", "linalg", ".", "norm", "(", "update", ")", "j", "+=", "1", "for", "k", "in", "range", "(", "len", "(", "update", ")", ")", ":", "if", "(", "k", "&", "mask", ")", "==", "mask", ":", "output_state", "[", "k", "]", "*=", "correction", "self", ".", "_state", "=", "_np", ".", "copy", "(", "output_state", ")" ]
https://github.com/ProjectQ-Framework/ProjectQ/blob/0d32c1610ba4e9aefd7f19eb52dadb4fbe5f9005/projectq/backends/_sim/_pysim.py#L318-L364
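The emulate_time_evolution record implements exp(-i*t*H) as a truncated Taylor series applied s times after rescaling t by s. A minimal dense-matrix sketch of the same idea (plain numpy, not ProjectQ's term-wise sparse simulator):

import numpy as np

def evolve(state, H, time, tol=1e-12):
    # Apply exp(-i*time*H) to state via a scaled, truncated Taylor series.
    scale = int(abs(time) * np.linalg.norm(H, ord=2) + 1.0)
    for _ in range(scale):
        term = state.copy()
        j = 1
        while np.linalg.norm(term) > tol:
            # term_j = (-i*t/(s*j)) H @ term_{j-1}  =>  term_j = ((-i*t/s)**j / j!) H^j state
            term = (-1j * time / (scale * j)) * (H @ term)
            state = state + term
            j += 1
    return state

H = np.array([[0.0, 1.0], [1.0, 0.0]])             # Pauli-X
out = evolve(np.array([1.0, 0.0], dtype=complex), H, time=np.pi / 2)
print(out)                                          # ~ [0, -1j], i.e. exp(-i*pi/2*X)|0>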
LZQthePlane/Online-Realtime-Action-Recognition-based-on-OpenPose
33664be6ae7a26e9875f9771dc43d05a8b071dce
Tracking/deep_sort/track.py
python
Track.to_tlbr
(self)
return ret
Get current position in bounding box format `(min x, min y, max x, max y)`. This is the second way of expressing a bounding box.
Get current position in bounding box format `(min x, min y, max x, max y)`. This is the second way of expressing a bounding box.
[ "Get", "current", "position", "in", "bounding", "box", "format", "(", "min", "x", "min", "y", "max", "x", "max", "y", ")", ".", "This", "is", "the", "second", "way", "of", "expressing", "a", "bounding", "box" ]
def to_tlbr(self): """Get current position in bounding box format `(min x, min y, max x, max y)`. This is the second way of expressing a bounding box. """ ret = self.to_tlwh() ret[2:] = ret[:2] + ret[2:] return ret
[ "def", "to_tlbr", "(", "self", ")", ":", "ret", "=", "self", ".", "to_tlwh", "(", ")", "ret", "[", "2", ":", "]", "=", "ret", "[", ":", "2", "]", "+", "ret", "[", "2", ":", "]", "return", "ret" ]
https://github.com/LZQthePlane/Online-Realtime-Action-Recognition-based-on-OpenPose/blob/33664be6ae7a26e9875f9771dc43d05a8b071dce/Tracking/deep_sort/track.py#L90-L96
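The to_tlbr record above is a two-line coordinate conversion: bottom-right = top-left + (width, height). A tiny check:

import numpy as np

tlwh = np.array([10.0, 20.0, 30.0, 40.0])  # (top-left x, top-left y, width, height)
tlbr = tlwh.copy()
tlbr[2:] = tlbr[:2] + tlbr[2:]             # same in-place update as the record
print(tlbr)                                # [10. 20. 40. 60.]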
zhl2008/awd-platform
0416b31abea29743387b10b3914581fbe8e7da5e
web_flaskbb/lib/python2.7/site-packages/flask_sqlalchemy/__init__.py
python
Pagination.iter_pages
(self, left_edge=2, left_current=2, right_current=5, right_edge=2)
Iterates over the page numbers in the pagination. The four parameters control the thresholds how many numbers should be produced from the sides. Skipped page numbers are represented as `None`. This is how you could render such a pagination in the templates: .. sourcecode:: html+jinja {% macro render_pagination(pagination, endpoint) %} <div class=pagination> {%- for page in pagination.iter_pages() %} {% if page %} {% if page != pagination.page %} <a href="{{ url_for(endpoint, page=page) }}">{{ page }}</a> {% else %} <strong>{{ page }}</strong> {% endif %} {% else %} <span class=ellipsis>…</span> {% endif %} {%- endfor %} </div> {% endmacro %}
Iterates over the page numbers in the pagination. The four parameters control the thresholds how many numbers should be produced from the sides. Skipped page numbers are represented as `None`. This is how you could render such a pagination in the templates:
[ "Iterates", "over", "the", "page", "numbers", "in", "the", "pagination", ".", "The", "four", "parameters", "control", "the", "thresholds", "how", "many", "numbers", "should", "be", "produced", "from", "the", "sides", ".", "Skipped", "page", "numbers", "are", "represented", "as", "None", ".", "This", "is", "how", "you", "could", "render", "such", "a", "pagination", "in", "the", "templates", ":" ]
def iter_pages(self, left_edge=2, left_current=2, right_current=5, right_edge=2): """Iterates over the page numbers in the pagination. The four parameters control the thresholds how many numbers should be produced from the sides. Skipped page numbers are represented as `None`. This is how you could render such a pagination in the templates: .. sourcecode:: html+jinja {% macro render_pagination(pagination, endpoint) %} <div class=pagination> {%- for page in pagination.iter_pages() %} {% if page %} {% if page != pagination.page %} <a href="{{ url_for(endpoint, page=page) }}">{{ page }}</a> {% else %} <strong>{{ page }}</strong> {% endif %} {% else %} <span class=ellipsis>…</span> {% endif %} {%- endfor %} </div> {% endmacro %} """ last = 0 for num in xrange(1, self.pages + 1): if num <= left_edge or \ (num > self.page - left_current - 1 and \ num < self.page + right_current) or \ num > self.pages - right_edge: if last + 1 != num: yield None yield num last = num
[ "def", "iter_pages", "(", "self", ",", "left_edge", "=", "2", ",", "left_current", "=", "2", ",", "right_current", "=", "5", ",", "right_edge", "=", "2", ")", ":", "last", "=", "0", "for", "num", "in", "xrange", "(", "1", ",", "self", ".", "pages", "+", "1", ")", ":", "if", "num", "<=", "left_edge", "or", "(", "num", ">", "self", ".", "page", "-", "left_current", "-", "1", "and", "num", "<", "self", ".", "page", "+", "right_current", ")", "or", "num", ">", "self", ".", "pages", "-", "right_edge", ":", "if", "last", "+", "1", "!=", "num", ":", "yield", "None", "yield", "num", "last", "=", "num" ]
https://github.com/zhl2008/awd-platform/blob/0416b31abea29743387b10b3914581fbe8e7da5e/web_flaskbb/lib/python2.7/site-packages/flask_sqlalchemy/__init__.py#L370-L404
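A standalone re-statement of the iter_pages window logic from the record above, decoupled from the Pagination object so the emitted numbers and the None gaps are easy to inspect:

def iter_pages(page, pages, left_edge=2, left_current=2, right_current=5, right_edge=2):
    last = 0
    for num in range(1, pages + 1):
        if (num <= left_edge
                or page - left_current - 1 < num < page + right_current
                or num > pages - right_edge):
            if last + 1 != num:
                yield None               # a skipped run of page numbers
            yield num
            last = num

print(list(iter_pages(page=10, pages=20)))
# [1, 2, None, 8, 9, 10, 11, 12, 13, 14, None, 19, 20]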
OpenEndedGroup/Field
4f7c8edfb01bb0ccc927b78d3c500f018a4ae37c
Contents/lib/python/site.py
python
setquit
()
Define new built-ins 'quit' and 'exit'. These are simply strings that display a hint on how to exit.
Define new built-ins 'quit' and 'exit'. These are simply strings that display a hint on how to exit.
[ "Define", "new", "built", "-", "ins", "quit", "and", "exit", ".", "These", "are", "simply", "strings", "that", "display", "a", "hint", "on", "how", "to", "exit", "." ]
def setquit(): """Define new built-ins 'quit' and 'exit'. These are simply strings that display a hint on how to exit. """ if os.sep == ':': eof = 'Cmd-Q' elif os.sep == '\\': eof = 'Ctrl-Z plus Return' else: eof = 'Ctrl-D (i.e. EOF)' class Quitter(object): def __init__(self, name): self.name = name def __repr__(self): return 'Use %s() or %s to exit' % (self.name, eof) def __call__(self, code=None): # Shells like IDLE catch the SystemExit, but listen when their # stdin wrapper is closed. try: sys.stdin.close() except: pass raise SystemExit(code) __builtin__.quit = Quitter('quit') __builtin__.exit = Quitter('exit')
[ "def", "setquit", "(", ")", ":", "if", "os", ".", "sep", "==", "':'", ":", "eof", "=", "'Cmd-Q'", "elif", "os", ".", "sep", "==", "'\\\\'", ":", "eof", "=", "'Ctrl-Z plus Return'", "else", ":", "eof", "=", "'Ctrl-D (i.e. EOF)'", "class", "Quitter", "(", "object", ")", ":", "def", "__init__", "(", "self", ",", "name", ")", ":", "self", ".", "name", "=", "name", "def", "__repr__", "(", "self", ")", ":", "return", "'Use %s() or %s to exit'", "%", "(", "self", ".", "name", ",", "eof", ")", "def", "__call__", "(", "self", ",", "code", "=", "None", ")", ":", "# Shells like IDLE catch the SystemExit, but listen when their", "# stdin wrapper is closed.", "try", ":", "sys", ".", "stdin", ".", "close", "(", ")", "except", ":", "pass", "raise", "SystemExit", "(", "code", ")", "__builtin__", ".", "quit", "=", "Quitter", "(", "'quit'", ")", "__builtin__", ".", "exit", "=", "Quitter", "(", "'exit'", ")" ]
https://github.com/OpenEndedGroup/Field/blob/4f7c8edfb01bb0ccc927b78d3c500f018a4ae37c/Contents/lib/python/site.py#L233-L259
jgyates/genmon
2cb2ed2945f55cd8c259b09ccfa9a51e23f1341e
genmonlib/gaugediy.py
python
GaugeDIY1.__init__
(self, config, log = None, console = None)
[]
def __init__(self, config, log = None, console = None): super(GaugeDIY1, self).__init__(config, log = log, console = console) self.mv_per_step = self.config.ReadValue('mv_per_step', return_type = int, default = 125) self.Multiplier = self.config.ReadValue('volts_to_percent_multiplier', return_type = float, default = 20.0) self.debug = self.config.ReadValue('debug', return_type = bool, default = False)
[ "def", "__init__", "(", "self", ",", "config", ",", "log", "=", "None", ",", "console", "=", "None", ")", ":", "super", "(", "GaugeDIY1", ",", "self", ")", ".", "__init__", "(", "config", ",", "log", "=", "log", ",", "console", "=", "console", ")", "self", ".", "mv_per_step", "=", "self", ".", "config", ".", "ReadValue", "(", "'mv_per_step'", ",", "return_type", "=", "int", ",", "default", "=", "125", ")", "self", ".", "Multiplier", "=", "self", ".", "config", ".", "ReadValue", "(", "'volts_to_percent_multiplier'", ",", "return_type", "=", "float", ",", "default", "=", "20.0", ")", "self", ".", "debug", "=", "self", ".", "config", ".", "ReadValue", "(", "'debug'", ",", "return_type", "=", "bool", ",", "default", "=", "False", ")" ]
https://github.com/jgyates/genmon/blob/2cb2ed2945f55cd8c259b09ccfa9a51e23f1341e/genmonlib/gaugediy.py#L67-L73
aws-samples/aws-kube-codesuite
ab4e5ce45416b83bffb947ab8d234df5437f4fca
src/kubernetes/client/models/v1beta1_role_binding_list.py
python
V1beta1RoleBindingList.kind
(self)
return self._kind
Gets the kind of this V1beta1RoleBindingList. Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds :return: The kind of this V1beta1RoleBindingList. :rtype: str
Gets the kind of this V1beta1RoleBindingList. Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds
[ "Gets", "the", "kind", "of", "this", "V1beta1RoleBindingList", ".", "Kind", "is", "a", "string", "value", "representing", "the", "REST", "resource", "this", "object", "represents", ".", "Servers", "may", "infer", "this", "from", "the", "endpoint", "the", "client", "submits", "requests", "to", ".", "Cannot", "be", "updated", ".", "In", "CamelCase", ".", "More", "info", ":", "https", ":", "//", "git", ".", "k8s", ".", "io", "/", "community", "/", "contributors", "/", "devel", "/", "api", "-", "conventions", ".", "md#types", "-", "kinds" ]
def kind(self): """ Gets the kind of this V1beta1RoleBindingList. Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds :return: The kind of this V1beta1RoleBindingList. :rtype: str """ return self._kind
[ "def", "kind", "(", "self", ")", ":", "return", "self", ".", "_kind" ]
https://github.com/aws-samples/aws-kube-codesuite/blob/ab4e5ce45416b83bffb947ab8d234df5437f4fca/src/kubernetes/client/models/v1beta1_role_binding_list.py#L101-L109
pyparallel/pyparallel
11e8c6072d48c8f13641925d17b147bf36ee0ba3
Lib/site-packages/pip-7.1.2-py3.3.egg/pip/locations.py
python
__get_username
()
return pwd.getpwuid(os.geteuid()).pw_name
Returns the effective username of the current process.
Returns the effective username of the current process.
[ "Returns", "the", "effective", "username", "of", "the", "current", "process", "." ]
def __get_username(): """ Returns the effective username of the current process. """ if WINDOWS: return getpass.getuser() import pwd return pwd.getpwuid(os.geteuid()).pw_name
[ "def", "__get_username", "(", ")", ":", "if", "WINDOWS", ":", "return", "getpass", ".", "getuser", "(", ")", "import", "pwd", "return", "pwd", ".", "getpwuid", "(", "os", ".", "geteuid", "(", ")", ")", ".", "pw_name" ]
https://github.com/pyparallel/pyparallel/blob/11e8c6072d48c8f13641925d17b147bf36ee0ba3/Lib/site-packages/pip-7.1.2-py3.3.egg/pip/locations.py#L97-L102
brad-sp/cuckoo-modified
038cfbba66ef76557d255aa89f2d4205f376ca45
lib/cuckoo/core/startup.py
python
init_console_logging
()
Initializes logging only to console.
Initializes logging only to console.
[ "Initializes", "logging", "only", "to", "console", "." ]
def init_console_logging(): """Initializes logging only to console.""" formatter = logging.Formatter("%(asctime)s [%(name)s] %(levelname)s: %(message)s") ch = ConsoleHandler() ch.setFormatter(formatter) log.addHandler(ch) log.setLevel(logging.INFO)
[ "def", "init_console_logging", "(", ")", ":", "formatter", "=", "logging", ".", "Formatter", "(", "\"%(asctime)s [%(name)s] %(levelname)s: %(message)s\"", ")", "ch", "=", "ConsoleHandler", "(", ")", "ch", ".", "setFormatter", "(", "formatter", ")", "log", ".", "addHandler", "(", "ch", ")", "log", ".", "setLevel", "(", "logging", ".", "INFO", ")" ]
https://github.com/brad-sp/cuckoo-modified/blob/038cfbba66ef76557d255aa89f2d4205f376ca45/lib/cuckoo/core/startup.py#L151-L159
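The init_console_logging record depends on cuckoo's ConsoleHandler; the same wiring with the stdlib's StreamHandler looks like this (a sketch, not cuckoo's code):

import logging

log = logging.getLogger("demo")
formatter = logging.Formatter("%(asctime)s [%(name)s] %(levelname)s: %(message)s")
ch = logging.StreamHandler()   # stand-in for the project's ConsoleHandler
ch.setFormatter(formatter)
log.addHandler(ch)
log.setLevel(logging.INFO)
log.info("console logging initialized")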
StanfordVL/taskonomy
9f814867b5fe4165860862211e8e99b0f200144d
taskbank/lib/data/load_ops.py
python
mask_if_channel_ge
( img, threshhold, channel_idx, broadcast_to_shape=None, broadcast_to_dim=None )
Returns a mask that masks an entire pixel iff the channel specified has values ge a specified value
Returns a mask that masks an entire pixel iff the channel specified has values ge a specified value
[ "Returns", "a", "mask", "that", "masks", "an", "entire", "pixel", "iff", "the", "channel", "specified", "has", "values", "ge", "a", "specified", "value" ]
def mask_if_channel_ge( img, threshhold, channel_idx, broadcast_to_shape=None, broadcast_to_dim=None ): ''' Returns a mask that masks an entire pixel iff the channel specified has values ge a specified value ''' h, w, c = img.shape mask = ( img[:, :, channel_idx] < threshhold ) # keep if lt if len( mask.shape ) == 2: mask = mask[:, :, np.newaxis].astype( np.float32 ) if broadcast_to_shape is not None: return np.broadcast_to( mask, broadcast_to_shape ) elif broadcast_to_dim is not None: return np.broadcast_to( mask, [h,w,broadcast_to_dim]) else: return np.broadcast_to( mask, img.shape )
[ "def", "mask_if_channel_ge", "(", "img", ",", "threshhold", ",", "channel_idx", ",", "broadcast_to_shape", "=", "None", ",", "broadcast_to_dim", "=", "None", ")", ":", "h", ",", "w", ",", "c", "=", "img", ".", "shape", "mask", "=", "(", "img", "[", ":", ",", ":", ",", "channel_idx", "]", "<", "threshhold", ")", "# keep if lt", "if", "len", "(", "mask", ".", "shape", ")", "==", "2", ":", "mask", "=", "mask", "[", ":", ",", ":", ",", "np", ".", "newaxis", "]", ".", "astype", "(", "np", ".", "float32", ")", "if", "broadcast_to_shape", "is", "not", "None", ":", "return", "np", ".", "broadcast_to", "(", "mask", ",", "broadcast_to_shape", ")", "elif", "broadcast_to_dim", "is", "not", "None", ":", "return", "np", ".", "broadcast_to", "(", "mask", ",", "[", "h", ",", "w", ",", "broadcast_to_dim", "]", ")", "else", ":", "return", "np", ".", "broadcast_to", "(", "mask", ",", "img", ".", "shape", ")" ]
https://github.com/StanfordVL/taskonomy/blob/9f814867b5fe4165860862211e8e99b0f200144d/taskbank/lib/data/load_ops.py#L1531-L1545
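A small check of the broadcast behavior in the mask_if_channel_ge record: the per-pixel boolean computed from one channel is expanded across all channels.

import numpy as np

img = np.zeros((2, 2, 3), dtype=np.float32)
img[0, 0, 2] = 5.0                        # this pixel's channel 2 is >= the threshold
mask = (img[:, :, 2] < 4.0)               # keep only pixels below it ("keep if lt")
mask = mask[:, :, np.newaxis].astype(np.float32)
full = np.broadcast_to(mask, img.shape)   # one value per pixel, repeated per channel
print(full[0, 0], full[1, 1])             # [0. 0. 0.] [1. 1. 1.]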
zhl2008/awd-platform
0416b31abea29743387b10b3914581fbe8e7da5e
web_flaskbb/lib/python2.7/site-packages/werkzeug/datastructures.py
python
ImmutableHeadersMixin.setdefault
(self, key, default)
[]
def setdefault(self, key, default): is_immutable(self)
[ "def", "setdefault", "(", "self", ",", "key", ",", "default", ")", ":", "is_immutable", "(", "self", ")" ]
https://github.com/zhl2008/awd-platform/blob/0416b31abea29743387b10b3914581fbe8e7da5e/web_flaskbb/lib/python2.7/site-packages/werkzeug/datastructures.py#L1322-L1323
fortharris/Pcode
147962d160a834c219e12cb456abc130826468e4
rope/base/evaluate.py
python
StatementEvaluator._Compare
(self, node)
[]
def _Compare(self, node): self.result = self._get_builtin_name('bool')
[ "def", "_Compare", "(", "self", ",", "node", ")", ":", "self", ".", "result", "=", "self", ".", "_get_builtin_name", "(", "'bool'", ")" ]
https://github.com/fortharris/Pcode/blob/147962d160a834c219e12cb456abc130826468e4/rope/base/evaluate.py#L214-L215
sensepost/objection
658675f0e7716bd0899c9f6e9c45d25f38a699d8
objection/commands/sqlite.py
python
_should_sync_once_done
(args: list)
return '--sync' in args
Checks if --sync flag was provided. :param args: :return:
Checks if --sync flag was provided.
[ "Checks", "if", "--", "sync", "flag", "was", "provided", "." ]
def _should_sync_once_done(args: list) -> bool: """ Checks if --sync flag was provided. :param args: :return: """ return '--sync' in args
[ "def", "_should_sync_once_done", "(", "args", ":", "list", ")", "->", "bool", ":", "return", "'--sync'", "in", "args" ]
https://github.com/sensepost/objection/blob/658675f0e7716bd0899c9f6e9c45d25f38a699d8/objection/commands/sqlite.py#L43-L51
wwqgtxx/wwqLyParse
33136508e52821babd9294fdecffbdf02d73a6fc
wwqLyParse/lib/aiohttp_lib_py352/aiohttp/web.py
python
run_app
(app, *, host=None, port=None, path=None, sock=None, shutdown_timeout=60.0, ssl_context=None, print=print, backlog=128, access_log_format=None, access_log=access_logger, handle_signals=True, loop=None)
Run an app locally
Run an app locally
[ "Run", "an", "app", "locally" ]
def run_app(app, *, host=None, port=None, path=None, sock=None, shutdown_timeout=60.0, ssl_context=None, print=print, backlog=128, access_log_format=None, access_log=access_logger, handle_signals=True, loop=None): """Run an app locally""" user_supplied_loop = loop is not None if loop is None: loop = asyncio.get_event_loop() app._set_loop(loop) loop.run_until_complete(app.startup()) try: make_handler_kwargs = dict() if access_log_format is not None: make_handler_kwargs['access_log_format'] = access_log_format handler = app.make_handler(loop=loop, access_log=access_log, **make_handler_kwargs) server_creations, uris = _make_server_creators( handler, loop=loop, ssl_context=ssl_context, host=host, port=port, path=path, sock=sock, backlog=backlog) servers = loop.run_until_complete( asyncio.gather(*server_creations, loop=loop) ) if handle_signals: try: loop.add_signal_handler(signal.SIGINT, raise_graceful_exit) loop.add_signal_handler(signal.SIGTERM, raise_graceful_exit) except NotImplementedError: # pragma: no cover # add_signal_handler is not implemented on Windows pass try: if print: print("======== Running on {} ========\n" "(Press CTRL+C to quit)".format(', '.join(uris))) loop.run_forever() except (GracefulExit, KeyboardInterrupt): # pragma: no cover pass finally: server_closures = [] for srv in servers: srv.close() server_closures.append(srv.wait_closed()) loop.run_until_complete( asyncio.gather(*server_closures, loop=loop)) loop.run_until_complete(app.shutdown()) loop.run_until_complete(handler.shutdown(shutdown_timeout)) finally: loop.run_until_complete(app.cleanup()) if not user_supplied_loop: if hasattr(loop, 'shutdown_asyncgens'): loop.run_until_complete(loop.shutdown_asyncgens()) loop.close()
[ "def", "run_app", "(", "app", ",", "*", ",", "host", "=", "None", ",", "port", "=", "None", ",", "path", "=", "None", ",", "sock", "=", "None", ",", "shutdown_timeout", "=", "60.0", ",", "ssl_context", "=", "None", ",", "print", "=", "print", ",", "backlog", "=", "128", ",", "access_log_format", "=", "None", ",", "access_log", "=", "access_logger", ",", "handle_signals", "=", "True", ",", "loop", "=", "None", ")", ":", "user_supplied_loop", "=", "loop", "is", "not", "None", "if", "loop", "is", "None", ":", "loop", "=", "asyncio", ".", "get_event_loop", "(", ")", "app", ".", "_set_loop", "(", "loop", ")", "loop", ".", "run_until_complete", "(", "app", ".", "startup", "(", ")", ")", "try", ":", "make_handler_kwargs", "=", "dict", "(", ")", "if", "access_log_format", "is", "not", "None", ":", "make_handler_kwargs", "[", "'access_log_format'", "]", "=", "access_log_format", "handler", "=", "app", ".", "make_handler", "(", "loop", "=", "loop", ",", "access_log", "=", "access_log", ",", "*", "*", "make_handler_kwargs", ")", "server_creations", ",", "uris", "=", "_make_server_creators", "(", "handler", ",", "loop", "=", "loop", ",", "ssl_context", "=", "ssl_context", ",", "host", "=", "host", ",", "port", "=", "port", ",", "path", "=", "path", ",", "sock", "=", "sock", ",", "backlog", "=", "backlog", ")", "servers", "=", "loop", ".", "run_until_complete", "(", "asyncio", ".", "gather", "(", "*", "server_creations", ",", "loop", "=", "loop", ")", ")", "if", "handle_signals", ":", "try", ":", "loop", ".", "add_signal_handler", "(", "signal", ".", "SIGINT", ",", "raise_graceful_exit", ")", "loop", ".", "add_signal_handler", "(", "signal", ".", "SIGTERM", ",", "raise_graceful_exit", ")", "except", "NotImplementedError", ":", "# pragma: no cover", "# add_signal_handler is not implemented on Windows", "pass", "try", ":", "if", "print", ":", "print", "(", "\"======== Running on {} ========\\n\"", "\"(Press CTRL+C to quit)\"", ".", "format", "(", "', '", ".", "join", "(", "uris", ")", ")", ")", "loop", ".", "run_forever", "(", ")", "except", "(", "GracefulExit", ",", "KeyboardInterrupt", ")", ":", "# pragma: no cover", "pass", "finally", ":", "server_closures", "=", "[", "]", "for", "srv", "in", "servers", ":", "srv", ".", "close", "(", ")", "server_closures", ".", "append", "(", "srv", ".", "wait_closed", "(", ")", ")", "loop", ".", "run_until_complete", "(", "asyncio", ".", "gather", "(", "*", "server_closures", ",", "loop", "=", "loop", ")", ")", "loop", ".", "run_until_complete", "(", "app", ".", "shutdown", "(", ")", ")", "loop", ".", "run_until_complete", "(", "handler", ".", "shutdown", "(", "shutdown_timeout", ")", ")", "finally", ":", "loop", ".", "run_until_complete", "(", "app", ".", "cleanup", "(", ")", ")", "if", "not", "user_supplied_loop", ":", "if", "hasattr", "(", "loop", ",", "'shutdown_asyncgens'", ")", ":", "loop", ".", "run_until_complete", "(", "loop", ".", "shutdown_asyncgens", "(", ")", ")", "loop", ".", "close", "(", ")" ]
https://github.com/wwqgtxx/wwqLyParse/blob/33136508e52821babd9294fdecffbdf02d73a6fc/wwqLyParse/lib/aiohttp_lib_py352/aiohttp/web.py#L432-L489
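A minimal application for the run_app record above (the route and handler are ours; only the aiohttp entry points shown in the record are assumed):

from aiohttp import web

async def hello(request):
    return web.Response(text="hello")

app = web.Application()
app.router.add_get("/", hello)
web.run_app(app, host="127.0.0.1", port=8080)  # blocks until Ctrl+C, then cleans up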
wucng/TensorExpand
4ea58f64f5c5082b278229b799c9f679536510b7
TensorExpand/Object detection/Mask RCNN/Mask_RCNN Exercise/train.py
python
ShapesDataset.load_shapes
(self, count, height, width)
for i in range(count): comb_image, mask, class_ids = self.random_image(height, width) self.add_image("shapes", image_id=i, path=None, width=width, height=height, image=comb_image, mask=mask, class_ids=class_ids)
for i in range(count): comb_image, mask, class_ids = self.random_image(height, width) self.add_image("shapes", image_id=i, path=None, width=width, height=height, image=comb_image, mask=mask, class_ids=class_ids)
[ "for", "i", "in", "range", "(", "count", ")", ":", "comb_image", "mask", "class_ids", "=", "self", ".", "random_image", "(", "height", "width", ")", "self", ".", "add_image", "(", "shapes", "image_id", "=", "i", "path", "=", "None", "width", "=", "width", "height", "=", "height", "image", "=", "comb_image", "mask", "=", "mask", "class_ids", "=", "class_ids", ")" ]
def load_shapes(self, count, height, width): data=self.load_pkl() class_dict=data[0][0] # dictionary of all classes; ids start at 1 # self.add_class("shapes", 0, 'BG') # label 0 defaults to background; utils already adds it, so no need to add it again here # self.add_class("shapes", 1, "square") # self.add_class("shapes", 2, "circle") # self.add_class("shapes", 3, "triangle") # classes must be added starting from 1, 2, 3, ..., otherwise errors occur; the case below makes the labels mismatch # [self.add_class('shapes',class_dict[i],i) for i in list(class_dict.keys())] # adding out of order makes the labels fail to correspond # back-compute the class name from the class id class_name_dict = dict(zip(class_dict.values(), class_dict.keys())) [self.add_class('shapes',i,class_name_dict[i]) for i in range(1,21)] # 20 classes in total ''' for i in range(count): comb_image, mask, class_ids = self.random_image(height, width) self.add_image("shapes", image_id=i, path=None, width=width, height=height, image=comb_image, mask=mask, class_ids=class_ids) ''' for i in range(1,len(data)): comb_image=data[i][0] mask=data[i][1] class_ids=data[i][2] self.add_image("shapes", image_id=i-1, path=None, width=height, height=width, image=comb_image, mask=mask, class_ids=class_ids)
[ "def", "load_shapes", "(", "self", ",", "count", ",", "height", ",", "width", ")", ":", "data", "=", "self", ".", "load_pkl", "(", ")", "class_dict", "=", "data", "[", "0", "]", "[", "0", "]", "# dictionary of all classes; ids start at 1", "# self.add_class(\"shapes\", 0, 'BG') # label 0 defaults to background; utils already adds it, so no need to add it again here", "# self.add_class(\"shapes\", 1, \"square\")", "# self.add_class(\"shapes\", 2, \"circle\")", "# self.add_class(\"shapes\", 3, \"triangle\")", "# classes must be added starting from 1, 2, 3, ..., otherwise errors occur; the case below makes the labels mismatch", "# [self.add_class('shapes',class_dict[i],i) for i in list(class_dict.keys())] # adding out of order makes the labels fail to correspond", "# back-compute the class name from the class id", "class_name_dict", "=", "dict", "(", "zip", "(", "class_dict", ".", "values", "(", ")", ",", "class_dict", ".", "keys", "(", ")", ")", ")", "[", "self", ".", "add_class", "(", "'shapes'", ",", "i", ",", "class_name_dict", "[", "i", "]", ")", "for", "i", "in", "range", "(", "1", ",", "21", ")", "]", "# 20 classes in total", "for", "i", "in", "range", "(", "1", ",", "len", "(", "data", ")", ")", ":", "comb_image", "=", "data", "[", "i", "]", "[", "0", "]", "mask", "=", "data", "[", "i", "]", "[", "1", "]", "class_ids", "=", "data", "[", "i", "]", "[", "2", "]", "self", ".", "add_image", "(", "\"shapes\"", ",", "image_id", "=", "i", "-", "1", ",", "path", "=", "None", ",", "width", "=", "height", ",", "height", "=", "width", ",", "image", "=", "comb_image", ",", "mask", "=", "mask", ",", "class_ids", "=", "class_ids", ")" ]
https://github.com/wucng/TensorExpand/blob/4ea58f64f5c5082b278229b799c9f679536510b7/TensorExpand/Object detection/Mask RCNN/Mask_RCNN Exercise/train.py#L124-L155
home-assistant/core
265ebd17a3f17ed8dc1e9bdede03ac8e323f1ab1
homeassistant/components/somfy/cover.py
python
SomfyCover.stop_cover_tilt
(self, **kwargs)
Stop the cover.
Stop the cover.
[ "Stop", "the", "cover", "." ]
def stop_cover_tilt(self, **kwargs): """Stop the cover.""" self._cover.stop()
[ "def", "stop_cover_tilt", "(", "self", ",", "*", "*", "kwargs", ")", ":", "self", ".", "_cover", ".", "stop", "(", ")" ]
https://github.com/home-assistant/core/blob/265ebd17a3f17ed8dc1e9bdede03ac8e323f1ab1/homeassistant/components/somfy/cover.py#L189-L191
tanghaibao/jcvi
5e720870c0928996f8b77a38208106ff0447ccb6
jcvi/formats/fasta.py
python
sort
(args)
return sortedfastafile
%prog sort fastafile Sort a list of sequences and output with sorted IDs, etc.
%prog sort fastafile
[ "%prog", "sort", "fastafile" ]
def sort(args): """ %prog sort fastafile Sort a list of sequences and output with sorted IDs, etc. """ p = OptionParser(sort.__doc__) p.add_option( "--sizes", default=False, action="store_true", help="Sort by decreasing size" ) opts, args = p.parse_args(args) if len(args) != 1: sys.exit(p.print_help()) (fastafile,) = args sortedfastafile = fastafile.rsplit(".", 1)[0] + ".sorted.fasta" f = Fasta(fastafile, index=False) fw = must_open(sortedfastafile, "w") if opts.sizes: # Sort by decreasing size sortlist = sorted(f.itersizes(), key=lambda x: (-x[1], x[0])) logging.debug( "Sort by size: max: {0}, min: {1}".format(sortlist[0], sortlist[-1]) ) sortlist = [x for x, s in sortlist] else: sortlist = sorted(f.iterkeys()) for key in sortlist: rec = f[key] SeqIO.write([rec], fw, "fasta") logging.debug("Sorted file written to `{0}`.".format(sortedfastafile)) fw.close() return sortedfastafile
[ "def", "sort", "(", "args", ")", ":", "p", "=", "OptionParser", "(", "sort", ".", "__doc__", ")", "p", ".", "add_option", "(", "\"--sizes\"", ",", "default", "=", "False", ",", "action", "=", "\"store_true\"", ",", "help", "=", "\"Sort by decreasing size\"", ")", "opts", ",", "args", "=", "p", ".", "parse_args", "(", "args", ")", "if", "len", "(", "args", ")", "!=", "1", ":", "sys", ".", "exit", "(", "p", ".", "print_help", "(", ")", ")", "(", "fastafile", ",", ")", "=", "args", "sortedfastafile", "=", "fastafile", ".", "rsplit", "(", "\".\"", ",", "1", ")", "[", "0", "]", "+", "\".sorted.fasta\"", "f", "=", "Fasta", "(", "fastafile", ",", "index", "=", "False", ")", "fw", "=", "must_open", "(", "sortedfastafile", ",", "\"w\"", ")", "if", "opts", ".", "sizes", ":", "# Sort by decreasing size", "sortlist", "=", "sorted", "(", "f", ".", "itersizes", "(", ")", ",", "key", "=", "lambda", "x", ":", "(", "-", "x", "[", "1", "]", ",", "x", "[", "0", "]", ")", ")", "logging", ".", "debug", "(", "\"Sort by size: max: {0}, min: {1}\"", ".", "format", "(", "sortlist", "[", "0", "]", ",", "sortlist", "[", "-", "1", "]", ")", ")", "sortlist", "=", "[", "x", "for", "x", ",", "s", "in", "sortlist", "]", "else", ":", "sortlist", "=", "sorted", "(", "f", ".", "iterkeys", "(", ")", ")", "for", "key", "in", "sortlist", ":", "rec", "=", "f", "[", "key", "]", "SeqIO", ".", "write", "(", "[", "rec", "]", ",", "fw", ",", "\"fasta\"", ")", "logging", ".", "debug", "(", "\"Sorted file written to `{0}`.\"", ".", "format", "(", "sortedfastafile", ")", ")", "fw", ".", "close", "(", ")", "return", "sortedfastafile" ]
https://github.com/tanghaibao/jcvi/blob/5e720870c0928996f8b77a38208106ff0447ccb6/jcvi/formats/fasta.py#L1092-L1130
sony/nnabla-examples
068be490aacf73740502a1c3b10f8b2d15a52d32
object-detection/centernet/src/lib/utils/voc_eval_lib/voc_datasets/pascal_voc.py
python
pascal_voc._get_default_path
(self, path=None)
Return the set path to PASCAL VOC.
Return the set path to PASCAL VOC.
[ "Return", "the", "set", "path", "to", "PASCAL", "VOC", "." ]
def _get_default_path(self, path=None): """ Return the set path to PASCAL VOC. """ if path is None: # Return the default path where PASCAL VOC is expected to be installed. return os.path.join(cfg.DATA_DIR, 'voc', 'VOCdevkit') else: return os.path.join(path, 'voc', 'VOCdevkit')
[ "def", "_get_default_path", "(", "self", ",", "path", "=", "None", ")", ":", "if", "path", "is", "None", ":", "# Return the default path where PASCAL VOC is expected to be installed.", "return", "os", ".", "path", ".", "join", "(", "cfg", ".", "DATA_DIR", ",", "'voc'", ",", "'VOCdevkit'", ")", "else", ":", "return", "os", ".", "path", ".", "join", "(", "path", ",", "'voc'", ",", "'VOCdevkit'", ")" ]
https://github.com/sony/nnabla-examples/blob/068be490aacf73740502a1c3b10f8b2d15a52d32/object-detection/centernet/src/lib/utils/voc_eval_lib/voc_datasets/pascal_voc.py#L104-L112
requests/requests-oauthlib
05a25a96522dc64c25224a4862f2342a6978e80d
requests_oauthlib/oauth1_session.py
python
OAuth1Session.authorization_url
(self, url, request_token=None, **kwargs)
return add_params_to_uri(url, kwargs.items())
Create an authorization URL by appending request_token and optional kwargs to url. This is the second step in the OAuth 1 workflow. The user should be redirected to this authorization URL, grant access to you, and then be redirected back to you. The redirection back can either be specified during client registration or by supplying a callback URI per request. :param url: The authorization endpoint URL. :param request_token: The previously obtained request token. :param kwargs: Optional parameters to append to the URL. :returns: The authorization URL with new parameters embedded. An example using a registered default callback URI. >>> request_token_url = 'https://api.twitter.com/oauth/request_token' >>> authorization_url = 'https://api.twitter.com/oauth/authorize' >>> oauth_session = OAuth1Session('client-key', client_secret='secret') >>> oauth_session.fetch_request_token(request_token_url) { 'oauth_token': 'sdf0o9823sjdfsdf', 'oauth_token_secret': '2kjshdfp92i34asdasd', } >>> oauth_session.authorization_url(authorization_url) 'https://api.twitter.com/oauth/authorize?oauth_token=sdf0o9823sjdfsdf' >>> oauth_session.authorization_url(authorization_url, foo='bar') 'https://api.twitter.com/oauth/authorize?oauth_token=sdf0o9823sjdfsdf&foo=bar' An example using an explicit callback URI. >>> request_token_url = 'https://api.twitter.com/oauth/request_token' >>> authorization_url = 'https://api.twitter.com/oauth/authorize' >>> oauth_session = OAuth1Session('client-key', client_secret='secret', callback_uri='https://127.0.0.1/callback') >>> oauth_session.fetch_request_token(request_token_url) { 'oauth_token': 'sdf0o9823sjdfsdf', 'oauth_token_secret': '2kjshdfp92i34asdasd', } >>> oauth_session.authorization_url(authorization_url) 'https://api.twitter.com/oauth/authorize?oauth_token=sdf0o9823sjdfsdf&oauth_callback=https%3A%2F%2F127.0.0.1%2Fcallback'
Create an authorization URL by appending request_token and optional kwargs to url.
[ "Create", "an", "authorization", "URL", "by", "appending", "request_token", "and", "optional", "kwargs", "to", "url", "." ]
def authorization_url(self, url, request_token=None, **kwargs): """Create an authorization URL by appending request_token and optional kwargs to url. This is the second step in the OAuth 1 workflow. The user should be redirected to this authorization URL, grant access to you, and then be redirected back to you. The redirection back can either be specified during client registration or by supplying a callback URI per request. :param url: The authorization endpoint URL. :param request_token: The previously obtained request token. :param kwargs: Optional parameters to append to the URL. :returns: The authorization URL with new parameters embedded. An example using a registered default callback URI. >>> request_token_url = 'https://api.twitter.com/oauth/request_token' >>> authorization_url = 'https://api.twitter.com/oauth/authorize' >>> oauth_session = OAuth1Session('client-key', client_secret='secret') >>> oauth_session.fetch_request_token(request_token_url) { 'oauth_token': 'sdf0o9823sjdfsdf', 'oauth_token_secret': '2kjshdfp92i34asdasd', } >>> oauth_session.authorization_url(authorization_url) 'https://api.twitter.com/oauth/authorize?oauth_token=sdf0o9823sjdfsdf' >>> oauth_session.authorization_url(authorization_url, foo='bar') 'https://api.twitter.com/oauth/authorize?oauth_token=sdf0o9823sjdfsdf&foo=bar' An example using an explicit callback URI. >>> request_token_url = 'https://api.twitter.com/oauth/request_token' >>> authorization_url = 'https://api.twitter.com/oauth/authorize' >>> oauth_session = OAuth1Session('client-key', client_secret='secret', callback_uri='https://127.0.0.1/callback') >>> oauth_session.fetch_request_token(request_token_url) { 'oauth_token': 'sdf0o9823sjdfsdf', 'oauth_token_secret': '2kjshdfp92i34asdasd', } >>> oauth_session.authorization_url(authorization_url) 'https://api.twitter.com/oauth/authorize?oauth_token=sdf0o9823sjdfsdf&oauth_callback=https%3A%2F%2F127.0.0.1%2Fcallback' """ kwargs["oauth_token"] = request_token or self._client.client.resource_owner_key log.debug("Adding parameters %s to url %s", kwargs, url) return add_params_to_uri(url, kwargs.items())
[ "def", "authorization_url", "(", "self", ",", "url", ",", "request_token", "=", "None", ",", "*", "*", "kwargs", ")", ":", "kwargs", "[", "\"oauth_token\"", "]", "=", "request_token", "or", "self", ".", "_client", ".", "client", ".", "resource_owner_key", "log", ".", "debug", "(", "\"Adding parameters %s to url %s\"", ",", "kwargs", ",", "url", ")", "return", "add_params_to_uri", "(", "url", ",", "kwargs", ".", "items", "(", ")", ")" ]
https://github.com/requests/requests-oauthlib/blob/05a25a96522dc64c25224a4862f2342a6978e80d/requests_oauthlib/oauth1_session.py#L214-L258
bayespy/bayespy
0e6e6130c888a4295cc9421d61d4ad27b2960ebb
bayespy/utils/linalg.py
python
inv
(A, ndim=1)
General array inversion. Supports broadcasting and inversion of multidimensional arrays. For instance, an array with shape (4,3,2,3,2) could mean that there are four (3*2) x (3*2) matrices to be inverted. This can be done by inv(A, ndim=2). For inverting scalars, ndim=0. For inverting matrices, ndim=1.
General array inversion.
[ "General", "array", "inversion", "." ]
def inv(A, ndim=1): """ General array inversion. Supports broadcasting and inversion of multidimensional arrays. For instance, an array with shape (4,3,2,3,2) could mean that there are four (3*2) x (3*2) matrices to be inverted. This can be done by inv(A, ndim=2). For inverting scalars, ndim=0. For inverting matrices, ndim=1. """ A = np.asanyarray(A) if ndim == 0: return 1 / A elif ndim == 1: return np.linalg.inv(A) else: raise NotImplementedError()
[ "def", "inv", "(", "A", ",", "ndim", "=", "1", ")", ":", "A", "=", "np", ".", "asanyarray", "(", "A", ")", "if", "ndim", "==", "0", ":", "return", "1", "/", "A", "elif", "ndim", "==", "1", ":", "return", "np", ".", "linalg", ".", "inv", "(", "A", ")", "else", ":", "raise", "NotImplementedError", "(", ")" ]
https://github.com/bayespy/bayespy/blob/0e6e6130c888a4295cc9421d61d4ad27b2960ebb/bayespy/utils/linalg.py#L389-L404
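The inv record dispatches on ndim: reciprocal for scalars, np.linalg.inv for matrices. A quick illustration, including the broadcasting over leading axes that np.linalg.inv itself provides:

import numpy as np

A = np.array([[2.0, 0.0], [0.0, 4.0]])
print(np.linalg.inv(A))            # ndim=1 path: [[0.5 0.] [0. 0.25]]
print(1 / np.float64(4.0))         # ndim=0 path: 0.25
stack = np.stack([A, 2 * A])       # shape (2, 2, 2): two stacked matrices
print(np.linalg.inv(stack).shape)  # (2, 2, 2), inverted matrix by matrix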
projecthamster/hamster
19d160090de30e756bdc3122ff935bdaa86e2843
waflib/Task.py
python
Task.runnable_status
(self)
return (self.always_run and RUN_ME) or SKIP_ME
Returns the Task status :return: a task state in :py:const:`waflib.Task.RUN_ME`, :py:const:`waflib.Task.SKIP_ME`, :py:const:`waflib.Task.CANCEL_ME` or :py:const:`waflib.Task.ASK_LATER`. :rtype: int
Returns the Task status
[ "Returns", "the", "Task", "status" ]
def runnable_status(self): """ Returns the Task status :return: a task state in :py:const:`waflib.Task.RUN_ME`, :py:const:`waflib.Task.SKIP_ME`, :py:const:`waflib.Task.CANCEL_ME` or :py:const:`waflib.Task.ASK_LATER`. :rtype: int """ bld = self.generator.bld if bld.is_install < 0: return SKIP_ME for t in self.run_after: if not t.hasrun: return ASK_LATER elif t.hasrun < SKIPPED: # a dependency has an error return CANCEL_ME # first compute the signature try: new_sig = self.signature() except Errors.TaskNotReady: return ASK_LATER # compare the signature to a signature computed previously key = self.uid() try: prev_sig = bld.task_sigs[key] except KeyError: Logs.debug('task: task %r must run: it was never run before or the task code changed', self) return RUN_ME if new_sig != prev_sig: Logs.debug('task: task %r must run: the task signature changed', self) return RUN_ME # compare the signatures of the outputs for node in self.outputs: sig = bld.node_sigs.get(node) if not sig: Logs.debug('task: task %r must run: an output node has no signature', self) return RUN_ME if sig != key: Logs.debug('task: task %r must run: an output node was produced by another task', self) return RUN_ME if not node.exists(): Logs.debug('task: task %r must run: an output node does not exist', self) return RUN_ME return (self.always_run and RUN_ME) or SKIP_ME
[ "def", "runnable_status", "(", "self", ")", ":", "bld", "=", "self", ".", "generator", ".", "bld", "if", "bld", ".", "is_install", "<", "0", ":", "return", "SKIP_ME", "for", "t", "in", "self", ".", "run_after", ":", "if", "not", "t", ".", "hasrun", ":", "return", "ASK_LATER", "elif", "t", ".", "hasrun", "<", "SKIPPED", ":", "# a dependency has an error", "return", "CANCEL_ME", "# first compute the signature", "try", ":", "new_sig", "=", "self", ".", "signature", "(", ")", "except", "Errors", ".", "TaskNotReady", ":", "return", "ASK_LATER", "# compare the signature to a signature computed previously", "key", "=", "self", ".", "uid", "(", ")", "try", ":", "prev_sig", "=", "bld", ".", "task_sigs", "[", "key", "]", "except", "KeyError", ":", "Logs", ".", "debug", "(", "'task: task %r must run: it was never run before or the task code changed'", ",", "self", ")", "return", "RUN_ME", "if", "new_sig", "!=", "prev_sig", ":", "Logs", ".", "debug", "(", "'task: task %r must run: the task signature changed'", ",", "self", ")", "return", "RUN_ME", "# compare the signatures of the outputs", "for", "node", "in", "self", ".", "outputs", ":", "sig", "=", "bld", ".", "node_sigs", ".", "get", "(", "node", ")", "if", "not", "sig", ":", "Logs", ".", "debug", "(", "'task: task %r must run: an output node has no signature'", ",", "self", ")", "return", "RUN_ME", "if", "sig", "!=", "key", ":", "Logs", ".", "debug", "(", "'task: task %r must run: an output node was produced by another task'", ",", "self", ")", "return", "RUN_ME", "if", "not", "node", ".", "exists", "(", ")", ":", "Logs", ".", "debug", "(", "'task: task %r must run: an output node does not exist'", ",", "self", ")", "return", "RUN_ME", "return", "(", "self", ".", "always_run", "and", "RUN_ME", ")", "or", "SKIP_ME" ]
https://github.com/projecthamster/hamster/blob/19d160090de30e756bdc3122ff935bdaa86e2843/waflib/Task.py#L658-L708
TarrySingh/Artificial-Intelligence-Deep-Learning-Machine-Learning-Tutorials
5bb97d7e3ffd913abddb4cfa7d78a1b4c868890e
scipy/thinkstats2.py
python
PmfProbGreater
(pmf1, pmf2)
return total
Probability that a value from pmf1 is greater than a value from pmf2. Args: pmf1: Pmf object pmf2: Pmf object Returns: float probability
Probability that a value from pmf1 is greater than a value from pmf2.
[ "Probability", "that", "a", "value", "from", "pmf1", "is", "greater", "than", "a", "value", "from", "pmf2", "." ]
def PmfProbGreater(pmf1, pmf2): """Probability that a value from pmf1 is greater than a value from pmf2. Args: pmf1: Pmf object pmf2: Pmf object Returns: float probability """ total = 0.0 for v1, p1 in pmf1.Items(): for v2, p2 in pmf2.Items(): if v1 > v2: total += p1 * p2 return total
[ "def", "PmfProbGreater", "(", "pmf1", ",", "pmf2", ")", ":", "total", "=", "0.0", "for", "v1", ",", "p1", "in", "pmf1", ".", "Items", "(", ")", ":", "for", "v2", ",", "p2", "in", "pmf2", ".", "Items", "(", ")", ":", "if", "v1", ">", "v2", ":", "total", "+=", "p1", "*", "p2", "return", "total" ]
https://github.com/TarrySingh/Artificial-Intelligence-Deep-Learning-Machine-Learning-Tutorials/blob/5bb97d7e3ffd913abddb4cfa7d78a1b4c868890e/scipy/thinkstats2.py#L1665-L1680
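A minimal, self-contained sketch of the same double-loop computation, using plain {value: probability} dicts in place of thinkstats2 Pmf objects (the Pmf class and its Items() API are taken from the record above):

    def pmf_prob_greater(pmf1, pmf2):
        # P(V1 > V2) for independent draws from two discrete distributions,
        # each given as a {value: probability} dict.
        total = 0.0
        for v1, p1 in pmf1.items():
            for v2, p2 in pmf2.items():
                if v1 > v2:
                    total += p1 * p2
        return total

    # A fair die exceeds a fixed value of 3 with probability 3/6.
    die = {v: 1 / 6 for v in range(1, 7)}
    assert abs(pmf_prob_greater(die, {3: 1.0}) - 0.5) < 1e-9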
IronLanguages/ironpython3
7a7bb2a872eeab0d1009fc8a6e24dca43f65b693
Src/StdLib/Lib/collections/__init__.py
python
Counter.__delitem__
(self, elem)
Like dict.__delitem__() but does not raise KeyError for missing values.
Like dict.__delitem__() but does not raise KeyError for missing values.
[ "Like", "dict", ".", "__delitem__", "()", "but", "does", "not", "raise", "KeyError", "for", "missing", "values", "." ]
def __delitem__(self, elem): 'Like dict.__delitem__() but does not raise KeyError for missing values.' if elem in self: super().__delitem__(elem)
[ "def", "__delitem__", "(", "self", ",", "elem", ")", ":", "if", "elem", "in", "self", ":", "super", "(", ")", ".", "__delitem__", "(", "elem", ")" ]
https://github.com/IronLanguages/ironpython3/blob/7a7bb2a872eeab0d1009fc8a6e24dca43f65b693/Src/StdLib/Lib/collections/__init__.py#L598-L601
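A quick illustration of the documented behaviour; everything here is standard library, so nothing is assumed:

    from collections import Counter

    c = Counter("aab")
    del c["z"]   # missing key: silently ignored, no KeyError
    del c["a"]   # present key: removed entirely, not merely decremented
    assert c == Counter({"b": 1})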
python-streamz/streamz
8744c83a0fad1fbd9ee9318d1a79ee538415a4e4
streamz/collection.py
python
OperatorMixin.__mod__
(self, other)
return self.map_partitions(operator.mod, self, other)
[]
def __mod__(self, other): return self.map_partitions(operator.mod, self, other)
[ "def", "__mod__", "(", "self", ",", "other", ")", ":", "return", "self", ".", "map_partitions", "(", "operator", ".", "mod", ",", "self", ",", "other", ")" ]
https://github.com/python-streamz/streamz/blob/8744c83a0fad1fbd9ee9318d1a79ee538415a4e4/streamz/collection.py#L100-L101
python-telegram-bot/python-telegram-bot
ade1529986f5b6d394a65372d6a27045a70725b2
telegram/bot.py
python
Bot.edit_chat_invite_link
( self, chat_id: Union[str, int], invite_link: str, expire_date: Union[int, datetime] = None, member_limit: int = None, timeout: ODVInput[float] = DEFAULT_NONE, api_kwargs: JSONDict = None, name: str = None, creates_join_request: bool = None, )
return ChatInviteLink.de_json(result, self)
Use this method to edit a non-primary invite link created by the bot. The bot must be an administrator in the chat for this to work and must have the appropriate admin rights. Note: Though not stated explicitly in the official docs, Telegram changes not only the optional parameters that are explicitly passed, but also replaces all other optional parameters to the default values. However, since not documented, this behaviour may change unbeknown to PTB. .. versionadded:: 13.4 Args: chat_id (:obj:`int` | :obj:`str`): Unique identifier for the target chat or username of the target channel (in the format ``@channelusername``). invite_link (:obj:`str`): The invite link to edit. expire_date (:obj:`int` | :obj:`datetime.datetime`, optional): Date when the link will expire. For timezone naive :obj:`datetime.datetime` objects, the default timezone of the bot will be used. member_limit (:obj:`int`, optional): Maximum number of users that can be members of the chat simultaneously after joining the chat via this invite link; 1-99999. timeout (:obj:`int` | :obj:`float`, optional): If this value is specified, use it as the read timeout from the server (instead of the one specified during creation of the connection pool). api_kwargs (:obj:`dict`, optional): Arbitrary keyword arguments to be passed to the Telegram API. name (:obj:`str`, optional): Invite link name; 0-32 characters. .. versionadded:: 13.8 creates_join_request (:obj:`bool`, optional): :obj:`True`, if users joining the chat via the link need to be approved by chat administrators. If :obj:`True`, ``member_limit`` can't be specified. .. versionadded:: 13.8 Returns: :class:`telegram.ChatInviteLink` Raises: :class:`telegram.error.TelegramError`
Use this method to edit a non-primary invite link created by the bot. The bot must be an administrator in the chat for this to work and must have the appropriate admin rights.
[ "Use", "this", "method", "to", "edit", "a", "non", "-", "primary", "invite", "link", "created", "by", "the", "bot", ".", "The", "bot", "must", "be", "an", "administrator", "in", "the", "chat", "for", "this", "to", "work", "and", "must", "have", "the", "appropriate", "admin", "rights", "." ]
def edit_chat_invite_link( self, chat_id: Union[str, int], invite_link: str, expire_date: Union[int, datetime] = None, member_limit: int = None, timeout: ODVInput[float] = DEFAULT_NONE, api_kwargs: JSONDict = None, name: str = None, creates_join_request: bool = None, ) -> ChatInviteLink: """ Use this method to edit a non-primary invite link created by the bot. The bot must be an administrator in the chat for this to work and must have the appropriate admin rights. Note: Though not stated explicitly in the official docs, Telegram changes not only the optional parameters that are explicitly passed, but also replaces all other optional parameters to the default values. However, since not documented, this behaviour may change unbeknown to PTB. .. versionadded:: 13.4 Args: chat_id (:obj:`int` | :obj:`str`): Unique identifier for the target chat or username of the target channel (in the format ``@channelusername``). invite_link (:obj:`str`): The invite link to edit. expire_date (:obj:`int` | :obj:`datetime.datetime`, optional): Date when the link will expire. For timezone naive :obj:`datetime.datetime` objects, the default timezone of the bot will be used. member_limit (:obj:`int`, optional): Maximum number of users that can be members of the chat simultaneously after joining the chat via this invite link; 1-99999. timeout (:obj:`int` | :obj:`float`, optional): If this value is specified, use it as the read timeout from the server (instead of the one specified during creation of the connection pool). api_kwargs (:obj:`dict`, optional): Arbitrary keyword arguments to be passed to the Telegram API. name (:obj:`str`, optional): Invite link name; 0-32 characters. .. versionadded:: 13.8 creates_join_request (:obj:`bool`, optional): :obj:`True`, if users joining the chat via the link need to be approved by chat administrators. If :obj:`True`, ``member_limit`` can't be specified. .. versionadded:: 13.8 Returns: :class:`telegram.ChatInviteLink` Raises: :class:`telegram.error.TelegramError` """ if creates_join_request and member_limit: raise ValueError( "If `creates_join_request` is `True`, `member_limit` can't be specified." ) data: JSONDict = {'chat_id': chat_id, 'invite_link': invite_link} if expire_date is not None: if isinstance(expire_date, datetime): expire_date = to_timestamp( expire_date, tzinfo=self.defaults.tzinfo if self.defaults else None ) data['expire_date'] = expire_date if member_limit is not None: data['member_limit'] = member_limit if name is not None: data['name'] = name if creates_join_request is not None: data['creates_join_request'] = creates_join_request result = self._post('editChatInviteLink', data, timeout=timeout, api_kwargs=api_kwargs) return ChatInviteLink.de_json(result, self)
[ "def", "edit_chat_invite_link", "(", "self", ",", "chat_id", ":", "Union", "[", "str", ",", "int", "]", ",", "invite_link", ":", "str", ",", "expire_date", ":", "Union", "[", "int", ",", "datetime", "]", "=", "None", ",", "member_limit", ":", "int", "=", "None", ",", "timeout", ":", "ODVInput", "[", "float", "]", "=", "DEFAULT_NONE", ",", "api_kwargs", ":", "JSONDict", "=", "None", ",", "name", ":", "str", "=", "None", ",", "creates_join_request", ":", "bool", "=", "None", ",", ")", "->", "ChatInviteLink", ":", "if", "creates_join_request", "and", "member_limit", ":", "raise", "ValueError", "(", "\"If `creates_join_request` is `True`, `member_limit` can't be specified.\"", ")", "data", ":", "JSONDict", "=", "{", "'chat_id'", ":", "chat_id", ",", "'invite_link'", ":", "invite_link", "}", "if", "expire_date", "is", "not", "None", ":", "if", "isinstance", "(", "expire_date", ",", "datetime", ")", ":", "expire_date", "=", "to_timestamp", "(", "expire_date", ",", "tzinfo", "=", "self", ".", "defaults", ".", "tzinfo", "if", "self", ".", "defaults", "else", "None", ")", "data", "[", "'expire_date'", "]", "=", "expire_date", "if", "member_limit", "is", "not", "None", ":", "data", "[", "'member_limit'", "]", "=", "member_limit", "if", "name", "is", "not", "None", ":", "data", "[", "'name'", "]", "=", "name", "if", "creates_join_request", "is", "not", "None", ":", "data", "[", "'creates_join_request'", "]", "=", "creates_join_request", "result", "=", "self", ".", "_post", "(", "'editChatInviteLink'", ",", "data", ",", "timeout", "=", "timeout", ",", "api_kwargs", "=", "api_kwargs", ")", "return", "ChatInviteLink", ".", "de_json", "(", "result", ",", "self", ")" ]
https://github.com/python-telegram-bot/python-telegram-bot/blob/ade1529986f5b6d394a65372d6a27045a70725b2/telegram/bot.py#L4258-L4337
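A usage sketch for the method above; the token, chat id, and invite link are placeholders, not real values:

    from telegram import Bot

    bot = Bot(token="<BOT_TOKEN>")  # placeholder token
    # Tighten an existing link. Note that member_limit and
    # creates_join_request=True are mutually exclusive (ValueError).
    link = bot.edit_chat_invite_link(
        chat_id="@channelusername",          # placeholder chat
        invite_link="https://t.me/+abcdef",  # placeholder link
        member_limit=10,
        name="spring promo",
    )
    print(link.invite_link)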
tendenci/tendenci
0f2c348cc0e7d41bc56f50b00ce05544b083bf1d
tendenci/apps/boxes/utils.py
python
render_content
(content, arg=None, limit=None, image_preview=True)
return content
Checks for template tags within the content and renders them; currently it only renders streams
Checks for template tags within the content and renders them; currently it only renders streams
[ "Checks", "for", "template", "tags", "within", "the", "content", "and", "renders", "them", "currently", "it", "only", "renders", "streams" ]
def render_content(content, arg=None, limit=None, image_preview=True): """ Checks for template tags within the content and renders them; currently it only renders streams """ match = detect_template_tags(content) if match: p = re.compile(r'{%([\w\s\=]+)%}') tags = list(set(re.findall(p, content))) TEMPLATE_TAGS = ['{% load box_tags %}'] if tags: for tag in tags: tag = "{%"+tag+"%}" t = engines['django'].from_string(''.join(TEMPLATE_TAGS) + tag) rendered_tag = t.render(context={'user': None}) content = content.replace(tag, rendered_tag) return content
[ "def", "render_content", "(", "content", ",", "arg", "=", "None", ",", "limit", "=", "None", ",", "image_preview", "=", "True", ")", ":", "match", "=", "detect_template_tags", "(", "content", ")", "if", "match", ":", "p", "=", "re", ".", "compile", "(", "r'{%([\\w\\s\\=]+)%}'", ")", "tags", "=", "list", "(", "set", "(", "re", ".", "findall", "(", "p", ",", "content", ")", ")", ")", "TEMPLATE_TAGS", "=", "[", "'{% load box_tags %}'", "]", "if", "tags", ":", "for", "tag", "in", "tags", ":", "tag", "=", "\"{%\"", "+", "tag", "+", "\"%}\"", "t", "=", "engines", "[", "'django'", "]", ".", "from_string", "(", "''", ".", "join", "(", "TEMPLATE_TAGS", ")", "+", "tag", ")", "rendered_tag", "=", "t", ".", "render", "(", "context", "=", "{", "'user'", ":", "None", "}", ")", "content", "=", "content", ".", "replace", "(", "tag", ",", "rendered_tag", ")", "return", "content" ]
https://github.com/tendenci/tendenci/blob/0f2c348cc0e7d41bc56f50b00ce05544b083bf1d/tendenci/apps/boxes/utils.py#L15-L32
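The tag-detection step can be exercised on its own; this standalone snippet shows what the regex in render_content actually captures (sample content invented):

    import re

    p = re.compile(r'{%([\w\s\=]+)%}')
    content = "Intro {% box 12 %} middle {% box slug=footer %} end"
    print(list(set(re.findall(p, content))))
    # e.g. [' box 12 ', ' box slug=footer '], the inner text with braces stripped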
tendenci/tendenci
0f2c348cc0e7d41bc56f50b00ce05544b083bf1d
tendenci/apps/help_files/views.py
python
export
(request, template_name="help_files/export.html")
return render_to_resp(request=request, template_name=template_name, context={ })
Export Help Files
Export Help Files
[ "Export", "Help", "Files" ]
def export(request, template_name="help_files/export.html"): """Export Help Files""" if not request.user.is_superuser: raise Http403 if request.method == 'POST': # initialize initial values fields = [ 'slug', 'topics', 'question', 'answer', 'level', 'is_faq', 'is_featured', 'is_video', 'syndicate', 'view_totals', ] export_id = run_export_task('help_files', 'helpfile', fields) EventLog.objects.log() return redirect('export.status', export_id) return render_to_resp(request=request, template_name=template_name, context={ })
[ "def", "export", "(", "request", ",", "template_name", "=", "\"help_files/export.html\"", ")", ":", "if", "not", "request", ".", "user", ".", "is_superuser", ":", "raise", "Http403", "if", "request", ".", "method", "==", "'POST'", ":", "# initialize initial values", "fields", "=", "[", "'slug'", ",", "'topics'", ",", "'question'", ",", "'answer'", ",", "'level'", ",", "'is_faq'", ",", "'is_featured'", ",", "'is_video'", ",", "'syndicate'", ",", "'view_totals'", ",", "]", "export_id", "=", "run_export_task", "(", "'help_files'", ",", "'helpfile'", ",", "fields", ")", "EventLog", ".", "objects", ".", "log", "(", ")", "return", "redirect", "(", "'export.status'", ",", "export_id", ")", "return", "render_to_resp", "(", "request", "=", "request", ",", "template_name", "=", "template_name", ",", "context", "=", "{", "}", ")" ]
https://github.com/tendenci/tendenci/blob/0f2c348cc0e7d41bc56f50b00ce05544b083bf1d/tendenci/apps/help_files/views.py#L229-L254
CenterForOpenScience/osf.io
cc02691be017e61e2cd64f19b848b2f4c18dcc84
website/conferences/utils.py
python
provision_node
(conference, message, node, user)
:param Conference conference: :param ConferenceMessage message: :param Node node: :param User user:
:param Conference conference: :param ConferenceMessage message: :param Node node: :param User user:
[ ":", "param", "Conference", "conference", ":", ":", "param", "ConferenceMessage", "message", ":", ":", "param", "Node", "node", ":", ":", "param", "User", "user", ":" ]
def provision_node(conference, message, node, user): """ :param Conference conference: :param ConferenceMessage message: :param Node node: :param User user: """ auth = Auth(user=user) try: wiki = WikiPage.objects.create_for_node(node, 'home', message.text, auth) except NodeStateError: wiki = WikiPage.objects.get_for_node(node, 'home') wiki.update(user, message.text) if conference.admins.exists(): node.add_contributors(prepare_contributors(conference.admins.all()), log=False) if not message.is_spam and conference.public_projects: node.set_privacy('public', meeting_creation=True, auth=auth) conference.submissions.add(node) node.add_tag(message.conference_category, auth=auth) for systag in ['emailed', message.conference_name, message.conference_category]: node.add_system_tag(systag, save=False) if message.is_spam: node.add_system_tag('spam', save=False) node.save()
[ "def", "provision_node", "(", "conference", ",", "message", ",", "node", ",", "user", ")", ":", "auth", "=", "Auth", "(", "user", "=", "user", ")", "try", ":", "wiki", "=", "WikiPage", ".", "objects", ".", "create_for_node", "(", "node", ",", "'home'", ",", "message", ".", "text", ",", "auth", ")", "except", "NodeStateError", ":", "wiki", "=", "WikiPage", ".", "objects", ".", "get_for_node", "(", "node", ",", "'home'", ")", "wiki", ".", "update", "(", "user", ",", "message", ".", "text", ")", "if", "conference", ".", "admins", ".", "exists", "(", ")", ":", "node", ".", "add_contributors", "(", "prepare_contributors", "(", "conference", ".", "admins", ".", "all", "(", ")", ")", ",", "log", "=", "False", ")", "if", "not", "message", ".", "is_spam", "and", "conference", ".", "public_projects", ":", "node", ".", "set_privacy", "(", "'public'", ",", "meeting_creation", "=", "True", ",", "auth", "=", "auth", ")", "conference", ".", "submissions", ".", "add", "(", "node", ")", "node", ".", "add_tag", "(", "message", ".", "conference_category", ",", "auth", "=", "auth", ")", "for", "systag", "in", "[", "'emailed'", ",", "message", ".", "conference_name", ",", "message", ".", "conference_category", "]", ":", "node", ".", "add_system_tag", "(", "systag", ",", "save", "=", "False", ")", "if", "message", ".", "is_spam", ":", "node", ".", "add_system_tag", "(", "'spam'", ",", "save", "=", "False", ")", "node", ".", "save", "(", ")" ]
https://github.com/CenterForOpenScience/osf.io/blob/cc02691be017e61e2cd64f19b848b2f4c18dcc84/website/conferences/utils.py#L25-L52
Fantomas42/django-blog-zinnia
881101a9d1d455b2fc581d6f4ae0947cdd8126c6
zinnia/calendar.py
python
Calendar.formatmonthname
(self, theyear, themonth, withyear=True)
return '<caption>%s</caption>' % monthname
Return a month name translated as a table caption.
Return a month name translated as a table caption.
[ "Return", "a", "month", "name", "translated", "as", "a", "table", "caption", "." ]
def formatmonthname(self, theyear, themonth, withyear=True): """Return a month name translated as a table caption.""" monthname = '%s %s' % (MONTHS[themonth].title(), theyear) return '<caption>%s</caption>' % monthname
[ "def", "formatmonthname", "(", "self", ",", "theyear", ",", "themonth", ",", "withyear", "=", "True", ")", ":", "monthname", "=", "'%s %s'", "%", "(", "MONTHS", "[", "themonth", "]", ".", "title", "(", ")", ",", "theyear", ")", "return", "'<caption>%s</caption>'", "%", "monthname" ]
https://github.com/Fantomas42/django-blog-zinnia/blob/881101a9d1d455b2fc581d6f4ae0947cdd8126c6/zinnia/calendar.py#L90-L93
KalleHallden/AutoTimer
2d954216700c4930baa154e28dbddc34609af7ce
env/lib/python2.7/site-packages/pkg_resources/_vendor/packaging/specifiers.py
python
LegacySpecifier._coerce_version
(self, version)
return version
[]
def _coerce_version(self, version): if not isinstance(version, LegacyVersion): version = LegacyVersion(str(version)) return version
[ "def", "_coerce_version", "(", "self", ",", "version", ")", ":", "if", "not", "isinstance", "(", "version", ",", "LegacyVersion", ")", ":", "version", "=", "LegacyVersion", "(", "str", "(", "version", ")", ")", "return", "version" ]
https://github.com/KalleHallden/AutoTimer/blob/2d954216700c4930baa154e28dbddc34609af7ce/env/lib/python2.7/site-packages/pkg_resources/_vendor/packaging/specifiers.py#L242-L245
mopidy/mopidy
796ebe9b4e1fe7efdb24199cda810492dcb903a1
mopidy/audio/utils.py
python
calculate_duration
(num_samples, sample_rate)
return Gst.util_uint64_scale(num_samples, Gst.SECOND, sample_rate)
Determine duration of samples using GStreamer helper for precise math.
Determine duration of samples using GStreamer helper for precise math.
[ "Determine", "duration", "of", "samples", "using", "GStreamer", "helper", "for", "precise", "math", "." ]
def calculate_duration(num_samples, sample_rate): """Determine duration of samples using GStreamer helper for precise math.""" return Gst.util_uint64_scale(num_samples, Gst.SECOND, sample_rate)
[ "def", "calculate_duration", "(", "num_samples", ",", "sample_rate", ")", ":", "return", "Gst", ".", "util_uint64_scale", "(", "num_samples", ",", "Gst", ".", "SECOND", ",", "sample_rate", ")" ]
https://github.com/mopidy/mopidy/blob/796ebe9b4e1fe7efdb24199cda810492dcb903a1/mopidy/audio/utils.py#L5-L8
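Gst.util_uint64_scale evaluates num_samples * Gst.SECOND / sample_rate in wide integer arithmetic (Gst.SECOND is one second in nanoseconds, truncating on division). Since Python integers are arbitrary precision, a plain-integer sketch reproduces the math without GStreamer installed:

    GST_SECOND = 1_000_000_000  # GStreamer clock time is in nanoseconds

    def calculate_duration(num_samples, sample_rate):
        # Integer math keeps sample-accurate precision; floats would drift.
        return num_samples * GST_SECOND // sample_rate

    assert calculate_duration(44100, 44100) == GST_SECOND  # exactly one second
    assert calculate_duration(1, 44100) == 22675           # one frame, ~22.675 us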
rajarshd/Multi-Step-Reasoning
3218d626839f7217554f38d82e00e4f460b508e4
msr/reader/model.py
python
Model.init_optimizer
(self, state_dict=None)
Initialize an optimizer for the free parameters of the network. Args: state_dict: network parameters
Initialize an optimizer for the free parameters of the network.
[ "Initialize", "an", "optimizer", "for", "the", "free", "parameters", "of", "the", "network", "." ]
def init_optimizer(self, state_dict=None): """Initialize an optimizer for the free parameters of the network. Args: state_dict: network parameters """ if self.args.fix_embeddings: for p in self.network.embedding.parameters(): p.requires_grad = False parameters = [p for p in self.network.parameters() if p.requires_grad] if self.multi_step_reasoner is not None: parameters += [p for p in self.multi_step_reasoner.parameters() if p.requires_grad] parameters += [p for p in self.reader_self_attn.parameters() if p.requires_grad] if self.multi_step_reader is not None: parameters += [p for p in self.multi_step_reader.parameters() if p.requires_grad] if self.args.optimizer == 'sgd': self.optimizer = optim.SGD(parameters, self.args.learning_rate, momentum=self.args.momentum, weight_decay=self.args.weight_decay) elif self.args.optimizer == 'adamax': self.optimizer = optim.Adamax(parameters, weight_decay=self.args.weight_decay) else: raise RuntimeError('Unsupported optimizer: %s' % self.args.optimizer)
[ "def", "init_optimizer", "(", "self", ",", "state_dict", "=", "None", ")", ":", "if", "self", ".", "args", ".", "fix_embeddings", ":", "for", "p", "in", "self", ".", "network", ".", "embedding", ".", "parameters", "(", ")", ":", "p", ".", "requires_grad", "=", "False", "parameters", "=", "[", "p", "for", "p", "in", "self", ".", "network", ".", "parameters", "(", ")", "if", "p", ".", "requires_grad", "]", "if", "self", ".", "multi_step_reasoner", "is", "not", "None", ":", "parameters", "+=", "[", "p", "for", "p", "in", "self", ".", "multi_step_reasoner", ".", "parameters", "(", ")", "if", "p", ".", "requires_grad", "]", "parameters", "+=", "[", "p", "for", "p", "in", "self", ".", "reader_self_attn", ".", "parameters", "(", ")", "if", "p", ".", "requires_grad", "]", "if", "self", ".", "multi_step_reader", "is", "not", "None", ":", "parameters", "+=", "[", "p", "for", "p", "in", "self", ".", "multi_step_reader", ".", "parameters", "(", ")", "if", "p", ".", "requires_grad", "]", "if", "self", ".", "args", ".", "optimizer", "==", "'sgd'", ":", "self", ".", "optimizer", "=", "optim", ".", "SGD", "(", "parameters", ",", "self", ".", "args", ".", "learning_rate", ",", "momentum", "=", "self", ".", "args", ".", "momentum", ",", "weight_decay", "=", "self", ".", "args", ".", "weight_decay", ")", "elif", "self", ".", "args", ".", "optimizer", "==", "'adamax'", ":", "self", ".", "optimizer", "=", "optim", ".", "Adamax", "(", "parameters", ",", "weight_decay", "=", "self", ".", "args", ".", "weight_decay", ")", "else", ":", "raise", "RuntimeError", "(", "'Unsupported optimizer: %s'", "%", "self", ".", "args", ".", "optimizer", ")" ]
https://github.com/rajarshd/Multi-Step-Reasoning/blob/3218d626839f7217554f38d82e00e4f460b508e4/msr/reader/model.py#L225-L251
tomplus/kubernetes_asyncio
f028cc793e3a2c519be6a52a49fb77ff0b014c9b
kubernetes_asyncio/client/api/core_v1_api.py
python
CoreV1Api.list_namespaced_event
(self, namespace, **kwargs)
return self.list_namespaced_event_with_http_info(namespace, **kwargs)
list_namespaced_event # noqa: E501 list or watch objects of kind Event # noqa: E501 This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.list_namespaced_event(namespace, async_req=True) >>> result = thread.get() :param async_req bool: execute request asynchronously :param str namespace: object name and auth scope, such as for teams and projects (required) :param str pretty: If 'true', then the output is pretty printed. :param bool allow_watch_bookmarks: allowWatchBookmarks requests watch events with type \"BOOKMARK\". Servers that do not implement bookmarks may ignore this flag and bookmarks are sent at the server's discretion. Clients should not assume bookmarks are returned at any specific interval, nor may they assume the server will send any BOOKMARK event during a session. If this is not a watch, this field is ignored. If the feature gate WatchBookmarks is not enabled in apiserver, this field is ignored. :param str _continue: The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\". This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications. :param str field_selector: A selector to restrict the list of returned objects by their fields. Defaults to everything. :param str label_selector: A selector to restrict the list of returned objects by their labels. Defaults to everything. :param int limit: limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned. :param str resource_version: resourceVersion sets a constraint on what resource versions a request may be served from. See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. Defaults to unset :param str resource_version_match: resourceVersionMatch determines how resourceVersion is applied to list calls. It is highly recommended that resourceVersionMatch be set for list calls where resourceVersion is set See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. Defaults to unset :param int timeout_seconds: Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity. :param bool watch: Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion. :param _preload_content: if False, the urllib3.HTTPResponse object will be returned without reading/decoding response data. Default is True. :param _request_timeout: timeout setting for this request. If one number provided, it will be total request timeout. It can also be a pair (tuple) of (connection, read) timeouts. :return: CoreV1EventList If the method is called asynchronously, returns the request thread.
list_namespaced_event # noqa: E501
[ "list_namespaced_event", "#", "noqa", ":", "E501" ]
def list_namespaced_event(self, namespace, **kwargs): # noqa: E501 """list_namespaced_event # noqa: E501 list or watch objects of kind Event # noqa: E501 This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.list_namespaced_event(namespace, async_req=True) >>> result = thread.get() :param async_req bool: execute request asynchronously :param str namespace: object name and auth scope, such as for teams and projects (required) :param str pretty: If 'true', then the output is pretty printed. :param bool allow_watch_bookmarks: allowWatchBookmarks requests watch events with type \"BOOKMARK\". Servers that do not implement bookmarks may ignore this flag and bookmarks are sent at the server's discretion. Clients should not assume bookmarks are returned at any specific interval, nor may they assume the server will send any BOOKMARK event during a session. If this is not a watch, this field is ignored. If the feature gate WatchBookmarks is not enabled in apiserver, this field is ignored. :param str _continue: The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\". This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications. :param str field_selector: A selector to restrict the list of returned objects by their fields. Defaults to everything. :param str label_selector: A selector to restrict the list of returned objects by their labels. Defaults to everything. :param int limit: limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned. :param str resource_version: resourceVersion sets a constraint on what resource versions a request may be served from. See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. Defaults to unset :param str resource_version_match: resourceVersionMatch determines how resourceVersion is applied to list calls. It is highly recommended that resourceVersionMatch be set for list calls where resourceVersion is set See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. Defaults to unset :param int timeout_seconds: Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity. :param bool watch: Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion. :param _preload_content: if False, the urllib3.HTTPResponse object will be returned without reading/decoding response data. Default is True. :param _request_timeout: timeout setting for this request. If one number provided, it will be total request timeout. It can also be a pair (tuple) of (connection, read) timeouts. :return: CoreV1EventList If the method is called asynchronously, returns the request thread. """ kwargs['_return_http_data_only'] = True return self.list_namespaced_event_with_http_info(namespace, **kwargs)
[ "def", "list_namespaced_event", "(", "self", ",", "namespace", ",", "*", "*", "kwargs", ")", ":", "# noqa: E501", "kwargs", "[", "'_return_http_data_only'", "]", "=", "True", "return", "self", ".", "list_namespaced_event_with_http_info", "(", "namespace", ",", "*", "*", "kwargs", ")" ]
https://github.com/tomplus/kubernetes_asyncio/blob/f028cc793e3a2c519be6a52a49fb77ff0b014c9b/kubernetes_asyncio/client/api/core_v1_api.py#L14898-L14931
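A minimal async usage sketch for this endpoint; it assumes a reachable cluster and a local kubeconfig, and the 'default' namespace is a placeholder:

    import asyncio
    from kubernetes_asyncio import client, config

    async def main():
        await config.load_kube_config()  # in-cluster setups use load_incluster_config()
        async with client.ApiClient() as api:
            v1 = client.CoreV1Api(api)
            events = await v1.list_namespaced_event("default", limit=10)
            for ev in events.items:
                print(ev.reason, ev.message)

    asyncio.run(main())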
alpacahq/pylivetrader
2d9bf97103814409ba8b56a4291f2655c59514ee
pylivetrader/algorithm.py
python
Algorithm.__setattr__
(self, name, value)
[]
def __setattr__(self, name, value): # Reject names that overlap with API method names if hasattr(self, 'api_methods') and name in self.api_methods: raise AttributeError( 'Cannot set {} on context object as it is the name of ' 'an API method.'.format(name) ) else: object.__setattr__(self, name, value)
[ "def", "__setattr__", "(", "self", ",", "name", ",", "value", ")", ":", "# Reject names that overlap with API method names", "if", "hasattr", "(", "self", ",", "'api_methods'", ")", "and", "name", "in", "self", ".", "api_methods", ":", "raise", "AttributeError", "(", "'Cannot set {} on context object as it is the name of '", "'an API method.'", ".", "format", "(", "name", ")", ")", "else", ":", "object", ".", "__setattr__", "(", "self", ",", "name", ",", "value", ")" ]
https://github.com/alpacahq/pylivetrader/blob/2d9bf97103814409ba8b56a4291f2655c59514ee/pylivetrader/algorithm.py#L96-L104
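The guard pattern in isolation: a toy context object that refuses to shadow its own API names (the names in api_methods are illustrative):

    class Context:
        api_methods = {"order", "record", "schedule_function"}

        def __setattr__(self, name, value):
            # Refuse attribute names that collide with the public API.
            if name in self.api_methods:
                raise AttributeError(
                    "Cannot set {} on context object as it is the name of "
                    "an API method.".format(name)
                )
            object.__setattr__(self, name, value)

    ctx = Context()
    ctx.lookback = 20     # fine
    try:
        ctx.order = None  # raises AttributeError
    except AttributeError as exc:
        print(exc)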
Komodo/KomodoEdit
61edab75dce2bdb03943b387b0608ea36f548e8e
src/apsw/tools/apswtrace.py
python
fmtfloat
(n, decimals=3, total=None)
return s
Work around broken python float formatting
Work around broken python float formatting
[ "Work", "around", "broken", "python", "float", "formatting" ]
def fmtfloat(n, decimals=3, total=None): "Work around broken python float formatting" s="%0.*f" % (decimals, n) if total: s=(" "*total+s)[-total:] return s
[ "def", "fmtfloat", "(", "n", ",", "decimals", "=", "3", ",", "total", "=", "None", ")", ":", "s", "=", "\"%0.*f\"", "%", "(", "decimals", ",", "n", ")", "if", "total", ":", "s", "=", "(", "\" \"", "*", "total", "+", "s", ")", "[", "-", "total", ":", "]", "return", "s" ]
https://github.com/Komodo/KomodoEdit/blob/61edab75dce2bdb03943b387b0608ea36f548e8e/src/apsw/tools/apswtrace.py#L294-L299
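What the two parameters do, by example; the expected outputs are read straight off the code above:

    def fmtfloat(n, decimals=3, total=None):
        "Work around broken python float formatting"
        s = "%0.*f" % (decimals, n)
        if total:
            s = (" " * total + s)[-total:]
        return s

    print(repr(fmtfloat(3.14159)))              # '3.142'
    print(repr(fmtfloat(3.14159, 2, total=8)))  # '    3.14', right-aligned in 8 chars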
redhat-imaging/imagefactory
176f6e045e1df049d50f33a924653128d5ab8b27
imgfac/rest/bottle.py
python
MultiDict.__len__
(self)
return len(self.dict)
[]
def __len__(self): return len(self.dict)
[ "def", "__len__", "(", "self", ")", ":", "return", "len", "(", "self", ".", "dict", ")" ]
https://github.com/redhat-imaging/imagefactory/blob/176f6e045e1df049d50f33a924653128d5ab8b27/imgfac/rest/bottle.py#L1822-L1822
TM0831/Spiders
89ba07667a1e729b67f012f2f7cf71034243ad28
ProxyPool/crawl.py
python
Crawler.get_proxies
(self, callback)
return self.proxies
Run each proxy crawler :param callback: name of the crawl function :return:
Run each proxy crawler :param callback: name of the crawl function :return:
[ "Run", "each", "proxy", "crawler", ":", "param", "callback", ":", "name", "of", "the", "crawl", "function", ":", "return", ":" ]
def get_proxies(self, callback): """ Run each proxy crawler :param callback: name of the crawl function :return: """ for proxy in eval("self.{}()".format(callback)): print("Successfully obtained proxy:", proxy) self.proxies.append(proxy) return self.proxies
[ "def", "get_proxies", "(", "self", ",", "callback", ")", ":", "for", "proxy", "in", "eval", "(", "\"self.{}()\"", ".", "format", "(", "callback", ")", ")", ":", "print", "(", "\"Successfully obtained proxy:\"", ",", "proxy", ")", "self", ".", "proxies", ".", "append", "(", "proxy", ")", "return", "self", ".", "proxies" ]
https://github.com/TM0831/Spiders/blob/89ba07667a1e729b67f012f2f7cf71034243ad28/ProxyPool/crawl.py#L33-L42
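The eval-based dispatch above executes an arbitrary string; a sketch of the getattr equivalent gives the same behaviour for well-formed method names without string evaluation:

    def get_proxies(self, callback):
        """
        Run each proxy crawler.
        :param callback: name of the crawl method to invoke
        :return: accumulated proxy list
        """
        crawl = getattr(self, callback)  # look the method up by name, no eval
        for proxy in crawl():
            print("Successfully obtained proxy:", proxy)
            self.proxies.append(proxy)
        return self.proxies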
openhatch/oh-mainline
ce29352a034e1223141dcc2f317030bbc3359a51
vendor/packages/Django/django/dispatch/saferef.py
python
BoundMethodWeakref.__call__
(self)
return None
Return a strong reference to the bound method If the target cannot be retrieved, then will return None, otherwise returns a bound instance method for our object and function. Note: You may call this method any number of times, as it does not invalidate the reference.
Return a strong reference to the bound method
[ "Return", "a", "strong", "reference", "to", "the", "bound", "method" ]
def __call__(self): """Return a strong reference to the bound method If the target cannot be retrieved, then will return None, otherwise returns a bound instance method for our object and function. Note: You may call this method any number of times, as it does not invalidate the reference. """ target = self.weakSelf() if target is not None: function = self.weakFunc() if function is not None: return function.__get__(target) return None
[ "def", "__call__", "(", "self", ")", ":", "target", "=", "self", ".", "weakSelf", "(", ")", "if", "target", "is", "not", "None", ":", "function", "=", "self", ".", "weakFunc", "(", ")", "if", "function", "is", "not", "None", ":", "return", "function", ".", "__get__", "(", "target", ")", "return", "None" ]
https://github.com/openhatch/oh-mainline/blob/ce29352a034e1223141dcc2f317030bbc3359a51/vendor/packages/Django/django/dispatch/saferef.py#L171-L187
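The modern standard-library counterpart of this pattern is weakref.WeakMethod, which likewise yields the bound method while the target is alive and None afterwards:

    import weakref

    class Target:
        def handler(self):
            return "called"

    t = Target()
    ref = weakref.WeakMethod(t.handler)
    method = ref()   # strong reference to the bound method
    print(method())  # 'called'
    del t, method
    print(ref())     # None once the instance is gone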
biopython/biopython
2dd97e71762af7b046d7f7f8a4f1e38db6b06c86
Bio/Phylo/PAML/baseml.py
python
Baseml._set_rel_paths
(self)
Make file/directory paths relative to the PWD (PRIVATE). BASEML requires that all paths specified in the control file be relative to the directory from which it is called rather than absolute paths.
Make file/directory paths relative to the PWD (PRIVATE).
[ "Make", "file", "/", "directory", "paths", "relative", "to", "the", "PWD", "(", "PRIVATE", ")", "." ]
def _set_rel_paths(self): """Make file/directory paths relative to the PWD (PRIVATE). BASEML requires that all paths specified in the control file be relative to the directory from which it is called rather than absolute paths. """ Paml._set_rel_paths(self) if self.tree is not None: self._rel_tree = os.path.relpath(self.tree, self.working_dir)
[ "def", "_set_rel_paths", "(", "self", ")", ":", "Paml", ".", "_set_rel_paths", "(", "self", ")", "if", "self", ".", "tree", "is", "not", "None", ":", "self", ".", "_rel_tree", "=", "os", ".", "path", ".", "relpath", "(", "self", ".", "tree", ",", "self", ".", "working_dir", ")" ]
https://github.com/biopython/biopython/blob/2dd97e71762af7b046d7f7f8a4f1e38db6b06c86/Bio/Phylo/PAML/baseml.py#L154-L163
plotly/plotly.py
cfad7862594b35965c0e000813bd7805e8494a5b
packages/python/plotly/plotly/graph_objs/_heatmap.py
python
Heatmap.text
(self)
return self["text"]
Sets the text elements associated with each z value. The 'text' property is an array that may be specified as a tuple, list, numpy array, or pandas Series Returns ------- numpy.ndarray
Sets the text elements associated with each z value. The 'text' property is an array that may be specified as a tuple, list, numpy array, or pandas Series
[ "Sets", "the", "text", "elements", "associated", "with", "each", "z", "value", ".", "The", "text", "property", "is", "an", "array", "that", "may", "be", "specified", "as", "a", "tuple", "list", "numpy", "array", "or", "pandas", "Series" ]
def text(self): """ Sets the text elements associated with each z value. The 'text' property is an array that may be specified as a tuple, list, numpy array, or pandas Series Returns ------- numpy.ndarray """ return self["text"]
[ "def", "text", "(", "self", ")", ":", "return", "self", "[", "\"text\"", "]" ]
https://github.com/plotly/plotly.py/blob/cfad7862594b35965c0e000813bd7805e8494a5b/packages/python/plotly/plotly/graph_objs/_heatmap.py#L1095-L1106
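Typical use pairs each z cell with a text label of the same shape; the values below are made up for illustration:

    import plotly.graph_objects as go

    fig = go.Figure(go.Heatmap(
        z=[[1, 20], [30, 4]],
        text=[["low", "high"], ["high", "low"]],  # same 2x2 shape as z
        hoverinfo="z+text",
    ))
    fig.show()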
cloudera/hue
23f02102d4547c17c32bd5ea0eb24e9eadd657a4
desktop/core/ext-py/SQLAlchemy-1.3.17/lib/sqlalchemy/sql/elements.py
python
ColumnElement._uncached_proxy_set
(self)
return s
An 'uncached' version of proxy set. This is so that we can read annotations from the list of columns without breaking the caching of the above proxy_set.
An 'uncached' version of proxy set.
[ "An", "uncached", "version", "of", "proxy", "set", "." ]
def _uncached_proxy_set(self): """An 'uncached' version of proxy set. This is so that we can read annotations from the list of columns without breaking the caching of the above proxy_set. """ s = util.column_set([self]) for c in self._proxies: s.update(c._uncached_proxy_set()) return s
[ "def", "_uncached_proxy_set", "(", "self", ")", ":", "s", "=", "util", ".", "column_set", "(", "[", "self", "]", ")", "for", "c", "in", "self", ".", "_proxies", ":", "s", ".", "update", "(", "c", ".", "_uncached_proxy_set", "(", ")", ")", "return", "s" ]
https://github.com/cloudera/hue/blob/23f02102d4547c17c32bd5ea0eb24e9eadd657a4/desktop/core/ext-py/SQLAlchemy-1.3.17/lib/sqlalchemy/sql/elements.py#L798-L808
CedricGuillemet/Imogen
ee417b42747ed5b46cb11b02ef0c3630000085b3
bin/Lib/distutils/cmd.py
python
Command.run_command
(self, command)
Run some other command: uses the 'run_command()' method of Distribution, which creates and finalizes the command object if necessary and then invokes its 'run()' method.
Run some other command: uses the 'run_command()' method of Distribution, which creates and finalizes the command object if necessary and then invokes its 'run()' method.
[ "Run", "some", "other", "command", ":", "uses", "the", "run_command", "()", "method", "of", "Distribution", "which", "creates", "and", "finalizes", "the", "command", "object", "if", "necessary", "and", "then", "invokes", "its", "run", "()", "method", "." ]
def run_command(self, command): """Run some other command: uses the 'run_command()' method of Distribution, which creates and finalizes the command object if necessary and then invokes its 'run()' method. """ self.distribution.run_command(command)
[ "def", "run_command", "(", "self", ",", "command", ")", ":", "self", ".", "distribution", ".", "run_command", "(", "command", ")" ]
https://github.com/CedricGuillemet/Imogen/blob/ee417b42747ed5b46cb11b02ef0c3630000085b3/bin/Lib/distutils/cmd.py#L308-L313
xonsh/xonsh
b76d6f994f22a4078f602f8b386f4ec280c8461f
xonsh/parsers/base.py
python
BaseParser.p_comma_test
(self, p)
comma_test : COMMA test
comma_test : COMMA test
[ "comma_test", ":", "COMMA", "test" ]
def p_comma_test(self, p): """comma_test : COMMA test""" p[0] = [p[2]]
[ "def", "p_comma_test", "(", "self", ",", "p", ")", ":", "p", "[", "0", "]", "=", "[", "p", "[", "2", "]", "]" ]
https://github.com/xonsh/xonsh/blob/b76d6f994f22a4078f602f8b386f4ec280c8461f/xonsh/parsers/base.py#L1515-L1517
dingjiansw101/RoITransformer_DOTA
9125055aed313dde7a68882e9931c9ea58c6f6ab
fpn/core/module.py
python
Module.backward
(self, out_grads=None)
Backward computation. Parameters ---------- out_grads : NDArray or list of NDArray, optional Gradient on the outputs to be propagated back. This parameter is only needed when bind is called on outputs that are not a loss function.
Backward computation.
[ "Backward", "computation", "." ]
def backward(self, out_grads=None): """Backward computation. Parameters ---------- out_grads : NDArray or list of NDArray, optional Gradient on the outputs to be propagated back. This parameter is only needed when bind is called on outputs that are not a loss function. """ assert self.binded and self.params_initialized self._exec_group.backward(out_grads=out_grads)
[ "def", "backward", "(", "self", ",", "out_grads", "=", "None", ")", ":", "assert", "self", ".", "binded", "and", "self", ".", "params_initialized", "self", ".", "_exec_group", ".", "backward", "(", "out_grads", "=", "out_grads", ")" ]
https://github.com/dingjiansw101/RoITransformer_DOTA/blob/9125055aed313dde7a68882e9931c9ea58c6f6ab/fpn/core/module.py#L553-L564
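In an MXNet-style training loop this call sits between forward and update; a sketch of that step, assuming mod is already bound and initialized and batch is a DataBatch:

    def fit_step(mod, batch):
        mod.forward(batch, is_train=True)  # compute outputs
        mod.backward()                     # out_grads=None: outputs are a loss
        mod.update()                       # apply the optimizer step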
jeetsukumaran/DendroPy
29fd294bf05d890ebf6a8d576c501e471db27ca1
src/dendropy/datamodel/treemodel.py
python
Node.leaf_iter
(self, filter_fn=None)
Iterate over all tips or leaves that ultimately descend from this node. Visits all leaf or tip nodes descended from this node. Nodes can optionally be filtered by ``filter_fn``: only nodes for which ``filter_fn`` returns |True| when called with the node as an argument are yielded. Parameters ---------- filter_fn : function object, optional A function object that takes a |Node| object as an argument and returns |True| if the |Node| object is to be yielded by the iterator, or |False| if not. If ``filter_fn`` is |None| (default), then all nodes visited will be yielded. Returns ------- :py:class:`collections.Iterator` [|Node|] An iterator yielding leaf nodes of the subtree rooted at this node.
Iterate over all tips or leaves that ultimately descend from this node.
[ "Iterate", "over", "all", "tips", "or", "leaves", "that", "ultimately", "descend", "from", "this", "node", "." ]
def leaf_iter(self, filter_fn=None): """ Iterate over all tips or leaves that ultimately descend from this node. Visits all leaf or tip nodes descended from this node. Nodes can optionally be filtered by ``filter_fn``: only nodes for which ``filter_fn`` returns |True| when called with the node as an argument are yielded. Parameters ---------- filter_fn : function object, optional A function object that takes a |Node| object as an argument and returns |True| if the |Node| object is to be yielded by the iterator, or |False| if not. If ``filter_fn`` is |None| (default), then all nodes visited will be yielded. Returns ------- :py:class:`collections.Iterator` [|Node|] An iterator yielding leaf nodes of the subtree rooted at this node. """ if filter_fn: ff = lambda x: x.is_leaf() and filter_fn(x) or None else: ff = lambda x: x.is_leaf() and x or None for node in self.postorder_iter(ff): yield node
[ "def", "leaf_iter", "(", "self", ",", "filter_fn", "=", "None", ")", ":", "if", "filter_fn", ":", "ff", "=", "lambda", "x", ":", "x", ".", "is_leaf", "(", ")", "and", "filter_fn", "(", "x", ")", "or", "None", "else", ":", "ff", "=", "lambda", "x", ":", "x", ".", "is_leaf", "(", ")", "and", "x", "or", "None", "for", "node", "in", "self", ".", "postorder_iter", "(", "ff", ")", ":", "yield", "node" ]
https://github.com/jeetsukumaran/DendroPy/blob/29fd294bf05d890ebf6a8d576c501e471db27ca1/src/dendropy/datamodel/treemodel.py#L1315-L1341
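A small end-to-end sketch with a throwaway Newick string; the tree and the filter are invented for illustration:

    import dendropy

    tree = dendropy.Tree.get(data="((A,B),(C,D));", schema="newick")
    # All leaves under the root...
    print([leaf.taxon.label for leaf in tree.seed_node.leaf_iter()])
    # ...and only those a filter admits (filter_fn only ever sees leaf nodes).
    print([leaf.taxon.label
           for leaf in tree.seed_node.leaf_iter(lambda nd: nd.taxon.label != "C")])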
linxid/Machine_Learning_Study_Path
558e82d13237114bbb8152483977806fc0c222af
Machine Learning In Action/Chapter5-LogisticRegression/venv/Lib/site-packages/pkg_resources/__init__.py
python
NullProvider._fn
(self, base, resource_name)
return base
[]
def _fn(self, base, resource_name): if resource_name: return os.path.join(base, *resource_name.split('/')) return base
[ "def", "_fn", "(", "self", ",", "base", ",", "resource_name", ")", ":", "if", "resource_name", ":", "return", "os", ".", "path", ".", "join", "(", "base", ",", "*", "resource_name", ".", "split", "(", "'/'", ")", ")", "return", "base" ]
https://github.com/linxid/Machine_Learning_Study_Path/blob/558e82d13237114bbb8152483977806fc0c222af/Machine Learning In Action/Chapter5-LogisticRegression/venv/Lib/site-packages/pkg_resources/__init__.py#L1551-L1554
wxWidgets/Phoenix
b2199e299a6ca6d866aa6f3d0888499136ead9d6
wx/lib/plot/polyobjects.py
python
PolyBoxPlot.draw
(self, dc, printerScale, coord=None)
Draws a box plot on the DC. Notes ----- The following draw order is required: 1. First the whisker line 2. Then the IQR box 3. Lastly the median line. This is because + The whiskers are drawn as single line rather than two lines + The median line must be visible over the box if the box has a fill. Other than that, the draw order can be changed.
Draws a box plot on the DC.
[ "Draws", "a", "box", "plot", "on", "the", "DC", "." ]
def draw(self, dc, printerScale, coord=None): """ Draws a box plot on the DC. Notes ----- The following draw order is required: 1. First the whisker line 2. Then the IQR box 3. Lastly the median line. This is because + The whiskers are drawn as single line rather than two lines + The median line must be visible over the box if the box has a fill. Other than that, the draw order can be changed. """ self._draw_whisker(dc, printerScale) self._draw_iqr_box(dc, printerScale) self._draw_median(dc, printerScale) # median after box self._draw_whisker_ends(dc, printerScale) self._draw_outliers(dc, printerScale)
[ "def", "draw", "(", "self", ",", "dc", ",", "printerScale", ",", "coord", "=", "None", ")", ":", "self", ".", "_draw_whisker", "(", "dc", ",", "printerScale", ")", "self", ".", "_draw_iqr_box", "(", "dc", ",", "printerScale", ")", "self", ".", "_draw_median", "(", "dc", ",", "printerScale", ")", "# median after box", "self", ".", "_draw_whisker_ends", "(", "dc", ",", "printerScale", ")", "self", ".", "_draw_outliers", "(", "dc", ",", "printerScale", ")" ]
https://github.com/wxWidgets/Phoenix/blob/b2199e299a6ca6d866aa6f3d0888499136ead9d6/wx/lib/plot/polyobjects.py#L1065-L1088
sentinel-hub/eo-learn
cf964eaf173668d6a374675dbd7c1d244264c11d
examples/crop-type-classification/Tasks/CropTypeClassTasks.py
python
FixLPIS._fix_danish_lpis
(self, eopatch)
See Task's docs for the explanation of what is done.
See Task's docs for the explanation of what is done.
[ "See", "Task", "s", "docs", "for", "the", "explanation", "of", "what", "is", "done", "." ]
def _fix_danish_lpis(self, eopatch): """ See Task's docs for the explanation of what is done. """ eopatch.vector_timeless[self.feature].rename(index=str, columns={"CropName": "crop_geopedia_idx"}, inplace=True) eopatch.vector_timeless[self.feature] = pd.merge(eopatch.vector_timeless[self.feature], self.mapping, on='crop_geopedia_idx') eopatch.vector_timeless[self.feature]['crop_geopedia_idx'] = eopatch.vector_timeless[self.feature]['PreCropName'] self.mapping.rename(index=str, columns={"Crop Name": "PreCrop Name"}, inplace=True) eopatch.vector_timeless[self.feature] = pd.merge(eopatch.vector_timeless[self.feature], self.mapping, on='crop_geopedia_idx') eopatch.vector_timeless[self.feature].drop(['crop_geopedia_idx', 'PreCropName'], axis=1, inplace=True)
[ "def", "_fix_danish_lpis", "(", "self", ",", "eopatch", ")", ":", "eopatch", ".", "vector_timeless", "[", "self", ".", "feature", "]", ".", "rename", "(", "index", "=", "str", ",", "columns", "=", "{", "\"CropName\"", ":", "\"crop_geopedia_idx\"", "}", ",", "inplace", "=", "True", ")", "eopatch", ".", "vector_timeless", "[", "self", ".", "feature", "]", "=", "pd", ".", "merge", "(", "eopatch", ".", "vector_timeless", "[", "self", ".", "feature", "]", ",", "self", ".", "mapping", ",", "on", "=", "'crop_geopedia_idx'", ")", "eopatch", ".", "vector_timeless", "[", "self", ".", "feature", "]", "[", "'crop_geopedia_idx'", "]", "=", "eopatch", ".", "vector_timeless", "[", "self", ".", "feature", "]", "[", "'PreCropName'", "]", "self", ".", "mapping", ".", "rename", "(", "index", "=", "str", ",", "columns", "=", "{", "\"Crop Name\"", ":", "\"PreCrop Name\"", "}", ",", "inplace", "=", "True", ")", "eopatch", ".", "vector_timeless", "[", "self", ".", "feature", "]", "=", "pd", ".", "merge", "(", "eopatch", ".", "vector_timeless", "[", "self", ".", "feature", "]", ",", "self", ".", "mapping", ",", "on", "=", "'crop_geopedia_idx'", ")", "eopatch", ".", "vector_timeless", "[", "self", ".", "feature", "]", ".", "drop", "(", "[", "'crop_geopedia_idx'", ",", "'PreCropName'", "]", ",", "axis", "=", "1", ",", "inplace", "=", "True", ")" ]
https://github.com/sentinel-hub/eo-learn/blob/cf964eaf173668d6a374675dbd7c1d244264c11d/examples/crop-type-classification/Tasks/CropTypeClassTasks.py#L546-L559
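The rename-then-merge move, reduced to a toy pandas frame; the column names and codes are invented stand-ins for the LPIS data:

    import pandas as pd

    parcels = pd.DataFrame({"CropName": [1, 2]})  # geopedia index codes
    mapping = pd.DataFrame({"crop_geopedia_idx": [1, 2],
                            "Crop Name": ["wheat", "maize"]})

    parcels = parcels.rename(columns={"CropName": "crop_geopedia_idx"})
    parcels = pd.merge(parcels, mapping, on="crop_geopedia_idx")
    print(parcels)  # each parcel row now carries its human-readable crop name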