Dataset schema (column name, type, observed range):

    repo              string   lengths 7 to 55
    path              string   lengths 4 to 223
    url               string   lengths 87 to 315
    code              string   lengths 75 to 104k
    code_tokens       list
    docstring         string   lengths 1 to 46.9k
    docstring_tokens  list
    language          string   1 distinct value
    partition         string   3 distinct values
    avg_line_len      float64  7.91 to 980
peri-source/peri
peri/util.py
https://github.com/peri-source/peri/blob/61beed5deaaf978ab31ed716e8470d86ba639867/peri/util.py#L442-L467
def kvectors(self, norm=False, form='broadcast', real=False, shift=False):
    """
    Return the kvectors associated with this tile, given the standard form
    of -0.5 to 0.5. `norm` and `form` arguments are the same as those passed
    to `Tile.coords`.

    Parameters
    -----------
    real : boolean
        whether to return kvectors associated with the real fft instead
    """
    if norm is False:
        norm = 1
    if norm is True:
        norm = np.array(self.shape)
    norm = aN(norm, self.dim, dtype='float')

    v = list(np.fft.fftfreq(self.shape[i])/norm[i] for i in range(self.dim))

    if shift:
        v = list(np.fft.fftshift(t) for t in v)

    if real:
        v[-1] = v[-1][:(self.shape[-1]+1)//2]

    return self._format_vector(v, form=form)
Return the kvectors associated with this tile, given the standard form of -0.5 to 0.5. `norm` and `form` arguments are the same as those passed to `Tile.coords`. Parameters ----------- real : boolean whether to return kvectors associated with the real fft instead
python
valid
31.846154
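A standalone numpy sketch of the frequency grids the sampled kvectors() builds, not the peri API itself; the shape and the norm value are made-up assumptions:

import numpy as np

shape = (4, 6)                        # hypothetical 2D tile shape
norm = np.array(shape, dtype=float)   # what norm=True resolves to above
v = [np.fft.fftfreq(shape[i]) / norm[i] for i in range(len(shape))]
# np.fft.fftfreq spans -0.5..0.5 per axis before the division; real=True would
# keep only the non-negative half of the last axis, matching numpy's rfft layout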
clalancette/pycdlib
pycdlib/udf.py
https://github.com/clalancette/pycdlib/blob/1e7b77a809e905d67dc71e12d70e850be26b6233/pycdlib/udf.py#L706-L726
def new(self):
    # type: () -> None
    '''
    A method to create a new UDF Anchor Volume Structure.

    Parameters:
     None.
    Returns:
     Nothing.
    '''
    if self._initialized:
        raise pycdlibexception.PyCdlibInternalError('UDF Anchor Volume Structure already initialized')

    self.desc_tag = UDFTag()
    self.desc_tag.new(2)  # FIXME: we should let the user set serial_number

    self.main_vd_length = 32768
    self.main_vd_extent = 0  # This will get set later.
    self.reserve_vd_length = 32768
    self.reserve_vd_extent = 0  # This will get set later.

    self._initialized = True
A method to create a new UDF Anchor Volume Structure. Parameters: None. Returns: Nothing.
python
train
31.47619
fabric/fabric
fabric/config.py
https://github.com/fabric/fabric/blob/e9939d68b734935f0c98d98817912ad7c698238f/fabric/config.py#L168-L189
def _load_ssh_files(self):
    """
    Trigger loading of configured SSH config file paths.

    Expects that ``base_ssh_config`` has already been set to an
    `~paramiko.config.SSHConfig` object.

    :returns: ``None``.
    """
    # TODO: does this want to more closely ape the behavior of
    # InvokeConfig.load_files? re: having a _found attribute for each that
    # determines whether to load or skip
    if self._runtime_ssh_path is not None:
        path = self._runtime_ssh_path
        # Manually blow up like open() (_load_ssh_file normally doesn't)
        if not os.path.exists(path):
            msg = "No such file or directory: {!r}".format(path)
            raise IOError(errno.ENOENT, msg)
        self._load_ssh_file(os.path.expanduser(path))
    elif self.load_ssh_configs:
        for path in (self._user_ssh_path, self._system_ssh_path):
            self._load_ssh_file(os.path.expanduser(path))
Trigger loading of configured SSH config file paths. Expects that ``base_ssh_config`` has already been set to an `~paramiko.config.SSHConfig` object. :returns: ``None``.
python
train
44.318182
brocade/pynos
pynos/versions/ver_7/ver_7_1_0/yang/brocade_vswitch.py
https://github.com/brocade/pynos/blob/bd8a34e98f322de3fc06750827d8bbc3a0c00380/pynos/versions/ver_7/ver_7_1_0/yang/brocade_vswitch.py#L1008-L1020
def get_vmpolicy_macaddr_output_vmpolicy_macaddr_name(self, **kwargs):
    """Auto Generated Code
    """
    config = ET.Element("config")
    get_vmpolicy_macaddr = ET.Element("get_vmpolicy_macaddr")
    config = get_vmpolicy_macaddr
    output = ET.SubElement(get_vmpolicy_macaddr, "output")
    vmpolicy_macaddr = ET.SubElement(output, "vmpolicy-macaddr")
    name = ET.SubElement(vmpolicy_macaddr, "name")
    name.text = kwargs.pop('name')

    callback = kwargs.pop('callback', self._callback)
    return callback(config)
Auto Generated Code
python
train
43.076923
Kortemme-Lab/klab
klab/bio/bonsai.py
https://github.com/Kortemme-Lab/klab/blob/6d410ad08f1bd9f7cbbb28d7d946e94fbaaa2b6b/klab/bio/bonsai.py#L550-L552
def prune_loop_for_kic(self, loops_segments, search_radius, expected_min_loop_length=None, expected_max_loop_length=None, generate_pymol_session=False):
    '''A wrapper for prune_structure_according_to_loop_definitions suitable for the Rosetta kinematic closure (KIC) loop modeling method.'''
    return self.prune_structure_according_to_loop_definitions(
        loops_segments, search_radius,
        expected_min_loop_length=expected_min_loop_length,
        expected_max_loop_length=expected_max_loop_length,
        generate_pymol_session=generate_pymol_session,
        check_sequence=True, keep_Ca_buttress_atoms=True)
A wrapper for prune_structure_according_to_loop_definitions suitable for the Rosetta kinematic closure (KIC) loop modeling method.
python
train
202.333333
IdentityPython/SATOSA
src/satosa/frontends/openid_connect.py
https://github.com/IdentityPython/SATOSA/blob/49da5d4c0ac1a5ebf1a71b4f7aaf04f0e52d8fdb/src/satosa/frontends/openid_connect.py#L211-L222
def _validate_config(self, config):
    """
    Validates that all necessary config parameters are specified.
    :type config: dict[str, dict[str, Any] | str]
    :param config: the module config
    """
    if config is None:
        raise ValueError("OIDCFrontend conf can't be 'None'.")

    for k in {"signing_key_path", "provider"}:
        if k not in config:
            raise ValueError("Missing configuration parameter '{}' for OpenID Connect frontend.".format(k))
Validates that all necessary config parameters are specified. :type config: dict[str, dict[str, Any] | str] :param config: the module config
python
train
41.916667
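An illustrative config dict that would pass the validation above; the key path and provider contents are placeholders, not SATOSA defaults:

config = {
    "signing_key_path": "/etc/satosa/oidc_sign.key",          # placeholder path
    "provider": {"response_types_supported": ["id_token"]},   # placeholder provider settings
}
# frontend._validate_config(config) would return without raising;
# dropping either key raises ValueError naming the missing parameter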
akfullfo/taskforce
taskforce/watch_modules.py
https://github.com/akfullfo/taskforce/blob/bc6dd744bd33546447d085dbd18a350532220193/taskforce/watch_modules.py#L104-L140
def get(self, **params):
    """
    Return a list of commands that were affected by a recent change
    (following a poll() return for the controlling file descriptor).
    Each list element is a tuple:

        (name, command_path, module_list)

    The event queue will be read multiple times and reads continue
    until a timeout occurs.
    """
    log = self._getparam('log', self._discard, **params)
    changes = {}
    paths = self._watch.get(**params)

    # On each event, de-invert the tree to produce a
    # list of changes by command name.
    #
    for path in paths:
        if path in self.modules:
            for name in self.modules[path]:
                if name in changes:
                    if path not in changes[name]:
                        changes[name].append(path)
                else:
                    changes[name] = [path]
        else:
            log.warning("Path %r had no matching watch entry", path)
    names = list(changes)
    log.debug("Change was to %d name%s", len(names), ses(len(names)))
    names.sort()
    resp = []
    for name in names:
        resp.append((name, self.names.get(name), changes[name]))
    return resp
Return a list of commands that were affected by a recent change (following a poll() return for the controlling file descriptor). Each list element is a tuple: (name, command_path, module_list) The event queue will be read multiple times and reads continue until a timeout occurs.
python
train
34.216216
fprimex/zdesk
zdesk/zdesk_api.py
https://github.com/fprimex/zdesk/blob/851611c13b4d530e9df31390b3ec709baf0a0188/zdesk/zdesk_api.py#L1966-L1969
def help_center_user_segment_create(self, data, **kwargs):
    "https://developer.zendesk.com/rest_api/docs/help_center/user_segments#create-user-segment"
    api_path = "/api/v2/help_center/user_segments.json"
    return self.call(api_path, method="POST", data=data, **kwargs)
https://developer.zendesk.com/rest_api/docs/help_center/user_segments#create-user-segment
python
train
71.5
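A hedged usage sketch for the wrapper above; the URL, credentials, and payload values are placeholders, and the payload shape follows Zendesk's documented user_segment format rather than anything in the sampled code:

from zdesk import Zendesk

zendesk = Zendesk('https://example.zendesk.com', zdesk_email='[email protected]',
                  zdesk_password='api_token_value', zdesk_token=True)
new_segment = {'user_segment': {'user_type': 'signed_in_users', 'name': 'Signed-in users'}}
result = zendesk.help_center_user_segment_create(data=new_segment)  # POSTs the JSON body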
burnash/gspread
gspread/utils.py
https://github.com/burnash/gspread/blob/0e8debe208095aeed3e3e7136c2fa5cd74090946/gspread/utils.py#L40-L87
def numericise(value, empty2zero=False, default_blank="",
               allow_underscores_in_numeric_literals=False):
    """Returns a value that depends on the input string:
        - Float if input can be converted to Float
        - Integer if input can be converted to integer
        - Zero if the input string is empty and empty2zero flag is set
        - The same input string, empty or not, otherwise.

    Executable examples:

    >>> numericise("faa")
    'faa'
    >>> numericise("3")
    3
    >>> numericise("3_2", allow_underscores_in_numeric_literals=False)
    '3_2'
    >>> numericise("3_2", allow_underscores_in_numeric_literals=True)
    32
    >>> numericise("3.1")
    3.1
    >>> numericise("", empty2zero=True)
    0
    >>> numericise("", empty2zero=False)
    ''
    >>> numericise("", default_blank=None)
    >>>
    >>> numericise("", default_blank="foo")
    'foo'
    >>> numericise("")
    ''
    >>> numericise(None)
    >>>
    """
    if value is not None:
        if "_" in value and not allow_underscores_in_numeric_literals:
            return value
        try:
            value = int(value)
        except ValueError:
            try:
                value = float(value)
            except ValueError:
                if value == "":
                    if empty2zero:
                        value = 0
                    else:
                        value = default_blank

    return value
Returns a value that depends on the input string: - Float if input can be converted to Float - Integer if input can be converted to integer - Zero if the input string is empty and empty2zero flag is set - The same input string, empty or not, otherwise. Executable examples: >>> numericise("faa") 'faa' >>> numericise("3") 3 >>> numericise("3_2", allow_underscores_in_numeric_literals=False) '3_2' >>> numericise("3_2", allow_underscores_in_numeric_literals=True) 32 >>> numericise("3.1") 3.1 >>> numericise("", empty2zero=True) 0 >>> numericise("", empty2zero=False) '' >>> numericise("", default_blank=None) >>> >>> numericise("", default_blank="foo") 'foo' >>> numericise("") '' >>> numericise(None) >>>
python
train
28.625
PmagPy/PmagPy
programs/demag_gui.py
https://github.com/PmagPy/PmagPy/blob/c7984f8809bf40fe112e53dcc311a33293b62d0b/programs/demag_gui.py#L7637-L7685
def on_zijd_mark(self, event):
    """
    Get mouse position on double right click find the interpretation
    in range of mouse position then mark that interpretation bad or good

    Parameters
    ----------
    event : the wx Mouseevent for that click

    Alters
    ------
    current_fit
    """
    if not array(self.CART_rot).any():
        return
    pos = event.GetPosition()
    width, height = self.canvas1.get_width_height()
    pos[1] = height - pos[1]
    xpick_data, ypick_data = pos
    xdata_org = list(self.CART_rot[:, 0]) + list(self.CART_rot[:, 0])
    ydata_org = list(-1*self.CART_rot[:, 1]) + list(-1*self.CART_rot[:, 2])
    data_corrected = self.zijplot.transData.transform(
        vstack([xdata_org, ydata_org]).T)
    xdata, ydata = data_corrected.T
    xdata = list(map(float, xdata))
    ydata = list(map(float, ydata))
    e = 4e0

    index = None
    for i, (x, y) in enumerate(zip(xdata, ydata)):
        if 0 < sqrt((x-xpick_data)**2. + (y-ypick_data)**2.) < e:
            index = i
            break

    if index != None:
        steps = self.Data[self.s]['zijdblock']
        if self.Data[self.s]['measurement_flag'][index % len(steps)] == "g":
            self.mark_meas_bad(index % len(steps))
        else:
            self.mark_meas_good(index % len(steps))

        pmag.magic_write(os.path.join(
            self.WD, "magic_measurements.txt"), self.mag_meas_data, "magic_measurements")

        self.recalculate_current_specimen_interpreatations()

        if self.ie_open:
            self.ie.update_current_fit_data()

        self.calculate_high_levels_data()
        self.update_selection()
Get mouse position on double right click find the interpretation in range of mouse position then mark that interpretation bad or good Parameters ---------- event : the wx Mouseevent for that click Alters ------ current_fit
python
train
36.061224
LuminosoInsight/wordfreq
wordfreq/__init__.py
https://github.com/LuminosoInsight/wordfreq/blob/170e3c6536854b06dc63da8d873e8cc4f9ef6180/wordfreq/__init__.py#L262-L286
def word_frequency(word, lang, wordlist='best', minimum=0.):
    """
    Get the frequency of `word` in the language with code `lang`, from the
    specified `wordlist`.

    These wordlists can be specified:

    - 'large': a wordlist built from at least 5 sources, containing word
      frequencies of 10^-8 and higher
    - 'small': a wordlist built from at least 3 sources, containing word
      frequencies of 10^-6 and higher
    - 'best': uses 'large' if available, and 'small' otherwise

    The value returned will always be at least as large as `minimum`.
    You could set this value to 10^-8, for example, to return 10^-8 for
    unknown words in the 'large' list instead of 0, avoiding a discontinuity.
    """
    args = (word, lang, wordlist, minimum)
    try:
        return _wf_cache[args]
    except KeyError:
        if len(_wf_cache) >= CACHE_SIZE:
            _wf_cache.clear()
        _wf_cache[args] = _word_frequency(*args)
        return _wf_cache[args]
Get the frequency of `word` in the language with code `lang`, from the specified `wordlist`. These wordlists can be specified: - 'large': a wordlist built from at least 5 sources, containing word frequencies of 10^-8 and higher - 'small': a wordlist built from at least 3 sources, containing word frequencies of 10^-6 and higher - 'best': uses 'large' if available, and 'small' otherwise The value returned will always be at least as large as `minimum`. You could set this value to 10^-8, for example, to return 10^-8 for unknown words in the 'large' list instead of 0, avoiding a discontinuity.
python
train
38.12
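A usage sketch for the public API above; the printed magnitudes are illustrative, not exact values:

from wordfreq import word_frequency

word_frequency('the', 'en')                  # common word: large, on the order of 1e-2
word_frequency('zzzzz', 'en')                # unknown word: 0.0
word_frequency('zzzzz', 'en', minimum=1e-8)  # unknown word with a floor: 1e-8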
andreafioraldi/angrdbg
angrdbg/page_7.py
https://github.com/andreafioraldi/angrdbg/blob/939b20fb9b341aee695d2db12142b1eddc5b555a/angrdbg/page_7.py#L247-L284
def load_objects(self, addr, num_bytes, ret_on_segv=False):
    """
    Load memory objects from paged memory.

    :param addr: Address to start loading.
    :param num_bytes: Number of bytes to load.
    :param bool ret_on_segv: True if you want load_bytes to return directly
                             when a SIGSEGV is triggered, otherwise a
                             SimSegfaultError will be raised.
    :return: list of tuples of (addr, memory_object)
    :rtype: tuple
    """
    result = []
    end = addr + num_bytes
    for page_addr in self._containing_pages(addr, end):
        try:
            #print "Getting page %x" % (page_addr // self._page_size)
            page = self._get_page(page_addr // self._page_size)
            #print "... got it"
        except KeyError:
            #print "... missing"
            #print "... SEGV"
            # missing page
            if self.allow_segv:
                if ret_on_segv:
                    break
                raise SimSegfaultError(addr, 'read-miss')
            else:
                continue

        if self.allow_segv and not page.concrete_permissions & DbgPage.PROT_READ:
            #print "... SEGV"
            if ret_on_segv:
                break
            raise SimSegfaultError(addr, 'non-readable')
        result.extend(page.load_slice(self.state, addr, end))

    return result
Load memory objects from paged memory. :param addr: Address to start loading. :param num_bytes: Number of bytes to load. :param bool ret_on_segv: True if you want load_bytes to return directly when a SIGSEGV is triggered, otherwise a SimSegfaultError will be raised. :return: list of tuples of (addr, memory_object) :rtype: tuple
python
train
37.868421
Esri/ArcREST
src/arcrest/common/general.py
https://github.com/Esri/ArcREST/blob/ab240fde2b0200f61d4a5f6df033516e53f2f416/src/arcrest/common/general.py#L609-L636
def fromJSON(jsonValue):
    """returns a featureset from a JSON string"""
    jd = json.loads(jsonValue)
    features = []
    if 'fields' in jd:
        fields = jd['fields']
    else:
        fields = {'fields':[]}
    if 'features' in jd:
        for feat in jd['features']:
            wkid = None
            spatialReference = None
            if 'spatialReference' in jd:
                spatialReference = jd['spatialReference']
                if 'wkid' in jd['spatialReference']:
                    wkid = jd['spatialReference']['wkid']
                elif 'latestWkid' in jd['spatialReference']:  # kept for compatibility
                    wkid = jd['spatialReference']['latestWkid']
            features.append(Feature(json_string=feat, wkid=wkid,
                                    spatialReference=spatialReference))
    return FeatureSet(fields,
                      features,
                      hasZ=jd['hasZ'] if 'hasZ' in jd else False,
                      hasM=jd['hasM'] if 'hasM' in jd else False,
                      geometryType=jd['geometryType'] if 'geometryType' in jd else None,
                      objectIdFieldName=jd['objectIdFieldName'] if 'objectIdFieldName' in jd else None,
                      globalIdFieldName=jd['globalIdFieldName'] if 'globalIdFieldName' in jd else None,
                      displayFieldName=jd['displayFieldName'] if 'displayFieldName' in jd else None,
                      spatialReference=jd['spatialReference'] if 'spatialReference' in jd else None)
returns a featureset from a JSON string
python
train
56.142857
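A minimal sketch, assuming fromJSON is exposed as a static method of FeatureSet in arcrest.common.general (as the sampled path suggests); the JSON body is a placeholder:

from arcrest.common.general import FeatureSet

json_value = ('{"geometryType": "esriGeometryPoint",'
              ' "spatialReference": {"wkid": 4326},'
              ' "fields": [], "features": []}')
fs = FeatureSet.fromJSON(json_value)  # an empty but well-formed featureset, wkid 4326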
gem/oq-engine
openquake/hazardlib/gsim/zhao_2016.py
https://github.com/gem/oq-engine/blob/8294553a0b8aba33fd96437a35065d03547d0040/openquake/hazardlib/gsim/zhao_2016.py#L230-L238
def _get_ln_a_n_max(self, C, n_sites, idx, rup):
    """
    Defines the rock site amplification defined in equations 10a and 10b
    """
    ln_a_n_max = C["lnSC1AM"] * np.ones(n_sites)
    for i in [2, 3, 4]:
        if np.any(idx[i]):
            ln_a_n_max[idx[i]] += C["S{:g}".format(i)]
    return ln_a_n_max
Defines the rock site amplification defined in equations 10a and 10b
python
train
37.555556
ejeschke/ginga
ginga/util/contour.py
https://github.com/ejeschke/ginga/blob/a78c893ec6f37a837de851947e9bb4625c597915/ginga/util/contour.py#L36-L45
def calc_contours(data, num_contours):
    """Get sets of contour points for numpy array `data`.
    `num_contours` specifies the number (int) of contours to make.
    Returns a list of numpy arrays of points--each array makes a
    polygon if plotted as such.
    """
    mn = np.nanmean(data)
    top = np.nanmax(data)
    levels = np.linspace(mn, top, num_contours)
    return get_contours(data, levels)
Get sets of contour points for numpy array `data`. `num_contours` specifies the number (int) of contours to make. Returns a list of numpy arrays of points--each array makes a polygon if plotted as such.
python
train
39.7
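A usage sketch with synthetic data; the array shape and contour count are arbitrary choices, not values from the sampled code:

import numpy as np
from ginga.util.contour import calc_contours

data = np.random.default_rng(0).normal(size=(64, 64))
polys = calc_contours(data, num_contours=5)   # levels run from nanmean to nanmax
# per the docstring, each element is an array of points forming one polygon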
neo4j-drivers/neobolt
neobolt/impl/python/direct.py
https://github.com/neo4j-drivers/neobolt/blob/724569d76e85777c4f5e30e8d0a18116bda4d8cd/neobolt/impl/python/direct.py#L492-L543
def acquire_direct(self, address):
    """ Acquire a connection to a given address from the pool.
    The address supplied should always be an IP address, not
    a host name. This method is thread safe.
    """
    if self.closed():
        raise ServiceUnavailable("Connection pool closed")
    with self.lock:
        try:
            connections = self.connections[address]
        except KeyError:
            connections = self.connections[address] = deque()

        connection_acquisition_start_timestamp = perf_counter()
        while True:
            # try to find a free connection in pool
            for connection in list(connections):
                if connection.closed() or connection.defunct() or connection.timedout():
                    connections.remove(connection)
                    continue
                if not connection.in_use:
                    connection.in_use = True
                    return connection
            # all connections in pool are in-use
            infinite_connection_pool = (self._max_connection_pool_size < 0 or
                                        self._max_connection_pool_size == float("inf"))
            can_create_new_connection = infinite_connection_pool or len(connections) < self._max_connection_pool_size
            if can_create_new_connection:
                try:
                    connection = self.connector(address)
                except ServiceUnavailable:
                    self.remove(address)
                    raise
                else:
                    connection.pool = self
                    connection.in_use = True
                    connections.append(connection)
                    return connection

            # failed to obtain a connection from pool because the pool is full and no free connection in the pool
            span_timeout = self._connection_acquisition_timeout - (perf_counter() - connection_acquisition_start_timestamp)
            if span_timeout > 0:
                self.cond.wait(span_timeout)
                # if timed out, then we throw error. This time computation is needed, as with python 2.7, we cannot
                # tell if the condition is notified or timed out when we come to this line
                if self._connection_acquisition_timeout <= (perf_counter() - connection_acquisition_start_timestamp):
                    raise ClientError("Failed to obtain a connection from pool within {!r}s".format(
                        self._connection_acquisition_timeout))
            else:
                raise ClientError("Failed to obtain a connection from pool within {!r}s".format(
                    self._connection_acquisition_timeout))
Acquire a connection to a given address from the pool. The address supplied should always be an IP address, not a host name. This method is thread safe.
python
train
54.057692
holmes-app/holmes-alf
holmesalf/wrapper.py
https://github.com/holmes-app/holmes-alf/blob/4bf891831390ecfae818cf37d8ffc3a76fe9f1ec/holmesalf/wrapper.py#L26-L34
def sync_client(self):
    """Synchronous OAuth 2.0 Bearer client"""
    if not self._sync_client:
        self._sync_client = AlfSyncClient(
            token_endpoint=self.config.get('OAUTH_TOKEN_ENDPOINT'),
            client_id=self.config.get('OAUTH_CLIENT_ID'),
            client_secret=self.config.get('OAUTH_CLIENT_SECRET')
        )
    return self._sync_client
Synchronous OAuth 2.0 Bearer client
python
train
43.888889
googleapis/google-cloud-python
bigquery/google/cloud/bigquery/dbapi/types.py
https://github.com/googleapis/google-cloud-python/blob/85e80125a59cb10f8cb105f25ecc099e4b940b50/bigquery/google/cloud/bigquery/dbapi/types.py#L45-L60
def TimeFromTicks(ticks, tz=None):
    """Construct a DB-API time value from the given ticks value.

    :type ticks: float
    :param ticks:
        a number of seconds since the epoch; see the documentation of the
        standard Python time module for details.

    :type tz: :class:`datetime.tzinfo`
    :param tz: (Optional) time zone to use for conversion

    :rtype: :class:`datetime.time`
    :returns: time represented by ticks.
    """
    dt = datetime.datetime.fromtimestamp(ticks, tz=tz)
    return dt.timetz()
Construct a DB-API time value from the given ticks value. :type ticks: float :param ticks: a number of seconds since the epoch; see the documentation of the standard Python time module for details. :type tz: :class:`datetime.tzinfo` :param tz: (Optional) time zone to use for conversion :rtype: :class:`datetime.time` :returns: time represented by ticks.
python
train
31.875
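A worked example: 129600 seconds after the epoch is 1970-01-02 12:00:00 UTC, so the DB-API time value comes back as noon in the supplied zone (the import path follows the sampled module layout):

import datetime
from google.cloud.bigquery.dbapi.types import TimeFromTicks

t = TimeFromTicks(129600, tz=datetime.timezone.utc)
print(t)  # 12:00:00+00:00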
dahlia/sqlalchemy-imageattach
sqlalchemy_imageattach/entity.py
https://github.com/dahlia/sqlalchemy-imageattach/blob/b4bafa73f3bb576ecf67ed7b40b702704a0fbdc8/sqlalchemy_imageattach/entity.py#L903-L924
def open_file(self, store=current_store, use_seek=False):
    """The shorthand of :meth:`~Image.open_file()` for the :attr:`original`.

    :param store: the storage which contains the image files
                  :data:`~sqlalchemy_imageattach.context.current_store`
                  by default
    :type store: :class:`~sqlalchemy_imageattach.store.Store`
    :param use_seek: whether the file should be seekable.
                     if :const:`True` it may be buffered in the memory.
                     default is :const:`False`
    :type use_seek: :class:`bool`
    :returns: the file-like object of the image, which is a context
              manager (plus, also seekable only if ``use_seek``
              is :const:`True`)
    :rtype: :class:`file`,
            :class:`~sqlalchemy_imageattach.file.FileProxy`,
            file-like object
    """
    original = self.require_original()
    return original.open_file(store, use_seek)
The shorthand of :meth:`~Image.open_file()` for the :attr:`original`. :param store: the storage which contains the image files :data:`~sqlalchemy_imageattach.context.current_store` by default :type store: :class:`~sqlalchemy_imageattach.store.Store` :param use_seek: whether the file should be seekable. if :const:`True` it may be buffered in the memory. default is :const:`False` :type use_seek: :class:`bool` :returns: the file-like object of the image, which is a context manager (plus, also seekable only if ``use_seek`` is :const:`True`) :rtype: :class:`file`, :class:`~sqlalchemy_imageattach.file.FileProxy`, file-like object
python
train
45.5
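A hedged usage sketch; `user.picture` stands in for any image-attached entity, and the store paths are placeholders, none of which appear in the sampled code:

from sqlalchemy_imageattach.context import store_context
from sqlalchemy_imageattach.stores.fs import FileSystemStore

store = FileSystemStore(path='/var/lib/images',             # placeholder paths
                        base_url='http://img.example.com/')
with store_context(store):                 # binds the store for this block
    with user.picture.open_file() as f:    # seekable/buffered only if use_seek=True
        data = f.read()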
FutunnOpen/futuquant
futuquant/trade/trade_query.py
https://github.com/FutunnOpen/futuquant/blob/1512b321845f92ec9c578ce2689aa4e8482669e4/futuquant/trade/trade_query.py#L329-L340
def unpack_rsp(cls, rsp_pb):
    """Convert from PLS response to user response"""
    if rsp_pb.retType != RET_OK:
        return RET_ERROR, rsp_pb.retMsg, None

    order_id = str(rsp_pb.s2c.orderID)
    modify_order_list = [{
        'trd_env': TRADE.REV_TRD_ENV_MAP[rsp_pb.s2c.header.trdEnv],
        'order_id': order_id
    }]
    return RET_OK, "", modify_order_list
Convert from PLS response to user response
python
train
33.166667
googlefonts/fontbakery
Lib/fontbakery/profiles/universal.py
https://github.com/googlefonts/fontbakery/blob/b355aea2e619a4477769e060d24c32448aa65399/Lib/fontbakery/profiles/universal.py#L45-L61
def com_google_fonts_check_name_trailing_spaces(ttFont):
    """Name table records must not have trailing spaces."""
    failed = False
    for name_record in ttFont['name'].names:
        name_string = name_record.toUnicode()
        if name_string != name_string.strip():
            failed = True
            name_key = tuple([name_record.platformID, name_record.platEncID,
                              name_record.langID, name_record.nameID])
            shortened_str = name_record.toUnicode()
            if len(shortened_str) > 20:
                shortened_str = shortened_str[:10] + "[...]" + shortened_str[-10:]
            yield FAIL, (f"Name table record with key = {name_key} has"
                         " trailing spaces that must be removed:"
                         f" '{shortened_str}'")
    if not failed:
        yield PASS, ("No trailing spaces on name table entries.")
Name table records must not have trailing spaces.
python
train
47.117647
tango-controls/pytango
tango/databaseds/database.py
https://github.com/tango-controls/pytango/blob/9cf78c517c9cdc1081ff6d080a9646a740cc1d36/tango/databaseds/database.py#L582-L594
def DbGetClassPropertyList(self, argin):
    """ Get property list for a given Tango class with a specified filter

    :param argin: The filter
    :type: tango.DevString
    :return: Property name list
    :rtype: tango.DevVarStringArray """
    self._log.debug("In DbGetClassPropertyList()")
    if not argin:
        argin = "%"
    else:
        argin = replace_wildcard(argin)
    return self.db.get_class_property_list(argin)
Get property list for a given Tango class with a specified filter :param argin: The filter :type: tango.DevString :return: Property name list :rtype: tango.DevVarStringArray
python
train
35.692308
wolfhong/formic
formic/formic.py
https://github.com/wolfhong/formic/blob/0d81eb88dcbb6fa705194fc6ccf2993f4abbaa76/formic/formic.py#L579-L590
def _to_string(self):
    """Implemented a function for __str__ and __repr__ to use, but
    which prevents infinite recursion when migrating to Python 3"""
    if self.sections:
        start = "/" if self.bound_start else "**/"
        sections = "/**/".join(str(section) for section in self.sections)
        end = "" if self.bound_end else "/**"
    else:
        start = ""
        sections = ""
        end = "" if self.bound_end else "**"
    return "{0}{1}{2}/{3}".format(start, sections, end, str(self.file_pattern))
Implemented a function for __str__ and __repr__ to use, but which prevents infinite recursion when migrating to Python 3
python
train
46.5
OCHA-DAP/hdx-python-api
src/hdx/data/hdxobject.py
https://github.com/OCHA-DAP/hdx-python-api/blob/212440f54f73805826a16db77dbcb6033b18a313/src/hdx/data/hdxobject.py#L585-L600
def _remove_string_from_commastring(self, field, string):
    # type: (str, str) -> bool
    """Remove a string from a comma separated list of strings

    Args:
        field (str): Field containing comma separated list
        string (str): String to remove

    Returns:
        bool: True if string removed or False if not
    """
    commastring = self.data.get(field, '')
    if string in commastring:
        self.data[field] = commastring.replace(string, '')
        return True
    return False
Remove a string from a comma separated list of strings Args: field (str): Field containing comma separated list string (str): String to remove Returns: bool: True if string removed or False if not
python
train
33.75
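A behavior sketch worth noting: because the method above uses a plain str.replace, removing an inner element leaves its surrounding commas behind (`obj` is a hypothetical HDXObject, not defined in the sample):

obj.data = {'tags': 'health,education,water'}
obj._remove_string_from_commastring('tags', 'education')
# obj.data['tags'] is now 'health,,water' -- the delimiters are not collapsed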
boriel/zxbasic
arch/zx48k/backend/__init__.py
https://github.com/boriel/zxbasic/blob/23b28db10e41117805bdb3c0f78543590853b132/arch/zx48k/backend/__init__.py#L1419-L1430
def _retstr(ins):
    """ Returns from a procedure / function a string pointer (16bits) value
    """
    tmp, output = _str_oper(ins.quad[1], no_exaf=True)

    if not tmp:
        output.append('call __LOADSTR')
        REQUIRES.add('loadstr.asm')

    output.append('#pragma opt require hl')
    output.append('jp %s' % str(ins.quad[2]))
    return output
Returns from a procedure / function a string pointer (16bits) value
python
train
28.916667
CI-WATER/gsshapy
gsshapy/orm/spn.py
https://github.com/CI-WATER/gsshapy/blob/00fd4af0fd65f1614d75a52fe950a04fb0867f4c/gsshapy/orm/spn.py#L105-L122
def _write(self, session, openFile, replaceParamFile):
    """
    Storm Pipe Network File Write to File Method
    """
    # Retrieve Connection objects and write to file
    connections = self.connections
    self._writeConnections(connections=connections,
                           fileObject=openFile)

    # Retrieve SuperJunction objects and write to file
    sjuncs = self.superJunctions
    self._writeSuperJunctions(superJunctions=sjuncs,
                              fileObject=openFile)

    # Retrieve SuperLink objects and write to file
    slinks = self.superLinks
    self._writeSuperLinks(superLinks=slinks,
                          fileObject=openFile)
Storm Pipe Network File Write to File Method
python
train
39.722222
knipknap/exscript
Exscript/queue.py
https://github.com/knipknap/exscript/blob/72718eee3e87b345d5a5255be9824e867e42927b/Exscript/queue.py#L305-L329
def _create_pipe(self):
    """
    Creates a new pipe and returns the child end of the connection.
    To request an account from the pipe, use::

        pipe = queue._create_pipe()

        # Let the account manager choose an account.
        pipe.send(('acquire-account-for-host', host))
        account = pipe.recv()
        ...
        pipe.send(('release-account', account.id()))

        # Or acquire a specific account.
        pipe.send(('acquire-account', account.id()))
        account = pipe.recv()
        ...
        pipe.send(('release-account', account.id()))

        pipe.close()
    """
    child = _PipeHandler(self.account_manager)
    self.pipe_handlers[id(child)] = child
    child.start()
    return child.to_parent
Creates a new pipe and returns the child end of the connection. To request an account from the pipe, use:: pipe = queue._create_pipe() # Let the account manager choose an account. pipe.send(('acquire-account-for-host', host)) account = pipe.recv() ... pipe.send(('release-account', account.id())) # Or acquire a specific account. pipe.send(('acquire-account', account.id())) account = pipe.recv() ... pipe.send(('release-account', account.id())) pipe.close()
python
train
31.84
dancsalo/TensorBase
tensorbase/stoch.py
https://github.com/dancsalo/TensorBase/blob/3d42a326452bd03427034916ff2fb90730020204/tensorbase/stoch.py#L50-L54
def compute_samples(self):
    """ Sample from a Normal distribution with inferred mu and std """
    eps = tf.random_normal([self.batch_size, self.eq_samples, self.iw_samples, self.num_latent])
    z = tf.reshape(eps * self.std + self.mu, [-1, self.num_latent])
    return z
Sample from a Normal distribution with inferred mu and std
python
train
57.4
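A standalone numpy sketch of the same reparameterization trick (z = mu + std * eps draws from N(mu, std^2) while keeping mu and std in the differentiable path); all sizes and broadcast shapes here are hypothetical, not taken from the sampled class:

import numpy as np

batch, eq, iw, latent = 2, 3, 4, 5
mu = np.zeros((batch, 1, 1, latent))      # stands in for the inferred mean
std = np.ones((batch, 1, 1, latent))      # stands in for the inferred std
eps = np.random.standard_normal((batch, eq, iw, latent))
z = (eps * std + mu).reshape(-1, latent)  # shape (batch*eq*iw, latent), as above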
fprimex/zdesk
zdesk/zdesk_api.py
https://github.com/fprimex/zdesk/blob/851611c13b4d530e9df31390b3ec709baf0a0188/zdesk/zdesk_api.py#L2469-L2473
def oauth_token_show(self, id, **kwargs):
    "https://developer.zendesk.com/rest_api/docs/core/oauth_tokens#show-token"
    api_path = "/api/v2/oauth/tokens/{id}.json"
    api_path = api_path.format(id=id)
    return self.call(api_path, **kwargs)
https://developer.zendesk.com/rest_api/docs/core/oauth_tokens#show-token
python
train
51.8
cs50/lib50
lib50/_api.py
https://github.com/cs50/lib50/blob/941767f6c0a3b81af0cdea48c25c8d5a761086eb/lib50/_api.py#L614-L634
def _authenticate_ssh(org):
    """Try authenticating via ssh, if successful yields a User, otherwise raises Error."""
    # Try to get username from git config
    username = os.environ.get(f"{org.upper()}_USERNAME")
    # Require ssh-agent
    child = pexpect.spawn("ssh -T [email protected]", encoding="utf8")
    # GitHub prints 'Hi {username}!...' when attempting to get shell access
    i = child.expect(["Hi (.+)! You've successfully authenticated",
                      "Enter passphrase for key",
                      "Permission denied",
                      "Are you sure you want to continue connecting"])
    child.close()
    if i == 0:
        if username is None:
            username = child.match.groups()[0]
    else:
        return None
    return User(name=username, repo=f"[email protected]:{org}/{username}")
[ "def", "_authenticate_ssh", "(", "org", ")", ":", "# Try to get username from git config", "username", "=", "os", ".", "environ", ".", "get", "(", "f\"{org.upper()}_USERNAME\"", ")", "# Require ssh-agent", "child", "=", "pexpect", ".", "spawn", "(", "\"ssh -T [email protected]\"", ",", "encoding", "=", "\"utf8\"", ")", "# GitHub prints 'Hi {username}!...' when attempting to get shell access", "i", "=", "child", ".", "expect", "(", "[", "\"Hi (.+)! You've successfully authenticated\"", ",", "\"Enter passphrase for key\"", ",", "\"Permission denied\"", ",", "\"Are you sure you want to continue connecting\"", "]", ")", "child", ".", "close", "(", ")", "if", "i", "==", "0", ":", "if", "username", "is", "None", ":", "username", "=", "child", ".", "match", ".", "groups", "(", ")", "[", "0", "]", "else", ":", "return", "None", "return", "User", "(", "name", "=", "username", ",", "repo", "=", "f\"[email protected]:{org}/{username}\"", ")" ]
Try authenticating via ssh, if successful yields a User, otherwise raises Error.
[ "Try", "authenticating", "via", "ssh", "if", "succesful", "yields", "a", "User", "otherwise", "raises", "Error", "." ]
python
train
39.285714
cykerway/logging-ext
logging_ext/__init__.py
https://github.com/cykerway/logging-ext/blob/ed6700bdd602fa26276e1f194d255e74c7f255b4/logging_ext/__init__.py#L24-L30
def d(msg, *args, **kwargs): ''' log a message at debug level; ''' return logging.log(DEBUG, msg, *args, **kwargs)
[ "def", "d", "(", "msg", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "return", "logging", ".", "log", "(", "DEBUG", ",", "msg", ",", "*", "args", ",", "*", "*", "kwargs", ")" ]
log a message at debug level;
[ "log", "a", "message", "at", "debug", "level", ";" ]
python
train
18
envi-idl/envipyarclib
envipyarclib/system.py
https://github.com/envi-idl/envipyarclib/blob/90135652510c3d53c5f51177252c1fea2639bf22/envipyarclib/system.py#L20-L25
def appdata_roaming_dir(): """Returns the roaming AppData directory for the installed ArcGIS Desktop.""" install = arcpy.GetInstallInfo('desktop') app_data = arcpy.GetSystemEnvironment("APPDATA") product_dir = ''.join((install['ProductName'], major_version())) return os.path.join(app_data, 'ESRI', product_dir)
[ "def", "appdata_roaming_dir", "(", ")", ":", "install", "=", "arcpy", ".", "GetInstallInfo", "(", "'desktop'", ")", "app_data", "=", "arcpy", ".", "GetSystemEnvironment", "(", "\"APPDATA\"", ")", "product_dir", "=", "''", ".", "join", "(", "(", "install", "[", "'ProductName'", "]", ",", "major_version", "(", ")", ")", ")", "return", "os", ".", "path", ".", "join", "(", "app_data", ",", "'ESRI'", ",", "product_dir", ")" ]
Returns the roaming AppData directory for the installed ArcGIS Desktop.
[ "Returns", "the", "roaming", "AppData", "directory", "for", "the", "installed", "ArcGIS", "Desktop", "." ]
python
train
54.333333
geertj/gruvi
lib/gruvi/process.py
https://github.com/geertj/gruvi/blob/1d77ca439600b6ea7a19aa1ee85dca0f3be3f3f8/lib/gruvi/process.py#L282-L291
def terminate(self): """Terminate the child process. It is not an error to call this method when the child has already exited. """ try: self.send_signal(signal.SIGTERM) except pyuv.error.ProcessError as e: if e.args[0] != pyuv.errno.UV_ESRCH: raise
[ "def", "terminate", "(", "self", ")", ":", "try", ":", "self", ".", "send_signal", "(", "signal", ".", "SIGTERM", ")", "except", "pyuv", ".", "error", ".", "ProcessError", "as", "e", ":", "if", "e", ".", "args", "[", "0", "]", "!=", "pyuv", ".", "errno", ".", "UV_ESRCH", ":", "raise" ]
Terminate the child process. It is not an error to call this method when the child has already exited.
[ "Terminate", "the", "child", "process", "." ]
python
train
32
saltstack/salt
salt/modules/status.py
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/status.py#L1495-L1516
def all_status(): ''' Return a composite of all status data and info for this minion. Warning: There is a LOT here! CLI Example: .. code-block:: bash salt '*' status.all_status ''' return {'cpuinfo': cpuinfo(), 'cpustats': cpustats(), 'diskstats': diskstats(), 'diskusage': diskusage(), 'loadavg': loadavg(), 'meminfo': meminfo(), 'netdev': netdev(), 'netstats': netstats(), 'uptime': uptime(), 'vmstats': vmstats(), 'w': w()}
[ "def", "all_status", "(", ")", ":", "return", "{", "'cpuinfo'", ":", "cpuinfo", "(", ")", ",", "'cpustats'", ":", "cpustats", "(", ")", ",", "'diskstats'", ":", "diskstats", "(", ")", ",", "'diskusage'", ":", "diskusage", "(", ")", ",", "'loadavg'", ":", "loadavg", "(", ")", ",", "'meminfo'", ":", "meminfo", "(", ")", ",", "'netdev'", ":", "netdev", "(", ")", ",", "'netstats'", ":", "netstats", "(", ")", ",", "'uptime'", ":", "uptime", "(", ")", ",", "'vmstats'", ":", "vmstats", "(", ")", ",", "'w'", ":", "w", "(", ")", "}" ]
Return a composite of all status data and info for this minion. Warning: There is a LOT here! CLI Example: .. code-block:: bash salt '*' status.all_status
[ "Return", "a", "composite", "of", "all", "status", "data", "and", "info", "for", "this", "minion", ".", "Warning", ":", "There", "is", "a", "LOT", "here!" ]
python
train
25.636364
Neurita/boyle
boyle/files/search.py
https://github.com/Neurita/boyle/blob/2dae7199849395a209c887d5f30506e1de8a9ad9/boyle/files/search.py#L84-L107
def get_file_list(file_dir, regex=''): """ Creates a list of files that match the regex within file_dir. The list of files will have file_dir as path prefix. Parameters ---------- @param file_dir: @param regex: Returns: -------- List of paths to files that match the regex """ file_list = os.listdir(file_dir) file_list.sort() if regex: file_list = search_list(file_list, regex) file_list = [op.join(file_dir, fname) for fname in file_list] return file_list
[ "def", "get_file_list", "(", "file_dir", ",", "regex", "=", "''", ")", ":", "file_list", "=", "os", ".", "listdir", "(", "file_dir", ")", "file_list", ".", "sort", "(", ")", "if", "regex", ":", "file_list", "=", "search_list", "(", "file_list", ",", "regex", ")", "file_list", "=", "[", "op", ".", "join", "(", "file_dir", ",", "fname", ")", "for", "fname", "in", "file_list", "]", "return", "file_list" ]
Creates a list of files that match the regex within file_dir. The list of files will have file_dir as path prefix. Parameters ---------- @param file_dir: @param regex: Returns: -------- List of paths to files that match the regex
[ "Creates", "a", "list", "of", "files", "that", "match", "the", "search_regex", "within", "file_dir", ".", "The", "list", "of", "files", "will", "have", "file_dir", "as", "path", "prefix", "." ]
python
valid
22.208333
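A self-contained sketch of the same pattern as get_file_list (sorted listing, optional regex filter, paths joined onto the directory), using only the standard library in place of boyle's search_list helper:

import os
import os.path as op
import re
import tempfile

# Throwaway directory with a few files to list.
d = tempfile.mkdtemp()
for name in ('a.nii', 'b.nii', 'notes.txt'):
    open(op.join(d, name), 'w').close()

file_list = sorted(os.listdir(d))                              # sorted listing
file_list = [f for f in file_list if re.search(r'\.nii$', f)]  # regex filter
file_list = [op.join(d, f) for f in file_list]                 # prefix with file_dir
print(file_list)  # the two .nii paths under the temp directory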
angr/angr
angr/sim_manager.py
https://github.com/angr/angr/blob/4e2f97d56af5419ee73bdb30482c8dd8ff5f3e40/angr/sim_manager.py#L505-L516
def drop(self, filter_func=None, stash='active'): """ Drops states from a stash. This is an alias for move(), with defaults for the stashes. :param filter_func: Drop states that match this filter. Should be a function that takes a state and returns True or False. (default: drop all states) :param stash: Drop matching states from this stash. (default: 'active') :returns: The simulation manager, for chaining. :rtype: SimulationManager """ return self.move(stash, self.DROP, filter_func=filter_func)
[ "def", "drop", "(", "self", ",", "filter_func", "=", "None", ",", "stash", "=", "'active'", ")", ":", "return", "self", ".", "move", "(", "stash", ",", "self", ".", "DROP", ",", "filter_func", "=", "filter_func", ")" ]
Drops states from a stash. This is an alias for move(), with defaults for the stashes. :param filter_func: Drop states that match this filter. Should be a function that takes a state and returns True or False. (default: drop all states) :param stash: Drop matching states from this stash. (default: 'active') :returns: The simulation manager, for chaining. :rtype: SimulationManager
[ "Drops", "states", "from", "a", "stash", ".", "This", "is", "an", "alias", "for", "move", "()", "with", "defaults", "for", "the", "stashes", "." ]
python
train
50.916667
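A hedged usage sketch for drop; it assumes a Linux host where /bin/true exists, and the cutoff address is purely illustrative:

import angr

proj = angr.Project('/bin/true', auto_load_libs=False)
simgr = proj.factory.simulation_manager()
simgr.run(n=3)  # step a little so there is something to drop

# Drop active states below an illustrative address, then clear a whole stash.
simgr.drop(filter_func=lambda s: s.addr < 0x400000)
simgr.drop(stash='deadended')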
matplotlib/cmocean
cmocean/data.py
https://github.com/matplotlib/cmocean/blob/37edd4a209a733d87dea7fed9eb22adc1d5a57c8/cmocean/data.py#L79-L115
def plot_data(): '''Plot sample data up with the fancy colormaps. ''' var = ['temp', 'oxygen', 'salinity', 'fluorescence-ECO', 'density', 'PAR', 'turbidity', 'fluorescence-CDOM'] # colorbar limits for each property lims = np.array([[26, 33], [0, 10], [0, 36], [0, 6], [1005, 1025], [0, 0.6], [0, 2], [0, 9]]) # reasonable values # lims = np.array([[20,36], [26,33], [1.5,5.6], [0,4], [0,9], [0,1.5]]) # values to show colormaps for fname in fnames: fig, axes = plt.subplots(nrows=4, ncols=2) fig.set_size_inches(20, 10) fig.subplots_adjust(top=0.95, bottom=0.01, left=0.2, right=0.99, wspace=0.0, hspace=0.07) i = 0 for ax, Var, cmap in zip(axes.flat, var, cmaps): # loop through data to plot up # get variable data lat, lon, z, data = test.read(Var, fname) map1 = ax.scatter(lat, -z, c=data, cmap=cmap, s=10, linewidths=0., vmin=lims[i, 0], vmax=lims[i, 1]) # no stupid offset y_formatter = mpl.ticker.ScalarFormatter(useOffset=False) ax.xaxis.set_major_formatter(y_formatter) if i == 6: ax.set_xlabel('Latitude [degrees]') ax.set_ylabel('Depth [m]') else: ax.set_xticklabels([]) ax.set_yticklabels([]) ax.set_ylim(-z.max(), 0) ax.set_xlim(lat.min(), lat.max()) cb = plt.colorbar(map1, ax=ax, pad=0.02) cb.set_label(cmap.name + ' [' + '$' + cmap.units + '$]') i += 1 fig.savefig('figures/' + fname.split('.')[0] + '.png', bbox_inches='tight')
[ "def", "plot_data", "(", ")", ":", "var", "=", "[", "'temp'", ",", "'oxygen'", ",", "'salinity'", ",", "'fluorescence-ECO'", ",", "'density'", ",", "'PAR'", ",", "'turbidity'", ",", "'fluorescence-CDOM'", "]", "# colorbar limits for each property", "lims", "=", "np", ".", "array", "(", "[", "[", "26", ",", "33", "]", ",", "[", "0", ",", "10", "]", ",", "[", "0", ",", "36", "]", ",", "[", "0", ",", "6", "]", ",", "[", "1005", ",", "1025", "]", ",", "[", "0", ",", "0.6", "]", ",", "[", "0", ",", "2", "]", ",", "[", "0", ",", "9", "]", "]", ")", "# reasonable values", "# lims = np.array([[20,36], [26,33], [1.5,5.6], [0,4], [0,9], [0,1.5]]) # values to show colormaps", "for", "fname", "in", "fnames", ":", "fig", ",", "axes", "=", "plt", ".", "subplots", "(", "nrows", "=", "4", ",", "ncols", "=", "2", ")", "fig", ".", "set_size_inches", "(", "20", ",", "10", ")", "fig", ".", "subplots_adjust", "(", "top", "=", "0.95", ",", "bottom", "=", "0.01", ",", "left", "=", "0.2", ",", "right", "=", "0.99", ",", "wspace", "=", "0.0", ",", "hspace", "=", "0.07", ")", "i", "=", "0", "for", "ax", ",", "Var", ",", "cmap", "in", "zip", "(", "axes", ".", "flat", ",", "var", ",", "cmaps", ")", ":", "# loop through data to plot up", "# get variable data", "lat", ",", "lon", ",", "z", ",", "data", "=", "test", ".", "read", "(", "Var", ",", "fname", ")", "map1", "=", "ax", ".", "scatter", "(", "lat", ",", "-", "z", ",", "c", "=", "data", ",", "cmap", "=", "cmap", ",", "s", "=", "10", ",", "linewidths", "=", "0.", ",", "vmin", "=", "lims", "[", "i", ",", "0", "]", ",", "vmax", "=", "lims", "[", "i", ",", "1", "]", ")", "# no stupid offset", "y_formatter", "=", "mpl", ".", "ticker", ".", "ScalarFormatter", "(", "useOffset", "=", "False", ")", "ax", ".", "xaxis", ".", "set_major_formatter", "(", "y_formatter", ")", "if", "i", "==", "6", ":", "ax", ".", "set_xlabel", "(", "'Latitude [degrees]'", ")", "ax", ".", "set_ylabel", "(", "'Depth [m]'", ")", "else", ":", "ax", ".", "set_xticklabels", "(", "[", "]", ")", "ax", ".", "set_yticklabels", "(", "[", "]", ")", "ax", ".", "set_ylim", "(", "-", "z", ".", "max", "(", ")", ",", "0", ")", "ax", ".", "set_xlim", "(", "lat", ".", "min", "(", ")", ",", "lat", ".", "max", "(", ")", ")", "cb", "=", "plt", ".", "colorbar", "(", "map1", ",", "ax", "=", "ax", ",", "pad", "=", "0.02", ")", "cb", ".", "set_label", "(", "cmap", ".", "name", "+", "' ['", "+", "'$'", "+", "cmap", ".", "units", "+", "'$]'", ")", "i", "+=", "1", "fig", ".", "savefig", "(", "'figures/'", "+", "fname", ".", "split", "(", "'.'", ")", "[", "0", "]", "+", "'.png'", ",", "bbox_inches", "=", "'tight'", ")" ]
Plot sample data up with the fancy colormaps.
[ "Plot", "sample", "data", "up", "with", "the", "fancy", "colormaps", "." ]
python
train
43.540541
jilljenn/tryalgo
tryalgo/matrix_chain_mult.py
https://github.com/jilljenn/tryalgo/blob/89a4dd9655e7b6b0a176f72b4c60d0196420dfe1/tryalgo/matrix_chain_mult.py#L35-L43
def matrix_chain_mult(M): """Matrix chain multiplication :param M: list of matrices :returns: M[0] * ... * M[-1], computed in time optimal order :complexity: whatever is needed by the multiplications """ opt, arg = matrix_mult_opt_order(M) return _apply_order(M, arg, 0, len(M)-1)
[ "def", "matrix_chain_mult", "(", "M", ")", ":", "opt", ",", "arg", "=", "matrix_mult_opt_order", "(", "M", ")", "return", "_apply_order", "(", "M", ",", "arg", ",", "0", ",", "len", "(", "M", ")", "-", "1", ")" ]
Matrix chain multiplication :param M: list of matrices :returns: M[0] * ... * M[-1], computed in time optimal order :complexity: whatever is needed by the multiplications
[ "Matrix", "chain", "multiplication" ]
python
train
33.444444
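A small usage sketch for matrix_chain_mult with plain nested-list matrices (assuming, as elsewhere in tryalgo, that a matrix is a list of rows); shapes (2x3) * (3x4) * (4x1) make the grouping choice visible:

from tryalgo.matrix_chain_mult import matrix_chain_mult

A = [[1, 2, 3], [4, 5, 6]]                      # 2 x 3
B = [[1, 0, 0, 0], [0, 1, 0, 0], [0, 0, 1, 0]]  # 3 x 4
C = [[1], [2], [3], [4]]                        # 4 x 1
print(matrix_chain_mult([A, B, C]))             # [[14], [32]] expected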
Netflix-Skunkworks/swag-client
swag_client/backends/dynamodb.py
https://github.com/Netflix-Skunkworks/swag-client/blob/e43816a85c4f48011cf497a4eae14f9df71fee0f/swag_client/backends/dynamodb.py#L39-L49
def delete(self, item, dry_run=None): """Deletes item from the table.""" logger.debug('Deleting item. Item: {item} Table: {namespace}'.format( item=item, namespace=self.namespace )) if not dry_run: self.table.delete_item(Key={'id': item['id']}) return item
[ "def", "delete", "(", "self", ",", "item", ",", "dry_run", "=", "None", ")", ":", "logger", ".", "debug", "(", "'Deleting item. Item: {item} Table: {namespace}'", ".", "format", "(", "item", "=", "item", ",", "namespace", "=", "self", ".", "namespace", ")", ")", "if", "not", "dry_run", ":", "self", ".", "table", ".", "delete_item", "(", "Key", "=", "{", "'id'", ":", "item", "[", "'id'", "]", "}", ")", "return", "item" ]
Deletes item from the table.
[ "Deletes", "item", "in", "file", "." ]
python
train
28.818182
paylogic/pip-accel
pip_accel/caches/s3.py
https://github.com/paylogic/pip-accel/blob/ccad1b784927a322d996db593403b1d2d2e22666/pip_accel/caches/s3.py#L367-L372
def get(self, section, name, default=None, **kw): """Replacement for :func:`boto.pyami.config.Config.get()`.""" try: return self.unbound_method(self.instance, section, name, **kw) except Exception: return default
[ "def", "get", "(", "self", ",", "section", ",", "name", ",", "default", "=", "None", ",", "*", "*", "kw", ")", ":", "try", ":", "return", "self", ".", "unbound_method", "(", "self", ".", "instance", ",", "section", ",", "name", ",", "*", "*", "kw", ")", "except", "Exception", ":", "return", "default" ]
Replacement for :func:`boto.pyami.config.Config.get()`.
[ "Replacement", "for", ":", "func", ":", "boto", ".", "pyami", ".", "config", ".", "Config", ".", "get", "()", "." ]
python
train
42.5
lambdalisue/django-permission
src/permission/decorators/permission_required.py
https://github.com/lambdalisue/django-permission/blob/580f7a1f857701d06ccf41163f188ac04fbc4fac/src/permission/decorators/permission_required.py#L11-L71
def permission_required(perm, queryset_or_model=None, login_url=None, raise_exception=False): """ Permission check decorator for classbased/functional generic view This decorator works as class, method or function decorator without any modification. DO NOT use ``method_decorator`` or the like, because this decorator uses the ``self`` argument for methods of classbased generic views. Parameters ---------- perm : string A permission string queryset_or_model : queryset or model A queryset or model for finding object. With classbased generic view, ``None`` for using view default queryset. When the view does not define ``get_queryset``, ``queryset``, ``get_object``, or ``object`` then ``obj=None`` is used to check permission. With functional generic view, ``None`` for using passed queryset. When a non-queryset was passed then ``obj=None`` is used to check permission. Examples -------- >>> # As class decorator >>> @permission_required('auth.change_user') >>> class UpdateAuthUserView(UpdateView): ... pass >>> # As method decorator >>> class UpdateAuthUserView(UpdateView): ... @permission_required('auth.change_user') ... def dispatch(self, request, *args, **kwargs): ... pass >>> # As function decorator >>> @permission_required('auth.change_user') >>> def update_auth_user(request, *args, **kwargs): ... pass .. Note:: Classbased generic views are recommended because you can regulate the queryset with the ``get_queryset()`` method. Detecting the object from passed kwargs may not work correctly. """ # convert model to queryset if queryset_or_model and issubclass(queryset_or_model, Model): queryset_or_model = queryset_or_model._default_manager.all() def wrapper(class_or_method): if inspect.isclass(class_or_method): from permission.decorators.classbase import \ permission_required as decorator else: # method_permission_required can handle method or function # correctly. from permission.decorators.methodbase import \ permission_required as decorator return decorator(perm, queryset_or_model, login_url, raise_exception)(class_or_method) return wrapper
[ "def", "permission_required", "(", "perm", ",", "queryset_or_model", "=", "None", ",", "login_url", "=", "None", ",", "raise_exception", "=", "False", ")", ":", "# convert model to queryset", "if", "queryset_or_model", "and", "issubclass", "(", "queryset_or_model", ",", "Model", ")", ":", "queryset_or_model", "=", "queryset_or_model", ".", "_default_manager", ".", "all", "(", ")", "def", "wrapper", "(", "class_or_method", ")", ":", "if", "inspect", ".", "isclass", "(", "class_or_method", ")", ":", "from", "permission", ".", "decorators", ".", "classbase", "import", "permission_required", "as", "decorator", "else", ":", "# method_permission_required can handle method or function", "# correctly.", "from", "permission", ".", "decorators", ".", "methodbase", "import", "permission_required", "as", "decorator", "return", "decorator", "(", "perm", ",", "queryset_or_model", ",", "login_url", ",", "raise_exception", ")", "(", "class_or_method", ")", "return", "wrapper" ]
Permission check decorator for classbased/functional generic view This decorator works as class, method or function decorator without any modification. DO NOT use ``method_decorator`` or the like, because this decorator uses the ``self`` argument for methods of classbased generic views. Parameters ---------- perm : string A permission string queryset_or_model : queryset or model A queryset or model for finding object. With classbased generic view, ``None`` for using view default queryset. When the view does not define ``get_queryset``, ``queryset``, ``get_object``, or ``object`` then ``obj=None`` is used to check permission. With functional generic view, ``None`` for using passed queryset. When a non-queryset was passed then ``obj=None`` is used to check permission. Examples -------- >>> # As class decorator >>> @permission_required('auth.change_user') >>> class UpdateAuthUserView(UpdateView): ... pass >>> # As method decorator >>> class UpdateAuthUserView(UpdateView): ... @permission_required('auth.change_user') ... def dispatch(self, request, *args, **kwargs): ... pass >>> # As function decorator >>> @permission_required('auth.change_user') >>> def update_auth_user(request, *args, **kwargs): ... pass .. Note:: Classbased generic views are recommended because you can regulate the queryset with the ``get_queryset()`` method. Detecting the object from passed kwargs may not work correctly.
[ "Permission", "check", "decorator", "for", "classbased", "/", "functional", "generic", "view" ]
python
train
39.491803
raphaelm/python-fints
fints/parser.py
https://github.com/raphaelm/python-fints/blob/fee55ae37d3182d0adb40507d4acb98b06057e4a/fints/parser.py#L325-L337
def serialize_message(self, message: SegmentSequence) -> bytes: """Serialize a message (as SegmentSequence, list of FinTS3Segment, or FinTS3Segment) into a byte array""" if isinstance(message, FinTS3Segment): message = SegmentSequence([message]) if isinstance(message, (list, tuple, Iterable)): message = SegmentSequence(list(message)) result = [] for segment in message.segments: result.append(self.serialize_segment(segment)) return self.implode_segments(result)
[ "def", "serialize_message", "(", "self", ",", "message", ":", "SegmentSequence", ")", "->", "bytes", ":", "if", "isinstance", "(", "message", ",", "FinTS3Segment", ")", ":", "message", "=", "SegmentSequence", "(", "[", "message", "]", ")", "if", "isinstance", "(", "message", ",", "(", "list", ",", "tuple", ",", "Iterable", ")", ")", ":", "message", "=", "SegmentSequence", "(", "list", "(", "message", ")", ")", "result", "=", "[", "]", "for", "segment", "in", "message", ".", "segments", ":", "result", ".", "append", "(", "self", ".", "serialize_segment", "(", "segment", ")", ")", "return", "self", ".", "implode_segments", "(", "result", ")" ]
Serialize a message (as SegmentSequence, list of FinTS3Segment, or FinTS3Segment) into a byte array
[ "Serialize", "a", "message", "(", "as", "SegmentSequence", "list", "of", "FinTS3Segment", "or", "FinTS3Segment", ")", "into", "a", "byte", "array" ]
python
train
41.461538
FNNDSC/pfmisc
pfmisc/C_snode.py
https://github.com/FNNDSC/pfmisc/blob/960b4d6135fcc50bed0a8e55db2ab1ddad9b99d8/pfmisc/C_snode.py#L662-L681
def mkdir(self, astr_dirSpec): """ Given an <astr_dirSpec> in form '/a/b/c/d/.../f', create that path in the internal stree, creating all intermediate nodes as necessary :param astr_dirSpec: :return: """ if astr_dirSpec != '/' and astr_dirSpec != "//": str_currentPath = self.cwd() l_pathSpec = astr_dirSpec.split('/') if not len(l_pathSpec[0]): self.cd('/') l_nodesDepth = l_pathSpec[1:] else: l_nodesDepth = l_pathSpec for d in l_nodesDepth: self.mkcd(d) self.cd(str_currentPath)
[ "def", "mkdir", "(", "self", ",", "astr_dirSpec", ")", ":", "if", "astr_dirSpec", "!=", "'/'", "and", "astr_dirSpec", "!=", "\"//\"", ":", "str_currentPath", "=", "self", ".", "cwd", "(", ")", "l_pathSpec", "=", "astr_dirSpec", ".", "split", "(", "'/'", ")", "if", "not", "len", "(", "l_pathSpec", "[", "0", "]", ")", ":", "self", ".", "cd", "(", "'/'", ")", "l_nodesDepth", "=", "l_pathSpec", "[", "1", ":", "]", "else", ":", "l_nodesDepth", "=", "l_pathSpec", "for", "d", "in", "l_nodesDepth", ":", "self", ".", "mkcd", "(", "d", ")", "self", ".", "cd", "(", "str_currentPath", ")" ]
Given an <astr_dirSpec> in form '/a/b/c/d/.../f', create that path in the internal stree, creating all intermediate nodes as necessary :param astr_dirSpec: :return:
[ "Given", "an", "<astr_dirSpec", ">", "in", "form", "/", "a", "/", "b", "/", "c", "/", "d", "/", "...", "/", "f", "create", "that", "path", "in", "the", "internal", "stree", "creating", "all", "intermediate", "nodes", "as", "necessary" ]
python
train
36.75
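The "create every intermediate node" contract of stree mkdir mirrors what os.makedirs does for real filesystems; a tiny standalone comparison:

import os
import tempfile

root = tempfile.mkdtemp()
# Like stree.mkdir('/a/b/c/d'): all intermediate directories are created.
os.makedirs(os.path.join(root, 'a', 'b', 'c', 'd'), exist_ok=True)
print(os.listdir(os.path.join(root, 'a', 'b', 'c')))  # ['d']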
JohnVinyard/featureflow
featureflow/feature.py
https://github.com/JohnVinyard/featureflow/blob/7731487b00e38fa4f58c88b7881870fda2d69fdb/featureflow/feature.py#L163-L175
def _can_compute(self, _id, persistence): """ Return true if this feature is stored, or is unstored but can be computed from stored dependencies """ if self.store and self._stored(_id, persistence): return True if self.is_root: return False return all( [n._can_compute(_id, persistence) for n in self.dependencies])
[ "def", "_can_compute", "(", "self", ",", "_id", ",", "persistence", ")", ":", "if", "self", ".", "store", "and", "self", ".", "_stored", "(", "_id", ",", "persistence", ")", ":", "return", "True", "if", "self", ".", "is_root", ":", "return", "False", "return", "all", "(", "[", "n", ".", "_can_compute", "(", "_id", ",", "persistence", ")", "for", "n", "in", "self", ".", "dependencies", "]", ")" ]
Return true if this feature is stored, or is unstored but can be computed from stored dependencies
[ "Return", "true", "if", "this", "feature", "stored", "or", "is", "unstored", "but", "can", "be", "computed", "from", "stored", "dependencies" ]
python
train
30.384615
pingali/dgit
dgitcore/vendor/pluginbase/pluginbase.py
https://github.com/pingali/dgit/blob/ecde01f40b98f0719dbcfb54452270ed2f86686d/dgitcore/vendor/pluginbase/pluginbase.py#L260-L272
def load_plugin(self, name): """This automatically loads a plugin by the given name from the current source and returns the module. This is a convenient alternative to the import statement and saves you from invoking ``__import__`` or a similar function yourself. :param name: the name of the plugin to load. """ if '.' in name: raise ImportError('Plugin names cannot contain dots.') with self: return __import__(self.base.package + '.' + name, globals(), {}, ['__name__'])
[ "def", "load_plugin", "(", "self", ",", "name", ")", ":", "if", "'.'", "in", "name", ":", "raise", "ImportError", "(", "'Plugin names cannot contain dots.'", ")", "with", "self", ":", "return", "__import__", "(", "self", ".", "base", ".", "package", "+", "'.'", "+", "name", ",", "globals", "(", ")", ",", "{", "}", ",", "[", "'__name__'", "]", ")" ]
This automatically loads a plugin by the given name from the current source and returns the module. This is a convenient alternative to the import statement and saves you from invoking ``__import__`` or a similar function yourself. :param name: the name of the plugin to load.
[ "This", "automatically", "loads", "a", "plugin", "by", "the", "given", "name", "from", "the", "current", "source", "and", "returns", "the", "module", ".", "This", "is", "a", "convenient", "alternative", "to", "the", "import", "statement", "and", "saves", "you", "from", "invoking", "__import__", "or", "a", "similar", "function", "yourself", "." ]
python
valid
44.692308
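The vendored pluginbase copy is used like the upstream library; a minimal sketch where the package name, search path, and plugin name are all hypothetical:

from pluginbase import PluginBase

base = PluginBase(package='myapp.plugins')                  # hypothetical package
source = base.make_plugin_source(searchpath=['./plugins'])  # hypothetical path
hello = source.load_plugin('hello')  # imports ./plugins/hello.py as myapp.plugins.hello
hello.setup()                        # assumes the plugin module defines setup()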
numenta/htmresearch
htmresearch/frameworks/poirazi_neuron_model/data_tools.py
https://github.com/numenta/htmresearch/blob/70c096b09a577ea0432c3f3bfff4442d4871b7aa/htmresearch/frameworks/poirazi_neuron_model/data_tools.py#L281-L290
def generate_phase_1(dim = 40): """ The first step in creating datapoints in the Poirazi & Mel model. This returns a vector of dimension dim, with the last four values set to 1 and the rest drawn from a normal distribution. """ phase_1 = numpy.random.normal(0, 1, dim) for i in range(dim - 4, dim): phase_1[i] = 1.0 return phase_1
[ "def", "generate_phase_1", "(", "dim", "=", "40", ")", ":", "phase_1", "=", "numpy", ".", "random", ".", "normal", "(", "0", ",", "1", ",", "dim", ")", "for", "i", "in", "range", "(", "dim", "-", "4", ",", "dim", ")", ":", "phase_1", "[", "i", "]", "=", "1.0", "return", "phase_1" ]
The first step in creating datapoints in the Poirazi & Mel model. This returns a vector of dimension dim, with the last four values set to 1 and the rest drawn from a normal distribution.
[ "The", "first", "step", "in", "creating", "datapoints", "in", "the", "Poirazi", "&", "Mel", "model", ".", "This", "returns", "a", "vector", "of", "dimension", "dim", "with", "the", "last", "four", "values", "set", "to", "1", "and", "the", "rest", "drawn", "from", "a", "normal", "distribution", "." ]
python
train
34.1
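A quick standalone re-creation of generate_phase_1's construction, making the shape and the fixed tail of ones explicit:

import numpy

dim = 40
phase_1 = numpy.random.normal(0, 1, dim)
phase_1[dim - 4:] = 1.0             # same effect as the loop in the record
print(phase_1.shape, phase_1[-4:])  # (40,) [1. 1. 1. 1.]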
Contraz/demosys-py
demosys/geometry/plane.py
https://github.com/Contraz/demosys-py/blob/6466128a3029c4d09631420ccce73024025bd5b6/demosys/geometry/plane.py#L7-L68
def plane_xz(size=(10, 10), resolution=(10, 10)) -> VAO: """ Generates a plane on the xz axis of a specific size and resolution. Normals and texture coordinates are also included. Args: size: (x, y) tuple resolution: (x, y) tuple Returns: A :py:class:`demosys.opengl.vao.VAO` instance """ sx, sz = size rx, rz = resolution dx, dz = sx / rx, sz / rz # step ox, oz = -sx / 2, -sz / 2 # start offset def gen_pos(): for z in range(rz): for x in range(rx): yield ox + x * dx yield 0 yield oz + z * dz def gen_uv(): for z in range(rz): for x in range(rx): yield x / (rx - 1) yield 1 - z / (rz - 1) def gen_normal(): for _ in range(rx * rz): yield 0.0 yield 1.0 yield 0.0 def gen_index(): for z in range(rz - 1): for x in range(rx - 1): # quad poly left yield z * rz + x + 1 yield z * rz + x yield z * rz + x + rx # quad poly right yield z * rz + x + 1 yield z * rz + x + rx yield z * rz + x + rx + 1 pos_data = numpy.fromiter(gen_pos(), dtype=numpy.float32) uv_data = numpy.fromiter(gen_uv(), dtype=numpy.float32) normal_data = numpy.fromiter(gen_normal(), dtype=numpy.float32) index_data = numpy.fromiter(gen_index(), dtype=numpy.uint32) vao = VAO("plane_xz", mode=moderngl.TRIANGLES) vao.buffer(pos_data, '3f', ['in_position']) vao.buffer(uv_data, '2f', ['in_uv']) vao.buffer(normal_data, '3f', ['in_normal']) vao.index_buffer(index_data, index_element_size=4) return vao
[ "def", "plane_xz", "(", "size", "=", "(", "10", ",", "10", ")", ",", "resolution", "=", "(", "10", ",", "10", ")", ")", "->", "VAO", ":", "sx", ",", "sz", "=", "size", "rx", ",", "rz", "=", "resolution", "dx", ",", "dz", "=", "sx", "/", "rx", ",", "sz", "/", "rz", "# step", "ox", ",", "oz", "=", "-", "sx", "/", "2", ",", "-", "sz", "/", "2", "# start offset", "def", "gen_pos", "(", ")", ":", "for", "z", "in", "range", "(", "rz", ")", ":", "for", "x", "in", "range", "(", "rx", ")", ":", "yield", "ox", "+", "x", "*", "dx", "yield", "0", "yield", "oz", "+", "z", "*", "dz", "def", "gen_uv", "(", ")", ":", "for", "z", "in", "range", "(", "rz", ")", ":", "for", "x", "in", "range", "(", "rx", ")", ":", "yield", "x", "/", "(", "rx", "-", "1", ")", "yield", "1", "-", "z", "/", "(", "rz", "-", "1", ")", "def", "gen_normal", "(", ")", ":", "for", "_", "in", "range", "(", "rx", "*", "rz", ")", ":", "yield", "0.0", "yield", "1.0", "yield", "0.0", "def", "gen_index", "(", ")", ":", "for", "z", "in", "range", "(", "rz", "-", "1", ")", ":", "for", "x", "in", "range", "(", "rx", "-", "1", ")", ":", "# quad poly left", "yield", "z", "*", "rz", "+", "x", "+", "1", "yield", "z", "*", "rz", "+", "x", "yield", "z", "*", "rz", "+", "x", "+", "rx", "# quad poly right", "yield", "z", "*", "rz", "+", "x", "+", "1", "yield", "z", "*", "rz", "+", "x", "+", "rx", "yield", "z", "*", "rz", "+", "x", "+", "rx", "+", "1", "pos_data", "=", "numpy", ".", "fromiter", "(", "gen_pos", "(", ")", ",", "dtype", "=", "numpy", ".", "float32", ")", "uv_data", "=", "numpy", ".", "fromiter", "(", "gen_uv", "(", ")", ",", "dtype", "=", "numpy", ".", "float32", ")", "normal_data", "=", "numpy", ".", "fromiter", "(", "gen_normal", "(", ")", ",", "dtype", "=", "numpy", ".", "float32", ")", "index_data", "=", "numpy", ".", "fromiter", "(", "gen_index", "(", ")", ",", "dtype", "=", "numpy", ".", "uint32", ")", "vao", "=", "VAO", "(", "\"plane_xz\"", ",", "mode", "=", "moderngl", ".", "TRIANGLES", ")", "vao", ".", "buffer", "(", "pos_data", ",", "'3f'", ",", "[", "'in_position'", "]", ")", "vao", ".", "buffer", "(", "uv_data", ",", "'2f'", ",", "[", "'in_uv'", "]", ")", "vao", ".", "buffer", "(", "normal_data", ",", "'3f'", ",", "[", "'in_normal'", "]", ")", "vao", ".", "index_buffer", "(", "index_data", ",", "index_element_size", "=", "4", ")", "return", "vao" ]
Generates a plane on the xz axis of a specific size and resolution. Normals and texture coordinates are also included. Args: size: (x, y) tuple resolution: (x, y) tuple Returns: A :py:class:`demosys.opengl.vao.VAO` instance
[ "Generates", "a", "plane", "on", "the", "xz", "axis", "of", "a", "specific", "size", "and", "resolution", ".", "Normals", "and", "texture", "coordinates", "are", "also", "included", "." ]
python
valid
28.241935
gwastro/pycbc
pycbc/tmpltbank/coord_utils.py
https://github.com/gwastro/pycbc/blob/7a64cdd104d263f1b6ea0b01e6841837d05a4cb3/pycbc/tmpltbank/coord_utils.py#L780-L807
def outspiral_loop(N): """ Return a list of points that will loop outwards in a 2D lattice in terms of distance from a central point. So if N=2 this will be [0,0], [0,1], [0,-1],[1,0],[-1,0],[1,1] .... This is useful when you want to loop over a number of bins, but want to start in the center and work outwards. """ # Create a 2D lattice of all points X,Y = numpy.meshgrid(numpy.arange(-N,N+1), numpy.arange(-N,N+1)) # Flatten it X = numpy.ndarray.flatten(X) Y = numpy.ndarray.flatten(Y) # Force to an integer X = numpy.array(X, dtype=int) Y = numpy.array(Y, dtype=int) # Calculate distances G = numpy.sqrt(X**2+Y**2) # Combine back into an array out_arr = numpy.array([X,Y,G]) # And order correctly sorted_out_arr = out_arr[:,out_arr[2].argsort()] return sorted_out_arr[:2,:].T
[ "def", "outspiral_loop", "(", "N", ")", ":", "# Create a 2D lattice of all points", "X", ",", "Y", "=", "numpy", ".", "meshgrid", "(", "numpy", ".", "arange", "(", "-", "N", ",", "N", "+", "1", ")", ",", "numpy", ".", "arange", "(", "-", "N", ",", "N", "+", "1", ")", ")", "# Flatten it", "X", "=", "numpy", ".", "ndarray", ".", "flatten", "(", "X", ")", "Y", "=", "numpy", ".", "ndarray", ".", "flatten", "(", "Y", ")", "# Force to an integer", "X", "=", "numpy", ".", "array", "(", "X", ",", "dtype", "=", "int", ")", "Y", "=", "numpy", ".", "array", "(", "Y", ",", "dtype", "=", "int", ")", "# Calculate distances", "G", "=", "numpy", ".", "sqrt", "(", "X", "**", "2", "+", "Y", "**", "2", ")", "# Combine back into an array", "out_arr", "=", "numpy", ".", "array", "(", "[", "X", ",", "Y", ",", "G", "]", ")", "# And order correctly", "sorted_out_arr", "=", "out_arr", "[", ":", ",", "out_arr", "[", "2", "]", ".", "argsort", "(", ")", "]", "return", "sorted_out_arr", "[", ":", "2", ",", ":", "]", ".", "T" ]
Return a list of points that will loop outwards in a 2D lattice in terms of distance from a central point. So if N=2 this will be [0,0], [0,1], [0,-1],[1,0],[-1,0],[1,1] .... This is useful when you want to loop over a number of bins, but want to start in the center and work outwards.
[ "Return", "a", "list", "of", "points", "that", "will", "loop", "outwards", "in", "a", "2D", "lattice", "in", "terms", "of", "distance", "from", "a", "central", "point", ".", "So", "if", "N", "=", "2", "this", "will", "be", "[", "0", "0", "]", "[", "0", "1", "]", "[", "0", "-", "1", "]", "[", "1", "0", "]", "[", "-", "1", "0", "]", "[", "1", "1", "]", "....", "This", "is", "useful", "when", "you", "want", "to", "loop", "over", "a", "number", "of", "bins", "but", "want", "to", "start", "in", "the", "center", "and", "work", "outwards", "." ]
python
train
30.285714
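The center-outwards ordering of outspiral_loop is easy to sanity-check with a standalone copy of the lattice-and-sort idea:

import numpy

N = 2
X, Y = numpy.meshgrid(numpy.arange(-N, N + 1), numpy.arange(-N, N + 1))
X, Y = X.ravel(), Y.ravel()
order = numpy.argsort(numpy.sqrt(X**2 + Y**2))  # distance from the center
points = numpy.column_stack((X, Y))[order]
print(points[:5])  # [0, 0] first, then the four unit-distance neighbours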
flowersteam/explauto
explauto/interest_model/discrete_progress.py
https://github.com/flowersteam/explauto/blob/cf0f81ecb9f6412f7276a95bd27359000e1e26b6/explauto/interest_model/discrete_progress.py#L36-L43
def sample_given_context(self, c, c_dims): ''' Sample the region with max progress among regions that have the same context c: context value on c_dims dimensions c_dims: w.r.t sensory space dimensions ''' index = self.discrete_progress.sample_given_context(c, c_dims, self.space) return self.space.rand_value(index).flatten()[list(set(range(len(self.space.cardinalities))) - set(c_dims))]
[ "def", "sample_given_context", "(", "self", ",", "c", ",", "c_dims", ")", ":", "index", "=", "self", ".", "discrete_progress", ".", "sample_given_context", "(", "c", ",", "c_dims", ",", "self", ".", "space", ")", "return", "self", ".", "space", ".", "rand_value", "(", "index", ")", ".", "flatten", "(", ")", "[", "list", "(", "set", "(", "range", "(", "len", "(", "self", ".", "space", ".", "cardinalities", ")", ")", ")", "-", "set", "(", "c_dims", ")", ")", "]" ]
Sample the region with max progress among regions that have the same context c: context value on c_dims dimensions c_dims: w.r.t sensory space dimensions
[ "Sample", "the", "region", "with", "max", "progress", "among", "regions", "that", "have", "the", "same", "context", "c", ":", "context", "value", "on", "c_dims", "dimensions", "c_dims", ":", "w", ".", "r", ".", "t", "sensory", "space", "dimensions" ]
python
train
55.625
seatgeek/businesstime
businesstime/__init__.py
https://github.com/seatgeek/businesstime/blob/3f3efd8aed7fc98539c54543bc05ab83587bb180/businesstime/__init__.py#L59-L70
def iterdays(self, d1, d2): """ Date iterator returning dates in d1 <= x < d2 """ curr = datetime.datetime.combine(d1, datetime.time()) end = datetime.datetime.combine(d2, datetime.time()) if d1.date() == d2.date(): yield curr return while curr < end: yield curr curr = curr + datetime.timedelta(days=1)
[ "def", "iterdays", "(", "self", ",", "d1", ",", "d2", ")", ":", "curr", "=", "datetime", ".", "datetime", ".", "combine", "(", "d1", ",", "datetime", ".", "time", "(", ")", ")", "end", "=", "datetime", ".", "datetime", ".", "combine", "(", "d2", ",", "datetime", ".", "time", "(", ")", ")", "if", "d1", ".", "date", "(", ")", "==", "d2", ".", "date", "(", ")", ":", "yield", "curr", "return", "while", "curr", "<", "end", ":", "yield", "curr", "curr", "=", "curr", "+", "datetime", ".", "timedelta", "(", "days", "=", "1", ")" ]
Date iterator returning dates in d1 <= x < d2
[ "Date", "iterator", "returning", "dates", "in", "d1", "<", "=", "x", "<", "d2" ]
python
train
33
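The half-open d1 <= x < d2 contract of iterdays can be verified with a standalone copy of the generator (self dropped):

import datetime

def iterdays(d1, d2):
    curr = datetime.datetime.combine(d1, datetime.time())
    end = datetime.datetime.combine(d2, datetime.time())
    if d1.date() == d2.date():
        yield curr
        return
    while curr < end:
        yield curr
        curr += datetime.timedelta(days=1)

days = list(iterdays(datetime.datetime(2024, 1, 1), datetime.datetime(2024, 1, 4)))
print([d.date().isoformat() for d in days])
# ['2024-01-01', '2024-01-02', '2024-01-03'] -- the end date is excluded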
google/python_portpicker
src/portserver.py
https://github.com/google/python_portpicker/blob/f737189ea7a2d4b97048a2f4e37609e293b03546/src/portserver.py#L304-L318
def _parse_port_ranges(pool_str): """Given a 'N-P,X-Y' description of port ranges, return a set of ints.""" ports = set() for range_str in pool_str.split(','): try: a, b = range_str.split('-', 1) start, end = int(a), int(b) except ValueError: log.error('Ignoring unparsable port range %r.', range_str) continue if start < 1 or end > 65535: log.error('Ignoring out of bounds port range %r.', range_str) continue ports.update(set(range(start, end + 1))) return ports
[ "def", "_parse_port_ranges", "(", "pool_str", ")", ":", "ports", "=", "set", "(", ")", "for", "range_str", "in", "pool_str", ".", "split", "(", "','", ")", ":", "try", ":", "a", ",", "b", "=", "range_str", ".", "split", "(", "'-'", ",", "1", ")", "start", ",", "end", "=", "int", "(", "a", ")", ",", "int", "(", "b", ")", "except", "ValueError", ":", "log", ".", "error", "(", "'Ignoring unparsable port range %r.'", ",", "range_str", ")", "continue", "if", "start", "<", "1", "or", "end", ">", "65535", ":", "log", ".", "error", "(", "'Ignoring out of bounds port range %r.'", ",", "range_str", ")", "continue", "ports", ".", "update", "(", "set", "(", "range", "(", "start", ",", "end", "+", "1", ")", ")", ")", "return", "ports" ]
Given a 'N-P,X-Y' description of port ranges, return a set of ints.
[ "Given", "a", "N", "-", "P", "X", "-", "Y", "description", "of", "port", "ranges", "return", "a", "set", "of", "ints", "." ]
python
train
38
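Run standalone, with a module-level logger standing in for the server's log object, the parser quietly skips malformed and out-of-range pieces:

import logging

log = logging.getLogger('portserver')

def _parse_port_ranges(pool_str):
    ports = set()
    for range_str in pool_str.split(','):
        try:
            a, b = range_str.split('-', 1)
            start, end = int(a), int(b)
        except ValueError:
            log.error('Ignoring unparsable port range %r.', range_str)
            continue
        if start < 1 or end > 65535:
            log.error('Ignoring out of bounds port range %r.', range_str)
            continue
        ports.update(range(start, end + 1))
    return ports

print(sorted(_parse_port_ranges('8000-8002,bogus,70000-70001')))  # [8000, 8001, 8002]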
xflr6/bitsets
bitsets/combos.py
https://github.com/xflr6/bitsets/blob/ddcfe17e7c7a11f71f1c6764b2cecf7db05d9cdf/bitsets/combos.py#L41-L70
def reverse_shortlex(end, other, excludeend=False): """Yield all intersections of end with other in reverse shortlex order. >>> ['{:03b}'.format(s) for s in reverse_shortlex(0b111, [0b011, 0b101, 0b110])] ['111', '011', '101', '110', '001', '010', '100', '000'] >>> ', '.join(''.join(sorted(s)) ... for s in reverse_shortlex({'a', 'b', 'c', 'd'}, ... [{'b', 'c', 'd'}, {'a', 'c', 'd'}, {'a', 'b', 'd'}, {'a', 'b', 'c'}])) 'abcd, bcd, acd, abd, abc, cd, bd, bc, ad, ac, ab, d, c, b, a, ' >>> assert list(reverse_shortlex({1, 2}, [{1}, {2}], excludeend=True)) == \ [{1}, {2}, set()] """ if not excludeend: yield end queue = collections.deque([(end, other)]) while queue: current, other = queue.popleft() while other: first, other = other[0], other[1:] result = current & first yield result if other: queue.append((result, other))
[ "def", "reverse_shortlex", "(", "end", ",", "other", ",", "excludeend", "=", "False", ")", ":", "if", "not", "excludeend", ":", "yield", "end", "queue", "=", "collections", ".", "deque", "(", "[", "(", "end", ",", "other", ")", "]", ")", "while", "queue", ":", "current", ",", "other", "=", "queue", ".", "popleft", "(", ")", "while", "other", ":", "first", ",", "other", "=", "other", "[", "0", "]", ",", "other", "[", "1", ":", "]", "result", "=", "current", "&", "first", "yield", "result", "if", "other", ":", "queue", ".", "append", "(", "(", "result", ",", "other", ")", ")" ]
Yield all intersections of end with other in reverse shortlex order. >>> ['{:03b}'.format(s) for s in reverse_shortlex(0b111, [0b011, 0b101, 0b110])] ['111', '011', '101', '110', '001', '010', '100', '000'] >>> ', '.join(''.join(sorted(s)) ... for s in reverse_shortlex({'a', 'b', 'c', 'd'}, ... [{'b', 'c', 'd'}, {'a', 'c', 'd'}, {'a', 'b', 'd'}, {'a', 'b', 'c'}])) 'abcd, bcd, acd, abd, abc, cd, bd, bc, ad, ac, ab, d, c, b, a, ' >>> assert list(reverse_shortlex({1, 2}, [{1}, {2}], excludeend=True)) == \ [{1}, {2}, set()]
[ "Yield", "all", "intersections", "of", "end", "with", "other", "in", "reverse", "shortlex", "order", "." ]
python
train
31.666667
Tristramg/mumoro
virtualenv.py
https://github.com/Tristramg/mumoro/blob/e37d6ddb72fd23fb485c80fd8a5cda520ca08187/virtualenv.py#L901-L914
def resolve_interpreter(exe): """ If the executable given isn't an absolute path, search $PATH for the interpreter """ if os.path.abspath(exe) != exe: paths = os.environ.get('PATH', '').split(os.pathsep) for path in paths: if os.path.exists(os.path.join(path, exe)): exe = os.path.join(path, exe) break if not os.path.exists(exe): logger.fatal('The executable %s (from --python=%s) does not exist' % (exe, exe)) sys.exit(3) return exe
[ "def", "resolve_interpreter", "(", "exe", ")", ":", "if", "os", ".", "path", ".", "abspath", "(", "exe", ")", "!=", "exe", ":", "paths", "=", "os", ".", "environ", ".", "get", "(", "'PATH'", ",", "''", ")", ".", "split", "(", "os", ".", "pathsep", ")", "for", "path", "in", "paths", ":", "if", "os", ".", "path", ".", "exists", "(", "os", ".", "path", ".", "join", "(", "path", ",", "exe", ")", ")", ":", "exe", "=", "os", ".", "path", ".", "join", "(", "path", ",", "exe", ")", "break", "if", "not", "os", ".", "path", ".", "exists", "(", "exe", ")", ":", "logger", ".", "fatal", "(", "'The executable %s (from --python=%s) does not exist'", "%", "(", "exe", ",", "exe", ")", ")", "sys", ".", "exit", "(", "3", ")", "return", "exe" ]
If the executable given isn't an absolute path, search $PATH for the interpreter
[ "If", "the", "executable", "given", "isn", "t", "an", "absolute", "path", "search", "$PATH", "for", "the", "interpreter" ]
python
train
37.214286
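The same $PATH search is available in the modern standard library; shutil.which is the usual one-liner equivalent:

import shutil

exe = shutil.which('python3')  # absolute path on the PATH, or None if absent
print(exe)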
apple/turicreate
deps/src/libxml2-2.9.1/python/libxml2.py
https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/deps/src/libxml2-2.9.1/python/libxml2.py#L7421-L7426
def context(self): """Get the xpathContext from an xpathParserContext """ ret = libxml2mod.xmlXPathParserGetContext(self._o) if ret is None:raise xpathError('xmlXPathParserGetContext() failed') __tmp = xpathContext(_obj=ret) return __tmp
[ "def", "context", "(", "self", ")", ":", "ret", "=", "libxml2mod", ".", "xmlXPathParserGetContext", "(", "self", ".", "_o", ")", "if", "ret", "is", "None", ":", "raise", "xpathError", "(", "'xmlXPathParserGetContext() failed'", ")", "__tmp", "=", "xpathContext", "(", "_obj", "=", "ret", ")", "return", "__tmp" ]
Get the xpathContext from an xpathParserContext
[ "Get", "the", "xpathContext", "from", "an", "xpathParserContext" ]
python
train
45.333333
Peter-Slump/python-keycloak-client
src/keycloak/client.py
https://github.com/Peter-Slump/python-keycloak-client/blob/379ae58f3c65892327b0c98c06d4982aa83f357e/src/keycloak/client.py#L45-L56
def session(self): """ Get session object to benefit from connection pooling. http://docs.python-requests.org/en/master/user/advanced/#session-objects :rtype: requests.Session """ if self._session is None: self._session = requests.Session() self._session.headers.update(self._headers) return self._session
[ "def", "session", "(", "self", ")", ":", "if", "self", ".", "_session", "is", "None", ":", "self", ".", "_session", "=", "requests", ".", "Session", "(", ")", "self", ".", "_session", ".", "headers", ".", "update", "(", "self", ".", "_headers", ")", "return", "self", ".", "_session" ]
Get session object to benefit from connection pooling. http://docs.python-requests.org/en/master/user/advanced/#session-objects :rtype: requests.Session
[ "Get", "session", "object", "to", "benefit", "from", "connection", "pooling", "." ]
python
train
31.333333
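The lazily created, header-carrying session in this record is a common pooling pattern; a generic standalone sketch:

import requests

class Client:
    def __init__(self, headers=None):
        self._headers = headers or {}
        self._session = None

    @property
    def session(self):
        # Created once, reused afterwards to benefit from connection pooling.
        if self._session is None:
            self._session = requests.Session()
            self._session.headers.update(self._headers)
        return self._session

client = Client({'Authorization': 'Bearer <token>'})  # placeholder token
assert client.session is client.session  # the same pooled session both times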
TC01/calcpkg
calcrepo/index.py
https://github.com/TC01/calcpkg/blob/5168f606264620a090b42a64354331d208b00d5f/calcrepo/index.py#L211-L255
def searchNamesIndex(self, nameIndex, nameData, searchString, category="", math=False, game=False, extension="", searchFiles=False): """Search the names index for a string and return the namedata""" nameData = {} try: nameFile = open(nameIndex, 'rt') except IOError: self.repo.printd("Error: Unable to read index file " + self.fileIndex) return None count = 1 for line in nameFile: count += 1 if searchString.lower() in line.lower(): #Extension argument if extension in line: nameData[count] = line[:len(line) - 1] else: nameData[count] = None #category arg if category in line and extension in line: nameData[count] = line[:len(line) - 1] else: nameData[count] = None #Both game and math if (game and math): if ("/games/" in line or "/math/" in line or "/science" in line): nameData[count] = line[:len(line) - 1] else: nameData[count] = None #Game option switch elif game: if "/games/" in line: nameData[count] = line[:len(line) - 1] else: nameData[count] = None #Math option switch elif math: if ("/math/" in line or "/science/" in line): nameData[count] = line[:len(line) - 1] else: nameData[count] = None #Close the name index and return nameFile.close() return nameData
[ "def", "searchNamesIndex", "(", "self", ",", "nameIndex", ",", "nameData", ",", "searchString", ",", "category", "=", "\"\"", ",", "math", "=", "False", ",", "game", "=", "False", ",", "extension", "=", "\"\"", ",", "searchFiles", "=", "False", ")", ":", "nameData", "=", "{", "}", "try", ":", "nameFile", "=", "open", "(", "nameIndex", ",", "'rt'", ")", "except", "IOError", ":", "self", ".", "repo", ".", "printd", "(", "\"Error: Unable to read index file \"", "+", "self", ".", "fileIndex", ")", "return", "None", "count", "=", "1", "for", "line", "in", "nameFile", ":", "count", "+=", "1", "if", "searchString", ".", "lower", "(", ")", "in", "line", ".", "lower", "(", ")", ":", "#Extension argument", "if", "extension", "in", "line", ":", "nameData", "[", "count", "]", "=", "line", "[", ":", "len", "(", "line", ")", "-", "1", "]", "else", ":", "nameData", "[", "count", "]", "=", "None", "#category arg", "if", "category", "in", "line", "and", "extension", "in", "line", ":", "nameData", "[", "count", "]", "=", "line", "[", ":", "len", "(", "line", ")", "-", "1", "]", "else", ":", "nameData", "[", "count", "]", "=", "None", "#Both game and math", "if", "(", "game", "and", "math", ")", ":", "if", "(", "\"/games/\"", "in", "line", "or", "\"/math/\"", "in", "line", "or", "\"/science\"", "in", "line", ")", ":", "nameData", "[", "count", "]", "=", "line", "[", ":", "len", "(", "line", ")", "-", "1", "]", "else", ":", "nameData", "[", "count", "]", "=", "None", "#Game option switch", "elif", "game", ":", "if", "\"/games/\"", "in", "line", ":", "nameData", "[", "count", "]", "=", "line", "[", ":", "len", "(", "line", ")", "-", "1", "]", "else", ":", "nameData", "[", "count", "]", "=", "None", "#Math option switch", "elif", "math", ":", "if", "(", "\"/math/\"", "in", "line", "or", "\"/science/\"", "in", "line", ")", ":", "nameData", "[", "count", "]", "=", "line", "[", ":", "len", "(", "line", ")", "-", "1", "]", "else", ":", "nameData", "[", "count", "]", "=", "None", "#Close the name index and return", "nameFile", ".", "close", "(", ")", "return", "nameData" ]
Search the names index for a string and return the namedata
[ "Search", "the", "names", "index", "for", "a", "string", "and", "returns", "the", "namedata" ]
python
train
28.955556
topic2k/pygcgen
pygcgen/fetcher.py
https://github.com/topic2k/pygcgen/blob/c41701815df2c8c3a57fd5f7b8babe702127c8a1/pygcgen/fetcher.py#L56-L78
def fetch_github_token(self): """ Fetch GitHub token. First try to use variable provided by --token option, otherwise try to fetch it from git config and last CHANGELOG_GITHUB_TOKEN env variable. :returns: Nothing """ if not self.options.token: try: for v in GH_CFG_VARS: cmd = ['git', 'config', '--get', '{0}'.format(v)] self.options.token = subprocess.Popen( cmd, stdout=subprocess.PIPE).communicate()[0].strip() if self.options.token: break except (subprocess.CalledProcessError, WindowsError): pass if not self.options.token: self.options.token = os.environ.get(CHANGELOG_GITHUB_TOKEN) if not self.options.token: print(NO_TOKEN_PROVIDED)
[ "def", "fetch_github_token", "(", "self", ")", ":", "if", "not", "self", ".", "options", ".", "token", ":", "try", ":", "for", "v", "in", "GH_CFG_VARS", ":", "cmd", "=", "[", "'git'", ",", "'config'", ",", "'--get'", ",", "'{0}'", ".", "format", "(", "v", ")", "]", "self", ".", "options", ".", "token", "=", "subprocess", ".", "Popen", "(", "cmd", ",", "stdout", "=", "subprocess", ".", "PIPE", ")", ".", "communicate", "(", ")", "[", "0", "]", ".", "strip", "(", ")", "if", "self", ".", "options", ".", "token", ":", "break", "except", "(", "subprocess", ".", "CalledProcessError", ",", "WindowsError", ")", ":", "pass", "if", "not", "self", ".", "options", ".", "token", ":", "self", ".", "options", ".", "token", "=", "os", ".", "environ", ".", "get", "(", "CHANGELOG_GITHUB_TOKEN", ")", "if", "not", "self", ".", "options", ".", "token", ":", "print", "(", "NO_TOKEN_PROVIDED", ")" ]
Fetch GitHub token. First try to use variable provided by --token option, otherwise try to fetch it from git config and last CHANGELOG_GITHUB_TOKEN env variable. :returns: Nothing
[ "Fetch", "GitHub", "token", ".", "First", "try", "to", "use", "variable", "provided", "by", "--", "token", "option", "otherwise", "try", "to", "fetch", "it", "from", "git", "config", "and", "last", "CHANGELOG_GITHUB_TOKEN", "env", "variable", "." ]
python
valid
38.304348
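The git-config fallback chain in fetch_github_token can be sketched standalone; the config key below is hypothetical, and check_output stands in for the record's Popen/communicate dance:

import os
import subprocess

def lookup_token(cfg_vars=('github.token',)):  # hypothetical config key list
    for var in cfg_vars:
        try:
            out = subprocess.check_output(['git', 'config', '--get', var])
        except (subprocess.CalledProcessError, OSError):
            continue  # key unset, or git missing entirely
        if out.strip():
            return out.strip().decode()
    return os.environ.get('CHANGELOG_GITHUB_TOKEN')  # env var fallback

print(lookup_token())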
manns/pyspread
pyspread/src/lib/_grid_cairo_renderer.py
https://github.com/manns/pyspread/blob/0e2fd44c2e0f06605efc3058c20a43a8c1f9e7e0/pyspread/src/lib/_grid_cairo_renderer.py#L1112-L1124
def get_below_right_key_rect(self): """Returns tuple key rect of below right cell""" key_below_right = self.row + 1, self.col + 1, self.tab border_width_right = \ float(self.cell_attributes[self.key]["borderwidth_right"]) / 2.0 border_width_bottom = \ float(self.cell_attributes[self.key]["borderwidth_bottom"]) / 2.0 rect_below_right = (self.x+self.width, self.y-self.height, border_width_right, border_width_bottom) return key_below_right, rect_below_right
[ "def", "get_below_right_key_rect", "(", "self", ")", ":", "key_below_right", "=", "self", ".", "row", "+", "1", ",", "self", ".", "col", "+", "1", ",", "self", ".", "tab", "border_width_right", "=", "float", "(", "self", ".", "cell_attributes", "[", "self", ".", "key", "]", "[", "\"borderwidth_right\"", "]", ")", "/", "2.0", "border_width_bottom", "=", "float", "(", "self", ".", "cell_attributes", "[", "self", ".", "key", "]", "[", "\"borderwidth_bottom\"", "]", ")", "/", "2.0", "rect_below_right", "=", "(", "self", ".", "x", "+", "self", ".", "width", ",", "self", ".", "y", "-", "self", ".", "height", ",", "border_width_right", ",", "border_width_bottom", ")", "return", "key_below_right", ",", "rect_below_right" ]
Returns tuple key rect of below right cell
[ "Returns", "tuple", "key", "rect", "of", "below", "right", "cell" ]
python
train
42.230769
flatangle/flatlib
flatlib/ephem/eph.py
https://github.com/flatangle/flatlib/blob/44e05b2991a296c678adbc17a1d51b6a21bc867c/flatlib/ephem/eph.py#L23-L51
def getObject(ID, jd, lat, lon): """ Returns an object for a specific date and location. """ if ID == const.SOUTH_NODE: obj = swe.sweObject(const.NORTH_NODE, jd) obj.update({ 'id': const.SOUTH_NODE, 'lon': angle.norm(obj['lon'] + 180) }) elif ID == const.PARS_FORTUNA: pflon = tools.pfLon(jd, lat, lon) obj = { 'id': ID, 'lon': pflon, 'lat': 0, 'lonspeed': 0, 'latspeed': 0 } elif ID == const.SYZYGY: szjd = tools.syzygyJD(jd) obj = swe.sweObject(const.MOON, szjd) obj['id'] = const.SYZYGY else: obj = swe.sweObject(ID, jd) _signInfo(obj) return obj
[ "def", "getObject", "(", "ID", ",", "jd", ",", "lat", ",", "lon", ")", ":", "if", "ID", "==", "const", ".", "SOUTH_NODE", ":", "obj", "=", "swe", ".", "sweObject", "(", "const", ".", "NORTH_NODE", ",", "jd", ")", "obj", ".", "update", "(", "{", "'id'", ":", "const", ".", "SOUTH_NODE", ",", "'lon'", ":", "angle", ".", "norm", "(", "obj", "[", "'lon'", "]", "+", "180", ")", "}", ")", "elif", "ID", "==", "const", ".", "PARS_FORTUNA", ":", "pflon", "=", "tools", ".", "pfLon", "(", "jd", ",", "lat", ",", "lon", ")", "obj", "=", "{", "'id'", ":", "ID", ",", "'lon'", ":", "pflon", ",", "'lat'", ":", "0", ",", "'lonspeed'", ":", "0", ",", "'latspeed'", ":", "0", "}", "elif", "ID", "==", "const", ".", "SYZYGY", ":", "szjd", "=", "tools", ".", "syzygyJD", "(", "jd", ")", "obj", "=", "swe", ".", "sweObject", "(", "const", ".", "MOON", ",", "szjd", ")", "obj", "[", "'id'", "]", "=", "const", ".", "SYZYGY", "else", ":", "obj", "=", "swe", ".", "sweObject", "(", "ID", ",", "jd", ")", "_signInfo", "(", "obj", ")", "return", "obj" ]
Returns an object for a specific date and location.
[ "Returns", "an", "object", "for", "a", "specific", "date", "and", "location", "." ]
python
train
25.241379
tjcsl/cslbot
cslbot/commands/inspect.py
https://github.com/tjcsl/cslbot/blob/aebe07be47141f61d7c180706bddfb707f19b2b5/cslbot/commands/inspect.py#L22-L31
def cmd(send, msg, args): """Inspects a bot attribute. Syntax: {command} <attr> """ if not hasattr(args['handler'], msg): send("That attribute was not found in the handler.") return send(str(getattr(args['handler'], msg)))
[ "def", "cmd", "(", "send", ",", "msg", ",", "args", ")", ":", "if", "not", "hasattr", "(", "args", "[", "'handler'", "]", ",", "msg", ")", ":", "send", "(", "\"That attribute was not found in the handler.\"", ")", "return", "send", "(", "str", "(", "getattr", "(", "args", "[", "'handler'", "]", ",", "msg", ")", ")", ")" ]
Inspects a bot attribute. Syntax: {command} <attr>
[ "Inspects", "a", "bot", "attribute", "." ]
python
train
25.2
angr/angr
angr/state_plugins/solver.py
https://github.com/angr/angr/blob/4e2f97d56af5419ee73bdb30482c8dd8ff5f3e40/angr/state_plugins/solver.py#L834-L848
def single_valued(self, e): """ Returns True if `e` is a concrete value or is a value set with only 1 possible value. This differs from `unique` in that this *does* not query the constraint solver. """ if self.state.mode == 'static': if type(e) in (int, bytes, float, bool): return True else: return e.cardinality <= 1 else: # All symbolic expressions are not single-valued return not self.symbolic(e)
[ "def", "single_valued", "(", "self", ",", "e", ")", ":", "if", "self", ".", "state", ".", "mode", "==", "'static'", ":", "if", "type", "(", "e", ")", "in", "(", "int", ",", "bytes", ",", "float", ",", "bool", ")", ":", "return", "True", "else", ":", "return", "e", ".", "cardinality", "<=", "1", "else", ":", "# All symbolic expressions are not single-valued", "return", "not", "self", ".", "symbolic", "(", "e", ")" ]
Returns True if `e` is a concrete value or is a value set with only 1 possible value. This differs from `unique` in that this *does* not query the constraint solver.
[ "Returns", "True", "whether", "e", "is", "a", "concrete", "value", "or", "is", "a", "value", "set", "with", "only", "1", "possible", "value", ".", "This", "differs", "from", "unique", "in", "that", "this", "*", "does", "*", "not", "query", "the", "constraint", "solver", "." ]
python
train
35.266667
ReFirmLabs/binwalk
src/binwalk/core/module.py
https://github.com/ReFirmLabs/binwalk/blob/a0c5315fd2bae167e5c3d8469ce95d5defc743c2/src/binwalk/core/module.py#L996-L1007
def process_kwargs(obj, kwargs): ''' Convenience wrapper around binwalk.core.module.Modules.kwargs. @obj - The class object (an instance of a sub-class of binwalk.core.module.Module). @kwargs - The kwargs provided to the object's __init__ method. Returns the processed kwargs. ''' with Modules() as m: kwargs = m.kwargs(obj, kwargs) return kwargs
[ "def", "process_kwargs", "(", "obj", ",", "kwargs", ")", ":", "with", "Modules", "(", ")", "as", "m", ":", "kwargs", "=", "m", ".", "kwargs", "(", "obj", ",", "kwargs", ")", "return", "kwargs" ]
Convenience wrapper around binwalk.core.module.Modules.kwargs. @obj - The class object (an instance of a sub-class of binwalk.core.module.Module). @kwargs - The kwargs provided to the object's __init__ method. Returns the processed kwargs.
[ "Convenience", "wrapper", "around", "binwalk", ".", "core", ".", "module", ".", "Modules", ".", "kwargs", "." ]
python
train
30.333333
saltstack/salt
salt/utils/vmware.py
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/utils/vmware.py#L3621-L3647
def update_vm(vm_ref, vm_config_spec): ''' Updates the virtual machine configuration with the given object vm_ref Virtual machine managed object reference vm_config_spec Virtual machine config spec object to update ''' vm_name = get_managed_object_name(vm_ref) log.trace('Updating vm \'%s\'', vm_name) try: task = vm_ref.ReconfigVM_Task(vm_config_spec) except vim.fault.NoPermission as exc: log.exception(exc) raise salt.exceptions.VMwareApiError( 'Not enough permissions. Required privilege: ' '{}'.format(exc.privilegeId)) except vim.fault.VimFault as exc: log.exception(exc) raise salt.exceptions.VMwareApiError(exc.msg) except vmodl.RuntimeFault as exc: log.exception(exc) raise salt.exceptions.VMwareRuntimeError(exc.msg) vm_ref = wait_for_task(task, vm_name, 'ReconfigureVM Task') return vm_ref
[ "def", "update_vm", "(", "vm_ref", ",", "vm_config_spec", ")", ":", "vm_name", "=", "get_managed_object_name", "(", "vm_ref", ")", "log", ".", "trace", "(", "'Updating vm \\'%s\\''", ",", "vm_name", ")", "try", ":", "task", "=", "vm_ref", ".", "ReconfigVM_Task", "(", "vm_config_spec", ")", "except", "vim", ".", "fault", ".", "NoPermission", "as", "exc", ":", "log", ".", "exception", "(", "exc", ")", "raise", "salt", ".", "exceptions", ".", "VMwareApiError", "(", "'Not enough permissions. Required privilege: '", "'{}'", ".", "format", "(", "exc", ".", "privilegeId", ")", ")", "except", "vim", ".", "fault", ".", "VimFault", "as", "exc", ":", "log", ".", "exception", "(", "exc", ")", "raise", "salt", ".", "exceptions", ".", "VMwareApiError", "(", "exc", ".", "msg", ")", "except", "vmodl", ".", "RuntimeFault", "as", "exc", ":", "log", ".", "exception", "(", "exc", ")", "raise", "salt", ".", "exceptions", ".", "VMwareRuntimeError", "(", "exc", ".", "msg", ")", "vm_ref", "=", "wait_for_task", "(", "task", ",", "vm_name", ",", "'ReconfigureVM Task'", ")", "return", "vm_ref" ]
Updates the virtual machine configuration with the given object vm_ref Virtual machine managed object reference vm_config_spec Virtual machine config spec object to update
[ "Updates", "the", "virtual", "machine", "configuration", "with", "the", "given", "object" ]
python
train
34.222222
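
A hedged sketch of calling update_vm, assuming a pyVmomi service instance `si` is already connected; the VM name and spec values are placeholders:

from pyVmomi import vim
import salt.utils.vmware as vmware_util

# Look up the managed object reference by VM name (helper from the same module).
vm_ref = vmware_util.get_mor_by_property(si, vim.VirtualMachine, 'my-vm')
spec = vim.vm.ConfigSpec(numCPUs=2, memoryMB=4096)
vmware_util.update_vm(vm_ref, spec)  # blocks until the reconfigure task completes
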
kubernetes-client/python
kubernetes/client/apis/core_v1_api.py
https://github.com/kubernetes-client/python/blob/5e512ff564c244c50cab780d821542ed56aa965a/kubernetes/client/apis/core_v1_api.py#L366-L388
def connect_delete_namespaced_service_proxy_with_path(self, name, namespace, path, **kwargs): """ connect DELETE requests to proxy of Service This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.connect_delete_namespaced_service_proxy_with_path(name, namespace, path, async_req=True) >>> result = thread.get() :param async_req bool :param str name: name of the ServiceProxyOptions (required) :param str namespace: object name and auth scope, such as for teams and projects (required) :param str path: path to the resource (required) :param str path2: Path is the part of URLs that include service endpoints, suffixes, and parameters to use for the current proxy request to service. For example, the whole request URL is http://localhost/api/v1/namespaces/kube-system/services/elasticsearch-logging/_search?q=user:kimchy. Path is _search?q=user:kimchy. :return: str If the method is called asynchronously, returns the request thread. """ kwargs['_return_http_data_only'] = True if kwargs.get('async_req'): return self.connect_delete_namespaced_service_proxy_with_path_with_http_info(name, namespace, path, **kwargs) else: (data) = self.connect_delete_namespaced_service_proxy_with_path_with_http_info(name, namespace, path, **kwargs) return data
[ "def", "connect_delete_namespaced_service_proxy_with_path", "(", "self", ",", "name", ",", "namespace", ",", "path", ",", "*", "*", "kwargs", ")", ":", "kwargs", "[", "'_return_http_data_only'", "]", "=", "True", "if", "kwargs", ".", "get", "(", "'async_req'", ")", ":", "return", "self", ".", "connect_delete_namespaced_service_proxy_with_path_with_http_info", "(", "name", ",", "namespace", ",", "path", ",", "*", "*", "kwargs", ")", "else", ":", "(", "data", ")", "=", "self", ".", "connect_delete_namespaced_service_proxy_with_path_with_http_info", "(", "name", ",", "namespace", ",", "path", ",", "*", "*", "kwargs", ")", "return", "data" ]
connect DELETE requests to proxy of Service This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.connect_delete_namespaced_service_proxy_with_path(name, namespace, path, async_req=True) >>> result = thread.get() :param async_req bool :param str name: name of the ServiceProxyOptions (required) :param str namespace: object name and auth scope, such as for teams and projects (required) :param str path: path to the resource (required) :param str path2: Path is the part of URLs that include service endpoints, suffixes, and parameters to use for the current proxy request to service. For example, the whole request URL is http://localhost/api/v1/namespaces/kube-system/services/elasticsearch-logging/_search?q=user:kimchy. Path is _search?q=user:kimchy. :return: str If the method is called asynchronously, returns the request thread.
[ "connect", "DELETE", "requests", "to", "proxy", "of", "Service", "This", "method", "makes", "a", "synchronous", "HTTP", "request", "by", "default", ".", "To", "make", "an", "asynchronous", "HTTP", "request", "please", "pass", "async_req", "=", "True", ">>>", "thread", "=", "api", ".", "connect_delete_namespaced_service_proxy_with_path", "(", "name", "namespace", "path", "async_req", "=", "True", ")", ">>>", "result", "=", "thread", ".", "get", "()" ]
python
train
65.521739
rsheftel/raccoon
raccoon/series.py
https://github.com/rsheftel/raccoon/blob/e5c4b5fb933b51f33aff11e8168c39790e9a7c75/raccoon/series.py#L820-L829
def from_series(cls, series, offset=0):
        """
        Creates and returns a Series from a Series

        :param series: raccoon Series
        :param offset: offset value must be provided as there is no equivalent for a DataFrame
        :return: Series
        """
        return cls(data=series.data, index=series.index, data_name=series.data_name, index_name=series.index_name,
                   sort=series.sort, offset=offset)
[ "def", "from_series", "(", "cls", ",", "series", ",", "offset", "=", "0", ")", ":", "return", "cls", "(", "data", "=", "series", ".", "data", ",", "index", "=", "series", ".", "index", ",", "data_name", "=", "series", ".", "data_name", ",", "index_name", "=", "series", ".", "index_name", ",", "sort", "=", "series", ".", "sort", ",", "offset", "=", "offset", ")" ]
Creates and returns a Series from a Series

:param series: raccoon Series
:param offset: offset value must be provided as there is no equivalent for a DataFrame
:return: Series
[ "Creates", "and", "returns", "a", "Series", "from", "a", "Series" ]
python
train
42.9
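
Usage is a one-liner once a Series exists; a minimal sketch:

import raccoon as rc

s1 = rc.Series(data=[4, 5, 6], index=[1, 2, 3], data_name='price')
s2 = rc.Series.from_series(s1, offset=2)  # identical contents, new offset
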
Pylons/plaster
src/plaster/loaders.py
https://github.com/Pylons/plaster/blob/e70e55c182a8300d7ccf67e54d47740c72e72cd8/src/plaster/loaders.py#L84-L117
def get_loader(config_uri, protocols=None): """ Find a :class:`plaster.ILoader` object capable of handling ``config_uri``. :param config_uri: Anything that can be parsed by :func:`plaster.parse_uri`. :param protocols: Zero or more :term:`loader protocol` identifiers that the loader must implement to match the desired ``config_uri``. :returns: A :class:`plaster.ILoader` object. :raises plaster.LoaderNotFound: If no loader could be found. :raises plaster.MultipleLoadersFound: If multiple loaders match the requested criteria. If this happens, you can disambiguate the lookup by appending the package name to the scheme for the loader you wish to use. For example if ``ini`` is ambiguous then specify ``ini+myapp`` to use the ini loader from the ``myapp`` package. """ config_uri = parse_uri(config_uri) requested_scheme = config_uri.scheme matched_loaders = find_loaders(requested_scheme, protocols=protocols) if len(matched_loaders) < 1: raise LoaderNotFound(requested_scheme, protocols=protocols) if len(matched_loaders) > 1: raise MultipleLoadersFound( requested_scheme, matched_loaders, protocols=protocols) loader_info = matched_loaders[0] loader = loader_info.load(config_uri) return loader
[ "def", "get_loader", "(", "config_uri", ",", "protocols", "=", "None", ")", ":", "config_uri", "=", "parse_uri", "(", "config_uri", ")", "requested_scheme", "=", "config_uri", ".", "scheme", "matched_loaders", "=", "find_loaders", "(", "requested_scheme", ",", "protocols", "=", "protocols", ")", "if", "len", "(", "matched_loaders", ")", "<", "1", ":", "raise", "LoaderNotFound", "(", "requested_scheme", ",", "protocols", "=", "protocols", ")", "if", "len", "(", "matched_loaders", ")", ">", "1", ":", "raise", "MultipleLoadersFound", "(", "requested_scheme", ",", "matched_loaders", ",", "protocols", "=", "protocols", ")", "loader_info", "=", "matched_loaders", "[", "0", "]", "loader", "=", "loader_info", ".", "load", "(", "config_uri", ")", "return", "loader" ]
Find a :class:`plaster.ILoader` object capable of handling ``config_uri``. :param config_uri: Anything that can be parsed by :func:`plaster.parse_uri`. :param protocols: Zero or more :term:`loader protocol` identifiers that the loader must implement to match the desired ``config_uri``. :returns: A :class:`plaster.ILoader` object. :raises plaster.LoaderNotFound: If no loader could be found. :raises plaster.MultipleLoadersFound: If multiple loaders match the requested criteria. If this happens, you can disambiguate the lookup by appending the package name to the scheme for the loader you wish to use. For example if ``ini`` is ambiguous then specify ``ini+myapp`` to use the ini loader from the ``myapp`` package.
[ "Find", "a", ":", "class", ":", "plaster", ".", "ILoader", "object", "capable", "of", "handling", "config_uri", "." ]
python
train
38.647059
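
Typical usage of the record above, with an example config file name:

import plaster

loader = plaster.get_loader('development.ini', protocols=['wsgi'])
settings = loader.get_settings('app:main')
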
RedHatInsights/insights-core
insights/parsers/__init__.py
https://github.com/RedHatInsights/insights-core/blob/b57cbf8ed7c089672426ede0441e0a4f789ef4a1/insights/parsers/__init__.py#L451-L528
def keyword_search(rows, **kwargs): """ Takes a list of dictionaries and finds all the dictionaries where the keys and values match those found in the keyword arguments. Keys in the row data have ' ' and '-' replaced with '_', so they can match the keyword argument parsing. For example, the keyword argument 'fix_up_path' will match a key named 'fix-up path'. In addition, several suffixes can be added to the key name to do partial matching of values: * '__contains' will test whether the data value contains the given value. * '__startswith' tests if the data value starts with the given value * '__lower_value' compares the lower-case version of the data and given values. Arguments: rows (list): A list of dictionaries representing the data to be searched. **kwargs (dict): keyword-value pairs corresponding to the fields that need to be found and their required values in the data rows. Returns: (list): The list of rows that match the search keywords. If no keyword arguments are given, no rows are returned. Examples: >>> rows = [ ... {'domain': 'oracle', 'type': 'soft', 'item': 'nofile', 'value': 1024}, ... {'domain': 'oracle', 'type': 'hard', 'item': 'nofile', 'value': 65536}, ... {'domain': 'oracle', 'type': 'soft', 'item': 'stack', 'value': 10240}, ... {'domain': 'oracle', 'type': 'hard', 'item': 'stack', 'value': 3276}, ... {'domain': 'root', 'type': 'soft', 'item': 'nproc', 'value': -1}] ... >>> keyword_search(rows, domain='root') [{'domain': 'root', 'type': 'soft', 'item': 'nproc', 'value': -1}] >>> keyword_search(rows, item__contains='c') [{'domain': 'oracle', 'type': 'soft', 'item': 'stack', 'value': 10240}, {'domain': 'oracle', 'type': 'hard', 'item': 'stack', 'value': 3276}, {'domain': 'root', 'type': 'soft', 'item': 'nproc', 'value': -1}] >>> keyword_search(rows, domain__startswith='r') [{'domain': 'root', 'type': 'soft', 'item': 'nproc', 'value': -1}] """ results = [] if not kwargs: return results # Allows us to transform the key and do lookups like __contains and # __startswith matchers = { 'default': lambda s, v: s == v, 'contains': lambda s, v: v in s, 'startswith': lambda s, v: s.startswith(v), 'lower_value': lambda s, v: s.lower() == v.lower(), } def key_match(row, key, value): # Translate ' ' and '-' of keys in dict to '_' to match keyword arguments. my_row = {} for my_key, val in row.items(): my_row[my_key.replace(' ', '_').replace('-', '_')] = val matcher_fn = matchers['default'] if '__' in key: key, matcher = key.split('__', 1) if matcher not in matchers: # put key back the way we found it, matcher fn unchanged key = key + '__' + matcher else: matcher_fn = matchers[matcher] return key in my_row and matcher_fn(my_row[key], value) data = [] for row in rows: if all(map(lambda kv: key_match(row, kv[0], kv[1]), kwargs.items())): data.append(row) return data
[ "def", "keyword_search", "(", "rows", ",", "*", "*", "kwargs", ")", ":", "results", "=", "[", "]", "if", "not", "kwargs", ":", "return", "results", "# Allows us to transform the key and do lookups like __contains and", "# __startswith", "matchers", "=", "{", "'default'", ":", "lambda", "s", ",", "v", ":", "s", "==", "v", ",", "'contains'", ":", "lambda", "s", ",", "v", ":", "v", "in", "s", ",", "'startswith'", ":", "lambda", "s", ",", "v", ":", "s", ".", "startswith", "(", "v", ")", ",", "'lower_value'", ":", "lambda", "s", ",", "v", ":", "s", ".", "lower", "(", ")", "==", "v", ".", "lower", "(", ")", ",", "}", "def", "key_match", "(", "row", ",", "key", ",", "value", ")", ":", "# Translate ' ' and '-' of keys in dict to '_' to match keyword arguments.", "my_row", "=", "{", "}", "for", "my_key", ",", "val", "in", "row", ".", "items", "(", ")", ":", "my_row", "[", "my_key", ".", "replace", "(", "' '", ",", "'_'", ")", ".", "replace", "(", "'-'", ",", "'_'", ")", "]", "=", "val", "matcher_fn", "=", "matchers", "[", "'default'", "]", "if", "'__'", "in", "key", ":", "key", ",", "matcher", "=", "key", ".", "split", "(", "'__'", ",", "1", ")", "if", "matcher", "not", "in", "matchers", ":", "# put key back the way we found it, matcher fn unchanged", "key", "=", "key", "+", "'__'", "+", "matcher", "else", ":", "matcher_fn", "=", "matchers", "[", "matcher", "]", "return", "key", "in", "my_row", "and", "matcher_fn", "(", "my_row", "[", "key", "]", ",", "value", ")", "data", "=", "[", "]", "for", "row", "in", "rows", ":", "if", "all", "(", "map", "(", "lambda", "kv", ":", "key_match", "(", "row", ",", "kv", "[", "0", "]", ",", "kv", "[", "1", "]", ")", ",", "kwargs", ".", "items", "(", ")", ")", ")", ":", "data", ".", "append", "(", "row", ")", "return", "data" ]
Takes a list of dictionaries and finds all the dictionaries where the keys and values match those found in the keyword arguments. Keys in the row data have ' ' and '-' replaced with '_', so they can match the keyword argument parsing. For example, the keyword argument 'fix_up_path' will match a key named 'fix-up path'. In addition, several suffixes can be added to the key name to do partial matching of values: * '__contains' will test whether the data value contains the given value. * '__startswith' tests if the data value starts with the given value * '__lower_value' compares the lower-case version of the data and given values. Arguments: rows (list): A list of dictionaries representing the data to be searched. **kwargs (dict): keyword-value pairs corresponding to the fields that need to be found and their required values in the data rows. Returns: (list): The list of rows that match the search keywords. If no keyword arguments are given, no rows are returned. Examples: >>> rows = [ ... {'domain': 'oracle', 'type': 'soft', 'item': 'nofile', 'value': 1024}, ... {'domain': 'oracle', 'type': 'hard', 'item': 'nofile', 'value': 65536}, ... {'domain': 'oracle', 'type': 'soft', 'item': 'stack', 'value': 10240}, ... {'domain': 'oracle', 'type': 'hard', 'item': 'stack', 'value': 3276}, ... {'domain': 'root', 'type': 'soft', 'item': 'nproc', 'value': -1}] ... >>> keyword_search(rows, domain='root') [{'domain': 'root', 'type': 'soft', 'item': 'nproc', 'value': -1}] >>> keyword_search(rows, item__contains='c') [{'domain': 'oracle', 'type': 'soft', 'item': 'stack', 'value': 10240}, {'domain': 'oracle', 'type': 'hard', 'item': 'stack', 'value': 3276}, {'domain': 'root', 'type': 'soft', 'item': 'nproc', 'value': -1}] >>> keyword_search(rows, domain__startswith='r') [{'domain': 'root', 'type': 'soft', 'item': 'nproc', 'value': -1}]
[ "Takes", "a", "list", "of", "dictionaries", "and", "finds", "all", "the", "dictionaries", "where", "the", "keys", "and", "values", "match", "those", "found", "in", "the", "keyword", "arguments", "." ]
python
train
41.846154
tamasgal/km3pipe
km3pipe/core.py
https://github.com/tamasgal/km3pipe/blob/7a9b59ac899a28775b5bdc5d391d9a5340d08040/km3pipe/core.py#L539-L548
def require(self, name): """Return the value of the requested parameter or raise an error.""" value = self.get(name) if value is None: raise TypeError( "{0} requires the parameter '{1}'.".format( self.__class__, name ) ) return value
[ "def", "require", "(", "self", ",", "name", ")", ":", "value", "=", "self", ".", "get", "(", "name", ")", "if", "value", "is", "None", ":", "raise", "TypeError", "(", "\"{0} requires the parameter '{1}'.\"", ".", "format", "(", "self", ".", "__class__", ",", "name", ")", ")", "return", "value" ]
Return the value of the requested parameter or raise an error.
[ "Return", "the", "value", "of", "the", "requested", "parameter", "or", "raise", "an", "error", "." ]
python
train
33.2
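
A sketch of the usual pattern inside a km3pipe module's configure step; the module and parameter names are invented:

import km3pipe as kp

class Shouter(kp.Module):
    def configure(self):
        # Raises TypeError unless 'volume' was passed when attaching
        self.volume = self.require('volume')

pipe = kp.Pipeline()
pipe.attach(Shouter, volume=11)
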
idlesign/django-sitetree
sitetree/admin.py
https://github.com/idlesign/django-sitetree/blob/61de4608e6e415247c75fe8691027d7c4ed0d1e7/sitetree/admin.py#L162-L197
def get_form(self, request, obj=None, **kwargs): """Returns modified form for TreeItem model. 'Parent' field choices are built by sitetree itself. """ if obj is not None and obj.parent is not None: self.previous_parent = obj.parent previous_parent_id = self.previous_parent.id else: previous_parent_id = None my_choice_field = TreeItemChoiceField(self.tree, initial=previous_parent_id) form = super(TreeItemAdmin, self).get_form(request, obj, **kwargs) my_choice_field.label = form.base_fields['parent'].label my_choice_field.help_text = form.base_fields['parent'].help_text my_choice_field.widget = form.base_fields['parent'].widget # Replace 'parent' TreeItem field with new appropriate one form.base_fields['parent'] = my_choice_field # Try to resolve all currently registered url names including those in namespaces. if not getattr(self, 'known_url_names', False): self.known_url_names = [] self.known_url_rules = [] resolver = get_resolver(get_urlconf()) for ns, (url_prefix, ns_resolver) in resolver.namespace_dict.items(): if ns != 'admin': self._stack_known_urls(ns_resolver.reverse_dict, ns) self._stack_known_urls(resolver.reverse_dict) self.known_url_rules = sorted(self.known_url_rules) form.known_url_names_hint = _( 'You are seeing this warning because "URL as Pattern" option is active and pattern entered above ' 'seems to be invalid. Currently registered URL pattern names and parameters: ') form.known_url_names = self.known_url_names form.known_url_rules = self.known_url_rules return form
[ "def", "get_form", "(", "self", ",", "request", ",", "obj", "=", "None", ",", "*", "*", "kwargs", ")", ":", "if", "obj", "is", "not", "None", "and", "obj", ".", "parent", "is", "not", "None", ":", "self", ".", "previous_parent", "=", "obj", ".", "parent", "previous_parent_id", "=", "self", ".", "previous_parent", ".", "id", "else", ":", "previous_parent_id", "=", "None", "my_choice_field", "=", "TreeItemChoiceField", "(", "self", ".", "tree", ",", "initial", "=", "previous_parent_id", ")", "form", "=", "super", "(", "TreeItemAdmin", ",", "self", ")", ".", "get_form", "(", "request", ",", "obj", ",", "*", "*", "kwargs", ")", "my_choice_field", ".", "label", "=", "form", ".", "base_fields", "[", "'parent'", "]", ".", "label", "my_choice_field", ".", "help_text", "=", "form", ".", "base_fields", "[", "'parent'", "]", ".", "help_text", "my_choice_field", ".", "widget", "=", "form", ".", "base_fields", "[", "'parent'", "]", ".", "widget", "# Replace 'parent' TreeItem field with new appropriate one", "form", ".", "base_fields", "[", "'parent'", "]", "=", "my_choice_field", "# Try to resolve all currently registered url names including those in namespaces.", "if", "not", "getattr", "(", "self", ",", "'known_url_names'", ",", "False", ")", ":", "self", ".", "known_url_names", "=", "[", "]", "self", ".", "known_url_rules", "=", "[", "]", "resolver", "=", "get_resolver", "(", "get_urlconf", "(", ")", ")", "for", "ns", ",", "(", "url_prefix", ",", "ns_resolver", ")", "in", "resolver", ".", "namespace_dict", ".", "items", "(", ")", ":", "if", "ns", "!=", "'admin'", ":", "self", ".", "_stack_known_urls", "(", "ns_resolver", ".", "reverse_dict", ",", "ns", ")", "self", ".", "_stack_known_urls", "(", "resolver", ".", "reverse_dict", ")", "self", ".", "known_url_rules", "=", "sorted", "(", "self", ".", "known_url_rules", ")", "form", ".", "known_url_names_hint", "=", "_", "(", "'You are seeing this warning because \"URL as Pattern\" option is active and pattern entered above '", "'seems to be invalid. Currently registered URL pattern names and parameters: '", ")", "form", ".", "known_url_names", "=", "self", ".", "known_url_names", "form", ".", "known_url_rules", "=", "self", ".", "known_url_rules", "return", "form" ]
Returns modified form for TreeItem model. 'Parent' field choices are built by sitetree itself.
[ "Returns", "modified", "form", "for", "TreeItem", "model", ".", "Parent", "field", "choices", "are", "built", "by", "sitetree", "itself", "." ]
python
test
49.694444
Jammy2211/PyAutoLens
autolens/data/array/grids.py
https://github.com/Jammy2211/PyAutoLens/blob/91e50369c7a9c048c83d217625578b72423cd5a7/autolens/data/array/grids.py#L141-L159
def from_shape_pixel_scale_and_sub_grid_size(cls, shape, pixel_scale, sub_grid_size=2): """Setup a grid-stack of grid_stack from a 2D array shape, a pixel scale and a sub-grid size. This grid corresponds to a fully unmasked 2D array. Parameters ----------- shape : (int, int) The 2D shape of the array, where all pixels are used to generate the grid-stack's grid_stack. pixel_scale : float The size of each pixel in arc seconds. sub_grid_size : int The size of a sub-pixel's sub-grid (sub_grid_size x sub_grid_size). """ regular_grid = RegularGrid.from_shape_and_pixel_scale(shape=shape, pixel_scale=pixel_scale) sub_grid = SubGrid.from_shape_pixel_scale_and_sub_grid_size(shape=shape, pixel_scale=pixel_scale, sub_grid_size=sub_grid_size) blurring_grid = np.array([[0.0, 0.0]]) return GridStack(regular_grid, sub_grid, blurring_grid)
[ "def", "from_shape_pixel_scale_and_sub_grid_size", "(", "cls", ",", "shape", ",", "pixel_scale", ",", "sub_grid_size", "=", "2", ")", ":", "regular_grid", "=", "RegularGrid", ".", "from_shape_and_pixel_scale", "(", "shape", "=", "shape", ",", "pixel_scale", "=", "pixel_scale", ")", "sub_grid", "=", "SubGrid", ".", "from_shape_pixel_scale_and_sub_grid_size", "(", "shape", "=", "shape", ",", "pixel_scale", "=", "pixel_scale", ",", "sub_grid_size", "=", "sub_grid_size", ")", "blurring_grid", "=", "np", ".", "array", "(", "[", "[", "0.0", ",", "0.0", "]", "]", ")", "return", "GridStack", "(", "regular_grid", ",", "sub_grid", ",", "blurring_grid", ")" ]
Setup a grid-stack of grid_stack from a 2D array shape, a pixel scale and a sub-grid size. This grid corresponds to a fully unmasked 2D array. Parameters ----------- shape : (int, int) The 2D shape of the array, where all pixels are used to generate the grid-stack's grid_stack. pixel_scale : float The size of each pixel in arc seconds. sub_grid_size : int The size of a sub-pixel's sub-grid (sub_grid_size x sub_grid_size).
[ "Setup", "a", "grid", "-", "stack", "of", "grid_stack", "from", "a", "2D", "array", "shape", "a", "pixel", "scale", "and", "a", "sub", "-", "grid", "size", ".", "This", "grid", "corresponds", "to", "a", "fully", "unmasked", "2D", "array", "." ]
python
valid
54.631579
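
A short sketch instantiating an unmasked grid-stack; the shape and pixel scale are example values:

from autolens.data.array import grids

stack = grids.GridStack.from_shape_pixel_scale_and_sub_grid_size(
    shape=(100, 100), pixel_scale=0.05, sub_grid_size=2)
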
LISE-B26/pylabcontrol
build/lib/pylabcontrol/gui/windows_and_widgets/main_window.py
https://github.com/LISE-B26/pylabcontrol/blob/67482e5157fcd1c40705e5c2cacfb93564703ed0/build/lib/pylabcontrol/gui/windows_and_widgets/main_window.py#L994-L1008
def script_finished(self):
        """
        waits for the script to emit the script_finished signal
        """
        script = self.current_script
        script.updateProgress.disconnect(self.update_status)
        self.script_thread.started.disconnect()
        script.finished.disconnect()

        self.current_script = None
        self.plot_script(script)
        self.progressBar.setValue(100)
        self.btn_start_script.setEnabled(True)
        self.btn_skip_subscript.setEnabled(False)
[ "def", "script_finished", "(", "self", ")", ":", "script", "=", "self", ".", "current_script", "script", ".", "updateProgress", ".", "disconnect", "(", "self", ".", "update_status", ")", "self", ".", "script_thread", ".", "started", ".", "disconnect", "(", ")", "script", ".", "finished", ".", "disconnect", "(", ")", "self", ".", "current_script", "=", "None", "self", ".", "plot_script", "(", "script", ")", "self", ".", "progressBar", ".", "setValue", "(", "100", ")", "self", ".", "btn_start_script", ".", "setEnabled", "(", "True", ")", "self", ".", "btn_skip_subscript", ".", "setEnabled", "(", "False", ")" ]
waits for the script to emit the script_finished signal
[ "waits", "for", "the", "script", "to", "emit", "the", "script_finished", "signal" ]
python
train
32.533333
robinandeer/puzzle
puzzle/plugins/sql/mixins/actions/gemini.py
https://github.com/robinandeer/puzzle/blob/9476f05b416d3a5135d25492cb31411fdf831c58/puzzle/plugins/sql/mixins/actions/gemini.py#L36-L45
def delete_gemini_query(self, query_id):
        """Delete a gemini query

            Args:
                query_id (int)
        """
        query_obj = self.gemini_query(query_id)
        logger.debug("Delete query: {0}".format(query_obj.name_query))
        self.session.delete(query_obj)
        self.save()
[ "def", "delete_gemini_query", "(", "self", ",", "query_id", ")", ":", "query_obj", "=", "self", ".", "gemini_query", "(", "query_id", ")", "logger", ".", "debug", "(", "\"Delete query: {0}\"", ".", "format", "(", "query_obj", ".", "name_query", ")", ")", "self", ".", "session", ".", "delete", "(", "query_obj", ")", "self", ".", "save", "(", ")" ]
Delete a gemini query

Args:
    query_id (int)
[ "Delete", "a", "gemini", "query" ]
python
train
29.2
spyder-ide/spyder
spyder/plugins/ipythonconsole/plugin.py
https://github.com/spyder-ide/spyder/blob/f76836ce1b924bcc4efd3f74f2960d26a4e528e0/spyder/plugins/ipythonconsole/plugin.py#L1022-L1031
def get_related_clients(self, client): """ Get all other clients that are connected to the same kernel as `client` """ related_clients = [] for cl in self.get_clients(): if cl.connection_file == client.connection_file and \ cl is not client: related_clients.append(cl) return related_clients
[ "def", "get_related_clients", "(", "self", ",", "client", ")", ":", "related_clients", "=", "[", "]", "for", "cl", "in", "self", ".", "get_clients", "(", ")", ":", "if", "cl", ".", "connection_file", "==", "client", ".", "connection_file", "and", "cl", "is", "not", "client", ":", "related_clients", ".", "append", "(", "cl", ")", "return", "related_clients" ]
Get all other clients that are connected to the same kernel as `client`
[ "Get", "all", "other", "clients", "that", "are", "connected", "to", "the", "same", "kernel", "as", "client" ]
python
train
38.1
crackinglandia/pype32
pype32/pype32.py
https://github.com/crackinglandia/pype32/blob/192fd14dfc0dd36d953739a81c17fbaf5e3d6076/pype32/pype32.py#L495-L532
def addSection(self, data, name =".pype32\x00", flags = 0x60000000): """ Adds a new section to the existing L{PE} instance. @type data: str @param data: The data to be added in the new section. @type name: str @param name: (Optional) The name for the new section. @type flags: int @param flags: (Optional) The attributes for the new section. """ fa = self.ntHeaders.optionalHeader.fileAlignment.value sa = self.ntHeaders.optionalHeader.sectionAlignment.value padding = "\xcc" * (fa - len(data)) sh = SectionHeader() if len(self.sectionHeaders): # get the va, vz, ra and rz of the last section in the array of section headers vaLastSection = self.sectionHeaders[-1].virtualAddress.value sizeLastSection = self.sectionHeaders[-1].misc.value pointerToRawDataLastSection = self.sectionHeaders[-1].pointerToRawData.value sizeOfRawDataLastSection = self.sectionHeaders[-1].sizeOfRawData.value sh.virtualAddress.value = self._adjustSectionAlignment(vaLastSection + sizeLastSection, fa, sa) sh.pointerToRawData.value = self._adjustFileAlignment(pointerToRawDataLastSection + sizeOfRawDataLastSection, fa) sh.misc.value = self._adjustSectionAlignment(len(data), fa, sa) or consts.DEFAULT_PAGE_SIZE sh.sizeOfRawData.value = self._adjustFileAlignment(len(data), fa) or consts.DEFAULT_FILE_ALIGNMENT sh.characteristics.value = flags sh.name.value = name self.sectionHeaders.append(sh) self.sections.append(data + padding) self.ntHeaders.fileHeader.numberOfSections.value += 1
[ "def", "addSection", "(", "self", ",", "data", ",", "name", "=", "\".pype32\\x00\"", ",", "flags", "=", "0x60000000", ")", ":", "fa", "=", "self", ".", "ntHeaders", ".", "optionalHeader", ".", "fileAlignment", ".", "value", "sa", "=", "self", ".", "ntHeaders", ".", "optionalHeader", ".", "sectionAlignment", ".", "value", "padding", "=", "\"\\xcc\"", "*", "(", "fa", "-", "len", "(", "data", ")", ")", "sh", "=", "SectionHeader", "(", ")", "if", "len", "(", "self", ".", "sectionHeaders", ")", ":", "# get the va, vz, ra and rz of the last section in the array of section headers", "vaLastSection", "=", "self", ".", "sectionHeaders", "[", "-", "1", "]", ".", "virtualAddress", ".", "value", "sizeLastSection", "=", "self", ".", "sectionHeaders", "[", "-", "1", "]", ".", "misc", ".", "value", "pointerToRawDataLastSection", "=", "self", ".", "sectionHeaders", "[", "-", "1", "]", ".", "pointerToRawData", ".", "value", "sizeOfRawDataLastSection", "=", "self", ".", "sectionHeaders", "[", "-", "1", "]", ".", "sizeOfRawData", ".", "value", "sh", ".", "virtualAddress", ".", "value", "=", "self", ".", "_adjustSectionAlignment", "(", "vaLastSection", "+", "sizeLastSection", ",", "fa", ",", "sa", ")", "sh", ".", "pointerToRawData", ".", "value", "=", "self", ".", "_adjustFileAlignment", "(", "pointerToRawDataLastSection", "+", "sizeOfRawDataLastSection", ",", "fa", ")", "sh", ".", "misc", ".", "value", "=", "self", ".", "_adjustSectionAlignment", "(", "len", "(", "data", ")", ",", "fa", ",", "sa", ")", "or", "consts", ".", "DEFAULT_PAGE_SIZE", "sh", ".", "sizeOfRawData", ".", "value", "=", "self", ".", "_adjustFileAlignment", "(", "len", "(", "data", ")", ",", "fa", ")", "or", "consts", ".", "DEFAULT_FILE_ALIGNMENT", "sh", ".", "characteristics", ".", "value", "=", "flags", "sh", ".", "name", ".", "value", "=", "name", "self", ".", "sectionHeaders", ".", "append", "(", "sh", ")", "self", ".", "sections", ".", "append", "(", "data", "+", "padding", ")", "self", ".", "ntHeaders", ".", "fileHeader", ".", "numberOfSections", ".", "value", "+=", "1" ]
Adds a new section to the existing L{PE} instance. @type data: str @param data: The data to be added in the new section. @type name: str @param name: (Optional) The name for the new section. @type flags: int @param flags: (Optional) The attributes for the new section.
[ "Adds", "a", "new", "section", "to", "the", "existing", "L", "{", "PE", "}", "instance", "." ]
python
train
46.421053
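
A hedged sketch of appending a section with pype32; the input file and payload are placeholders, and saving the patched binary back out is left out:

import pype32

pe = pype32.PE("sample.exe")
# 8-byte section name; 0x60000020 = CODE | EXECUTE | READ
pe.addSection("\x90" * 0x200, name=".newsec\x00", flags=0x60000020)
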
contains-io/rcli
rcli/usage.py
https://github.com/contains-io/rcli/blob/cdd6191a0e0a19bc767f84921650835d099349cf/rcli/usage.py#L193-L211
def _get_section(name, source): # type: (str, str) -> Optional[str] """Extract the named section from the source. Args: name: The name of the section to extract (e.g. "Usage"). source: The usage string to parse. Returns: A string containing only the requested section. If the section appears multiple times, each instance will be merged into a single section. """ pattern = re.compile( '^([^\n]*{name}[^\n]*\n?(?:[ \t].*?(?:\n|$))*)'.format(name=name), re.IGNORECASE | re.MULTILINE) usage = None for section in pattern.findall(source): usage = _merge_section(usage, section.strip()) return usage
[ "def", "_get_section", "(", "name", ",", "source", ")", ":", "# type: (str, str) -> Optional[str]", "pattern", "=", "re", ".", "compile", "(", "'^([^\\n]*{name}[^\\n]*\\n?(?:[ \\t].*?(?:\\n|$))*)'", ".", "format", "(", "name", "=", "name", ")", ",", "re", ".", "IGNORECASE", "|", "re", ".", "MULTILINE", ")", "usage", "=", "None", "for", "section", "in", "pattern", ".", "findall", "(", "source", ")", ":", "usage", "=", "_merge_section", "(", "usage", ",", "section", ".", "strip", "(", ")", ")", "return", "usage" ]
Extract the named section from the source. Args: name: The name of the section to extract (e.g. "Usage"). source: The usage string to parse. Returns: A string containing only the requested section. If the section appears multiple times, each instance will be merged into a single section.
[ "Extract", "the", "named", "section", "from", "the", "source", "." ]
python
train
35.315789
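
Since _get_section is module-private, here is simply how it behaves on a small docopt-style string (expected output in the trailing comments):

doc = '''Tool.

Usage:
    tool run <file>
    tool clean

Options:
    -h --help    Show help.
'''
print(_get_section('usage', doc))
# Usage:
#     tool run <file>
#     tool clean
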
diux-dev/ncluster
ncluster/aws_backend.py
https://github.com/diux-dev/ncluster/blob/2fd359621896717197b479c7174d06d80df1529b/ncluster/aws_backend.py#L993-L1030
def _set_aws_environment(task: Task = None):
  """Sets up AWS environment from NCLUSTER environment variables"""
  current_zone = os.environ.get('NCLUSTER_ZONE', '')
  current_region = os.environ.get('AWS_DEFAULT_REGION', '')

  def log(*args):
    if task:
      task.log(*args)
    else:
      util.log(*args)

  if current_region and current_zone:
    assert current_zone.startswith(
      current_region), f'Current zone "{current_zone}" ($NCLUSTER_ZONE) is not ' \
                       f'in current region "{current_region}" ($AWS_DEFAULT_REGION)'
    assert u.get_session().region_name == current_region  # setting from ~/.aws

  # zone is set, set region from zone
  if current_zone and not current_region:
    current_region = current_zone[:-1]
    os.environ['AWS_DEFAULT_REGION'] = current_region

  # neither zone nor region is set, use default setting for region
  # if default is not set, use NCLUSTER_DEFAULT_REGION
  if not current_region:
    current_region = u.get_session().region_name
  if not current_region:
    log(f"No default region available, using {NCLUSTER_DEFAULT_REGION}")
    current_region = NCLUSTER_DEFAULT_REGION
    os.environ['AWS_DEFAULT_REGION'] = current_region

  # zone not set, use first zone of the region
  #  if not current_zone:
  #    current_zone = current_region + 'a'
  #    os.environ['NCLUSTER_ZONE'] = current_zone

  log(f"Using account {u.get_account_number()}, region {current_region}, "
      f"zone {current_zone}")
[ "def", "_set_aws_environment", "(", "task", ":", "Task", "=", "None", ")", ":", "current_zone", "=", "os", ".", "environ", ".", "get", "(", "'NCLUSTER_ZONE'", ",", "''", ")", "current_region", "=", "os", ".", "environ", ".", "get", "(", "'AWS_DEFAULT_REGION'", ",", "''", ")", "def", "log", "(", "*", "args", ")", ":", "if", "task", ":", "task", ".", "log", "(", "*", "args", ")", "else", ":", "util", ".", "log", "(", "*", "args", ")", "if", "current_region", "and", "current_zone", ":", "assert", "current_zone", ".", "startswith", "(", "current_region", ")", ",", "f'Current zone \"{current_zone}\" ($NCLUSTER_ZONE) is not '", "f'in current region \"{current_region}\" ($AWS_DEFAULT_REGION)'", "assert", "u", ".", "get_session", "(", ")", ".", "region_name", "==", "current_region", "# setting from ~/.aws", "# zone is set, set region from zone", "if", "current_zone", "and", "not", "current_region", ":", "current_region", "=", "current_zone", "[", ":", "-", "1", "]", "os", ".", "environ", "[", "'AWS_DEFAULT_REGION'", "]", "=", "current_region", "# neither zone nor region is set, use default setting for region", "# if default is not set, use NCLUSTER_DEFAULT_REGION", "if", "not", "current_region", ":", "current_region", "=", "u", ".", "get_session", "(", ")", ".", "region_name", "if", "not", "current_region", ":", "log", "(", "f\"No default region available, using {NCLUSTER_DEFAULT_REGION}\"", ")", "current_region", "=", "NCLUSTER_DEFAULT_REGION", "os", ".", "environ", "[", "'AWS_DEFAULT_REGION'", "]", "=", "current_region", "# zone not set, use first zone of the region", "# if not current_zone:", "# current_zone = current_region + 'a'", "# os.environ['NCLUSTER_ZONE'] = current_zone", "log", "(", "f\"Using account {u.get_account_number()}, region {current_region}, \"", "f\"zone {current_zone}\"", ")" ]
Sets up AWS environment from NCLUSTER environment variables
[ "Sets", "up", "AWS", "environment", "from", "NCLUSTER", "environment", "variables" ]
python
train
38
DLR-RM/RAFCON
source/rafcon/core/states/concurrency_state.py
https://github.com/DLR-RM/RAFCON/blob/24942ef1a904531f49ab8830a1dbb604441be498/source/rafcon/core/states/concurrency_state.py#L156-L172
def finalize_concurrency_state(self, outcome): """ Utility function to finalize the forward execution of the concurrency state. :param outcome: :return: """ final_outcome = outcome self.write_output_data() self.check_output_data_type() self.execution_history.push_return_history_item(self, CallType.CONTAINER, self, self.output_data) self.state_execution_status = StateExecutionStatus.WAIT_FOR_NEXT_STATE singleton.state_machine_execution_engine._modify_run_to_states(self) if self.preempted: final_outcome = Outcome(-2, "preempted") return self.finalize(final_outcome)
[ "def", "finalize_concurrency_state", "(", "self", ",", "outcome", ")", ":", "final_outcome", "=", "outcome", "self", ".", "write_output_data", "(", ")", "self", ".", "check_output_data_type", "(", ")", "self", ".", "execution_history", ".", "push_return_history_item", "(", "self", ",", "CallType", ".", "CONTAINER", ",", "self", ",", "self", ".", "output_data", ")", "self", ".", "state_execution_status", "=", "StateExecutionStatus", ".", "WAIT_FOR_NEXT_STATE", "singleton", ".", "state_machine_execution_engine", ".", "_modify_run_to_states", "(", "self", ")", "if", "self", ".", "preempted", ":", "final_outcome", "=", "Outcome", "(", "-", "2", ",", "\"preempted\"", ")", "return", "self", ".", "finalize", "(", "final_outcome", ")" ]
Utility function to finalize the forward execution of the concurrency state. :param outcome: :return:
[ "Utility", "function", "to", "finalize", "the", "forward", "execution", "of", "the", "concurrency", "state", "." ]
python
train
39.058824
wright-group/WrightTools
WrightTools/_dataset.py
https://github.com/wright-group/WrightTools/blob/80d3ddd5074d8d5c1bc03fd5a0e0f10d4b424aeb/WrightTools/_dataset.py#L335-L351
def log10(self, floor=None): """Take the log base 10 of the entire dataset. Parameters ---------- floor : number (optional) Clip values below floor after log. Default is None. """ def f(dataset, s, floor): arr = dataset[s] arr = np.log10(arr) if floor is not None: arr[arr < floor] = floor dataset[s] = arr self.chunkwise(f, floor=floor)
[ "def", "log10", "(", "self", ",", "floor", "=", "None", ")", ":", "def", "f", "(", "dataset", ",", "s", ",", "floor", ")", ":", "arr", "=", "dataset", "[", "s", "]", "arr", "=", "np", ".", "log10", "(", "arr", ")", "if", "floor", "is", "not", "None", ":", "arr", "[", "arr", "<", "floor", "]", "=", "floor", "dataset", "[", "s", "]", "=", "arr", "self", ".", "chunkwise", "(", "f", ",", "floor", "=", "floor", ")" ]
Take the log base 10 of the entire dataset. Parameters ---------- floor : number (optional) Clip values below floor after log. Default is None.
[ "Take", "the", "log", "base", "10", "of", "the", "entire", "dataset", "." ]
python
train
26.823529
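
A sketch assuming a saved wt5 file whose data object has a channel named 'signal'; the file name is a placeholder:

import WrightTools as wt

data = wt.open('measurement.wt5')
data.signal.log10(floor=-3)  # applied in place, chunk by chunk
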
wadda/gps3
examples/human.py
https://github.com/wadda/gps3/blob/91adcd7073b891b135b2a46d039ce2125cf09a09/examples/human.py#L39-L57
def add_args(): """Adds commandline arguments and formatted Help""" parser = argparse.ArgumentParser() parser.add_argument('-host', action='store', dest='host', default='127.0.0.1', help='DEFAULT "127.0.0.1"') parser.add_argument('-port', action='store', dest='port', default='2947', help='DEFAULT 2947', type=int) parser.add_argument('-json', dest='gpsd_protocol', const='json', action='store_const', default='json', help='DEFAULT JSON objects */') parser.add_argument('-device', dest='devicepath', action='store', help='alternate devicepath e.g.,"-device /dev/ttyUSB4"') # Infrequently used options parser.add_argument('-nmea', dest='gpsd_protocol', const='nmea', action='store_const', help='*/ output in NMEA */') # parser.add_argument('-rare', dest='gpsd_protocol', const='rare', action='store_const', help='*/ output of packets in hex */') # parser.add_argument('-raw', dest='gpsd_protocol', const='raw', action='store_const', help='*/ output of raw packets */') # parser.add_argument('-scaled', dest='gpsd_protocol', const='scaled', action='store_const', help='*/ scale output to floats */') # parser.add_argument('-timing', dest='gpsd_protocol', const='timing', action='store_const', help='*/ timing information */') # parser.add_argument('-split24', dest='gpsd_protocol', const='split24', action='store_const', help='*/ split AIS Type 24s */') # parser.add_argument('-pps', dest='gpsd_protocol', const='pps', action='store_const', help='*/ enable PPS JSON */') parser.add_argument('-v', '--version', action='version', version='Version: {}'.format(__version__)) cli_args = parser.parse_args() return cli_args
[ "def", "add_args", "(", ")", ":", "parser", "=", "argparse", ".", "ArgumentParser", "(", ")", "parser", ".", "add_argument", "(", "'-host'", ",", "action", "=", "'store'", ",", "dest", "=", "'host'", ",", "default", "=", "'127.0.0.1'", ",", "help", "=", "'DEFAULT \"127.0.0.1\"'", ")", "parser", ".", "add_argument", "(", "'-port'", ",", "action", "=", "'store'", ",", "dest", "=", "'port'", ",", "default", "=", "'2947'", ",", "help", "=", "'DEFAULT 2947'", ",", "type", "=", "int", ")", "parser", ".", "add_argument", "(", "'-json'", ",", "dest", "=", "'gpsd_protocol'", ",", "const", "=", "'json'", ",", "action", "=", "'store_const'", ",", "default", "=", "'json'", ",", "help", "=", "'DEFAULT JSON objects */'", ")", "parser", ".", "add_argument", "(", "'-device'", ",", "dest", "=", "'devicepath'", ",", "action", "=", "'store'", ",", "help", "=", "'alternate devicepath e.g.,\"-device /dev/ttyUSB4\"'", ")", "# Infrequently used options", "parser", ".", "add_argument", "(", "'-nmea'", ",", "dest", "=", "'gpsd_protocol'", ",", "const", "=", "'nmea'", ",", "action", "=", "'store_const'", ",", "help", "=", "'*/ output in NMEA */'", ")", "# parser.add_argument('-rare', dest='gpsd_protocol', const='rare', action='store_const', help='*/ output of packets in hex */')", "# parser.add_argument('-raw', dest='gpsd_protocol', const='raw', action='store_const', help='*/ output of raw packets */')", "# parser.add_argument('-scaled', dest='gpsd_protocol', const='scaled', action='store_const', help='*/ scale output to floats */')", "# parser.add_argument('-timing', dest='gpsd_protocol', const='timing', action='store_const', help='*/ timing information */')", "# parser.add_argument('-split24', dest='gpsd_protocol', const='split24', action='store_const', help='*/ split AIS Type 24s */')", "# parser.add_argument('-pps', dest='gpsd_protocol', const='pps', action='store_const', help='*/ enable PPS JSON */')", "parser", ".", "add_argument", "(", "'-v'", ",", "'--version'", ",", "action", "=", "'version'", ",", "version", "=", "'Version: {}'", ".", "format", "(", "__version__", ")", ")", "cli_args", "=", "parser", ".", "parse_args", "(", ")", "return", "cli_args" ]
Adds commandline arguments and formatted Help
[ "Adds", "commandline", "arguments", "and", "formatted", "Help" ]
python
train
87.684211
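
Because the parser above is fully self-contained, calling the function is all that is needed:

cli_args = add_args()
print(cli_args.host, cli_args.port, cli_args.gpsd_protocol)
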
gem/oq-engine
openquake/commands/upgrade_nrml.py
https://github.com/gem/oq-engine/blob/8294553a0b8aba33fd96437a35065d03547d0040/openquake/commands/upgrade_nrml.py#L32-L80
def get_vulnerability_functions_04(fname): """ Parse the vulnerability model in NRML 0.4 format. :param fname: path of the vulnerability file :returns: a dictionary imt, taxonomy -> vulnerability function + vset """ categories = dict(assetCategory=set(), lossCategory=set(), vulnerabilitySetID=set()) imts = set() taxonomies = set() vf_dict = {} # imt, taxonomy -> vulnerability function for vset in nrml.read(fname).vulnerabilityModel: categories['assetCategory'].add(vset['assetCategory']) categories['lossCategory'].add(vset['lossCategory']) categories['vulnerabilitySetID'].add(vset['vulnerabilitySetID']) IML = vset.IML imt_str = IML['IMT'] imls = ~IML imts.add(imt_str) for vfun in vset.getnodes('discreteVulnerability'): taxonomy = vfun['vulnerabilityFunctionID'] if taxonomy in taxonomies: raise InvalidFile( 'Duplicated vulnerabilityFunctionID: %s: %s, line %d' % (taxonomy, fname, vfun.lineno)) taxonomies.add(taxonomy) with context(fname, vfun): loss_ratios = ~vfun.lossRatio coefficients = ~vfun.coefficientsVariation if len(loss_ratios) != len(imls): raise InvalidFile( 'There are %d loss ratios, but %d imls: %s, line %d' % (len(loss_ratios), len(imls), fname, vfun.lossRatio.lineno)) if len(coefficients) != len(imls): raise InvalidFile( 'There are %d coefficients, but %d imls: %s, line %d' % (len(coefficients), len(imls), fname, vfun.coefficientsVariation.lineno)) with context(fname, vfun): vf_dict[imt_str, taxonomy] = scientific.VulnerabilityFunction( taxonomy, imt_str, imls, loss_ratios, coefficients, vfun['probabilisticDistribution']) categories['id'] = '_'.join(sorted(categories['vulnerabilitySetID'])) del categories['vulnerabilitySetID'] return vf_dict, categories
[ "def", "get_vulnerability_functions_04", "(", "fname", ")", ":", "categories", "=", "dict", "(", "assetCategory", "=", "set", "(", ")", ",", "lossCategory", "=", "set", "(", ")", ",", "vulnerabilitySetID", "=", "set", "(", ")", ")", "imts", "=", "set", "(", ")", "taxonomies", "=", "set", "(", ")", "vf_dict", "=", "{", "}", "# imt, taxonomy -> vulnerability function", "for", "vset", "in", "nrml", ".", "read", "(", "fname", ")", ".", "vulnerabilityModel", ":", "categories", "[", "'assetCategory'", "]", ".", "add", "(", "vset", "[", "'assetCategory'", "]", ")", "categories", "[", "'lossCategory'", "]", ".", "add", "(", "vset", "[", "'lossCategory'", "]", ")", "categories", "[", "'vulnerabilitySetID'", "]", ".", "add", "(", "vset", "[", "'vulnerabilitySetID'", "]", ")", "IML", "=", "vset", ".", "IML", "imt_str", "=", "IML", "[", "'IMT'", "]", "imls", "=", "~", "IML", "imts", ".", "add", "(", "imt_str", ")", "for", "vfun", "in", "vset", ".", "getnodes", "(", "'discreteVulnerability'", ")", ":", "taxonomy", "=", "vfun", "[", "'vulnerabilityFunctionID'", "]", "if", "taxonomy", "in", "taxonomies", ":", "raise", "InvalidFile", "(", "'Duplicated vulnerabilityFunctionID: %s: %s, line %d'", "%", "(", "taxonomy", ",", "fname", ",", "vfun", ".", "lineno", ")", ")", "taxonomies", ".", "add", "(", "taxonomy", ")", "with", "context", "(", "fname", ",", "vfun", ")", ":", "loss_ratios", "=", "~", "vfun", ".", "lossRatio", "coefficients", "=", "~", "vfun", ".", "coefficientsVariation", "if", "len", "(", "loss_ratios", ")", "!=", "len", "(", "imls", ")", ":", "raise", "InvalidFile", "(", "'There are %d loss ratios, but %d imls: %s, line %d'", "%", "(", "len", "(", "loss_ratios", ")", ",", "len", "(", "imls", ")", ",", "fname", ",", "vfun", ".", "lossRatio", ".", "lineno", ")", ")", "if", "len", "(", "coefficients", ")", "!=", "len", "(", "imls", ")", ":", "raise", "InvalidFile", "(", "'There are %d coefficients, but %d imls: %s, line %d'", "%", "(", "len", "(", "coefficients", ")", ",", "len", "(", "imls", ")", ",", "fname", ",", "vfun", ".", "coefficientsVariation", ".", "lineno", ")", ")", "with", "context", "(", "fname", ",", "vfun", ")", ":", "vf_dict", "[", "imt_str", ",", "taxonomy", "]", "=", "scientific", ".", "VulnerabilityFunction", "(", "taxonomy", ",", "imt_str", ",", "imls", ",", "loss_ratios", ",", "coefficients", ",", "vfun", "[", "'probabilisticDistribution'", "]", ")", "categories", "[", "'id'", "]", "=", "'_'", ".", "join", "(", "sorted", "(", "categories", "[", "'vulnerabilitySetID'", "]", ")", ")", "del", "categories", "[", "'vulnerabilitySetID'", "]", "return", "vf_dict", ",", "categories" ]
Parse the vulnerability model in NRML 0.4 format. :param fname: path of the vulnerability file :returns: a dictionary imt, taxonomy -> vulnerability function + vset
[ "Parse", "the", "vulnerability", "model", "in", "NRML", "0", ".", "4", "format", "." ]
python
train
44.55102
stephenmcd/gnotty
gnotty/bots/commands.py
https://github.com/stephenmcd/gnotty/blob/bea3762dc9cbc3cb21a5ae7224091cf027273c40/gnotty/bots/commands.py#L29-L34
def handle_join(self, connection, event): """ Store join time for a nickname when it joins. """ nickname = self.get_nickname(event) self.joined[nickname] = datetime.now()
[ "def", "handle_join", "(", "self", ",", "connection", ",", "event", ")", ":", "nickname", "=", "self", ".", "get_nickname", "(", "event", ")", "self", ".", "joined", "[", "nickname", "]", "=", "datetime", ".", "now", "(", ")" ]
Store join time for a nickname when it joins.
[ "Store", "join", "time", "for", "a", "nickname", "when", "it", "joins", "." ]
python
train
34.166667
tensorpack/tensorpack
examples/FasterRCNN/data.py
https://github.com/tensorpack/tensorpack/blob/d7a13cb74c9066bc791d7aafc3b744b60ee79a9f/examples/FasterRCNN/data.py#L226-L268
def get_multilevel_rpn_anchor_input(im, boxes, is_crowd):
    """
    Args:
        im: an image
        boxes: nx4, floatbox, gt. shouldn't be changed
        is_crowd: n,

    Returns:
        [(fm_labels, fm_boxes)]: Returns a tuple for each FPN level.
            Each tuple contains the anchor labels and target boxes for each pixel in the featuremap.

            fm_labels: fHxfWx NUM_ANCHOR_RATIOS
            fm_boxes: fHxfWx NUM_ANCHOR_RATIOS x4
    """
    boxes = boxes.copy()
    anchors_per_level = get_all_anchors_fpn()
    flatten_anchors_per_level = [k.reshape((-1, 4)) for k in anchors_per_level]
    all_anchors_flatten = np.concatenate(flatten_anchors_per_level, axis=0)

    inside_ind, inside_anchors = filter_boxes_inside_shape(all_anchors_flatten, im.shape[:2])
    anchor_labels, anchor_gt_boxes = get_anchor_labels(inside_anchors, boxes[is_crowd == 0], boxes[is_crowd == 1])

    # map back to all_anchors, then split to each level
    num_all_anchors = all_anchors_flatten.shape[0]
    all_labels = -np.ones((num_all_anchors, ), dtype='int32')
    all_labels[inside_ind] = anchor_labels
    all_boxes = np.zeros((num_all_anchors, 4), dtype='float32')
    all_boxes[inside_ind] = anchor_gt_boxes

    start = 0
    multilevel_inputs = []
    for level_anchor in anchors_per_level:
        assert level_anchor.shape[2] == len(cfg.RPN.ANCHOR_RATIOS)
        anchor_shape = level_anchor.shape[:3]   # fHxfWxNUM_ANCHOR_RATIOS
        num_anchor_this_level = np.prod(anchor_shape)
        end = start + num_anchor_this_level
        multilevel_inputs.append(
            (all_labels[start: end].reshape(anchor_shape),
             all_boxes[start: end, :].reshape(anchor_shape + (4,))
             ))
        start = end
    assert end == num_all_anchors, "{} != {}".format(end, num_all_anchors)
    return multilevel_inputs
[ "def", "get_multilevel_rpn_anchor_input", "(", "im", ",", "boxes", ",", "is_crowd", ")", ":", "boxes", "=", "boxes", ".", "copy", "(", ")", "anchors_per_level", "=", "get_all_anchors_fpn", "(", ")", "flatten_anchors_per_level", "=", "[", "k", ".", "reshape", "(", "(", "-", "1", ",", "4", ")", ")", "for", "k", "in", "anchors_per_level", "]", "all_anchors_flatten", "=", "np", ".", "concatenate", "(", "flatten_anchors_per_level", ",", "axis", "=", "0", ")", "inside_ind", ",", "inside_anchors", "=", "filter_boxes_inside_shape", "(", "all_anchors_flatten", ",", "im", ".", "shape", "[", ":", "2", "]", ")", "anchor_labels", ",", "anchor_gt_boxes", "=", "get_anchor_labels", "(", "inside_anchors", ",", "boxes", "[", "is_crowd", "==", "0", "]", ",", "boxes", "[", "is_crowd", "==", "1", "]", ")", "# map back to all_anchors, then split to each level", "num_all_anchors", "=", "all_anchors_flatten", ".", "shape", "[", "0", "]", "all_labels", "=", "-", "np", ".", "ones", "(", "(", "num_all_anchors", ",", ")", ",", "dtype", "=", "'int32'", ")", "all_labels", "[", "inside_ind", "]", "=", "anchor_labels", "all_boxes", "=", "np", ".", "zeros", "(", "(", "num_all_anchors", ",", "4", ")", ",", "dtype", "=", "'float32'", ")", "all_boxes", "[", "inside_ind", "]", "=", "anchor_gt_boxes", "start", "=", "0", "multilevel_inputs", "=", "[", "]", "for", "level_anchor", "in", "anchors_per_level", ":", "assert", "level_anchor", ".", "shape", "[", "2", "]", "==", "len", "(", "cfg", ".", "RPN", ".", "ANCHOR_RATIOS", ")", "anchor_shape", "=", "level_anchor", ".", "shape", "[", ":", "3", "]", "# fHxfWxNUM_ANCHOR_RATIOS", "num_anchor_this_level", "=", "np", ".", "prod", "(", "anchor_shape", ")", "end", "=", "start", "+", "num_anchor_this_level", "multilevel_inputs", ".", "append", "(", "(", "all_labels", "[", "start", ":", "end", "]", ".", "reshape", "(", "anchor_shape", ")", ",", "all_boxes", "[", "start", ":", "end", ",", ":", "]", ".", "reshape", "(", "anchor_shape", "+", "(", "4", ",", ")", ")", ")", ")", "start", "=", "end", "assert", "end", "==", "num_all_anchors", ",", "\"{} != {}\"", ".", "format", "(", "end", ",", "num_all_anchors", ")", "return", "multilevel_inputs" ]
Args:
    im: an image
    boxes: nx4, floatbox, gt. shouldn't be changed
    is_crowd: n,

Returns:
    [(fm_labels, fm_boxes)]: Returns a tuple for each FPN level.
        Each tuple contains the anchor labels and target boxes for each pixel in the featuremap.

        fm_labels: fHxfWx NUM_ANCHOR_RATIOS
        fm_boxes: fHxfWx NUM_ANCHOR_RATIOS x4
[ "Args", ":", "im", ":", "an", "image", "boxes", ":", "nx4", "floatbox", "gt", ".", "shouldn", "t", "be", "changed", "is_crowd", ":", "n" ]
python
train
41.627907
lltk/lltk
lltk/caching.py
https://github.com/lltk/lltk/blob/d171de55c1b97695fddedf4b02401ae27bf1d634/lltk/caching.py#L15-L21
def register(cache):
    ''' Registers a cache. '''

    global caches
    name = cache().name
    if name not in caches:
        caches[name] = cache
[ "def", "register", "(", "cache", ")", ":", "global", "caches", "name", "=", "cache", "(", ")", ".", "name", "if", "name", "not", "in", "caches", ":", "caches", "[", "name", "]", "=", "cache" ]
Registers a cache.
[ "Registers", "a", "cache", "." ]
python
train
18.857143
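
A sketch of registering a custom cache class; the only interface assumed here is the `name` attribute that register reads:

class DictCache(object):
    def __init__(self):
        self.name = 'dict'
        self.store = {}

register(DictCache)  # now reachable as caches['dict']
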
yyuu/botornado
boto/gs/key.py
https://github.com/yyuu/botornado/blob/fffb056f5ff2324d1d5c1304014cfb1d899f602e/boto/gs/key.py#L91-L110
def add_group_grant(self, permission, group_id): """ Convenience method that provides a quick way to add a canonical group grant to a key. This method retrieves the current ACL, creates a new grant based on the parameters passed in, adds that grant to the ACL and then PUT's the new ACL back to GS. :type permission: string :param permission: The permission being granted. Should be one of: READ|FULL_CONTROL See http://code.google.com/apis/storage/docs/developer-guide.html#authorization for more details on permissions. :type group_id: string :param group_id: The canonical group id associated with the Google Groups account you are granting the permission to. """ acl = self.get_acl() acl.add_group_grant(permission, group_id) self.set_acl(acl)
[ "def", "add_group_grant", "(", "self", ",", "permission", ",", "group_id", ")", ":", "acl", "=", "self", ".", "get_acl", "(", ")", "acl", ".", "add_group_grant", "(", "permission", ",", "group_id", ")", "self", ".", "set_acl", "(", "acl", ")" ]
Convenience method that provides a quick way to add a canonical group grant to a key. This method retrieves the current ACL, creates a new grant based on the parameters passed in, adds that grant to the ACL and then PUT's the new ACL back to GS. :type permission: string :param permission: The permission being granted. Should be one of: READ|FULL_CONTROL See http://code.google.com/apis/storage/docs/developer-guide.html#authorization for more details on permissions. :type group_id: string :param group_id: The canonical group id associated with the Google Groups account you are granting the permission to.
[ "Convenience", "method", "that", "provides", "a", "quick", "way", "to", "add", "a", "canonical", "group", "grant", "to", "a", "key", ".", "This", "method", "retrieves", "the", "current", "ACL", "creates", "a", "new", "grant", "based", "on", "the", "parameters", "passed", "in", "adds", "that", "grant", "to", "the", "ACL", "and", "then", "PUT", "s", "the", "new", "ACL", "back", "to", "GS", "." ]
python
train
44.1
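
A hedged sketch using boto's Google Storage connection; the bucket, object and group id are placeholders:

import boto

key = boto.connect_gs().get_bucket('my-bucket').get_key('report.csv')
key.add_group_grant('READ', 'example-group-id')
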
Guake/guake
guake/prefs.py
https://github.com/Guake/guake/blob/4153ef38f9044cbed6494075fce80acd5809df2b/guake/prefs.py#L427-L431
def on_gtk_prefer_dark_theme_toggled(self, chk):
        """Set the `gtk_prefer_dark_theme` property in dconf
        """
        self.settings.general.set_boolean('gtk-prefer-dark-theme', chk.get_active())
        select_gtk_theme(self.settings)
[ "def", "on_gtk_prefer_dark_theme_toggled", "(", "self", ",", "chk", ")", ":", "self", ".", "settings", ".", "general", ".", "set_boolean", "(", "'gtk-prefer-dark-theme'", ",", "chk", ".", "get_active", "(", ")", ")", "select_gtk_theme", "(", "self", ".", "settings", ")" ]
Set the `gtk_prefer_dark_theme` property in dconf
[ "Set", "the", "gtk_prefer_dark_theme", "property", "in", "dconf" ]
python
train
48.4
mikedh/trimesh
trimesh/triangles.py
https://github.com/mikedh/trimesh/blob/25e059bf6d4caa74f62ffd58ce4f61a90ee4e518/trimesh/triangles.py#L118-L142
def all_coplanar(triangles): """ Check to see if a list of triangles are all coplanar Parameters ---------------- triangles: (n, 3, 3) float Vertices of triangles Returns --------------- all_coplanar : bool True if all triangles are coplanar """ triangles = np.asanyarray(triangles, dtype=np.float64) if not util.is_shape(triangles, (-1, 3, 3)): raise ValueError('Triangles must be (n,3,3)!') test_normal = normals(triangles)[0] test_vertex = triangles[0][0] distances = point_plane_distance(points=triangles[1:].reshape((-1, 3)), plane_normal=test_normal, plane_origin=test_vertex) all_coplanar = np.all(np.abs(distances) < tol.zero) return all_coplanar
[ "def", "all_coplanar", "(", "triangles", ")", ":", "triangles", "=", "np", ".", "asanyarray", "(", "triangles", ",", "dtype", "=", "np", ".", "float64", ")", "if", "not", "util", ".", "is_shape", "(", "triangles", ",", "(", "-", "1", ",", "3", ",", "3", ")", ")", ":", "raise", "ValueError", "(", "'Triangles must be (n,3,3)!'", ")", "test_normal", "=", "normals", "(", "triangles", ")", "[", "0", "]", "test_vertex", "=", "triangles", "[", "0", "]", "[", "0", "]", "distances", "=", "point_plane_distance", "(", "points", "=", "triangles", "[", "1", ":", "]", ".", "reshape", "(", "(", "-", "1", ",", "3", ")", ")", ",", "plane_normal", "=", "test_normal", ",", "plane_origin", "=", "test_vertex", ")", "all_coplanar", "=", "np", ".", "all", "(", "np", ".", "abs", "(", "distances", ")", "<", "tol", ".", "zero", ")", "return", "all_coplanar" ]
Check to see if a list of triangles are all coplanar Parameters ---------------- triangles: (n, 3, 3) float Vertices of triangles Returns --------------- all_coplanar : bool True if all triangles are coplanar
[ "Check", "to", "see", "if", "a", "list", "of", "triangles", "are", "all", "coplanar" ]
python
train
31.64
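A quick check of ``all_coplanar`` on two triangles that both lie in the z=0 plane (a small sketch; only numpy and trimesh are needed):

    import numpy as np
    from trimesh.triangles import all_coplanar

    # two triangles in the z=0 plane -> every point-plane distance is zero
    triangles = np.array([[[0, 0, 0], [1, 0, 0], [0, 1, 0]],
                          [[1, 1, 0], [2, 1, 0], [1, 2, 0]]], dtype=np.float64)
    print(all_coplanar(triangles))  # True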
crate/crate-python
src/crate/client/sqlalchemy/compiler.py
https://github.com/crate/crate-python/blob/68e39c95f5bbe88b74bbfa26de4347fc644636a8/src/crate/client/sqlalchemy/compiler.py#L362-L423
def _get_crud_params(compiler, stmt, **kw): """ extract values from crud parameters taken from SQLAlchemy's crud module (since 1.0.x) and adapted for Crate dialect""" compiler.postfetch = [] compiler.insert_prefetch = [] compiler.update_prefetch = [] compiler.returning = [] # no parameters in the statement, no parameters in the # compiled params - return binds for all columns if compiler.column_keys is None and stmt.parameters is None: return [(c, crud._create_bind_param(compiler, c, None, required=True)) for c in stmt.table.columns] if stmt._has_multi_parameters: stmt_parameters = stmt.parameters[0] else: stmt_parameters = stmt.parameters # getters - these are normally just column.key, # but in the case of mysql multi-table update, the rules for # .key must conditionally take tablename into account if SA_VERSION >= SA_1_1: _column_as_key, _getattr_col_key, _col_bind_name = \ crud._key_getters_for_crud_column(compiler, stmt) else: _column_as_key, _getattr_col_key, _col_bind_name = \ crud._key_getters_for_crud_column(compiler) # if we have statement parameters - set defaults in the # compiled params if compiler.column_keys is None: parameters = {} else: parameters = dict((_column_as_key(key), crud.REQUIRED) for key in compiler.column_keys if not stmt_parameters or key not in stmt_parameters) # create a list of column assignment clauses as tuples values = [] if stmt_parameters is not None: crud._get_stmt_parameters_params( compiler, parameters, stmt_parameters, _column_as_key, values, kw) check_columns = {} crud._scan_cols(compiler, stmt, parameters, _getattr_col_key, _column_as_key, _col_bind_name, check_columns, values, kw) if stmt._has_multi_parameters: values = crud._extend_values_for_multiparams(compiler, stmt, values, kw) return values
[ "def", "_get_crud_params", "(", "compiler", ",", "stmt", ",", "*", "*", "kw", ")", ":", "compiler", ".", "postfetch", "=", "[", "]", "compiler", ".", "insert_prefetch", "=", "[", "]", "compiler", ".", "update_prefetch", "=", "[", "]", "compiler", ".", "returning", "=", "[", "]", "# no parameters in the statement, no parameters in the", "# compiled params - return binds for all columns", "if", "compiler", ".", "column_keys", "is", "None", "and", "stmt", ".", "parameters", "is", "None", ":", "return", "[", "(", "c", ",", "crud", ".", "_create_bind_param", "(", "compiler", ",", "c", ",", "None", ",", "required", "=", "True", ")", ")", "for", "c", "in", "stmt", ".", "table", ".", "columns", "]", "if", "stmt", ".", "_has_multi_parameters", ":", "stmt_parameters", "=", "stmt", ".", "parameters", "[", "0", "]", "else", ":", "stmt_parameters", "=", "stmt", ".", "parameters", "# getters - these are normally just column.key,", "# but in the case of mysql multi-table update, the rules for", "# .key must conditionally take tablename into account", "if", "SA_VERSION", ">=", "SA_1_1", ":", "_column_as_key", ",", "_getattr_col_key", ",", "_col_bind_name", "=", "crud", ".", "_key_getters_for_crud_column", "(", "compiler", ",", "stmt", ")", "else", ":", "_column_as_key", ",", "_getattr_col_key", ",", "_col_bind_name", "=", "crud", ".", "_key_getters_for_crud_column", "(", "compiler", ")", "# if we have statement parameters - set defaults in the", "# compiled params", "if", "compiler", ".", "column_keys", "is", "None", ":", "parameters", "=", "{", "}", "else", ":", "parameters", "=", "dict", "(", "(", "_column_as_key", "(", "key", ")", ",", "crud", ".", "REQUIRED", ")", "for", "key", "in", "compiler", ".", "column_keys", "if", "not", "stmt_parameters", "or", "key", "not", "in", "stmt_parameters", ")", "# create a list of column assignment clauses as tuples", "values", "=", "[", "]", "if", "stmt_parameters", "is", "not", "None", ":", "crud", ".", "_get_stmt_parameters_params", "(", "compiler", ",", "parameters", ",", "stmt_parameters", ",", "_column_as_key", ",", "values", ",", "kw", ")", "check_columns", "=", "{", "}", "crud", ".", "_scan_cols", "(", "compiler", ",", "stmt", ",", "parameters", ",", "_getattr_col_key", ",", "_column_as_key", ",", "_col_bind_name", ",", "check_columns", ",", "values", ",", "kw", ")", "if", "stmt", ".", "_has_multi_parameters", ":", "values", "=", "crud", ".", "_extend_values_for_multiparams", "(", "compiler", ",", "stmt", ",", "values", ",", "kw", ")", "return", "values" ]
extract values from crud parameters taken from SQLAlchemy's crud module (since 1.0.x) and adapted for Crate dialect
[ "extract", "values", "from", "crud", "parameters" ]
python
train
38.370968
jobovy/galpy
galpy/orbit/OrbitTop.py
https://github.com/jobovy/galpy/blob/9c5b9fe65d58835624dffe432be282060918ee08/galpy/orbit/OrbitTop.py#L745-L766
def helioY(self,*args,**kwargs): """ NAME: helioY PURPOSE: return Heliocentric Galactic rectangular y-coordinate (aka "Y") INPUT: t - (optional) time at which to get Y obs=[X,Y,Z] - (optional) position and velocity of observer (in kpc and km/s) (default=Object-wide default) OR Orbit object that corresponds to the orbit of the observer Y is ignored and always assumed to be zero ro= distance in kpc corresponding to R=1. (default=Object-wide default) OUTPUT: helioY(t) in kpc HISTORY: 2011-02-24 - Written - Bovy (NYU) """ _check_roSet(self,kwargs,'helioY') X, Y, Z= self._helioXYZ(*args,**kwargs) return Y
[ "def", "helioY", "(", "self", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "_check_roSet", "(", "self", ",", "kwargs", ",", "'helioY'", ")", "X", ",", "Y", ",", "Z", "=", "self", ".", "_helioXYZ", "(", "*", "args", ",", "*", "*", "kwargs", ")", "return", "Y" ]
NAME: helioY PURPOSE: return Heliocentric Galactic rectangular y-coordinate (aka "Y") INPUT: t - (optional) time at which to get Y obs=[X,Y,Z] - (optional) position and velocity of observer (in kpc and km/s) (default=Object-wide default) OR Orbit object that corresponds to the orbit of the observer Y is ignored and always assumed to be zero ro= distance in kpc corresponding to R=1. (default=Object-wide default) OUTPUT: helioY(t) in kpc HISTORY: 2011-02-24 - Written - Bovy (NYU)
[ "NAME", ":", "helioY", "PURPOSE", ":", "return", "Heliocentric", "Galactic", "rectangular", "y", "-", "coordinate", "(", "aka", "Y", ")", "INPUT", ":", "t", "-", "(", "optional", ")", "time", "at", "which", "to", "get", "Y", "obs", "=", "[", "X", "Y", "Z", "]", "-", "(", "optional", ")", "position", "and", "velocity", "of", "observer", "(", "in", "kpc", "and", "km", "/", "s", ")", "(", "default", "=", "Object", "-", "wide", "default", ")", "OR", "Orbit", "object", "that", "corresponds", "to", "the", "orbit", "of", "the", "observer", "Y", "is", "ignored", "and", "always", "assumed", "to", "be", "zero", "ro", "=", "distance", "in", "kpc", "corresponding", "to", "R", "=", "1", ".", "(", "default", "=", "Object", "-", "wide", "default", ")", "OUTPUT", ":", "helioY", "(", "t", ")", "in", "kpc", "HISTORY", ":", "2011", "-", "02", "-", "24", "-", "Written", "-", "Bovy", "(", "NYU", ")" ]
python
train
38.772727
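A hedged sketch of calling ``helioY``; the initial conditions follow galpy's documented [R, vR, vT, z, vz, phi] convention, and ``ro``/``vo`` are supplied so the ``_check_roSet`` guard passes:

    from galpy.orbit import Orbit

    # [R, vR, vT, z, vz, phi] in internal units, with physical scales attached
    o = Orbit([1., 0.1, 1.1, 0., 0.1, 0.], ro=8., vo=220.)
    print(o.helioY())  # heliocentric Y in kpc at t=0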
ktdreyer/txbugzilla
txbugzilla/__init__.py
https://github.com/ktdreyer/txbugzilla/blob/ccfc6667ce9d696b08b468b25c813cc2b68d30d6/txbugzilla/__init__.py#L87-L98
def get_bugs_summaries(self, bugids):
        """ Get multiple bug objects' summaries only (faster).

        :param bugids: ``list`` of ``int``, bug numbers.
        :returns: deferred that when fired returns a list of ``AttrDict``s
                  representing these bugs.
        """
        payload = {'ids': bugids, 'include_fields': ['id', 'summary']}
        d = self.call('Bug.get', payload)
        d.addCallback(self._parse_bugs_callback)
        return d
[ "def", "get_bugs_summaries", "(", "self", ",", "bugids", ")", ":", "payload", "=", "{", "'ids'", ":", "bugids", ",", "'include_fields'", ":", "[", "'id'", ",", "'summary'", "]", "}", "d", "=", "self", ".", "call", "(", "'Bug.get'", ",", "payload", ")", "d", ".", "addCallback", "(", "self", ".", "_parse_bugs_callback", ")", "return", "d" ]
Get multiple bug objects' summaries only (faster).

:param bugids: ``list`` of ``int``, bug numbers.
:returns: deferred that when fired returns a list of ``AttrDict``s
          representing these bugs.
[ "Get", "multiple", "bug", "objects", "summaries", "only", "(", "faster", ")", "." ]
python
train
38.416667
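A sketch of typical Twisted usage; ``connect()`` and its default endpoint are assumptions based on txbugzilla's deferred-based API, and the bug ids are placeholders:

    from twisted.internet import reactor
    from txbugzilla import connect

    def print_summaries(bugs):
        for bug in bugs:
            print(bug.id, bug.summary)
        reactor.stop()

    d = connect()  # anonymous connection (assumed default endpoint)
    d.addCallback(lambda bz: bz.get_bugs_summaries([123456, 123457]))
    d.addCallback(print_summaries)
    reactor.run()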
PyThaiNLP/pythainlp
pythainlp/ulmfit/__init__.py
https://github.com/PyThaiNLP/pythainlp/blob/e9a300b8a99dfd1a67a955e7c06f62e4afe0fbca/pythainlp/ulmfit/__init__.py#L78-L87
def replace_rep_after(text: str) -> str: "Replace repetitions at the character level in `text` after the repetition" def _replace_rep(m): c, cc = m.groups() return f"{c}{TK_REP}{len(cc)+1}" re_rep = re.compile(r"(\S)(\1{2,})") return re_rep.sub(_replace_rep, text)
[ "def", "replace_rep_after", "(", "text", ":", "str", ")", "->", "str", ":", "def", "_replace_rep", "(", "m", ")", ":", "c", ",", "cc", "=", "m", ".", "groups", "(", ")", "return", "f\"{c}{TK_REP}{len(cc)+1}\"", "re_rep", "=", "re", ".", "compile", "(", "r\"(\\S)(\\1{2,})\"", ")", "return", "re_rep", ".", "sub", "(", "_replace_rep", ",", "text", ")" ]
Replace repetitions at the character level in `text` after the repetition
[ "Replace", "repetitions", "at", "the", "character", "level", "in", "text", "after", "the", "repetition" ]
python
train
29
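A quick illustration of ``replace_rep_after``; the exact marker depends on the module's ``TK_REP`` constant, assumed here to be ``xxrep``:

    from pythainlp.ulmfit import replace_rep_after

    # 'o' repeated five times collapses to a single 'o' plus a count marker
    print(replace_rep_after("coooool"))  # -> 'coxxrep5l' when TK_REP == 'xxrep'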
hyperledger/indy-crypto
wrappers/python/indy_crypto/bls.py
https://github.com/hyperledger/indy-crypto/blob/1675e29a2a5949b44899553d3d128335cf7a61b3/wrappers/python/indy_crypto/bls.py#L77-L91
def new(cls) -> 'Generator':
        """
        Creates and returns a random generator point that satisfies BLS algorithm requirements.

        :return: BLS generator
        """
        logger = logging.getLogger(__name__)
        logger.debug("Generator::new: >>>")

        c_instance = c_void_p()
        do_call(cls.new_handler, byref(c_instance))

        res = cls(c_instance)

        logger.debug("Generator::new: <<< res: %r", res)
        return res
[ "def", "new", "(", "cls", ")", "->", "'Generator'", ":", "logger", "=", "logging", ".", "getLogger", "(", "__name__", ")", "logger", ".", "debug", "(", "\"Generator::new: >>>\"", ")", "c_instance", "=", "c_void_p", "(", ")", "do_call", "(", "cls", ".", "new_handler", ",", "byref", "(", "c_instance", ")", ")", "res", "=", "cls", "(", "c_instance", ")", "logger", ".", "debug", "(", "\"Generator::new: <<< res: %r\"", ",", "res", ")", "return", "res" ]
Creates and returns a random generator point that satisfies BLS algorithm requirements.

:return: BLS generator
[ "Creates", "and", "returns", "random", "generator", "point", "that", "satisfy", "BLS", "algorithm", "requirements", ".", ":", "return", ":", "BLS", "generator" ]
python
train
29.533333
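A minimal sketch following the wrapper API shown above; it assumes the native indy-crypto shared library is installed and loadable:

    from indy_crypto.bls import Generator

    # create a random generator point suitable for subsequent BLS operations
    gen = Generator.new()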
googledatalab/pydatalab
google/datalab/bigquery/commands/_bigquery.py
https://github.com/googledatalab/pydatalab/blob/d9031901d5bca22fe0d5925d204e6698df9852e1/google/datalab/bigquery/commands/_bigquery.py#L480-L516
def _udf_cell(args, cell_body): """Implements the Bigquery udf cell magic for ipython notebooks. The supported syntax is: %%bq udf --name <var> --language <lang> // @param <name> <type> // @returns <type> // @import <gcs_path> <js function> Args: args: the optional arguments following '%%bq udf'. cell_body: the UDF declaration (inputs and outputs) and implementation in javascript. """ udf_name = args['name'] if not udf_name: raise Exception('Declaration must be of the form %%bq udf --name <variable name>') # Parse out parameters, return type, and imports param_pattern = r'^\s*\/\/\s*@param\s+([<>\w]+)\s+([<>\w,\s]+)\s*$' returns_pattern = r'^\s*\/\/\s*@returns\s+([<>\w,\s]+)\s*$' import_pattern = r'^\s*\/\/\s*@import\s+(\S+)\s*$' params = re.findall(param_pattern, cell_body, re.MULTILINE) return_type = re.findall(returns_pattern, cell_body, re.MULTILINE) imports = re.findall(import_pattern, cell_body, re.MULTILINE) if len(return_type) < 1: raise Exception('UDF return type must be defined using // @returns <type>') if len(return_type) > 1: raise Exception('Found more than one return type definition') return_type = return_type[0] # Finally build the UDF object udf = bigquery.UDF(udf_name, cell_body, return_type, params, args['language'], imports) google.datalab.utils.commands.notebook_environment()[udf_name] = udf
[ "def", "_udf_cell", "(", "args", ",", "cell_body", ")", ":", "udf_name", "=", "args", "[", "'name'", "]", "if", "not", "udf_name", ":", "raise", "Exception", "(", "'Declaration must be of the form %%bq udf --name <variable name>'", ")", "# Parse out parameters, return type, and imports", "param_pattern", "=", "r'^\\s*\\/\\/\\s*@param\\s+([<>\\w]+)\\s+([<>\\w,\\s]+)\\s*$'", "returns_pattern", "=", "r'^\\s*\\/\\/\\s*@returns\\s+([<>\\w,\\s]+)\\s*$'", "import_pattern", "=", "r'^\\s*\\/\\/\\s*@import\\s+(\\S+)\\s*$'", "params", "=", "re", ".", "findall", "(", "param_pattern", ",", "cell_body", ",", "re", ".", "MULTILINE", ")", "return_type", "=", "re", ".", "findall", "(", "returns_pattern", ",", "cell_body", ",", "re", ".", "MULTILINE", ")", "imports", "=", "re", ".", "findall", "(", "import_pattern", ",", "cell_body", ",", "re", ".", "MULTILINE", ")", "if", "len", "(", "return_type", ")", "<", "1", ":", "raise", "Exception", "(", "'UDF return type must be defined using // @returns <type>'", ")", "if", "len", "(", "return_type", ")", ">", "1", ":", "raise", "Exception", "(", "'Found more than one return type definition'", ")", "return_type", "=", "return_type", "[", "0", "]", "# Finally build the UDF object", "udf", "=", "bigquery", ".", "UDF", "(", "udf_name", ",", "cell_body", ",", "return_type", ",", "params", ",", "args", "[", "'language'", "]", ",", "imports", ")", "google", ".", "datalab", ".", "utils", ".", "commands", ".", "notebook_environment", "(", ")", "[", "udf_name", "]", "=", "udf" ]
Implements the Bigquery udf cell magic for ipython notebooks. The supported syntax is: %%bq udf --name <var> --language <lang> // @param <name> <type> // @returns <type> // @import <gcs_path> <js function> Args: args: the optional arguments following '%%bq udf'. cell_body: the UDF declaration (inputs and outputs) and implementation in javascript.
[ "Implements", "the", "Bigquery", "udf", "cell", "magic", "for", "ipython", "notebooks", "." ]
python
train
37.081081
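For reference, a cell that this magic parses successfully might look like the sketch below; the UDF name, parameter, and body are illustrative:

    %%bq udf --name upper_udf --language js
    // @param word STRING
    // @returns STRING
    function(word) { return word.toUpperCase(); }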
kata198/ProcessMappingScanner
ProcessMappingScanner/__init__.py
https://github.com/kata198/ProcessMappingScanner/blob/d1735fe6746493c51aaae213b982fa96f5c5b621/ProcessMappingScanner/__init__.py#L290-L309
def scanAllProcessesForMapping(searchPortion, isExactMatch=False, ignoreCase=False): ''' scanAllProcessesForMapping - Scans all processes on the system for a given search pattern. @param searchPortion <str> - A mapping for which to search, example: libc or python or libz.so.1. Give empty string to return all mappings. @param isExactMatch <bool> Default False - If match should be exact, otherwise a partial match is performed. @param ignoreCase <bool> Default False - If True, search will be performed case-insensitively @return - <dict> - A dictionary of pid -> mappingResults for each pid that matched the search pattern. For format of "mappingResults", @see scanProcessForMapping ''' pids = getAllRunningPids() # Since processes could disappear, we run the scan as fast as possible here with a list comprehension, then assemble the return dictionary later. mappingResults = [scanProcessForMapping(pid, searchPortion, isExactMatch, ignoreCase) for pid in pids] ret = {} for i in range(len(pids)): if mappingResults[i] is not None: ret[pids[i]] = mappingResults[i] return ret
[ "def", "scanAllProcessesForMapping", "(", "searchPortion", ",", "isExactMatch", "=", "False", ",", "ignoreCase", "=", "False", ")", ":", "pids", "=", "getAllRunningPids", "(", ")", "# Since processes could disappear, we run the scan as fast as possible here with a list comprehension, then assemble the return dictionary later.", "mappingResults", "=", "[", "scanProcessForMapping", "(", "pid", ",", "searchPortion", ",", "isExactMatch", ",", "ignoreCase", ")", "for", "pid", "in", "pids", "]", "ret", "=", "{", "}", "for", "i", "in", "range", "(", "len", "(", "pids", ")", ")", ":", "if", "mappingResults", "[", "i", "]", "is", "not", "None", ":", "ret", "[", "pids", "[", "i", "]", "]", "=", "mappingResults", "[", "i", "]", "return", "ret" ]
scanAllProcessesForMapping - Scans all processes on the system for a given search pattern. @param searchPortion <str> - A mapping for which to search, example: libc or python or libz.so.1. Give empty string to return all mappings. @param isExactMatch <bool> Default False - If match should be exact, otherwise a partial match is performed. @param ignoreCase <bool> Default False - If True, search will be performed case-insensitively @return - <dict> - A dictionary of pid -> mappingResults for each pid that matched the search pattern. For format of "mappingResults", @see scanProcessForMapping
[ "scanAllProcessesForMapping", "-", "Scans", "all", "processes", "on", "the", "system", "for", "a", "given", "search", "pattern", "." ]
python
valid
58.35
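Example usage on a Linux host; the module reads /proc, so the results depend on the running processes and the caller's permissions:

    from ProcessMappingScanner import scanAllProcessesForMapping

    # pid -> mapping results for every process with libc mapped in
    results = scanAllProcessesForMapping('libc', ignoreCase=True)
    for pid, mappingInfo in results.items():
        print(pid, mappingInfo)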
ensime/ensime-vim
ensime_shared/client.py
https://github.com/ensime/ensime-vim/blob/caa734e84f002b25446c615706283a74edd4ecfe/ensime_shared/client.py#L336-L342
def type_check_cmd(self, args, range=None): """Sets the flag to begin buffering typecheck notes & clears any stale notes before requesting a typecheck from the server""" self.log.debug('type_check_cmd: in') self.start_typechecking() self.type_check("") self.editor.message('typechecking')
[ "def", "type_check_cmd", "(", "self", ",", "args", ",", "range", "=", "None", ")", ":", "self", ".", "log", ".", "debug", "(", "'type_check_cmd: in'", ")", "self", ".", "start_typechecking", "(", ")", "self", ".", "type_check", "(", "\"\"", ")", "self", ".", "editor", ".", "message", "(", "'typechecking'", ")" ]
Sets the flag to begin buffering typecheck notes & clears any stale notes before requesting a typecheck from the server
[ "Sets", "the", "flag", "to", "begin", "buffering", "typecheck", "notes", "&", "clears", "any", "stale", "notes", "before", "requesting", "a", "typecheck", "from", "the", "server" ]
python
train
47.142857
DAI-Lab/Copulas
copulas/multivariate/tree.py
https://github.com/DAI-Lab/Copulas/blob/821df61c3d36a6b81ef2883935f935c2eaaa862c/copulas/multivariate/tree.py#L111-L118
def _get_constraints(self):
        """Get the neighboring edges for each edge in the tree."""
        num_edges = len(self.edges)
        for k in range(num_edges):
            for i in range(num_edges):
                # add i to k's neighbors if edges i and k are adjacent
                if k != i and self.edges[k].is_adjacent(self.edges[i]):
                    self.edges[k].neighbors.append(i)
[ "def", "_get_constraints", "(", "self", ")", ":", "num_edges", "=", "len", "(", "self", ".", "edges", ")", "for", "k", "in", "range", "(", "num_edges", ")", ":", "for", "i", "in", "range", "(", "num_edges", ")", ":", "# add to constraints if i shared an edge with k", "if", "k", "!=", "i", "and", "self", ".", "edges", "[", "k", "]", ".", "is_adjacent", "(", "self", ".", "edges", "[", "i", "]", ")", ":", "self", ".", "edges", "[", "k", "]", ".", "neighbors", ".", "append", "(", "i", ")" ]
Get the neighboring edges for each edge in the tree.
[ "Get", "neighboring", "edges", "for", "each", "edge", "in", "the", "edges", "." ]
python
train
48
gmr/tinman
tinman/handlers/rabbitmq.py
https://github.com/gmr/tinman/blob/98f0acd15a228d752caa1864cdf02aaa3d492a9f/tinman/handlers/rabbitmq.py#L117-L126
def _publish_deferred_messages(self):
        """Called when pika is connected and has a channel open to publish
        any buffered requests.

        """
        global message_stack
        if not self._rabbitmq_is_closed and message_stack:
            LOGGER.info('Publishing %i deferred message(s)', len(message_stack))
            while message_stack:
                self._publish_message(*message_stack.pop())
[ "def", "_publish_deferred_messages", "(", "self", ")", ":", "global", "message_stack", "if", "not", "self", ".", "_rabbitmq_is_closed", "and", "message_stack", ":", "LOGGER", ".", "info", "(", "'Publishing %i deferred message(s)'", ",", "len", "(", "message_stack", ")", ")", "while", "message_stack", ":", "self", ".", "_publish_message", "(", "*", "message_stack", ".", "pop", "(", ")", ")" ]
Called when pika is connected and has a channel open to publish any buffered requests.
[ "Called", "when", "pika", "is", "connected", "and", "has", "a", "channel", "open", "to", "publish", "any", "requests", "buffered", "." ]
python
train
40.9
HdrHistogram/HdrHistogram_py
hdrh/codec.py
https://github.com/HdrHistogram/HdrHistogram_py/blob/cb99981b0564a62e1aa02bd764efa6445923f8f7/hdrh/codec.py#L290-L310
def encode(self):
        '''Compress the associated encodable payload,
        prepend the header then encode with base64 if requested

        Returns:
            the b64 encoded wire encoding of the histogram (as a string)
            or the compressed payload (as a string, if b64 wrapping is disabled)
        '''
        # only compress the first non zero buckets
        # if histogram is empty we do not encode any counter
        if self.histogram.total_count:
            relevant_length = \
                self.histogram.get_counts_array_index(self.histogram.max_value) + 1
        else:
            relevant_length = 0
        cpayload = self.payload.compress(relevant_length)
        if self.b64_wrap:
            self.header.length = len(cpayload)
            header_str = ctypes.string_at(addressof(self.header), ext_header_size)
            return base64.b64encode(header_str + cpayload)
        return cpayload
[ "def", "encode", "(", "self", ")", ":", "# only compress the first non zero buckets", "# if histogram is empty we do not encode any counter", "if", "self", ".", "histogram", ".", "total_count", ":", "relevant_length", "=", "self", ".", "histogram", ".", "get_counts_array_index", "(", "self", ".", "histogram", ".", "max_value", ")", "+", "1", "else", ":", "relevant_length", "=", "0", "cpayload", "=", "self", ".", "payload", ".", "compress", "(", "relevant_length", ")", "if", "self", ".", "b64_wrap", ":", "self", ".", "header", ".", "length", "=", "len", "(", "cpayload", ")", "header_str", "=", "ctypes", ".", "string_at", "(", "addressof", "(", "self", ".", "header", ")", ",", "ext_header_size", ")", "return", "base64", ".", "b64encode", "(", "header_str", "+", "cpayload", ")", "return", "cpayload" ]
Compress the associated encodable payload,
prepend the header then encode with base64 if requested

Returns:
the b64 encoded wire encoding of the histogram (as a string)
or the compressed payload (as a string, if b64 wrapping is disabled)
[ "Compress", "the", "associated", "encodable", "payload", "prepend", "the", "header", "then", "encode", "with", "base64", "if", "requested" ]
python
train
43.285714
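A sketch of the usual round trip through the public ``HdrHistogram`` wrapper, which calls this encoder internally; the value range and precision below are illustrative:

    from hdrh.histogram import HdrHistogram

    # track values between 1 and 3,600,000 units at 2 significant digits
    histogram = HdrHistogram(1, 60 * 60 * 1000, 2)
    histogram.record_value(42)

    encoded = histogram.encode()  # b64 wire form produced by the code above
    print(encoded[:32])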
Bogdanp/dramatiq
dramatiq/brokers/rabbitmq.py
https://github.com/Bogdanp/dramatiq/blob/a8cc2728478e794952a5a50c3fb19ec455fe91b6/dramatiq/brokers/rabbitmq.py#L183-L224
def declare_queue(self, queue_name): """Declare a queue. Has no effect if a queue with the given name already exists. Parameters: queue_name(str): The name of the new queue. Raises: ConnectionClosed: If the underlying channel or connection has been closed. """ attempts = 1 while True: try: if queue_name not in self.queues: self.emit_before("declare_queue", queue_name) self._declare_queue(queue_name) self.queues.add(queue_name) self.emit_after("declare_queue", queue_name) delayed_name = dq_name(queue_name) self._declare_dq_queue(queue_name) self.delay_queues.add(delayed_name) self.emit_after("declare_delay_queue", delayed_name) self._declare_xq_queue(queue_name) break except (pika.exceptions.AMQPConnectionError, pika.exceptions.AMQPChannelError) as e: # pragma: no cover # Delete the channel and the connection so that the next # caller may initiate new ones of each. del self.channel del self.connection attempts += 1 if attempts > MAX_DECLARE_ATTEMPTS: raise ConnectionClosed(e) from None self.logger.debug( "Retrying declare due to closed connection. [%d/%d]", attempts, MAX_DECLARE_ATTEMPTS, )
[ "def", "declare_queue", "(", "self", ",", "queue_name", ")", ":", "attempts", "=", "1", "while", "True", ":", "try", ":", "if", "queue_name", "not", "in", "self", ".", "queues", ":", "self", ".", "emit_before", "(", "\"declare_queue\"", ",", "queue_name", ")", "self", ".", "_declare_queue", "(", "queue_name", ")", "self", ".", "queues", ".", "add", "(", "queue_name", ")", "self", ".", "emit_after", "(", "\"declare_queue\"", ",", "queue_name", ")", "delayed_name", "=", "dq_name", "(", "queue_name", ")", "self", ".", "_declare_dq_queue", "(", "queue_name", ")", "self", ".", "delay_queues", ".", "add", "(", "delayed_name", ")", "self", ".", "emit_after", "(", "\"declare_delay_queue\"", ",", "delayed_name", ")", "self", ".", "_declare_xq_queue", "(", "queue_name", ")", "break", "except", "(", "pika", ".", "exceptions", ".", "AMQPConnectionError", ",", "pika", ".", "exceptions", ".", "AMQPChannelError", ")", "as", "e", ":", "# pragma: no cover", "# Delete the channel and the connection so that the next", "# caller may initiate new ones of each.", "del", "self", ".", "channel", "del", "self", ".", "connection", "attempts", "+=", "1", "if", "attempts", ">", "MAX_DECLARE_ATTEMPTS", ":", "raise", "ConnectionClosed", "(", "e", ")", "from", "None", "self", ".", "logger", ".", "debug", "(", "\"Retrying declare due to closed connection. [%d/%d]\"", ",", "attempts", ",", "MAX_DECLARE_ATTEMPTS", ",", ")" ]
Declare a queue. Has no effect if a queue with the given name already exists. Parameters: queue_name(str): The name of the new queue. Raises: ConnectionClosed: If the underlying channel or connection has been closed.
[ "Declare", "a", "queue", ".", "Has", "no", "effect", "if", "a", "queue", "with", "the", "given", "name", "already", "exists", "." ]
python
train
38.214286
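A hedged sketch of declaring a queue directly on a broker instance; the AMQP URL is a placeholder:

    from dramatiq.brokers.rabbitmq import RabbitmqBroker

    broker = RabbitmqBroker(url="amqp://guest:guest@127.0.0.1:5672")
    broker.declare_queue("default")  # declares 'default' plus its delay/dead-letter queues
    broker.declare_queue("default")  # idempotent: already-known queues are skipped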
MacHu-GWU/constant2-project
constant2/_constant2.py
https://github.com/MacHu-GWU/constant2-project/blob/ccf7e14b0e23f9f4bfd13a3e2ce4a1142e570d4f/constant2/_constant2.py#L304-L322
def GetAll(cls, attr, value, e=0.000001, sort_by="__name__"):
        """Get all nested Constant classes that meet ``klass.attr == value``.

        :param attr: attribute name.
        :param value: value.
        :param e: used for float value comparison.
        :param sort_by: nested class is ordered by <sort_by> attribute.

        .. versionadded:: 0.0.5
        """
        matched = list()
        for _, klass in cls.Subclasses(sort_by=sort_by):
            try:
                if klass.__dict__[attr] == approx(value, e):
                    matched.append(klass)
            except:  # pragma: no cover
                pass
        return matched
[ "def", "GetAll", "(", "cls", ",", "attr", ",", "value", ",", "e", "=", "0.000001", ",", "sort_by", "=", "\"__name__\"", ")", ":", "matched", "=", "list", "(", ")", "for", "_", ",", "klass", "in", "cls", ".", "Subclasses", "(", "sort_by", "=", "sort_by", ")", ":", "try", ":", "if", "klass", ".", "__dict__", "[", "attr", "]", "==", "approx", "(", "value", ",", "e", ")", ":", "matched", ".", "append", "(", "klass", ")", "except", ":", "# pragma: no cover", "pass", "return", "matched" ]
Get all nested Constant classes that meet ``klass.attr == value``.

:param attr: attribute name.
:param value: value.
:param e: used for float value comparison.
:param sort_by: nested class is ordered by <sort_by> attribute.

.. versionadded:: 0.0.5
[ "Get", "all", "nested", "Constant", "class", "that", "met", "klass", ".", "attr", "==", "value", "." ]
python
train
33.684211
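An illustrative sketch of ``GetAll``; the nested classes here are invented for the example:

    from constant2 import Constant

    class Fruit(Constant):
        class Apple(Constant):
            id = 1

        class Banana(Constant):
            id = 2

    # all nested classes whose ``id`` equals 1 (within the float tolerance e)
    print(Fruit.GetAll('id', 1))  # -> [Fruit.Apple]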
Xion/taipan
taipan/functional/combinators.py
https://github.com/Xion/taipan/blob/f333f0287c8bd0915182c7d5308e5f05ef0cca78/taipan/functional/combinators.py#L76-L145
def merge(arg, *rest, **kwargs): """Merge a collection, with functions as items, into a single function that takes a collection and maps its items through corresponding functions. :param arg: A collection of functions, such as list, tuple, or dictionary :param default: Optional default function to use for items within merged function's arguments that do not have corresponding functions in ``arg`` Example with two-element tuple:: >> dict_ = {'Alice': -5, 'Bob': 4} >> func = merge((str.upper, abs)) >> dict(map(func, dict_.items())) {'ALICE': 5, 'BOB': 4} Example with a dictionary:: >> func = merge({'id': int, 'name': str.split}) >> data = [ {'id': '1', 'name': "John Doe"}, {'id': '2', 'name': "Anne Arbor"}, ] >> list(map(func, data)) [{'id': 1, 'name': ['John', 'Doe']}, {'id': 2, 'name': ['Anne', 'Arbor']}] :return: Merged function .. versionadded:: 0.0.2 """ ensure_keyword_args(kwargs, optional=('default',)) has_default = 'default' in kwargs if has_default: default = ensure_callable(kwargs['default']) # if more than one argument was given, they must all be functions; # result will be a function that takes multiple arguments (rather than # a single collection) and returns a tuple unary_result = True if rest: fs = (ensure_callable(arg),) + tuple(imap(ensure_callable, rest)) unary_result = False else: fs = arg if is_mapping(fs): if has_default: return lambda arg_: fs.__class__((k, fs.get(k, default)(arg_[k])) for k in arg_) else: return lambda arg_: fs.__class__((k, fs[k](arg_[k])) for k in arg_) else: ensure_sequence(fs) if has_default: # we cannot use ``izip_longest(fs, arg_, fillvalue=default)``, # because we want to terminate the generator # only when ``arg_`` is exhausted (not when just ``fs`` is) func = lambda arg_: fs.__class__( (fs[i] if i < len(fs) else default)(x) for i, x in enumerate(arg_)) else: # we cannot use ``izip(fs, arg_)`` because it would short-circuit # if ``arg_`` is longer than ``fs``, rather than raising # the required ``IndexError`` func = lambda arg_: fs.__class__(fs[i](x) for i, x in enumerate(arg_)) return func if unary_result else lambda *args: func(args)
[ "def", "merge", "(", "arg", ",", "*", "rest", ",", "*", "*", "kwargs", ")", ":", "ensure_keyword_args", "(", "kwargs", ",", "optional", "=", "(", "'default'", ",", ")", ")", "has_default", "=", "'default'", "in", "kwargs", "if", "has_default", ":", "default", "=", "ensure_callable", "(", "kwargs", "[", "'default'", "]", ")", "# if more than one argument was given, they must all be functions;", "# result will be a function that takes multiple arguments (rather than", "# a single collection) and returns a tuple", "unary_result", "=", "True", "if", "rest", ":", "fs", "=", "(", "ensure_callable", "(", "arg", ")", ",", ")", "+", "tuple", "(", "imap", "(", "ensure_callable", ",", "rest", ")", ")", "unary_result", "=", "False", "else", ":", "fs", "=", "arg", "if", "is_mapping", "(", "fs", ")", ":", "if", "has_default", ":", "return", "lambda", "arg_", ":", "fs", ".", "__class__", "(", "(", "k", ",", "fs", ".", "get", "(", "k", ",", "default", ")", "(", "arg_", "[", "k", "]", ")", ")", "for", "k", "in", "arg_", ")", "else", ":", "return", "lambda", "arg_", ":", "fs", ".", "__class__", "(", "(", "k", ",", "fs", "[", "k", "]", "(", "arg_", "[", "k", "]", ")", ")", "for", "k", "in", "arg_", ")", "else", ":", "ensure_sequence", "(", "fs", ")", "if", "has_default", ":", "# we cannot use ``izip_longest(fs, arg_, fillvalue=default)``,", "# because we want to terminate the generator", "# only when ``arg_`` is exhausted (not when just ``fs`` is)", "func", "=", "lambda", "arg_", ":", "fs", ".", "__class__", "(", "(", "fs", "[", "i", "]", "if", "i", "<", "len", "(", "fs", ")", "else", "default", ")", "(", "x", ")", "for", "i", ",", "x", "in", "enumerate", "(", "arg_", ")", ")", "else", ":", "# we cannot use ``izip(fs, arg_)`` because it would short-circuit", "# if ``arg_`` is longer than ``fs``, rather than raising", "# the required ``IndexError``", "func", "=", "lambda", "arg_", ":", "fs", ".", "__class__", "(", "fs", "[", "i", "]", "(", "x", ")", "for", "i", ",", "x", "in", "enumerate", "(", "arg_", ")", ")", "return", "func", "if", "unary_result", "else", "lambda", "*", "args", ":", "func", "(", "args", ")" ]
Merge a collection, with functions as items, into a single function that takes a collection and maps its items through corresponding functions. :param arg: A collection of functions, such as list, tuple, or dictionary :param default: Optional default function to use for items within merged function's arguments that do not have corresponding functions in ``arg`` Example with two-element tuple:: >> dict_ = {'Alice': -5, 'Bob': 4} >> func = merge((str.upper, abs)) >> dict(map(func, dict_.items())) {'ALICE': 5, 'BOB': 4} Example with a dictionary:: >> func = merge({'id': int, 'name': str.split}) >> data = [ {'id': '1', 'name': "John Doe"}, {'id': '2', 'name': "Anne Arbor"}, ] >> list(map(func, data)) [{'id': 1, 'name': ['John', 'Doe']}, {'id': 2, 'name': ['Anne', 'Arbor']}] :return: Merged function .. versionadded:: 0.0.2
[ "Merge", "a", "collection", "with", "functions", "as", "items", "into", "a", "single", "function", "that", "takes", "a", "collection", "and", "maps", "its", "items", "through", "corresponding", "functions", "." ]
python
train
37.857143