Dataset schema (one record per row; column, type, observed values):

    repo              string   (lengths 7 to 54)
    path              string   (lengths 4 to 192)
    url               string   (lengths 87 to 284)
    code              string   (lengths 78 to 104k)
    code_tokens       list
    docstring         string   (lengths 1 to 46.9k)
    docstring_tokens  list
    language          string   (1 value: python)
    partition         string   (3 values: train, valid, test)
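To make the flattened rows below easier to follow, here is a minimal, hypothetical sketch of how a single record with this schema could be represented in Python. The field values shown are taken from the first record below (ArabellaTech/django-basic-cms); the dict layout and the ellipsis-truncated strings are illustrative assumptions, not part of the dataset export.

    # Hypothetical illustration only: one record of the dump as a plain dict,
    # assuming the nine columns above map one-to-one onto keys.
    record = {
        "repo": "ArabellaTech/django-basic-cms",
        "path": "basic_cms/utils.py",
        "url": ("https://github.com/ArabellaTech/django-basic-cms/blob/"
                "863f3c6098606f663994930cd8e7723ad0c07caf/basic_cms/utils.py#L163-L181"),
        "code": "def get_placeholders(template_name): ...",   # full source text of the function
        "code_tokens": ["def", "get_placeholders", "(", "template_name", ")", ":", "..."],
        "docstring": "Return a list of PlaceholderNode found in the given template. ...",
        "docstring_tokens": ["Return", "a", "list", "of", "PlaceholderNode", "..."],
        "language": "python",
        "partition": "train",
    }

    # Each record pairs a natural-language description with its code; the
    # *_tokens columns are tokenized copies of those two fields.
    query, snippet = record["docstring"], record["code"]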
ArabellaTech/django-basic-cms
basic_cms/utils.py
https://github.com/ArabellaTech/django-basic-cms/blob/863f3c6098606f663994930cd8e7723ad0c07caf/basic_cms/utils.py#L163-L181
def get_placeholders(template_name):
    """Return a list of PlaceholderNode found in the given template.

    :param template_name: the name of the template file
    """
    try:
        temp = loader.get_template(template_name)
    except TemplateDoesNotExist:
        return []
    plist, blist = [], []
    try:
        # django 1.8
        _placeholders_recursif(temp.template.nodelist, plist, blist)
    except AttributeError:
        # django 1.7
        # raise
        _placeholders_recursif(temp.nodelist, plist, blist)
    return plist
[ "def", "get_placeholders", "(", "template_name", ")", ":", "try", ":", "temp", "=", "loader", ".", "get_template", "(", "template_name", ")", "except", "TemplateDoesNotExist", ":", "return", "[", "]", "plist", ",", "blist", "=", "[", "]", ",", "[", "]", "try", ":", "# django 1.8", "_placeholders_recursif", "(", "temp", ".", "template", ".", "nodelist", ",", "plist", ",", "blist", ")", "except", "AttributeError", ":", "# django 1.7", "# raise", "_placeholders_recursif", "(", "temp", ".", "nodelist", ",", "plist", ",", "blist", ")", "return", "plist" ]
Return a list of PlaceholderNode found in the given template. :param template_name: the name of the template file
[ "Return", "a", "list", "of", "PlaceholderNode", "found", "in", "the", "given", "template", "." ]
python
train
log2timeline/dfdatetime
dfdatetime/time_elements.py
https://github.com/log2timeline/dfdatetime/blob/141ca4ef1eff3d354b5deaac3d81cb08506f98d6/dfdatetime/time_elements.py#L298-L312
def CopyFromDateTimeString(self, time_string):
    """Copies time elements from a date and time string.

    Args:
      time_string (str): date and time value formatted as:
          YYYY-MM-DD hh:mm:ss.######[+-]##:##

          Where # are numeric digits ranging from 0 to 9 and the seconds
          fraction can be either 3 or 6 digits. The time of day, seconds
          fraction and time zone offset are optional. The default time zone
          is UTC.
    """
    date_time_values = self._CopyDateTimeFromString(time_string)

    self._CopyFromDateTimeValues(date_time_values)
[ "def", "CopyFromDateTimeString", "(", "self", ",", "time_string", ")", ":", "date_time_values", "=", "self", ".", "_CopyDateTimeFromString", "(", "time_string", ")", "self", ".", "_CopyFromDateTimeValues", "(", "date_time_values", ")" ]
Copies time elements from a date and time string. Args: time_string (str): date and time value formatted as: YYYY-MM-DD hh:mm:ss.######[+-]##:## Where # are numeric digits ranging from 0 to 9 and the seconds fraction can be either 3 or 6 digits. The time of day, seconds fraction and time zone offset are optional. The default time zone is UTC.
[ "Copies", "time", "elements", "from", "a", "date", "and", "time", "string", "." ]
python
train
phoebe-project/phoebe2
phoebe/parameters/constraint.py
https://github.com/phoebe-project/phoebe2/blob/e64b8be683977064e2d55dd1b3ac400f64c3e379/phoebe/parameters/constraint.py#L236-L287
def asini(b, orbit, solve_for=None):
    """
    Create a constraint for asini in an orbit.

    If any of the required parameters ('asini', 'sma', 'incl') do not
    exist in the orbit, they will be created.

    :parameter b: the :class:`phoebe.frontend.bundle.Bundle`
    :parameter str orbit: the label of the orbit in which this constraint should be built
    :parameter str solve_for: if 'asini' should not be the derived/constrained
        parameter, provide which other parameter should be derived
        (ie 'sma' or 'incl')
    :returns: lhs (Parameter), rhs (ConstraintParameter), args (list of arguments
        that were passed to this function)
    """
    orbit_ps = _get_system_ps(b, orbit)

    # We want to get the parameters in THIS orbit, but calling through
    # the bundle in case we need to create it.
    # To do that, we need to know the search parameters to get items from this PS.
    metawargs = orbit_ps.meta
    metawargs.pop('qualifier')

    # Now we'll define the parameters in case they don't exist and need to be created
    sma_def = FloatParameter(qualifier='sma', value=8.0, default_unit=u.solRad, description='Semi major axis')
    incl_def = FloatParameter(qualifier='incl', value=90.0, default_unit=u.deg, description='Orbital inclination angle')
    asini_def = FloatParameter(qualifier='asini', value=8.0, default_unit=u.solRad, description='Projected semi major axis')

    # And now call get_or_create on the bundle
    sma, created = b.get_or_create('sma', sma_def, **metawargs)
    incl, created = b.get_or_create('incl', incl_def, **metawargs)
    asini, created = b.get_or_create('asini', asini_def, **metawargs)

    if solve_for in [None, asini]:
        lhs = asini
        rhs = sma * sin(incl)
    elif solve_for == sma:
        lhs = sma
        rhs = asini / sin(incl)
    elif solve_for == incl:
        lhs = incl
        rhs = arcsin(asini/sma)
    else:
        raise NotImplementedError

    #- return lhs, rhs, args_as_pss
    return lhs, rhs, {'orbit': orbit}
[ "def", "asini", "(", "b", ",", "orbit", ",", "solve_for", "=", "None", ")", ":", "orbit_ps", "=", "_get_system_ps", "(", "b", ",", "orbit", ")", "# We want to get the parameters in THIS orbit, but calling through", "# the bundle in case we need to create it.", "# To do that, we need to know the search parameters to get items from this PS.", "metawargs", "=", "orbit_ps", ".", "meta", "metawargs", ".", "pop", "(", "'qualifier'", ")", "# Now we'll define the parameters in case they don't exist and need to be created", "sma_def", "=", "FloatParameter", "(", "qualifier", "=", "'sma'", ",", "value", "=", "8.0", ",", "default_unit", "=", "u", ".", "solRad", ",", "description", "=", "'Semi major axis'", ")", "incl_def", "=", "FloatParameter", "(", "qualifier", "=", "'incl'", ",", "value", "=", "90.0", ",", "default_unit", "=", "u", ".", "deg", ",", "description", "=", "'Orbital inclination angle'", ")", "asini_def", "=", "FloatParameter", "(", "qualifier", "=", "'asini'", ",", "value", "=", "8.0", ",", "default_unit", "=", "u", ".", "solRad", ",", "description", "=", "'Projected semi major axis'", ")", "# And now call get_or_create on the bundle", "sma", ",", "created", "=", "b", ".", "get_or_create", "(", "'sma'", ",", "sma_def", ",", "*", "*", "metawargs", ")", "incl", ",", "created", "=", "b", ".", "get_or_create", "(", "'incl'", ",", "incl_def", ",", "*", "*", "metawargs", ")", "asini", ",", "created", "=", "b", ".", "get_or_create", "(", "'asini'", ",", "asini_def", ",", "*", "*", "metawargs", ")", "if", "solve_for", "in", "[", "None", ",", "asini", "]", ":", "lhs", "=", "asini", "rhs", "=", "sma", "*", "sin", "(", "incl", ")", "elif", "solve_for", "==", "sma", ":", "lhs", "=", "sma", "rhs", "=", "asini", "/", "sin", "(", "incl", ")", "elif", "solve_for", "==", "incl", ":", "lhs", "=", "incl", "rhs", "=", "arcsin", "(", "asini", "/", "sma", ")", "else", ":", "raise", "NotImplementedError", "#- return lhs, rhs, args_as_pss", "return", "lhs", ",", "rhs", ",", "{", "'orbit'", ":", "orbit", "}" ]
Create a constraint for asini in an orbit. If any of the required parameters ('asini', 'sma', 'incl') do not exist in the orbit, they will be created. :parameter b: the :class:`phoebe.frontend.bundle.Bundle` :parameter str orbit: the label of the orbit in which this constraint should be built :parameter str solve_for: if 'asini' should not be the derived/constrained parameter, provide which other parameter should be derived (ie 'sma' or 'incl') :returns: lhs (Parameter), rhs (ConstraintParameter), args (list of arguments that were passed to this function)
[ "Create", "a", "constraint", "for", "asini", "in", "an", "orbit", "." ]
python
train
SuperCowPowers/bat
bat/bro_log_reader.py
https://github.com/SuperCowPowers/bat/blob/069e6bc52843dc07760969c531cc442ca7da8e0c/bat/bro_log_reader.py#L99-L113
def _readrows(self):
    """Internal method _readrows, see readrows() for description"""

    # Read in the Bro Headers
    offset, self.field_names, self.field_types, self.type_converters = self._parse_bro_header(self._filepath)

    # Use parent class to yield each row as a dictionary
    for line in self.readlines(offset=offset):

        # Check for #close
        if line.startswith('#close'):
            return

        # Yield the line as a dict
        yield self.make_dict(line.strip().split(self._delimiter))
[ "def", "_readrows", "(", "self", ")", ":", "# Read in the Bro Headers", "offset", ",", "self", ".", "field_names", ",", "self", ".", "field_types", ",", "self", ".", "type_converters", "=", "self", ".", "_parse_bro_header", "(", "self", ".", "_filepath", ")", "# Use parent class to yield each row as a dictionary", "for", "line", "in", "self", ".", "readlines", "(", "offset", "=", "offset", ")", ":", "# Check for #close", "if", "line", ".", "startswith", "(", "'#close'", ")", ":", "return", "# Yield the line as a dict", "yield", "self", ".", "make_dict", "(", "line", ".", "strip", "(", ")", ".", "split", "(", "self", ".", "_delimiter", ")", ")" ]
Internal method _readrows, see readrows() for description
[ "Internal", "method", "_readrows", "see", "readrows", "()", "for", "description" ]
python
train
SwissDataScienceCenter/renku-python
renku/cli/workflow.py
https://github.com/SwissDataScienceCenter/renku-python/blob/691644d695b055a01e0ca22b2620e55bbd928c0d/renku/cli/workflow.py#L59-L77
def workflow(ctx, client):
    """List or manage workflows with subcommands."""
    if ctx.invoked_subcommand is None:
        from renku.models.refs import LinkReference
        names = defaultdict(list)
        for ref in LinkReference.iter_items(client, common_path='workflows'):
            names[ref.reference.name].append(ref.name)

        for path in client.workflow_path.glob('*.cwl'):
            click.echo(
                '{path}: {names}'.format(
                    path=path.name,
                    names=', '.join(
                        click.style(_deref(name), fg='green')
                        for name in names[path.name]
                    ),
                )
            )
[ "def", "workflow", "(", "ctx", ",", "client", ")", ":", "if", "ctx", ".", "invoked_subcommand", "is", "None", ":", "from", "renku", ".", "models", ".", "refs", "import", "LinkReference", "names", "=", "defaultdict", "(", "list", ")", "for", "ref", "in", "LinkReference", ".", "iter_items", "(", "client", ",", "common_path", "=", "'workflows'", ")", ":", "names", "[", "ref", ".", "reference", ".", "name", "]", ".", "append", "(", "ref", ".", "name", ")", "for", "path", "in", "client", ".", "workflow_path", ".", "glob", "(", "'*.cwl'", ")", ":", "click", ".", "echo", "(", "'{path}: {names}'", ".", "format", "(", "path", "=", "path", ".", "name", ",", "names", "=", "', '", ".", "join", "(", "click", ".", "style", "(", "_deref", "(", "name", ")", ",", "fg", "=", "'green'", ")", "for", "name", "in", "names", "[", "path", ".", "name", "]", ")", ",", ")", ")" ]
List or manage workflows with subcommands.
[ "List", "or", "manage", "workflows", "with", "subcommands", "." ]
python
train
fedora-python/pyp2rpm
pyp2rpm/command/extract_dist.py
https://github.com/fedora-python/pyp2rpm/blob/853eb3d226689a5ccdcdb9358b1a3394fafbd2b5/pyp2rpm/command/extract_dist.py#L75-L86
def to_list(var):
    """Checks if given value is a list, tries to convert, if it is not."""
    if var is None:
        return []
    if isinstance(var, str):
        var = var.split('\n')
    elif not isinstance(var, list):
        try:
            var = list(var)
        except TypeError:
            raise ValueError("{} cannot be converted to the list.".format(var))
    return var
[ "def", "to_list", "(", "var", ")", ":", "if", "var", "is", "None", ":", "return", "[", "]", "if", "isinstance", "(", "var", ",", "str", ")", ":", "var", "=", "var", ".", "split", "(", "'\\n'", ")", "elif", "not", "isinstance", "(", "var", ",", "list", ")", ":", "try", ":", "var", "=", "list", "(", "var", ")", "except", "TypeError", ":", "raise", "ValueError", "(", "\"{} cannot be converted to the list.\"", ".", "format", "(", "var", ")", ")", "return", "var" ]
Checks if given value is a list, tries to convert, if it is not.
[ "Checks", "if", "given", "value", "is", "a", "list", "tries", "to", "convert", "if", "it", "is", "not", "." ]
python
train
getsentry/libsourcemap
libsourcemap/highlevel.py
https://github.com/getsentry/libsourcemap/blob/94b5a34814fafee9dc23da8ec0ccca77f30e3370/libsourcemap/highlevel.py#L303-L308
def from_bytes(buffer):
    """Creates a sourcemap view from a JSON string."""
    buffer = to_bytes(buffer)
    return ProguardView._from_ptr(rustcall(
        _lib.lsm_proguard_mapping_from_bytes,
        buffer, len(buffer)))
[ "def", "from_bytes", "(", "buffer", ")", ":", "buffer", "=", "to_bytes", "(", "buffer", ")", "return", "ProguardView", ".", "_from_ptr", "(", "rustcall", "(", "_lib", ".", "lsm_proguard_mapping_from_bytes", ",", "buffer", ",", "len", "(", "buffer", ")", ")", ")" ]
Creates a sourcemap view from a JSON string.
[ "Creates", "a", "sourcemap", "view", "from", "a", "JSON", "string", "." ]
python
train
blueset/ehForwarderBot
ehforwarderbot/wizard.py
https://github.com/blueset/ehForwarderBot/blob/62e8fcfe77b2993aba91623f538f404a90f59f1d/ehforwarderbot/wizard.py#L700-L738
def prerequisite_check():
    """
    Check prerequisites of the framework, including Python version, installation of
    modules, etc.

    Returns:
        Optional[str]: If the check is not passed, return error message regarding
            failed test case. None is returned otherwise.
    """
    # Check Python version
    if sys.version_info < (3, 6):
        version_str = "%s.%s.%s" % sys.version_info[:3]
        # TRANSLATORS: This word is used as a part of search query suggested to users,
        # it may appears in context like "Ubuntu 16.04 install Python 3.7"
        search_url = build_search_query(_("install") + " Python 3.7")
        return _("EH Forwarder Bot requires a minimum of Python 3.6 to run. You "
                 "are currently using Python {version}. \n"
                 "\n"
                 "You may want to try:\n"
                 "{url}").format(version=version_str, url=search_url)

    # Check installations of modules
    modules_err = _("You may want to visit the modules repository to find a list of "
                    "available modules to install.\n"
                    "https://github.com/blueset/ehForwarderBot/wiki/Channels-Repository")
    # 1. At least 1 master channel must be installed
    try:
        next(pkg_resources.iter_entry_points("ehforwarderbot.master"))
    except StopIteration:
        return _("No master channel detected. EH Forwarder Bot requires at least one "
                 "master channel installed to run.") + "\n\n" + modules_err

    # 2. At least 1 slave channel must be installed
    try:
        next(pkg_resources.iter_entry_points("ehforwarderbot.slave"))
    except StopIteration:
        return _("No slave channel detected. EH Forwarder Bot requires at least one "
                 "slave channel installed to run.") + "\n\n" + modules_err
[ "def", "prerequisite_check", "(", ")", ":", "# Check Python version", "if", "sys", ".", "version_info", "<", "(", "3", ",", "6", ")", ":", "version_str", "=", "\"%s.%s.%s\"", "%", "sys", ".", "version_info", "[", ":", "3", "]", "# TRANSLATORS: This word is used as a part of search query suggested to users,", "# it may appears in context like \"Ubuntu 16.04 install Python 3.7\"", "search_url", "=", "build_search_query", "(", "_", "(", "\"install\"", ")", "+", "\" Python 3.7\"", ")", "return", "_", "(", "\"EH Forwarder Bot requires a minimum of Python 3.6 to run. You \"", "\"are currently using Python {version}. \\n\"", "\"\\n\"", "\"You may want to try:\\n\"", "\"{url}\"", ")", ".", "format", "(", "version", "=", "version_str", ",", "url", "=", "search_url", ")", "# Check installations of modules", "modules_err", "=", "_", "(", "\"You may want to visit the modules repository to find a list of \"", "\"available modules to install.\\n\"", "\"https://github.com/blueset/ehForwarderBot/wiki/Channels-Repository\"", ")", "# 1. At least 1 master channel must be installed", "try", ":", "next", "(", "pkg_resources", ".", "iter_entry_points", "(", "\"ehforwarderbot.master\"", ")", ")", "except", "StopIteration", ":", "return", "_", "(", "\"No master channel detected. EH Forwarder Bot requires at least one \"", "\"master channel installed to run.\"", ")", "+", "\"\\n\\n\"", "+", "modules_err", "# 2. At least 1 slave channel must be installed", "try", ":", "next", "(", "pkg_resources", ".", "iter_entry_points", "(", "\"ehforwarderbot.slave\"", ")", ")", "except", "StopIteration", ":", "return", "_", "(", "\"No slave channel detected. EH Forwarder Bot requires at least one \"", "\"slave channel installed to run.\"", ")", "+", "\"\\n\\n\"", "+", "modules_err" ]
Check prerequisites of the framework, including Python version, installation of modules, etc. Returns: Optional[str]: If the check is not passed, return error message regarding failed test case. None is returned otherwise.
[ "Check", "prerequisites", "of", "the", "framework", "including", "Python", "version", "installation", "of", "modules", "etc", "." ]
python
train
CalebBell/thermo
thermo/thermal_conductivity.py
https://github.com/CalebBell/thermo/blob/3857ed023a3e64fd3039a32d53576c24990ef1c3/thermo/thermal_conductivity.py#L1066-L1110
def DIPPR9H(ws, ks):
    r'''Calculates thermal conductivity of a liquid mixture according to
    mixing rules in [1]_ and also in [2]_.

    .. math::
        \lambda_m = \left( \sum_i w_i \lambda_i^{-2}\right)^{-1/2}

    Parameters
    ----------
    ws : float
        Mass fractions of components
    ks : float
        Liquid thermal conductivites of all components, [W/m/K]

    Returns
    -------
    kl : float
        Thermal conductivity of liquid mixture, [W/m/K]

    Notes
    -----
    This equation is entirely dimensionless; all dimensions cancel.
    The example is from [2]_; all results agree.
    The original source has not been reviewed.

    DIPPR Procedure 9H: Method for the Thermal Conductivity of Nonaqueous Liquid Mixtures

    Average deviations of 3%. for 118 nonaqueous systems with 817 data points.
    Max deviation 20%. According to DIPPR.

    Examples
    --------
    >>> DIPPR9H([0.258, 0.742], [0.1692, 0.1528])
    0.15657104706719646

    References
    ----------
    .. [1] Reid, Robert C.; Prausnitz, John M.; Poling, Bruce E.
       The Properties of Gases and Liquids. McGraw-Hill Companies, 1987.
    .. [2] Danner, Ronald P, and Design Institute for Physical Property Data.
       Manual for Predicting Chemical Process Design Data. New York, N.Y, 1982.
    '''
    if not none_and_length_check([ks, ws]):  # check same-length inputs
        raise Exception('Function inputs are incorrect format')
    return sum(ws[i]/ks[i]**2 for i in range(len(ws)))**(-0.5)
[ "def", "DIPPR9H", "(", "ws", ",", "ks", ")", ":", "if", "not", "none_and_length_check", "(", "[", "ks", ",", "ws", "]", ")", ":", "# check same-length inputs", "raise", "Exception", "(", "'Function inputs are incorrect format'", ")", "return", "sum", "(", "ws", "[", "i", "]", "/", "ks", "[", "i", "]", "**", "2", "for", "i", "in", "range", "(", "len", "(", "ws", ")", ")", ")", "**", "(", "-", "0.5", ")" ]
r'''Calculates thermal conductivity of a liquid mixture according to mixing rules in [1]_ and also in [2]_. .. math:: \lambda_m = \left( \sum_i w_i \lambda_i^{-2}\right)^{-1/2} Parameters ---------- ws : float Mass fractions of components ks : float Liquid thermal conductivites of all components, [W/m/K] Returns ------- kl : float Thermal conductivity of liquid mixture, [W/m/K] Notes ----- This equation is entirely dimensionless; all dimensions cancel. The example is from [2]_; all results agree. The original source has not been reviewed. DIPPR Procedure 9H: Method for the Thermal Conductivity of Nonaqueous Liquid Mixtures Average deviations of 3%. for 118 nonaqueous systems with 817 data points. Max deviation 20%. According to DIPPR. Examples -------- >>> DIPPR9H([0.258, 0.742], [0.1692, 0.1528]) 0.15657104706719646 References ---------- .. [1] Reid, Robert C.; Prausnitz, John M.; Poling, Bruce E. The Properties of Gases and Liquids. McGraw-Hill Companies, 1987. .. [2] Danner, Ronald P, and Design Institute for Physical Property Data. Manual for Predicting Chemical Process Design Data. New York, N.Y, 1982.
[ "r", "Calculates", "thermal", "conductivity", "of", "a", "liquid", "mixture", "according", "to", "mixing", "rules", "in", "[", "1", "]", "_", "and", "also", "in", "[", "2", "]", "_", "." ]
python
valid
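As a quick sanity check on the record above, here is a hedged, standalone sketch of the DIPPR 9H mixing rule evaluated on the docstring's own example values. The helper below is a re-implementation of the one-line formula for illustration only; it is not the thermo library's function.

    # Standalone check of the DIPPR 9H rule on the example values from the
    # record above; this re-implements the formula, it is not thermo's code.
    def dippr9h_check(ws, ks):
        # lambda_m = ( sum_i w_i * lambda_i**-2 ) ** -0.5
        return sum(w / k**2 for w, k in zip(ws, ks)) ** -0.5

    print(dippr9h_check([0.258, 0.742], [0.1692, 0.1528]))  # ~0.156571, matching the doctest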
nikhilkumarsingh/content-downloader
ctdl/gui.py
https://github.com/nikhilkumarsingh/content-downloader/blob/8b14af3a6eadcc43581e0425dc1d218208de12df/ctdl/gui.py#L258-L273
def click_download(self, event):
    """ event for download button """
    args['parallel'] = self.p.get()
    args['file_type'] = self.optionmenu.get()
    args['no_redirects'] = self.t.get()
    args['query'] = self.entry_query.get()
    args['min_file_size'] = int(self.entry_min.get())
    args['max_file_size'] = int(self.entry_max.get())
    args['limit'] = int(self.entry_limit.get())
    args['website'] = self.entry_website.get()
    args['option'] = self.engine.get()
    print(args)
    self.check_threat()
    download_content_gui(**args)
[ "def", "click_download", "(", "self", ",", "event", ")", ":", "args", "[", "'parallel'", "]", "=", "self", ".", "p", ".", "get", "(", ")", "args", "[", "'file_type'", "]", "=", "self", ".", "optionmenu", ".", "get", "(", ")", "args", "[", "'no_redirects'", "]", "=", "self", ".", "t", ".", "get", "(", ")", "args", "[", "'query'", "]", "=", "self", ".", "entry_query", ".", "get", "(", ")", "args", "[", "'min_file_size'", "]", "=", "int", "(", "self", ".", "entry_min", ".", "get", "(", ")", ")", "args", "[", "'max_file_size'", "]", "=", "int", "(", "self", ".", "entry_max", ".", "get", "(", ")", ")", "args", "[", "'limit'", "]", "=", "int", "(", "self", ".", "entry_limit", ".", "get", "(", ")", ")", "args", "[", "'website'", "]", "=", "self", ".", "entry_website", ".", "get", "(", ")", "args", "[", "'option'", "]", "=", "self", ".", "engine", ".", "get", "(", ")", "print", "(", "args", ")", "self", ".", "check_threat", "(", ")", "download_content_gui", "(", "*", "*", "args", ")" ]
event for download button
[ "event", "for", "download", "button" ]
python
train
benoitguigal/python-epson-printer
epson_printer/epsonprinter.py
https://github.com/benoitguigal/python-epson-printer/blob/7d89b2f21bc76d2cc4d5ad548e19a356ca92fbc5/epson_printer/epsonprinter.py#L258-L264
def print_images(self, *printable_images):
    """
    This method allows printing several images in one shot. This is useful if the client code does not want the
    printer to make pause during printing
    """
    printable_image = reduce(lambda x, y: x.append(y), list(printable_images))
    self.print_image(printable_image)
[ "def", "print_images", "(", "self", ",", "*", "printable_images", ")", ":", "printable_image", "=", "reduce", "(", "lambda", "x", ",", "y", ":", "x", ".", "append", "(", "y", ")", ",", "list", "(", "printable_images", ")", ")", "self", ".", "print_image", "(", "printable_image", ")" ]
This method allows printing several images in one shot. This is useful if the client code does not want the printer to make pause during printing
[ "This", "method", "allows", "printing", "several", "images", "in", "one", "shot", ".", "This", "is", "useful", "if", "the", "client", "code", "does", "not", "want", "the", "printer", "to", "make", "pause", "during", "printing" ]
python
train
perimosocordiae/viztricks
viztricks/extensions.py
https://github.com/perimosocordiae/viztricks/blob/bae2f8a9ce9278ce0197f8efc34cc4fef1dfe1eb/viztricks/extensions.py#L189-L213
def jitterplot(data, positions=None, ax=None, vert=True, scale=0.1,
               **scatter_kwargs):
    '''Plots jittered points as a distribution visualizer.

    Scatter plot arguments default to: marker='.', c='k', alpha=0.75

    Also known as a stripplot.

    See also: boxplot, violinplot, beeswarm
    '''
    if ax is None:
        ax = plt.gca()
    if positions is None:
        positions = range(len(data))
    kwargs = dict(marker='.', c='k', alpha=0.75)
    kwargs.update(scatter_kwargs)
    for pos, y in zip(positions, data):
        if scale > 0:
            x = np.random.normal(loc=pos, scale=scale, size=len(y))
        else:
            x = np.zeros_like(y) + pos
        if not vert:
            x, y = y, x
        ax.scatter(x, y, **kwargs)
    return plt.show
[ "def", "jitterplot", "(", "data", ",", "positions", "=", "None", ",", "ax", "=", "None", ",", "vert", "=", "True", ",", "scale", "=", "0.1", ",", "*", "*", "scatter_kwargs", ")", ":", "if", "ax", "is", "None", ":", "ax", "=", "plt", ".", "gca", "(", ")", "if", "positions", "is", "None", ":", "positions", "=", "range", "(", "len", "(", "data", ")", ")", "kwargs", "=", "dict", "(", "marker", "=", "'.'", ",", "c", "=", "'k'", ",", "alpha", "=", "0.75", ")", "kwargs", ".", "update", "(", "scatter_kwargs", ")", "for", "pos", ",", "y", "in", "zip", "(", "positions", ",", "data", ")", ":", "if", "scale", ">", "0", ":", "x", "=", "np", ".", "random", ".", "normal", "(", "loc", "=", "pos", ",", "scale", "=", "scale", ",", "size", "=", "len", "(", "y", ")", ")", "else", ":", "x", "=", "np", ".", "zeros_like", "(", "y", ")", "+", "pos", "if", "not", "vert", ":", "x", ",", "y", "=", "y", ",", "x", "ax", ".", "scatter", "(", "x", ",", "y", ",", "*", "*", "kwargs", ")", "return", "plt", ".", "show" ]
Plots jittered points as a distribution visualizer. Scatter plot arguments default to: marker='.', c='k', alpha=0.75 Also known as a stripplot. See also: boxplot, violinplot, beeswarm
[ "Plots", "jittered", "points", "as", "a", "distribution", "visualizer", "." ]
python
train
oceanprotocol/squid-py
squid_py/keeper/conditions/access.py
https://github.com/oceanprotocol/squid-py/blob/43a5b7431627e4c9ab7382ed9eb8153e96ed4483/squid_py/keeper/conditions/access.py#L55-L76
def get_purchased_assets_by_address(self, address, from_block=0, to_block='latest'):
    """
    Get the list of the assets dids consumed for an address.

    :param address: is the address of the granted user, hex-str
    :param from_block: block to start to listen
    :param to_block: block to stop to listen
    :return: list of dids
    """
    block_filter = EventFilter(
        ConditionBase.FULFILLED_EVENT,
        getattr(self.events, ConditionBase.FULFILLED_EVENT),
        from_block=from_block,
        to_block=to_block,
        argument_filters={'_grantee': address}
    )
    log_items = block_filter.get_all_entries(max_tries=5)
    did_list = []
    for log_i in log_items:
        did_list.append(id_to_did(log_i.args['_documentId']))
    return did_list
[ "def", "get_purchased_assets_by_address", "(", "self", ",", "address", ",", "from_block", "=", "0", ",", "to_block", "=", "'latest'", ")", ":", "block_filter", "=", "EventFilter", "(", "ConditionBase", ".", "FULFILLED_EVENT", ",", "getattr", "(", "self", ".", "events", ",", "ConditionBase", ".", "FULFILLED_EVENT", ")", ",", "from_block", "=", "from_block", ",", "to_block", "=", "to_block", ",", "argument_filters", "=", "{", "'_grantee'", ":", "address", "}", ")", "log_items", "=", "block_filter", ".", "get_all_entries", "(", "max_tries", "=", "5", ")", "did_list", "=", "[", "]", "for", "log_i", "in", "log_items", ":", "did_list", ".", "append", "(", "id_to_did", "(", "log_i", ".", "args", "[", "'_documentId'", "]", ")", ")", "return", "did_list" ]
Get the list of the assets dids consumed for an address. :param address: is the address of the granted user, hex-str :param from_block: block to start to listen :param to_block: block to stop to listen :return: list of dids
[ "Get", "the", "list", "of", "the", "assets", "dids", "consumed", "for", "an", "address", "." ]
python
train
sirfoga/pyhal
hal/files/parsers.py
https://github.com/sirfoga/pyhal/blob/4394d8a1f7e45bea28a255ec390f4962ee64d33a/hal/files/parsers.py#L41-L52
def get_matrix(self):
    """Stores values in array, store lines in array

    :return: 2D matrix
    """
    data = []

    with open(self.path, encoding=self.encoding) as csv_file:
        csv_reader = csv.reader(csv_file, delimiter=",", quotechar="\"")
        for row in csv_reader:
            data.append(row)

    return data
[ "def", "get_matrix", "(", "self", ")", ":", "data", "=", "[", "]", "with", "open", "(", "self", ".", "path", ",", "encoding", "=", "self", ".", "encoding", ")", "as", "csv_file", ":", "csv_reader", "=", "csv", ".", "reader", "(", "csv_file", ",", "delimiter", "=", "\",\"", ",", "quotechar", "=", "\"\\\"\"", ")", "for", "row", "in", "csv_reader", ":", "data", ".", "append", "(", "row", ")", "return", "data" ]
Stores values in array, store lines in array :return: 2D matrix
[ "Stores", "values", "in", "array", "store", "lines", "in", "array" ]
python
train
rueckstiess/mtools
mtools/mloginfo/sections/query_section.py
https://github.com/rueckstiess/mtools/blob/a6a22910c3569c0c8a3908660ca218a4557e4249/mtools/mloginfo/sections/query_section.py#L52-L138
def run(self):
    """Run this section and print out information."""
    grouping = Grouping(group_by=lambda x: (x.namespace, x.operation,
                                            x.pattern))
    logfile = self.mloginfo.logfile

    if logfile.start and logfile.end:
        progress_start = self.mloginfo._datetime_to_epoch(logfile.start)
        progress_total = (self.mloginfo._datetime_to_epoch(logfile.end) -
                          progress_start)
    else:
        self.mloginfo.progress_bar_enabled = False

    for i, le in enumerate(logfile):
        # update progress bar every 1000 lines
        if self.mloginfo.progress_bar_enabled and (i % 1000 == 0):
            if le.datetime:
                progress_curr = self.mloginfo._datetime_to_epoch(le.datetime)
                if progress_total:
                    (self.mloginfo
                     .update_progress(float(progress_curr - progress_start) /
                                      progress_total))

        if (le.operation in ['query', 'getmore', 'update', 'remove'] or
                le.command in ['count', 'findandmodify', 'geonear', 'find']):
            lt = LogTuple(namespace=le.namespace, operation=op_or_cmd(le),
                          pattern=le.pattern, duration=le.duration)
            grouping.add(lt)

    grouping.sort_by_size()

    # clear progress bar again
    if self.mloginfo.progress_bar_enabled:
        self.mloginfo.update_progress(1.0)

    # no queries in the log file
    if len(grouping) < 1:
        print('no queries found.')
        return

    titles = ['namespace', 'operation', 'pattern', 'count', 'min (ms)',
              'max (ms)', 'mean (ms)', '95%-ile (ms)', 'sum (ms)']
    table_rows = []

    for g in grouping:
        # calculate statistics for this group
        namespace, op, pattern = g
        group_events = [le.duration for le in grouping[g]
                        if le.duration is not None]

        stats = OrderedDict()
        stats['namespace'] = namespace
        stats['operation'] = op
        stats['pattern'] = pattern
        stats['count'] = len(group_events)
        stats['min'] = min(group_events) if group_events else '-'
        stats['max'] = max(group_events) if group_events else '-'
        stats['mean'] = 0
        if np:
            stats['95%'] = (np.percentile(group_events, 95)
                            if group_events else '-')
        else:
            stats['95%'] = 'n/a'
        stats['sum'] = sum(group_events) if group_events else '-'
        stats['mean'] = (stats['sum'] / stats['count']
                         if group_events else '-')

        if self.mloginfo.args['verbose']:
            stats['example'] = grouping[g][0]
            titles.append('example')

        table_rows.append(stats)

    # sort order depending on field names
    reverse = True
    if self.mloginfo.args['sort'] in ['namespace', 'pattern']:
        reverse = False
    table_rows = sorted(table_rows,
                        key=itemgetter(self.mloginfo.args['sort']),
                        reverse=reverse)

    print_table(table_rows, titles, uppercase_headers=False)
    print('')
[ "def", "run", "(", "self", ")", ":", "grouping", "=", "Grouping", "(", "group_by", "=", "lambda", "x", ":", "(", "x", ".", "namespace", ",", "x", ".", "operation", ",", "x", ".", "pattern", ")", ")", "logfile", "=", "self", ".", "mloginfo", ".", "logfile", "if", "logfile", ".", "start", "and", "logfile", ".", "end", ":", "progress_start", "=", "self", ".", "mloginfo", ".", "_datetime_to_epoch", "(", "logfile", ".", "start", ")", "progress_total", "=", "(", "self", ".", "mloginfo", ".", "_datetime_to_epoch", "(", "logfile", ".", "end", ")", "-", "progress_start", ")", "else", ":", "self", ".", "mloginfo", ".", "progress_bar_enabled", "=", "False", "for", "i", ",", "le", "in", "enumerate", "(", "logfile", ")", ":", "# update progress bar every 1000 lines", "if", "self", ".", "mloginfo", ".", "progress_bar_enabled", "and", "(", "i", "%", "1000", "==", "0", ")", ":", "if", "le", ".", "datetime", ":", "progress_curr", "=", "self", ".", "mloginfo", ".", "_datetime_to_epoch", "(", "le", ".", "datetime", ")", "if", "progress_total", ":", "(", "self", ".", "mloginfo", ".", "update_progress", "(", "float", "(", "progress_curr", "-", "progress_start", ")", "/", "progress_total", ")", ")", "if", "(", "le", ".", "operation", "in", "[", "'query'", ",", "'getmore'", ",", "'update'", ",", "'remove'", "]", "or", "le", ".", "command", "in", "[", "'count'", ",", "'findandmodify'", ",", "'geonear'", ",", "'find'", "]", ")", ":", "lt", "=", "LogTuple", "(", "namespace", "=", "le", ".", "namespace", ",", "operation", "=", "op_or_cmd", "(", "le", ")", ",", "pattern", "=", "le", ".", "pattern", ",", "duration", "=", "le", ".", "duration", ")", "grouping", ".", "add", "(", "lt", ")", "grouping", ".", "sort_by_size", "(", ")", "# clear progress bar again", "if", "self", ".", "mloginfo", ".", "progress_bar_enabled", ":", "self", ".", "mloginfo", ".", "update_progress", "(", "1.0", ")", "# no queries in the log file", "if", "len", "(", "grouping", ")", "<", "1", ":", "print", "(", "'no queries found.'", ")", "return", "titles", "=", "[", "'namespace'", ",", "'operation'", ",", "'pattern'", ",", "'count'", ",", "'min (ms)'", ",", "'max (ms)'", ",", "'mean (ms)'", ",", "'95%-ile (ms)'", ",", "'sum (ms)'", "]", "table_rows", "=", "[", "]", "for", "g", "in", "grouping", ":", "# calculate statistics for this group", "namespace", ",", "op", ",", "pattern", "=", "g", "group_events", "=", "[", "le", ".", "duration", "for", "le", "in", "grouping", "[", "g", "]", "if", "le", ".", "duration", "is", "not", "None", "]", "stats", "=", "OrderedDict", "(", ")", "stats", "[", "'namespace'", "]", "=", "namespace", "stats", "[", "'operation'", "]", "=", "op", "stats", "[", "'pattern'", "]", "=", "pattern", "stats", "[", "'count'", "]", "=", "len", "(", "group_events", ")", "stats", "[", "'min'", "]", "=", "min", "(", "group_events", ")", "if", "group_events", "else", "'-'", "stats", "[", "'max'", "]", "=", "max", "(", "group_events", ")", "if", "group_events", "else", "'-'", "stats", "[", "'mean'", "]", "=", "0", "if", "np", ":", "stats", "[", "'95%'", "]", "=", "(", "np", ".", "percentile", "(", "group_events", ",", "95", ")", "if", "group_events", "else", "'-'", ")", "else", ":", "stats", "[", "'95%'", "]", "=", "'n/a'", "stats", "[", "'sum'", "]", "=", "sum", "(", "group_events", ")", "if", "group_events", "else", "'-'", "stats", "[", "'mean'", "]", "=", "(", "stats", "[", "'sum'", "]", "/", "stats", "[", "'count'", "]", "if", "group_events", "else", "'-'", ")", "if", "self", ".", "mloginfo", ".", "args", "[", "'verbose'", "]", 
":", "stats", "[", "'example'", "]", "=", "grouping", "[", "g", "]", "[", "0", "]", "titles", ".", "append", "(", "'example'", ")", "table_rows", ".", "append", "(", "stats", ")", "# sort order depending on field names", "reverse", "=", "True", "if", "self", ".", "mloginfo", ".", "args", "[", "'sort'", "]", "in", "[", "'namespace'", ",", "'pattern'", "]", ":", "reverse", "=", "False", "table_rows", "=", "sorted", "(", "table_rows", ",", "key", "=", "itemgetter", "(", "self", ".", "mloginfo", ".", "args", "[", "'sort'", "]", ")", ",", "reverse", "=", "reverse", ")", "print_table", "(", "table_rows", ",", "titles", ",", "uppercase_headers", "=", "False", ")", "print", "(", "''", ")" ]
Run this section and print out information.
[ "Run", "this", "section", "and", "print", "out", "information", "." ]
python
train
jjkester/django-auditlog
src/auditlog/receivers.py
https://github.com/jjkester/django-auditlog/blob/a22978e05b7ed43b87e4b6109550b86c738578fe/src/auditlog/receivers.py#L25-L47
def log_update(sender, instance, **kwargs):
    """
    Signal receiver that creates a log entry when a model instance is changed and saved to the database.

    Direct use is discouraged, connect your model through :py:func:`auditlog.registry.register` instead.
    """
    if instance.pk is not None:
        try:
            old = sender.objects.get(pk=instance.pk)
        except sender.DoesNotExist:
            pass
        else:
            new = instance

            changes = model_instance_diff(old, new)

            # Log an entry only if there are changes
            if changes:
                log_entry = LogEntry.objects.log_create(
                    instance,
                    action=LogEntry.Action.UPDATE,
                    changes=json.dumps(changes),
                )
[ "def", "log_update", "(", "sender", ",", "instance", ",", "*", "*", "kwargs", ")", ":", "if", "instance", ".", "pk", "is", "not", "None", ":", "try", ":", "old", "=", "sender", ".", "objects", ".", "get", "(", "pk", "=", "instance", ".", "pk", ")", "except", "sender", ".", "DoesNotExist", ":", "pass", "else", ":", "new", "=", "instance", "changes", "=", "model_instance_diff", "(", "old", ",", "new", ")", "# Log an entry only if there are changes", "if", "changes", ":", "log_entry", "=", "LogEntry", ".", "objects", ".", "log_create", "(", "instance", ",", "action", "=", "LogEntry", ".", "Action", ".", "UPDATE", ",", "changes", "=", "json", ".", "dumps", "(", "changes", ")", ",", ")" ]
Signal receiver that creates a log entry when a model instance is changed and saved to the database. Direct use is discouraged, connect your model through :py:func:`auditlog.registry.register` instead.
[ "Signal", "receiver", "that", "creates", "a", "log", "entry", "when", "a", "model", "instance", "is", "changed", "and", "saved", "to", "the", "database", "." ]
python
train
openstack/horizon
openstack_dashboard/dashboards/project/instances/utils.py
https://github.com/openstack/horizon/blob/5601ea9477323e599d9b766fcac1f8be742935b2/openstack_dashboard/dashboards/project/instances/utils.py#L93-L140
def network_field_data(request, include_empty_option=False, with_cidr=False,
                       for_launch=False):
    """Returns a list of tuples of all networks.

    Generates a list of networks available to the user (request). And returns
    a list of (id, name) tuples.

    :param request: django http request object
    :param include_empty_option: flag to include a empty tuple in the front of
        the list
    :param with_cidr: flag to include subnets cidr in field name
    :return: list of (id, name) tuples
    """
    tenant_id = request.user.tenant_id
    networks = []
    if api.base.is_service_enabled(request, 'network'):
        extra_params = {}
        if for_launch:
            extra_params['include_pre_auto_allocate'] = True
        try:
            networks = api.neutron.network_list_for_tenant(
                request, tenant_id, **extra_params)
        except Exception as e:
            msg = _('Failed to get network list {0}').format(six.text_type(e))
            exceptions.handle(request, msg)

        _networks = []
        for n in networks:
            if not n['subnets']:
                continue
            v = n.name_or_id
            if with_cidr:
                cidrs = ([subnet.cidr for subnet in n['subnets']
                          if subnet.ip_version == 4] +
                         [subnet.cidr for subnet in n['subnets']
                          if subnet.ip_version == 6])
                v += ' (%s)' % ', '.join(cidrs)
            _networks.append((n.id, v))
        networks = sorted(_networks, key=itemgetter(1))

    if not networks:
        if include_empty_option:
            return [("", _("No networks available")), ]
        return []

    if include_empty_option:
        return [("", _("Select Network")), ] + networks
    return networks
[ "def", "network_field_data", "(", "request", ",", "include_empty_option", "=", "False", ",", "with_cidr", "=", "False", ",", "for_launch", "=", "False", ")", ":", "tenant_id", "=", "request", ".", "user", ".", "tenant_id", "networks", "=", "[", "]", "if", "api", ".", "base", ".", "is_service_enabled", "(", "request", ",", "'network'", ")", ":", "extra_params", "=", "{", "}", "if", "for_launch", ":", "extra_params", "[", "'include_pre_auto_allocate'", "]", "=", "True", "try", ":", "networks", "=", "api", ".", "neutron", ".", "network_list_for_tenant", "(", "request", ",", "tenant_id", ",", "*", "*", "extra_params", ")", "except", "Exception", "as", "e", ":", "msg", "=", "_", "(", "'Failed to get network list {0}'", ")", ".", "format", "(", "six", ".", "text_type", "(", "e", ")", ")", "exceptions", ".", "handle", "(", "request", ",", "msg", ")", "_networks", "=", "[", "]", "for", "n", "in", "networks", ":", "if", "not", "n", "[", "'subnets'", "]", ":", "continue", "v", "=", "n", ".", "name_or_id", "if", "with_cidr", ":", "cidrs", "=", "(", "[", "subnet", ".", "cidr", "for", "subnet", "in", "n", "[", "'subnets'", "]", "if", "subnet", ".", "ip_version", "==", "4", "]", "+", "[", "subnet", ".", "cidr", "for", "subnet", "in", "n", "[", "'subnets'", "]", "if", "subnet", ".", "ip_version", "==", "6", "]", ")", "v", "+=", "' (%s)'", "%", "', '", ".", "join", "(", "cidrs", ")", "_networks", ".", "append", "(", "(", "n", ".", "id", ",", "v", ")", ")", "networks", "=", "sorted", "(", "_networks", ",", "key", "=", "itemgetter", "(", "1", ")", ")", "if", "not", "networks", ":", "if", "include_empty_option", ":", "return", "[", "(", "\"\"", ",", "_", "(", "\"No networks available\"", ")", ")", ",", "]", "return", "[", "]", "if", "include_empty_option", ":", "return", "[", "(", "\"\"", ",", "_", "(", "\"Select Network\"", ")", ")", ",", "]", "+", "networks", "return", "networks" ]
Returns a list of tuples of all networks. Generates a list of networks available to the user (request). And returns a list of (id, name) tuples. :param request: django http request object :param include_empty_option: flag to include a empty tuple in the front of the list :param with_cidr: flag to include subnets cidr in field name :return: list of (id, name) tuples
[ "Returns", "a", "list", "of", "tuples", "of", "all", "networks", "." ]
python
train
bolt-project/bolt
bolt/factory.py
https://github.com/bolt-project/bolt/blob/9cd7104aa085498da3097b72696184b9d3651c51/bolt/factory.py#L9-L35
def wrapped(f):
    """
    Decorator to append routed docstrings
    """
    import inspect

    def extract(func):
        append = ""
        args = inspect.getargspec(func)
        for i, a in enumerate(args.args):
            if i < (len(args) - len(args.defaults)):
                append += str(a) + ", "
            else:
                default = args.defaults[i-len(args.defaults)]
                if hasattr(default, "__name__"):
                    default = default.__name__
                else:
                    default = str(default)
                append += str(a) + "=" + default + ", "
        append = append[:-2] + ")"
        return append

    doc = f.__doc__ + "\n"
    doc += " local -> array(" + extract(getattr(ConstructLocal, f.__name__)) + "\n"
    doc += " spark -> array(" + extract(getattr(ConstructSpark, f.__name__)) + "\n"
    f.__doc__ = doc
    return f
[ "def", "wrapped", "(", "f", ")", ":", "import", "inspect", "def", "extract", "(", "func", ")", ":", "append", "=", "\"\"", "args", "=", "inspect", ".", "getargspec", "(", "func", ")", "for", "i", ",", "a", "in", "enumerate", "(", "args", ".", "args", ")", ":", "if", "i", "<", "(", "len", "(", "args", ")", "-", "len", "(", "args", ".", "defaults", ")", ")", ":", "append", "+=", "str", "(", "a", ")", "+", "\", \"", "else", ":", "default", "=", "args", ".", "defaults", "[", "i", "-", "len", "(", "args", ".", "defaults", ")", "]", "if", "hasattr", "(", "default", ",", "\"__name__\"", ")", ":", "default", "=", "default", ".", "__name__", "else", ":", "default", "=", "str", "(", "default", ")", "append", "+=", "str", "(", "a", ")", "+", "\"=\"", "+", "default", "+", "\", \"", "append", "=", "append", "[", ":", "-", "2", "]", "+", "\")\"", "return", "append", "doc", "=", "f", ".", "__doc__", "+", "\"\\n\"", "doc", "+=", "\" local -> array(\"", "+", "extract", "(", "getattr", "(", "ConstructLocal", ",", "f", ".", "__name__", ")", ")", "+", "\"\\n\"", "doc", "+=", "\" spark -> array(\"", "+", "extract", "(", "getattr", "(", "ConstructSpark", ",", "f", ".", "__name__", ")", ")", "+", "\"\\n\"", "f", ".", "__doc__", "=", "doc", "return", "f" ]
Decorator to append routed docstrings
[ "Decorator", "to", "append", "routed", "docstrings" ]
python
test
kgori/treeCl
treeCl/tasks.py
https://github.com/kgori/treeCl/blob/fed624b3db1c19cc07175ca04e3eda6905a8d305/treeCl/tasks.py#L46-L53
def eucdist_task(newick_string_a, newick_string_b, normalise, min_overlap=4, overlap_fail_value=0):
    """ Distributed version of tree_distance.eucdist
    Parameters: two valid newick strings and a boolean
    """
    tree_a = Tree(newick_string_a)
    tree_b = Tree(newick_string_b)
    return treedist.eucdist(tree_a, tree_b, normalise, min_overlap, overlap_fail_value)
[ "def", "eucdist_task", "(", "newick_string_a", ",", "newick_string_b", ",", "normalise", ",", "min_overlap", "=", "4", ",", "overlap_fail_value", "=", "0", ")", ":", "tree_a", "=", "Tree", "(", "newick_string_a", ")", "tree_b", "=", "Tree", "(", "newick_string_b", ")", "return", "treedist", ".", "eucdist", "(", "tree_a", ",", "tree_b", ",", "normalise", ",", "min_overlap", ",", "overlap_fail_value", ")" ]
Distributed version of tree_distance.eucdist Parameters: two valid newick strings and a boolean
[ "Distributed", "version", "of", "tree_distance", ".", "eucdist", "Parameters", ":", "two", "valid", "newick", "strings", "and", "a", "boolean" ]
python
train
alex-kostirin/pyatomac
atomac/ldtpd/table.py
https://github.com/alex-kostirin/pyatomac/blob/3f46f6feb4504315eec07abb18bb41be4d257aeb/atomac/ldtpd/table.py#L523-L554
def doubleclickrow(self, window_name, object_name, row_text):
    """
    Double click row matching given text

    @param window_name: Window name to type in, either full name,
    LDTP's name convention, or a Unix glob.
    @type window_name: string
    @param object_name: Object name to type in, either full name,
    LDTP's name convention, or a Unix glob.
    @type object_name: string
    @param row_text: Row text to select
    @type row_text: string

    @return: row index matching the text on success.
    @rtype: integer
    """
    object_handle = self._get_object_handle(window_name, object_name)
    if not object_handle.AXEnabled:
        raise LdtpServerException(u"Object %s state disabled" % object_name)
    object_handle.activate()
    self.wait(1)
    for cell in object_handle.AXRows:
        cell = self._getfirstmatchingchild(cell, "(AXTextField|AXStaticText)")
        if not cell:
            continue
        if re.match(row_text, cell.AXValue):
            x, y, width, height = self._getobjectsize(cell)
            # Mouse double click on the object
            cell.doubleClickMouse((x + width / 2, y + height / 2))
            return 1
    raise LdtpServerException('Unable to get row text: %s' % row_text)
[ "def", "doubleclickrow", "(", "self", ",", "window_name", ",", "object_name", ",", "row_text", ")", ":", "object_handle", "=", "self", ".", "_get_object_handle", "(", "window_name", ",", "object_name", ")", "if", "not", "object_handle", ".", "AXEnabled", ":", "raise", "LdtpServerException", "(", "u\"Object %s state disabled\"", "%", "object_name", ")", "object_handle", ".", "activate", "(", ")", "self", ".", "wait", "(", "1", ")", "for", "cell", "in", "object_handle", ".", "AXRows", ":", "cell", "=", "self", ".", "_getfirstmatchingchild", "(", "cell", ",", "\"(AXTextField|AXStaticText)\"", ")", "if", "not", "cell", ":", "continue", "if", "re", ".", "match", "(", "row_text", ",", "cell", ".", "AXValue", ")", ":", "x", ",", "y", ",", "width", ",", "height", "=", "self", ".", "_getobjectsize", "(", "cell", ")", "# Mouse double click on the object", "cell", ".", "doubleClickMouse", "(", "(", "x", "+", "width", "/", "2", ",", "y", "+", "height", "/", "2", ")", ")", "return", "1", "raise", "LdtpServerException", "(", "'Unable to get row text: %s'", "%", "row_text", ")" ]
Double click row matching given text @param window_name: Window name to type in, either full name, LDTP's name convention, or a Unix glob. @type window_name: string @param object_name: Object name to type in, either full name, LDTP's name convention, or a Unix glob. @type object_name: string @param row_text: Row text to select @type row_text: string @return: row index matching the text on success. @rtype: integer
[ "Double", "click", "row", "matching", "given", "text" ]
python
valid
radjkarl/fancyTools
fancytools/fcollections/FIFObuffer.py
https://github.com/radjkarl/fancyTools/blob/4c4d961003dc4ed6e46429a0c24f7e2bb52caa8b/fancytools/fcollections/FIFObuffer.py#L79-L87
def splitPos(self):
    """return the position of where to split the array
    to get the values in the right order"""
    if self._ind < self.shape:
        return 0
    v = int(self._splitPos)
    if v >= 1:
        self._splitPos = 0
    return v
[ "def", "splitPos", "(", "self", ")", ":", "if", "self", ".", "_ind", "<", "self", ".", "shape", ":", "return", "0", "v", "=", "int", "(", "self", ".", "_splitPos", ")", "if", "v", ">=", "1", ":", "self", ".", "_splitPos", "=", "0", "return", "v" ]
return the position of where to split the array to get the values in the right order
[ "return", "the", "position", "of", "where", "to", "split", "the", "array", "to", "get", "the", "values", "in", "the", "right", "order" ]
python
train
mikedh/trimesh
trimesh/triangles.py
https://github.com/mikedh/trimesh/blob/25e059bf6d4caa74f62ffd58ce4f61a90ee4e518/trimesh/triangles.py#L34-L57
def area(triangles=None, crosses=None, sum=False):
    """
    Calculates the sum area of input triangles

    Parameters
    ----------
    triangles : (n, 3, 3) float
      Vertices of triangles
    crosses : (n, 3) float or None
      As a speedup don't re- compute cross products
    sum : bool
      Return summed area or individual triangle area

    Returns
    ----------
    area : (n,) float or float
      Individual or summed area depending on `sum` argument
    """
    if crosses is None:
        crosses = cross(triangles)
    area = (np.sum(crosses**2, axis=1)**.5) * .5
    if sum:
        return np.sum(area)
    return area
[ "def", "area", "(", "triangles", "=", "None", ",", "crosses", "=", "None", ",", "sum", "=", "False", ")", ":", "if", "crosses", "is", "None", ":", "crosses", "=", "cross", "(", "triangles", ")", "area", "=", "(", "np", ".", "sum", "(", "crosses", "**", "2", ",", "axis", "=", "1", ")", "**", ".5", ")", "*", ".5", "if", "sum", ":", "return", "np", ".", "sum", "(", "area", ")", "return", "area" ]
Calculates the sum area of input triangles Parameters ---------- triangles : (n, 3, 3) float Vertices of triangles crosses : (n, 3) float or None As a speedup don't re- compute cross products sum : bool Return summed area or individual triangle area Returns ---------- area : (n,) float or float Individual or summed area depending on `sum` argument
[ "Calculates", "the", "sum", "area", "of", "input", "triangles" ]
python
train
sernst/cauldron
cauldron/session/exposed.py
https://github.com/sernst/cauldron/blob/4086aec9c038c402ea212c79fe8bd0d27104f9cf/cauldron/session/exposed.py#L41-L47
def display(self) -> typing.Union[None, report.Report]:
    """The display report for the current project."""
    return (
        self._project.current_step.report
        if self._project and self._project.current_step
        else None
    )
[ "def", "display", "(", "self", ")", "->", "typing", ".", "Union", "[", "None", ",", "report", ".", "Report", "]", ":", "return", "(", "self", ".", "_project", ".", "current_step", ".", "report", "if", "self", ".", "_project", "and", "self", ".", "_project", ".", "current_step", "else", "None", ")" ]
The display report for the current project.
[ "The", "display", "report", "for", "the", "current", "project", "." ]
python
train
senaite/senaite.core
bika/lims/idserver.py
https://github.com/senaite/senaite.core/blob/7602ce2ea2f9e81eb34e20ce17b98a3e70713f85/bika/lims/idserver.py#L361-L367
def make_storage_key(portal_type, prefix=None):
    """Make a storage (dict-) key for the number generator
    """
    key = portal_type.lower()
    if prefix:
        key = "{}-{}".format(key, prefix)
    return key
[ "def", "make_storage_key", "(", "portal_type", ",", "prefix", "=", "None", ")", ":", "key", "=", "portal_type", ".", "lower", "(", ")", "if", "prefix", ":", "key", "=", "\"{}-{}\"", ".", "format", "(", "key", ",", "prefix", ")", "return", "key" ]
Make a storage (dict-) key for the number generator
[ "Make", "a", "storage", "(", "dict", "-", ")", "key", "for", "the", "number", "generator" ]
python
train
brocade/pynos
pynos/versions/ver_6/ver_6_0_1/yang/brocade_aaa.py
https://github.com/brocade/pynos/blob/bd8a34e98f322de3fc06750827d8bbc3a0c00380/pynos/versions/ver_6/ver_6_0_1/yang/brocade_aaa.py#L305-L317
def tacacs_server_host_port(self, **kwargs):
    """Auto Generated Code
    """
    config = ET.Element("config")
    tacacs_server = ET.SubElement(config, "tacacs-server",
                                  xmlns="urn:brocade.com:mgmt:brocade-aaa")
    host = ET.SubElement(tacacs_server, "host")
    hostname_key = ET.SubElement(host, "hostname")
    hostname_key.text = kwargs.pop('hostname')
    port = ET.SubElement(host, "port")
    port.text = kwargs.pop('port')

    callback = kwargs.pop('callback', self._callback)
    return callback(config)
[ "def", "tacacs_server_host_port", "(", "self", ",", "*", "*", "kwargs", ")", ":", "config", "=", "ET", ".", "Element", "(", "\"config\"", ")", "tacacs_server", "=", "ET", ".", "SubElement", "(", "config", ",", "\"tacacs-server\"", ",", "xmlns", "=", "\"urn:brocade.com:mgmt:brocade-aaa\"", ")", "host", "=", "ET", ".", "SubElement", "(", "tacacs_server", ",", "\"host\"", ")", "hostname_key", "=", "ET", ".", "SubElement", "(", "host", ",", "\"hostname\"", ")", "hostname_key", ".", "text", "=", "kwargs", ".", "pop", "(", "'hostname'", ")", "port", "=", "ET", ".", "SubElement", "(", "host", ",", "\"port\"", ")", "port", ".", "text", "=", "kwargs", ".", "pop", "(", "'port'", ")", "callback", "=", "kwargs", ".", "pop", "(", "'callback'", ",", "self", ".", "_callback", ")", "return", "callback", "(", "config", ")" ]
Auto Generated Code
[ "Auto", "Generated", "Code" ]
python
train
gwastro/pycbc
pycbc/types/timeseries.py
https://github.com/gwastro/pycbc/blob/7a64cdd104d263f1b6ea0b01e6841837d05a4cb3/pycbc/types/timeseries.py#L247-L306
def almost_equal_elem(self, other, tol, relative=True, dtol=0.0):
    """
    Compare whether two time series are almost equal, element
    by element.

    If the 'relative' parameter is 'True' (the default) then the
    'tol' parameter (which must be positive) is interpreted as a
    relative tolerance, and the comparison returns 'True' only if
    abs(self[i]-other[i]) <= tol*abs(self[i])
    for all elements of the series.

    If 'relative' is 'False', then 'tol' is an absolute tolerance,
    and the comparison is true only if
    abs(self[i]-other[i]) <= tol
    for all elements of the series.

    The method also checks that self.delta_t is within 'dtol' of
    other.delta_t; if 'dtol' has its default value of 0 then exact
    equality between the two is required.

    Other meta-data (type, dtype, length, and epoch) must be exactly
    equal.  If either object's memory lives on the GPU it will be
    copied to the CPU for the comparison, which may be slow.  But the
    original object itself will not have its memory relocated nor
    scheme changed.

    Parameters
    ----------
    other: another Python object, that should be tested for
        almost-equality with 'self', element-by-element.
    tol: a non-negative number, the tolerance, which is interpreted
        as either a relative tolerance (the default) or an absolute
        tolerance.
    relative: A boolean, indicating whether 'tol' should be interpreted
        as a relative tolerance (if True, the default if this argument
        is omitted) or as an absolute tolerance (if tol is False).
    dtol: a non-negative number, the tolerance for delta_t. Like 'tol',
        it is interpreted as relative or absolute based on the value of
        'relative'. This parameter defaults to zero, enforcing exact
        equality between the delta_t values of the two TimeSeries.

    Returns
    -------
    boolean: 'True' if the data and delta_ts agree within the tolerance,
        as interpreted by the 'relative' keyword, and if the types,
        lengths, dtypes, and epochs are exactly the same.
    """
    # Check that the delta_t tolerance is non-negative; raise an exception
    # if needed.
    if (dtol < 0.0):
        raise ValueError("Tolerance in delta_t cannot be negative")
    if super(TimeSeries, self).almost_equal_elem(other, tol=tol, relative=relative):
        if relative:
            return (self._epoch == other._epoch and
                    abs(self._delta_t-other._delta_t) <= dtol*self._delta_t)
        else:
            return (self._epoch == other._epoch and
                    abs(self._delta_t-other._delta_t) <= dtol)
    else:
        return False
[ "def", "almost_equal_elem", "(", "self", ",", "other", ",", "tol", ",", "relative", "=", "True", ",", "dtol", "=", "0.0", ")", ":", "# Check that the delta_t tolerance is non-negative; raise an exception", "# if needed.", "if", "(", "dtol", "<", "0.0", ")", ":", "raise", "ValueError", "(", "\"Tolerance in delta_t cannot be negative\"", ")", "if", "super", "(", "TimeSeries", ",", "self", ")", ".", "almost_equal_elem", "(", "other", ",", "tol", "=", "tol", ",", "relative", "=", "relative", ")", ":", "if", "relative", ":", "return", "(", "self", ".", "_epoch", "==", "other", ".", "_epoch", "and", "abs", "(", "self", ".", "_delta_t", "-", "other", ".", "_delta_t", ")", "<=", "dtol", "*", "self", ".", "_delta_t", ")", "else", ":", "return", "(", "self", ".", "_epoch", "==", "other", ".", "_epoch", "and", "abs", "(", "self", ".", "_delta_t", "-", "other", ".", "_delta_t", ")", "<=", "dtol", ")", "else", ":", "return", "False" ]
Compare whether two time series are almost equal, element by element. If the 'relative' parameter is 'True' (the default) then the 'tol' parameter (which must be positive) is interpreted as a relative tolerance, and the comparison returns 'True' only if abs(self[i]-other[i]) <= tol*abs(self[i]) for all elements of the series. If 'relative' is 'False', then 'tol' is an absolute tolerance, and the comparison is true only if abs(self[i]-other[i]) <= tol for all elements of the series. The method also checks that self.delta_t is within 'dtol' of other.delta_t; if 'dtol' has its default value of 0 then exact equality between the two is required. Other meta-data (type, dtype, length, and epoch) must be exactly equal. If either object's memory lives on the GPU it will be copied to the CPU for the comparison, which may be slow. But the original object itself will not have its memory relocated nor scheme changed. Parameters ---------- other: another Python object, that should be tested for almost-equality with 'self', element-by-element. tol: a non-negative number, the tolerance, which is interpreted as either a relative tolerance (the default) or an absolute tolerance. relative: A boolean, indicating whether 'tol' should be interpreted as a relative tolerance (if True, the default if this argument is omitted) or as an absolute tolerance (if tol is False). dtol: a non-negative number, the tolerance for delta_t. Like 'tol', it is interpreted as relative or absolute based on the value of 'relative'. This parameter defaults to zero, enforcing exact equality between the delta_t values of the two TimeSeries. Returns ------- boolean: 'True' if the data and delta_ts agree within the tolerance, as interpreted by the 'relative' keyword, and if the types, lengths, dtypes, and epochs are exactly the same.
[ "Compare", "whether", "two", "time", "series", "are", "almost", "equal", "element", "by", "element", "." ]
python
train
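The tolerance rule documented above can be illustrated with a short, self-contained NumPy sketch. This shows only the element-wise relative/absolute comparison logic; the function name almost_equal and the sample arrays are hypothetical and independent of the TimeSeries class:

import numpy as np

def almost_equal(a, b, tol, relative=True):
    # mirrors the rule above: relative -> |a[i]-b[i]| <= tol*|a[i]|,
    # absolute -> |a[i]-b[i]| <= tol, for every element
    a = np.asarray(a, dtype=float)
    b = np.asarray(b, dtype=float)
    if tol < 0:
        raise ValueError("tolerance cannot be negative")
    if a.shape != b.shape:
        return False
    if relative:
        return bool(np.all(np.abs(a - b) <= tol * np.abs(a)))
    return bool(np.all(np.abs(a - b) <= tol))

print(almost_equal([1.0, 2.0], [1.001, 2.002], tol=0.01))               # True
print(almost_equal([1.0, 2.0], [1.1, 2.0], tol=0.01, relative=False))   # False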
cloudtools/stacker
stacker/lookups/handlers/file.py
https://github.com/cloudtools/stacker/blob/ad6013a03a560c46ba3c63c4d153336273e6da5d/stacker/lookups/handlers/file.py#L174-L201
def _parameterize_obj(obj): """Recursively parameterize all strings contained in an object. Parameterizes all values of a Mapping, all items of a Sequence, an unicode string, or pass other objects through unmodified. Byte strings will be interpreted as UTF-8. Args: obj: data to parameterize Return: A parameterized object to be included in a CloudFormation template. Mappings are converted to `dict`, Sequences are converted to `list`, and strings possibly replaced by compositions of function calls. """ if isinstance(obj, Mapping): return dict((key, _parameterize_obj(value)) for key, value in obj.items()) elif isinstance(obj, bytes): return _parameterize_string(obj.decode('utf8')) elif isinstance(obj, str): return _parameterize_string(obj) elif isinstance(obj, Sequence): return list(_parameterize_obj(item) for item in obj) else: return obj
[ "def", "_parameterize_obj", "(", "obj", ")", ":", "if", "isinstance", "(", "obj", ",", "Mapping", ")", ":", "return", "dict", "(", "(", "key", ",", "_parameterize_obj", "(", "value", ")", ")", "for", "key", ",", "value", "in", "obj", ".", "items", "(", ")", ")", "elif", "isinstance", "(", "obj", ",", "bytes", ")", ":", "return", "_parameterize_string", "(", "obj", ".", "decode", "(", "'utf8'", ")", ")", "elif", "isinstance", "(", "obj", ",", "str", ")", ":", "return", "_parameterize_string", "(", "obj", ")", "elif", "isinstance", "(", "obj", ",", "Sequence", ")", ":", "return", "list", "(", "_parameterize_obj", "(", "item", ")", "for", "item", "in", "obj", ")", "else", ":", "return", "obj" ]
Recursively parameterize all strings contained in an object.

Parameterizes all values of a Mapping, all items of a Sequence, a
unicode string, or passes other objects through unmodified.

Byte strings will be interpreted as UTF-8.

Args:
    obj: data to parameterize
Return:
    A parameterized object to be included in a CloudFormation template.

    Mappings are converted to `dict`, Sequences are converted to `list`,
    and strings are possibly replaced by compositions of function calls.
[ "Recursively", "parameterize", "all", "strings", "contained", "in", "an", "object", "." ]
python
train
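The recursive walk in _parameterize_obj above is a general pattern for rewriting every string inside nested mappings and sequences. The sketch below is self-contained and substitutes a simple upper-casing transform for stacker's _parameterize_string (which is defined elsewhere in the module); note that bytes and str are handled before the Sequence branch, exactly as in the original, so they are not iterated character by character:

from collections.abc import Mapping, Sequence

def walk_strings(obj, transform):
    # apply `transform` to every str/bytes value found in nested containers
    if isinstance(obj, Mapping):
        return {key: walk_strings(value, transform) for key, value in obj.items()}
    if isinstance(obj, bytes):
        return transform(obj.decode('utf8'))
    if isinstance(obj, str):
        return transform(obj)
    if isinstance(obj, Sequence):
        return [walk_strings(item, transform) for item in obj]
    return obj

print(walk_strings({'name': 'web', 'ports': [b'http', 80]}, str.upper))
# {'name': 'WEB', 'ports': ['HTTP', 80]}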
django-salesforce/django-salesforce
salesforce/router.py
https://github.com/django-salesforce/django-salesforce/blob/6fd5643dba69d49c5881de50875cf90204a8f808/salesforce/router.py#L60-L85
def allow_migrate(self, db, app_label, model_name=None, **hints): """ Don't attempt to sync SF models to non SF databases and vice versa. """ if model_name: model = apps.get_model(app_label, model_name) else: # hints are used with less priority, because many hints are dynamic # models made by migrations on a '__fake__' module which are not # SalesforceModels model = hints.get('model') if hasattr(model, '_salesforce_object'): # SF models can be migrated if SALESFORCE_DB_ALIAS is e.g. # a sqlite3 database or any non-SF database. if not (is_sf_database(db) or db == self.sf_alias): return False else: if is_sf_database(db) or self.sf_alias != 'default' and db == self.sf_alias: return False # TODO: It is usual that "migrate" is currently disallowed for SF. # In the future it can be implemented to do a deep check by # introspection of compatibily between Django models and SF database. if hasattr(model, '_salesforce_object'): # return False pass
[ "def", "allow_migrate", "(", "self", ",", "db", ",", "app_label", ",", "model_name", "=", "None", ",", "*", "*", "hints", ")", ":", "if", "model_name", ":", "model", "=", "apps", ".", "get_model", "(", "app_label", ",", "model_name", ")", "else", ":", "# hints are used with less priority, because many hints are dynamic", "# models made by migrations on a '__fake__' module which are not", "# SalesforceModels", "model", "=", "hints", ".", "get", "(", "'model'", ")", "if", "hasattr", "(", "model", ",", "'_salesforce_object'", ")", ":", "# SF models can be migrated if SALESFORCE_DB_ALIAS is e.g.", "# a sqlite3 database or any non-SF database.", "if", "not", "(", "is_sf_database", "(", "db", ")", "or", "db", "==", "self", ".", "sf_alias", ")", ":", "return", "False", "else", ":", "if", "is_sf_database", "(", "db", ")", "or", "self", ".", "sf_alias", "!=", "'default'", "and", "db", "==", "self", ".", "sf_alias", ":", "return", "False", "# TODO: It is usual that \"migrate\" is currently disallowed for SF.", "# In the future it can be implemented to do a deep check by", "# introspection of compatibily between Django models and SF database.", "if", "hasattr", "(", "model", ",", "'_salesforce_object'", ")", ":", "# return False", "pass" ]
Don't attempt to sync SF models to non SF databases and vice versa.
[ "Don", "t", "attempt", "to", "sync", "SF", "models", "to", "non", "SF", "databases", "and", "vice", "versa", "." ]
python
train
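For readers unfamiliar with Django database routers: allow_migrate() above participates in a contract where returning False vetoes the migration on that database, and returning None (falling off the end of the method, as happens above once neither early return fires) means "no opinion", deferring to other routers or the default behaviour. The router below is a hypothetical, unrelated example of that same contract, not part of django-salesforce:

class AnalyticsRouter:
    # Route a hypothetical 'analytics' app to its own database.
    def allow_migrate(self, db, app_label, model_name=None, **hints):
        if app_label == 'analytics':
            return db == 'analytics_db'   # only migrate these models there
        return None                       # no opinion about other apps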
nerdvegas/rez
src/rez/build_system.py
https://github.com/nerdvegas/rez/blob/1d3b846d53b5b5404edfe8ddb9083f9ceec8c5e7/src/rez/build_system.py#L198-L243
def get_standard_vars(cls, context, variant, build_type, install, build_path, install_path=None): """Returns a standard set of environment variables that can be set for the build system to use """ from rez.config import config package = variant.parent variant_requires = map(str, variant.variant_requires) if variant.index is None: variant_subpath = '' else: variant_subpath = variant._non_shortlinked_subpath vars_ = { 'REZ_BUILD_ENV': 1, 'REZ_BUILD_PATH': build_path, 'REZ_BUILD_THREAD_COUNT': package.config.build_thread_count, 'REZ_BUILD_VARIANT_INDEX': variant.index or 0, 'REZ_BUILD_VARIANT_REQUIRES': ' '.join(variant_requires), 'REZ_BUILD_VARIANT_SUBPATH': variant_subpath, 'REZ_BUILD_PROJECT_VERSION': str(package.version), 'REZ_BUILD_PROJECT_NAME': package.name, 'REZ_BUILD_PROJECT_DESCRIPTION': (package.description or '').strip(), 'REZ_BUILD_PROJECT_FILE': package.filepath, 'REZ_BUILD_SOURCE_PATH': os.path.dirname(package.filepath), 'REZ_BUILD_REQUIRES': ' '.join( str(x) for x in context.requested_packages(True) ), 'REZ_BUILD_REQUIRES_UNVERSIONED': ' '.join( x.name for x in context.requested_packages(True) ), 'REZ_BUILD_TYPE': build_type.name, 'REZ_BUILD_INSTALL': 1 if install else 0, } if install_path: vars_['REZ_BUILD_INSTALL_PATH'] = install_path if config.rez_1_environment_variables and \ not config.disable_rez_1_compatibility and \ build_type == BuildType.central: vars_['REZ_IN_REZ_RELEASE'] = 1 return vars_
[ "def", "get_standard_vars", "(", "cls", ",", "context", ",", "variant", ",", "build_type", ",", "install", ",", "build_path", ",", "install_path", "=", "None", ")", ":", "from", "rez", ".", "config", "import", "config", "package", "=", "variant", ".", "parent", "variant_requires", "=", "map", "(", "str", ",", "variant", ".", "variant_requires", ")", "if", "variant", ".", "index", "is", "None", ":", "variant_subpath", "=", "''", "else", ":", "variant_subpath", "=", "variant", ".", "_non_shortlinked_subpath", "vars_", "=", "{", "'REZ_BUILD_ENV'", ":", "1", ",", "'REZ_BUILD_PATH'", ":", "build_path", ",", "'REZ_BUILD_THREAD_COUNT'", ":", "package", ".", "config", ".", "build_thread_count", ",", "'REZ_BUILD_VARIANT_INDEX'", ":", "variant", ".", "index", "or", "0", ",", "'REZ_BUILD_VARIANT_REQUIRES'", ":", "' '", ".", "join", "(", "variant_requires", ")", ",", "'REZ_BUILD_VARIANT_SUBPATH'", ":", "variant_subpath", ",", "'REZ_BUILD_PROJECT_VERSION'", ":", "str", "(", "package", ".", "version", ")", ",", "'REZ_BUILD_PROJECT_NAME'", ":", "package", ".", "name", ",", "'REZ_BUILD_PROJECT_DESCRIPTION'", ":", "(", "package", ".", "description", "or", "''", ")", ".", "strip", "(", ")", ",", "'REZ_BUILD_PROJECT_FILE'", ":", "package", ".", "filepath", ",", "'REZ_BUILD_SOURCE_PATH'", ":", "os", ".", "path", ".", "dirname", "(", "package", ".", "filepath", ")", ",", "'REZ_BUILD_REQUIRES'", ":", "' '", ".", "join", "(", "str", "(", "x", ")", "for", "x", "in", "context", ".", "requested_packages", "(", "True", ")", ")", ",", "'REZ_BUILD_REQUIRES_UNVERSIONED'", ":", "' '", ".", "join", "(", "x", ".", "name", "for", "x", "in", "context", ".", "requested_packages", "(", "True", ")", ")", ",", "'REZ_BUILD_TYPE'", ":", "build_type", ".", "name", ",", "'REZ_BUILD_INSTALL'", ":", "1", "if", "install", "else", "0", ",", "}", "if", "install_path", ":", "vars_", "[", "'REZ_BUILD_INSTALL_PATH'", "]", "=", "install_path", "if", "config", ".", "rez_1_environment_variables", "and", "not", "config", ".", "disable_rez_1_compatibility", "and", "build_type", "==", "BuildType", ".", "central", ":", "vars_", "[", "'REZ_IN_REZ_RELEASE'", "]", "=", "1", "return", "vars_" ]
Returns a standard set of environment variables that can be set for the build system to use
[ "Returns", "a", "standard", "set", "of", "environment", "variables", "that", "can", "be", "set", "for", "the", "build", "system", "to", "use" ]
python
train
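get_standard_vars() above only builds a plain dict; exporting it into a real process environment is left to the caller. A minimal, hypothetical way to do that (everything here except the REZ_* keys produced above is an assumption, not rez API) is to stringify the values and hand them to a subprocess:

import os
import subprocess

def run_in_build_env(cmd, vars_):
    # environment values must be strings; ints such as REZ_BUILD_ENV=1 are coerced
    env = dict(os.environ)
    env.update({key: str(value) for key, value in vars_.items()})
    return subprocess.call(cmd, env=env)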
dagster-io/dagster
python_modules/dagster/dagster/core/events/logging.py
https://github.com/dagster-io/dagster/blob/4119f8c773089de64831b1dfb9e168e353d401dc/python_modules/dagster/dagster/core/events/logging.py#L149-L167
def construct_json_event_logger(json_path): '''Record a stream of event records to json''' check.str_param(json_path, 'json_path') return construct_single_handler_logger( "json-event-record-logger", DEBUG, JsonEventLoggerHandler( json_path, lambda record: construct_event_record( StructuredLoggerMessage( name=record.name, message=record.msg, level=record.levelno, meta=record.dagster_meta, record=record, ) ), ), )
[ "def", "construct_json_event_logger", "(", "json_path", ")", ":", "check", ".", "str_param", "(", "json_path", ",", "'json_path'", ")", "return", "construct_single_handler_logger", "(", "\"json-event-record-logger\"", ",", "DEBUG", ",", "JsonEventLoggerHandler", "(", "json_path", ",", "lambda", "record", ":", "construct_event_record", "(", "StructuredLoggerMessage", "(", "name", "=", "record", ".", "name", ",", "message", "=", "record", ".", "msg", ",", "level", "=", "record", ".", "levelno", ",", "meta", "=", "record", ".", "dagster_meta", ",", "record", "=", "record", ",", ")", ")", ",", ")", ",", ")" ]
Record a stream of event records to json
[ "Record", "a", "stream", "of", "event", "records", "to", "json" ]
python
test
dnanexus/dx-toolkit
src/python/dxpy/api.py
https://github.com/dnanexus/dx-toolkit/blob/74befb53ad90fcf902d8983ae6d74580f402d619/src/python/dxpy/api.py#L1361-L1367
def user_update(object_id, input_params={}, always_retry=False, **kwargs): """ Invokes the /user-xxxx/update API method. For more info, see: https://wiki.dnanexus.com/API-Specification-v1.0.0/Users#API-method%3A-%2Fuser-xxxx%2Fupdate """ return DXHTTPRequest('/%s/update' % object_id, input_params, always_retry=always_retry, **kwargs)
[ "def", "user_update", "(", "object_id", ",", "input_params", "=", "{", "}", ",", "always_retry", "=", "False", ",", "*", "*", "kwargs", ")", ":", "return", "DXHTTPRequest", "(", "'/%s/update'", "%", "object_id", ",", "input_params", ",", "always_retry", "=", "always_retry", ",", "*", "*", "kwargs", ")" ]
Invokes the /user-xxxx/update API method. For more info, see: https://wiki.dnanexus.com/API-Specification-v1.0.0/Users#API-method%3A-%2Fuser-xxxx%2Fupdate
[ "Invokes", "the", "/", "user", "-", "xxxx", "/", "update", "API", "method", "." ]
python
train
garenchan/policy
setup.py
https://github.com/garenchan/policy/blob/7709ae5f371146f8c90380d0877a5e59d731f644/setup.py#L61-L68
def get_install_requires(): """return package's install requires""" base = os.path.abspath(os.path.dirname(__file__)) requirements_file = os.path.join(base, 'requirements.txt') if not os.path.exists(requirements_file): return [] with open(requirements_file, mode='rt', encoding='utf-8') as f: return f.read().splitlines()
[ "def", "get_install_requires", "(", ")", ":", "base", "=", "os", ".", "path", ".", "abspath", "(", "os", ".", "path", ".", "dirname", "(", "__file__", ")", ")", "requirements_file", "=", "os", ".", "path", ".", "join", "(", "base", ",", "'requirements.txt'", ")", "if", "not", "os", ".", "path", ".", "exists", "(", "requirements_file", ")", ":", "return", "[", "]", "with", "open", "(", "requirements_file", ",", "mode", "=", "'rt'", ",", "encoding", "=", "'utf-8'", ")", "as", "f", ":", "return", "f", ".", "read", "(", ")", ".", "splitlines", "(", ")" ]
return package's install requires
[ "return", "package", "s", "install", "requires" ]
python
train
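A typical way the helper above is consumed is directly inside the setup() call of the same setup.py. The snippet below is a plausible sketch; the package name and the find_packages() call are assumptions, not taken from the repository:

from setuptools import setup, find_packages

setup(
    name='policy',
    packages=find_packages(),
    install_requires=get_install_requires(),   # reads requirements.txt as above
)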
flipagram/smarterling
smarterling/__init__.py
https://github.com/flipagram/smarterling/blob/2ea0957edad0657ba4c54280796869ffc1031b11/smarterling/__init__.py#L25-L33
def get(self, key, default_val=None, require_value=False): """ Returns a dictionary value """ val = dict.get(self, key, default_val) if val is None and require_value: raise KeyError('key "%s" not found' % key) if isinstance(val, dict): return AttributeDict(val) return val
[ "def", "get", "(", "self", ",", "key", ",", "default_val", "=", "None", ",", "require_value", "=", "False", ")", ":", "val", "=", "dict", ".", "get", "(", "self", ",", "key", ",", "default_val", ")", "if", "val", "is", "None", "and", "require_value", ":", "raise", "KeyError", "(", "'key \"%s\" not found'", "%", "key", ")", "if", "isinstance", "(", "val", ",", "dict", ")", ":", "return", "AttributeDict", "(", "val", ")", "return", "val" ]
Returns a dictionary value
[ "Returns", "a", "dictionary", "value" ]
python
train
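Since dict.get(self, ...) is called above, AttributeDict evidently subclasses dict, and the method itself wraps nested dicts as AttributeDict(val). Under those assumptions a usage sketch looks like this (the nested keys are made up):

config = AttributeDict({'files': {'source': 'strings.xml'}})

nested = config.get('files')                 # returned as AttributeDict, not plain dict
print(nested.get('source'))                  # 'strings.xml'
print(config.get('missing', 'n/a'))          # 'n/a' (default value)
config.get('missing', require_value=True)    # raises KeyError: key "missing" not found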
twilio/twilio-python
twilio/rest/serverless/v1/service/asset/asset_version.py
https://github.com/twilio/twilio-python/blob/c867895f55dcc29f522e6e8b8868d0d18483132f/twilio/rest/serverless/v1/service/asset/asset_version.py#L209-L223
def get_instance(self, payload): """ Build an instance of AssetVersionInstance :param dict payload: Payload response from the API :returns: twilio.rest.serverless.v1.service.asset.asset_version.AssetVersionInstance :rtype: twilio.rest.serverless.v1.service.asset.asset_version.AssetVersionInstance """ return AssetVersionInstance( self._version, payload, service_sid=self._solution['service_sid'], asset_sid=self._solution['asset_sid'], )
[ "def", "get_instance", "(", "self", ",", "payload", ")", ":", "return", "AssetVersionInstance", "(", "self", ".", "_version", ",", "payload", ",", "service_sid", "=", "self", ".", "_solution", "[", "'service_sid'", "]", ",", "asset_sid", "=", "self", ".", "_solution", "[", "'asset_sid'", "]", ",", ")" ]
Build an instance of AssetVersionInstance :param dict payload: Payload response from the API :returns: twilio.rest.serverless.v1.service.asset.asset_version.AssetVersionInstance :rtype: twilio.rest.serverless.v1.service.asset.asset_version.AssetVersionInstance
[ "Build", "an", "instance", "of", "AssetVersionInstance" ]
python
train
fr33jc/bang
bang/providers/hpcloud/__init__.py
https://github.com/fr33jc/bang/blob/8f000713f88d2a9a8c1193b63ca10a6578560c16/bang/providers/hpcloud/__init__.py#L29-L104
def create_db(self, instance_name, instance_type, admin_username, admin_password, security_groups=None, db_name=None, storage_size_gb=DEFAULT_STORAGE_SIZE_GB, timeout_s=DEFAULT_TIMEOUT_S): """ Creates a database instance. This method blocks until the db instance is active, or until :attr:`timeout_s` has elapsed. By default, hpcloud *assigns* an automatically-generated set of credentials for an admin user. In addition to launching the db instance, this method uses the autogenerated credentials to login to the server and create the intended admin user based on the credentials supplied as method arguments. :param str instance_name: A name to assign to the db instance. :param str instance_type: The server instance type (e.g. ``medium``). :param str admin_username: The admin username. :param str admin_password: The admin password. :param security_groups: *Not used in hpcloud*. :param str db_name: The database name. If this is not specified, the database will be named the same as the :attr:`instance_name`. :param int storage_size_gb: The size of the storage volume in GB. :param float timeout_s: The number of seconds to poll for an active database server before failing. This value is also used when attempting to connect to the running mysql server. :rtype: :class:`dict` """ db = self._create_db(instance_name, instance_type, storage_size_gb) # hang on to these... hpcloud only provides a way to generate a new # set of username/password - there is no way to retrieve the originals. default_creds = db.credential log.debug('Credentials for %s: %s' % (instance_name, default_creds)) instance = self._poll_instance_status(db, timeout_s) # we're taking advantage of a security bug in hpcloud's dbaas security # group rules. the default *security* is to allow connections from # everywhere in the world. def connect(): try: return pymysql.connect( host=instance.hostname, port=instance.port, # db=self.database, user=default_creds['username'], passwd=default_creds['password'], connect_timeout=timeout_s, ) except: log.warn("Could not connect to db, %s" % instance_name) # log.debug("Connection exception", exc_info=True) log.info("Connecting to %s..." % instance_name) db = poll_with_timeout(timeout_s, connect, 10) cur = db.cursor() cur.execute( "grant all privileges on *.* " "to '%s'@'%%' identified by '%s' " "with grant option" % (admin_username, admin_password) ) cur.execute("flush privileges") return db_to_dict(instance)
[ "def", "create_db", "(", "self", ",", "instance_name", ",", "instance_type", ",", "admin_username", ",", "admin_password", ",", "security_groups", "=", "None", ",", "db_name", "=", "None", ",", "storage_size_gb", "=", "DEFAULT_STORAGE_SIZE_GB", ",", "timeout_s", "=", "DEFAULT_TIMEOUT_S", ")", ":", "db", "=", "self", ".", "_create_db", "(", "instance_name", ",", "instance_type", ",", "storage_size_gb", ")", "# hang on to these... hpcloud only provides a way to generate a new", "# set of username/password - there is no way to retrieve the originals.", "default_creds", "=", "db", ".", "credential", "log", ".", "debug", "(", "'Credentials for %s: %s'", "%", "(", "instance_name", ",", "default_creds", ")", ")", "instance", "=", "self", ".", "_poll_instance_status", "(", "db", ",", "timeout_s", ")", "# we're taking advantage of a security bug in hpcloud's dbaas security", "# group rules. the default *security* is to allow connections from", "# everywhere in the world.", "def", "connect", "(", ")", ":", "try", ":", "return", "pymysql", ".", "connect", "(", "host", "=", "instance", ".", "hostname", ",", "port", "=", "instance", ".", "port", ",", "# db=self.database,", "user", "=", "default_creds", "[", "'username'", "]", ",", "passwd", "=", "default_creds", "[", "'password'", "]", ",", "connect_timeout", "=", "timeout_s", ",", ")", "except", ":", "log", ".", "warn", "(", "\"Could not connect to db, %s\"", "%", "instance_name", ")", "# log.debug(\"Connection exception\", exc_info=True)", "log", ".", "info", "(", "\"Connecting to %s...\"", "%", "instance_name", ")", "db", "=", "poll_with_timeout", "(", "timeout_s", ",", "connect", ",", "10", ")", "cur", "=", "db", ".", "cursor", "(", ")", "cur", ".", "execute", "(", "\"grant all privileges on *.* \"", "\"to '%s'@'%%' identified by '%s' \"", "\"with grant option\"", "%", "(", "admin_username", ",", "admin_password", ")", ")", "cur", ".", "execute", "(", "\"flush privileges\"", ")", "return", "db_to_dict", "(", "instance", ")" ]
Creates a database instance. This method blocks until the db instance is active, or until :attr:`timeout_s` has elapsed. By default, hpcloud *assigns* an automatically-generated set of credentials for an admin user. In addition to launching the db instance, this method uses the autogenerated credentials to login to the server and create the intended admin user based on the credentials supplied as method arguments. :param str instance_name: A name to assign to the db instance. :param str instance_type: The server instance type (e.g. ``medium``). :param str admin_username: The admin username. :param str admin_password: The admin password. :param security_groups: *Not used in hpcloud*. :param str db_name: The database name. If this is not specified, the database will be named the same as the :attr:`instance_name`. :param int storage_size_gb: The size of the storage volume in GB. :param float timeout_s: The number of seconds to poll for an active database server before failing. This value is also used when attempting to connect to the running mysql server. :rtype: :class:`dict`
[ "Creates", "a", "database", "instance", "." ]
python
train
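poll_with_timeout() is used above but defined elsewhere in bang. A rough, self-contained equivalent of what such a helper does (call a function repeatedly until it returns something truthy or the deadline passes) is sketched here; it is an assumption about the helper's behaviour, not the library's actual code:

import time

def poll_with_timeout(timeout_s, fn, interval_s):
    deadline = time.time() + timeout_s
    while time.time() < deadline:
        result = fn()      # e.g. the connect() closure defined above
        if result:
            return result
        time.sleep(interval_s)
    return None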
BerkeleyAutomation/perception
perception/orthographic_intrinsics.py
https://github.com/BerkeleyAutomation/perception/blob/03d9b37dd6b66896cdfe173905c9413c8c3c5df6/perception/orthographic_intrinsics.py#L144-L191
def project_to_image(self, point_cloud, round_px=True): """Projects a point cloud onto the camera image plane and creates a depth image. Zero depth means no point projected into the camera at that pixel location (i.e. infinite depth). Parameters ---------- point_cloud : :obj:`autolab_core.PointCloud` or :obj:`autolab_core.Point` A PointCloud or Point to project onto the camera image plane. round_px : bool If True, projections are rounded to the nearest pixel. Returns ------- :obj:`DepthImage` A DepthImage generated from projecting the point cloud into the camera. Raises ------ ValueError If the input is not a PointCloud or Point in the same reference frame as the camera. """ if not isinstance(point_cloud, PointCloud) and not (isinstance(point_cloud, Point) and point_cloud.dim == 3): raise ValueError('Must provide PointCloud or 3D Point object for projection') if point_cloud.frame != self._frame: raise ValueError('Cannot project points in frame %s into camera with frame %s' %(point_cloud.frame, self._frame)) points_proj = self.S.dot(point_cloud.data) + self.t if len(points_proj.shape) == 1: points_proj = points_proj[:, np.newaxis] point_depths = points_proj[2,:] point_z = np.tile(point_depths, [3, 1]) points_proj = np.divide(points_proj, point_z) if round_px: points_proj = np.round(points_proj) points_proj = points_proj[:2,:].astype(np.int16) valid_ind = np.where((points_proj[0,:] >= 0) & \ (points_proj[1,:] >= 0) & \ (points_proj[0,:] < self.width) & \ (points_proj[1,:] < self.height))[0] depth_data = np.zeros([self.height, self.width]) depth_data[points_proj[1,valid_ind], points_proj[0,valid_ind]] = point_depths[valid_ind] return DepthImage(depth_data, frame=self.frame)
[ "def", "project_to_image", "(", "self", ",", "point_cloud", ",", "round_px", "=", "True", ")", ":", "if", "not", "isinstance", "(", "point_cloud", ",", "PointCloud", ")", "and", "not", "(", "isinstance", "(", "point_cloud", ",", "Point", ")", "and", "point_cloud", ".", "dim", "==", "3", ")", ":", "raise", "ValueError", "(", "'Must provide PointCloud or 3D Point object for projection'", ")", "if", "point_cloud", ".", "frame", "!=", "self", ".", "_frame", ":", "raise", "ValueError", "(", "'Cannot project points in frame %s into camera with frame %s'", "%", "(", "point_cloud", ".", "frame", ",", "self", ".", "_frame", ")", ")", "points_proj", "=", "self", ".", "S", ".", "dot", "(", "point_cloud", ".", "data", ")", "+", "self", ".", "t", "if", "len", "(", "points_proj", ".", "shape", ")", "==", "1", ":", "points_proj", "=", "points_proj", "[", ":", ",", "np", ".", "newaxis", "]", "point_depths", "=", "points_proj", "[", "2", ",", ":", "]", "point_z", "=", "np", ".", "tile", "(", "point_depths", ",", "[", "3", ",", "1", "]", ")", "points_proj", "=", "np", ".", "divide", "(", "points_proj", ",", "point_z", ")", "if", "round_px", ":", "points_proj", "=", "np", ".", "round", "(", "points_proj", ")", "points_proj", "=", "points_proj", "[", ":", "2", ",", ":", "]", ".", "astype", "(", "np", ".", "int16", ")", "valid_ind", "=", "np", ".", "where", "(", "(", "points_proj", "[", "0", ",", ":", "]", ">=", "0", ")", "&", "(", "points_proj", "[", "1", ",", ":", "]", ">=", "0", ")", "&", "(", "points_proj", "[", "0", ",", ":", "]", "<", "self", ".", "width", ")", "&", "(", "points_proj", "[", "1", ",", ":", "]", "<", "self", ".", "height", ")", ")", "[", "0", "]", "depth_data", "=", "np", ".", "zeros", "(", "[", "self", ".", "height", ",", "self", ".", "width", "]", ")", "depth_data", "[", "points_proj", "[", "1", ",", "valid_ind", "]", ",", "points_proj", "[", "0", ",", "valid_ind", "]", "]", "=", "point_depths", "[", "valid_ind", "]", "return", "DepthImage", "(", "depth_data", ",", "frame", "=", "self", ".", "frame", ")" ]
Projects a point cloud onto the camera image plane and creates a depth image. Zero depth means no point projected into the camera at that pixel location (i.e. infinite depth). Parameters ---------- point_cloud : :obj:`autolab_core.PointCloud` or :obj:`autolab_core.Point` A PointCloud or Point to project onto the camera image plane. round_px : bool If True, projections are rounded to the nearest pixel. Returns ------- :obj:`DepthImage` A DepthImage generated from projecting the point cloud into the camera. Raises ------ ValueError If the input is not a PointCloud or Point in the same reference frame as the camera.
[ "Projects", "a", "point", "cloud", "onto", "the", "camera", "image", "plane", "and", "creates", "a", "depth", "image", ".", "Zero", "depth", "means", "no", "point", "projected", "into", "the", "camera", "at", "that", "pixel", "location", "(", "i", ".", "e", ".", "infinite", "depth", ")", "." ]
python
train
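The last few lines of project_to_image() above, masking out-of-frame pixels and scattering depths into an image, can be exercised on their own with toy NumPy data (all values below are made up):

import numpy as np

height, width = 4, 5
px = np.array([0, 2, 4, 7])             # column index of each projected point
py = np.array([1, 3, 0, 2])             # row index of each projected point
depths = np.array([1.5, 2.0, 0.7, 3.0])

valid = np.where((px >= 0) & (py >= 0) & (px < width) & (py < height))[0]

depth_image = np.zeros((height, width))
depth_image[py[valid], px[valid]] = depths[valid]
print(depth_image)   # the point at column 7 falls outside the image and is dropped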
IBMStreams/pypi.streamsx
streamsx/rest_primitives.py
https://github.com/IBMStreams/pypi.streamsx/blob/abd67b4757120f6f805787fba390f53e9df9cdd8/streamsx/rest_primitives.py#L2465-L2488
def update(self, properties=None, description=None): """Update this application configuration. To create or update a property provide its key-value pair in `properties`. To delete a property provide its key with the value ``None`` in properties. Args: properties (dict): Property values to be updated. If ``None`` the properties are unchanged. description (str): Description for the configuration. If ``None`` the description is unchanged. Returns: ApplicationConfiguration: self """ cv = ApplicationConfiguration._props(properties=properties, description=description) res = self.rest_client.session.patch(self.rest_self, headers = {'Accept' : 'application/json', 'Content-Type' : 'application/json'}, json=cv) _handle_http_errors(res) self.json_rep = res.json() return self
[ "def", "update", "(", "self", ",", "properties", "=", "None", ",", "description", "=", "None", ")", ":", "cv", "=", "ApplicationConfiguration", ".", "_props", "(", "properties", "=", "properties", ",", "description", "=", "description", ")", "res", "=", "self", ".", "rest_client", ".", "session", ".", "patch", "(", "self", ".", "rest_self", ",", "headers", "=", "{", "'Accept'", ":", "'application/json'", ",", "'Content-Type'", ":", "'application/json'", "}", ",", "json", "=", "cv", ")", "_handle_http_errors", "(", "res", ")", "self", ".", "json_rep", "=", "res", ".", "json", "(", ")", "return", "self" ]
Update this application configuration. To create or update a property provide its key-value pair in `properties`. To delete a property provide its key with the value ``None`` in properties. Args: properties (dict): Property values to be updated. If ``None`` the properties are unchanged. description (str): Description for the configuration. If ``None`` the description is unchanged. Returns: ApplicationConfiguration: self
[ "Update", "this", "application", "configuration", "." ]
python
train
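Stripped of the streamsx-specific pieces, update() above is the usual PATCH-then-refresh REST pattern. A generic sketch with the requests library (the URL and payload are illustrative only, not the Streams REST API):

import requests

def patch_json(session, url, body):
    res = session.patch(url,
                        headers={'Accept': 'application/json',
                                 'Content-Type': 'application/json'},
                        json=body)
    res.raise_for_status()   # plays the role of _handle_http_errors above
    return res.json()

# session = requests.Session()
# patch_json(session, 'https://host/appconfigs/db', {'description': 'updated'})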
ReadabilityHoldings/python-readability-api
readability/clients.py
https://github.com/ReadabilityHoldings/python-readability-api/blob/4b746166877d5a8dc29222aedccb18c2506a5385/readability/clients.py#L128-L152
def get_bookmarks(self, **filters): """ Get Bookmarks for the current user. Filters: :param archive: Filter Bookmarks returned by archived status. :param favorite: Filter Bookmarks returned by favorite status. :param domain: Filter Bookmarks returned by a domain. :param added_since: Filter bookmarks by date added (since this date). :param added_until: Filter bookmarks by date added (until this date). :param opened_since: Filter bookmarks by date opened (since this date). :param opened_until: Filter bookmarks by date opened (until this date). :param archived_since: Filter bookmarks by date archived (since this date.) :param archived_until: Filter bookmarks by date archived (until this date.) :param updated_since: Filter bookmarks by date updated (since this date.) :param updated_until: Filter bookmarks by date updated (until this date.) :param page: What page of results to return. Default is 1. :param per_page: How many results to return per page. Default is 20, max is 50. :param only_deleted: Return only bookmarks that this user has deleted. :param tags: Comma separated string of tags to filter bookmarks. """ filter_dict = filter_args_to_dict(filters, ACCEPTED_BOOKMARK_FILTERS) url = self._generate_url('bookmarks', query_params=filter_dict) return self.get(url)
[ "def", "get_bookmarks", "(", "self", ",", "*", "*", "filters", ")", ":", "filter_dict", "=", "filter_args_to_dict", "(", "filters", ",", "ACCEPTED_BOOKMARK_FILTERS", ")", "url", "=", "self", ".", "_generate_url", "(", "'bookmarks'", ",", "query_params", "=", "filter_dict", ")", "return", "self", ".", "get", "(", "url", ")" ]
Get Bookmarks for the current user. Filters: :param archive: Filter Bookmarks returned by archived status. :param favorite: Filter Bookmarks returned by favorite status. :param domain: Filter Bookmarks returned by a domain. :param added_since: Filter bookmarks by date added (since this date). :param added_until: Filter bookmarks by date added (until this date). :param opened_since: Filter bookmarks by date opened (since this date). :param opened_until: Filter bookmarks by date opened (until this date). :param archived_since: Filter bookmarks by date archived (since this date.) :param archived_until: Filter bookmarks by date archived (until this date.) :param updated_since: Filter bookmarks by date updated (since this date.) :param updated_until: Filter bookmarks by date updated (until this date.) :param page: What page of results to return. Default is 1. :param per_page: How many results to return per page. Default is 20, max is 50. :param only_deleted: Return only bookmarks that this user has deleted. :param tags: Comma separated string of tags to filter bookmarks.
[ "Get", "Bookmarks", "for", "the", "current", "user", "." ]
python
train
ContextLab/hypertools
hypertools/tools/format_data.py
https://github.com/ContextLab/hypertools/blob/b76c7ac8061998b560e969ff8e4f4c915088e7a0/hypertools/tools/format_data.py#L10-L164
def format_data(x, vectorizer='CountVectorizer', semantic='LatentDirichletAllocation', corpus='wiki', ppca=True, text_align='hyper'): """ Formats data into a list of numpy arrays This function is useful to identify rows of your array that contain missing data or nans. The returned indices can be used to remove the rows with missing data, or label the missing data points that are interpolated using PPCA. Parameters ---------- x : numpy array, dataframe, string or (mixed) list The data to convert vectorizer : str, dict, class or class instance The vectorizer to use. Built-in options are 'CountVectorizer' or 'TfidfVectorizer'. To change default parameters, set to a dictionary e.g. {'model' : 'CountVectorizer', 'params' : {'max_features' : 10}}. See http://scikit-learn.org/stable/modules/classes.html#module-sklearn.feature_extraction.text for details. You can also specify your own vectorizer model as a class, or class instance. With either option, the class must have a fit_transform method (see here: http://scikit-learn.org/stable/data_transforms.html). If a class, pass any parameters as a dictionary to vectorizer_params. If a class instance, no parameters can be passed. semantic : str, dict, class or class instance Text model to use to transform text data. Built-in options are 'LatentDirichletAllocation' or 'NMF' (default: LDA). To change default parameters, set to a dictionary e.g. {'model' : 'NMF', 'params' : {'n_components' : 10}}. See http://scikit-learn.org/stable/modules/classes.html#module-sklearn.decomposition for details on the two model options. You can also specify your own text model as a class, or class instance. With either option, the class must have a fit_transform method (see here: http://scikit-learn.org/stable/data_transforms.html). If a class, pass any parameters as a dictionary to text_params. If a class instance, no parameters can be passed. corpus : list (or list of lists) of text samples or 'wiki', 'nips', 'sotus'. Text to use to fit the semantic model (optional). If set to 'wiki', 'nips' or 'sotus' and the default semantic and vectorizer models are used, a pretrained model will be loaded which can save a lot of time. ppca : bool Performs PPCA to fill in missing values (default: True) text_align : str Alignment algorithm to use when both text and numerical data are passed. If numerical arrays have the same shape, and the text data contains the same number of samples, the text and numerical data are automatically aligned to a common space. Example use case: an array of movie frames (frames by pixels) and text descriptions of the frame. In this case, the movie and text will be automatically aligned to the same space (default: hyperalignment). 
Returns ---------- data : list of numpy arrays A list of formatted arrays """ # not sure why i needed to import here, but its the only way I could get it to work from .df2mat import df2mat from .text2mat import text2mat from ..datageometry import DataGeometry # if x is not a list, make it one if type(x) is not list: x = [x] if all([isinstance(xi, six.string_types) for xi in x]): x = [x] # check data type for each element in list dtypes = list(map(get_type, x)) # handle text data: if any(map(lambda x: x in ['list_str', 'str', 'arr_str'], dtypes)): # default text args text_args = { 'vectorizer' : vectorizer, 'semantic' : semantic, 'corpus' : corpus } # filter text data text_data = [] for i,j in zip(x, dtypes): if j in ['list_str', 'str', 'arr_str']: text_data.append(np.array(i).reshape(-1, 1)) # convert text to numerical matrices text_data = text2mat(text_data, **text_args) # replace the text data with transformed data processed_x = [] textidx=0 for i, dtype in enumerate(dtypes): if dtype in ['list_str', 'str', 'arr_str']: processed_x.append(text_data[textidx]) textidx+=1 elif dtype == 'df': processed_x.append(df2mat(x[i])) elif dtype == 'geo': text_args = { 'vectorizer' : vectorizer, 'semantic' : semantic, 'corpus' : corpus } for j in format_data(x[i].get_data(), **text_args): processed_x.append(j) else: processed_x.append(x[i]) # reshape anything that is 1d if any([i.ndim<=1 for i in processed_x]): processed_x = [np.reshape(i,(i.shape[0],1)) if i.ndim==1 else i for i in processed_x] contains_text = any([dtype in ['list_str', 'str', 'arr_str'] for dtype in dtypes]) contains_num = any([dtype in ['list_num', 'array', 'df', 'arr_num'] for dtype in dtypes]) # if there are any nans in any of the lists, use ppca if ppca is True: if contains_num: num_data = [] for i,j in zip(processed_x, dtypes): if j in ['list_num', 'array', 'df', 'arr_num']: num_data.append(i) if np.isnan(np.vstack(num_data)).any(): warnings.warn('Missing data: Inexact solution computed with PPCA (see https://github.com/allentran/pca-magic for details)') num_data = fill_missing(num_data) x_temp = [] for dtype in dtypes: if dtype in ['list_str', 'str', 'arr_str']: x_temp.append(text_data.pop(0)) elif dtype in ['list_num', 'array', 'df', 'arr_num']: x_temp.append(num_data.pop(0)) processed_x = x_temp # if input data contains both text and numerical data if contains_num and contains_text: # and if they have the same number of samples if np.unique(np.array([i.shape[0] for i, j in zip(processed_x, dtypes)])).shape[0] == 1: from .align import align as aligner # align the data warnings.warn('Numerical and text data with same number of ' 'samples detected. Aligning data to a common space.') processed_x = aligner(processed_x, align=text_align, format_data=False) return processed_x
[ "def", "format_data", "(", "x", ",", "vectorizer", "=", "'CountVectorizer'", ",", "semantic", "=", "'LatentDirichletAllocation'", ",", "corpus", "=", "'wiki'", ",", "ppca", "=", "True", ",", "text_align", "=", "'hyper'", ")", ":", "# not sure why i needed to import here, but its the only way I could get it to work", "from", ".", "df2mat", "import", "df2mat", "from", ".", "text2mat", "import", "text2mat", "from", ".", ".", "datageometry", "import", "DataGeometry", "# if x is not a list, make it one", "if", "type", "(", "x", ")", "is", "not", "list", ":", "x", "=", "[", "x", "]", "if", "all", "(", "[", "isinstance", "(", "xi", ",", "six", ".", "string_types", ")", "for", "xi", "in", "x", "]", ")", ":", "x", "=", "[", "x", "]", "# check data type for each element in list", "dtypes", "=", "list", "(", "map", "(", "get_type", ",", "x", ")", ")", "# handle text data:", "if", "any", "(", "map", "(", "lambda", "x", ":", "x", "in", "[", "'list_str'", ",", "'str'", ",", "'arr_str'", "]", ",", "dtypes", ")", ")", ":", "# default text args", "text_args", "=", "{", "'vectorizer'", ":", "vectorizer", ",", "'semantic'", ":", "semantic", ",", "'corpus'", ":", "corpus", "}", "# filter text data", "text_data", "=", "[", "]", "for", "i", ",", "j", "in", "zip", "(", "x", ",", "dtypes", ")", ":", "if", "j", "in", "[", "'list_str'", ",", "'str'", ",", "'arr_str'", "]", ":", "text_data", ".", "append", "(", "np", ".", "array", "(", "i", ")", ".", "reshape", "(", "-", "1", ",", "1", ")", ")", "# convert text to numerical matrices", "text_data", "=", "text2mat", "(", "text_data", ",", "*", "*", "text_args", ")", "# replace the text data with transformed data", "processed_x", "=", "[", "]", "textidx", "=", "0", "for", "i", ",", "dtype", "in", "enumerate", "(", "dtypes", ")", ":", "if", "dtype", "in", "[", "'list_str'", ",", "'str'", ",", "'arr_str'", "]", ":", "processed_x", ".", "append", "(", "text_data", "[", "textidx", "]", ")", "textidx", "+=", "1", "elif", "dtype", "==", "'df'", ":", "processed_x", ".", "append", "(", "df2mat", "(", "x", "[", "i", "]", ")", ")", "elif", "dtype", "==", "'geo'", ":", "text_args", "=", "{", "'vectorizer'", ":", "vectorizer", ",", "'semantic'", ":", "semantic", ",", "'corpus'", ":", "corpus", "}", "for", "j", "in", "format_data", "(", "x", "[", "i", "]", ".", "get_data", "(", ")", ",", "*", "*", "text_args", ")", ":", "processed_x", ".", "append", "(", "j", ")", "else", ":", "processed_x", ".", "append", "(", "x", "[", "i", "]", ")", "# reshape anything that is 1d", "if", "any", "(", "[", "i", ".", "ndim", "<=", "1", "for", "i", "in", "processed_x", "]", ")", ":", "processed_x", "=", "[", "np", ".", "reshape", "(", "i", ",", "(", "i", ".", "shape", "[", "0", "]", ",", "1", ")", ")", "if", "i", ".", "ndim", "==", "1", "else", "i", "for", "i", "in", "processed_x", "]", "contains_text", "=", "any", "(", "[", "dtype", "in", "[", "'list_str'", ",", "'str'", ",", "'arr_str'", "]", "for", "dtype", "in", "dtypes", "]", ")", "contains_num", "=", "any", "(", "[", "dtype", "in", "[", "'list_num'", ",", "'array'", ",", "'df'", ",", "'arr_num'", "]", "for", "dtype", "in", "dtypes", "]", ")", "# if there are any nans in any of the lists, use ppca", "if", "ppca", "is", "True", ":", "if", "contains_num", ":", "num_data", "=", "[", "]", "for", "i", ",", "j", "in", "zip", "(", "processed_x", ",", "dtypes", ")", ":", "if", "j", "in", "[", "'list_num'", ",", "'array'", ",", "'df'", ",", "'arr_num'", "]", ":", "num_data", ".", "append", "(", "i", ")", "if", "np", ".", "isnan", "(", "np", ".", 
"vstack", "(", "num_data", ")", ")", ".", "any", "(", ")", ":", "warnings", ".", "warn", "(", "'Missing data: Inexact solution computed with PPCA (see https://github.com/allentran/pca-magic for details)'", ")", "num_data", "=", "fill_missing", "(", "num_data", ")", "x_temp", "=", "[", "]", "for", "dtype", "in", "dtypes", ":", "if", "dtype", "in", "[", "'list_str'", ",", "'str'", ",", "'arr_str'", "]", ":", "x_temp", ".", "append", "(", "text_data", ".", "pop", "(", "0", ")", ")", "elif", "dtype", "in", "[", "'list_num'", ",", "'array'", ",", "'df'", ",", "'arr_num'", "]", ":", "x_temp", ".", "append", "(", "num_data", ".", "pop", "(", "0", ")", ")", "processed_x", "=", "x_temp", "# if input data contains both text and numerical data", "if", "contains_num", "and", "contains_text", ":", "# and if they have the same number of samples", "if", "np", ".", "unique", "(", "np", ".", "array", "(", "[", "i", ".", "shape", "[", "0", "]", "for", "i", ",", "j", "in", "zip", "(", "processed_x", ",", "dtypes", ")", "]", ")", ")", ".", "shape", "[", "0", "]", "==", "1", ":", "from", ".", "align", "import", "align", "as", "aligner", "# align the data", "warnings", ".", "warn", "(", "'Numerical and text data with same number of '", "'samples detected. Aligning data to a common space.'", ")", "processed_x", "=", "aligner", "(", "processed_x", ",", "align", "=", "text_align", ",", "format_data", "=", "False", ")", "return", "processed_x" ]
Formats data into a list of numpy arrays This function is useful to identify rows of your array that contain missing data or nans. The returned indices can be used to remove the rows with missing data, or label the missing data points that are interpolated using PPCA. Parameters ---------- x : numpy array, dataframe, string or (mixed) list The data to convert vectorizer : str, dict, class or class instance The vectorizer to use. Built-in options are 'CountVectorizer' or 'TfidfVectorizer'. To change default parameters, set to a dictionary e.g. {'model' : 'CountVectorizer', 'params' : {'max_features' : 10}}. See http://scikit-learn.org/stable/modules/classes.html#module-sklearn.feature_extraction.text for details. You can also specify your own vectorizer model as a class, or class instance. With either option, the class must have a fit_transform method (see here: http://scikit-learn.org/stable/data_transforms.html). If a class, pass any parameters as a dictionary to vectorizer_params. If a class instance, no parameters can be passed. semantic : str, dict, class or class instance Text model to use to transform text data. Built-in options are 'LatentDirichletAllocation' or 'NMF' (default: LDA). To change default parameters, set to a dictionary e.g. {'model' : 'NMF', 'params' : {'n_components' : 10}}. See http://scikit-learn.org/stable/modules/classes.html#module-sklearn.decomposition for details on the two model options. You can also specify your own text model as a class, or class instance. With either option, the class must have a fit_transform method (see here: http://scikit-learn.org/stable/data_transforms.html). If a class, pass any parameters as a dictionary to text_params. If a class instance, no parameters can be passed. corpus : list (or list of lists) of text samples or 'wiki', 'nips', 'sotus'. Text to use to fit the semantic model (optional). If set to 'wiki', 'nips' or 'sotus' and the default semantic and vectorizer models are used, a pretrained model will be loaded which can save a lot of time. ppca : bool Performs PPCA to fill in missing values (default: True) text_align : str Alignment algorithm to use when both text and numerical data are passed. If numerical arrays have the same shape, and the text data contains the same number of samples, the text and numerical data are automatically aligned to a common space. Example use case: an array of movie frames (frames by pixels) and text descriptions of the frame. In this case, the movie and text will be automatically aligned to the same space (default: hyperalignment). Returns ---------- data : list of numpy arrays A list of formatted arrays
[ "Formats", "data", "into", "a", "list", "of", "numpy", "arrays" ]
python
train
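One small step of format_data() above that is easy to try in isolation is the 1-D reshape: every 1-D array in the list is turned into a column vector so later stages can assume 2-D input. A self-contained NumPy sketch:

import numpy as np

arrays = [np.arange(5), np.ones((5, 2))]
arrays = [a.reshape(a.shape[0], 1) if a.ndim == 1 else a for a in arrays]
print([a.shape for a in arrays])   # [(5, 1), (5, 2)]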
thiagopbueno/tf-rddlsim
tfrddlsim/policy/random_policy.py
https://github.com/thiagopbueno/tf-rddlsim/blob/d7102a0ad37d179dbb23141640254ea383d3b43f/tfrddlsim/policy/random_policy.py#L70-L86
def _sample_actions(self, state: Sequence[tf.Tensor]) -> Tuple[Sequence[tf.Tensor], tf.Tensor, tf.Tensor]: '''Returns sampled action fluents and tensors related to the sampling. Args: state (Sequence[tf.Tensor]): A list of state fluents. Returns: Tuple[Sequence[tf.Tensor], tf.Tensor, tf.Tensor]: A tuple with action fluents, an integer tensor for the number of samples, and a boolean tensor for checking all action preconditions. ''' default = self.compiler.compile_default_action(self.batch_size) bound_constraints = self.compiler.compile_action_bound_constraints(state) action = self._sample_action(bound_constraints, default) n, action, checking = self._check_preconditions(state, action, bound_constraints, default) return action, n, checking
[ "def", "_sample_actions", "(", "self", ",", "state", ":", "Sequence", "[", "tf", ".", "Tensor", "]", ")", "->", "Tuple", "[", "Sequence", "[", "tf", ".", "Tensor", "]", ",", "tf", ".", "Tensor", ",", "tf", ".", "Tensor", "]", ":", "default", "=", "self", ".", "compiler", ".", "compile_default_action", "(", "self", ".", "batch_size", ")", "bound_constraints", "=", "self", ".", "compiler", ".", "compile_action_bound_constraints", "(", "state", ")", "action", "=", "self", ".", "_sample_action", "(", "bound_constraints", ",", "default", ")", "n", ",", "action", ",", "checking", "=", "self", ".", "_check_preconditions", "(", "state", ",", "action", ",", "bound_constraints", ",", "default", ")", "return", "action", ",", "n", ",", "checking" ]
Returns sampled action fluents and tensors related to the sampling. Args: state (Sequence[tf.Tensor]): A list of state fluents. Returns: Tuple[Sequence[tf.Tensor], tf.Tensor, tf.Tensor]: A tuple with action fluents, an integer tensor for the number of samples, and a boolean tensor for checking all action preconditions.
[ "Returns", "sampled", "action", "fluents", "and", "tensors", "related", "to", "the", "sampling", "." ]
python
train
wolfhong/formic
formic/formic.py
https://github.com/wolfhong/formic/blob/0d81eb88dcbb6fa705194fc6ccf2993f4abbaa76/formic/formic.py#L835-L857
def match(self, files): """Given a set of files in this directory, returns all the files that match the :class:`Pattern` instances which match this directory.""" if not files: return set() if (self.matched_inherit.all_files() or self.matched_and_subdir.all_files() or self.matched_no_subdir.all_files()): # Optimization: one of the matched patterns matches everything # So simply return it return set(files) unmatched = set(files) matched = set() for pattern_set in self._matching_pattern_sets(): pattern_set.match_files(matched, unmatched) if not unmatched: # Optimization: If we have matched all files already # simply return at this point - nothing else to do break return matched
[ "def", "match", "(", "self", ",", "files", ")", ":", "if", "not", "files", ":", "return", "set", "(", ")", "if", "(", "self", ".", "matched_inherit", ".", "all_files", "(", ")", "or", "self", ".", "matched_and_subdir", ".", "all_files", "(", ")", "or", "self", ".", "matched_no_subdir", ".", "all_files", "(", ")", ")", ":", "# Optimization: one of the matched patterns matches everything", "# So simply return it", "return", "set", "(", "files", ")", "unmatched", "=", "set", "(", "files", ")", "matched", "=", "set", "(", ")", "for", "pattern_set", "in", "self", ".", "_matching_pattern_sets", "(", ")", ":", "pattern_set", ".", "match_files", "(", "matched", ",", "unmatched", ")", "if", "not", "unmatched", ":", "# Optimization: If we have matched all files already", "# simply return at this point - nothing else to do", "break", "return", "matched" ]
Given a set of files in this directory, returns all the files that match the :class:`Pattern` instances which match this directory.
[ "Given", "a", "set", "of", "files", "in", "this", "directory", "returns", "all", "the", "files", "that", "match", "the", ":", "class", ":", "Pattern", "instances", "which", "match", "this", "directory", "." ]
python
train
20c/vaping
vaping/config.py
https://github.com/20c/vaping/blob/c51f00586c99edb3d51e4abdbdfe3174755533ee/vaping/config.py#L8-L33
def parse_interval(val): """ converts a string to float of seconds .5 = 500ms 90 = 1m30s """ re_intv = re.compile(r"([\d\.]+)([a-zA-Z]+)") val = val.strip() total = 0.0 for match in re_intv.findall(val): unit = match[1] count = float(match[0]) if unit == 's': total += count elif unit == 'm': total += count * 60 elif unit == 'ms': total += count / 1000 elif unit == "h": total += count * 3600 elif unit == 'd': total += count * 86400 else: raise ValueError("unknown unit from interval string '%s'" % val) return total
[ "def", "parse_interval", "(", "val", ")", ":", "re_intv", "=", "re", ".", "compile", "(", "r\"([\\d\\.]+)([a-zA-Z]+)\"", ")", "val", "=", "val", ".", "strip", "(", ")", "total", "=", "0.0", "for", "match", "in", "re_intv", ".", "findall", "(", "val", ")", ":", "unit", "=", "match", "[", "1", "]", "count", "=", "float", "(", "match", "[", "0", "]", ")", "if", "unit", "==", "'s'", ":", "total", "+=", "count", "elif", "unit", "==", "'m'", ":", "total", "+=", "count", "*", "60", "elif", "unit", "==", "'ms'", ":", "total", "+=", "count", "/", "1000", "elif", "unit", "==", "\"h\"", ":", "total", "+=", "count", "*", "3600", "elif", "unit", "==", "'d'", ":", "total", "+=", "count", "*", "86400", "else", ":", "raise", "ValueError", "(", "\"unknown unit from interval string '%s'\"", "%", "val", ")", "return", "total" ]
converts a string to float of seconds .5 = 500ms 90 = 1m30s
[ "converts", "a", "string", "to", "float", "of", "seconds", ".", "5", "=", "500ms", "90", "=", "1m30s" ]
python
train
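parse_interval() above can be checked directly against its unit table (the expected values follow from the code as written):

print(parse_interval("1m30s"))   # 90.0
print(parse_interval("500ms"))   # 0.5
print(parse_interval("2h"))      # 7200.0
print(parse_interval("1d"))      # 86400.0
# A bare number with no unit, e.g. "90", matches nothing in the regex and
# therefore yields 0.0 with the implementation as written.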
awesto/djangoshop-stripe
shop_stripe/payment.py
https://github.com/awesto/djangoshop-stripe/blob/010d4642f971961cfeb415520ad819b3751281cb/shop_stripe/payment.py#L28-L38
def get_payment_request(self, cart, request): """ From the given request, add a snippet to the page. """ try: self.charge(cart, request) thank_you_url = OrderModel.objects.get_latest_url() js_expression = 'window.location.href="{}";'.format(thank_you_url) return js_expression except (KeyError, stripe.error.StripeError) as err: raise ValidationError(err)
[ "def", "get_payment_request", "(", "self", ",", "cart", ",", "request", ")", ":", "try", ":", "self", ".", "charge", "(", "cart", ",", "request", ")", "thank_you_url", "=", "OrderModel", ".", "objects", ".", "get_latest_url", "(", ")", "js_expression", "=", "'window.location.href=\"{}\";'", ".", "format", "(", "thank_you_url", ")", "return", "js_expression", "except", "(", "KeyError", ",", "stripe", ".", "error", ".", "StripeError", ")", "as", "err", ":", "raise", "ValidationError", "(", "err", ")" ]
From the given request, add a snippet to the page.
[ "From", "the", "given", "request", "add", "a", "snippet", "to", "the", "page", "." ]
python
train
KnorrFG/pyparadigm
pyparadigm/surface_composition.py
https://github.com/KnorrFG/pyparadigm/blob/69944cdf3ce2f6414ae1aa1d27a0d8c6e5fb3fd3/pyparadigm/surface_composition.py#L264-L282
def from_scale(scale_w, scale_h=None): """Creates a padding by the remaining space after scaling the content. E.g. Padding.from_scale(0.5) would produce Padding(0.25, 0.25, 0.25, 0.25) and Padding.from_scale(0.5, 1) would produce Padding(0.25, 0.25, 0, 0) because the content would not be scaled (since scale_h=1) and therefore there would be no vertical padding. If scale_h is not specified scale_h=scale_w is used as default :param scale_w: horizontal scaling factors :type scale_w: float :param scale_h: vertical scaling factor :type scale_h: float """ if not scale_h: scale_h = scale_w w_padding = [(1 - scale_w) * 0.5] * 2 h_padding = [(1 - scale_h) * 0.5] * 2 return Padding(*w_padding, *h_padding)
[ "def", "from_scale", "(", "scale_w", ",", "scale_h", "=", "None", ")", ":", "if", "not", "scale_h", ":", "scale_h", "=", "scale_w", "w_padding", "=", "[", "(", "1", "-", "scale_w", ")", "*", "0.5", "]", "*", "2", "h_padding", "=", "[", "(", "1", "-", "scale_h", ")", "*", "0.5", "]", "*", "2", "return", "Padding", "(", "*", "w_padding", ",", "*", "h_padding", ")" ]
Creates a padding from the remaining space after scaling the content.

E.g. Padding.from_scale(0.5) would produce Padding(0.25, 0.25, 0.25, 0.25)
and Padding.from_scale(0.5, 1) would produce Padding(0.25, 0.25, 0, 0)
because the content would not be scaled (since scale_h=1) and therefore
there would be no vertical padding.

If scale_h is not specified, scale_h=scale_w is used as the default.

:param scale_w: horizontal scaling factor
:type scale_w: float
:param scale_h: vertical scaling factor
:type scale_h: float
[ "Creates", "a", "padding", "by", "the", "remaining", "space", "after", "scaling", "the", "content", "." ]
python
train
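The arithmetic in from_scale() above, spelled out with plain numbers and no dependency on the Padding class:

scale_w = scale_h = 0.5
w_pad = (1 - scale_w) * 0.5    # 0.25 of the width on the left and on the right
h_pad = (1 - scale_h) * 0.5    # 0.25 of the height on the top and on the bottom
print(w_pad, h_pad)            # 0.25 0.25, matching Padding(0.25, 0.25, 0.25, 0.25)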
twilio/twilio-python
twilio/rest/api/v2010/account/connect_app.py
https://github.com/twilio/twilio-python/blob/c867895f55dcc29f522e6e8b8868d0d18483132f/twilio/rest/api/v2010/account/connect_app.py#L231-L273
def update(self, authorize_redirect_url=values.unset, company_name=values.unset, deauthorize_callback_method=values.unset, deauthorize_callback_url=values.unset, description=values.unset, friendly_name=values.unset, homepage_url=values.unset, permissions=values.unset): """ Update the ConnectAppInstance :param unicode authorize_redirect_url: The URL to redirect the user to after authorization :param unicode company_name: The company name to set for the Connect App :param unicode deauthorize_callback_method: The HTTP method to use when calling deauthorize_callback_url :param unicode deauthorize_callback_url: The URL to call to de-authorize the Connect App :param unicode description: A description of the Connect App :param unicode friendly_name: A string to describe the resource :param unicode homepage_url: A public URL where users can obtain more information :param ConnectAppInstance.Permission permissions: The set of permissions that your ConnectApp will request :returns: Updated ConnectAppInstance :rtype: twilio.rest.api.v2010.account.connect_app.ConnectAppInstance """ data = values.of({ 'AuthorizeRedirectUrl': authorize_redirect_url, 'CompanyName': company_name, 'DeauthorizeCallbackMethod': deauthorize_callback_method, 'DeauthorizeCallbackUrl': deauthorize_callback_url, 'Description': description, 'FriendlyName': friendly_name, 'HomepageUrl': homepage_url, 'Permissions': serialize.map(permissions, lambda e: e), }) payload = self._version.update( 'POST', self._uri, data=data, ) return ConnectAppInstance( self._version, payload, account_sid=self._solution['account_sid'], sid=self._solution['sid'], )
[ "def", "update", "(", "self", ",", "authorize_redirect_url", "=", "values", ".", "unset", ",", "company_name", "=", "values", ".", "unset", ",", "deauthorize_callback_method", "=", "values", ".", "unset", ",", "deauthorize_callback_url", "=", "values", ".", "unset", ",", "description", "=", "values", ".", "unset", ",", "friendly_name", "=", "values", ".", "unset", ",", "homepage_url", "=", "values", ".", "unset", ",", "permissions", "=", "values", ".", "unset", ")", ":", "data", "=", "values", ".", "of", "(", "{", "'AuthorizeRedirectUrl'", ":", "authorize_redirect_url", ",", "'CompanyName'", ":", "company_name", ",", "'DeauthorizeCallbackMethod'", ":", "deauthorize_callback_method", ",", "'DeauthorizeCallbackUrl'", ":", "deauthorize_callback_url", ",", "'Description'", ":", "description", ",", "'FriendlyName'", ":", "friendly_name", ",", "'HomepageUrl'", ":", "homepage_url", ",", "'Permissions'", ":", "serialize", ".", "map", "(", "permissions", ",", "lambda", "e", ":", "e", ")", ",", "}", ")", "payload", "=", "self", ".", "_version", ".", "update", "(", "'POST'", ",", "self", ".", "_uri", ",", "data", "=", "data", ",", ")", "return", "ConnectAppInstance", "(", "self", ".", "_version", ",", "payload", ",", "account_sid", "=", "self", ".", "_solution", "[", "'account_sid'", "]", ",", "sid", "=", "self", ".", "_solution", "[", "'sid'", "]", ",", ")" ]
Update the ConnectAppInstance :param unicode authorize_redirect_url: The URL to redirect the user to after authorization :param unicode company_name: The company name to set for the Connect App :param unicode deauthorize_callback_method: The HTTP method to use when calling deauthorize_callback_url :param unicode deauthorize_callback_url: The URL to call to de-authorize the Connect App :param unicode description: A description of the Connect App :param unicode friendly_name: A string to describe the resource :param unicode homepage_url: A public URL where users can obtain more information :param ConnectAppInstance.Permission permissions: The set of permissions that your ConnectApp will request :returns: Updated ConnectAppInstance :rtype: twilio.rest.api.v2010.account.connect_app.ConnectAppInstance
[ "Update", "the", "ConnectAppInstance" ]
python
train
mozilla/elasticutils
elasticutils/contrib/django/__init__.py
https://github.com/mozilla/elasticutils/blob/b880cc5d51fb1079b0581255ec664c1ec934656e/elasticutils/contrib/django/__init__.py#L298-L310
def get_indexable(cls): """Returns the queryset of ids of all things to be indexed. Defaults to:: cls.get_model().objects.order_by('id').values_list( 'id', flat=True) :returns: iterable of ids of objects to be indexed """ model = cls.get_model() return model.objects.order_by('id').values_list('id', flat=True)
[ "def", "get_indexable", "(", "cls", ")", ":", "model", "=", "cls", ".", "get_model", "(", ")", "return", "model", ".", "objects", ".", "order_by", "(", "'id'", ")", ".", "values_list", "(", "'id'", ",", "flat", "=", "True", ")" ]
Returns the queryset of ids of all things to be indexed. Defaults to:: cls.get_model().objects.order_by('id').values_list( 'id', flat=True) :returns: iterable of ids of objects to be indexed
[ "Returns", "the", "queryset", "of", "ids", "of", "all", "things", "to", "be", "indexed", "." ]
python
train
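A hedged sketch of how the get_indexable hook above is typically overridden: BlogEntry, BlogEntryMappingType, and the published=True filter are invented for illustration; only the override pattern itself comes from the record.

from elasticutils.contrib.django import Indexable, MappingType

class BlogEntryMappingType(MappingType, Indexable):
    @classmethod
    def get_model(cls):
        # Hypothetical Django model, imported lazily to avoid import cycles.
        from myapp.models import BlogEntry
        return BlogEntry

    @classmethod
    def get_indexable(cls):
        # Narrow the default "all ids ordered by id" behaviour to published rows only.
        return (cls.get_model().objects
                .filter(published=True)
                .order_by('id')
                .values_list('id', flat=True))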
williamgilpin/pypdb
pypdb/pypdb.py
https://github.com/williamgilpin/pypdb/blob/bfb9e1b15b4ad097c5add50c4c176ac6cb28ee15/pypdb/pypdb.py#L313-L341
def get_info(pdb_id, url_root='http://www.rcsb.org/pdb/rest/describeMol?structureId='): '''Look up all information about a given PDB ID Parameters ---------- pdb_id : string A 4 character string giving a pdb entry of interest url_root : string The string root of the specific url for the request type Returns ------- out : OrderedDict An ordered dictionary object corresponding to bare xml ''' url = url_root + pdb_id req = urllib.request.Request(url) f = urllib.request.urlopen(req) result = f.read() assert result out = xmltodict.parse(result,process_namespaces=True) return out
[ "def", "get_info", "(", "pdb_id", ",", "url_root", "=", "'http://www.rcsb.org/pdb/rest/describeMol?structureId='", ")", ":", "url", "=", "url_root", "+", "pdb_id", "req", "=", "urllib", ".", "request", ".", "Request", "(", "url", ")", "f", "=", "urllib", ".", "request", ".", "urlopen", "(", "req", ")", "result", "=", "f", ".", "read", "(", ")", "assert", "result", "out", "=", "xmltodict", ".", "parse", "(", "result", ",", "process_namespaces", "=", "True", ")", "return", "out" ]
Look up all information about a given PDB ID Parameters ---------- pdb_id : string A 4 character string giving a pdb entry of interest url_root : string The string root of the specific url for the request type Returns ------- out : OrderedDict An ordered dictionary object corresponding to bare xml
[ "Look", "up", "all", "information", "about", "a", "given", "PDB", "ID" ]
python
train
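A short call example for the function above; '4LZA' is an arbitrary PDB ID, the import path mirrors the file path in the record, and the legacy describeMol endpoint it queries may no longer be available, so treat this as a sketch of the recorded API rather than a guaranteed live call.

from pypdb.pypdb import get_info

# The call wraps the returned describeMol XML into an OrderedDict.
info = get_info('4LZA')
print(list(info.keys()))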
twilio/twilio-python
twilio/rest/ip_messaging/v1/service/channel/__init__.py
https://github.com/twilio/twilio-python/blob/c867895f55dcc29f522e6e8b8868d0d18483132f/twilio/rest/ip_messaging/v1/service/channel/__init__.py#L317-L330
def members(self): """ Access the members :returns: twilio.rest.chat.v1.service.channel.member.MemberList :rtype: twilio.rest.chat.v1.service.channel.member.MemberList """ if self._members is None: self._members = MemberList( self._version, service_sid=self._solution['service_sid'], channel_sid=self._solution['sid'], ) return self._members
[ "def", "members", "(", "self", ")", ":", "if", "self", ".", "_members", "is", "None", ":", "self", ".", "_members", "=", "MemberList", "(", "self", ".", "_version", ",", "service_sid", "=", "self", ".", "_solution", "[", "'service_sid'", "]", ",", "channel_sid", "=", "self", ".", "_solution", "[", "'sid'", "]", ",", ")", "return", "self", ".", "_members" ]
Access the members :returns: twilio.rest.chat.v1.service.channel.member.MemberList :rtype: twilio.rest.chat.v1.service.channel.member.MemberList
[ "Access", "the", "members" ]
python
train
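A hedged traversal sketch for reaching this members sub-resource from the top-level client: the service and channel SIDs are placeholders, and the chained services/channels/members path follows the helper library's usual list-resource pattern rather than anything stated in the record itself.

from twilio.rest import Client

client = Client("ACXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX", "your_auth_token")

# Walk service -> channel -> members, then page through the member resources.
members = (client.chat.v1
           .services("ISXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX")
           .channels("CHXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX")
           .members
           .list(limit=20))
for member in members:
    print(member.identity)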
saltstack/salt
salt/utils/vmware.py
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/utils/vmware.py#L1155-L1181
def update_dvs(dvs_ref, dvs_config_spec): ''' Updates a distributed virtual switch with the config_spec. dvs_ref The DVS reference. dvs_config_spec The updated config spec (vim.VMwareDVSConfigSpec) to be applied to the DVS. ''' dvs_name = get_managed_object_name(dvs_ref) log.trace('Updating dvs \'%s\'', dvs_name) try: task = dvs_ref.ReconfigureDvs_Task(dvs_config_spec) except vim.fault.NoPermission as exc: log.exception(exc) raise salt.exceptions.VMwareApiError( 'Not enough permissions. Required privilege: ' '{0}'.format(exc.privilegeId)) except vim.fault.VimFault as exc: log.exception(exc) raise salt.exceptions.VMwareApiError(exc.msg) except vmodl.RuntimeFault as exc: log.exception(exc) raise salt.exceptions.VMwareRuntimeError(exc.msg) wait_for_task(task, dvs_name, six.text_type(task.__class__))
[ "def", "update_dvs", "(", "dvs_ref", ",", "dvs_config_spec", ")", ":", "dvs_name", "=", "get_managed_object_name", "(", "dvs_ref", ")", "log", ".", "trace", "(", "'Updating dvs \\'%s\\''", ",", "dvs_name", ")", "try", ":", "task", "=", "dvs_ref", ".", "ReconfigureDvs_Task", "(", "dvs_config_spec", ")", "except", "vim", ".", "fault", ".", "NoPermission", "as", "exc", ":", "log", ".", "exception", "(", "exc", ")", "raise", "salt", ".", "exceptions", ".", "VMwareApiError", "(", "'Not enough permissions. Required privilege: '", "'{0}'", ".", "format", "(", "exc", ".", "privilegeId", ")", ")", "except", "vim", ".", "fault", ".", "VimFault", "as", "exc", ":", "log", ".", "exception", "(", "exc", ")", "raise", "salt", ".", "exceptions", ".", "VMwareApiError", "(", "exc", ".", "msg", ")", "except", "vmodl", ".", "RuntimeFault", "as", "exc", ":", "log", ".", "exception", "(", "exc", ")", "raise", "salt", ".", "exceptions", ".", "VMwareRuntimeError", "(", "exc", ".", "msg", ")", "wait_for_task", "(", "task", ",", "dvs_name", ",", "six", ".", "text_type", "(", "task", ".", "__class__", ")", ")" ]
Updates a distributed virtual switch with the config_spec. dvs_ref The DVS reference. dvs_config_spec The updated config spec (vim.VMwareDVSConfigSpec) to be applied to the DVS.
[ "Updates", "a", "distributed", "virtual", "switch", "with", "the", "config_spec", "." ]
python
train
wrboyce/telegrambot
telegrambot/api/__init__.py
https://github.com/wrboyce/telegrambot/blob/c35ce19886df4c306a2a19851cc1f63e3066d70d/telegrambot/api/__init__.py#L201-L208
def set_web_hook(self, url=None, certificate=None): """ Use this method to specify a url and receive incoming updates via an outgoing webhook. Whenever there is an update for the bot, we will send an HTTPS POST request to the specified url, containing a JSON-serialized Update. In case of an unsuccessful request, we will give up after a reasonable amount of attempts. """ payload = dict(url=url, certificate=certificate) return self._get('setWebHook', payload)
[ "def", "set_web_hook", "(", "self", ",", "url", "=", "None", ",", "certificate", "=", "None", ")", ":", "payload", "=", "dict", "(", "url", "=", "url", ",", "certificate", "=", "certificate", ")", "return", "self", ".", "_get", "(", "'setWebHook'", ",", "payload", ")" ]
Use this method to specify a url and receive incoming updates via an outgoing webhook. Whenever there is an update for the bot, we will send an HTTPS POST request to the specified url, containing a JSON-serialized Update. In case of an unsuccessful request, we will give up after a reasonable amount of attempts.
[ "Use", "this", "method", "to", "specify", "a", "url", "and", "receive", "incoming", "updates", "via", "an", "outgoing", "webhook", ".", "Whenever", "there", "is", "an", "update", "for", "the", "bot", "we", "will", "send", "an", "HTTPS", "POST", "request", "to", "the", "specified", "url", "containing", "a", "JSON", "-", "serialized", "Update", ".", "In", "case", "of", "an", "unsuccessful", "request", "we", "will", "give", "up", "after", "a", "reasonable", "amount", "of", "attempts", "." ]
python
train
loli/medpy
medpy/io/save.py
https://github.com/loli/medpy/blob/95216b9e22e7ce301f0edf953ee2a2f1b6c6aee5/medpy/io/save.py#L33-L120
def save(arr, filename, hdr = False, force = True, use_compression = False): r""" Save the image ``arr`` as filename using information encoded in ``hdr``. The target image format is determined by the ``filename`` suffix. If the ``force`` parameter is set to true, an already existing image is overwritten silently. Otherwise an error is thrown. The header (``hdr``) object is the one returned by `~medpy.io.load.load` and is used opportunistically, possibly loosing some meta-information. Generally this function does not guarantee, that metadata other than the image shape and pixel data type are kept. MedPy relies on SimpleITK, which enables the power of ITK for image loading and saving. The supported image file formats should include at least the following. Medical formats: - ITK MetaImage (.mha/.raw, .mhd) - Neuroimaging Informatics Technology Initiative (NIfTI) (.nia, .nii, .nii.gz, .hdr, .img, .img.gz) - Analyze (plain, SPM99, SPM2) (.hdr/.img, .img.gz) - Digital Imaging and Communications in Medicine (DICOM) (.dcm, .dicom) - Digital Imaging and Communications in Medicine (DICOM) series (<directory>/) - Nearly Raw Raster Data (Nrrd) (.nrrd, .nhdr) - Medical Imaging NetCDF (MINC) (.mnc, .MNC) - Guys Image Processing Lab (GIPL) (.gipl, .gipl.gz) Microscopy formats: - Medical Research Council (MRC) (.mrc, .rec) - Bio-Rad (.pic, .PIC) - LSM (Zeiss) microscopy images (.tif, .TIF, .tiff, .TIFF, .lsm, .LSM) - Stimulate / Signal Data (SDT) (.sdt) Visualization formats: - VTK images (.vtk) Other formats: - Portable Network Graphics (PNG) (.png, .PNG) - Joint Photographic Experts Group (JPEG) (.jpg, .JPG, .jpeg, .JPEG) - Tagged Image File Format (TIFF) (.tif, .TIF, .tiff, .TIFF) - Windows bitmap (.bmp, .BMP) - Hierarchical Data Format (HDF5) (.h5 , .hdf5 , .he5) - MSX-DOS Screen-x (.ge4, .ge5) For informations about which image formats, dimensionalities and pixel data types your current configuration supports, run `python3 tests/support.py > myformats.log`. Further information see https://simpleitk.readthedocs.io . Parameters ---------- arr : array_like The image data with order `x,y,z,c`. filename : string Where to save the image; path and filename including the image suffix. hdr : object The image header containing the metadata. force : bool Set to True to overwrite already exiting image silently. use_compression : bool Use data compression of the target format supports it. Raises ------ ImageSavingError If the image could not be saved due to various reasons """ logger = Logger.getInstance() logger.info('Saving image as {}...'.format(filename)) # Check image file existance if not force and os.path.exists(filename): raise ImageSavingError('The target file {} already exists.'.format(filename)) # Roll axes from x,y,z,c to z,y,x,c if arr.ndim == 4: arr = np.moveaxis(arr, -1, 0) arr = arr.T sitkimage = sitk.GetImageFromArray(arr) # Copy met-data as far as possible if hdr: hdr.copy_to(sitkimage) sitk.WriteImage(sitkimage, filename, use_compression)
[ "def", "save", "(", "arr", ",", "filename", ",", "hdr", "=", "False", ",", "force", "=", "True", ",", "use_compression", "=", "False", ")", ":", "logger", "=", "Logger", ".", "getInstance", "(", ")", "logger", ".", "info", "(", "'Saving image as {}...'", ".", "format", "(", "filename", ")", ")", "# Check image file existance", "if", "not", "force", "and", "os", ".", "path", ".", "exists", "(", "filename", ")", ":", "raise", "ImageSavingError", "(", "'The target file {} already exists.'", ".", "format", "(", "filename", ")", ")", "# Roll axes from x,y,z,c to z,y,x,c", "if", "arr", ".", "ndim", "==", "4", ":", "arr", "=", "np", ".", "moveaxis", "(", "arr", ",", "-", "1", ",", "0", ")", "arr", "=", "arr", ".", "T", "sitkimage", "=", "sitk", ".", "GetImageFromArray", "(", "arr", ")", "# Copy met-data as far as possible", "if", "hdr", ":", "hdr", ".", "copy_to", "(", "sitkimage", ")", "sitk", ".", "WriteImage", "(", "sitkimage", ",", "filename", ",", "use_compression", ")" ]
r""" Save the image ``arr`` as filename using information encoded in ``hdr``. The target image format is determined by the ``filename`` suffix. If the ``force`` parameter is set to true, an already existing image is overwritten silently. Otherwise an error is thrown. The header (``hdr``) object is the one returned by `~medpy.io.load.load` and is used opportunistically, possibly loosing some meta-information. Generally this function does not guarantee, that metadata other than the image shape and pixel data type are kept. MedPy relies on SimpleITK, which enables the power of ITK for image loading and saving. The supported image file formats should include at least the following. Medical formats: - ITK MetaImage (.mha/.raw, .mhd) - Neuroimaging Informatics Technology Initiative (NIfTI) (.nia, .nii, .nii.gz, .hdr, .img, .img.gz) - Analyze (plain, SPM99, SPM2) (.hdr/.img, .img.gz) - Digital Imaging and Communications in Medicine (DICOM) (.dcm, .dicom) - Digital Imaging and Communications in Medicine (DICOM) series (<directory>/) - Nearly Raw Raster Data (Nrrd) (.nrrd, .nhdr) - Medical Imaging NetCDF (MINC) (.mnc, .MNC) - Guys Image Processing Lab (GIPL) (.gipl, .gipl.gz) Microscopy formats: - Medical Research Council (MRC) (.mrc, .rec) - Bio-Rad (.pic, .PIC) - LSM (Zeiss) microscopy images (.tif, .TIF, .tiff, .TIFF, .lsm, .LSM) - Stimulate / Signal Data (SDT) (.sdt) Visualization formats: - VTK images (.vtk) Other formats: - Portable Network Graphics (PNG) (.png, .PNG) - Joint Photographic Experts Group (JPEG) (.jpg, .JPG, .jpeg, .JPEG) - Tagged Image File Format (TIFF) (.tif, .TIF, .tiff, .TIFF) - Windows bitmap (.bmp, .BMP) - Hierarchical Data Format (HDF5) (.h5 , .hdf5 , .he5) - MSX-DOS Screen-x (.ge4, .ge5) For informations about which image formats, dimensionalities and pixel data types your current configuration supports, run `python3 tests/support.py > myformats.log`. Further information see https://simpleitk.readthedocs.io . Parameters ---------- arr : array_like The image data with order `x,y,z,c`. filename : string Where to save the image; path and filename including the image suffix. hdr : object The image header containing the metadata. force : bool Set to True to overwrite already exiting image silently. use_compression : bool Use data compression of the target format supports it. Raises ------ ImageSavingError If the image could not be saved due to various reasons
[ "r", "Save", "the", "image", "arr", "as", "filename", "using", "information", "encoded", "in", "hdr", ".", "The", "target", "image", "format", "is", "determined", "by", "the", "filename", "suffix", ".", "If", "the", "force", "parameter", "is", "set", "to", "true", "an", "already", "existing", "image", "is", "overwritten", "silently", ".", "Otherwise", "an", "error", "is", "thrown", ".", "The", "header", "(", "hdr", ")", "object", "is", "the", "one", "returned", "by", "~medpy", ".", "io", ".", "load", ".", "load", "and", "is", "used", "opportunistically", "possibly", "loosing", "some", "meta", "-", "information", ".", "Generally", "this", "function", "does", "not", "guarantee", "that", "metadata", "other", "than", "the", "image", "shape", "and", "pixel", "data", "type", "are", "kept", ".", "MedPy", "relies", "on", "SimpleITK", "which", "enables", "the", "power", "of", "ITK", "for", "image", "loading", "and", "saving", ".", "The", "supported", "image", "file", "formats", "should", "include", "at", "least", "the", "following", "." ]
python
train
great-expectations/great_expectations
great_expectations/cli.py
https://github.com/great-expectations/great_expectations/blob/08385c40529d4f14a1c46916788aecc47f33ee9d/great_expectations/cli.py#L55-L112
def validate(parsed_args): """ Read a dataset file and validate it using a config saved in another file. Uses parameters defined in the dispatch method. :param parsed_args: A Namespace object containing parsed arguments from the dispatch method. :return: The number of unsucessful expectations """ parsed_args = vars(parsed_args) data_set = parsed_args['dataset'] expectations_config_file = parsed_args['expectations_config_file'] expectations_config = json.load(open(expectations_config_file)) if parsed_args["evaluation_parameters"] is not None: evaluation_parameters = json.load( open(parsed_args["evaluation_parameters"])) else: evaluation_parameters = None # Use a custom dataasset module and class if provided. Otherwise infer from the config. if parsed_args["custom_dataset_module"]: sys.path.insert(0, os.path.dirname( parsed_args["custom_dataset_module"])) module_name = os.path.basename( parsed_args["custom_dataset_module"]).split('.')[0] custom_module = __import__(module_name) dataset_class = getattr( custom_module, parsed_args["custom_dataset_class"]) elif "data_asset_type" in expectations_config: if expectations_config["data_asset_type"] == "Dataset" or expectations_config["data_asset_type"] == "PandasDataset": dataset_class = PandasDataset elif expectations_config["data_asset_type"].endswith("Dataset"): logger.info("Using PandasDataset to validate dataset of type %s." % expectations_config["data_asset_type"]) dataset_class = PandasDataset elif expectations_config["data_asset_type"] == "FileDataAsset": dataset_class = FileDataAsset else: logger.critical("Unrecognized data_asset_type %s. You may need to specifcy custom_dataset_module and custom_dataset_class." % expectations_config["data_asset_type"]) return -1 else: dataset_class = PandasDataset if issubclass(dataset_class, Dataset): da = read_csv(data_set, expectations_config=expectations_config, dataset_class=dataset_class) else: da = dataset_class(data_set, config=expectations_config) result = da.validate( evaluation_parameters=evaluation_parameters, result_format=parsed_args["result_format"], catch_exceptions=parsed_args["catch_exceptions"], only_return_failures=parsed_args["only_return_failures"], ) print(json.dumps(result, indent=2)) return result['statistics']['unsuccessful_expectations']
[ "def", "validate", "(", "parsed_args", ")", ":", "parsed_args", "=", "vars", "(", "parsed_args", ")", "data_set", "=", "parsed_args", "[", "'dataset'", "]", "expectations_config_file", "=", "parsed_args", "[", "'expectations_config_file'", "]", "expectations_config", "=", "json", ".", "load", "(", "open", "(", "expectations_config_file", ")", ")", "if", "parsed_args", "[", "\"evaluation_parameters\"", "]", "is", "not", "None", ":", "evaluation_parameters", "=", "json", ".", "load", "(", "open", "(", "parsed_args", "[", "\"evaluation_parameters\"", "]", ")", ")", "else", ":", "evaluation_parameters", "=", "None", "# Use a custom dataasset module and class if provided. Otherwise infer from the config.", "if", "parsed_args", "[", "\"custom_dataset_module\"", "]", ":", "sys", ".", "path", ".", "insert", "(", "0", ",", "os", ".", "path", ".", "dirname", "(", "parsed_args", "[", "\"custom_dataset_module\"", "]", ")", ")", "module_name", "=", "os", ".", "path", ".", "basename", "(", "parsed_args", "[", "\"custom_dataset_module\"", "]", ")", ".", "split", "(", "'.'", ")", "[", "0", "]", "custom_module", "=", "__import__", "(", "module_name", ")", "dataset_class", "=", "getattr", "(", "custom_module", ",", "parsed_args", "[", "\"custom_dataset_class\"", "]", ")", "elif", "\"data_asset_type\"", "in", "expectations_config", ":", "if", "expectations_config", "[", "\"data_asset_type\"", "]", "==", "\"Dataset\"", "or", "expectations_config", "[", "\"data_asset_type\"", "]", "==", "\"PandasDataset\"", ":", "dataset_class", "=", "PandasDataset", "elif", "expectations_config", "[", "\"data_asset_type\"", "]", ".", "endswith", "(", "\"Dataset\"", ")", ":", "logger", ".", "info", "(", "\"Using PandasDataset to validate dataset of type %s.\"", "%", "expectations_config", "[", "\"data_asset_type\"", "]", ")", "dataset_class", "=", "PandasDataset", "elif", "expectations_config", "[", "\"data_asset_type\"", "]", "==", "\"FileDataAsset\"", ":", "dataset_class", "=", "FileDataAsset", "else", ":", "logger", ".", "critical", "(", "\"Unrecognized data_asset_type %s. You may need to specifcy custom_dataset_module and custom_dataset_class.\"", "%", "expectations_config", "[", "\"data_asset_type\"", "]", ")", "return", "-", "1", "else", ":", "dataset_class", "=", "PandasDataset", "if", "issubclass", "(", "dataset_class", ",", "Dataset", ")", ":", "da", "=", "read_csv", "(", "data_set", ",", "expectations_config", "=", "expectations_config", ",", "dataset_class", "=", "dataset_class", ")", "else", ":", "da", "=", "dataset_class", "(", "data_set", ",", "config", "=", "expectations_config", ")", "result", "=", "da", ".", "validate", "(", "evaluation_parameters", "=", "evaluation_parameters", ",", "result_format", "=", "parsed_args", "[", "\"result_format\"", "]", ",", "catch_exceptions", "=", "parsed_args", "[", "\"catch_exceptions\"", "]", ",", "only_return_failures", "=", "parsed_args", "[", "\"only_return_failures\"", "]", ",", ")", "print", "(", "json", ".", "dumps", "(", "result", ",", "indent", "=", "2", ")", ")", "return", "result", "[", "'statistics'", "]", "[", "'unsuccessful_expectations'", "]" ]
Read a dataset file and validate it using a config saved in another file. Uses parameters defined in the dispatch method. :param parsed_args: A Namespace object containing parsed arguments from the dispatch method. :return: The number of unsucessful expectations
[ "Read", "a", "dataset", "file", "and", "validate", "it", "using", "a", "config", "saved", "in", "another", "file", ".", "Uses", "parameters", "defined", "in", "the", "dispatch", "method", "." ]
python
train
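For orientation, a hedged programmatic equivalent of the CLI path above: the file names are placeholders, and the expectations file is assumed to be in the older JSON config format this function reads.

import json
import great_expectations as ge

# Load a previously saved expectations config and validate a CSV against it.
with open('my_expectations.json') as f:
    config = json.load(f)

dataset = ge.read_csv('my_data.csv', expectations_config=config)
results = dataset.validate(result_format='SUMMARY', catch_exceptions=True)
print(results['statistics']['unsuccessful_expectations'])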
bids-standard/pybids
bids/utils.py
https://github.com/bids-standard/pybids/blob/30d924ce770622bda0e390d613a8da42a2a20c32/bids/utils.py#L29-L41
def natural_sort(l, field=None): ''' based on snippet found at http://stackoverflow.com/a/4836734/2445984 ''' convert = lambda text: int(text) if text.isdigit() else text.lower() def alphanum_key(key): if field is not None: key = getattr(key, field) if not isinstance(key, str): key = str(key) return [convert(c) for c in re.split('([0-9]+)', key)] return sorted(l, key=alphanum_key)
[ "def", "natural_sort", "(", "l", ",", "field", "=", "None", ")", ":", "convert", "=", "lambda", "text", ":", "int", "(", "text", ")", "if", "text", ".", "isdigit", "(", ")", "else", "text", ".", "lower", "(", ")", "def", "alphanum_key", "(", "key", ")", ":", "if", "field", "is", "not", "None", ":", "key", "=", "getattr", "(", "key", ",", "field", ")", "if", "not", "isinstance", "(", "key", ",", "str", ")", ":", "key", "=", "str", "(", "key", ")", "return", "[", "convert", "(", "c", ")", "for", "c", "in", "re", ".", "split", "(", "'([0-9]+)'", ",", "key", ")", "]", "return", "sorted", "(", "l", ",", "key", "=", "alphanum_key", ")" ]
based on snippet found at http://stackoverflow.com/a/4836734/2445984
[ "based", "on", "snippet", "found", "at", "http", ":", "//", "stackoverflow", ".", "com", "/", "a", "/", "4836734", "/", "2445984" ]
python
train
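A quick illustration of the ordering this helper produces compared with plain lexicographic sorting; the file names are made up.

from bids.utils import natural_sort

runs = ['run10', 'run2', 'run1']
print(sorted(runs))        # ['run1', 'run10', 'run2'] -- lexicographic
print(natural_sort(runs))  # ['run1', 'run2', 'run10'] -- numeric-aware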
maas/python-libmaas
maas/client/viscera/ipranges.py
https://github.com/maas/python-libmaas/blob/4092c68ef7fb1753efc843569848e2bcc3415002/maas/client/viscera/ipranges.py#L31-L74
async def create( cls, start_ip: str, end_ip: str, *, type: IPRangeType = IPRangeType.RESERVED, comment: str = None, subnet: Union[Subnet, int] = None): """ Create a `IPRange` in MAAS. :param start_ip: First IP address in the range (required). :type start_ip: `str` :parma end_ip: Last IP address in the range (required). :type end_ip: `str` :param type: Type of IP address range (optional). :type type: `IPRangeType` :param comment: Reason for the IP address range (optional). :type comment: `str` :param subnet: Subnet the IP address range should be created on (optional). By default MAAS will calculate the correct subnet based on the `start_ip` and `end_ip`. :type subnet: `Subnet` or `int` :returns: The created IPRange :rtype: `IPRange` """ if not isinstance(type, IPRangeType): raise TypeError( "type must be an IPRangeType, not %s" % TYPE(type).__name__) params = { 'start_ip': start_ip, 'end_ip': end_ip, 'type': type.value, } if comment is not None: params["comment"] = comment if subnet is not None: if isinstance(subnet, Subnet): params["subnet"] = subnet.id elif isinstance(subnet, int): params["subnet"] = subnet else: raise TypeError( "subnet must be Subnet or int, not %s" % ( TYPE(subnet).__class__)) return cls._object(await cls._handler.create(**params))
[ "async", "def", "create", "(", "cls", ",", "start_ip", ":", "str", ",", "end_ip", ":", "str", ",", "*", ",", "type", ":", "IPRangeType", "=", "IPRangeType", ".", "RESERVED", ",", "comment", ":", "str", "=", "None", ",", "subnet", ":", "Union", "[", "Subnet", ",", "int", "]", "=", "None", ")", ":", "if", "not", "isinstance", "(", "type", ",", "IPRangeType", ")", ":", "raise", "TypeError", "(", "\"type must be an IPRangeType, not %s\"", "%", "TYPE", "(", "type", ")", ".", "__name__", ")", "params", "=", "{", "'start_ip'", ":", "start_ip", ",", "'end_ip'", ":", "end_ip", ",", "'type'", ":", "type", ".", "value", ",", "}", "if", "comment", "is", "not", "None", ":", "params", "[", "\"comment\"", "]", "=", "comment", "if", "subnet", "is", "not", "None", ":", "if", "isinstance", "(", "subnet", ",", "Subnet", ")", ":", "params", "[", "\"subnet\"", "]", "=", "subnet", ".", "id", "elif", "isinstance", "(", "subnet", ",", "int", ")", ":", "params", "[", "\"subnet\"", "]", "=", "subnet", "else", ":", "raise", "TypeError", "(", "\"subnet must be Subnet or int, not %s\"", "%", "(", "TYPE", "(", "subnet", ")", ".", "__class__", ")", ")", "return", "cls", ".", "_object", "(", "await", "cls", ".", "_handler", ".", "create", "(", "*", "*", "params", ")", ")" ]
Create a `IPRange` in MAAS. :param start_ip: First IP address in the range (required). :type start_ip: `str` :parma end_ip: Last IP address in the range (required). :type end_ip: `str` :param type: Type of IP address range (optional). :type type: `IPRangeType` :param comment: Reason for the IP address range (optional). :type comment: `str` :param subnet: Subnet the IP address range should be created on (optional). By default MAAS will calculate the correct subnet based on the `start_ip` and `end_ip`. :type subnet: `Subnet` or `int` :returns: The created IPRange :rtype: `IPRange`
[ "Create", "a", "IPRange", "in", "MAAS", "." ]
python
train
quantopian/empyrical
empyrical/utils.py
https://github.com/quantopian/empyrical/blob/badbdca75f5b293f28b5e947974894de041d6868/empyrical/utils.py#L349-L375
def load_portfolio_risk_factors(filepath_prefix=None, start=None, end=None): """ Load risk factors Mkt-Rf, SMB, HML, Rf, and UMD. Data is stored in HDF5 file. If the data is more than 2 days old, redownload from Dartmouth. Returns ------- five_factors : pd.DataFrame Risk factors timeseries. """ if start is None: start = '1/1/1970' if end is None: end = _1_bday_ago() start = get_utc_timestamp(start) end = get_utc_timestamp(end) if filepath_prefix is None: filepath = data_path('factors.csv') else: filepath = filepath_prefix five_factors = get_returns_cached(filepath, get_fama_french, end) return five_factors.loc[start:end]
[ "def", "load_portfolio_risk_factors", "(", "filepath_prefix", "=", "None", ",", "start", "=", "None", ",", "end", "=", "None", ")", ":", "if", "start", "is", "None", ":", "start", "=", "'1/1/1970'", "if", "end", "is", "None", ":", "end", "=", "_1_bday_ago", "(", ")", "start", "=", "get_utc_timestamp", "(", "start", ")", "end", "=", "get_utc_timestamp", "(", "end", ")", "if", "filepath_prefix", "is", "None", ":", "filepath", "=", "data_path", "(", "'factors.csv'", ")", "else", ":", "filepath", "=", "filepath_prefix", "five_factors", "=", "get_returns_cached", "(", "filepath", ",", "get_fama_french", ",", "end", ")", "return", "five_factors", ".", "loc", "[", "start", ":", "end", "]" ]
Load risk factors Mkt-Rf, SMB, HML, Rf, and UMD. Data is stored in HDF5 file. If the data is more than 2 days old, redownload from Dartmouth. Returns ------- five_factors : pd.DataFrame Risk factors timeseries.
[ "Load", "risk", "factors", "Mkt", "-", "Rf", "SMB", "HML", "Rf", "and", "UMD", ".", "Data", "is", "stored", "in", "HDF5", "file", ".", "If", "the", "data", "is", "more", "than", "2", "days", "old", "redownload", "from", "Dartmouth", ".", "Returns", "-------", "five_factors", ":", "pd", ".", "DataFrame", "Risk", "factors", "timeseries", "." ]
python
train
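An illustrative call for the loader above; the date range is arbitrary, and because the factors are downloaded from Dartmouth (or read from the local CSV cache) at call time, network access may be required.

from empyrical.utils import load_portfolio_risk_factors

# Fetch (or load cached) Fama-French factors for a one-year window.
factors = load_portfolio_risk_factors(start='2015-01-01', end='2015-12-31')
print(factors.columns.tolist())
print(factors.head())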
tradenity/python-sdk
tradenity/resources/free_item_coupon.py
https://github.com/tradenity/python-sdk/blob/d13fbe23f4d6ff22554c6d8d2deaf209371adaf1/tradenity/resources/free_item_coupon.py#L531-L551
def delete_free_item_coupon_by_id(cls, free_item_coupon_id, **kwargs): """Delete FreeItemCoupon Delete an instance of FreeItemCoupon by its ID. This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async=True >>> thread = api.delete_free_item_coupon_by_id(free_item_coupon_id, async=True) >>> result = thread.get() :param async bool :param str free_item_coupon_id: ID of freeItemCoupon to delete. (required) :return: None If the method is called asynchronously, returns the request thread. """ kwargs['_return_http_data_only'] = True if kwargs.get('async'): return cls._delete_free_item_coupon_by_id_with_http_info(free_item_coupon_id, **kwargs) else: (data) = cls._delete_free_item_coupon_by_id_with_http_info(free_item_coupon_id, **kwargs) return data
[ "def", "delete_free_item_coupon_by_id", "(", "cls", ",", "free_item_coupon_id", ",", "*", "*", "kwargs", ")", ":", "kwargs", "[", "'_return_http_data_only'", "]", "=", "True", "if", "kwargs", ".", "get", "(", "'async'", ")", ":", "return", "cls", ".", "_delete_free_item_coupon_by_id_with_http_info", "(", "free_item_coupon_id", ",", "*", "*", "kwargs", ")", "else", ":", "(", "data", ")", "=", "cls", ".", "_delete_free_item_coupon_by_id_with_http_info", "(", "free_item_coupon_id", ",", "*", "*", "kwargs", ")", "return", "data" ]
Delete FreeItemCoupon Delete an instance of FreeItemCoupon by its ID. This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async=True >>> thread = api.delete_free_item_coupon_by_id(free_item_coupon_id, async=True) >>> result = thread.get() :param async bool :param str free_item_coupon_id: ID of freeItemCoupon to delete. (required) :return: None If the method is called asynchronously, returns the request thread.
[ "Delete", "FreeItemCoupon" ]
python
train
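A hedged sketch of the synchronous call path above: the coupon ID is a placeholder, and the API host and credential configuration that the tradenity SDK normally requires is assumed to have been done elsewhere before this runs.

from tradenity.resources.free_item_coupon import FreeItemCoupon

# Assumes the SDK's global API client and credentials are already configured.
FreeItemCoupon.delete_free_item_coupon_by_id('coupon_12345')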
pydata/xarray
xarray/core/dataset.py
https://github.com/pydata/xarray/blob/6d93a95d05bdbfc33fff24064f67d29dd891ab58/xarray/core/dataset.py#L1547-L1588
def _get_indexers_coords_and_indexes(self, indexers): """ Extract coordinates from indexers. Returns an OrderedDict mapping from coordinate name to the coordinate variable. Only coordinate with a name different from any of self.variables will be attached. """ from .dataarray import DataArray coord_list = [] indexes = OrderedDict() for k, v in indexers.items(): if isinstance(v, DataArray): v_coords = v.coords if v.dtype.kind == 'b': if v.ndim != 1: # we only support 1-d boolean array raise ValueError( '{:d}d-boolean array is used for indexing along ' 'dimension {!r}, but only 1d boolean arrays are ' 'supported.'.format(v.ndim, k)) # Make sure in case of boolean DataArray, its # coordinate also should be indexed. v_coords = v[v.values.nonzero()[0]].coords coord_list.append({d: v_coords[d].variable for d in v.coords}) indexes.update(v.indexes) # we don't need to call align() explicitly or check indexes for # alignment, because merge_variables already checks for exact alignment # between dimension coordinates coords = merge_variables(coord_list) assert_coordinate_consistent(self, coords) # silently drop the conflicted variables. attached_coords = OrderedDict( (k, v) for k, v in coords.items() if k not in self._variables ) attached_indexes = OrderedDict( (k, v) for k, v in indexes.items() if k not in self._variables ) return attached_coords, attached_indexes
[ "def", "_get_indexers_coords_and_indexes", "(", "self", ",", "indexers", ")", ":", "from", ".", "dataarray", "import", "DataArray", "coord_list", "=", "[", "]", "indexes", "=", "OrderedDict", "(", ")", "for", "k", ",", "v", "in", "indexers", ".", "items", "(", ")", ":", "if", "isinstance", "(", "v", ",", "DataArray", ")", ":", "v_coords", "=", "v", ".", "coords", "if", "v", ".", "dtype", ".", "kind", "==", "'b'", ":", "if", "v", ".", "ndim", "!=", "1", ":", "# we only support 1-d boolean array", "raise", "ValueError", "(", "'{:d}d-boolean array is used for indexing along '", "'dimension {!r}, but only 1d boolean arrays are '", "'supported.'", ".", "format", "(", "v", ".", "ndim", ",", "k", ")", ")", "# Make sure in case of boolean DataArray, its", "# coordinate also should be indexed.", "v_coords", "=", "v", "[", "v", ".", "values", ".", "nonzero", "(", ")", "[", "0", "]", "]", ".", "coords", "coord_list", ".", "append", "(", "{", "d", ":", "v_coords", "[", "d", "]", ".", "variable", "for", "d", "in", "v", ".", "coords", "}", ")", "indexes", ".", "update", "(", "v", ".", "indexes", ")", "# we don't need to call align() explicitly or check indexes for", "# alignment, because merge_variables already checks for exact alignment", "# between dimension coordinates", "coords", "=", "merge_variables", "(", "coord_list", ")", "assert_coordinate_consistent", "(", "self", ",", "coords", ")", "# silently drop the conflicted variables.", "attached_coords", "=", "OrderedDict", "(", "(", "k", ",", "v", ")", "for", "k", ",", "v", "in", "coords", ".", "items", "(", ")", "if", "k", "not", "in", "self", ".", "_variables", ")", "attached_indexes", "=", "OrderedDict", "(", "(", "k", ",", "v", ")", "for", "k", ",", "v", "in", "indexes", ".", "items", "(", ")", "if", "k", "not", "in", "self", ".", "_variables", ")", "return", "attached_coords", ",", "attached_indexes" ]
Extract coordinates from indexers. Returns an OrderedDict mapping from coordinate name to the coordinate variable. Only coordinate with a name different from any of self.variables will be attached.
[ "Extract", "coordinates", "from", "indexers", ".", "Returns", "an", "OrderedDict", "mapping", "from", "coordinate", "name", "to", "the", "coordinate", "variable", "." ]
python
train
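A small public-API demonstration of the behaviour this private helper supports: selecting with a DataArray indexer attaches the indexer's own coordinate to the result. The 'station' dimension and its labels are invented for the example.

import numpy as np
import xarray as xr

ds = xr.Dataset({'t': ('x', np.arange(5))})
idx = xr.DataArray([0, 2, 4], dims='station',
                   coords={'station': ['a', 'b', 'c']})

# The result gains the 'station' coordinate carried by the indexer.
selected = ds.isel(x=idx)
print(selected.coords)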
mikekatz04/BOWIE
bowie/plotutils/forminput.py
https://github.com/mikekatz04/BOWIE/blob/a941342a3536cb57c817a1643896d99a3f354a86/bowie/plotutils/forminput.py#L503-L571
def add_dataset(self, name=None, label=None, x_column_label=None, y_column_label=None, index=None, control=False): """Add a dataset to a specific plot. This method adds a dataset to a plot. Its functional use is imperative to the plot generation. It handles adding new files as well as indexing to files that are added to other plots. All Args default to None. However, these are note the defaults in the code. See DataImportContainer attributes for defaults in code. Args: name (str, optional): Name (path) for file. Required if reading from a file (at least one). Required if file_name is not in "general". Must be ".txt" or ".hdf5". Can include path from working directory. label (str, optional): Column label in the dataset corresponding to desired SNR value. Required if reading from a file (at least one). x_column_label/y_column_label (str, optional): Column label from input file identifying x/y values. This can override setting in "general". Default is `x`/`y`. index (int, optional): Index of plot with preloaded data. Required if not loading a file. control (bool, optional): If True, this dataset is set to the control. This is needed for Ratio plots. It sets the baseline. Default is False. Raises: ValueError: If no options are passes. This means no file indication nor index. """ if name is None and label is None and index is None: raise ValueError("Attempting to add a dataset without" + "supplying index or file information.") if index is None: trans_dict = DataImportContainer() if name is not None: trans_dict.file_name = name if label is not None: trans_dict.label = label if x_column_label is not None: trans_dict.x_column_label = x_column_label if y_column_label is not None: trans_dict.y_column_label = y_column_label if control: self.control = trans_dict else: # need to append file to file list. if 'file' not in self.__dict__: self.file = [] self.file.append(trans_dict) else: if control: self.control = DataImportContainer() self.control.index = index else: # need to append index to index list. if 'indices' not in self.__dict__: self.indices = [] self.indices.append(index) return
[ "def", "add_dataset", "(", "self", ",", "name", "=", "None", ",", "label", "=", "None", ",", "x_column_label", "=", "None", ",", "y_column_label", "=", "None", ",", "index", "=", "None", ",", "control", "=", "False", ")", ":", "if", "name", "is", "None", "and", "label", "is", "None", "and", "index", "is", "None", ":", "raise", "ValueError", "(", "\"Attempting to add a dataset without\"", "+", "\"supplying index or file information.\"", ")", "if", "index", "is", "None", ":", "trans_dict", "=", "DataImportContainer", "(", ")", "if", "name", "is", "not", "None", ":", "trans_dict", ".", "file_name", "=", "name", "if", "label", "is", "not", "None", ":", "trans_dict", ".", "label", "=", "label", "if", "x_column_label", "is", "not", "None", ":", "trans_dict", ".", "x_column_label", "=", "x_column_label", "if", "y_column_label", "is", "not", "None", ":", "trans_dict", ".", "y_column_label", "=", "y_column_label", "if", "control", ":", "self", ".", "control", "=", "trans_dict", "else", ":", "# need to append file to file list.", "if", "'file'", "not", "in", "self", ".", "__dict__", ":", "self", ".", "file", "=", "[", "]", "self", ".", "file", ".", "append", "(", "trans_dict", ")", "else", ":", "if", "control", ":", "self", ".", "control", "=", "DataImportContainer", "(", ")", "self", ".", "control", ".", "index", "=", "index", "else", ":", "# need to append index to index list.", "if", "'indices'", "not", "in", "self", ".", "__dict__", ":", "self", ".", "indices", "=", "[", "]", "self", ".", "indices", ".", "append", "(", "index", ")", "return" ]
Add a dataset to a specific plot. This method adds a dataset to a plot. Its functional use is imperative to the plot generation. It handles adding new files as well as indexing to files that are added to other plots. All Args default to None. However, these are note the defaults in the code. See DataImportContainer attributes for defaults in code. Args: name (str, optional): Name (path) for file. Required if reading from a file (at least one). Required if file_name is not in "general". Must be ".txt" or ".hdf5". Can include path from working directory. label (str, optional): Column label in the dataset corresponding to desired SNR value. Required if reading from a file (at least one). x_column_label/y_column_label (str, optional): Column label from input file identifying x/y values. This can override setting in "general". Default is `x`/`y`. index (int, optional): Index of plot with preloaded data. Required if not loading a file. control (bool, optional): If True, this dataset is set to the control. This is needed for Ratio plots. It sets the baseline. Default is False. Raises: ValueError: If no options are passes. This means no file indication nor index.
[ "Add", "a", "dataset", "to", "a", "specific", "plot", "." ]
python
train
TeamHG-Memex/tensorboard_logger
tensorboard_logger/tensorboard_logger.py
https://github.com/TeamHG-Memex/tensorboard_logger/blob/93968344a471532530f035622118693845f32649/tensorboard_logger/tensorboard_logger.py#L71-L92
def log_value(self, name, value, step=None): """Log new value for given name on given step. Args: name (str): name of the variable (it will be converted to a valid tensorflow summary name). value (float): this is a real number to be logged as a scalar. step (int): non-negative integer used for visualization: you can log several different variables on one step, but should not log different values of the same variable on the same step (this is not checked). """ if isinstance(value, six.string_types): raise TypeError('"value" should be a number, got {}' .format(type(value))) value = float(value) self._check_step(step) tf_name = self._ensure_tf_name(name) summary = self._scalar_summary(tf_name, value, step) self._log_summary(tf_name, summary, value, step=step)
[ "def", "log_value", "(", "self", ",", "name", ",", "value", ",", "step", "=", "None", ")", ":", "if", "isinstance", "(", "value", ",", "six", ".", "string_types", ")", ":", "raise", "TypeError", "(", "'\"value\" should be a number, got {}'", ".", "format", "(", "type", "(", "value", ")", ")", ")", "value", "=", "float", "(", "value", ")", "self", ".", "_check_step", "(", "step", ")", "tf_name", "=", "self", ".", "_ensure_tf_name", "(", "name", ")", "summary", "=", "self", ".", "_scalar_summary", "(", "tf_name", ",", "value", ",", "step", ")", "self", ".", "_log_summary", "(", "tf_name", ",", "summary", ",", "value", ",", "step", "=", "step", ")" ]
Log new value for given name on given step. Args: name (str): name of the variable (it will be converted to a valid tensorflow summary name). value (float): this is a real number to be logged as a scalar. step (int): non-negative integer used for visualization: you can log several different variables on one step, but should not log different values of the same variable on the same step (this is not checked).
[ "Log", "new", "value", "for", "given", "name", "on", "given", "step", "." ]
python
train
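A minimal end-to-end sketch for the logger above: scalars are written under runs/demo, an arbitrary directory name, so they can be inspected with TensorBoard afterwards.

from tensorboard_logger import Logger

logger = Logger('runs/demo')

# Log a decaying scalar; each (name, value, step) triple becomes one summary.
for step in range(100):
    logger.log_value('loss', 1.0 / (step + 1), step)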
frictionlessdata/datapackage-py
datapackage/registry.py
https://github.com/frictionlessdata/datapackage-py/blob/aca085ea54541b087140b58a81332f8728baeeb2/datapackage/registry.py#L129-L135
def _get_absolute_path(self, relative_path): '''str: Return the received relative_path joined with the base path (None if there were some error).''' try: return os.path.join(self.base_path, relative_path) except (AttributeError, TypeError): pass
[ "def", "_get_absolute_path", "(", "self", ",", "relative_path", ")", ":", "try", ":", "return", "os", ".", "path", ".", "join", "(", "self", ".", "base_path", ",", "relative_path", ")", "except", "(", "AttributeError", ",", "TypeError", ")", ":", "pass" ]
str: Return the received relative_path joined with the base path (None if there were some error).
[ "str", ":", "Return", "the", "received", "relative_path", "joined", "with", "the", "base", "path", "(", "None", "if", "there", "were", "some", "error", ")", "." ]
python
valid
acutesoftware/AIKIF
aikif/index.py
https://github.com/acutesoftware/AIKIF/blob/fcf1582dc5f884b9a4fa7c6e20e9de9d94d21d03/aikif/index.py#L26-L58
def index(): """ main function - outputs in following format BEFORE consolidation (which is TODO) # filename, word, linenumbers # refAction.csv, ActionTypeName, 1 # refAction.csv, PhysicalType, 1 # goals.csv, Cleanliness, 11 """ lg = mod_log.Log(mod_cfg.fldrs['localPath']) lg.record_command('Starting indexing', 'index.py') # sys.modules[self.__module__].__file__) if silent == 'N': print('------------------') print('Rebuilding Indexes') print('------------------') with open(ndxFile, "w") as ndx: ndx.write('filename, word, linenumbers\n') files_to_index = mod_fl.FileList([mod_cfg.fldrs['public_data_path'] + os.sep + 'core'], ['*.csv'], ignore_files, "files_to_index_filelist.csv") if silent == 'N': print(format_op_hdr()) for f in files_to_index.get_list(): buildIndex(f, ndxFile, silent) # now build the one big index file consolidate(ndxFile, opIndex ) lg.record_command('Finished indexing', 'index.py') #, fle.GetModuleName()) if silent == 'N': print('Done')
[ "def", "index", "(", ")", ":", "lg", "=", "mod_log", ".", "Log", "(", "mod_cfg", ".", "fldrs", "[", "'localPath'", "]", ")", "lg", ".", "record_command", "(", "'Starting indexing'", ",", "'index.py'", ")", "# sys.modules[self.__module__].__file__)", "if", "silent", "==", "'N'", ":", "print", "(", "'------------------'", ")", "print", "(", "'Rebuilding Indexes'", ")", "print", "(", "'------------------'", ")", "with", "open", "(", "ndxFile", ",", "\"w\"", ")", "as", "ndx", ":", "ndx", ".", "write", "(", "'filename, word, linenumbers\\n'", ")", "files_to_index", "=", "mod_fl", ".", "FileList", "(", "[", "mod_cfg", ".", "fldrs", "[", "'public_data_path'", "]", "+", "os", ".", "sep", "+", "'core'", "]", ",", "[", "'*.csv'", "]", ",", "ignore_files", ",", "\"files_to_index_filelist.csv\"", ")", "if", "silent", "==", "'N'", ":", "print", "(", "format_op_hdr", "(", ")", ")", "for", "f", "in", "files_to_index", ".", "get_list", "(", ")", ":", "buildIndex", "(", "f", ",", "ndxFile", ",", "silent", ")", "# now build the one big index file", "consolidate", "(", "ndxFile", ",", "opIndex", ")", "lg", ".", "record_command", "(", "'Finished indexing'", ",", "'index.py'", ")", "#, fle.GetModuleName())", "if", "silent", "==", "'N'", ":", "print", "(", "'Done'", ")" ]
main function - outputs in following format BEFORE consolidation (which is TODO) # filename, word, linenumbers # refAction.csv, ActionTypeName, 1 # refAction.csv, PhysicalType, 1 # goals.csv, Cleanliness, 11
[ "main", "function", "-", "outputs", "in", "following", "format", "BEFORE", "consolidation", "(", "which", "is", "TODO", ")", "#", "filename", "word", "linenumbers", "#", "refAction", ".", "csv", "ActionTypeName", "1", "#", "refAction", ".", "csv", "PhysicalType", "1", "#", "goals", ".", "csv", "Cleanliness", "11" ]
python
train
redhat-cip/python-dciclient
dciclient/v1/shell_commands/job.py
https://github.com/redhat-cip/python-dciclient/blob/a4aa5899062802bbe4c30a075d8447f8d222d214/dciclient/v1/shell_commands/job.py#L120-L132
def attach_issue(context, id, url): """attach_issue(context, id, url) Attach an issue to a job. >>> dcictl job-attach-issue [OPTIONS] :param string id: ID of the job to attach the issue to [required] :param string url: URL of the issue to attach to the job [required] """ result = job.attach_issue(context, id=id, url=url) utils.format_output(result, context.format)
[ "def", "attach_issue", "(", "context", ",", "id", ",", "url", ")", ":", "result", "=", "job", ".", "attach_issue", "(", "context", ",", "id", "=", "id", ",", "url", "=", "url", ")", "utils", ".", "format_output", "(", "result", ",", "context", ".", "format", ")" ]
attach_issue(context, id, url) Attach an issue to a job. >>> dcictl job-attach-issue [OPTIONS] :param string id: ID of the job to attach the issue to [required] :param string url: URL of the issue to attach to the job [required]
[ "attach_issue", "(", "context", "id", "url", ")" ]
python
train
ethereum/py-evm
eth/db/journal.py
https://github.com/ethereum/py-evm/blob/58346848f076116381d3274bbcea96b9e2cfcbdf/eth/db/journal.py#L93-L109
def record_changeset(self, custom_changeset_id: uuid.UUID = None) -> uuid.UUID: """ Creates a new changeset. Changesets are referenced by a random uuid4 to prevent collisions between multiple changesets. """ if custom_changeset_id is not None: if custom_changeset_id in self.journal_data: raise ValidationError( "Tried to record with an existing changeset id: %r" % custom_changeset_id ) else: changeset_id = custom_changeset_id else: changeset_id = uuid.uuid4() self.journal_data[changeset_id] = {} return changeset_id
[ "def", "record_changeset", "(", "self", ",", "custom_changeset_id", ":", "uuid", ".", "UUID", "=", "None", ")", "->", "uuid", ".", "UUID", ":", "if", "custom_changeset_id", "is", "not", "None", ":", "if", "custom_changeset_id", "in", "self", ".", "journal_data", ":", "raise", "ValidationError", "(", "\"Tried to record with an existing changeset id: %r\"", "%", "custom_changeset_id", ")", "else", ":", "changeset_id", "=", "custom_changeset_id", "else", ":", "changeset_id", "=", "uuid", ".", "uuid4", "(", ")", "self", ".", "journal_data", "[", "changeset_id", "]", "=", "{", "}", "return", "changeset_id" ]
Creates a new changeset. Changesets are referenced by a random uuid4 to prevent collisions between multiple changesets.
[ "Creates", "a", "new", "changeset", ".", "Changesets", "are", "referenced", "by", "a", "random", "uuid4", "to", "prevent", "collisions", "between", "multiple", "changesets", "." ]
python
train
optimizely/python-sdk
optimizely/helpers/condition_tree_evaluator.py
https://github.com/optimizely/python-sdk/blob/ec028d9efcf22498c3820f2650fa10f5c30bec90/optimizely/helpers/condition_tree_evaluator.py#L17-L40
def and_evaluator(conditions, leaf_evaluator): """ Evaluates a list of conditions as if the evaluator had been applied to each entry and the results AND-ed together. Args: conditions: List of conditions ex: [operand_1, operand_2]. leaf_evaluator: Function which will be called to evaluate leaf condition values. Returns: Boolean: - True if all operands evaluate to True. - False if a single operand evaluates to False. None: if conditions couldn't be evaluated. """ saw_null_result = False for condition in conditions: result = evaluate(condition, leaf_evaluator) if result is False: return False if result is None: saw_null_result = True return None if saw_null_result else True
[ "def", "and_evaluator", "(", "conditions", ",", "leaf_evaluator", ")", ":", "saw_null_result", "=", "False", "for", "condition", "in", "conditions", ":", "result", "=", "evaluate", "(", "condition", ",", "leaf_evaluator", ")", "if", "result", "is", "False", ":", "return", "False", "if", "result", "is", "None", ":", "saw_null_result", "=", "True", "return", "None", "if", "saw_null_result", "else", "True" ]
Evaluates a list of conditions as if the evaluator had been applied to each entry and the results AND-ed together. Args: conditions: List of conditions ex: [operand_1, operand_2]. leaf_evaluator: Function which will be called to evaluate leaf condition values. Returns: Boolean: - True if all operands evaluate to True. - False if a single operand evaluates to False. None: if conditions couldn't be evaluated.
[ "Evaluates", "a", "list", "of", "conditions", "as", "if", "the", "evaluator", "had", "been", "applied", "to", "each", "entry", "and", "the", "results", "AND", "-", "ed", "together", "." ]
python
train
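A tiny self-contained illustration of the short-circuit and None-tracking semantics described above; the condition names and the dictionary-backed leaf evaluator are made up for the example.

from optimizely.helpers.condition_tree_evaluator import and_evaluator

# Pretend leaf evaluation results: True, unknown (None), and False.
results = {'a': True, 'b': None, 'c': False}

def leaf_evaluator(condition):
    return results[condition]

print(and_evaluator(['a', 'a'], leaf_evaluator))  # True  -- every operand True
print(and_evaluator(['a', 'c'], leaf_evaluator))  # False -- one operand False
print(and_evaluator(['a', 'b'], leaf_evaluator))  # None  -- unknown operand, none False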
tritemio/PyBroMo
pybromo/diffusion.py
https://github.com/tritemio/PyBroMo/blob/b75f82a4551ff37e7c7a7e6954c536451f3e6d06/pybromo/diffusion.py#L863-L990
def simulate_timestamps_mix_da(self, max_rates_d, max_rates_a, populations, bg_rate_d, bg_rate_a, rs=None, seed=1, chunksize=2**16, comp_filter=None, overwrite=False, skip_existing=False, scale=10, path=None, t_chunksize=2**19, timeslice=None): """Compute D and A timestamps arrays for a mixture of N populations. This method reads the emission from disk once, and generates a pair of timestamps arrays (e.g. donor and acceptor) from each chunk. Timestamp data are saved to disk and accessible as pytables arrays in `._timestamps_d/a` and `._tparticles_d/a`. The background generated timestamps are assigned a conventional particle number (last particle index + 1). Arguments: max_rates_d (list): list of the peak max emission rate in the donor channel for each population. max_rates_a (list): list of the peak max emission rate in the acceptor channel for each population. populations (list of slices): slices to `self.particles` defining each population. bg_rate_d (float, cps): rate for a Poisson background process in the donor channel. bg_rate_a (float, cps): rate for a Poisson background process in the acceptor channel. rs (RandomState object): random state object used as random number generator. If None, use a random state initialized from seed. seed (uint): when `rs` is None, `seed` is used to initialize the random state, otherwise is ignored. chunksize (int): chunk size used for the on-disk timestamp array comp_filter (tables.Filter or None): compression filter to use for the on-disk `timestamps` and `tparticles` arrays. If None use default compression. overwrite (bool): if True, overwrite any pre-existing timestamps array. If False, never overwrite. The outcome of simulating an existing array is controlled by `skip_existing` flag. skip_existing (bool): if True, skip simulation if the same timestamps array is already present. scale (int): `self.t_step` is multiplied by `scale` to obtain the timestamps units in seconds. path (string): folder where to save the data. timeslice (float or None): timestamps are simulated until `timeslice` seconds. If None, simulate until `self.t_max`. 
""" self.open_store_timestamp(chunksize=chunksize, path=path) rs = self._get_group_randomstate(rs, seed, self.ts_group) if t_chunksize is None: t_chunksize = self.emission.chunkshape[1] timeslice_size = self.n_samples if timeslice is not None: timeslice_size = timeslice // self.t_step name_d = self._get_ts_name_mix(max_rates_d, populations, bg_rate_d, rs) name_a = self._get_ts_name_mix(max_rates_a, populations, bg_rate_a, rs) kw = dict(clk_p=self.t_step / scale, populations=populations, num_particles=self.num_particles, bg_particle=self.num_particles, overwrite=overwrite, chunksize=chunksize) if comp_filter is not None: kw.update(comp_filter=comp_filter) kw.update(name=name_d, max_rates=max_rates_d, bg_rate=bg_rate_d) try: self._timestamps_d, self._tparticles_d = (self.ts_store .add_timestamps(**kw)) except ExistingArrayError as e: if skip_existing: print(' - Skipping already present timestamps array.') return else: raise e kw.update(name=name_a, max_rates=max_rates_a, bg_rate=bg_rate_a) try: self._timestamps_a, self._tparticles_a = (self.ts_store .add_timestamps(**kw)) except ExistingArrayError as e: if skip_existing: print(' - Skipping already present timestamps array.') return else: raise e self.ts_group._v_attrs['init_random_state'] = rs.get_state() self._timestamps_d.attrs['init_random_state'] = rs.get_state() self._timestamps_d.attrs['PyBroMo'] = __version__ self._timestamps_a.attrs['init_random_state'] = rs.get_state() self._timestamps_a.attrs['PyBroMo'] = __version__ # Load emission in chunks, and save only the final timestamps bg_rates_d = [None] * (len(max_rates_d) - 1) + [bg_rate_d] bg_rates_a = [None] * (len(max_rates_a) - 1) + [bg_rate_a] prev_time = 0 for i_start, i_end in iter_chunk_index(timeslice_size, t_chunksize): curr_time = np.around(i_start * self.t_step, decimals=1) if curr_time > prev_time: print(' %.1fs' % curr_time, end='', flush=True) prev_time = curr_time em_chunk = self.emission[:, i_start:i_end] times_chunk_s_d, par_index_chunk_s_d = \ self._sim_timestamps_populations( em_chunk, max_rates_d, populations, bg_rates_d, i_start, rs, scale) times_chunk_s_a, par_index_chunk_s_a = \ self._sim_timestamps_populations( em_chunk, max_rates_a, populations, bg_rates_a, i_start, rs, scale) # Save sorted timestamps (suffix '_s') and corresponding particles self._timestamps_d.append(times_chunk_s_d) self._tparticles_d.append(par_index_chunk_s_d) self._timestamps_a.append(times_chunk_s_a) self._tparticles_a.append(par_index_chunk_s_a) # Save current random state so it can be resumed in the next session self.ts_group._v_attrs['last_random_state'] = rs.get_state() self._timestamps_d._v_attrs['last_random_state'] = rs.get_state() self.ts_store.h5file.flush()
[ "def", "simulate_timestamps_mix_da", "(", "self", ",", "max_rates_d", ",", "max_rates_a", ",", "populations", ",", "bg_rate_d", ",", "bg_rate_a", ",", "rs", "=", "None", ",", "seed", "=", "1", ",", "chunksize", "=", "2", "**", "16", ",", "comp_filter", "=", "None", ",", "overwrite", "=", "False", ",", "skip_existing", "=", "False", ",", "scale", "=", "10", ",", "path", "=", "None", ",", "t_chunksize", "=", "2", "**", "19", ",", "timeslice", "=", "None", ")", ":", "self", ".", "open_store_timestamp", "(", "chunksize", "=", "chunksize", ",", "path", "=", "path", ")", "rs", "=", "self", ".", "_get_group_randomstate", "(", "rs", ",", "seed", ",", "self", ".", "ts_group", ")", "if", "t_chunksize", "is", "None", ":", "t_chunksize", "=", "self", ".", "emission", ".", "chunkshape", "[", "1", "]", "timeslice_size", "=", "self", ".", "n_samples", "if", "timeslice", "is", "not", "None", ":", "timeslice_size", "=", "timeslice", "//", "self", ".", "t_step", "name_d", "=", "self", ".", "_get_ts_name_mix", "(", "max_rates_d", ",", "populations", ",", "bg_rate_d", ",", "rs", ")", "name_a", "=", "self", ".", "_get_ts_name_mix", "(", "max_rates_a", ",", "populations", ",", "bg_rate_a", ",", "rs", ")", "kw", "=", "dict", "(", "clk_p", "=", "self", ".", "t_step", "/", "scale", ",", "populations", "=", "populations", ",", "num_particles", "=", "self", ".", "num_particles", ",", "bg_particle", "=", "self", ".", "num_particles", ",", "overwrite", "=", "overwrite", ",", "chunksize", "=", "chunksize", ")", "if", "comp_filter", "is", "not", "None", ":", "kw", ".", "update", "(", "comp_filter", "=", "comp_filter", ")", "kw", ".", "update", "(", "name", "=", "name_d", ",", "max_rates", "=", "max_rates_d", ",", "bg_rate", "=", "bg_rate_d", ")", "try", ":", "self", ".", "_timestamps_d", ",", "self", ".", "_tparticles_d", "=", "(", "self", ".", "ts_store", ".", "add_timestamps", "(", "*", "*", "kw", ")", ")", "except", "ExistingArrayError", "as", "e", ":", "if", "skip_existing", ":", "print", "(", "' - Skipping already present timestamps array.'", ")", "return", "else", ":", "raise", "e", "kw", ".", "update", "(", "name", "=", "name_a", ",", "max_rates", "=", "max_rates_a", ",", "bg_rate", "=", "bg_rate_a", ")", "try", ":", "self", ".", "_timestamps_a", ",", "self", ".", "_tparticles_a", "=", "(", "self", ".", "ts_store", ".", "add_timestamps", "(", "*", "*", "kw", ")", ")", "except", "ExistingArrayError", "as", "e", ":", "if", "skip_existing", ":", "print", "(", "' - Skipping already present timestamps array.'", ")", "return", "else", ":", "raise", "e", "self", ".", "ts_group", ".", "_v_attrs", "[", "'init_random_state'", "]", "=", "rs", ".", "get_state", "(", ")", "self", ".", "_timestamps_d", ".", "attrs", "[", "'init_random_state'", "]", "=", "rs", ".", "get_state", "(", ")", "self", ".", "_timestamps_d", ".", "attrs", "[", "'PyBroMo'", "]", "=", "__version__", "self", ".", "_timestamps_a", ".", "attrs", "[", "'init_random_state'", "]", "=", "rs", ".", "get_state", "(", ")", "self", ".", "_timestamps_a", ".", "attrs", "[", "'PyBroMo'", "]", "=", "__version__", "# Load emission in chunks, and save only the final timestamps", "bg_rates_d", "=", "[", "None", "]", "*", "(", "len", "(", "max_rates_d", ")", "-", "1", ")", "+", "[", "bg_rate_d", "]", "bg_rates_a", "=", "[", "None", "]", "*", "(", "len", "(", "max_rates_a", ")", "-", "1", ")", "+", "[", "bg_rate_a", "]", "prev_time", "=", "0", "for", "i_start", ",", "i_end", "in", "iter_chunk_index", "(", "timeslice_size", ",", "t_chunksize", ")", ":", "curr_time", "=", 
"np", ".", "around", "(", "i_start", "*", "self", ".", "t_step", ",", "decimals", "=", "1", ")", "if", "curr_time", ">", "prev_time", ":", "print", "(", "' %.1fs'", "%", "curr_time", ",", "end", "=", "''", ",", "flush", "=", "True", ")", "prev_time", "=", "curr_time", "em_chunk", "=", "self", ".", "emission", "[", ":", ",", "i_start", ":", "i_end", "]", "times_chunk_s_d", ",", "par_index_chunk_s_d", "=", "self", ".", "_sim_timestamps_populations", "(", "em_chunk", ",", "max_rates_d", ",", "populations", ",", "bg_rates_d", ",", "i_start", ",", "rs", ",", "scale", ")", "times_chunk_s_a", ",", "par_index_chunk_s_a", "=", "self", ".", "_sim_timestamps_populations", "(", "em_chunk", ",", "max_rates_a", ",", "populations", ",", "bg_rates_a", ",", "i_start", ",", "rs", ",", "scale", ")", "# Save sorted timestamps (suffix '_s') and corresponding particles", "self", ".", "_timestamps_d", ".", "append", "(", "times_chunk_s_d", ")", "self", ".", "_tparticles_d", ".", "append", "(", "par_index_chunk_s_d", ")", "self", ".", "_timestamps_a", ".", "append", "(", "times_chunk_s_a", ")", "self", ".", "_tparticles_a", ".", "append", "(", "par_index_chunk_s_a", ")", "# Save current random state so it can be resumed in the next session", "self", ".", "ts_group", ".", "_v_attrs", "[", "'last_random_state'", "]", "=", "rs", ".", "get_state", "(", ")", "self", ".", "_timestamps_d", ".", "_v_attrs", "[", "'last_random_state'", "]", "=", "rs", ".", "get_state", "(", ")", "self", ".", "ts_store", ".", "h5file", ".", "flush", "(", ")" ]
Compute D and A timestamps arrays for a mixture of N populations. This method reads the emission from disk once, and generates a pair of timestamps arrays (e.g. donor and acceptor) from each chunk. Timestamp data are saved to disk and accessible as pytables arrays in `._timestamps_d/a` and `._tparticles_d/a`. The background generated timestamps are assigned a conventional particle number (last particle index + 1). Arguments: max_rates_d (list): list of the peak max emission rate in the donor channel for each population. max_rates_a (list): list of the peak max emission rate in the acceptor channel for each population. populations (list of slices): slices to `self.particles` defining each population. bg_rate_d (float, cps): rate for a Poisson background process in the donor channel. bg_rate_a (float, cps): rate for a Poisson background process in the acceptor channel. rs (RandomState object): random state object used as random number generator. If None, use a random state initialized from seed. seed (uint): when `rs` is None, `seed` is used to initialize the random state, otherwise is ignored. chunksize (int): chunk size used for the on-disk timestamp array comp_filter (tables.Filter or None): compression filter to use for the on-disk `timestamps` and `tparticles` arrays. If None use default compression. overwrite (bool): if True, overwrite any pre-existing timestamps array. If False, never overwrite. The outcome of simulating an existing array is controlled by `skip_existing` flag. skip_existing (bool): if True, skip simulation if the same timestamps array is already present. scale (int): `self.t_step` is multiplied by `scale` to obtain the timestamps units in seconds. path (string): folder where to save the data. timeslice (float or None): timestamps are simulated until `timeslice` seconds. If None, simulate until `self.t_max`.
[ "Compute", "D", "and", "A", "timestamps", "arrays", "for", "a", "mixture", "of", "N", "populations", "." ]
python
valid
prompt-toolkit/pymux
pymux/main.py
https://github.com/prompt-toolkit/pymux/blob/3f66e62b9de4b2251c7f9afad6c516dc5a30ec67/pymux/main.py#L201-L227
def sync_focus(self, *_): """ Focus the focused window from the pymux arrangement. """ # Pop-up displayed? if self.display_popup: self.app.layout.focus(self.layout_manager.popup_dialog) return # Confirm. if self.confirm_text: return # Custom prompt. if self.prompt_command: return # Focus prompt # Command mode. if self.command_mode: return # Focus command # No windows left, return. We will quit soon. if not self.pymux.arrangement.windows: return pane = self.pymux.arrangement.get_active_pane() self.app.layout.focus(pane.terminal)
[ "def", "sync_focus", "(", "self", ",", "*", "_", ")", ":", "# Pop-up displayed?", "if", "self", ".", "display_popup", ":", "self", ".", "app", ".", "layout", ".", "focus", "(", "self", ".", "layout_manager", ".", "popup_dialog", ")", "return", "# Confirm.", "if", "self", ".", "confirm_text", ":", "return", "# Custom prompt.", "if", "self", ".", "prompt_command", ":", "return", "# Focus prompt", "# Command mode.", "if", "self", ".", "command_mode", ":", "return", "# Focus command", "# No windows left, return. We will quit soon.", "if", "not", "self", ".", "pymux", ".", "arrangement", ".", "windows", ":", "return", "pane", "=", "self", ".", "pymux", ".", "arrangement", ".", "get_active_pane", "(", ")", "self", ".", "app", ".", "layout", ".", "focus", "(", "pane", ".", "terminal", ")" ]
Focus the focused window from the pymux arrangement.
[ "Focus", "the", "focused", "window", "from", "the", "pymux", "arrangement", "." ]
python
train
mar10/wsgidav
wsgidav/util.py
https://github.com/mar10/wsgidav/blob/cec0d84222fc24bea01be1cea91729001963f172/wsgidav/util.py#L1103-L1185
def evaluate_http_conditionals(dav_res, last_modified, entitytag, environ): """Handle 'If-...:' headers (but not 'If:' header). If-Match @see: http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.24 Only perform the action if the client supplied entity matches the same entity on the server. This is mainly for methods like PUT to only update a resource if it has not been modified since the user last updated it. If-Match: "737060cd8c284d8af7ad3082f209582d" If-Modified-Since @see: http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.25 Allows a 304 Not Modified to be returned if content is unchanged If-Modified-Since: Sat, 29 Oct 1994 19:43:31 GMT If-None-Match @see: http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.26 Allows a 304 Not Modified to be returned if content is unchanged, see HTTP ETag If-None-Match: "737060cd8c284d8af7ad3082f209582d" If-Unmodified-Since @see: http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.28 Only send the response if the entity has not been modified since a specific time. """ if not dav_res: return # Conditions # An HTTP/1.1 origin server, upon receiving a conditional request that includes both a # Last-Modified date (e.g., in an If-Modified-Since or If-Unmodified-Since header field) and # one or more entity tags (e.g., in an If-Match, If-None-Match, or If-Range header field) as # cache validators, MUST NOT return a response status of 304 (Not Modified) unless doing so # is consistent with all of the conditional header fields in the request. if "HTTP_IF_MATCH" in environ and dav_res.support_etag(): ifmatchlist = environ["HTTP_IF_MATCH"].split(",") for ifmatchtag in ifmatchlist: ifmatchtag = ifmatchtag.strip(' "\t') if ifmatchtag == entitytag or ifmatchtag == "*": break raise DAVError(HTTP_PRECONDITION_FAILED, "If-Match header condition failed") # TODO: after the refactoring ifModifiedSinceFailed = False if "HTTP_IF_MODIFIED_SINCE" in environ and dav_res.support_modified(): ifmodtime = parse_time_string(environ["HTTP_IF_MODIFIED_SINCE"]) if ifmodtime and ifmodtime > last_modified: ifModifiedSinceFailed = True # If-None-Match # If none of the entity tags match, then the server MAY perform the requested method as if the # If-None-Match header field did not exist, but MUST also ignore any If-Modified-Since header # field (s) in the request. That is, if no entity tags match, then the server MUST NOT return # a 304 (Not Modified) response. ignoreIfModifiedSince = False if "HTTP_IF_NONE_MATCH" in environ and dav_res.support_etag(): ifmatchlist = environ["HTTP_IF_NONE_MATCH"].split(",") for ifmatchtag in ifmatchlist: ifmatchtag = ifmatchtag.strip(' "\t') if ifmatchtag == entitytag or ifmatchtag == "*": # ETag matched. If it's a GET request and we don't have an # conflicting If-Modified header, we return NOT_MODIFIED if ( environ["REQUEST_METHOD"] in ("GET", "HEAD") and not ifModifiedSinceFailed ): raise DAVError(HTTP_NOT_MODIFIED, "If-None-Match header failed") raise DAVError( HTTP_PRECONDITION_FAILED, "If-None-Match header condition failed" ) ignoreIfModifiedSince = True if "HTTP_IF_UNMODIFIED_SINCE" in environ and dav_res.support_modified(): ifunmodtime = parse_time_string(environ["HTTP_IF_UNMODIFIED_SINCE"]) if ifunmodtime and ifunmodtime <= last_modified: raise DAVError( HTTP_PRECONDITION_FAILED, "If-Unmodified-Since header condition failed" ) if ifModifiedSinceFailed and not ignoreIfModifiedSince: raise DAVError(HTTP_NOT_MODIFIED, "If-Modified-Since header condition failed") return
[ "def", "evaluate_http_conditionals", "(", "dav_res", ",", "last_modified", ",", "entitytag", ",", "environ", ")", ":", "if", "not", "dav_res", ":", "return", "# Conditions", "# An HTTP/1.1 origin server, upon receiving a conditional request that includes both a", "# Last-Modified date (e.g., in an If-Modified-Since or If-Unmodified-Since header field) and", "# one or more entity tags (e.g., in an If-Match, If-None-Match, or If-Range header field) as", "# cache validators, MUST NOT return a response status of 304 (Not Modified) unless doing so", "# is consistent with all of the conditional header fields in the request.", "if", "\"HTTP_IF_MATCH\"", "in", "environ", "and", "dav_res", ".", "support_etag", "(", ")", ":", "ifmatchlist", "=", "environ", "[", "\"HTTP_IF_MATCH\"", "]", ".", "split", "(", "\",\"", ")", "for", "ifmatchtag", "in", "ifmatchlist", ":", "ifmatchtag", "=", "ifmatchtag", ".", "strip", "(", "' \"\\t'", ")", "if", "ifmatchtag", "==", "entitytag", "or", "ifmatchtag", "==", "\"*\"", ":", "break", "raise", "DAVError", "(", "HTTP_PRECONDITION_FAILED", ",", "\"If-Match header condition failed\"", ")", "# TODO: after the refactoring", "ifModifiedSinceFailed", "=", "False", "if", "\"HTTP_IF_MODIFIED_SINCE\"", "in", "environ", "and", "dav_res", ".", "support_modified", "(", ")", ":", "ifmodtime", "=", "parse_time_string", "(", "environ", "[", "\"HTTP_IF_MODIFIED_SINCE\"", "]", ")", "if", "ifmodtime", "and", "ifmodtime", ">", "last_modified", ":", "ifModifiedSinceFailed", "=", "True", "# If-None-Match", "# If none of the entity tags match, then the server MAY perform the requested method as if the", "# If-None-Match header field did not exist, but MUST also ignore any If-Modified-Since header", "# field (s) in the request. That is, if no entity tags match, then the server MUST NOT return", "# a 304 (Not Modified) response.", "ignoreIfModifiedSince", "=", "False", "if", "\"HTTP_IF_NONE_MATCH\"", "in", "environ", "and", "dav_res", ".", "support_etag", "(", ")", ":", "ifmatchlist", "=", "environ", "[", "\"HTTP_IF_NONE_MATCH\"", "]", ".", "split", "(", "\",\"", ")", "for", "ifmatchtag", "in", "ifmatchlist", ":", "ifmatchtag", "=", "ifmatchtag", ".", "strip", "(", "' \"\\t'", ")", "if", "ifmatchtag", "==", "entitytag", "or", "ifmatchtag", "==", "\"*\"", ":", "# ETag matched. If it's a GET request and we don't have an", "# conflicting If-Modified header, we return NOT_MODIFIED", "if", "(", "environ", "[", "\"REQUEST_METHOD\"", "]", "in", "(", "\"GET\"", ",", "\"HEAD\"", ")", "and", "not", "ifModifiedSinceFailed", ")", ":", "raise", "DAVError", "(", "HTTP_NOT_MODIFIED", ",", "\"If-None-Match header failed\"", ")", "raise", "DAVError", "(", "HTTP_PRECONDITION_FAILED", ",", "\"If-None-Match header condition failed\"", ")", "ignoreIfModifiedSince", "=", "True", "if", "\"HTTP_IF_UNMODIFIED_SINCE\"", "in", "environ", "and", "dav_res", ".", "support_modified", "(", ")", ":", "ifunmodtime", "=", "parse_time_string", "(", "environ", "[", "\"HTTP_IF_UNMODIFIED_SINCE\"", "]", ")", "if", "ifunmodtime", "and", "ifunmodtime", "<=", "last_modified", ":", "raise", "DAVError", "(", "HTTP_PRECONDITION_FAILED", ",", "\"If-Unmodified-Since header condition failed\"", ")", "if", "ifModifiedSinceFailed", "and", "not", "ignoreIfModifiedSince", ":", "raise", "DAVError", "(", "HTTP_NOT_MODIFIED", ",", "\"If-Modified-Since header condition failed\"", ")", "return" ]
Handle 'If-...:' headers (but not 'If:' header). If-Match @see: http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.24 Only perform the action if the client supplied entity matches the same entity on the server. This is mainly for methods like PUT to only update a resource if it has not been modified since the user last updated it. If-Match: "737060cd8c284d8af7ad3082f209582d" If-Modified-Since @see: http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.25 Allows a 304 Not Modified to be returned if content is unchanged If-Modified-Since: Sat, 29 Oct 1994 19:43:31 GMT If-None-Match @see: http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.26 Allows a 304 Not Modified to be returned if content is unchanged, see HTTP ETag If-None-Match: "737060cd8c284d8af7ad3082f209582d" If-Unmodified-Since @see: http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.28 Only send the response if the entity has not been modified since a specific time.
[ "Handle", "If", "-", "...", ":", "headers", "(", "but", "not", "If", ":", "header", ")", "." ]
python
valid
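A hedged usage sketch for the conditional-request helper in the record above: a minimal stub resource and a hand-built WSGI environ are enough to exercise the If-None-Match path. The import locations follow the repo path shown here, and the stub class is an assumption made only for illustration.

import time

from wsgidav.util import evaluate_http_conditionals
from wsgidav.dav_error import DAVError


class StubResource:
    # Only the two capability checks used by the helper are needed here.
    def support_etag(self):
        return True

    def support_modified(self):
        return True


environ = {
    "REQUEST_METHOD": "GET",
    "HTTP_IF_NONE_MATCH": '"etag-1"',   # client already holds this representation
}

try:
    evaluate_http_conditionals(StubResource(), time.time(), "etag-1", environ)
except DAVError as err:
    # A matching If-None-Match on GET short-circuits to 304 Not Modified.
    print("conditional GET answered early:", err)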
gwastro/pycbc
pycbc/inference/io/base_hdf.py
https://github.com/gwastro/pycbc/blob/7a64cdd104d263f1b6ea0b01e6841837d05a4cb3/pycbc/inference/io/base_hdf.py#L554-L568
def write_injections(self, injection_file): """Writes injection parameters from the given injection file. Everything in the injection file is copied to ``injections_group``. Parameters ---------- injection_file : str Path to HDF injection file. """ try: with h5py.File(injection_file, "r") as fp: super(BaseInferenceFile, self).copy(fp, self.injections_group) except IOError: logging.warn("Could not read %s as an HDF file", injection_file)
[ "def", "write_injections", "(", "self", ",", "injection_file", ")", ":", "try", ":", "with", "h5py", ".", "File", "(", "injection_file", ",", "\"r\"", ")", "as", "fp", ":", "super", "(", "BaseInferenceFile", ",", "self", ")", ".", "copy", "(", "fp", ",", "self", ".", "injections_group", ")", "except", "IOError", ":", "logging", ".", "warn", "(", "\"Could not read %s as an HDF file\"", ",", "injection_file", ")" ]
Writes injection parameters from the given injection file. Everything in the injection file is copied to ``injections_group``. Parameters ---------- injection_file : str Path to HDF injection file.
[ "Writes", "injection", "parameters", "from", "the", "given", "injection", "file", "." ]
python
train
gtaylor/django-dynamodb-sessions
dynamodb_sessions/backends/dynamodb.py
https://github.com/gtaylor/django-dynamodb-sessions/blob/434031aa483b26b0b7b5acbdf683bbe1575956f1/dynamodb_sessions/backends/dynamodb.py#L136-L179
def save(self, must_create=False): """ Saves the current session data to the database. :keyword bool must_create: If ``True``, a ``CreateError`` exception will be raised if the saving operation doesn't create a *new* entry (as opposed to possibly updating an existing entry). :raises: ``CreateError`` if ``must_create`` is ``True`` and a session with the current session key already exists. """ # If the save method is called with must_create equal to True, I'm # setting self._session_key equal to None and when # self.get_or_create_session_key is called the new # session_key will be created. if must_create: self._session_key = None self._get_or_create_session_key() update_kwargs = { 'Key': {'session_key': self.session_key}, } attribute_names = {'#data': 'data'} attribute_values = { ':data': self.encode(self._get_session(no_load=must_create)) } set_updates = ['#data = :data'] if must_create: # Set condition to ensure session with same key doesnt exist update_kwargs['ConditionExpression'] = \ DynamoConditionAttr('session_key').not_exists() attribute_values[':created'] = int(time.time()) set_updates.append('created = :created') update_kwargs['UpdateExpression'] = 'SET ' + ','.join(set_updates) update_kwargs['ExpressionAttributeValues'] = attribute_values update_kwargs['ExpressionAttributeNames'] = attribute_names try: self.table.update_item(**update_kwargs) except ClientError as e: error_code = e.response['Error']['Code'] if error_code == 'ConditionalCheckFailedException': raise CreateError raise
[ "def", "save", "(", "self", ",", "must_create", "=", "False", ")", ":", "# If the save method is called with must_create equal to True, I'm", "# setting self._session_key equal to None and when", "# self.get_or_create_session_key is called the new", "# session_key will be created.", "if", "must_create", ":", "self", ".", "_session_key", "=", "None", "self", ".", "_get_or_create_session_key", "(", ")", "update_kwargs", "=", "{", "'Key'", ":", "{", "'session_key'", ":", "self", ".", "session_key", "}", ",", "}", "attribute_names", "=", "{", "'#data'", ":", "'data'", "}", "attribute_values", "=", "{", "':data'", ":", "self", ".", "encode", "(", "self", ".", "_get_session", "(", "no_load", "=", "must_create", ")", ")", "}", "set_updates", "=", "[", "'#data = :data'", "]", "if", "must_create", ":", "# Set condition to ensure session with same key doesnt exist", "update_kwargs", "[", "'ConditionExpression'", "]", "=", "DynamoConditionAttr", "(", "'session_key'", ")", ".", "not_exists", "(", ")", "attribute_values", "[", "':created'", "]", "=", "int", "(", "time", ".", "time", "(", ")", ")", "set_updates", ".", "append", "(", "'created = :created'", ")", "update_kwargs", "[", "'UpdateExpression'", "]", "=", "'SET '", "+", "','", ".", "join", "(", "set_updates", ")", "update_kwargs", "[", "'ExpressionAttributeValues'", "]", "=", "attribute_values", "update_kwargs", "[", "'ExpressionAttributeNames'", "]", "=", "attribute_names", "try", ":", "self", ".", "table", ".", "update_item", "(", "*", "*", "update_kwargs", ")", "except", "ClientError", "as", "e", ":", "error_code", "=", "e", ".", "response", "[", "'Error'", "]", "[", "'Code'", "]", "if", "error_code", "==", "'ConditionalCheckFailedException'", ":", "raise", "CreateError", "raise" ]
Saves the current session data to the database. :keyword bool must_create: If ``True``, a ``CreateError`` exception will be raised if the saving operation doesn't create a *new* entry (as opposed to possibly updating an existing entry). :raises: ``CreateError`` if ``must_create`` is ``True`` and a session with the current session key already exists.
[ "Saves", "the", "current", "session", "data", "to", "the", "database", "." ]
python
train
resync/resync
resync/dump.py
https://github.com/resync/resync/blob/98292c17b2c00f2d6f5191c6ab51fef8c292a018/resync/dump.py#L188-L197
def archive_path(self, real_path): """Return the archive path for file with real_path. Mapping is based on removal of self.path_prefix which is determined by self.check_files(). """ if (not self.path_prefix): return(real_path) else: return(os.path.relpath(real_path, self.path_prefix))
[ "def", "archive_path", "(", "self", ",", "real_path", ")", ":", "if", "(", "not", "self", ".", "path_prefix", ")", ":", "return", "(", "real_path", ")", "else", ":", "return", "(", "os", ".", "path", ".", "relpath", "(", "real_path", ",", "self", ".", "path_prefix", ")", ")" ]
Return the archive path for file with real_path. Mapping is based on removal of self.path_prefix which is determined by self.check_files().
[ "Return", "the", "archive", "path", "for", "file", "with", "real_path", "." ]
python
train
abilian/abilian-core
abilian/web/assets/mixin.py
https://github.com/abilian/abilian-core/blob/0a71275bf108c3d51e13ca9e093c0249235351e3/abilian/web/assets/mixin.py#L115-L138
def register_asset(self, type_, *assets): """Register webassets bundle to be served on all pages. :param type_: `"css"`, `"js-top"` or `"js""`. :param assets: a path to file, a :ref:`webassets.Bundle <webassets:bundles>` instance or a callable that returns a :ref:`webassets.Bundle <webassets:bundles>` instance. :raises KeyError: if `type_` is not supported. """ supported = list(self._assets_bundles.keys()) if type_ not in supported: msg = "Invalid type: {}. Valid types: {}".format( repr(type_), ", ".join(sorted(supported)) ) raise KeyError(msg) for asset in assets: if not isinstance(asset, Bundle) and callable(asset): asset = asset() self._assets_bundles[type_].setdefault("bundles", []).append(asset)
[ "def", "register_asset", "(", "self", ",", "type_", ",", "*", "assets", ")", ":", "supported", "=", "list", "(", "self", ".", "_assets_bundles", ".", "keys", "(", ")", ")", "if", "type_", "not", "in", "supported", ":", "msg", "=", "\"Invalid type: {}. Valid types: {}\"", ".", "format", "(", "repr", "(", "type_", ")", ",", "\", \"", ".", "join", "(", "sorted", "(", "supported", ")", ")", ")", "raise", "KeyError", "(", "msg", ")", "for", "asset", "in", "assets", ":", "if", "not", "isinstance", "(", "asset", ",", "Bundle", ")", "and", "callable", "(", "asset", ")", ":", "asset", "=", "asset", "(", ")", "self", ".", "_assets_bundles", "[", "type_", "]", ".", "setdefault", "(", "\"bundles\"", ",", "[", "]", ")", ".", "append", "(", "asset", ")" ]
Register webassets bundle to be served on all pages. :param type_: `"css"`, `"js-top"` or `"js"`. :param assets: a path to a file, a :ref:`webassets.Bundle <webassets:bundles>` instance or a callable that returns a :ref:`webassets.Bundle <webassets:bundles>` instance. :raises KeyError: if `type_` is not supported.
[ "Register", "webassets", "bundle", "to", "be", "served", "on", "all", "pages", "." ]
python
train
googleapis/google-cloud-python
bigquery/google/cloud/bigquery/client.py
https://github.com/googleapis/google-cloud-python/blob/85e80125a59cb10f8cb105f25ecc099e4b940b50/bigquery/google/cloud/bigquery/client.py#L1964-L1977
def schema_to_json(self, schema_list, destination): """Takes a list of schema field objects. Serializes the list of schema field objects as json to a file. Destination is a file path or a file object. """ json_schema_list = [f.to_api_repr() for f in schema_list] if isinstance(destination, io.IOBase): return self._schema_to_json_file_object(json_schema_list, destination) with open(destination, mode="w") as file_obj: return self._schema_to_json_file_object(json_schema_list, file_obj)
[ "def", "schema_to_json", "(", "self", ",", "schema_list", ",", "destination", ")", ":", "json_schema_list", "=", "[", "f", ".", "to_api_repr", "(", ")", "for", "f", "in", "schema_list", "]", "if", "isinstance", "(", "destination", ",", "io", ".", "IOBase", ")", ":", "return", "self", ".", "_schema_to_json_file_object", "(", "json_schema_list", ",", "destination", ")", "with", "open", "(", "destination", ",", "mode", "=", "\"w\"", ")", "as", "file_obj", ":", "return", "self", ".", "_schema_to_json_file_object", "(", "json_schema_list", ",", "file_obj", ")" ]
Takes a list of schema field objects. Serializes the list of schema field objects as json to a file. Destination is a file path or a file object.
[ "Takes", "a", "list", "of", "schema", "field", "objects", "." ]
python
train
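A short usage sketch for the schema serializer above, assuming application-default credentials are available for constructing the client; writing to an in-memory buffer exercises the file-object branch without touching disk.

import io

from google.cloud import bigquery

client = bigquery.Client()                 # needs default credentials / project
schema = [
    bigquery.SchemaField('full_name', 'STRING', mode='REQUIRED'),
    bigquery.SchemaField('age', 'INTEGER'),
]

buffer = io.StringIO()                     # any io.IOBase destination is accepted
client.schema_to_json(schema, buffer)
print(buffer.getvalue())                   # JSON list of the field definitions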
utek/pyseaweed
pyseaweed/weed.py
https://github.com/utek/pyseaweed/blob/218049329885425a2b8370157fa44952e64516be/pyseaweed/weed.py#L140-L147
def delete_file(self, fid): """ Delete file from WeedFS :param string fid: File ID """ url = self.get_file_url(fid) return self.conn.delete_data(url)
[ "def", "delete_file", "(", "self", ",", "fid", ")", ":", "url", "=", "self", ".", "get_file_url", "(", "fid", ")", "return", "self", ".", "conn", ".", "delete_data", "(", "url", ")" ]
Delete file from WeedFS :param string fid: File ID
[ "Delete", "file", "from", "WeedFS" ]
python
train
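A possible call sequence for the deletion helper above; it assumes a SeaweedFS master reachable on the default local address, and the fid value is a placeholder rather than a real file id.

from pyseaweed import WeedFS

weed = WeedFS('localhost', 9333)           # master host and port
fid = '3,01637037d6'                       # placeholder id of a previously uploaded file
weed.delete_file(fid)                      # forwards a DELETE for that fid to the cluster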
mushkevych/scheduler
synergy/scheduler/timetable.py
https://github.com/mushkevych/scheduler/blob/6740331360f49083c208085fb5a60ce80ebf418b/synergy/scheduler/timetable.py#L125-L152
def skip_tree_node(self, tree_node, tx_context=None): """ method skips the node and all its dependants and child nodes """ if not tx_context: # create transaction context if one was not provided # format: {process_name: {timeperiod: AbstractTreeNode} } tx_context = collections.defaultdict(dict) if tree_node.timeperiod in tx_context[tree_node.process_name]: # the node has already been marked for skipping return tx_context if tree_node.job_record.is_finished: # the node is finished and does not require skipping pass else: state_machine_name = context.process_context[tree_node.process_name].state_machine_name state_machine = self.state_machines[state_machine_name] state_machine.skip_job(tree_node.job_record) tx_context[tree_node.process_name][tree_node.timeperiod] = tree_node for timeperiod, node in tree_node.children.items(): self.skip_tree_node(node, tx_context) dependant_nodes = self._find_dependant_tree_nodes(tree_node) for node in dependant_nodes: self.skip_tree_node(node, tx_context) return tx_context
[ "def", "skip_tree_node", "(", "self", ",", "tree_node", ",", "tx_context", "=", "None", ")", ":", "if", "not", "tx_context", ":", "# create transaction context if one was not provided", "# format: {process_name: {timeperiod: AbstractTreeNode} }", "tx_context", "=", "collections", ".", "defaultdict", "(", "dict", ")", "if", "tree_node", ".", "timeperiod", "in", "tx_context", "[", "tree_node", ".", "process_name", "]", ":", "# the node has already been marked for skipping", "return", "tx_context", "if", "tree_node", ".", "job_record", ".", "is_finished", ":", "# the node is finished and does not require skipping", "pass", "else", ":", "state_machine_name", "=", "context", ".", "process_context", "[", "tree_node", ".", "process_name", "]", ".", "state_machine_name", "state_machine", "=", "self", ".", "state_machines", "[", "state_machine_name", "]", "state_machine", ".", "skip_job", "(", "tree_node", ".", "job_record", ")", "tx_context", "[", "tree_node", ".", "process_name", "]", "[", "tree_node", ".", "timeperiod", "]", "=", "tree_node", "for", "timeperiod", ",", "node", "in", "tree_node", ".", "children", ".", "items", "(", ")", ":", "self", ".", "skip_tree_node", "(", "node", ",", "tx_context", ")", "dependant_nodes", "=", "self", ".", "_find_dependant_tree_nodes", "(", "tree_node", ")", "for", "node", "in", "dependant_nodes", ":", "self", ".", "skip_tree_node", "(", "node", ",", "tx_context", ")", "return", "tx_context" ]
method skips the node and all its dependants and child nodes
[ "method", "skips", "the", "node", "and", "all", "its", "dependants", "and", "child", "nodes" ]
python
train
SKA-ScienceDataProcessor/integration-prototype
sip/execution_control/configuration_db/sip_config_db/utils/generate_sbi_config.py
https://github.com/SKA-ScienceDataProcessor/integration-prototype/blob/8c8006de6ad71dcd44114b0338780738079c87d4/sip/execution_control/configuration_db/sip_config_db/utils/generate_sbi_config.py#L33-L59
def add_workflow_definitions(sbi_config: dict): """Add any missing SBI workflow definitions as placeholders. This is a utility function used in testing and adds mock / test workflow definitions to the database for workflows defined in the specified SBI config. Args: sbi_config (dict): SBI configuration dictionary. """ registered_workflows = [] for i in range(len(sbi_config['processing_blocks'])): workflow_config = sbi_config['processing_blocks'][i]['workflow'] workflow_name = '{}:{}'.format(workflow_config['id'], workflow_config['version']) if workflow_name in registered_workflows: continue workflow_definition = dict( id=workflow_config['id'], version=workflow_config['version'], stages=[] ) key = "workflow_definitions:{}:{}".format(workflow_config['id'], workflow_config['version']) DB.save_dict(key, workflow_definition, hierarchical=False) registered_workflows.append(workflow_name)
[ "def", "add_workflow_definitions", "(", "sbi_config", ":", "dict", ")", ":", "registered_workflows", "=", "[", "]", "for", "i", "in", "range", "(", "len", "(", "sbi_config", "[", "'processing_blocks'", "]", ")", ")", ":", "workflow_config", "=", "sbi_config", "[", "'processing_blocks'", "]", "[", "i", "]", "[", "'workflow'", "]", "workflow_name", "=", "'{}:{}'", ".", "format", "(", "workflow_config", "[", "'id'", "]", ",", "workflow_config", "[", "'version'", "]", ")", "if", "workflow_name", "in", "registered_workflows", ":", "continue", "workflow_definition", "=", "dict", "(", "id", "=", "workflow_config", "[", "'id'", "]", ",", "version", "=", "workflow_config", "[", "'version'", "]", ",", "stages", "=", "[", "]", ")", "key", "=", "\"workflow_definitions:{}:{}\"", ".", "format", "(", "workflow_config", "[", "'id'", "]", ",", "workflow_config", "[", "'version'", "]", ")", "DB", ".", "save_dict", "(", "key", ",", "workflow_definition", ",", "hierarchical", "=", "False", ")", "registered_workflows", ".", "append", "(", "workflow_name", ")" ]
Add any missing SBI workflow definitions as placeholders. This is a utility function used in testing and adds mock / test workflow definitions to the database for workflows defined in the specified SBI config. Args: sbi_config (dict): SBI configuration dictionary.
[ "Add", "any", "missing", "SBI", "workflow", "definitions", "as", "placeholders", "." ]
python
train
dswah/pyGAM
pygam/utils.py
https://github.com/dswah/pyGAM/blob/b3e5c3cd580f0a3ad69f9372861624f67760c325/pygam/utils.py#L802-L830
def flatten(iterable): """convenience tool to flatten any nested iterable example: flatten([[[],[4]],[[[5,[6,7, []]]]]]) >>> [4, 5, 6, 7] flatten('hello') >>> 'hello' Parameters ---------- iterable Returns ------- flattened object """ if isiterable(iterable): flat = [] for item in list(iterable): item = flatten(item) if not isiterable(item): item = [item] flat += item return flat else: return iterable
[ "def", "flatten", "(", "iterable", ")", ":", "if", "isiterable", "(", "iterable", ")", ":", "flat", "=", "[", "]", "for", "item", "in", "list", "(", "iterable", ")", ":", "item", "=", "flatten", "(", "item", ")", "if", "not", "isiterable", "(", "item", ")", ":", "item", "=", "[", "item", "]", "flat", "+=", "item", "return", "flat", "else", ":", "return", "iterable" ]
convenience tool to flatten any nested iterable example: flatten([[[],[4]],[[[5,[6,7, []]]]]]) >>> [4, 5, 6, 7] flatten('hello') >>> 'hello' Parameters ---------- iterable Returns ------- flattened object
[ "convenience", "tool", "to", "flatten", "any", "nested", "iterable" ]
python
train
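The flatten helper above is easy to try directly; the calls below mirror the doctest-style examples already given in its docstring.

from pygam.utils import flatten

print(flatten([[[], [4]], [[[5, [6, 7, []]]]]]))   # [4, 5, 6, 7]
print(flatten('hello'))                            # 'hello' (strings are left alone)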
intake/intake
intake/catalog/local.py
https://github.com/intake/intake/blob/277b96bfdee39d8a3048ea5408c6d6716d568336/intake/catalog/local.py#L263-L272
def get(self, **user_parameters): """Instantiate the DataSource for the given parameters""" plugin, open_args = self._create_open_args(user_parameters) data_source = plugin(**open_args) data_source.catalog_object = self._catalog data_source.name = self.name data_source.description = self._description data_source.cat = self._catalog return data_source
[ "def", "get", "(", "self", ",", "*", "*", "user_parameters", ")", ":", "plugin", ",", "open_args", "=", "self", ".", "_create_open_args", "(", "user_parameters", ")", "data_source", "=", "plugin", "(", "*", "*", "open_args", ")", "data_source", ".", "catalog_object", "=", "self", ".", "_catalog", "data_source", ".", "name", "=", "self", ".", "name", "data_source", ".", "description", "=", "self", ".", "_description", "data_source", ".", "cat", "=", "self", ".", "_catalog", "return", "data_source" ]
Instantiate the DataSource for the given parameters
[ "Instantiate", "the", "DataSource", "for", "the", "given", "parameters" ]
python
train
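A sketch of how the entry's get() is usually reached from user code; 'catalog.yml', the 'daily_sales' entry and its 'year' parameter are hypothetical names used only to illustrate the call path, and entry access styles vary a little between intake versions.

import intake

cat = intake.open_catalog('catalog.yml')
source = cat.daily_sales(year=2020)    # user parameters are resolved and get() runs
df = source.read()
print(source.description, len(df))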
Kozea/cairocffi
cairocffi/context.py
https://github.com/Kozea/cairocffi/blob/450853add7e32eea20985b6aa5f54d9cb3cd04fe/cairocffi/context.py#L1709-L1752
def select_font_face(self, family='', slant=constants.FONT_SLANT_NORMAL, weight=constants.FONT_WEIGHT_NORMAL): """Selects a family and style of font from a simplified description as a family name, slant and weight. .. note:: The :meth:`select_font_face` method is part of what the cairo designers call the "toy" text API. It is convenient for short demos and simple programs, but it is not expected to be adequate for serious text-using applications. See :ref:`fonts` for details. Cairo provides no operation to list available family names on the system (this is a "toy", remember), but the standard CSS2 generic family names, (``"serif"``, ``"sans-serif"``, ``"cursive"``, ``"fantasy"``, ``"monospace"``), are likely to work as expected. If family starts with the string ``"cairo:"``, or if no native font backends are compiled in, cairo will use an internal font family. The internal font family recognizes many modifiers in the family string, most notably, it recognizes the string ``"monospace"``. That is, the family name ``"cairo:monospace"`` will use the monospace version of the internal font family. If text is drawn without a call to :meth:`select_font_face`, (nor :meth:`set_font_face` nor :meth:`set_scaled_font`), the default family is platform-specific, but is essentially ``"sans-serif"``. Default slant is :obj:`NORMAL <FONT_SLANT_NORMAL>`, and default weight is :obj:`NORMAL <FONT_WEIGHT_NORMAL>`. This method is equivalent to a call to :class:`ToyFontFace` followed by :meth:`set_font_face`. """ cairo.cairo_select_font_face( self._pointer, _encode_string(family), slant, weight) self._check_status()
[ "def", "select_font_face", "(", "self", ",", "family", "=", "''", ",", "slant", "=", "constants", ".", "FONT_SLANT_NORMAL", ",", "weight", "=", "constants", ".", "FONT_WEIGHT_NORMAL", ")", ":", "cairo", ".", "cairo_select_font_face", "(", "self", ".", "_pointer", ",", "_encode_string", "(", "family", ")", ",", "slant", ",", "weight", ")", "self", ".", "_check_status", "(", ")" ]
Selects a family and style of font from a simplified description as a family name, slant and weight. .. note:: The :meth:`select_font_face` method is part of what the cairo designers call the "toy" text API. It is convenient for short demos and simple programs, but it is not expected to be adequate for serious text-using applications. See :ref:`fonts` for details. Cairo provides no operation to list available family names on the system (this is a "toy", remember), but the standard CSS2 generic family names, (``"serif"``, ``"sans-serif"``, ``"cursive"``, ``"fantasy"``, ``"monospace"``), are likely to work as expected. If family starts with the string ``"cairo:"``, or if no native font backends are compiled in, cairo will use an internal font family. The internal font family recognizes many modifiers in the family string, most notably, it recognizes the string ``"monospace"``. That is, the family name ``"cairo:monospace"`` will use the monospace version of the internal font family. If text is drawn without a call to :meth:`select_font_face`, (nor :meth:`set_font_face` nor :meth:`set_scaled_font`), the default family is platform-specific, but is essentially ``"sans-serif"``. Default slant is :obj:`NORMAL <FONT_SLANT_NORMAL>`, and default weight is :obj:`NORMAL <FONT_WEIGHT_NORMAL>`. This method is equivalent to a call to :class:`ToyFontFace` followed by :meth:`set_font_face`.
[ "Selects", "a", "family", "and", "style", "of", "font", "from", "a", "simplified", "description", "as", "a", "family", "name", "slant", "and", "weight", "." ]
python
train
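A minimal cairocffi sketch exercising the toy text API described above; the surface size, font choice and output file name are arbitrary.

import cairocffi

surface = cairocffi.ImageSurface(cairocffi.FORMAT_ARGB32, 240, 80)
context = cairocffi.Context(surface)
context.select_font_face(
    'sans-serif',
    cairocffi.FONT_SLANT_ITALIC,
    cairocffi.FONT_WEIGHT_BOLD,
)
context.set_font_size(24)
context.move_to(10, 50)
context.show_text('toy text API')
surface.write_to_png('toy_text.png')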
inveniosoftware/invenio-webhooks
invenio_webhooks/views.py
https://github.com/inveniosoftware/invenio-webhooks/blob/f407cb2245464543ee474a81189fb9d3978bdde5/invenio_webhooks/views.py#L69-L81
def make_response(event): """Make a response from webhook event.""" code, message = event.status response = jsonify(**event.response) response.headers['X-Hub-Event'] = event.receiver_id response.headers['X-Hub-Delivery'] = event.id if message: response.headers['X-Hub-Info'] = message add_link_header(response, {'self': url_for( '.event_item', receiver_id=event.receiver_id, event_id=event.id, _external=True )}) return response, code
[ "def", "make_response", "(", "event", ")", ":", "code", ",", "message", "=", "event", ".", "status", "response", "=", "jsonify", "(", "*", "*", "event", ".", "response", ")", "response", ".", "headers", "[", "'X-Hub-Event'", "]", "=", "event", ".", "receiver_id", "response", ".", "headers", "[", "'X-Hub-Delivery'", "]", "=", "event", ".", "id", "if", "message", ":", "response", ".", "headers", "[", "'X-Hub-Info'", "]", "=", "message", "add_link_header", "(", "response", ",", "{", "'self'", ":", "url_for", "(", "'.event_item'", ",", "receiver_id", "=", "event", ".", "receiver_id", ",", "event_id", "=", "event", ".", "id", ",", "_external", "=", "True", ")", "}", ")", "return", "response", ",", "code" ]
Make a response from webhook event.
[ "Make", "a", "response", "from", "webhook", "event", "." ]
python
train
erik/alexandra
alexandra/util.py
https://github.com/erik/alexandra/blob/8bea94efa1af465254a553dc4dfea3fa552b18da/alexandra/util.py#L87-L112
def validate_request_timestamp(req_body, max_diff=150): """Ensure the request's timestamp doesn't fall outside of the app's specified tolerance. Returns True if this request is valid, False otherwise. :param req_body: JSON object parsed out of the raw POST data of a request. :param max_diff: Maximum allowable difference in seconds between request timestamp and system clock. Amazon requires <= 150 seconds for published skills. """ time_str = req_body.get('request', {}).get('timestamp') if not time_str: log.error('timestamp not present %s', req_body) return False req_ts = datetime.strptime(time_str, "%Y-%m-%dT%H:%M:%SZ") diff = (datetime.utcnow() - req_ts).total_seconds() if abs(diff) > max_diff: log.error('timestamp difference too high: %d sec', diff) return False return True
[ "def", "validate_request_timestamp", "(", "req_body", ",", "max_diff", "=", "150", ")", ":", "time_str", "=", "req_body", ".", "get", "(", "'request'", ",", "{", "}", ")", ".", "get", "(", "'timestamp'", ")", "if", "not", "time_str", ":", "log", ".", "error", "(", "'timestamp not present %s'", ",", "req_body", ")", "return", "False", "req_ts", "=", "datetime", ".", "strptime", "(", "time_str", ",", "\"%Y-%m-%dT%H:%M:%SZ\"", ")", "diff", "=", "(", "datetime", ".", "utcnow", "(", ")", "-", "req_ts", ")", ".", "total_seconds", "(", ")", "if", "abs", "(", "diff", ")", ">", "max_diff", ":", "log", ".", "error", "(", "'timestamp difference too high: %d sec'", ",", "diff", ")", "return", "False", "return", "True" ]
Ensure the request's timestamp doesn't fall outside of the app's specified tolerance. Returns True if this request is valid, False otherwise. :param req_body: JSON object parsed out of the raw POST data of a request. :param max_diff: Maximum allowable difference in seconds between request timestamp and system clock. Amazon requires <= 150 seconds for published skills.
[ "Ensure", "the", "request", "s", "timestamp", "doesn", "t", "fall", "outside", "of", "the", "app", "s", "specified", "tolerance", "." ]
python
train
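Two quick checks against the timestamp validator above: a freshly generated request body passes, while a stale hard-coded timestamp falls outside the default 150 second window. The request dicts are hand-built for illustration.

from datetime import datetime

from alexandra.util import validate_request_timestamp

fresh = {'request': {'timestamp': datetime.utcnow().strftime('%Y-%m-%dT%H:%M:%SZ')}}
stale = {'request': {'timestamp': '2015-01-01T00:00:00Z'}}

print(validate_request_timestamp(fresh))   # True
print(validate_request_timestamp(stale))   # False, difference is far above 150 s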
CalebBell/thermo
thermo/chemical.py
https://github.com/CalebBell/thermo/blob/3857ed023a3e64fd3039a32d53576c24990ef1c3/thermo/chemical.py#L2322-L2343
def alphal(self): r'''Thermal diffusivity of the liquid phase of the chemical at its current temperature and pressure, in units of [m^2/s]. .. math:: \alpha = \frac{k}{\rho Cp} Utilizes the temperature and pressure dependent object oriented interfaces :obj:`thermo.volume.VolumeLiquid`, :obj:`thermo.thermal_conductivity.ThermalConductivityLiquid`, and :obj:`thermo.heat_capacity.HeatCapacityLiquid` to calculate the actual properties. Examples -------- >>> Chemical('nitrogen', T=70).alphal 9.444949636299626e-08 ''' kl, rhol, Cpl = self.kl, self.rhol, self.Cpl if all([kl, rhol, Cpl]): return thermal_diffusivity(k=kl, rho=rhol, Cp=Cpl) return None
[ "def", "alphal", "(", "self", ")", ":", "kl", ",", "rhol", ",", "Cpl", "=", "self", ".", "kl", ",", "self", ".", "rhol", ",", "self", ".", "Cpl", "if", "all", "(", "[", "kl", ",", "rhol", ",", "Cpl", "]", ")", ":", "return", "thermal_diffusivity", "(", "k", "=", "kl", ",", "rho", "=", "rhol", ",", "Cp", "=", "Cpl", ")", "return", "None" ]
r'''Thermal diffusivity of the liquid phase of the chemical at its current temperature and pressure, in units of [m^2/s]. .. math:: \alpha = \frac{k}{\rho Cp} Utilizes the temperature and pressure dependent object oriented interfaces :obj:`thermo.volume.VolumeLiquid`, :obj:`thermo.thermal_conductivity.ThermalConductivityLiquid`, and :obj:`thermo.heat_capacity.HeatCapacityLiquid` to calculate the actual properties. Examples -------- >>> Chemical('nitrogen', T=70).alphal 9.444949636299626e-08
[ "r", "Thermal", "diffusivity", "of", "the", "liquid", "phase", "of", "the", "chemical", "at", "its", "current", "temperature", "and", "pressure", "in", "units", "of", "[", "m^2", "/", "s", "]", "." ]
python
valid
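Since alphal is defined as k / (rho * Cp), the property can be cross-checked by hand from the same Chemical object; water at ambient conditions is used here purely as an example.

from thermo.chemical import Chemical

water = Chemical('water', T=300.0, P=101325.0)
print(water.alphal)                          # property from the record above
print(water.kl / (water.rhol * water.Cpl))   # same definition evaluated manually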
valohai/valohai-yaml
valohai_yaml/parsing.py
https://github.com/valohai/valohai-yaml/blob/3d2e92381633d84cdba039f6905df34c9633a2e1/valohai_yaml/parsing.py#L6-L21
def parse(yaml, validate=True): """ Parse the given YAML data into a `Config` object, optionally validating it first. :param yaml: YAML data (either a string, a stream, or pre-parsed Python dict/list) :type yaml: list|dict|str|file :param validate: Whether to validate the data before attempting to parse it. :type validate: bool :return: Config object :rtype: valohai_yaml.objs.Config """ data = read_yaml(yaml) if validate: # pragma: no branch from .validation import validate validate(data, raise_exc=True) return Config.parse(data)
[ "def", "parse", "(", "yaml", ",", "validate", "=", "True", ")", ":", "data", "=", "read_yaml", "(", "yaml", ")", "if", "validate", ":", "# pragma: no branch", "from", ".", "validation", "import", "validate", "validate", "(", "data", ",", "raise_exc", "=", "True", ")", "return", "Config", ".", "parse", "(", "data", ")" ]
Parse the given YAML data into a `Config` object, optionally validating it first. :param yaml: YAML data (either a string, a stream, or pre-parsed Python dict/list) :type yaml: list|dict|str|file :param validate: Whether to validate the data before attempting to parse it. :type validate: bool :return: Config object :rtype: valohai_yaml.objs.Config
[ "Parse", "the", "given", "YAML", "data", "into", "a", "Config", "object", "optionally", "validating", "it", "first", "." ]
python
train
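A small round trip through the parser above, using the string input its docstring allows; the minimal step definition is assumed to satisfy the bundled schema (validation runs by default), and the steps mapping on the returned Config is assumed from the library's README.

import valohai_yaml

YAML_TEXT = '''
- step:
    name: train
    image: python:3.9
    command: python train.py
'''

config = valohai_yaml.parse(YAML_TEXT)
print(list(config.steps))        # ['train']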
uber/doubles
doubles/target.py
https://github.com/uber/doubles/blob/15e68dcf98f709b19a581915fa6af5ef49ebdd8a/doubles/target.py#L48-L62
def _determine_doubled_obj(self): """Return the target object. Returns the object that should be treated as the target object. For partial doubles, this will be the same as ``self.obj``, but for pure doubles, it's pulled from the special ``_doubles_target`` attribute. :return: The object to be doubled. :rtype: object """ if isinstance(self.obj, ObjectDouble): return self.obj._doubles_target else: return self.obj
[ "def", "_determine_doubled_obj", "(", "self", ")", ":", "if", "isinstance", "(", "self", ".", "obj", ",", "ObjectDouble", ")", ":", "return", "self", ".", "obj", ".", "_doubles_target", "else", ":", "return", "self", ".", "obj" ]
Return the target object. Returns the object that should be treated as the target object. For partial doubles, this will be the same as ``self.obj``, but for pure doubles, it's pulled from the special ``_doubles_target`` attribute. :return: The object to be doubled. :rtype: object
[ "Return", "the", "target", "object", "." ]
python
train
idlesign/uwsgiconf
uwsgiconf/options/alarms.py
https://github.com/idlesign/uwsgiconf/blob/475407acb44199edbf7e0a66261bfeb51de1afae/uwsgiconf/options/alarms.py#L133-L145
def alarm_on_segfault(self, alarm): """Raise the specified alarm when the segmentation fault handler is executed. Sends a backtrace. :param AlarmType|list[AlarmType] alarm: Alarm. """ self.register_alarm(alarm) for alarm in listify(alarm): self._set('alarm-segfault', alarm.alias, multi=True) return self._section
[ "def", "alarm_on_segfault", "(", "self", ",", "alarm", ")", ":", "self", ".", "register_alarm", "(", "alarm", ")", "for", "alarm", "in", "listify", "(", "alarm", ")", ":", "self", ".", "_set", "(", "'alarm-segfault'", ",", "alarm", ".", "alias", ",", "multi", "=", "True", ")", "return", "self", ".", "_section" ]
Raise the specified alarm when the segmentation fault handler is executed. Sends a backtrace. :param AlarmType|list[AlarmType] alarm: Alarm.
[ "Raise", "the", "specified", "alarm", "when", "the", "segmentation", "fault", "handler", "is", "executed", "." ]
python
train
niolabs/python-xbee
xbee/backend/base.py
https://github.com/niolabs/python-xbee/blob/b91be3d0ee7ccaa1990120b5b5490999d8e6cbc7/xbee/backend/base.py#L286-L326
def _parse_samples(self, io_bytes): """ _parse_samples: binary data in XBee IO data format -> [ {"dio-0":True, "dio-1":False, "adc-0":100"}, ...] _parse_samples reads binary data from an XBee device in the IO data format specified by the API. It will then return a dictionary indicating the status of each enabled IO port. """ sample_count, dio_chans, aio_chans, dio_mask, header_size = \ self._parse_samples_header(io_bytes) samples = [] # split the sample data into a list, so it can be pop()'d sample_bytes = [byteToInt(c) for c in io_bytes[header_size:]] # repeat for every sample provided for sample_ind in range(0, sample_count): tmp_samples = {} if dio_chans: # we have digital data digital_data_set = (sample_bytes.pop(0) << 8 | sample_bytes.pop(0)) digital_values = dio_mask & digital_data_set for i in dio_chans: tmp_samples['dio-{0}'.format(i)] = True \ if (digital_values >> i) & 1 else False for i in aio_chans: analog_sample = (sample_bytes.pop(0) << 8 | sample_bytes.pop(0)) tmp_samples['adc-{0}'.format(i)] = analog_sample samples.append(tmp_samples) return samples
[ "def", "_parse_samples", "(", "self", ",", "io_bytes", ")", ":", "sample_count", ",", "dio_chans", ",", "aio_chans", ",", "dio_mask", ",", "header_size", "=", "self", ".", "_parse_samples_header", "(", "io_bytes", ")", "samples", "=", "[", "]", "# split the sample data into a list, so it can be pop()'d", "sample_bytes", "=", "[", "byteToInt", "(", "c", ")", "for", "c", "in", "io_bytes", "[", "header_size", ":", "]", "]", "# repeat for every sample provided", "for", "sample_ind", "in", "range", "(", "0", ",", "sample_count", ")", ":", "tmp_samples", "=", "{", "}", "if", "dio_chans", ":", "# we have digital data", "digital_data_set", "=", "(", "sample_bytes", ".", "pop", "(", "0", ")", "<<", "8", "|", "sample_bytes", ".", "pop", "(", "0", ")", ")", "digital_values", "=", "dio_mask", "&", "digital_data_set", "for", "i", "in", "dio_chans", ":", "tmp_samples", "[", "'dio-{0}'", ".", "format", "(", "i", ")", "]", "=", "True", "if", "(", "digital_values", ">>", "i", ")", "&", "1", "else", "False", "for", "i", "in", "aio_chans", ":", "analog_sample", "=", "(", "sample_bytes", ".", "pop", "(", "0", ")", "<<", "8", "|", "sample_bytes", ".", "pop", "(", "0", ")", ")", "tmp_samples", "[", "'adc-{0}'", ".", "format", "(", "i", ")", "]", "=", "analog_sample", "samples", ".", "append", "(", "tmp_samples", ")", "return", "samples" ]
_parse_samples: binary data in XBee IO data format -> [ {"dio-0":True, "dio-1":False, "adc-0":100"}, ...] _parse_samples reads binary data from an XBee device in the IO data format specified by the API. It will then return a dictionary indicating the status of each enabled IO port.
[ "_parse_samples", ":", "binary", "data", "in", "XBee", "IO", "data", "format", "-", ">", "[", "{", "dio", "-", "0", ":", "True", "dio", "-", "1", ":", "False", "adc", "-", "0", ":", "100", "}", "...", "]" ]
python
train
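A stand-alone illustration of the digital-channel unpacking performed in the middle of the method above, with made-up mask and sample values; it is not the library API, just the same bit arithmetic.

dio_mask = 0b0000000000011001        # DIO0, DIO3 and DIO4 enabled
digital_data_set = 0b0000000000001001
digital_values = dio_mask & digital_data_set

for pin in (0, 3, 4):
    state = True if (digital_values >> pin) & 1 else False
    print('dio-{0}'.format(pin), state)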
openvax/mhcflurry
mhcflurry/parallelism.py
https://github.com/openvax/mhcflurry/blob/deb7c1629111254b484a2711619eb2347db36524/mhcflurry/parallelism.py#L115-L188
def make_worker_pool( processes=None, initializer=None, initializer_kwargs_per_process=None, max_tasks_per_worker=None): """ Convenience wrapper to create a multiprocessing.Pool. This function adds support for per-worker initializer arguments, which are not natively supported by the multiprocessing module. The motivation for this feature is to support allocating each worker to a (different) GPU. IMPLEMENTATION NOTE: The per-worker initializer arguments are implemented using a Queue. Each worker reads its arguments from this queue when it starts. When it terminates, it adds its initializer arguments back to the queue, so a future process can initialize itself using these arguments. There is one issue with this approach, however. If a worker crashes, it never repopulates the queue of initializer arguments. This will prevent any future worker from re-using those arguments. To deal with this issue we add a second 'backup queue'. This queue always contains the full set of initializer arguments: whenever a worker reads from it, it always pushes the pop'd args back to the end of the queue immediately. If the primary arg queue is ever empty, then workers will read from this backup queue. Parameters ---------- processes : int Number of workers. Default: num CPUs. initializer : function, optional Init function to call in each worker initializer_kwargs_per_process : list of dict, optional Arguments to pass to initializer function for each worker. Length of list must equal the number of workers. max_tasks_per_worker : int, optional Restart workers after this many tasks. Requires Python >=3.2. Returns ------- multiprocessing.Pool """ if not processes: processes = cpu_count() pool_kwargs = { 'processes': processes, } if max_tasks_per_worker: pool_kwargs["maxtasksperchild"] = max_tasks_per_worker if initializer: if initializer_kwargs_per_process: assert len(initializer_kwargs_per_process) == processes kwargs_queue = Queue() kwargs_queue_backup = Queue() for kwargs in initializer_kwargs_per_process: kwargs_queue.put(kwargs) kwargs_queue_backup.put(kwargs) pool_kwargs["initializer"] = worker_init_entry_point pool_kwargs["initargs"] = ( initializer, kwargs_queue, kwargs_queue_backup) else: pool_kwargs["initializer"] = initializer worker_pool = Pool(**pool_kwargs) print("Started pool: %s" % str(worker_pool)) pprint(pool_kwargs) return worker_pool
[ "def", "make_worker_pool", "(", "processes", "=", "None", ",", "initializer", "=", "None", ",", "initializer_kwargs_per_process", "=", "None", ",", "max_tasks_per_worker", "=", "None", ")", ":", "if", "not", "processes", ":", "processes", "=", "cpu_count", "(", ")", "pool_kwargs", "=", "{", "'processes'", ":", "processes", ",", "}", "if", "max_tasks_per_worker", ":", "pool_kwargs", "[", "\"maxtasksperchild\"", "]", "=", "max_tasks_per_worker", "if", "initializer", ":", "if", "initializer_kwargs_per_process", ":", "assert", "len", "(", "initializer_kwargs_per_process", ")", "==", "processes", "kwargs_queue", "=", "Queue", "(", ")", "kwargs_queue_backup", "=", "Queue", "(", ")", "for", "kwargs", "in", "initializer_kwargs_per_process", ":", "kwargs_queue", ".", "put", "(", "kwargs", ")", "kwargs_queue_backup", ".", "put", "(", "kwargs", ")", "pool_kwargs", "[", "\"initializer\"", "]", "=", "worker_init_entry_point", "pool_kwargs", "[", "\"initargs\"", "]", "=", "(", "initializer", ",", "kwargs_queue", ",", "kwargs_queue_backup", ")", "else", ":", "pool_kwargs", "[", "\"initializer\"", "]", "=", "initializer", "worker_pool", "=", "Pool", "(", "*", "*", "pool_kwargs", ")", "print", "(", "\"Started pool: %s\"", "%", "str", "(", "worker_pool", ")", ")", "pprint", "(", "pool_kwargs", ")", "return", "worker_pool" ]
Convenience wrapper to create a multiprocessing.Pool. This function adds support for per-worker initializer arguments, which are not natively supported by the multiprocessing module. The motivation for this feature is to support allocating each worker to a (different) GPU. IMPLEMENTATION NOTE: The per-worker initializer arguments are implemented using a Queue. Each worker reads its arguments from this queue when it starts. When it terminates, it adds its initializer arguments back to the queue, so a future process can initialize itself using these arguments. There is one issue with this approach, however. If a worker crashes, it never repopulates the queue of initializer arguments. This will prevent any future worker from re-using those arguments. To deal with this issue we add a second 'backup queue'. This queue always contains the full set of initializer arguments: whenever a worker reads from it, it always pushes the pop'd args back to the end of the queue immediately. If the primary arg queue is ever empty, then workers will read from this backup queue. Parameters ---------- processes : int Number of workers. Default: num CPUs. initializer : function, optional Init function to call in each worker initializer_kwargs_per_process : list of dict, optional Arguments to pass to initializer function for each worker. Length of list must equal the number of workers. max_tasks_per_worker : int, optional Restart workers after this many tasks. Requires Python >=3.2. Returns ------- multiprocessing.Pool
[ "Convenience", "wrapper", "to", "create", "a", "multiprocessing", ".", "Pool", "." ]
python
train
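A hedged usage sketch for the pool factory above; the per-worker device labels are invented stand-ins for the GPU assignment the docstring mentions.

from mhcflurry.parallelism import make_worker_pool


def announce_device(device):
    # Hypothetical per-worker setup; mhcflurry would pin a GPU here.
    print('worker initialized for', device)


if __name__ == '__main__':
    pool = make_worker_pool(
        processes=2,
        initializer=announce_device,
        initializer_kwargs_per_process=[{'device': 'gpu:0'}, {'device': 'gpu:1'}],
        max_tasks_per_worker=10,
    )
    print(pool.map(abs, [-3, 4, -5]))
    pool.close()
    pool.join()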
nephics/mat4py
mat4py/savemat.py
https://github.com/nephics/mat4py/blob/6c1a2ad903937437cc5f24f3c3f5aa2c5a77a1c1/mat4py/savemat.py#L207-L225
def write_numeric_array(fd, header, array): """Write the numeric array""" # make a memory file for writing array data bd = BytesIO() # write matrix header to memory file write_var_header(bd, header) if not isinstance(array, basestring) and header['dims'][0] > 1: # list array data in column major order array = list(chain.from_iterable(izip(*array))) # write matrix data to memory file write_elements(bd, header['mtp'], array) # write the variable to disk file data = bd.getvalue() bd.close() write_var_data(fd, data)
[ "def", "write_numeric_array", "(", "fd", ",", "header", ",", "array", ")", ":", "# make a memory file for writing array data", "bd", "=", "BytesIO", "(", ")", "# write matrix header to memory file", "write_var_header", "(", "bd", ",", "header", ")", "if", "not", "isinstance", "(", "array", ",", "basestring", ")", "and", "header", "[", "'dims'", "]", "[", "0", "]", ">", "1", ":", "# list array data in column major order", "array", "=", "list", "(", "chain", ".", "from_iterable", "(", "izip", "(", "*", "array", ")", ")", ")", "# write matrix data to memory file", "write_elements", "(", "bd", ",", "header", "[", "'mtp'", "]", ",", "array", ")", "# write the variable to disk file", "data", "=", "bd", ".", "getvalue", "(", ")", "bd", ".", "close", "(", ")", "write_var_data", "(", "fd", ",", "data", ")" ]
Write the numeric array
[ "Write", "the", "numeric", "array" ]
python
valid
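write_numeric_array is internal, so the sketch below goes through the public savemat entry point, which ends up in that helper for list-of-list values; the file name is arbitrary.

import mat4py

data = {'a': [[1, 2], [3, 4]], 'label': 'demo'}
mat4py.savemat('demo.mat', data)
print(mat4py.loadmat('demo.mat')['a'])   # [[1, 2], [3, 4]]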
spyder-ide/conda-manager
conda_manager/api/manager_api.py
https://github.com/spyder-ide/conda-manager/blob/89a2126cbecefc92185cf979347ccac1c5ee5d9d/conda_manager/api/manager_api.py#L151-L171
def _download_repodata(self, checked_repos): """Dowload repodata.""" self._files_downloaded = [] self._repodata_files = [] self.__counter = -1 if checked_repos: for repo in checked_repos: path = self._repo_url_to_path(repo) self._files_downloaded.append(path) self._repodata_files.append(path) worker = self.download_async(repo, path) worker.url = repo worker.path = path worker.sig_finished.connect(self._repodata_downloaded) else: # Empty, maybe there is no internet connection # Load information from conda-meta and save that file path = self._get_repodata_from_meta() self._repodata_files = [path] self._repodata_downloaded()
[ "def", "_download_repodata", "(", "self", ",", "checked_repos", ")", ":", "self", ".", "_files_downloaded", "=", "[", "]", "self", ".", "_repodata_files", "=", "[", "]", "self", ".", "__counter", "=", "-", "1", "if", "checked_repos", ":", "for", "repo", "in", "checked_repos", ":", "path", "=", "self", ".", "_repo_url_to_path", "(", "repo", ")", "self", ".", "_files_downloaded", ".", "append", "(", "path", ")", "self", ".", "_repodata_files", ".", "append", "(", "path", ")", "worker", "=", "self", ".", "download_async", "(", "repo", ",", "path", ")", "worker", ".", "url", "=", "repo", "worker", ".", "path", "=", "path", "worker", ".", "sig_finished", ".", "connect", "(", "self", ".", "_repodata_downloaded", ")", "else", ":", "# Empty, maybe there is no internet connection", "# Load information from conda-meta and save that file", "path", "=", "self", ".", "_get_repodata_from_meta", "(", ")", "self", ".", "_repodata_files", "=", "[", "path", "]", "self", ".", "_repodata_downloaded", "(", ")" ]
Download repodata.
[ "Dowload", "repodata", "." ]
python
train
stevelittlefish/easyforms
easyforms/form.py
https://github.com/stevelittlefish/easyforms/blob/f5dd2635b045beec9af970b249909f8429cedc57/easyforms/form.py#L763-L770
def disable_validation(self, field_name): """Disable the validation rules for a field""" field = self.field_dict.get(field_name) if not field: raise exceptions.FieldNotFound('Field not found: \'%s\' when trying to disable validation' % field_name) field.validators = []
[ "def", "disable_validation", "(", "self", ",", "field_name", ")", ":", "field", "=", "self", ".", "field_dict", ".", "get", "(", "field_name", ")", "if", "not", "field", ":", "raise", "exceptions", ".", "FieldNotFound", "(", "'Field not found: \\'%s\\' when trying to disable validation'", "%", "field_name", ")", "field", ".", "validators", "=", "[", "]" ]
Disable the validation rules for a field
[ "Disable", "the", "validation", "rules", "for", "a", "field" ]
python
train
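A hypothetical usage sketch for the record above; the helper name and the field names are assumptions, not taken from the easyforms sources:
def relax_optional_fields(form, field_names):
    # Clear the validator lists on fields that are optional in this context;
    # a misspelled name raises easyforms' FieldNotFound, as in the record above.
    for name in field_names:
        form.disable_validation(name)

# relax_optional_fields(signup_form, ['phone', 'company'])  # signup_form: an assumed easyforms Form instance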
daviddrysdale/python-phonenumbers
python/phonenumbers/phonenumberutil.py
https://github.com/daviddrysdale/python-phonenumbers/blob/9cc5bb4ab5e661e70789b4c64bf7a9383c7bdc20/python/phonenumbers/phonenumberutil.py#L704-L769
def length_of_national_destination_code(numobj): """Return length of the national destination code code for a number. Gets the length of the national destination code (NDC) from the PhoneNumber object passed in, so that clients could use it to split a national significant number into NDC and subscriber number. The NDC of a phone number is normally the first group of digit(s) right after the country calling code when the number is formatted in the international format, if there is a subscriber number part that follows. N.B.: similar to an area code, not all numbers have an NDC! An example of how this could be used: >>> import phonenumbers >>> numobj = phonenumbers.parse("18002530000", "US") >>> nsn = phonenumbers.national_significant_number(numobj) >>> ndc_len = phonenumbers.length_of_national_destination_code(numobj) >>> if ndc_len > 0: ... national_destination_code = nsn[:ndc_len] ... subscriber_number = nsn[ndc_len:] ... else: ... national_destination_code = "" ... subscriber_number = nsn Refer to the unittests to see the difference between this function and length_of_geographical_area_code. Arguments: numobj -- The PhoneNumber object to find the length of the NDC from. Returns the length of NDC of the PhoneNumber object passed in, which could be zero. """ if numobj.extension is not None: # We don't want to alter the object given to us, but we don't want to # include the extension when we format it, so we copy it and clear the # extension here. copied_numobj = PhoneNumber() copied_numobj.merge_from(numobj) copied_numobj.extension = None else: copied_numobj = numobj nsn = format_number(copied_numobj, PhoneNumberFormat.INTERNATIONAL) number_groups = re.split(NON_DIGITS_PATTERN, nsn) # The pattern will start with "+COUNTRY_CODE " so the first group will # always be the empty string (before the + symbol) and the second group # will be the country calling code. The third group will be area code if # it is not the last group. if len(number_groups) <= 3: return 0 if number_type(numobj) == PhoneNumberType.MOBILE: # For example Argentinian mobile numbers, when formatted in the # international format, are in the form of +54 9 NDC XXXX... As a # result, we take the length of the third group (NDC) and add the # length of the second group (which is the mobile token), which also # forms part of the national significant number. This assumes that # the mobile token is always formatted separately from the rest of the # phone number. mobile_token = country_mobile_token(numobj.country_code) if mobile_token != U_EMPTY_STRING: return len(number_groups[2]) + len(number_groups[3]) return len(number_groups[2])
[ "def", "length_of_national_destination_code", "(", "numobj", ")", ":", "if", "numobj", ".", "extension", "is", "not", "None", ":", "# We don't want to alter the object given to us, but we don't want to", "# include the extension when we format it, so we copy it and clear the", "# extension here.", "copied_numobj", "=", "PhoneNumber", "(", ")", "copied_numobj", ".", "merge_from", "(", "numobj", ")", "copied_numobj", ".", "extension", "=", "None", "else", ":", "copied_numobj", "=", "numobj", "nsn", "=", "format_number", "(", "copied_numobj", ",", "PhoneNumberFormat", ".", "INTERNATIONAL", ")", "number_groups", "=", "re", ".", "split", "(", "NON_DIGITS_PATTERN", ",", "nsn", ")", "# The pattern will start with \"+COUNTRY_CODE \" so the first group will", "# always be the empty string (before the + symbol) and the second group", "# will be the country calling code. The third group will be area code if", "# it is not the last group.", "if", "len", "(", "number_groups", ")", "<=", "3", ":", "return", "0", "if", "number_type", "(", "numobj", ")", "==", "PhoneNumberType", ".", "MOBILE", ":", "# For example Argentinian mobile numbers, when formatted in the", "# international format, are in the form of +54 9 NDC XXXX... As a", "# result, we take the length of the third group (NDC) and add the", "# length of the second group (which is the mobile token), which also", "# forms part of the national significant number. This assumes that", "# the mobile token is always formatted separately from the rest of the", "# phone number.", "mobile_token", "=", "country_mobile_token", "(", "numobj", ".", "country_code", ")", "if", "mobile_token", "!=", "U_EMPTY_STRING", ":", "return", "len", "(", "number_groups", "[", "2", "]", ")", "+", "len", "(", "number_groups", "[", "3", "]", ")", "return", "len", "(", "number_groups", "[", "2", "]", ")" ]
Return length of the national destination code code for a number. Gets the length of the national destination code (NDC) from the PhoneNumber object passed in, so that clients could use it to split a national significant number into NDC and subscriber number. The NDC of a phone number is normally the first group of digit(s) right after the country calling code when the number is formatted in the international format, if there is a subscriber number part that follows. N.B.: similar to an area code, not all numbers have an NDC! An example of how this could be used: >>> import phonenumbers >>> numobj = phonenumbers.parse("18002530000", "US") >>> nsn = phonenumbers.national_significant_number(numobj) >>> ndc_len = phonenumbers.length_of_national_destination_code(numobj) >>> if ndc_len > 0: ... national_destination_code = nsn[:ndc_len] ... subscriber_number = nsn[ndc_len:] ... else: ... national_destination_code = "" ... subscriber_number = nsn Refer to the unittests to see the difference between this function and length_of_geographical_area_code. Arguments: numobj -- The PhoneNumber object to find the length of the NDC from. Returns the length of NDC of the PhoneNumber object passed in, which could be zero.
[ "Return", "length", "of", "the", "national", "destination", "code", "code", "for", "a", "number", "." ]
python
train
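A runnable sketch adapted from the doctest in the record above; it only wraps the documented calls into a helper (the sample number comes from that doctest):
import phonenumbers

def split_ndc(number, region):
    # Split the national significant number into (NDC, subscriber number).
    numobj = phonenumbers.parse(number, region)
    nsn = phonenumbers.national_significant_number(numobj)
    ndc_len = phonenumbers.length_of_national_destination_code(numobj)
    if ndc_len > 0:
        return nsn[:ndc_len], nsn[ndc_len:]
    return "", nsn

# split_ndc("18002530000", "US") is expected to yield ("800", "2530000").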
f3at/feat
src/feat/models/setter.py
https://github.com/f3at/feat/blob/15da93fc9d6ec8154f52a9172824e25821195ef8/src/feat/models/setter.py#L50-L62
def source_attr(attr_name): """ Creates a setter that will set the specified source attribute to the current value. @param attr_name: the name of an attribute belonging to the source. @type attr_name: str """ def source_attr(value, context, **_params): setattr(context["model"].source, attr_name, value) return _attr() return source_attr
[ "def", "source_attr", "(", "attr_name", ")", ":", "def", "source_attr", "(", "value", ",", "context", ",", "*", "*", "_params", ")", ":", "setattr", "(", "context", "[", "\"model\"", "]", ".", "source", ",", "attr_name", ",", "value", ")", "return", "_attr", "(", ")", "return", "source_attr" ]
Creates a setter that will set the specified source attribute to the current value. @param attr_name: the name of an attribute belonging to the source. @type attr_name: str
[ "Creates", "a", "setter", "that", "will", "set", "the", "specified", "source", "attribute", "to", "the", "current", "value", "." ]
python
train
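A standalone illustration of the closure pattern above, using plain Python stand-ins instead of feat's model machinery (the class names here are assumptions):
def make_source_setter(attr_name):
    # Same shape as source_attr above, minus feat's internal _attr() wrapper.
    def setter(value, context, **_params):
        setattr(context["model"].source, attr_name, value)
    return setter

class _Source(object):
    pass

class _Model(object):
    def __init__(self, source):
        self.source = source

set_name = make_source_setter("name")
model = _Model(_Source())
set_name("web01", {"model": model})  # model.source.name is now "web01"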
wandb/client
wandb/vendor/prompt_toolkit/layout/controls.py
https://github.com/wandb/client/blob/7d08954ed5674fee223cd85ed0d8518fe47266b2/wandb/vendor/prompt_toolkit/layout/controls.py#L136-L149
def get_height_for_line(self, lineno, width): """ Return the height that a given line would need if it is rendered in a space with the given width. """ try: return self._line_heights[lineno, width] except KeyError: text = token_list_to_text(self.get_line(lineno)) result = self.get_height_for_text(text, width) # Cache and return self._line_heights[lineno, width] = result return result
[ "def", "get_height_for_line", "(", "self", ",", "lineno", ",", "width", ")", ":", "try", ":", "return", "self", ".", "_line_heights", "[", "lineno", ",", "width", "]", "except", "KeyError", ":", "text", "=", "token_list_to_text", "(", "self", ".", "get_line", "(", "lineno", ")", ")", "result", "=", "self", ".", "get_height_for_text", "(", "text", ",", "width", ")", "# Cache and return", "self", ".", "_line_heights", "[", "lineno", ",", "width", "]", "=", "result", "return", "result" ]
Return the height that a given line would need if it is rendered in a space with the given width.
[ "Return", "the", "height", "that", "a", "given", "line", "would", "need", "if", "it", "is", "rendered", "in", "a", "space", "with", "the", "given", "width", "." ]
python
train
shreyaspotnis/rampage
rampage/daq/gpib.py
https://github.com/shreyaspotnis/rampage/blob/e2565aef7ee16ee06523de975e8aa41aca14e3b2/rampage/daq/gpib.py#L34-L56
def set_fm_ext(self, freq, amplitude, peak_freq_dev=None, output_state=True): """Sets the func generator to frequency modulation with external modulation. freq is the carrier frequency in Hz.""" if peak_freq_dev is None: peak_freq_dev = freq commands = ['FUNC SIN', # set to output sine functions 'FM:STAT ON', 'FREQ {0}'.format(freq), 'FM:SOUR EXT', # 'FM:FREQ {0}'.format(freq), 'FM:DEV {0}'.format(peak_freq_dev), 'VOLT {0}'.format(amplitude), 'VOLT:OFFS 0'] # set to frequency modulation if output_state is True: commands.append('OUTP ON') else: commands.append('OUTP OFF') command_string = '\n'.join(commands) print_string = '\n\t' + command_string.replace('\n', '\n\t') logging.info(print_string) self.instr.write(command_string)
[ "def", "set_fm_ext", "(", "self", ",", "freq", ",", "amplitude", ",", "peak_freq_dev", "=", "None", ",", "output_state", "=", "True", ")", ":", "if", "peak_freq_dev", "is", "None", ":", "peak_freq_dev", "=", "freq", "commands", "=", "[", "'FUNC SIN'", ",", "# set to output sine functions", "'FM:STAT ON'", ",", "'FREQ {0}'", ".", "format", "(", "freq", ")", ",", "'FM:SOUR EXT'", ",", "# 'FM:FREQ {0}'.format(freq),", "'FM:DEV {0}'", ".", "format", "(", "peak_freq_dev", ")", ",", "'VOLT {0}'", ".", "format", "(", "amplitude", ")", ",", "'VOLT:OFFS 0'", "]", "# set to frequency modulation", "if", "output_state", "is", "True", ":", "commands", ".", "append", "(", "'OUTP ON'", ")", "else", ":", "commands", ".", "append", "(", "'OUTP OFF'", ")", "command_string", "=", "'\\n'", ".", "join", "(", "commands", ")", "print_string", "=", "'\\n\\t'", "+", "command_string", ".", "replace", "(", "'\\n'", ",", "'\\n\\t'", ")", "logging", ".", "info", "(", "print_string", ")", "self", ".", "instr", ".", "write", "(", "command_string", ")" ]
Sets the func generator to frequency modulation with external modulation. freq is the carrier frequency in Hz.
[ "Sets", "the", "func", "generator", "to", "frequency", "modulation", "with", "external", "modulation", ".", "freq", "is", "the", "carrier", "frequency", "in", "Hz", "." ]
python
train
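A hypothetical call sketch for the method above; the funcgen wrapper, carrier frequency and amplitude are assumptions, and the calls are left commented out because a connected instrument is required:
# External FM on a 1 MHz sine carrier at 0.5 V, peak deviation defaulting to the carrier:
# funcgen.set_fm_ext(freq=1e6, amplitude=0.5)
# Narrower 10 kHz peak deviation with the output initially disabled:
# funcgen.set_fm_ext(freq=1e6, amplitude=0.5, peak_freq_dev=10e3, output_state=False)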
nathancahill/mimicdb
mimicdb/s3/bucket.py
https://github.com/nathancahill/mimicdb/blob/9d0e8ebcba31d937f73752f9b88e5a4fec860765/mimicdb/s3/bucket.py#L74-L92
def delete_keys(self, *args, **kwargs): """Remove each key or key name in an iterable from the bucket set. """ ikeys = iter(kwargs.get('keys', args[0] if args else [])) while True: try: key = ikeys.next() except StopIteration: break if isinstance(key, basestring): mimicdb.backend.srem(tpl.bucket % self.name, key) mimicdb.backend.delete(tpl.key % (self.name, key)) elif isinstance(key, BotoKey) or isinstance(key, Key): mimicdb.backend.srem(tpl.bucket % self.name, key.name) mimicdb.backend.delete(tpl.key % (self.name, key.name)) return super(Bucket, self).delete_keys(*args, **kwargs)
[ "def", "delete_keys", "(", "self", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "ikeys", "=", "iter", "(", "kwargs", ".", "get", "(", "'keys'", ",", "args", "[", "0", "]", "if", "args", "else", "[", "]", ")", ")", "while", "True", ":", "try", ":", "key", "=", "ikeys", ".", "next", "(", ")", "except", "StopIteration", ":", "break", "if", "isinstance", "(", "key", ",", "basestring", ")", ":", "mimicdb", ".", "backend", ".", "srem", "(", "tpl", ".", "bucket", "%", "self", ".", "name", ",", "key", ")", "mimicdb", ".", "backend", ".", "delete", "(", "tpl", ".", "key", "%", "(", "self", ".", "name", ",", "key", ")", ")", "elif", "isinstance", "(", "key", ",", "BotoKey", ")", "or", "isinstance", "(", "key", ",", "Key", ")", ":", "mimicdb", ".", "backend", ".", "srem", "(", "tpl", ".", "bucket", "%", "self", ".", "name", ",", "key", ".", "name", ")", "mimicdb", ".", "backend", ".", "delete", "(", "tpl", ".", "key", "%", "(", "self", ".", "name", ",", "key", ".", "name", ")", ")", "return", "super", "(", "Bucket", ",", "self", ")", ".", "delete_keys", "(", "*", "args", ",", "*", "*", "kwargs", ")" ]
Remove each key or key name in an iterable from the bucket set.
[ "Remove", "each", "key", "or", "key", "name", "in", "an", "iterable", "from", "the", "bucket", "set", "." ]
python
valid
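A hypothetical usage sketch (the bucket object is assumed); per the record above, delete_keys accepts plain key names or Key objects and keeps the Redis set in sync with S3:
# bucket is assumed to be a mimicdb-wrapped S3 bucket instance.
# bucket.delete_keys(['logs/2015-01-01.gz', 'logs/2015-01-02.gz'])
# bucket.delete_keys(keys=[some_key_object])  # Key objects are handled the same way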
hydpy-dev/hydpy
hydpy/core/modeltools.py
https://github.com/hydpy-dev/hydpy/blob/1bc6a82cf30786521d86b36e27900c6717d3348d/hydpy/core/modeltools.py#L751-L780
def extrapolate_error(self): """Estimate the numerical error to be expected when applying all methods available based on the results of the current and the last method. Note that this expolation strategy cannot be applied on the first method. If the current method is the first one, `-999.9` is returned. >>> from hydpy.models.test_v1 import * >>> parameterstep() >>> model.numvars.error = 1e-2 >>> model.numvars.last_error = 1e-1 >>> model.numvars.idx_method = 10 >>> model.extrapolate_error() >>> from hydpy import round_ >>> round_(model.numvars.extrapolated_error) 0.01 >>> model.numvars.idx_method = 9 >>> model.extrapolate_error() >>> round_(model.numvars.extrapolated_error) 0.001 """ if self.numvars.idx_method > 2: self.numvars.extrapolated_error = modelutils.exp( modelutils.log(self.numvars.error) + (modelutils.log(self.numvars.error) - modelutils.log(self.numvars.last_error)) * (self.numconsts.nmb_methods-self.numvars.idx_method)) else: self.numvars.extrapolated_error = -999.9
[ "def", "extrapolate_error", "(", "self", ")", ":", "if", "self", ".", "numvars", ".", "idx_method", ">", "2", ":", "self", ".", "numvars", ".", "extrapolated_error", "=", "modelutils", ".", "exp", "(", "modelutils", ".", "log", "(", "self", ".", "numvars", ".", "error", ")", "+", "(", "modelutils", ".", "log", "(", "self", ".", "numvars", ".", "error", ")", "-", "modelutils", ".", "log", "(", "self", ".", "numvars", ".", "last_error", ")", ")", "*", "(", "self", ".", "numconsts", ".", "nmb_methods", "-", "self", ".", "numvars", ".", "idx_method", ")", ")", "else", ":", "self", ".", "numvars", ".", "extrapolated_error", "=", "-", "999.9" ]
Estimate the numerical error to be expected when applying all
        methods available based on the results of the current and the
        last method.

        Note that this extrapolation strategy cannot be applied on the first
        method.  If the current method is the first one, `-999.9` is
        returned.

        >>> from hydpy.models.test_v1 import *
        >>> parameterstep()
        >>> model.numvars.error = 1e-2
        >>> model.numvars.last_error = 1e-1
        >>> model.numvars.idx_method = 10
        >>> model.extrapolate_error()
        >>> from hydpy import round_
        >>> round_(model.numvars.extrapolated_error)
        0.01
        >>> model.numvars.idx_method = 9
        >>> model.extrapolate_error()
        >>> round_(model.numvars.extrapolated_error)
        0.001
[ "Estimate", "the", "numerical", "error", "to", "be", "expected", "when", "applying", "all", "methods", "available", "based", "on", "the", "results", "of", "the", "current", "and", "the", "last", "method", "." ]
python
train
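A standalone numeric check of the extrapolation formula above (plain Python, no HydPy import); nmb_methods = 10 is an assumption chosen only because it reproduces the doctest values in the record:
import math

def extrapolated_error(error, last_error, nmb_methods, idx_method):
    # Mirrors the branch for idx_method > 2 in the record above.
    if idx_method <= 2:
        return -999.9
    return math.exp(math.log(error) +
                    (math.log(error) - math.log(last_error)) *
                    (nmb_methods - idx_method))

# extrapolated_error(1e-2, 1e-1, 10, 10) -> 0.01
# extrapolated_error(1e-2, 1e-1, 10, 9)  -> 0.001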
opendatateam/udata
udata/core/spatial/models.py
https://github.com/opendatateam/udata/blob/f016585af94b0ff6bd73738c700324adc8ba7f8f/udata/core/spatial/models.py#L185-L191
def child_level(self): """Return the child level given handled levels.""" HANDLED_LEVELS = current_app.config.get('HANDLED_LEVELS') try: return HANDLED_LEVELS[HANDLED_LEVELS.index(self.level) - 1] except (IndexError, ValueError): return None
[ "def", "child_level", "(", "self", ")", ":", "HANDLED_LEVELS", "=", "current_app", ".", "config", ".", "get", "(", "'HANDLED_LEVELS'", ")", "try", ":", "return", "HANDLED_LEVELS", "[", "HANDLED_LEVELS", ".", "index", "(", "self", ".", "level", ")", "-", "1", "]", "except", "(", "IndexError", ",", "ValueError", ")", ":", "return", "None" ]
Return the child level given handled levels.
[ "Return", "the", "child", "level", "given", "handled", "levels", "." ]
python
train
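An illustration of the lookup above with an assumed HANDLED_LEVELS ordering (the real tuple comes from the application config):
HANDLED_LEVELS = ('fr:commune', 'fr:departement', 'fr:region', 'country')  # assumed example
level = 'fr:region'
child = HANDLED_LEVELS[HANDLED_LEVELS.index(level) - 1]  # -> 'fr:departement'
# Caveat: for the first entry the index becomes -1, which Python resolves to the
# last element rather than raising IndexError, so None is not returned in that case.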
snowman2/pangaea
pangaea/read.py
https://github.com/snowman2/pangaea/blob/a304e9a489cfc0bc1c74e7cb50c3335a4f3d596f/pangaea/read.py#L18-L145
def open_mfdataset(path_to_lsm_files, lat_var, lon_var, time_var, lat_dim, lon_dim, time_dim, lon_to_180=False, coords_projected=False, loader=None, engine=None, autoclose=True): """ Wrapper to open land surface model netcdf files using :func:`xarray.open_mfdataset`. .. warning:: The time dimension and variable will both be renamed to 'time' to enable slicing. Parameters ---------- path_to_lsm_files: :obj:`str` Path to land surface model files with wildcard. (Ex. '/path/to/files/*.nc') lat_var: :obj:`str` Latitude variable (Ex. lat). lon_var: :obj:`str` Longitude variable (Ex. lon). time_var: :obj:`str` Time variable (Ex. time). lat_dim: :obj:`str` Latitude dimension (Ex. lat). lon_dim: :obj:`str` Longitude dimension (Ex. lon). time_dim: :obj:`str` Time dimension (ex. time). lon_to_180: bool, optional, default=False It True, will convert longitude from [0 to 360] to [-180 to 180]. coords_projected: bool, optional, default=False It True, it will assume the coordinates are already in the projected coordinate system. loader: str, optional, default=None If 'hrrr', it will load in the HRRR dataset. engine: str, optional See: :func:`xarray.open_mfdataset` documentation. autoclose: :obj:`str`, optional, default=True If True, will use autoclose option with :func:`xarray.open_mfdataset`. Returns ------- :func:`xarray.Dataset` Read with pangaea example:: import pangaea as pa with pa.open_mfdataset('/path/to/ncfiles/*.nc', lat_var='lat', lon_var='lon', time_var='time', lat_dim='lat', lon_dim='lon', time_dim='time') as xds: print(xds.lsm.projection) """ def define_coords(xds): """xarray loader to ensure coordinates are loaded correctly""" # remove time dimension from lat, lon coordinates if xds[lat_var].ndim == 3: xds[lat_var] = xds[lat_var].squeeze(time_dim) # make sure coords are defined as coords if lat_var not in xds.coords \ or lon_var not in xds.coords \ or time_var not in xds.coords: xds.set_coords([lat_var, lon_var, time_var], inplace=True) return xds def extract_hrrr_date(xds): """xarray loader for HRRR""" for var in xds.variables: if 'initial_time' in xds[var].attrs.keys(): grid_time = pd.to_datetime(xds[var].attrs['initial_time'], format="%m/%d/%Y (%H:%M)") if 'forecast_time' in xds[var].attrs.keys(): time_units = 'h' if 'forecast_time_units' in xds[var].attrs.keys(): time_units = \ str(xds[var].attrs['forecast_time_units'][0]) time_dt = int(xds[var].attrs['forecast_time'][0]) grid_time += np.timedelta64(time_dt, time_units) return xds.assign(time=grid_time) return xds if loader == 'hrrr': preprocess = extract_hrrr_date engine = 'pynio' if engine is None else engine else: preprocess = define_coords xds = xr.open_mfdataset(path_to_lsm_files, autoclose=autoclose, preprocess=preprocess, concat_dim=time_dim, engine=engine, ) xds.lsm.y_var = lat_var xds.lsm.x_var = lon_var xds.lsm.y_dim = lat_dim xds.lsm.x_dim = lon_dim xds.lsm.lon_to_180 = lon_to_180 xds.lsm.coords_projected = coords_projected # make sure time dimensions are same for slicing xds.rename( { time_dim: 'time', time_var: 'time', }, inplace=True ) xds.lsm.to_datetime() return xds
[ "def", "open_mfdataset", "(", "path_to_lsm_files", ",", "lat_var", ",", "lon_var", ",", "time_var", ",", "lat_dim", ",", "lon_dim", ",", "time_dim", ",", "lon_to_180", "=", "False", ",", "coords_projected", "=", "False", ",", "loader", "=", "None", ",", "engine", "=", "None", ",", "autoclose", "=", "True", ")", ":", "def", "define_coords", "(", "xds", ")", ":", "\"\"\"xarray loader to ensure coordinates are loaded correctly\"\"\"", "# remove time dimension from lat, lon coordinates", "if", "xds", "[", "lat_var", "]", ".", "ndim", "==", "3", ":", "xds", "[", "lat_var", "]", "=", "xds", "[", "lat_var", "]", ".", "squeeze", "(", "time_dim", ")", "# make sure coords are defined as coords", "if", "lat_var", "not", "in", "xds", ".", "coords", "or", "lon_var", "not", "in", "xds", ".", "coords", "or", "time_var", "not", "in", "xds", ".", "coords", ":", "xds", ".", "set_coords", "(", "[", "lat_var", ",", "lon_var", ",", "time_var", "]", ",", "inplace", "=", "True", ")", "return", "xds", "def", "extract_hrrr_date", "(", "xds", ")", ":", "\"\"\"xarray loader for HRRR\"\"\"", "for", "var", "in", "xds", ".", "variables", ":", "if", "'initial_time'", "in", "xds", "[", "var", "]", ".", "attrs", ".", "keys", "(", ")", ":", "grid_time", "=", "pd", ".", "to_datetime", "(", "xds", "[", "var", "]", ".", "attrs", "[", "'initial_time'", "]", ",", "format", "=", "\"%m/%d/%Y (%H:%M)\"", ")", "if", "'forecast_time'", "in", "xds", "[", "var", "]", ".", "attrs", ".", "keys", "(", ")", ":", "time_units", "=", "'h'", "if", "'forecast_time_units'", "in", "xds", "[", "var", "]", ".", "attrs", ".", "keys", "(", ")", ":", "time_units", "=", "str", "(", "xds", "[", "var", "]", ".", "attrs", "[", "'forecast_time_units'", "]", "[", "0", "]", ")", "time_dt", "=", "int", "(", "xds", "[", "var", "]", ".", "attrs", "[", "'forecast_time'", "]", "[", "0", "]", ")", "grid_time", "+=", "np", ".", "timedelta64", "(", "time_dt", ",", "time_units", ")", "return", "xds", ".", "assign", "(", "time", "=", "grid_time", ")", "return", "xds", "if", "loader", "==", "'hrrr'", ":", "preprocess", "=", "extract_hrrr_date", "engine", "=", "'pynio'", "if", "engine", "is", "None", "else", "engine", "else", ":", "preprocess", "=", "define_coords", "xds", "=", "xr", ".", "open_mfdataset", "(", "path_to_lsm_files", ",", "autoclose", "=", "autoclose", ",", "preprocess", "=", "preprocess", ",", "concat_dim", "=", "time_dim", ",", "engine", "=", "engine", ",", ")", "xds", ".", "lsm", ".", "y_var", "=", "lat_var", "xds", ".", "lsm", ".", "x_var", "=", "lon_var", "xds", ".", "lsm", ".", "y_dim", "=", "lat_dim", "xds", ".", "lsm", ".", "x_dim", "=", "lon_dim", "xds", ".", "lsm", ".", "lon_to_180", "=", "lon_to_180", "xds", ".", "lsm", ".", "coords_projected", "=", "coords_projected", "# make sure time dimensions are same for slicing", "xds", ".", "rename", "(", "{", "time_dim", ":", "'time'", ",", "time_var", ":", "'time'", ",", "}", ",", "inplace", "=", "True", ")", "xds", ".", "lsm", ".", "to_datetime", "(", ")", "return", "xds" ]
Wrapper to open land surface model netcdf files using :func:`xarray.open_mfdataset`. .. warning:: The time dimension and variable will both be renamed to 'time' to enable slicing. Parameters ---------- path_to_lsm_files: :obj:`str` Path to land surface model files with wildcard. (Ex. '/path/to/files/*.nc') lat_var: :obj:`str` Latitude variable (Ex. lat). lon_var: :obj:`str` Longitude variable (Ex. lon). time_var: :obj:`str` Time variable (Ex. time). lat_dim: :obj:`str` Latitude dimension (Ex. lat). lon_dim: :obj:`str` Longitude dimension (Ex. lon). time_dim: :obj:`str` Time dimension (ex. time). lon_to_180: bool, optional, default=False It True, will convert longitude from [0 to 360] to [-180 to 180]. coords_projected: bool, optional, default=False It True, it will assume the coordinates are already in the projected coordinate system. loader: str, optional, default=None If 'hrrr', it will load in the HRRR dataset. engine: str, optional See: :func:`xarray.open_mfdataset` documentation. autoclose: :obj:`str`, optional, default=True If True, will use autoclose option with :func:`xarray.open_mfdataset`. Returns ------- :func:`xarray.Dataset` Read with pangaea example:: import pangaea as pa with pa.open_mfdataset('/path/to/ncfiles/*.nc', lat_var='lat', lon_var='lon', time_var='time', lat_dim='lat', lon_dim='lon', time_dim='time') as xds: print(xds.lsm.projection)
[ "Wrapper", "to", "open", "land", "surface", "model", "netcdf", "files", "using", ":", "func", ":", "xarray", ".", "open_mfdataset", "." ]
python
train
genialis/resolwe
resolwe/flow/serializers/contributor.py
https://github.com/genialis/resolwe/blob/f7bb54932c81ec0cfc5b5e80d238fceaeaa48d86/resolwe/flow/serializers/contributor.py#L35-L45
def to_internal_value(self, data): """Format the internal value.""" # When setting the contributor, it may be passed as an integer. if isinstance(data, dict) and isinstance(data.get('id', None), int): data = data['id'] elif isinstance(data, int): pass else: raise ValidationError("Contributor must be an integer or a dictionary with key 'id'") return self.Meta.model.objects.get(pk=data)
[ "def", "to_internal_value", "(", "self", ",", "data", ")", ":", "# When setting the contributor, it may be passed as an integer.", "if", "isinstance", "(", "data", ",", "dict", ")", "and", "isinstance", "(", "data", ".", "get", "(", "'id'", ",", "None", ")", ",", "int", ")", ":", "data", "=", "data", "[", "'id'", "]", "elif", "isinstance", "(", "data", ",", "int", ")", ":", "pass", "else", ":", "raise", "ValidationError", "(", "\"Contributor must be an integer or a dictionary with key 'id'\"", ")", "return", "self", ".", "Meta", ".", "model", ".", "objects", ".", "get", "(", "pk", "=", "data", ")" ]
Format the internal value.
[ "Format", "the", "internal", "value", "." ]
python
train
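Two equivalent (hypothetical) request shapes accepted by the field above; both resolve the contributor with primary key 42:
payload_a = {'contributor': 42}
payload_b = {'contributor': {'id': 42}}
# Any other shape (e.g. a username string) raises the ValidationError shown in the record.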
Esri/ArcREST
src/arcrest/enrichment/_geoenrichment.py
https://github.com/Esri/ArcREST/blob/ab240fde2b0200f61d4a5f6df033516e53f2f416/src/arcrest/enrichment/_geoenrichment.py#L548-L712
def standardGeographyQuery(self, sourceCountry=None, optionalCountryDataset=None, geographyLayers=None, geographyIDs=None, geographyQuery=None, returnSubGeographyLayer=False, subGeographyLayer=None, subGeographyQuery=None, outSR=4326, returnGeometry=False, returnCentroids=False, generalizationLevel=0, useFuzzySearch=False, featureLimit=1000): """ The GeoEnrichment service provides a helper method that returns standard geography IDs and features for the supported geographic levels in the United States and Canada. As indicated throughout this documentation guide, the GeoEnrichment service uses the concept of a study area to define the location of the point or area that you want to enrich with additional information. Locations can also be passed as one or many named statistical areas. This form of a study area lets you define an area by the ID of a standard geographic statistical feature, such as a census or postal area. For example, to obtain enrichment information for a U.S. state, county or ZIP Code or a Canadian province or postal code, the Standard Geography Query helper method allows you to search and query standard geography areas so that they can be used in the GeoEnrichment method to obtain facts about the location. The most common workflow for this service is to find a FIPS (standard geography ID) for a geographic name. For example, you can use this service to find the FIPS for the county of San Diego which is 06073. You can then use this FIPS ID within the GeoEnrichment service study area definition to get geometry and optional demographic data for the county. This study area definition is passed as a parameter to the GeoEnrichment service to return data defined in the enrichment pack and optionally return geometry for the feature. For examples and more help with this function see: http://resources.arcgis.com/en/help/arcgis-rest-api/#/Standard_geography_query/02r30000000q000000/ Inputs: sourceCountry - Optional parameter to specify the source country for the search. Use this parameter to limit the search and query of standard geographic features to one country. This parameter supports both the two-digit and three-digit country codes illustrated in the coverage table. optionalCountryDataset - Optional parameter to specify a specific dataset within a defined country. geographyLayers - Optional parameter to specify which standard geography layers are being queried or searched. If this parameter is not provided, all layers within the defined country will be queried. geographyIDs - Optional parameter to specify which IDs for the standard geography layers are being queried or searched. You can use this parameter to return attributes and/or geometry for standard geographic areas for administrative areas where you already know the ID, for example, if you know the Federal Information Processing Standard (FIPS) Codes for a U.S. state or county; or, in Canada, to return the geometry and attributes for a Forward Sortation Area (FSA). geographyQuery - Optional parameter to specify the text to query and search the standard geography layers specified. You can use this parameter to query and find standard geography features that meet an input term, for example, for a list of all the U.S. counties that contain the word "orange". The geographyQuery parameter can be a string that contains one or more words. returnSubGeographyLayer - Use this optional parameter to return all the subgeographic areas that are within a parent geography. For example, you could return all the U.S. counties for a given U.S. 
state or you could return all the Canadian postal areas (FSAs) within a Census Metropolitan Area (city). When this parameter is set to true, the output features will be defined in the subGeographyLayer. The output geometries will be in the spatial reference system defined by outSR. subGeographyLayer - Use this optional parameter to return all the subgeographic areas that are within a parent geography. For example, you could return all the U.S. counties within a given U.S. state or you could return all the Canadian postal areas (FSAs) within a Census Metropolitan Areas (city). When this parameter is set to true, the output features will be defined in the subGeographyLayer. The output geometries will be in the spatial reference system defined by outSR. subGeographyQuery - Optional parameter to filter the results of the subgeography features that are returned by a search term. You can use this parameter to query and find subgeography features that meet an input term. This parameter is used to filter the list of subgeography features that are within a parent geography. For example, you may want a list of all the ZIP Codes that are within "San Diego County" and filter the results so that only ZIP Codes that start with "921" are included in the output response. The subgeography query is a string that contains one or more words. outSR - Optional parameter to request the output geometries in a specified spatial reference system. returnGeometry - Optional parameter to request the output geometries in the response. returnCentroids - Optional Boolean parameter to request the output geometry to return the center point for each feature. Use this parameter to return all the geometries as points. For example, you could return all U.S. ZIP Code centroids (points) rather than providing the boundaries. generalizationLevel - Optional integer that specifies the level of generalization or detail in the area representations of the administrative boundary or standard geographic data layers. Values must be whole integers from 0 through 6, where 0 is most detailed and 6 is most generalized. useFuzzySearch - Optional Boolean parameter to define if text provided in the geographyQuery parameter should utilize fuzzy search logic. Fuzzy searches are based on the Levenshtein Distance or Edit Distance algorithm. featureLimit - Optional integer value where you can limit the number of features that are returned from the geographyQuery. 
""" url = self._base_url + self._url_standard_geography_query_execute params = { "f" : "json" } if not sourceCountry is None: params['sourceCountry'] = sourceCountry if not optionalCountryDataset is None: params['optionalCountryDataset'] = optionalCountryDataset if not geographyLayers is None: params['geographylayers'] = geographyLayers if not geographyIDs is None: params['geographyids'] = json.dumps(geographyIDs) if not geographyQuery is None: params['geographyQuery'] = geographyQuery if not returnSubGeographyLayer is None and \ isinstance(returnSubGeographyLayer, bool): params['returnSubGeographyLayer'] = returnSubGeographyLayer if not subGeographyLayer is None: params['subGeographyLayer'] = json.dumps(subGeographyLayer) if not subGeographyQuery is None: params['subGeographyQuery'] = subGeographyQuery if not outSR is None and \ isinstance(outSR, int): params['outSR'] = outSR if not returnGeometry is None and \ isinstance(returnGeometry, bool): params['returnGeometry'] = returnGeometry if not returnCentroids is None and \ isinstance(returnCentroids, bool): params['returnCentroids'] = returnCentroids if not generalizationLevel is None and \ isinstance(generalizationLevel, int): params['generalizationLevel'] = generalizationLevel if not useFuzzySearch is None and \ isinstance(useFuzzySearch, bool): params['useFuzzySearch'] = json.dumps(useFuzzySearch) if featureLimit is None: featureLimit = 1000 elif isinstance(featureLimit, int): params['featureLimit'] = featureLimit else: params['featureLimit'] = 1000 return self._post(url=url, param_dict=params, securityHandler=self._securityHandler, proxy_url=self._proxy_url, proxy_port=self._proxy_port)
[ "def", "standardGeographyQuery", "(", "self", ",", "sourceCountry", "=", "None", ",", "optionalCountryDataset", "=", "None", ",", "geographyLayers", "=", "None", ",", "geographyIDs", "=", "None", ",", "geographyQuery", "=", "None", ",", "returnSubGeographyLayer", "=", "False", ",", "subGeographyLayer", "=", "None", ",", "subGeographyQuery", "=", "None", ",", "outSR", "=", "4326", ",", "returnGeometry", "=", "False", ",", "returnCentroids", "=", "False", ",", "generalizationLevel", "=", "0", ",", "useFuzzySearch", "=", "False", ",", "featureLimit", "=", "1000", ")", ":", "url", "=", "self", ".", "_base_url", "+", "self", ".", "_url_standard_geography_query_execute", "params", "=", "{", "\"f\"", ":", "\"json\"", "}", "if", "not", "sourceCountry", "is", "None", ":", "params", "[", "'sourceCountry'", "]", "=", "sourceCountry", "if", "not", "optionalCountryDataset", "is", "None", ":", "params", "[", "'optionalCountryDataset'", "]", "=", "optionalCountryDataset", "if", "not", "geographyLayers", "is", "None", ":", "params", "[", "'geographylayers'", "]", "=", "geographyLayers", "if", "not", "geographyIDs", "is", "None", ":", "params", "[", "'geographyids'", "]", "=", "json", ".", "dumps", "(", "geographyIDs", ")", "if", "not", "geographyQuery", "is", "None", ":", "params", "[", "'geographyQuery'", "]", "=", "geographyQuery", "if", "not", "returnSubGeographyLayer", "is", "None", "and", "isinstance", "(", "returnSubGeographyLayer", ",", "bool", ")", ":", "params", "[", "'returnSubGeographyLayer'", "]", "=", "returnSubGeographyLayer", "if", "not", "subGeographyLayer", "is", "None", ":", "params", "[", "'subGeographyLayer'", "]", "=", "json", ".", "dumps", "(", "subGeographyLayer", ")", "if", "not", "subGeographyQuery", "is", "None", ":", "params", "[", "'subGeographyQuery'", "]", "=", "subGeographyQuery", "if", "not", "outSR", "is", "None", "and", "isinstance", "(", "outSR", ",", "int", ")", ":", "params", "[", "'outSR'", "]", "=", "outSR", "if", "not", "returnGeometry", "is", "None", "and", "isinstance", "(", "returnGeometry", ",", "bool", ")", ":", "params", "[", "'returnGeometry'", "]", "=", "returnGeometry", "if", "not", "returnCentroids", "is", "None", "and", "isinstance", "(", "returnCentroids", ",", "bool", ")", ":", "params", "[", "'returnCentroids'", "]", "=", "returnCentroids", "if", "not", "generalizationLevel", "is", "None", "and", "isinstance", "(", "generalizationLevel", ",", "int", ")", ":", "params", "[", "'generalizationLevel'", "]", "=", "generalizationLevel", "if", "not", "useFuzzySearch", "is", "None", "and", "isinstance", "(", "useFuzzySearch", ",", "bool", ")", ":", "params", "[", "'useFuzzySearch'", "]", "=", "json", ".", "dumps", "(", "useFuzzySearch", ")", "if", "featureLimit", "is", "None", ":", "featureLimit", "=", "1000", "elif", "isinstance", "(", "featureLimit", ",", "int", ")", ":", "params", "[", "'featureLimit'", "]", "=", "featureLimit", "else", ":", "params", "[", "'featureLimit'", "]", "=", "1000", "return", "self", ".", "_post", "(", "url", "=", "url", ",", "param_dict", "=", "params", ",", "securityHandler", "=", "self", ".", "_securityHandler", ",", "proxy_url", "=", "self", ".", "_proxy_url", ",", "proxy_port", "=", "self", ".", "_proxy_port", ")" ]
The GeoEnrichment service provides a helper method that returns standard geography IDs and features for the supported geographic levels in the United States and Canada. As indicated throughout this documentation guide, the GeoEnrichment service uses the concept of a study area to define the location of the point or area that you want to enrich with additional information. Locations can also be passed as one or many named statistical areas. This form of a study area lets you define an area by the ID of a standard geographic statistical feature, such as a census or postal area. For example, to obtain enrichment information for a U.S. state, county or ZIP Code or a Canadian province or postal code, the Standard Geography Query helper method allows you to search and query standard geography areas so that they can be used in the GeoEnrichment method to obtain facts about the location. The most common workflow for this service is to find a FIPS (standard geography ID) for a geographic name. For example, you can use this service to find the FIPS for the county of San Diego which is 06073. You can then use this FIPS ID within the GeoEnrichment service study area definition to get geometry and optional demographic data for the county. This study area definition is passed as a parameter to the GeoEnrichment service to return data defined in the enrichment pack and optionally return geometry for the feature. For examples and more help with this function see: http://resources.arcgis.com/en/help/arcgis-rest-api/#/Standard_geography_query/02r30000000q000000/ Inputs: sourceCountry - Optional parameter to specify the source country for the search. Use this parameter to limit the search and query of standard geographic features to one country. This parameter supports both the two-digit and three-digit country codes illustrated in the coverage table. optionalCountryDataset - Optional parameter to specify a specific dataset within a defined country. geographyLayers - Optional parameter to specify which standard geography layers are being queried or searched. If this parameter is not provided, all layers within the defined country will be queried. geographyIDs - Optional parameter to specify which IDs for the standard geography layers are being queried or searched. You can use this parameter to return attributes and/or geometry for standard geographic areas for administrative areas where you already know the ID, for example, if you know the Federal Information Processing Standard (FIPS) Codes for a U.S. state or county; or, in Canada, to return the geometry and attributes for a Forward Sortation Area (FSA). geographyQuery - Optional parameter to specify the text to query and search the standard geography layers specified. You can use this parameter to query and find standard geography features that meet an input term, for example, for a list of all the U.S. counties that contain the word "orange". The geographyQuery parameter can be a string that contains one or more words. returnSubGeographyLayer - Use this optional parameter to return all the subgeographic areas that are within a parent geography. For example, you could return all the U.S. counties for a given U.S. state or you could return all the Canadian postal areas (FSAs) within a Census Metropolitan Area (city). When this parameter is set to true, the output features will be defined in the subGeographyLayer. The output geometries will be in the spatial reference system defined by outSR. 
subGeographyLayer - Use this optional parameter to return all the subgeographic areas that are within a parent geography. For example, you could return all the U.S. counties within a given U.S. state or you could return all the Canadian postal areas (FSAs) within a Census Metropolitan Areas (city). When this parameter is set to true, the output features will be defined in the subGeographyLayer. The output geometries will be in the spatial reference system defined by outSR. subGeographyQuery - Optional parameter to filter the results of the subgeography features that are returned by a search term. You can use this parameter to query and find subgeography features that meet an input term. This parameter is used to filter the list of subgeography features that are within a parent geography. For example, you may want a list of all the ZIP Codes that are within "San Diego County" and filter the results so that only ZIP Codes that start with "921" are included in the output response. The subgeography query is a string that contains one or more words. outSR - Optional parameter to request the output geometries in a specified spatial reference system. returnGeometry - Optional parameter to request the output geometries in the response. returnCentroids - Optional Boolean parameter to request the output geometry to return the center point for each feature. Use this parameter to return all the geometries as points. For example, you could return all U.S. ZIP Code centroids (points) rather than providing the boundaries. generalizationLevel - Optional integer that specifies the level of generalization or detail in the area representations of the administrative boundary or standard geographic data layers. Values must be whole integers from 0 through 6, where 0 is most detailed and 6 is most generalized. useFuzzySearch - Optional Boolean parameter to define if text provided in the geographyQuery parameter should utilize fuzzy search logic. Fuzzy searches are based on the Levenshtein Distance or Edit Distance algorithm. featureLimit - Optional integer value where you can limit the number of features that are returned from the geographyQuery.
[ "The", "GeoEnrichment", "service", "provides", "a", "helper", "method", "that", "returns", "standard", "geography", "IDs", "and", "features", "for", "the", "supported", "geographic", "levels", "in", "the", "United", "States", "and", "Canada", ".", "As", "indicated", "throughout", "this", "documentation", "guide", "the", "GeoEnrichment", "service", "uses", "the", "concept", "of", "a", "study", "area", "to", "define", "the", "location", "of", "the", "point", "or", "area", "that", "you", "want", "to", "enrich", "with", "additional", "information", ".", "Locations", "can", "also", "be", "passed", "as", "one", "or", "many", "named", "statistical", "areas", ".", "This", "form", "of", "a", "study", "area", "lets", "you", "define", "an", "area", "by", "the", "ID", "of", "a", "standard", "geographic", "statistical", "feature", "such", "as", "a", "census", "or", "postal", "area", ".", "For", "example", "to", "obtain", "enrichment", "information", "for", "a", "U", ".", "S", ".", "state", "county", "or", "ZIP", "Code", "or", "a", "Canadian", "province", "or", "postal", "code", "the", "Standard", "Geography", "Query", "helper", "method", "allows", "you", "to", "search", "and", "query", "standard", "geography", "areas", "so", "that", "they", "can", "be", "used", "in", "the", "GeoEnrichment", "method", "to", "obtain", "facts", "about", "the", "location", ".", "The", "most", "common", "workflow", "for", "this", "service", "is", "to", "find", "a", "FIPS", "(", "standard", "geography", "ID", ")", "for", "a", "geographic", "name", ".", "For", "example", "you", "can", "use", "this", "service", "to", "find", "the", "FIPS", "for", "the", "county", "of", "San", "Diego", "which", "is", "06073", ".", "You", "can", "then", "use", "this", "FIPS", "ID", "within", "the", "GeoEnrichment", "service", "study", "area", "definition", "to", "get", "geometry", "and", "optional", "demographic", "data", "for", "the", "county", ".", "This", "study", "area", "definition", "is", "passed", "as", "a", "parameter", "to", "the", "GeoEnrichment", "service", "to", "return", "data", "defined", "in", "the", "enrichment", "pack", "and", "optionally", "return", "geometry", "for", "the", "feature", "." ]
python
train
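A hypothetical call sketch for the method above; the ge service object, its credentials and the layer ID are assumptions, so the call is left commented out:
# Find U.S. counties whose name contains "orange" and return their boundaries:
# result = ge.standardGeographyQuery(sourceCountry='US',
#                                    geographyLayers=['US.Counties'],  # assumed layer ID
#                                    geographyQuery='orange',
#                                    returnGeometry=True,
#                                    featureLimit=50)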
buildbot/buildbot
worker/buildbot_worker/runprocess.py
https://github.com/buildbot/buildbot/blob/5df3cfae6d760557d99156633c32b1822a1e130c/worker/buildbot_worker/runprocess.py#L589-L607
def _spawnProcess(self, processProtocol, executable, args=(), env=None, path=None, uid=None, gid=None, usePTY=False, childFDs=None): """private implementation of reactor.spawnProcess, to allow use of L{ProcGroupProcess}""" if env is None: env = {} # use the ProcGroupProcess class, if available if runtime.platformType == 'posix': if self.useProcGroup and not usePTY: return ProcGroupProcess(reactor, executable, args, env, path, processProtocol, uid, gid, childFDs) # fall back if self.using_comspec: return self._spawnAsBatch(processProtocol, executable, args, env, path, usePTY=usePTY) return reactor.spawnProcess(processProtocol, executable, args, env, path, usePTY=usePTY)
[ "def", "_spawnProcess", "(", "self", ",", "processProtocol", ",", "executable", ",", "args", "=", "(", ")", ",", "env", "=", "None", ",", "path", "=", "None", ",", "uid", "=", "None", ",", "gid", "=", "None", ",", "usePTY", "=", "False", ",", "childFDs", "=", "None", ")", ":", "if", "env", "is", "None", ":", "env", "=", "{", "}", "# use the ProcGroupProcess class, if available", "if", "runtime", ".", "platformType", "==", "'posix'", ":", "if", "self", ".", "useProcGroup", "and", "not", "usePTY", ":", "return", "ProcGroupProcess", "(", "reactor", ",", "executable", ",", "args", ",", "env", ",", "path", ",", "processProtocol", ",", "uid", ",", "gid", ",", "childFDs", ")", "# fall back", "if", "self", ".", "using_comspec", ":", "return", "self", ".", "_spawnAsBatch", "(", "processProtocol", ",", "executable", ",", "args", ",", "env", ",", "path", ",", "usePTY", "=", "usePTY", ")", "return", "reactor", ".", "spawnProcess", "(", "processProtocol", ",", "executable", ",", "args", ",", "env", ",", "path", ",", "usePTY", "=", "usePTY", ")" ]
private implementation of reactor.spawnProcess, to allow use of L{ProcGroupProcess}
[ "private", "implementation", "of", "reactor", ".", "spawnProcess", "to", "allow", "use", "of", "L", "{", "ProcGroupProcess", "}" ]
python
train