Dataset schema (column names, types, and value-length ranges):
- repo: string (lengths 7 to 54)
- path: string (lengths 4 to 192)
- url: string (lengths 87 to 284)
- code: string (lengths 78 to 104k)
- code_tokens: sequence
- docstring: string (lengths 1 to 46.9k)
- docstring_tokens: sequence
- language: string (1 distinct value)
- partition: string (3 distinct values)
awslabs/serverless-application-model
examples/apps/lex-make-appointment-python/lambda_function.py
https://github.com/awslabs/serverless-application-model/blob/cccb0c96b5c91e53355ebc07e542467303a5eedd/examples/apps/lex-make-appointment-python/lambda_function.py#L172-L184
def is_available(time, duration, availabilities): """ Helper function to check if the given time and duration fits within a known set of availability windows. Duration is assumed to be one of 30, 60 (meaning minutes). Availabilities is expected to contain entries of the format HH:MM. """ if duration == 30: return time in availabilities elif duration == 60: second_half_hour_time = increment_time_by_thirty_mins(time) return time in availabilities and second_half_hour_time in availabilities # Invalid duration ; throw error. We should not have reached this branch due to earlier validation. raise Exception('Was not able to understand duration {}'.format(duration))
[ "def", "is_available", "(", "time", ",", "duration", ",", "availabilities", ")", ":", "if", "duration", "==", "30", ":", "return", "time", "in", "availabilities", "elif", "duration", "==", "60", ":", "second_half_hour_time", "=", "increment_time_by_thirty_mins", "(", "time", ")", "return", "time", "in", "availabilities", "and", "second_half_hour_time", "in", "availabilities", "# Invalid duration ; throw error. We should not have reached this branch due to earlier validation.", "raise", "Exception", "(", "'Was not able to understand duration {}'", ".", "format", "(", "duration", ")", ")" ]
Helper function to check if the given time and duration fits within a known set of availability windows. Duration is assumed to be one of 30, 60 (meaning minutes). Availabilities is expected to contain entries of the format HH:MM.
[ "Helper", "function", "to", "check", "if", "the", "given", "time", "and", "duration", "fits", "within", "a", "known", "set", "of", "availability", "windows", ".", "Duration", "is", "assumed", "to", "be", "one", "of", "30", "60", "(", "meaning", "minutes", ")", ".", "Availabilities", "is", "expected", "to", "contain", "entries", "of", "the", "format", "HH", ":", "MM", "." ]
python
train
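A minimal usage sketch of the is_available helper in the row above; the half-hour increment helper below is a simplified stand-in for the source's increment_time_by_thirty_mins, and the availability list is hypothetical:

def increment_time_by_thirty_mins(time):
    # Simplified stand-in; the real helper lives in the same lambda_function.py.
    hour, minute = map(int, time.split(':'))
    return '{:02d}:{:02d}'.format(hour + (minute + 30) // 60, (minute + 30) % 60)

availabilities = ['10:00', '10:30', '16:00']
print(is_available('10:00', 30, availabilities))  # True
print(is_available('10:00', 60, availabilities))  # True: 10:00 and 10:30 are both free
print(is_available('16:00', 60, availabilities))  # False: 16:30 is not free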
chrismattmann/tika-python
tika/tika.py
https://github.com/chrismattmann/tika-python/blob/ffd3879ac3eaa9142c0fb6557cc1dc52d458a75a/tika/tika.py#L605-L670
def startServer(tikaServerJar, java_path = TikaJava, serverHost = ServerHost, port = Port, classpath=None, config_path=None): ''' Starts Tika Server :param tikaServerJar: path to tika server jar :param serverHost: the host interface address to be used for binding the service :param port: the host port to be used for binding the service :param classpath: Class path value to pass to JVM :return: None ''' if classpath is None: classpath = TikaServerClasspath host = "localhost" if Windows: host = "0.0.0.0" if classpath: classpath += ":" + tikaServerJar else: classpath = tikaServerJar # setup command string cmd_string = "" if not config_path: cmd_string = '%s -cp %s org.apache.tika.server.TikaServerCli --port %s --host %s &' \ % (java_path, classpath, port, host) else: cmd_string = '%s -cp %s org.apache.tika.server.TikaServerCli --port %s --host %s --config %s &' \ % (java_path, classpath, port, host, config_path) # Check that we can write to log path try: tika_log_file_path = os.path.join(TikaServerLogFilePath, 'tika-server.log') logFile = open(tika_log_file_path, 'w') except PermissionError as e: log.error("Unable to create tika-server.log at %s due to permission error." % (TikaServerLogFilePath)) return False # Check that specified java binary is available on path try: _ = Popen(java_path, stdout=open(os.devnull, "w"), stderr=open(os.devnull, "w")) except FileNotFoundError as e: log.error("Unable to run java; is it installed?") return False # Run java with jar args cmd = Popen(cmd_string, stdout=logFile, stderr=STDOUT, shell=True) # Check logs and retry as configured try_count = 0 is_started = False while try_count < TikaStartupMaxRetry: with open(tika_log_file_path, "r") as tika_log_file_tmp: # check for INFO string to confirm listening endpoint if "Started Apache Tika server at" in tika_log_file_tmp.read(): is_started = True else: log.warning("Failed to see startup log message; retrying...") time.sleep(TikaStartupSleep) try_count += 1 if not is_started: log.error("Tika startup log message not received after %d tries." % (TikaStartupMaxRetry)) return False else: return True
[ "def", "startServer", "(", "tikaServerJar", ",", "java_path", "=", "TikaJava", ",", "serverHost", "=", "ServerHost", ",", "port", "=", "Port", ",", "classpath", "=", "None", ",", "config_path", "=", "None", ")", ":", "if", "classpath", "is", "None", ":", "classpath", "=", "TikaServerClasspath", "host", "=", "\"localhost\"", "if", "Windows", ":", "host", "=", "\"0.0.0.0\"", "if", "classpath", ":", "classpath", "+=", "\":\"", "+", "tikaServerJar", "else", ":", "classpath", "=", "tikaServerJar", "# setup command string", "cmd_string", "=", "\"\"", "if", "not", "config_path", ":", "cmd_string", "=", "'%s -cp %s org.apache.tika.server.TikaServerCli --port %s --host %s &'", "%", "(", "java_path", ",", "classpath", ",", "port", ",", "host", ")", "else", ":", "cmd_string", "=", "'%s -cp %s org.apache.tika.server.TikaServerCli --port %s --host %s --config %s &'", "%", "(", "java_path", ",", "classpath", ",", "port", ",", "host", ",", "config_path", ")", "# Check that we can write to log path", "try", ":", "tika_log_file_path", "=", "os", ".", "path", ".", "join", "(", "TikaServerLogFilePath", ",", "'tika-server.log'", ")", "logFile", "=", "open", "(", "tika_log_file_path", ",", "'w'", ")", "except", "PermissionError", "as", "e", ":", "log", ".", "error", "(", "\"Unable to create tika-server.log at %s due to permission error.\"", "%", "(", "TikaServerLogFilePath", ")", ")", "return", "False", "# Check that specified java binary is available on path", "try", ":", "_", "=", "Popen", "(", "java_path", ",", "stdout", "=", "open", "(", "os", ".", "devnull", ",", "\"w\"", ")", ",", "stderr", "=", "open", "(", "os", ".", "devnull", ",", "\"w\"", ")", ")", "except", "FileNotFoundError", "as", "e", ":", "log", ".", "error", "(", "\"Unable to run java; is it installed?\"", ")", "return", "False", "# Run java with jar args", "cmd", "=", "Popen", "(", "cmd_string", ",", "stdout", "=", "logFile", ",", "stderr", "=", "STDOUT", ",", "shell", "=", "True", ")", "# Check logs and retry as configured", "try_count", "=", "0", "is_started", "=", "False", "while", "try_count", "<", "TikaStartupMaxRetry", ":", "with", "open", "(", "tika_log_file_path", ",", "\"r\"", ")", "as", "tika_log_file_tmp", ":", "# check for INFO string to confirm listening endpoint", "if", "\"Started Apache Tika server at\"", "in", "tika_log_file_tmp", ".", "read", "(", ")", ":", "is_started", "=", "True", "else", ":", "log", ".", "warning", "(", "\"Failed to see startup log message; retrying...\"", ")", "time", ".", "sleep", "(", "TikaStartupSleep", ")", "try_count", "+=", "1", "if", "not", "is_started", ":", "log", ".", "error", "(", "\"Tika startup log message not received after %d tries.\"", "%", "(", "TikaStartupMaxRetry", ")", ")", "return", "False", "else", ":", "return", "True" ]
Starts Tika Server :param tikaServerJar: path to tika server jar :param serverHost: the host interface address to be used for binding the service :param port: the host port to be used for binding the service :param classpath: Class path value to pass to JVM :return: None
[ "Starts", "Tika", "Server", ":", "param", "tikaServerJar", ":", "path", "to", "tika", "server", "jar", ":", "param", "serverHost", ":", "the", "host", "interface", "address", "to", "be", "used", "for", "binding", "the", "service", ":", "param", "port", ":", "the", "host", "port", "to", "be", "used", "for", "binding", "the", "service", ":", "param", "classpath", ":", "Class", "path", "value", "to", "pass", "to", "JVM", ":", "return", ":", "None" ]
python
train
seebass/django-tooling
django_tooling/registeradmin.py
https://github.com/seebass/django-tooling/blob/aaee703040b299cae560c501c94b18e0c2620f0d/django_tooling/registeradmin.py#L5-L9
def registerAdminSite(appName, excludeModels=[]): """Registers the models of the app with the given "appName" for the admin site""" for model in apps.get_app_config(appName).get_models(): if model not in excludeModels: admin.site.register(model)
[ "def", "registerAdminSite", "(", "appName", ",", "excludeModels", "=", "[", "]", ")", ":", "for", "model", "in", "apps", ".", "get_app_config", "(", "appName", ")", ".", "get_models", "(", ")", ":", "if", "model", "not", "in", "excludeModels", ":", "admin", ".", "site", ".", "register", "(", "model", ")" ]
Registers the models of the app with the given "appName" for the admin site
[ "Registers", "the", "models", "of", "the", "app", "with", "the", "given", "appName", "for", "the", "admin", "site" ]
python
test
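As a usage sketch, registerAdminSite above would typically be called from an app's admin module; the app label and the excluded model here are hypothetical:

# Hypothetical admin.py for a Django app labelled 'myapp'.
from django_tooling.registeradmin import registerAdminSite
from myapp.models import InternalLog  # assumed model to keep out of the admin

registerAdminSite('myapp', excludeModels=[InternalLog])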
pytroll/pyspectral
pyspectral/solar.py
https://github.com/pytroll/pyspectral/blob/fd296c0e0bdf5364fa180134a1292665d6bc50a3/pyspectral/solar.py#L117-L121
def inband_solarflux(self, rsr, scale=1.0, **options): """Derive the inband solar flux for a given instrument relative spectral response valid for an earth-sun distance of one AU. """ return self._band_calculations(rsr, True, scale, **options)
[ "def", "inband_solarflux", "(", "self", ",", "rsr", ",", "scale", "=", "1.0", ",", "*", "*", "options", ")", ":", "return", "self", ".", "_band_calculations", "(", "rsr", ",", "True", ",", "scale", ",", "*", "*", "options", ")" ]
Derive the inband solar flux for a given instrument relative spectral response valid for an earth-sun distance of one AU.
[ "Derive", "the", "inband", "solar", "flux", "for", "a", "given", "instrument", "relative", "spectral", "response", "valid", "for", "an", "earth", "-", "sun", "distance", "of", "one", "AU", "." ]
python
train
jobovy/galpy
galpy/orbit/OrbitTop.py
https://github.com/jobovy/galpy/blob/9c5b9fe65d58835624dffe432be282060918ee08/galpy/orbit/OrbitTop.py#L215-L233
def r(self,*args,**kwargs): """ NAME: r PURPOSE: return spherical radius at time t INPUT: t - (optional) time at which to get the radius ro= (Object-wide default) physical scale for distances to use to convert use_physical= use to override Object-wide default for using a physical scale for output OUTPUT: r(t) HISTORY: 2016-04-19 - Written - Bovy (UofT) """ thiso= self(*args,**kwargs) onet= (len(thiso.shape) == 1) if onet: return nu.sqrt(thiso[0]**2.+thiso[3]**2.) else: return nu.sqrt(thiso[0,:]**2.+thiso[3,:]**2.)
[ "def", "r", "(", "self", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "thiso", "=", "self", "(", "*", "args", ",", "*", "*", "kwargs", ")", "onet", "=", "(", "len", "(", "thiso", ".", "shape", ")", "==", "1", ")", "if", "onet", ":", "return", "nu", ".", "sqrt", "(", "thiso", "[", "0", "]", "**", "2.", "+", "thiso", "[", "3", "]", "**", "2.", ")", "else", ":", "return", "nu", ".", "sqrt", "(", "thiso", "[", "0", ",", ":", "]", "**", "2.", "+", "thiso", "[", "3", ",", ":", "]", "**", "2.", ")" ]
NAME: r PURPOSE: return spherical radius at time t INPUT: t - (optional) time at which to get the radius ro= (Object-wide default) physical scale for distances to use to convert use_physical= use to override Object-wide default for using a physical scale for output OUTPUT: r(t) HISTORY: 2016-04-19 - Written - Bovy (UofT)
[ "NAME", ":", "r", "PURPOSE", ":", "return", "spherical", "radius", "at", "time", "t", "INPUT", ":", "t", "-", "(", "optional", ")", "time", "at", "which", "to", "get", "the", "radius", "ro", "=", "(", "Object", "-", "wide", "default", ")", "physical", "scale", "for", "distances", "to", "use", "to", "convert", "use_physical", "=", "use", "to", "override", "Object", "-", "wide", "default", "for", "using", "a", "physical", "scale", "for", "output", "OUTPUT", ":", "r", "(", "t", ")", "HISTORY", ":", "2016", "-", "04", "-", "19", "-", "Written", "-", "Bovy", "(", "UofT", ")" ]
python
train
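In galpy's phase-space layout the orbit vector holds (R, vR, vT, z, vz, phi), so the method above evaluates r = sqrt(R**2 + z**2) from the cylindrical coordinates; a standalone numeric check:

import numpy as nu  # the alias OrbitTop.py itself uses for numpy

thiso = nu.array([3.0, 0.1, 1.0, 4.0, 0.0, 0.0])  # illustrative (R, vR, vT, z, vz, phi)
print(nu.sqrt(thiso[0]**2. + thiso[3]**2.))  # 5.0, since sqrt(3**2 + 4**2) == 5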
ungarj/s2reader
s2reader/s2reader.py
https://github.com/ungarj/s2reader/blob/376fd7ee1d15cce0849709c149d694663a7bc0ef/s2reader/s2reader.py#L321-L339
def footprint(self): """Find and return footprint as Shapely Polygon.""" # Check whether product or granule footprint needs to be calculated. tile_geocoding = self._metadata.iter("Tile_Geocoding").next() resolution = 10 searchstring = ".//*[@resolution='%s']" % resolution size, geoposition = tile_geocoding.findall(searchstring) nrows, ncols = (int(i.text) for i in size) ulx, uly, xdim, ydim = (int(i.text) for i in geoposition) lrx = ulx + nrows * resolution lry = uly - ncols * resolution utm_footprint = box(ulx, lry, lrx, uly) project = partial( pyproj.transform, pyproj.Proj(init=self.srid), pyproj.Proj(init='EPSG:4326') ) footprint = transform(project, utm_footprint).buffer(0) return footprint
[ "def", "footprint", "(", "self", ")", ":", "# Check whether product or granule footprint needs to be calculated.", "tile_geocoding", "=", "self", ".", "_metadata", ".", "iter", "(", "\"Tile_Geocoding\"", ")", ".", "next", "(", ")", "resolution", "=", "10", "searchstring", "=", "\".//*[@resolution='%s']\"", "%", "resolution", "size", ",", "geoposition", "=", "tile_geocoding", ".", "findall", "(", "searchstring", ")", "nrows", ",", "ncols", "=", "(", "int", "(", "i", ".", "text", ")", "for", "i", "in", "size", ")", "ulx", ",", "uly", ",", "xdim", ",", "ydim", "=", "(", "int", "(", "i", ".", "text", ")", "for", "i", "in", "geoposition", ")", "lrx", "=", "ulx", "+", "nrows", "*", "resolution", "lry", "=", "uly", "-", "ncols", "*", "resolution", "utm_footprint", "=", "box", "(", "ulx", ",", "lry", ",", "lrx", ",", "uly", ")", "project", "=", "partial", "(", "pyproj", ".", "transform", ",", "pyproj", ".", "Proj", "(", "init", "=", "self", ".", "srid", ")", ",", "pyproj", ".", "Proj", "(", "init", "=", "'EPSG:4326'", ")", ")", "footprint", "=", "transform", "(", "project", ",", "utm_footprint", ")", ".", "buffer", "(", "0", ")", "return", "footprint" ]
Find and return footprint as Shapely Polygon.
[ "Find", "and", "return", "footprint", "as", "Shapely", "Polygon", "." ]
python
train
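The reprojection idiom in footprint above uses the pre-2.x pyproj API (functools.partial with pyproj.transform and init= strings) plus Python 2's .next(); a rough modern equivalent of the UTM-to-WGS84 step, with a hypothetical UTM zone and tile bounds, might look like:

# Sketch of the same footprint reprojection with pyproj >= 2.2 and shapely.
import pyproj
from shapely.geometry import box
from shapely.ops import transform

utm_footprint = box(399960, 4890240, 509760, 5000040)  # hypothetical tile bounds
project = pyproj.Transformer.from_crs(
    "EPSG:32633", "EPSG:4326", always_xy=True  # assumed UTM zone 33N source CRS
).transform
footprint = transform(project, utm_footprint).buffer(0)
print(footprint.bounds)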
tensorflow/lucid
lucid/optvis/param/color.py
https://github.com/tensorflow/lucid/blob/d1a1e2e4fd4be61b89b8cba20dc425a5ae34576e/lucid/optvis/param/color.py#L32-L46
def _linear_decorelate_color(t): """Multiply input by sqrt of emperical (ImageNet) color correlation matrix. If you interpret t's innermost dimension as describing colors in a decorrelated version of the color space (which is a very natural way to describe colors -- see discussion in Feature Visualization article) the way to map back to normal colors is multiply the square root of your color correlations. """ # check that inner dimension is 3? t_flat = tf.reshape(t, [-1, 3]) color_correlation_normalized = color_correlation_svd_sqrt / max_norm_svd_sqrt t_flat = tf.matmul(t_flat, color_correlation_normalized.T) t = tf.reshape(t_flat, tf.shape(t)) return t
[ "def", "_linear_decorelate_color", "(", "t", ")", ":", "# check that inner dimension is 3?", "t_flat", "=", "tf", ".", "reshape", "(", "t", ",", "[", "-", "1", ",", "3", "]", ")", "color_correlation_normalized", "=", "color_correlation_svd_sqrt", "/", "max_norm_svd_sqrt", "t_flat", "=", "tf", ".", "matmul", "(", "t_flat", ",", "color_correlation_normalized", ".", "T", ")", "t", "=", "tf", ".", "reshape", "(", "t_flat", ",", "tf", ".", "shape", "(", "t", ")", ")", "return", "t" ]
Multiply input by sqrt of emperical (ImageNet) color correlation matrix. If you interpret t's innermost dimension as describing colors in a decorrelated version of the color space (which is a very natural way to describe colors -- see discussion in Feature Visualization article) the way to map back to normal colors is multiply the square root of your color correlations.
[ "Multiply", "input", "by", "sqrt", "of", "emperical", "(", "ImageNet", ")", "color", "correlation", "matrix", ".", "If", "you", "interpret", "t", "s", "innermost", "dimension", "as", "describing", "colors", "in", "a", "decorrelated", "version", "of", "the", "color", "space", "(", "which", "is", "a", "very", "natural", "way", "to", "describe", "colors", "--", "see", "discussion", "in", "Feature", "Visualization", "article", ")", "the", "way", "to", "map", "back", "to", "normal", "colors", "is", "multiply", "the", "square", "root", "of", "your", "color", "correlations", "." ]
python
train
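A minimal NumPy rendering of the same decorrelate-then-map-back idea; the matrix values here are placeholders, not necessarily lucid's actual color_correlation_svd_sqrt:

import numpy as np

color_correlation_svd_sqrt = np.asarray([[0.26, 0.09, 0.02],
                                         [0.27, 0.00, -0.05],
                                         [0.27, -0.09, 0.03]])  # assumed values
max_norm_svd_sqrt = np.max(np.linalg.norm(color_correlation_svd_sqrt, axis=0))

t = np.random.randn(4, 4, 3)       # e.g. a 4x4 image in the decorrelated space
t_flat = t.reshape(-1, 3)
t_flat = t_flat @ (color_correlation_svd_sqrt / max_norm_svd_sqrt).T
t = t_flat.reshape(t.shape)        # back to the original shape, now in RGB space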
mitsei/dlkit
dlkit/json_/resource/sessions.py
https://github.com/mitsei/dlkit/blob/445f968a175d61c8d92c0f617a3c17dc1dc7c584/dlkit/json_/resource/sessions.py#L780-L815
def get_resource_form_for_create(self, resource_record_types): """Gets the resource form for creating new resources. A new form should be requested for each create transaction. arg: resource_record_types (osid.type.Type[]): array of resource record types return: (osid.resource.ResourceForm) - the resource form raise: NullArgument - ``resource_record_types`` is ``null`` raise: OperationFailed - unable to complete request raise: PermissionDenied - authorization failure raise: Unsupported - unable to get form with requested record types *compliance: mandatory -- This method must be implemented.* """ # Implemented from template for # osid.resource.ResourceAdminSession.get_resource_form_for_create_template for arg in resource_record_types: if not isinstance(arg, ABCType): raise errors.InvalidArgument('one or more argument array elements is not a valid OSID Type') if resource_record_types == []: obj_form = objects.ResourceForm( bin_id=self._catalog_id, runtime=self._runtime, effective_agent_id=self.get_effective_agent_id(), proxy=self._proxy) else: obj_form = objects.ResourceForm( bin_id=self._catalog_id, record_types=resource_record_types, runtime=self._runtime, effective_agent_id=self.get_effective_agent_id(), proxy=self._proxy) self._forms[obj_form.get_id().get_identifier()] = not CREATED return obj_form
[ "def", "get_resource_form_for_create", "(", "self", ",", "resource_record_types", ")", ":", "# Implemented from template for", "# osid.resource.ResourceAdminSession.get_resource_form_for_create_template", "for", "arg", "in", "resource_record_types", ":", "if", "not", "isinstance", "(", "arg", ",", "ABCType", ")", ":", "raise", "errors", ".", "InvalidArgument", "(", "'one or more argument array elements is not a valid OSID Type'", ")", "if", "resource_record_types", "==", "[", "]", ":", "obj_form", "=", "objects", ".", "ResourceForm", "(", "bin_id", "=", "self", ".", "_catalog_id", ",", "runtime", "=", "self", ".", "_runtime", ",", "effective_agent_id", "=", "self", ".", "get_effective_agent_id", "(", ")", ",", "proxy", "=", "self", ".", "_proxy", ")", "else", ":", "obj_form", "=", "objects", ".", "ResourceForm", "(", "bin_id", "=", "self", ".", "_catalog_id", ",", "record_types", "=", "resource_record_types", ",", "runtime", "=", "self", ".", "_runtime", ",", "effective_agent_id", "=", "self", ".", "get_effective_agent_id", "(", ")", ",", "proxy", "=", "self", ".", "_proxy", ")", "self", ".", "_forms", "[", "obj_form", ".", "get_id", "(", ")", ".", "get_identifier", "(", ")", "]", "=", "not", "CREATED", "return", "obj_form" ]
Gets the resource form for creating new resources. A new form should be requested for each create transaction. arg: resource_record_types (osid.type.Type[]): array of resource record types return: (osid.resource.ResourceForm) - the resource form raise: NullArgument - ``resource_record_types`` is ``null`` raise: OperationFailed - unable to complete request raise: PermissionDenied - authorization failure raise: Unsupported - unable to get form with requested record types *compliance: mandatory -- This method must be implemented.*
[ "Gets", "the", "resource", "form", "for", "creating", "new", "resources", "." ]
python
train
angr/angr
angr/analyses/reassembler.py
https://github.com/angr/angr/blob/4e2f97d56af5419ee73bdb30482c8dd8ff5f3e40/angr/analyses/reassembler.py#L2615-L2639
def _cgc_package_list_identifier(self, data_addr, data_size): """ Identifies the CGC package list associated with the CGC binary. :param int data_addr: Address of the data in memory. :param int data_size: Maximum size possible. :return: A 2-tuple of data type and size. :rtype: tuple """ if data_size < 100: return None, None data = self.fast_memory_load(data_addr, data_size, str) if data[:10] != 'The DECREE': return None, None if not all(i in string.printable for i in data): return None, None if not re.match(r"The DECREE packages used in the creation of this challenge binary were:", data): return None, None return 'cgc-package-list', data_size
[ "def", "_cgc_package_list_identifier", "(", "self", ",", "data_addr", ",", "data_size", ")", ":", "if", "data_size", "<", "100", ":", "return", "None", ",", "None", "data", "=", "self", ".", "fast_memory_load", "(", "data_addr", ",", "data_size", ",", "str", ")", "if", "data", "[", ":", "10", "]", "!=", "'The DECREE'", ":", "return", "None", ",", "None", "if", "not", "all", "(", "i", "in", "string", ".", "printable", "for", "i", "in", "data", ")", ":", "return", "None", ",", "None", "if", "not", "re", ".", "match", "(", "r\"The DECREE packages used in the creation of this challenge binary were:\"", ",", "data", ")", ":", "return", "None", ",", "None", "return", "'cgc-package-list'", ",", "data_size" ]
Identifies the CGC package list associated with the CGC binary. :param int data_addr: Address of the data in memory. :param int data_size: Maximum size possible. :return: A 2-tuple of data type and size. :rtype: tuple
[ "Identifies", "the", "CGC", "package", "list", "associated", "with", "the", "CGC", "binary", "." ]
python
train
pytorch/text
torchtext/data/utils.py
https://github.com/pytorch/text/blob/26bfce6869dc704f1d86792f9a681d453d7e7bb8/torchtext/data/utils.py#L89-L98
def interleave_keys(a, b): """Interleave bits from two sort keys to form a joint sort key. Examples that are similar in both of the provided keys will have similar values for the key defined by this function. Useful for tasks with two text fields like machine translation or natural language inference. """ def interleave(args): return ''.join([x for t in zip(*args) for x in t]) return int(''.join(interleave(format(x, '016b') for x in (a, b))), base=2)
[ "def", "interleave_keys", "(", "a", ",", "b", ")", ":", "def", "interleave", "(", "args", ")", ":", "return", "''", ".", "join", "(", "[", "x", "for", "t", "in", "zip", "(", "*", "args", ")", "for", "x", "in", "t", "]", ")", "return", "int", "(", "''", ".", "join", "(", "interleave", "(", "format", "(", "x", ",", "'016b'", ")", "for", "x", "in", "(", "a", ",", "b", ")", ")", ")", ",", "base", "=", "2", ")" ]
Interleave bits from two sort keys to form a joint sort key. Examples that are similar in both of the provided keys will have similar values for the key defined by this function. Useful for tasks with two text fields like machine translation or natural language inference.
[ "Interleave", "bits", "from", "two", "sort", "keys", "to", "form", "a", "joint", "sort", "key", "." ]
python
train
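A worked example of interleave_keys above: with the 16-bit encoding, the bits of a and b alternate in the joint key, so keys that are close in both inputs stay numerically close:

print(format(2, '016b'))      # 0000000000000010
print(format(3, '016b'))      # 0000000000000011
print(interleave_keys(2, 3))  # 13, i.e. 0b1101 once the bits are interleaved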
blockstack/blockstack-core
blockstack/lib/nameset/db.py
https://github.com/blockstack/blockstack-core/blob/1dcfdd39b152d29ce13e736a6a1a0981401a0505/blockstack/lib/nameset/db.py#L1020-L1125
def namedb_state_transition( cur, opcode, op_data, block_id, vtxindex, txid, history_id, cur_record, record_table, constraints_ignored=[] ): """ Given an operation (opcode, op_data), a point in time (block_id, vtxindex, txid), and a current record (history_id, cur_record), apply the operation to the record and save the delta to the record's history. Also, insert or update the new record into the db. The cur_record must exist already. Return the newly updated record on success, with all compatibility quirks preserved. Raise an exception on failure. DO NOT CALL THIS METHOD DIRECTLY. """ # sanity check: must be a state-transitioning operation try: assert opcode in OPCODE_NAME_STATE_TRANSITIONS + OPCODE_NAMESPACE_STATE_TRANSITIONS, "BUG: opcode '%s' is not a state-transition" assert 'opcode' not in op_data, 'BUG: opcode not allowed in op_data' except Exception, e: log.exception(e) log.error("BUG: opcode '%s' is not a state-transition operation" % opcode) os.abort() # make sure we have a name/namespace_id and block number op_data_name = copy.deepcopy(op_data) if opcode in OPCODE_NAME_STATE_TRANSITIONS: # name state transition op_data_name['name'] = history_id elif opcode in OPCODE_NAMESPACE_STATE_TRANSITIONS: # namespace state transition op_data_name['namespace_id'] = history_id # sanity check make sure we got valid state transition data try: assert cur_record.has_key('block_number'), 'current record does not have a block number' op_data_name['block_number'] = cur_record['block_number'] rc = namedb_state_transition_sanity_check( opcode, op_data_name, history_id, cur_record, record_table ) if not rc: raise Exception("State transition sanity checks failed") rc = namedb_state_mutation_sanity_check( opcode, op_data_name ) if not rc: raise Exception("State mutation sanity checks failed") except Exception, e: log.exception(e) log.error("FATAL: state transition sanity checks failed") os.abort() # 1. generate the new record that will be used for consensus. # It will be the new data overlayed on the current record, with all quirks applied. new_record = {} new_record.update(cur_record) new_record.update(op_data_name) new_record['opcode'] = opcode canonicalized_record = op_canonicalize_quirks(opcode, new_record, cur_record) canonicalized_record['opcode'] = opcode rc = namedb_history_save(cur, opcode, history_id, None, new_record.get('value_hash', None), block_id, vtxindex, txid, canonicalized_record) if not rc: log.error("FATAL: failed to save history for '%s' at (%s, %s)" % (history_id, block_id, vtxindex)) os.abort() rc = False merged_new_record = None # 2. Store the actual op_data, to be returned on name lookups # Don't store extra fields that don't belong in the db (i.e. that we don't have colunms for), but preserve them across the write. stored_op_data = {} stored_op_data.update(op_data_name) # separate out the extras _, op_data_extra = namedb_find_missing_and_extra(cur, stored_op_data, record_table) if len(op_data_extra) > 0: log.debug("Remove extra fields: {}".format(','.join(op_data_extra))) for extra in op_data_extra: del stored_op_data[extra] if opcode in OPCODE_NAME_STATE_TRANSITIONS: # name state transition rc = namedb_name_update( cur, opcode, stored_op_data, constraints_ignored=constraints_ignored ) if not rc: log.error("FATAL: opcode is not a state-transition operation on names") os.abort() merged_new_record = namedb_get_name(cur, history_id, block_id, include_history=False, include_expired=True) elif opcode in OPCODE_NAMESPACE_STATE_TRANSITIONS: # namespace state transition rc = namedb_namespace_update( cur, opcode, stored_op_data, constraints_ignored=constraints_ignored ) if not rc: log.error("FATAL: opcode is not a state-transition operation on namespaces") os.abort() merged_new_record = namedb_get_namespace(cur, history_id, block_id, include_history=False, include_expired=True) # 3. success! make sure the merged_new_record is consistent with canonicalized_record for f in merged_new_record: if f not in canonicalized_record: raise Exception("canonicalized record is missing {}".format(f)) return canonicalized_record
[ "def", "namedb_state_transition", "(", "cur", ",", "opcode", ",", "op_data", ",", "block_id", ",", "vtxindex", ",", "txid", ",", "history_id", ",", "cur_record", ",", "record_table", ",", "constraints_ignored", "=", "[", "]", ")", ":", "# sanity check: must be a state-transitioning operation", "try", ":", "assert", "opcode", "in", "OPCODE_NAME_STATE_TRANSITIONS", "+", "OPCODE_NAMESPACE_STATE_TRANSITIONS", ",", "\"BUG: opcode '%s' is not a state-transition\"", "assert", "'opcode'", "not", "in", "op_data", ",", "'BUG: opcode not allowed in op_data'", "except", "Exception", ",", "e", ":", "log", ".", "exception", "(", "e", ")", "log", ".", "error", "(", "\"BUG: opcode '%s' is not a state-transition operation\"", "%", "opcode", ")", "os", ".", "abort", "(", ")", "# make sure we have a name/namespace_id and block number", "op_data_name", "=", "copy", ".", "deepcopy", "(", "op_data", ")", "if", "opcode", "in", "OPCODE_NAME_STATE_TRANSITIONS", ":", "# name state transition ", "op_data_name", "[", "'name'", "]", "=", "history_id", "elif", "opcode", "in", "OPCODE_NAMESPACE_STATE_TRANSITIONS", ":", "# namespace state transition ", "op_data_name", "[", "'namespace_id'", "]", "=", "history_id", "# sanity check make sure we got valid state transition data", "try", ":", "assert", "cur_record", ".", "has_key", "(", "'block_number'", ")", ",", "'current record does not have a block number'", "op_data_name", "[", "'block_number'", "]", "=", "cur_record", "[", "'block_number'", "]", "rc", "=", "namedb_state_transition_sanity_check", "(", "opcode", ",", "op_data_name", ",", "history_id", ",", "cur_record", ",", "record_table", ")", "if", "not", "rc", ":", "raise", "Exception", "(", "\"State transition sanity checks failed\"", ")", "rc", "=", "namedb_state_mutation_sanity_check", "(", "opcode", ",", "op_data_name", ")", "if", "not", "rc", ":", "raise", "Exception", "(", "\"State mutation sanity checks failed\"", ")", "except", "Exception", ",", "e", ":", "log", ".", "exception", "(", "e", ")", "log", ".", "error", "(", "\"FATAL: state transition sanity checks failed\"", ")", "os", ".", "abort", "(", ")", "# 1. generate the new record that will be used for consensus.", "# It will be the new data overlayed on the current record, with all quirks applied.", "new_record", "=", "{", "}", "new_record", ".", "update", "(", "cur_record", ")", "new_record", ".", "update", "(", "op_data_name", ")", "new_record", "[", "'opcode'", "]", "=", "opcode", "canonicalized_record", "=", "op_canonicalize_quirks", "(", "opcode", ",", "new_record", ",", "cur_record", ")", "canonicalized_record", "[", "'opcode'", "]", "=", "opcode", "rc", "=", "namedb_history_save", "(", "cur", ",", "opcode", ",", "history_id", ",", "None", ",", "new_record", ".", "get", "(", "'value_hash'", ",", "None", ")", ",", "block_id", ",", "vtxindex", ",", "txid", ",", "canonicalized_record", ")", "if", "not", "rc", ":", "log", ".", "error", "(", "\"FATAL: failed to save history for '%s' at (%s, %s)\"", "%", "(", "history_id", ",", "block_id", ",", "vtxindex", ")", ")", "os", ".", "abort", "(", ")", "rc", "=", "False", "merged_new_record", "=", "None", "# 2. Store the actual op_data, to be returned on name lookups", "# Don't store extra fields that don't belong in the db (i.e. that we don't have colunms for), but preserve them across the write.", "stored_op_data", "=", "{", "}", "stored_op_data", ".", "update", "(", "op_data_name", ")", "# separate out the extras", "_", ",", "op_data_extra", "=", "namedb_find_missing_and_extra", "(", "cur", ",", "stored_op_data", ",", "record_table", ")", "if", "len", "(", "op_data_extra", ")", ">", "0", ":", "log", ".", "debug", "(", "\"Remove extra fields: {}\"", ".", "format", "(", "','", ".", "join", "(", "op_data_extra", ")", ")", ")", "for", "extra", "in", "op_data_extra", ":", "del", "stored_op_data", "[", "extra", "]", "if", "opcode", "in", "OPCODE_NAME_STATE_TRANSITIONS", ":", "# name state transition ", "rc", "=", "namedb_name_update", "(", "cur", ",", "opcode", ",", "stored_op_data", ",", "constraints_ignored", "=", "constraints_ignored", ")", "if", "not", "rc", ":", "log", ".", "error", "(", "\"FATAL: opcode is not a state-transition operation on names\"", ")", "os", ".", "abort", "(", ")", "merged_new_record", "=", "namedb_get_name", "(", "cur", ",", "history_id", ",", "block_id", ",", "include_history", "=", "False", ",", "include_expired", "=", "True", ")", "elif", "opcode", "in", "OPCODE_NAMESPACE_STATE_TRANSITIONS", ":", "# namespace state transition ", "rc", "=", "namedb_namespace_update", "(", "cur", ",", "opcode", ",", "stored_op_data", ",", "constraints_ignored", "=", "constraints_ignored", ")", "if", "not", "rc", ":", "log", ".", "error", "(", "\"FATAL: opcode is not a state-transition operation on namespaces\"", ")", "os", ".", "abort", "(", ")", "merged_new_record", "=", "namedb_get_namespace", "(", "cur", ",", "history_id", ",", "block_id", ",", "include_history", "=", "False", ",", "include_expired", "=", "True", ")", "# 3. success! make sure the merged_new_record is consistent with canonicalized_record", "for", "f", "in", "merged_new_record", ":", "if", "f", "not", "in", "canonicalized_record", ":", "raise", "Exception", "(", "\"canonicalized record is missing {}\"", ".", "format", "(", "f", ")", ")", "return", "canonicalized_record" ]
Given an operation (opcode, op_data), a point in time (block_id, vtxindex, txid), and a current record (history_id, cur_record), apply the operation to the record and save the delta to the record's history. Also, insert or update the new record into the db. The cur_record must exist already. Return the newly updated record on success, with all compatibility quirks preserved. Raise an exception on failure. DO NOT CALL THIS METHOD DIRECTLY.
[ "Given", "an", "operation", "(", "opcode", "op_data", ")", "a", "point", "in", "time", "(", "block_id", "vtxindex", "txid", ")", "and", "a", "current", "record", "(", "history_id", "cur_record", ")", "apply", "the", "operation", "to", "the", "record", "and", "save", "the", "delta", "to", "the", "record", "s", "history", ".", "Also", "insert", "or", "update", "the", "new", "record", "into", "the", "db", "." ]
python
train
mathandy/svgpathtools
svgpathtools/path.py
https://github.com/mathandy/svgpathtools/blob/fd7348a1dfd88b65ea61da02325c6605aedf8c4f/svgpathtools/path.py#L1676-L1680
def unit_tangent(self, t): """returns the unit tangent vector of the segment at t (centered at the origin and expressed as a complex number).""" dseg = self.derivative(t) return dseg/abs(dseg)
[ "def", "unit_tangent", "(", "self", ",", "t", ")", ":", "dseg", "=", "self", ".", "derivative", "(", "t", ")", "return", "dseg", "/", "abs", "(", "dseg", ")" ]
returns the unit tangent vector of the segment at t (centered at the origin and expressed as a complex number).
[ "returns", "the", "unit", "tangent", "vector", "of", "the", "segment", "at", "t", "(", "centered", "at", "the", "origin", "and", "expressed", "as", "a", "complex", "number", ")", "." ]
python
train
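For a straight segment the unit tangent above is constant; a quick check with svgpathtools' Line, which treats points as complex numbers:

from svgpathtools import Line

seg = Line(0 + 0j, 1 + 1j)
print(seg.unit_tangent(0.5))  # roughly 0.707+0.707j, the direction of the segment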
brocade/pynos
pynos/versions/ver_7/ver_7_1_0/yang/brocade_firmware.py
https://github.com/brocade/pynos/blob/bd8a34e98f322de3fc06750827d8bbc3a0c00380/pynos/versions/ver_7/ver_7_1_0/yang/brocade_firmware.py#L38-L49
def fwdl_status_output_fwdl_state(self, **kwargs): """Auto Generated Code """ config = ET.Element("config") fwdl_status = ET.Element("fwdl_status") config = fwdl_status output = ET.SubElement(fwdl_status, "output") fwdl_state = ET.SubElement(output, "fwdl-state") fwdl_state.text = kwargs.pop('fwdl_state') callback = kwargs.pop('callback', self._callback) return callback(config)
[ "def", "fwdl_status_output_fwdl_state", "(", "self", ",", "*", "*", "kwargs", ")", ":", "config", "=", "ET", ".", "Element", "(", "\"config\"", ")", "fwdl_status", "=", "ET", ".", "Element", "(", "\"fwdl_status\"", ")", "config", "=", "fwdl_status", "output", "=", "ET", ".", "SubElement", "(", "fwdl_status", ",", "\"output\"", ")", "fwdl_state", "=", "ET", ".", "SubElement", "(", "output", ",", "\"fwdl-state\"", ")", "fwdl_state", ".", "text", "=", "kwargs", ".", "pop", "(", "'fwdl_state'", ")", "callback", "=", "kwargs", ".", "pop", "(", "'callback'", ",", "self", ".", "_callback", ")", "return", "callback", "(", "config", ")" ]
Auto Generated Code
[ "Auto", "Generated", "Code" ]
python
train
thespacedoctor/qubits
qubits/universe.py
https://github.com/thespacedoctor/qubits/blob/3c02ace7226389841c6bb838d045c11bed61a3c2/qubits/universe.py#L439-L478
def random_peak_magnitudes( log, peakMagnitudeDistributions, snTypesArray, plot=True): """ *Generate a numpy array of random (distribution weighted) peak magnitudes for the given sn types.* **Key Arguments:** - ``log`` -- logger - ``peakMagnitudeDistributions`` -- yaml style dictionary of peak magnitude distributions - ``snTypesArray`` -- the pre-generated array of random sn types - ``plot`` -- generate plot? **Return:** - None """ ################ > IMPORTS ################ ## STANDARD LIB ## ## THIRD PARTY ## import matplotlib.pyplot as plt import numpy as np ## LOCAL APPLICATION ## ################ >ACTION(S) ################ magDistributions = {} for snType, peakMag in peakMagnitudeDistributions['magnitude'].iteritems(): sigma = peakMagnitudeDistributions['sigma'][snType] magDistributions[snType] = [peakMag, sigma] peakMagList = [] for item in snTypesArray: thisPeak = magDistributions[item][ 1] * np.random.randn() + magDistributions[item][0] peakMagList.append(thisPeak) peakMagArray = np.array(peakMagList) # log.debug('peakMagArray %s' % (peakMagArray,)) return peakMagArray
[ "def", "random_peak_magnitudes", "(", "log", ",", "peakMagnitudeDistributions", ",", "snTypesArray", ",", "plot", "=", "True", ")", ":", "################ > IMPORTS ################", "## STANDARD LIB ##", "## THIRD PARTY ##", "import", "matplotlib", ".", "pyplot", "as", "plt", "import", "numpy", "as", "np", "## LOCAL APPLICATION ##", "################ >ACTION(S) ################", "magDistributions", "=", "{", "}", "for", "snType", ",", "peakMag", "in", "peakMagnitudeDistributions", "[", "'magnitude'", "]", ".", "iteritems", "(", ")", ":", "sigma", "=", "peakMagnitudeDistributions", "[", "'sigma'", "]", "[", "snType", "]", "magDistributions", "[", "snType", "]", "=", "[", "peakMag", ",", "sigma", "]", "peakMagList", "=", "[", "]", "for", "item", "in", "snTypesArray", ":", "thisPeak", "=", "magDistributions", "[", "item", "]", "[", "1", "]", "*", "np", ".", "random", ".", "randn", "(", ")", "+", "magDistributions", "[", "item", "]", "[", "0", "]", "peakMagList", ".", "append", "(", "thisPeak", ")", "peakMagArray", "=", "np", ".", "array", "(", "peakMagList", ")", "# log.debug('peakMagArray %s' % (peakMagArray,))", "return", "peakMagArray" ]
*Generate a numpy array of random (distribution weighted) peak magnitudes for the given sn types.* **Key Arguments:** - ``log`` -- logger - ``peakMagnitudeDistributions`` -- yaml style dictionary of peak magnitude distributions - ``snTypesArray`` -- the pre-generated array of random sn types - ``plot`` -- generate plot? **Return:** - None
[ "*", "Generate", "a", "numpy", "array", "of", "random", "(", "distribution", "weighted", ")", "peak", "magnitudes", "for", "the", "given", "sn", "types", ".", "*" ]
python
train
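The heart of random_peak_magnitudes above is the standard scale-and-shift draw from a unit normal (sigma * randn() + mean); a standalone sketch with made-up SN types and magnitudes:

import numpy as np

magDistributions = {'Ia': [-19.3, 0.3], 'II': [-17.0, 1.0]}  # [peak mag, sigma]; assumed values
snTypesArray = np.array(['Ia', 'II', 'Ia'])
peakMagArray = np.array([magDistributions[t][1] * np.random.randn() + magDistributions[t][0]
                         for t in snTypesArray])
print(peakMagArray)  # one Gaussian draw per supernova type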
bitesofcode/projexui
projexui/xapplication.py
https://github.com/bitesofcode/projexui/blob/f18a73bec84df90b034ca69b9deea118dbedfc4d/projexui/xapplication.py#L492-L505
def unregisterWalkthrough(self, walkthrough): """ Unregisters the inputed walkthrough from the application walkthroug list. :param walkthrough | <XWalkthrough> """ if type(walkthrough) in (str, unicode): walkthrough = self.findWalkthrough(walkthrough) try: self._walkthroughs.remove(walkthrough) except ValueError: pass
[ "def", "unregisterWalkthrough", "(", "self", ",", "walkthrough", ")", ":", "if", "type", "(", "walkthrough", ")", "in", "(", "str", ",", "unicode", ")", ":", "walkthrough", "=", "self", ".", "findWalkthrough", "(", "walkthrough", ")", "try", ":", "self", ".", "_walkthroughs", ".", "remove", "(", "walkthrough", ")", "except", "ValueError", ":", "pass" ]
Unregisters the inputed walkthrough from the application walkthroug list. :param walkthrough | <XWalkthrough>
[ "Unregisters", "the", "inputed", "walkthrough", "from", "the", "application", "walkthroug", "list", ".", ":", "param", "walkthrough", "|", "<XWalkthrough", ">" ]
python
train
urinieto/msaf
msaf/algorithms/interface.py
https://github.com/urinieto/msaf/blob/9dbb57d77a1310465a65cc40f1641d083ca74385/msaf/algorithms/interface.py#L85-L100
def _preprocess(self, valid_features=["pcp", "tonnetz", "mfcc", "cqt", "tempogram"]): """This method obtains the actual features.""" # Use specific feature if self.feature_str not in valid_features: raise RuntimeError("Feature %s in not valid for algorithm: %s " "(valid features are %s)." % (self.feature_str, __name__, valid_features)) else: try: F = self.features.features except KeyError: raise RuntimeError("Feature %s in not supported by MSAF" % (self.feature_str)) return F
[ "def", "_preprocess", "(", "self", ",", "valid_features", "=", "[", "\"pcp\"", ",", "\"tonnetz\"", ",", "\"mfcc\"", ",", "\"cqt\"", ",", "\"tempogram\"", "]", ")", ":", "# Use specific feature", "if", "self", ".", "feature_str", "not", "in", "valid_features", ":", "raise", "RuntimeError", "(", "\"Feature %s in not valid for algorithm: %s \"", "\"(valid features are %s).\"", "%", "(", "self", ".", "feature_str", ",", "__name__", ",", "valid_features", ")", ")", "else", ":", "try", ":", "F", "=", "self", ".", "features", ".", "features", "except", "KeyError", ":", "raise", "RuntimeError", "(", "\"Feature %s in not supported by MSAF\"", "%", "(", "self", ".", "feature_str", ")", ")", "return", "F" ]
This method obtains the actual features.
[ "This", "method", "obtains", "the", "actual", "features", "." ]
python
test
bprinty/gems
gems/datatypes.py
https://github.com/bprinty/gems/blob/3ff76407af0e71621dada744cd964611e998699c/gems/datatypes.py#L466-L486
def json(self): """ Return JSON representation of object. """ if self.meta_type == 'list': ret = [] for dat in self._list: if not isinstance(dat, composite): ret.append(dat) else: ret.append(dat.json()) return ret elif self.meta_type == 'dict': ret = {} for key in self._dict: if not isinstance(self._dict[key], composite): ret[key] = self._dict[key] else: ret[key] = self._dict[key].json() return ret
[ "def", "json", "(", "self", ")", ":", "if", "self", ".", "meta_type", "==", "'list'", ":", "ret", "=", "[", "]", "for", "dat", "in", "self", ".", "_list", ":", "if", "not", "isinstance", "(", "dat", ",", "composite", ")", ":", "ret", ".", "append", "(", "dat", ")", "else", ":", "ret", ".", "append", "(", "dat", ".", "json", "(", ")", ")", "return", "ret", "elif", "self", ".", "meta_type", "==", "'dict'", ":", "ret", "=", "{", "}", "for", "key", "in", "self", ".", "_dict", ":", "if", "not", "isinstance", "(", "self", ".", "_dict", "[", "key", "]", ",", "composite", ")", ":", "ret", "[", "key", "]", "=", "self", ".", "_dict", "[", "key", "]", "else", ":", "ret", "[", "key", "]", "=", "self", ".", "_dict", "[", "key", "]", ".", "json", "(", ")", "return", "ret" ]
Return JSON representation of object.
[ "Return", "JSON", "representation", "of", "object", "." ]
python
valid
the01/python-paps
paps/si/sensorClientAdapter.py
https://github.com/the01/python-paps/blob/2dde5a71913e4c7b22901cf05c6ecedd890919c4/paps/si/sensorClientAdapter.py#L90-L106
def on_person_update(self, people): """ People have changed Should always include all people (all that were added via on_person_new) :param people: People to update :type people: list[paps.people.People] :rtype: None :raises Exception: On error (for now just an exception) """ try: self.sensor_client.person_update(people) except: self.exception("Failed to update people") raise Exception("Updating people failed")
[ "def", "on_person_update", "(", "self", ",", "people", ")", ":", "try", ":", "self", ".", "sensor_client", ".", "person_update", "(", "people", ")", "except", ":", "self", ".", "exception", "(", "\"Failed to update people\"", ")", "raise", "Exception", "(", "\"Updating people failed\"", ")" ]
People have changed Should always include all people (all that were added via on_person_new) :param people: People to update :type people: list[paps.people.People] :rtype: None :raises Exception: On error (for now just an exception)
[ "People", "have", "changed" ]
python
train
Britefury/batchup
batchup/config.py
https://github.com/Britefury/batchup/blob/3fc2304e629f813c05f9e7a85a18acef3581a536/batchup/config.py#L127-L146
def compute_sha256(path): """ Compute the SHA-256 hash of the file at the given path Parameters ---------- path: str The path of the file Returns ------- str The SHA-256 HEX digest """ hasher = hashlib.sha256() with open(path, 'rb') as f: # 10MB chunks for chunk in iter(lambda: f.read(10 * 1024 * 1024), b''): hasher.update(chunk) return hasher.hexdigest()
[ "def", "compute_sha256", "(", "path", ")", ":", "hasher", "=", "hashlib", ".", "sha256", "(", ")", "with", "open", "(", "path", ",", "'rb'", ")", "as", "f", ":", "# 10MB chunks", "for", "chunk", "in", "iter", "(", "lambda", ":", "f", ".", "read", "(", "10", "*", "1024", "*", "1024", ")", ",", "b''", ")", ":", "hasher", ".", "update", "(", "chunk", ")", "return", "hasher", ".", "hexdigest", "(", ")" ]
Compute the SHA-256 hash of the file at the given path Parameters ---------- path: str The path of the file Returns ------- str The SHA-256 HEX digest
[ "Compute", "the", "SHA", "-", "256", "hash", "of", "the", "file", "at", "the", "given", "path" ]
python
train
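A quick self-check of compute_sha256 above against a one-shot hashlib digest on a small temporary file:

import hashlib
import tempfile

with tempfile.NamedTemporaryFile(delete=False) as f:
    f.write(b'hello world')
    path = f.name

assert compute_sha256(path) == hashlib.sha256(b'hello world').hexdigest()
print(compute_sha256(path))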
apple/turicreate
deps/src/libxml2-2.9.1/python/libxml2.py
https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/deps/src/libxml2-2.9.1/python/libxml2.py#L4299-L4304
def newCDataBlock(self, content, len): """Creation of a new node containing a CDATA block. """ ret = libxml2mod.xmlNewCDataBlock(self._o, content, len) if ret is None:raise treeError('xmlNewCDataBlock() failed') __tmp = xmlNode(_obj=ret) return __tmp
[ "def", "newCDataBlock", "(", "self", ",", "content", ",", "len", ")", ":", "ret", "=", "libxml2mod", ".", "xmlNewCDataBlock", "(", "self", ".", "_o", ",", "content", ",", "len", ")", "if", "ret", "is", "None", ":", "raise", "treeError", "(", "'xmlNewCDataBlock() failed'", ")", "__tmp", "=", "xmlNode", "(", "_obj", "=", "ret", ")", "return", "__tmp" ]
Creation of a new node containing a CDATA block.
[ "Creation", "of", "a", "new", "node", "containing", "a", "CDATA", "block", "." ]
python
train
miso-belica/jusText
justext/core.py
https://github.com/miso-belica/jusText/blob/ad05130df2ca883f291693353f9d86e20fe94a4e/justext/core.py#L132-L136
def make_paragraphs(cls, root): """Converts DOM into paragraphs.""" handler = cls() lxml.sax.saxify(root, handler) return handler.paragraphs
[ "def", "make_paragraphs", "(", "cls", ",", "root", ")", ":", "handler", "=", "cls", "(", ")", "lxml", ".", "sax", ".", "saxify", "(", "root", ",", "handler", ")", "return", "handler", ".", "paragraphs" ]
Converts DOM into paragraphs.
[ "Converts", "DOM", "into", "paragraphs", "." ]
python
train
saltstack/salt
salt/modules/state.py
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/state.py#L579-L603
def template_str(tem, queue=False, **kwargs): ''' Execute the information stored in a string from an sls template CLI Example: .. code-block:: bash salt '*' state.template_str '<Template String>' ''' conflict = _check_queue(queue, kwargs) if conflict is not None: return conflict opts = salt.utils.state.get_sls_opts(__opts__, **kwargs) try: st_ = salt.state.State(opts, proxy=__proxy__, initial_pillar=_get_initial_pillar(opts)) except NameError: st_ = salt.state.State(opts, initial_pillar=_get_initial_pillar(opts)) ret = st_.call_template_str(tem) _set_retcode(ret) return ret
[ "def", "template_str", "(", "tem", ",", "queue", "=", "False", ",", "*", "*", "kwargs", ")", ":", "conflict", "=", "_check_queue", "(", "queue", ",", "kwargs", ")", "if", "conflict", "is", "not", "None", ":", "return", "conflict", "opts", "=", "salt", ".", "utils", ".", "state", ".", "get_sls_opts", "(", "__opts__", ",", "*", "*", "kwargs", ")", "try", ":", "st_", "=", "salt", ".", "state", ".", "State", "(", "opts", ",", "proxy", "=", "__proxy__", ",", "initial_pillar", "=", "_get_initial_pillar", "(", "opts", ")", ")", "except", "NameError", ":", "st_", "=", "salt", ".", "state", ".", "State", "(", "opts", ",", "initial_pillar", "=", "_get_initial_pillar", "(", "opts", ")", ")", "ret", "=", "st_", ".", "call_template_str", "(", "tem", ")", "_set_retcode", "(", "ret", ")", "return", "ret" ]
Execute the information stored in a string from an sls template CLI Example: .. code-block:: bash salt '*' state.template_str '<Template String>'
[ "Execute", "the", "information", "stored", "in", "a", "string", "from", "an", "sls", "template" ]
python
train
mozilla/python_moztelemetry
moztelemetry/standards.py
https://github.com/mozilla/python_moztelemetry/blob/09ddf1ec7d953a4308dfdcb0ed968f27bd5921bb/moztelemetry/standards.py#L130-L140
def get_last_month_range(): """ Gets the date for the first and the last day of the previous complete month. :returns: A tuple containing two date objects, for the first and the last day of the month respectively. """ today = date.today() # Get the last day for the previous month. end_of_last_month = snap_to_beginning_of_month(today) - timedelta(days=1) start_of_last_month = snap_to_beginning_of_month(end_of_last_month) return (start_of_last_month, end_of_last_month)
[ "def", "get_last_month_range", "(", ")", ":", "today", "=", "date", ".", "today", "(", ")", "# Get the last day for the previous month.", "end_of_last_month", "=", "snap_to_beginning_of_month", "(", "today", ")", "-", "timedelta", "(", "days", "=", "1", ")", "start_of_last_month", "=", "snap_to_beginning_of_month", "(", "end_of_last_month", ")", "return", "(", "start_of_last_month", ",", "end_of_last_month", ")" ]
Gets the date for the first and the last day of the previous complete month. :returns: A tuple containing two date objects, for the first and the last day of the month respectively.
[ "Gets", "the", "date", "for", "the", "first", "and", "the", "last", "day", "of", "the", "previous", "complete", "month", "." ]
python
train
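A standalone sketch of the month-snapping trick used above; snap_to_beginning_of_month is reimplemented here with date.replace, which matches its apparent behaviour:

from datetime import date, timedelta

def snap_to_beginning_of_month(day):
    return day.replace(day=1)  # assumed equivalent of the module's helper

today = date(2024, 3, 15)  # illustrative "today"
end_of_last_month = snap_to_beginning_of_month(today) - timedelta(days=1)
start_of_last_month = snap_to_beginning_of_month(end_of_last_month)
print(start_of_last_month, end_of_last_month)  # 2024-02-01 2024-02-29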
LLNL/scraper
scripts/get_traffic.py
https://github.com/LLNL/scraper/blob/881a316e4c04dfa5a9cf491b7c7f9f997a7c56ea/scripts/get_traffic.py#L233-L254
def check_data_redundancy(self, file_path='', dict_to_check={}): """ Checks the given csv file against the json data scraped for the given dict. It will remove all data retrieved that has already been recorded so we don't write redundant data to file. Returns count of rows from file. """ count = 0 exists = os.path.isfile(file_path) previous_dates = {} if exists: with open(file_path, 'r') as input: input.readline()#skip header line for row in csv.reader(input): timestamp = calendar.timegm(time.strptime(row[0], '%Y-%m-%d')) if timestamp in dict_to_check:#our date is already recorded del dict_to_check[timestamp] #calc current id max count += 1 input.close() return count
[ "def", "check_data_redundancy", "(", "self", ",", "file_path", "=", "''", ",", "dict_to_check", "=", "{", "}", ")", ":", "count", "=", "0", "exists", "=", "os", ".", "path", ".", "isfile", "(", "file_path", ")", "previous_dates", "=", "{", "}", "if", "exists", ":", "with", "open", "(", "file_path", ",", "'r'", ")", "as", "input", ":", "input", ".", "readline", "(", ")", "#skip header line", "for", "row", "in", "csv", ".", "reader", "(", "input", ")", ":", "timestamp", "=", "calendar", ".", "timegm", "(", "time", ".", "strptime", "(", "row", "[", "0", "]", ",", "'%Y-%m-%d'", ")", ")", "if", "timestamp", "in", "dict_to_check", ":", "#our date is already recorded", "del", "dict_to_check", "[", "timestamp", "]", "#calc current id max", "count", "+=", "1", "input", ".", "close", "(", ")", "return", "count" ]
Checks the given csv file against the json data scraped for the given dict. It will remove all data retrieved that has already been recorded so we don't write redundant data to file. Returns count of rows from file.
[ "Checks", "the", "given", "csv", "file", "against", "the", "json", "data", "scraped", "for", "the", "given", "dict", ".", "It", "will", "remove", "all", "data", "retrieved", "that", "has", "already", "been", "recorded", "so", "we", "don", "t", "write", "redundant", "data", "to", "file", ".", "Returns", "count", "of", "rows", "from", "file", "." ]
python
test
googlefonts/ufo2ft
Lib/ufo2ft/outlineCompiler.py
https://github.com/googlefonts/ufo2ft/blob/915b986558e87bee288765d9218cc1cd4ebf7f4c/Lib/ufo2ft/outlineCompiler.py#L94-L132
def compile(self): """ Compile the OpenType binary. """ self.otf = TTFont(sfntVersion=self.sfntVersion) # only compile vertical metrics tables if vhea metrics a defined vertical_metrics = [ "openTypeVheaVertTypoAscender", "openTypeVheaVertTypoDescender", "openTypeVheaVertTypoLineGap", "openTypeVheaCaretSlopeRise", "openTypeVheaCaretSlopeRun", "openTypeVheaCaretOffset", ] self.vertical = all( getAttrWithFallback(self.ufo.info, metric) is not None for metric in vertical_metrics ) # write the glyph order self.otf.setGlyphOrder(self.glyphOrder) # populate basic tables self.setupTable_head() self.setupTable_hmtx() self.setupTable_hhea() self.setupTable_name() self.setupTable_maxp() self.setupTable_cmap() self.setupTable_OS2() self.setupTable_post() if self.vertical: self.setupTable_vmtx() self.setupTable_vhea() self.setupOtherTables() self.importTTX() return self.otf
[ "def", "compile", "(", "self", ")", ":", "self", ".", "otf", "=", "TTFont", "(", "sfntVersion", "=", "self", ".", "sfntVersion", ")", "# only compile vertical metrics tables if vhea metrics a defined", "vertical_metrics", "=", "[", "\"openTypeVheaVertTypoAscender\"", ",", "\"openTypeVheaVertTypoDescender\"", ",", "\"openTypeVheaVertTypoLineGap\"", ",", "\"openTypeVheaCaretSlopeRise\"", ",", "\"openTypeVheaCaretSlopeRun\"", ",", "\"openTypeVheaCaretOffset\"", ",", "]", "self", ".", "vertical", "=", "all", "(", "getAttrWithFallback", "(", "self", ".", "ufo", ".", "info", ",", "metric", ")", "is", "not", "None", "for", "metric", "in", "vertical_metrics", ")", "# write the glyph order", "self", ".", "otf", ".", "setGlyphOrder", "(", "self", ".", "glyphOrder", ")", "# populate basic tables", "self", ".", "setupTable_head", "(", ")", "self", ".", "setupTable_hmtx", "(", ")", "self", ".", "setupTable_hhea", "(", ")", "self", ".", "setupTable_name", "(", ")", "self", ".", "setupTable_maxp", "(", ")", "self", ".", "setupTable_cmap", "(", ")", "self", ".", "setupTable_OS2", "(", ")", "self", ".", "setupTable_post", "(", ")", "if", "self", ".", "vertical", ":", "self", ".", "setupTable_vmtx", "(", ")", "self", ".", "setupTable_vhea", "(", ")", "self", ".", "setupOtherTables", "(", ")", "self", ".", "importTTX", "(", ")", "return", "self", ".", "otf" ]
Compile the OpenType binary.
[ "Compile", "the", "OpenType", "binary", "." ]
python
train
honzajavorek/redis-collections
redis_collections/sets.py
https://github.com/honzajavorek/redis-collections/blob/07ca8efe88fb128f7dc7319dfa6a26cd39b3776b/redis_collections/sets.py#L131-L140
def pop(self): """ Remove and return an arbitrary element from the set. Raises :exc:`KeyError` if the set is empty. """ result = self.redis.spop(self.key) if result is None: raise KeyError return self._unpickle(result)
[ "def", "pop", "(", "self", ")", ":", "result", "=", "self", ".", "redis", ".", "spop", "(", "self", ".", "key", ")", "if", "result", "is", "None", ":", "raise", "KeyError", "return", "self", ".", "_unpickle", "(", "result", ")" ]
Remove and return an arbitrary element from the set. Raises :exc:`KeyError` if the set is empty.
[ "Remove", "and", "return", "an", "arbitrary", "element", "from", "the", "set", ".", "Raises", ":", "exc", ":", "KeyError", "if", "the", "set", "is", "empty", "." ]
python
train
apache/incubator-heron
heron/tools/tracker/src/python/config.py
https://github.com/apache/incubator-heron/blob/ad10325a0febe89ad337e561ebcbe37ec5d9a5ac/heron/tools/tracker/src/python/config.py#L44-L49
def load_configs(self): """load config files""" self.statemgr_config.set_state_locations(self.configs[STATEMGRS_KEY]) if EXTRA_LINKS_KEY in self.configs: for extra_link in self.configs[EXTRA_LINKS_KEY]: self.extra_links.append(self.validate_extra_link(extra_link))
[ "def", "load_configs", "(", "self", ")", ":", "self", ".", "statemgr_config", ".", "set_state_locations", "(", "self", ".", "configs", "[", "STATEMGRS_KEY", "]", ")", "if", "EXTRA_LINKS_KEY", "in", "self", ".", "configs", ":", "for", "extra_link", "in", "self", ".", "configs", "[", "EXTRA_LINKS_KEY", "]", ":", "self", ".", "extra_links", ".", "append", "(", "self", ".", "validate_extra_link", "(", "extra_link", ")", ")" ]
load config files
[ "load", "config", "files" ]
python
valid
googleapis/dialogflow-python-client-v2
dialogflow_v2/gapic/session_entity_types_client.py
https://github.com/googleapis/dialogflow-python-client-v2/blob/8c9c8709222efe427b76c9c8fcc04a0c4a0760b5/dialogflow_v2/gapic/session_entity_types_client.py#L351-L415
def create_session_entity_type( self, parent, session_entity_type, retry=google.api_core.gapic_v1.method.DEFAULT, timeout=google.api_core.gapic_v1.method.DEFAULT, metadata=None): """ Creates a session entity type. Example: >>> import dialogflow_v2 >>> >>> client = dialogflow_v2.SessionEntityTypesClient() >>> >>> parent = client.session_path('[PROJECT]', '[SESSION]') >>> >>> # TODO: Initialize ``session_entity_type``: >>> session_entity_type = {} >>> >>> response = client.create_session_entity_type(parent, session_entity_type) Args: parent (str): Required. The session to create a session entity type for. Format: ``projects/<Project ID>/agent/sessions/<Session ID>``. session_entity_type (Union[dict, ~google.cloud.dialogflow_v2.types.SessionEntityType]): Required. The session entity type to create. If a dict is provided, it must be of the same form as the protobuf message :class:`~google.cloud.dialogflow_v2.types.SessionEntityType` retry (Optional[google.api_core.retry.Retry]): A retry object used to retry requests. If ``None`` is specified, requests will not be retried. timeout (Optional[float]): The amount of time, in seconds, to wait for the request to complete. Note that if ``retry`` is specified, the timeout applies to each individual attempt. metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata that is provided to the method. Returns: A :class:`~google.cloud.dialogflow_v2.types.SessionEntityType` instance. Raises: google.api_core.exceptions.GoogleAPICallError: If the request failed for any reason. google.api_core.exceptions.RetryError: If the request failed due to a retryable error and retry attempts failed. ValueError: If the parameters are invalid. """ # Wrap the transport method to add retry and timeout logic. if 'create_session_entity_type' not in self._inner_api_calls: self._inner_api_calls[ 'create_session_entity_type'] = google.api_core.gapic_v1.method.wrap_method( self.transport.create_session_entity_type, default_retry=self._method_configs[ 'CreateSessionEntityType'].retry, default_timeout=self._method_configs[ 'CreateSessionEntityType'].timeout, client_info=self._client_info, ) request = session_entity_type_pb2.CreateSessionEntityTypeRequest( parent=parent, session_entity_type=session_entity_type, ) return self._inner_api_calls['create_session_entity_type']( request, retry=retry, timeout=timeout, metadata=metadata)
[ "def", "create_session_entity_type", "(", "self", ",", "parent", ",", "session_entity_type", ",", "retry", "=", "google", ".", "api_core", ".", "gapic_v1", ".", "method", ".", "DEFAULT", ",", "timeout", "=", "google", ".", "api_core", ".", "gapic_v1", ".", "method", ".", "DEFAULT", ",", "metadata", "=", "None", ")", ":", "# Wrap the transport method to add retry and timeout logic.", "if", "'create_session_entity_type'", "not", "in", "self", ".", "_inner_api_calls", ":", "self", ".", "_inner_api_calls", "[", "'create_session_entity_type'", "]", "=", "google", ".", "api_core", ".", "gapic_v1", ".", "method", ".", "wrap_method", "(", "self", ".", "transport", ".", "create_session_entity_type", ",", "default_retry", "=", "self", ".", "_method_configs", "[", "'CreateSessionEntityType'", "]", ".", "retry", ",", "default_timeout", "=", "self", ".", "_method_configs", "[", "'CreateSessionEntityType'", "]", ".", "timeout", ",", "client_info", "=", "self", ".", "_client_info", ",", ")", "request", "=", "session_entity_type_pb2", ".", "CreateSessionEntityTypeRequest", "(", "parent", "=", "parent", ",", "session_entity_type", "=", "session_entity_type", ",", ")", "return", "self", ".", "_inner_api_calls", "[", "'create_session_entity_type'", "]", "(", "request", ",", "retry", "=", "retry", ",", "timeout", "=", "timeout", ",", "metadata", "=", "metadata", ")" ]
Creates a session entity type. Example: >>> import dialogflow_v2 >>> >>> client = dialogflow_v2.SessionEntityTypesClient() >>> >>> parent = client.session_path('[PROJECT]', '[SESSION]') >>> >>> # TODO: Initialize ``session_entity_type``: >>> session_entity_type = {} >>> >>> response = client.create_session_entity_type(parent, session_entity_type) Args: parent (str): Required. The session to create a session entity type for. Format: ``projects/<Project ID>/agent/sessions/<Session ID>``. session_entity_type (Union[dict, ~google.cloud.dialogflow_v2.types.SessionEntityType]): Required. The session entity type to create. If a dict is provided, it must be of the same form as the protobuf message :class:`~google.cloud.dialogflow_v2.types.SessionEntityType` retry (Optional[google.api_core.retry.Retry]): A retry object used to retry requests. If ``None`` is specified, requests will not be retried. timeout (Optional[float]): The amount of time, in seconds, to wait for the request to complete. Note that if ``retry`` is specified, the timeout applies to each individual attempt. metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata that is provided to the method. Returns: A :class:`~google.cloud.dialogflow_v2.types.SessionEntityType` instance. Raises: google.api_core.exceptions.GoogleAPICallError: If the request failed for any reason. google.api_core.exceptions.RetryError: If the request failed due to a retryable error and retry attempts failed. ValueError: If the parameters are invalid.
[ "Creates", "a", "session", "entity", "type", "." ]
python
train
bfrog/whizzer
whizzer/rpc/dispatch.py
https://github.com/bfrog/whizzer/blob/a1e43084b3ac8c1f3fb4ada081777cdbf791fd77/whizzer/rpc/dispatch.py#L30-L41
def call(self, function, args=(), kwargs={}):
    """Call a method given some args and kwargs.

    function -- string containing the method name to call
    args -- arguments, either a list or tuple

    returns the result of the method.

    May raise an exception if the method isn't in the dict.

    """
    return self.functions[function](*args, **kwargs)
[ "def", "call", "(", "self", ",", "function", ",", "args", "=", "(", ")", ",", "kwargs", "=", "{", "}", ")", ":", "return", "self", ".", "functions", "[", "function", "]", "(", "*", "args", ",", "*", "*", "kwargs", ")" ]
Call a method given some args and kwargs. function -- string containing the method name to call args -- arguments, either a list or tuple returns the result of the method. May raise an exception if the method isn't in the dict.
[ "Call", "a", "method", "given", "some", "args", "and", "kwargs", "." ]
python
train
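A minimal usage sketch for the `call` method above. Only `call` itself comes from the record; the surrounding `Dispatcher` scaffolding and the registration helper are hypothetical stand-ins for whizzer's real dispatch class.

# Hypothetical harness; call() is copied verbatim from the record above.
class Dispatcher(object):
    def __init__(self):
        self.functions = {}

    def add(self, name, func):  # assumed registration helper
        self.functions[name] = func

    def call(self, function, args=(), kwargs={}):
        return self.functions[function](*args, **kwargs)

dispatcher = Dispatcher()
dispatcher.add('add', lambda a, b: a + b)
print(dispatcher.call('add', args=(2, 3)))               # 5
print(dispatcher.call('add', kwargs={'a': 1, 'b': 2}))   # 3
# An unregistered name raises KeyError, matching the docstring's caveat.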
manns/pyspread
pyspread/src/interfaces/xls.py
https://github.com/manns/pyspread/blob/0e2fd44c2e0f06605efc3058c20a43a8c1f9e7e0/pyspread/src/interfaces/xls.py#L557-L602
def _cell_attribute_append(self, selection, tab, attributes):
    """Appends to cell_attributes with checks"""

    cell_attributes = self.code_array.cell_attributes

    thick_bottom_cells = []
    thick_right_cells = []

    # Does any cell in selection.cells have a larger bottom border?
    if "borderwidth_bottom" in attributes:
        bwidth = attributes["borderwidth_bottom"]
        for row, col in selection.cells:
            __bwidth = cell_attributes[row, col, tab]["borderwidth_bottom"]
            if __bwidth > bwidth:
                thick_bottom_cells.append((row, col))

    # Does any cell in selection.cells have a larger right border?
    if "borderwidth_right" in attributes:
        rwidth = attributes["borderwidth_right"]
        for row, col in selection.cells:
            __rwidth = cell_attributes[row, col, tab]["borderwidth_right"]
            if __rwidth > rwidth:
                thick_right_cells.append((row, col))

    for thick_cell in thick_bottom_cells + thick_right_cells:
        try:
            selection.cells.remove(thick_cell)
        except ValueError:
            pass

    cell_attributes.append((selection, tab, attributes))

    if thick_bottom_cells:
        bsel = copy(selection)
        bsel.cells = thick_bottom_cells
        battrs = copy(attributes)
        battrs.pop("borderwidth_bottom")
        cell_attributes.append((bsel, tab, battrs))

    if thick_right_cells:
        rsel = copy(selection)
        rsel.cells = thick_right_cells
        rattrs = copy(attributes)
        rattrs.pop("borderwidth_right")
        cell_attributes.append((rsel, tab, rattrs))
[ "def", "_cell_attribute_append", "(", "self", ",", "selection", ",", "tab", ",", "attributes", ")", ":", "cell_attributes", "=", "self", ".", "code_array", ".", "cell_attributes", "thick_bottom_cells", "=", "[", "]", "thick_right_cells", "=", "[", "]", "# Does any cell in selection.cells have a larger bottom border?", "if", "\"borderwidth_bottom\"", "in", "attributes", ":", "bwidth", "=", "attributes", "[", "\"borderwidth_bottom\"", "]", "for", "row", ",", "col", "in", "selection", ".", "cells", ":", "__bwidth", "=", "cell_attributes", "[", "row", ",", "col", ",", "tab", "]", "[", "\"borderwidth_bottom\"", "]", "if", "__bwidth", ">", "bwidth", ":", "thick_bottom_cells", ".", "append", "(", "(", "row", ",", "col", ")", ")", "# Does any cell in selection.cells have a larger right border?", "if", "\"borderwidth_right\"", "in", "attributes", ":", "rwidth", "=", "attributes", "[", "\"borderwidth_right\"", "]", "for", "row", ",", "col", "in", "selection", ".", "cells", ":", "__rwidth", "=", "cell_attributes", "[", "row", ",", "col", ",", "tab", "]", "[", "\"borderwidth_right\"", "]", "if", "__rwidth", ">", "rwidth", ":", "thick_right_cells", ".", "append", "(", "(", "row", ",", "col", ")", ")", "for", "thick_cell", "in", "thick_bottom_cells", "+", "thick_right_cells", ":", "try", ":", "selection", ".", "cells", ".", "remove", "(", "thick_cell", ")", "except", "ValueError", ":", "pass", "cell_attributes", ".", "append", "(", "(", "selection", ",", "tab", ",", "attributes", ")", ")", "if", "thick_bottom_cells", ":", "bsel", "=", "copy", "(", "selection", ")", "bsel", ".", "cells", "=", "thick_bottom_cells", "battrs", "=", "copy", "(", "attributes", ")", "battrs", ".", "pop", "(", "\"borderwidth_bottom\"", ")", "cell_attributes", ".", "append", "(", "(", "bsel", ",", "tab", ",", "battrs", ")", ")", "if", "thick_right_cells", ":", "rsel", "=", "copy", "(", "selection", ")", "rsel", ".", "cells", "=", "thick_right_cells", "rattrs", "=", "copy", "(", "attributes", ")", "rattrs", ".", "pop", "(", "\"borderwidth_right\"", ")", "cell_attributes", ".", "append", "(", "(", "rsel", ",", "tab", ",", "rattrs", ")", ")" ]
Appends to cell_attributes with checks
[ "Appends", "to", "cell_attributes", "with", "checks" ]
python
train
Microsoft/azure-devops-python-api
azure-devops/azure/devops/v5_1/task_agent/task_agent_client.py
https://github.com/Microsoft/azure-devops-python-api/blob/4777ffda2f5052fabbaddb2abe9cb434e0cf1aa8/azure-devops/azure/devops/v5_1/task_agent/task_agent_client.py#L509-L530
def get_agent_queues(self, project=None, queue_name=None, action_filter=None):
    """GetAgentQueues.
    [Preview API] Get a list of agent queues.
    :param str project: Project ID or project name
    :param str queue_name: Filter on the agent queue name
    :param str action_filter: Filter by whether the calling user has use or manage permissions
    :rtype: [TaskAgentQueue]
    """
    route_values = {}
    if project is not None:
        route_values['project'] = self._serialize.url('project', project, 'str')
    query_parameters = {}
    if queue_name is not None:
        query_parameters['queueName'] = self._serialize.query('queue_name', queue_name, 'str')
    if action_filter is not None:
        query_parameters['actionFilter'] = self._serialize.query('action_filter', action_filter, 'str')
    response = self._send(http_method='GET',
                          location_id='900fa995-c559-4923-aae7-f8424fe4fbea',
                          version='5.1-preview.1',
                          route_values=route_values,
                          query_parameters=query_parameters)
    return self._deserialize('[TaskAgentQueue]', self._unwrap_collection(response))
[ "def", "get_agent_queues", "(", "self", ",", "project", "=", "None", ",", "queue_name", "=", "None", ",", "action_filter", "=", "None", ")", ":", "route_values", "=", "{", "}", "if", "project", "is", "not", "None", ":", "route_values", "[", "'project'", "]", "=", "self", ".", "_serialize", ".", "url", "(", "'project'", ",", "project", ",", "'str'", ")", "query_parameters", "=", "{", "}", "if", "queue_name", "is", "not", "None", ":", "query_parameters", "[", "'queueName'", "]", "=", "self", ".", "_serialize", ".", "query", "(", "'queue_name'", ",", "queue_name", ",", "'str'", ")", "if", "action_filter", "is", "not", "None", ":", "query_parameters", "[", "'actionFilter'", "]", "=", "self", ".", "_serialize", ".", "query", "(", "'action_filter'", ",", "action_filter", ",", "'str'", ")", "response", "=", "self", ".", "_send", "(", "http_method", "=", "'GET'", ",", "location_id", "=", "'900fa995-c559-4923-aae7-f8424fe4fbea'", ",", "version", "=", "'5.1-preview.1'", ",", "route_values", "=", "route_values", ",", "query_parameters", "=", "query_parameters", ")", "return", "self", ".", "_deserialize", "(", "'[TaskAgentQueue]'", ",", "self", ".", "_unwrap_collection", "(", "response", ")", ")" ]
GetAgentQueues. [Preview API] Get a list of agent queues. :param str project: Project ID or project name :param str queue_name: Filter on the agent queue name :param str action_filter: Filter by whether the calling user has use or manage permissions :rtype: [TaskAgentQueue]
[ "GetAgentQueues", ".", "[", "Preview", "API", "]", "Get", "a", "list", "of", "agent", "queues", ".", ":", "param", "str", "project", ":", "Project", "ID", "or", "project", "name", ":", "param", "str", "queue_name", ":", "Filter", "on", "the", "agent", "queue", "name", ":", "param", "str", "action_filter", ":", "Filter", "by", "whether", "the", "calling", "user", "has", "use", "or", "manage", "permissions", ":", "rtype", ":", "[", "TaskAgentQueue", "]" ]
python
train
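A hedged sketch of calling get_agent_queues end to end. The organization URL, token, and project name are placeholders, and the client-factory call is an assumption about the azure-devops package layout, not something taken from the record.

from azure.devops.connection import Connection
from msrest.authentication import BasicAuthentication

# Placeholder credentials; a real personal access token is required.
credentials = BasicAuthentication('', 'PERSONAL_ACCESS_TOKEN')
connection = Connection(base_url='https://dev.azure.com/your-org', creds=credentials)

# Assumed factory name for the 5.1 task agent client.
client = connection.clients_v5_1.get_task_agent_client()
queues = client.get_agent_queues(project='MyProject', action_filter='use')
for queue in queues:
    print(queue.id, queue.name)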
tensorflow/tensor2tensor
tensor2tensor/layers/modalities.py
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/layers/modalities.py#L751-L765
def real_log_poisson_loss(top_out,
                          targets,
                          model_hparams,
                          vocab_size,
                          weights_fn):
    """Poisson loss for real."""
    del model_hparams, vocab_size  # unused arg
    predictions = top_out
    if (len(common_layers.shape_list(top_out)) != len(
            common_layers.shape_list(targets))):
        predictions = tf.squeeze(top_out, axis=[-1])
    with tf.name_scope("log_possion"):
        weights = weights_fn(targets)
        lp_loss = tf.nn.log_poisson_loss(targets, predictions)
        return tf.reduce_sum(lp_loss * weights), tf.reduce_sum(weights)
[ "def", "real_log_poisson_loss", "(", "top_out", ",", "targets", ",", "model_hparams", ",", "vocab_size", ",", "weights_fn", ")", ":", "del", "model_hparams", ",", "vocab_size", "# unused arg", "predictions", "=", "top_out", "if", "(", "len", "(", "common_layers", ".", "shape_list", "(", "top_out", ")", ")", "!=", "len", "(", "common_layers", ".", "shape_list", "(", "targets", ")", ")", ")", ":", "predictions", "=", "tf", ".", "squeeze", "(", "top_out", ",", "axis", "=", "[", "-", "1", "]", ")", "with", "tf", ".", "name_scope", "(", "\"log_possion\"", ")", ":", "weights", "=", "weights_fn", "(", "targets", ")", "lp_loss", "=", "tf", ".", "nn", ".", "log_poisson_loss", "(", "targets", ",", "predictions", ")", "return", "tf", ".", "reduce_sum", "(", "lp_loss", "*", "weights", ")", ",", "tf", ".", "reduce_sum", "(", "weights", ")" ]
Poisson loss for real.
[ "Poisson", "loss", "for", "real", "." ]
python
train
lawsie/guizero
guizero/Combo.py
https://github.com/lawsie/guizero/blob/84c7f0b314fa86f9fc88eb11c9a0f6c4b57155e2/guizero/Combo.py#L247-L253
def clear(self):
    """
    Clears all the options in a Combo
    """
    self._options = []
    self._combo_menu.tk.delete(0, END)
    self._selected.set("")
[ "def", "clear", "(", "self", ")", ":", "self", ".", "_options", "=", "[", "]", "self", ".", "_combo_menu", ".", "tk", ".", "delete", "(", "0", ",", "END", ")", "self", ".", "_selected", ".", "set", "(", "\"\"", ")" ]
Clears all the options in a Combo
[ "Clears", "all", "the", "options", "in", "a", "Combo" ]
python
train
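A short sketch of clear() in a running guizero app; the widget option values are arbitrary.

from guizero import App, Combo, PushButton

app = App(title="combo demo")
combo = Combo(app, options=["red", "green", "blue"])
# clear() empties the option list and resets the selection to ""
PushButton(app, text="Clear options", command=combo.clear)
app.display()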
mdavidsaver/p4p
src/p4p/client/thread.py
https://github.com/mdavidsaver/p4p/blob/c5e45eac01edfdad9cc2857bc283c7f2695802b8/src/p4p/client/thread.py#L65-L73
def close(self):
    """Close subscription.
    """
    if self._S is not None:
        # after .close() self._event should never be called
        self._S.close()
        # wait for Cancelled to be delivered
        self._evt.wait()
        self._S = None
[ "def", "close", "(", "self", ")", ":", "if", "self", ".", "_S", "is", "not", "None", ":", "# after .close() self._event should never be called", "self", ".", "_S", ".", "close", "(", ")", "# wait for Cancelled to be delivered", "self", ".", "_evt", ".", "wait", "(", ")", "self", ".", "_S", "=", "None" ]
Close subscription.
[ "Close", "subscription", "." ]
python
train
qubell/contrib-python-qubell-client
qubell/__init__.py
https://github.com/qubell/contrib-python-qubell-client/blob/4586ea11d5103c2ff9607d3ed922b5a0991b8845/qubell/__init__.py#L47-L61
def deprecated(func, msg=None):
    """
    A decorator which can be used to mark functions
    as deprecated. It will result in a deprecation warning being shown
    when the function is used.
    """

    message = msg or "Use of deprecated function '{}`.".format(func.__name__)

    @functools.wraps(func)
    def wrapper_func(*args, **kwargs):
        warnings.warn(message, DeprecationWarning, stacklevel=2)
        return func(*args, **kwargs)
    return wrapper_func
[ "def", "deprecated", "(", "func", ",", "msg", "=", "None", ")", ":", "message", "=", "msg", "or", "\"Use of deprecated function '{}`.\"", ".", "format", "(", "func", ".", "__name__", ")", "@", "functools", ".", "wraps", "(", "func", ")", "def", "wrapper_func", "(", "*", "args", ",", "*", "*", "kwargs", ")", ":", "warnings", ".", "warn", "(", "message", ",", "DeprecationWarning", ",", "stacklevel", "=", "2", ")", "return", "func", "(", "*", "args", ",", "*", "*", "kwargs", ")", "return", "wrapper_func" ]
A decorator which can be used to mark functions as deprecated. It will result in a deprecation warning being shown when the function is used.
[ "A", "decorator", "which", "can", "be", "used", "to", "mark", "functions", "as", "deprecated", ".", "It", "will", "result", "in", "a", "deprecation", "warning", "being", "shown", "when", "the", "function", "is", "used", "." ]
python
train
wbond/oscrypto
oscrypto/_osx/_security.py
https://github.com/wbond/oscrypto/blob/af778bf1c88bf6c4a7342f5353b130686a5bbe1c/oscrypto/_osx/_security.py#L23-L56
def handle_sec_error(error, exception_class=None):
    """
    Checks a Security OSStatus error code and throws an exception if there is an
    error to report

    :param error:
        An OSStatus

    :param exception_class:
        The exception class to use for the exception if an error occurred

    :raises:
        OSError - when the OSStatus contains an error
    """

    if error == 0:
        return

    if error in set([SecurityConst.errSSLClosedNoNotify, SecurityConst.errSSLClosedAbort]):
        raise TLSDisconnectError('The remote end closed the connection')
    if error == SecurityConst.errSSLClosedGraceful:
        raise TLSGracefulDisconnectError('The remote end closed the connection')

    cf_error_string = Security.SecCopyErrorMessageString(error, null())
    output = CFHelpers.cf_string_to_unicode(cf_error_string)
    CoreFoundation.CFRelease(cf_error_string)

    if output is None or output == '':
        output = 'OSStatus %s' % error

    if exception_class is None:
        exception_class = OSError

    raise exception_class(output)
[ "def", "handle_sec_error", "(", "error", ",", "exception_class", "=", "None", ")", ":", "if", "error", "==", "0", ":", "return", "if", "error", "in", "set", "(", "[", "SecurityConst", ".", "errSSLClosedNoNotify", ",", "SecurityConst", ".", "errSSLClosedAbort", "]", ")", ":", "raise", "TLSDisconnectError", "(", "'The remote end closed the connection'", ")", "if", "error", "==", "SecurityConst", ".", "errSSLClosedGraceful", ":", "raise", "TLSGracefulDisconnectError", "(", "'The remote end closed the connection'", ")", "cf_error_string", "=", "Security", ".", "SecCopyErrorMessageString", "(", "error", ",", "null", "(", ")", ")", "output", "=", "CFHelpers", ".", "cf_string_to_unicode", "(", "cf_error_string", ")", "CoreFoundation", ".", "CFRelease", "(", "cf_error_string", ")", "if", "output", "is", "None", "or", "output", "==", "''", ":", "output", "=", "'OSStatus %s'", "%", "error", "if", "exception_class", "is", "None", ":", "exception_class", "=", "OSError", "raise", "exception_class", "(", "output", ")" ]
Checks a Security OSStatus error code and throws an exception if there is an error to report :param error: An OSStatus :param exception_class: The exception class to use for the exception if an error occurred :raises: OSError - when the OSStatus contains an error
[ "Checks", "a", "Security", "OSStatus", "error", "code", "and", "throws", "an", "exception", "if", "there", "is", "an", "error", "to", "report" ]
python
valid
pandas-dev/pandas
pandas/core/internals/blocks.py
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/internals/blocks.py#L2109-L2145
def _try_coerce_args(self, values, other):
    """
    Coerce values and other to dtype 'i8'. NaN and NaT convert to
    the smallest i8, and will correctly round-trip to NaT if converted
    back in _try_coerce_result. values is always ndarray-like, other
    may not be

    Parameters
    ----------
    values : ndarray-like
    other : ndarray-like or scalar

    Returns
    -------
    base-type values, base-type other
    """
    values = values.view('i8')

    if isinstance(other, bool):
        raise TypeError
    elif is_null_datetimelike(other):
        other = tslibs.iNaT
    elif isinstance(other, (datetime, np.datetime64, date)):
        other = self._box_func(other)
        if getattr(other, 'tz') is not None:
            raise TypeError("cannot coerce a Timestamp with a tz on a "
                            "naive Block")
        other = other.asm8.view('i8')
    elif hasattr(other, 'dtype') and is_datetime64_dtype(other):
        other = other.astype('i8', copy=False).view('i8')
    else:
        # coercion issues
        # let higher levels handle
        raise TypeError(other)

    return values, other
[ "def", "_try_coerce_args", "(", "self", ",", "values", ",", "other", ")", ":", "values", "=", "values", ".", "view", "(", "'i8'", ")", "if", "isinstance", "(", "other", ",", "bool", ")", ":", "raise", "TypeError", "elif", "is_null_datetimelike", "(", "other", ")", ":", "other", "=", "tslibs", ".", "iNaT", "elif", "isinstance", "(", "other", ",", "(", "datetime", ",", "np", ".", "datetime64", ",", "date", ")", ")", ":", "other", "=", "self", ".", "_box_func", "(", "other", ")", "if", "getattr", "(", "other", ",", "'tz'", ")", "is", "not", "None", ":", "raise", "TypeError", "(", "\"cannot coerce a Timestamp with a tz on a \"", "\"naive Block\"", ")", "other", "=", "other", ".", "asm8", ".", "view", "(", "'i8'", ")", "elif", "hasattr", "(", "other", ",", "'dtype'", ")", "and", "is_datetime64_dtype", "(", "other", ")", ":", "other", "=", "other", ".", "astype", "(", "'i8'", ",", "copy", "=", "False", ")", ".", "view", "(", "'i8'", ")", "else", ":", "# coercion issues", "# let higher levels handle", "raise", "TypeError", "(", "other", ")", "return", "values", ",", "other" ]
Coerce values and other to dtype 'i8'. NaN and NaT convert to the smallest i8, and will correctly round-trip to NaT if converted back in _try_coerce_result. values is always ndarray-like, other may not be Parameters ---------- values : ndarray-like other : ndarray-like or scalar Returns ------- base-type values, base-type other
[ "Coerce", "values", "and", "other", "to", "dtype", "i8", ".", "NaN", "and", "NaT", "convert", "to", "the", "smallest", "i8", "and", "will", "correctly", "round", "-", "trip", "to", "NaT", "if", "converted", "back", "in", "_try_coerce_result", ".", "values", "is", "always", "ndarray", "-", "like", "other", "may", "not", "be" ]
python
train
iotile/coretools
iotilebuild/iotile/build/config/site_scons/release.py
https://github.com/iotile/coretools/blob/2d794f5f1346b841b0dcd16c9d284e9bf2f3c6ec/iotilebuild/iotile/build/config/site_scons/release.py#L99-L125
def copy_extra_files(tile):
    """Copy all files listed in a copy_files and copy_products section.

    Files listed in copy_files will be copied from the specified location
    in the current component to the specified path under the output
    folder.

    Files listed in copy_products will be looked up with a ProductResolver
    and copied to the specified path in the output folder.  There
    is not currently a way to specify what type of product is being resolved.
    The `short_name` given must be unique across all products from this
    component and its direct dependencies.
    """

    env = Environment(tools=[])
    outputbase = os.path.join('build', 'output')

    for src, dest in tile.settings.get('copy_files', {}).items():
        outputfile = os.path.join(outputbase, dest)
        env.Command([outputfile], [src], Copy("$TARGET", "$SOURCE"))

    resolver = ProductResolver.Create()
    for src, dest in tile.settings.get('copy_products', {}).items():
        prod = resolver.find_unique(None, src)
        outputfile = os.path.join(outputbase, dest)
        env.Command([outputfile], [prod.full_path], Copy("$TARGET", "$SOURCE"))
[ "def", "copy_extra_files", "(", "tile", ")", ":", "env", "=", "Environment", "(", "tools", "=", "[", "]", ")", "outputbase", "=", "os", ".", "path", ".", "join", "(", "'build'", ",", "'output'", ")", "for", "src", ",", "dest", "in", "tile", ".", "settings", ".", "get", "(", "'copy_files'", ",", "{", "}", ")", ".", "items", "(", ")", ":", "outputfile", "=", "os", ".", "path", ".", "join", "(", "outputbase", ",", "dest", ")", "env", ".", "Command", "(", "[", "outputfile", "]", ",", "[", "src", "]", ",", "Copy", "(", "\"$TARGET\"", ",", "\"$SOURCE\"", ")", ")", "resolver", "=", "ProductResolver", ".", "Create", "(", ")", "for", "src", ",", "dest", "in", "tile", ".", "settings", ".", "get", "(", "'copy_products'", ",", "{", "}", ")", ".", "items", "(", ")", ":", "prod", "=", "resolver", ".", "find_unique", "(", "None", ",", "src", ")", "outputfile", "=", "os", ".", "path", ".", "join", "(", "outputbase", ",", "dest", ")", "env", ".", "Command", "(", "[", "outputfile", "]", ",", "[", "prod", ".", "full_path", "]", ",", "Copy", "(", "\"$TARGET\"", ",", "\"$SOURCE\"", ")", ")" ]
Copy all files listed in a copy_files and copy_products section. Files listed in copy_files will be copied from the specified location in the current component to the specified path under the output folder. Files listed in copy_products will be looked up with a ProductResolver and copied to the specified path in the output folder. There is not currently a way to specify what type of product is being resolved. The `short_name` given must be unique across all products from this component and its direct dependencies.
[ "Copy", "all", "files", "listed", "in", "a", "copy_files", "and", "copy_products", "section", "." ]
python
train
quantumlib/Cirq
cirq/protocols/mixture.py
https://github.com/quantumlib/Cirq/blob/0827da80dd7880e5b923eb69407e980ed9bc0bd2/cirq/protocols/mixture.py#L100-L115
def has_mixture(val: Any) -> bool:
    """Returns whether the value has a mixture representation.

    Returns:
        If `val` has a `_has_mixture_` method and its result is not
        NotImplemented, that result is returned. Otherwise, if the value
        has a `_mixture_` method return True if that has a non-default value.
        Returns False if neither function exists.
    """
    getter = getattr(val, '_has_mixture_', None)
    result = NotImplemented if getter is None else getter()

    if result is not NotImplemented:
        return result

    # No _has_mixture_ function, use _mixture_ instead
    return mixture(val, None) is not None
[ "def", "has_mixture", "(", "val", ":", "Any", ")", "->", "bool", ":", "getter", "=", "getattr", "(", "val", ",", "'_has_mixture_'", ",", "None", ")", "result", "=", "NotImplemented", "if", "getter", "is", "None", "else", "getter", "(", ")", "if", "result", "is", "not", "NotImplemented", ":", "return", "result", "# No _has_mixture_ function, use _mixture_ instead", "return", "mixture", "(", "val", ",", "None", ")", "is", "not", "None" ]
Returns whether the value has a mixture representation. Returns: If `val` has a `_has_mixture_` method and its result is not NotImplemented, that result is returned. Otherwise, if the value has a `_mixture_` method return True if that has a non-default value. Returns False if neither function exists.
[ "Returns", "whether", "the", "value", "has", "a", "mixture", "representation", "." ]
python
train
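A hedged sketch of the protocol in use: a toy class exposing only the `_mixture_` magic method, so has_mixture falls through to the mixture() check. It assumes cirq re-exports the protocol at top level, as its protocols package does.

import numpy as np
import cirq

class NoisyFlip:
    """Toy channel: identity with p=0.9, bit flip with p=0.1."""
    def _mixture_(self):
        return [(0.9, np.eye(2)), (0.1, np.array([[0, 1], [1, 0]]))]

print(cirq.has_mixture(NoisyFlip()))  # True, via the _mixture_ fallback
print(cirq.has_mixture(object()))     # False, neither magic method exists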
mortada/fredapi
fredapi/fred.py
https://github.com/mortada/fredapi/blob/d3ca79efccb9525f2752a0d6da90e793e87c3fd8/fredapi/fred.py#L307-L349
def __get_search_results(self, url, limit, order_by, sort_order, filter):
    """
    helper function for getting search results up to specified limit on the number of results. The Fred HTTP API
    truncates to 1000 results per request, so this may issue multiple HTTP requests to obtain more available data.
    """

    order_by_options = ['search_rank', 'series_id', 'title', 'units', 'frequency',
                        'seasonal_adjustment', 'realtime_start', 'realtime_end',
                        'last_updated', 'observation_start', 'observation_end',
                        'popularity']
    if order_by is not None:
        if order_by in order_by_options:
            url = url + '&order_by=' + order_by
        else:
            raise ValueError('%s is not in the valid list of order_by options: %s' %
                             (order_by, str(order_by_options)))

    if filter is not None:
        if len(filter) == 2:
            url = url + '&filter_variable=%s&filter_value=%s' % (filter[0], filter[1])
        else:
            raise ValueError('Filter should be a 2 item tuple like (filter_variable, filter_value)')

    sort_order_options = ['asc', 'desc']
    if sort_order is not None:
        if sort_order in sort_order_options:
            url = url + '&sort_order=' + sort_order
        else:
            raise ValueError('%s is not in the valid list of sort_order options: %s' %
                             (sort_order, str(sort_order_options)))

    data, num_results_total = self.__do_series_search(url)
    if data is None:
        return data

    if limit == 0:
        max_results_needed = num_results_total
    else:
        max_results_needed = limit

    if max_results_needed > self.max_results_per_request:
        for i in range(1, max_results_needed // self.max_results_per_request + 1):
            offset = i * self.max_results_per_request
            next_data, _ = self.__do_series_search(url + '&offset=' + str(offset))
            data = data.append(next_data)
    return data.head(max_results_needed)
[ "def", "__get_search_results", "(", "self", ",", "url", ",", "limit", ",", "order_by", ",", "sort_order", ",", "filter", ")", ":", "order_by_options", "=", "[", "'search_rank'", ",", "'series_id'", ",", "'title'", ",", "'units'", ",", "'frequency'", ",", "'seasonal_adjustment'", ",", "'realtime_start'", ",", "'realtime_end'", ",", "'last_updated'", ",", "'observation_start'", ",", "'observation_end'", ",", "'popularity'", "]", "if", "order_by", "is", "not", "None", ":", "if", "order_by", "in", "order_by_options", ":", "url", "=", "url", "+", "'&order_by='", "+", "order_by", "else", ":", "raise", "ValueError", "(", "'%s is not in the valid list of order_by options: %s'", "%", "(", "order_by", ",", "str", "(", "order_by_options", ")", ")", ")", "if", "filter", "is", "not", "None", ":", "if", "len", "(", "filter", ")", "==", "2", ":", "url", "=", "url", "+", "'&filter_variable=%s&filter_value=%s'", "%", "(", "filter", "[", "0", "]", ",", "filter", "[", "1", "]", ")", "else", ":", "raise", "ValueError", "(", "'Filter should be a 2 item tuple like (filter_variable, filter_value)'", ")", "sort_order_options", "=", "[", "'asc'", ",", "'desc'", "]", "if", "sort_order", "is", "not", "None", ":", "if", "sort_order", "in", "sort_order_options", ":", "url", "=", "url", "+", "'&sort_order='", "+", "sort_order", "else", ":", "raise", "ValueError", "(", "'%s is not in the valid list of sort_order options: %s'", "%", "(", "sort_order", ",", "str", "(", "sort_order_options", ")", ")", ")", "data", ",", "num_results_total", "=", "self", ".", "__do_series_search", "(", "url", ")", "if", "data", "is", "None", ":", "return", "data", "if", "limit", "==", "0", ":", "max_results_needed", "=", "num_results_total", "else", ":", "max_results_needed", "=", "limit", "if", "max_results_needed", ">", "self", ".", "max_results_per_request", ":", "for", "i", "in", "range", "(", "1", ",", "max_results_needed", "//", "self", ".", "max_results_per_request", "+", "1", ")", ":", "offset", "=", "i", "*", "self", ".", "max_results_per_request", "next_data", ",", "_", "=", "self", ".", "__do_series_search", "(", "url", "+", "'&offset='", "+", "str", "(", "offset", ")", ")", "data", "=", "data", ".", "append", "(", "next_data", ")", "return", "data", ".", "head", "(", "max_results_needed", ")" ]
helper function for getting search results up to specified limit on the number of results. The Fred HTTP API truncates to 1000 results per request, so this may issue multiple HTTP requests to obtain more available data.
[ "helper", "function", "for", "getting", "search", "results", "up", "to", "specified", "limit", "on", "the", "number", "of", "results", ".", "The", "Fred", "HTTP", "API", "truncates", "to", "1000", "results", "per", "request", "so", "this", "may", "issue", "multiple", "HTTP", "requests", "to", "obtain", "more", "available", "data", "." ]
python
train
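The private helper above backs fredapi's public search methods; a hedged sketch of typical use follows. The API key is a placeholder, and the result-frame column names are assumptions about the returned DataFrame.

from fredapi import Fred

fred = Fred(api_key='YOUR_FRED_API_KEY')  # placeholder key
# order_by / sort_order are validated against the option lists shown above.
results = fred.search('unemployment rate', order_by='popularity', sort_order='desc')
print(results[['title', 'frequency']].head())  # assumed columns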
incf-nidash/nidmresults
nidmresults/objects/inference.py
https://github.com/incf-nidash/nidmresults/blob/438f7cce6abc4a4379b629bd76f4d427891e033f/nidmresults/objects/inference.py#L189-L223
def export(self, nidm_version, export_dir):
    """
    Create prov entities and activities.
    """
    # Create "Excursion set" entity
    self.add_attributes((
        (PROV['type'], self.type),
        (NIDM_IN_COORDINATE_SPACE, self.coord_space.id),
        (PROV['label'], self.label),
    ))

    if self.visu is not None:
        self.add_attributes((
            (DC['description'], self.visu.id),
        ))

    if self.clust_map is not None:
        self.add_attributes((
            (NIDM_HAS_CLUSTER_LABELS_MAP, self.clust_map.id),
        ))

    if self.mip is not None:
        self.add_attributes((
            (NIDM_HAS_MAXIMUM_INTENSITY_PROJECTION, self.mip.id),
        ))

    if self.num_clusters is not None:
        self.add_attributes((
            (NIDM_NUMBER_OF_CLUSTERS, self.num_clusters),
        ))

    if self.p_value is not None:
        self.add_attributes((
            (NIDM_P_VALUE, self.p_value),
        ))
[ "def", "export", "(", "self", ",", "nidm_version", ",", "export_dir", ")", ":", "# Create \"Excursion set\" entity", "self", ".", "add_attributes", "(", "(", "(", "PROV", "[", "'type'", "]", ",", "self", ".", "type", ")", ",", "(", "NIDM_IN_COORDINATE_SPACE", ",", "self", ".", "coord_space", ".", "id", ")", ",", "(", "PROV", "[", "'label'", "]", ",", "self", ".", "label", ")", ",", ")", ")", "if", "self", ".", "visu", "is", "not", "None", ":", "self", ".", "add_attributes", "(", "(", "(", "DC", "[", "'description'", "]", ",", "self", ".", "visu", ".", "id", ")", ",", ")", ")", "if", "self", ".", "clust_map", "is", "not", "None", ":", "self", ".", "add_attributes", "(", "(", "(", "NIDM_HAS_CLUSTER_LABELS_MAP", ",", "self", ".", "clust_map", ".", "id", ")", ",", ")", ")", "if", "self", ".", "mip", "is", "not", "None", ":", "self", ".", "add_attributes", "(", "(", "(", "NIDM_HAS_MAXIMUM_INTENSITY_PROJECTION", ",", "self", ".", "mip", ".", "id", ")", ",", ")", ")", "if", "self", ".", "num_clusters", "is", "not", "None", ":", "self", ".", "add_attributes", "(", "(", "(", "NIDM_NUMBER_OF_CLUSTERS", ",", "self", ".", "num_clusters", ")", ",", ")", ")", "if", "self", ".", "p_value", "is", "not", "None", ":", "self", ".", "add_attributes", "(", "(", "(", "NIDM_P_VALUE", ",", "self", ".", "p_value", ")", ",", ")", ")" ]
Create prov entities and activities.
[ "Create", "prov", "entities", "and", "activities", "." ]
python
train
kensho-technologies/graphql-compiler
graphql_compiler/compiler/filters.py
https://github.com/kensho-technologies/graphql-compiler/blob/f6079c6d10f64932f6b3af309b79bcea2123ca8f/graphql_compiler/compiler/filters.py#L17-L37
def scalar_leaf_only(operator):
    """Ensure the filter function is only applied to scalar leaf types."""
    def decorator(f):
        """Decorate the supplied function with the "scalar_leaf_only" logic."""
        @wraps(f)
        def wrapper(filter_operation_info, context, parameters, *args, **kwargs):
            """Check that the type on which the operator operates is a scalar leaf type."""
            if 'operator' in kwargs:
                current_operator = kwargs['operator']
            else:
                # Because "operator" is from an enclosing scope, it is immutable in Python 2.x.
                current_operator = operator

            if not is_leaf_type(filter_operation_info.field_type):
                raise GraphQLCompilationError(u'Cannot apply "{}" filter to non-leaf type'
                                              u'{}'.format(current_operator, filter_operation_info))
            return f(filter_operation_info, context, parameters, *args, **kwargs)

        return wrapper

    return decorator
[ "def", "scalar_leaf_only", "(", "operator", ")", ":", "def", "decorator", "(", "f", ")", ":", "\"\"\"Decorate the supplied function with the \"scalar_leaf_only\" logic.\"\"\"", "@", "wraps", "(", "f", ")", "def", "wrapper", "(", "filter_operation_info", ",", "context", ",", "parameters", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "\"\"\"Check that the type on which the operator operates is a scalar leaf type.\"\"\"", "if", "'operator'", "in", "kwargs", ":", "current_operator", "=", "kwargs", "[", "'operator'", "]", "else", ":", "# Because \"operator\" is from an enclosing scope, it is immutable in Python 2.x.", "current_operator", "=", "operator", "if", "not", "is_leaf_type", "(", "filter_operation_info", ".", "field_type", ")", ":", "raise", "GraphQLCompilationError", "(", "u'Cannot apply \"{}\" filter to non-leaf type'", "u'{}'", ".", "format", "(", "current_operator", ",", "filter_operation_info", ")", ")", "return", "f", "(", "filter_operation_info", ",", "context", ",", "parameters", ",", "*", "args", ",", "*", "*", "kwargs", ")", "return", "wrapper", "return", "decorator" ]
Ensure the filter function is only applied to scalar leaf types.
[ "Ensure", "the", "filter", "function", "is", "only", "applied", "to", "scalar", "leaf", "types", "." ]
python
train
uberVU/mongo-oplogreplay
oplogreplay/oplogwatcher.py
https://github.com/uberVU/mongo-oplogreplay/blob/c1998663f3ccb93c778a7fe5baaf94884251cdc2/oplogreplay/oplogwatcher.py#L38-L76
def start(self):
    """ Starts the OplogWatcher. """
    oplog = self.connection.local['oplog.rs']
    if self.ts is None:
        cursor = oplog.find().sort('$natural', -1)
        obj = cursor[0]
        if obj:
            self.ts = obj['ts']
        else:
            # In case no oplogs are present.
            self.ts = None

    if self.ts:
        logging.info('Watching oplogs with timestamp > %s' % self.ts)
    else:
        logging.info('Watching all oplogs')

    while self.running:
        query = {'ts': {'$gt': self.ts}}
        try:
            logging.debug('Tailing over %r...' % query)
            cursor = oplog.find(query, tailable=True)
            # OplogReplay flag greatly improves scanning for ts performance.
            cursor.add_option(pymongo.cursor._QUERY_OPTIONS['oplog_replay'])
            while self.running:
                for op in cursor:
                    self.process_op(op['ns'], op)
                time.sleep(self.poll_time)
                if not cursor.alive:
                    break
        except AutoReconnect, e:
            logging.warning(e)
            time.sleep(self.poll_time)
        except OperationFailure, e:
            logging.exception(e)
            time.sleep(self.poll_time)
[ "def", "start", "(", "self", ")", ":", "oplog", "=", "self", ".", "connection", ".", "local", "[", "'oplog.rs'", "]", "if", "self", ".", "ts", "is", "None", ":", "cursor", "=", "oplog", ".", "find", "(", ")", ".", "sort", "(", "'$natural'", ",", "-", "1", ")", "obj", "=", "cursor", "[", "0", "]", "if", "obj", ":", "self", ".", "ts", "=", "obj", "[", "'ts'", "]", "else", ":", "# In case no oplogs are present.", "self", ".", "ts", "=", "None", "if", "self", ".", "ts", ":", "logging", ".", "info", "(", "'Watching oplogs with timestamp > %s'", "%", "self", ".", "ts", ")", "else", ":", "logging", ".", "info", "(", "'Watching all oplogs'", ")", "while", "self", ".", "running", ":", "query", "=", "{", "'ts'", ":", "{", "'$gt'", ":", "self", ".", "ts", "}", "}", "try", ":", "logging", ".", "debug", "(", "'Tailing over %r...'", "%", "query", ")", "cursor", "=", "oplog", ".", "find", "(", "query", ",", "tailable", "=", "True", ")", "# OplogReplay flag greatly improves scanning for ts performance.", "cursor", ".", "add_option", "(", "pymongo", ".", "cursor", ".", "_QUERY_OPTIONS", "[", "'oplog_replay'", "]", ")", "while", "self", ".", "running", ":", "for", "op", "in", "cursor", ":", "self", ".", "process_op", "(", "op", "[", "'ns'", "]", ",", "op", ")", "time", ".", "sleep", "(", "self", ".", "poll_time", ")", "if", "not", "cursor", ".", "alive", ":", "break", "except", "AutoReconnect", ",", "e", ":", "logging", ".", "warning", "(", "e", ")", "time", ".", "sleep", "(", "self", ".", "poll_time", ")", "except", "OperationFailure", ",", "e", ":", "logging", ".", "exception", "(", "e", ")", "time", ".", "sleep", "(", "self", ".", "poll_time", ")" ]
Starts the OplogWatcher.
[ "Starts", "the", "OplogWatcher", "." ]
python
train
mokelly/wabbit_wappa
examples/capitalization_demo.py
https://github.com/mokelly/wabbit_wappa/blob/dfe5bf6d6036079e473c4148335cd6f339d0299b/examples/capitalization_demo.py#L21-L33
def get_example():
    """Make an example for training and testing.

    Outputs a tuple (label, features) where label is +1 if capital letters
    are the majority, and -1 otherwise; and features is a list of letters.
    """
    features = random.sample(string.ascii_letters, NUM_SAMPLES)
    num_capitalized = len([letter for letter in features
                           if letter in string.ascii_uppercase])
    num_lowercase = len([letter for letter in features
                         if letter in string.ascii_lowercase])
    if num_capitalized > num_lowercase:
        label = 1
    else:
        label = -1
    return (label, features)
[ "def", "get_example", "(", ")", ":", "features", "=", "random", ".", "sample", "(", "string", ".", "ascii_letters", ",", "NUM_SAMPLES", ")", "num_capitalized", "=", "len", "(", "[", "letter", "for", "letter", "in", "features", "if", "letter", "in", "string", ".", "ascii_uppercase", "]", ")", "num_lowercase", "=", "len", "(", "[", "letter", "for", "letter", "in", "features", "if", "letter", "in", "string", ".", "ascii_lowercase", "]", ")", "if", "num_capitalized", ">", "num_lowercase", ":", "label", "=", "1", "else", ":", "label", "=", "-", "1", "return", "(", "label", ",", "features", ")" ]
Make an example for training and testing. Outputs a tuple (label, features) where label is +1 if capital letters are the majority, and -1 otherwise; and features is a list of letters.
[ "Make", "an", "example", "for", "training", "and", "testing", ".", "Outputs", "a", "tuple", "(", "label", "features", ")", "where", "label", "is", "+", "1", "if", "capital", "letters", "are", "the", "majority", "and", "-", "1", "otherwise", ";", "and", "features", "is", "a", "list", "of", "letters", "." ]
python
train
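A sketch of driving get_example(); it assumes the function from the record is in scope and fixes the module constant locally. The VW-style line formatting is illustrative, not taken from the source.

import random
import string

NUM_SAMPLES = 9  # stand-in for the demo module's constant

label, features = get_example()  # function from the record above
# Roughly how wabbit_wappa would feed this to Vowpal Wabbit: "<label> | f1 f2 ..."
print('{} | {}'.format(label, ' '.join(features)))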
h2oai/h2o-3
h2o-py/h2o/job.py
https://github.com/h2oai/h2o-3/blob/dd62aaa1e7f680a8b16ee14bc66b0fb5195c2ad8/h2o-py/h2o/job.py#L45-L81
def poll(self, verbose_model_scoring_history=False):
    """
    Wait until the job finishes.

    This method will continuously query the server about the status of the job, until the job reaches a
    completion. During this time we will display (in stdout) a progress bar with % completion status.
    """
    try:
        hidden = not H2OJob.__PROGRESS_BAR__
        pb = ProgressBar(title=self._job_type + " progress", hidden=hidden)
        if verbose_model_scoring_history:
            pb.execute(self._refresh_job_status,
                       print_verbose_info=lambda x: self._print_verbose_info() if int(x * 10) % 5 == 0 else " ")
        else:
            pb.execute(self._refresh_job_status)
    except StopIteration as e:
        if str(e) == "cancelled":
            h2o.api("POST /3/Jobs/%s/cancel" % self.job_key)
            self.status = "CANCELLED"
        # Potentially we may want to re-raise the exception here

    assert self.status in {"DONE", "CANCELLED", "FAILED"} or self._poll_count <= 0, \
        "Polling finished while the job has status %s" % self.status

    if self.warnings:
        for w in self.warnings:
            warnings.warn(w)

    # check if failed... and politely print relevant message
    if self.status == "CANCELLED":
        raise H2OJobCancelled("Job<%s> was cancelled by the user." % self.job_key)
    if self.status == "FAILED":
        if (isinstance(self.job, dict)) and ("stacktrace" in list(self.job)):
            raise EnvironmentError("Job with key {} failed with an exception: {}\nstacktrace: "
                                   "\n{}".format(self.job_key, self.exception, self.job["stacktrace"]))
        else:
            raise EnvironmentError("Job with key %s failed with an exception: %s" %
                                   (self.job_key, self.exception))
    return self
[ "def", "poll", "(", "self", ",", "verbose_model_scoring_history", "=", "False", ")", ":", "try", ":", "hidden", "=", "not", "H2OJob", ".", "__PROGRESS_BAR__", "pb", "=", "ProgressBar", "(", "title", "=", "self", ".", "_job_type", "+", "\" progress\"", ",", "hidden", "=", "hidden", ")", "if", "verbose_model_scoring_history", ":", "pb", ".", "execute", "(", "self", ".", "_refresh_job_status", ",", "print_verbose_info", "=", "lambda", "x", ":", "self", ".", "_print_verbose_info", "(", ")", "if", "int", "(", "x", "*", "10", ")", "%", "5", "==", "0", "else", "\" \"", ")", "else", ":", "pb", ".", "execute", "(", "self", ".", "_refresh_job_status", ")", "except", "StopIteration", "as", "e", ":", "if", "str", "(", "e", ")", "==", "\"cancelled\"", ":", "h2o", ".", "api", "(", "\"POST /3/Jobs/%s/cancel\"", "%", "self", ".", "job_key", ")", "self", ".", "status", "=", "\"CANCELLED\"", "# Potentially we may want to re-raise the exception here", "assert", "self", ".", "status", "in", "{", "\"DONE\"", ",", "\"CANCELLED\"", ",", "\"FAILED\"", "}", "or", "self", ".", "_poll_count", "<=", "0", ",", "\"Polling finished while the job has status %s\"", "%", "self", ".", "status", "if", "self", ".", "warnings", ":", "for", "w", "in", "self", ".", "warnings", ":", "warnings", ".", "warn", "(", "w", ")", "# check if failed... and politely print relevant message", "if", "self", ".", "status", "==", "\"CANCELLED\"", ":", "raise", "H2OJobCancelled", "(", "\"Job<%s> was cancelled by the user.\"", "%", "self", ".", "job_key", ")", "if", "self", ".", "status", "==", "\"FAILED\"", ":", "if", "(", "isinstance", "(", "self", ".", "job", ",", "dict", ")", ")", "and", "(", "\"stacktrace\"", "in", "list", "(", "self", ".", "job", ")", ")", ":", "raise", "EnvironmentError", "(", "\"Job with key {} failed with an exception: {}\\nstacktrace: \"", "\"\\n{}\"", ".", "format", "(", "self", ".", "job_key", ",", "self", ".", "exception", ",", "self", ".", "job", "[", "\"stacktrace\"", "]", ")", ")", "else", ":", "raise", "EnvironmentError", "(", "\"Job with key %s failed with an exception: %s\"", "%", "(", "self", ".", "job_key", ",", "self", ".", "exception", ")", ")", "return", "self" ]
Wait until the job finishes. This method will continuously query the server about the status of the job, until the job reaches a completion. During this time we will display (in stdout) a progress bar with % completion status.
[ "Wait", "until", "the", "job", "finishes", "." ]
python
test
facelessuser/pyspelling
pyspelling/__init__.py
https://github.com/facelessuser/pyspelling/blob/c25d5292cc2687ad65891a12ead43f7182ca8bb3/pyspelling/__init__.py#L532-L567
def setup_command(self, encoding, options, personal_dict, file_name=None):
    """Setup command."""

    cmd = [
        self.binary,
        '-l'
    ]

    if encoding:
        cmd.extend(['-i', encoding])

    if personal_dict:
        cmd.extend(['-p', personal_dict])

    allowed = {
        'check-apostrophe', 'check-url',
        'd', 'H', 'i', 'n', 'O', 'r', 't', 'X'
    }

    for k, v in options.items():
        if k in allowed:
            key = ('-%s' if len(k) == 1 else '--%s') % k
            if isinstance(v, bool) and v is True:
                cmd.append(key)
            elif isinstance(v, str):
                cmd.extend([key, v])
            elif isinstance(v, int):
                cmd.extend([key, str(v)])
            elif isinstance(v, list):
                for value in v:
                    cmd.extend([key, str(value)])

    if file_name is not None:
        cmd.append(file_name)

    return cmd
[ "def", "setup_command", "(", "self", ",", "encoding", ",", "options", ",", "personal_dict", ",", "file_name", "=", "None", ")", ":", "cmd", "=", "[", "self", ".", "binary", ",", "'-l'", "]", "if", "encoding", ":", "cmd", ".", "extend", "(", "[", "'-i'", ",", "encoding", "]", ")", "if", "personal_dict", ":", "cmd", ".", "extend", "(", "[", "'-p'", ",", "personal_dict", "]", ")", "allowed", "=", "{", "'check-apostrophe'", ",", "'check-url'", ",", "'d'", ",", "'H'", ",", "'i'", ",", "'n'", ",", "'O'", ",", "'r'", ",", "'t'", ",", "'X'", "}", "for", "k", ",", "v", "in", "options", ".", "items", "(", ")", ":", "if", "k", "in", "allowed", ":", "key", "=", "(", "'-%s'", "if", "len", "(", "k", ")", "==", "1", "else", "'--%s'", ")", "%", "k", "if", "isinstance", "(", "v", ",", "bool", ")", "and", "v", "is", "True", ":", "cmd", ".", "append", "(", "key", ")", "elif", "isinstance", "(", "v", ",", "str", ")", ":", "cmd", ".", "extend", "(", "[", "key", ",", "v", "]", ")", "elif", "isinstance", "(", "v", ",", "int", ")", ":", "cmd", ".", "extend", "(", "[", "key", ",", "str", "(", "v", ")", "]", ")", "elif", "isinstance", "(", "v", ",", "list", ")", ":", "for", "value", "in", "v", ":", "cmd", ".", "extend", "(", "[", "key", ",", "str", "(", "value", ")", "]", ")", "if", "file_name", "is", "not", "None", ":", "cmd", ".", "append", "(", "file_name", ")", "return", "cmd" ]
Setup command.
[ "Setup", "command", "." ]
python
train
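The option handling above is easy to verify in isolation. Below is a standalone restatement of the assembly logic, a sketch rather than the class method itself:

def build_aspell_command(binary, encoding, options, personal_dict, file_name=None):
    """Mirror of setup_command()'s branching, outside the class (sketch)."""
    cmd = [binary, '-l']
    if encoding:
        cmd.extend(['-i', encoding])
    if personal_dict:
        cmd.extend(['-p', personal_dict])
    allowed = {'check-apostrophe', 'check-url', 'd', 'H', 'i', 'n', 'O', 'r', 't', 'X'}
    for k, v in options.items():
        if k in allowed:
            key = ('-%s' if len(k) == 1 else '--%s') % k  # one dash for short keys, two for long
            if isinstance(v, bool) and v is True:
                cmd.append(key)
            elif isinstance(v, str):
                cmd.extend([key, v])
            elif isinstance(v, int):
                cmd.extend([key, str(v)])
            elif isinstance(v, list):
                for value in v:
                    cmd.extend([key, str(value)])
    if file_name is not None:
        cmd.append(file_name)
    return cmd

print(build_aspell_command('aspell', 'utf-8', {'d': 'en_US', 'H': True}, None, 'README.md'))
# ['aspell', '-l', '-i', 'utf-8', '-d', 'en_US', '-H', 'README.md']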
smarie/python-parsyfiles
parsyfiles/plugins_optional/support_for_pandas.py
https://github.com/smarie/python-parsyfiles/blob/344b37e1151e8d4e7c2ee49ae09d6568715ae64e/parsyfiles/plugins_optional/support_for_pandas.py#L82-L97
def get_default_pandas_parsers() -> List[AnyParser]:
    """
    Utility method to return the default parsers able to parse a dictionary from a file.
    :return:
    """
    return [SingleFileParserFunction(parser_function=read_dataframe_from_xls,
                                     streaming_mode=False,
                                     supported_exts={'.xls', '.xlsx', '.xlsm'},
                                     supported_types={pd.DataFrame},
                                     option_hints=pandas_parsers_option_hints_xls),
            SingleFileParserFunction(parser_function=read_df_or_series_from_csv,
                                     streaming_mode=False,
                                     supported_exts={'.csv', '.txt'},
                                     supported_types={pd.DataFrame, pd.Series},
                                     option_hints=pandas_parsers_option_hints_csv),
            ]
[ "def", "get_default_pandas_parsers", "(", ")", "->", "List", "[", "AnyParser", "]", ":", "return", "[", "SingleFileParserFunction", "(", "parser_function", "=", "read_dataframe_from_xls", ",", "streaming_mode", "=", "False", ",", "supported_exts", "=", "{", "'.xls'", ",", "'.xlsx'", ",", "'.xlsm'", "}", ",", "supported_types", "=", "{", "pd", ".", "DataFrame", "}", ",", "option_hints", "=", "pandas_parsers_option_hints_xls", ")", ",", "SingleFileParserFunction", "(", "parser_function", "=", "read_df_or_series_from_csv", ",", "streaming_mode", "=", "False", ",", "supported_exts", "=", "{", "'.csv'", ",", "'.txt'", "}", ",", "supported_types", "=", "{", "pd", ".", "DataFrame", ",", "pd", ".", "Series", "}", ",", "option_hints", "=", "pandas_parsers_option_hints_csv", ")", ",", "]" ]
Utility method to return the default parsers able to parse a dictionary from a file. :return:
[ "Utility", "method", "to", "return", "the", "default", "parsers", "able", "to", "parse", "a", "dictionary", "from", "a", "file", ".", ":", "return", ":" ]
python
train
pedrotgn/pyactor
pyactor/context.py
https://github.com/pedrotgn/pyactor/blob/24d98d134dd4228f2ba38e83611e9c3f50ec2fd4/pyactor/context.py#L152-L176
def load_transport(self, url):
    '''
    For remote communication. Sets the communication dispatcher of the
    host at the address and port specified.

    The scheme must be http if using an XMLRPC dispatcher, or amqp for
    RabbitMQ communications.

    This method is internal. Automatically called when creating the host.

    :param str. url: URL where to bind the host. Must be provided in
        the typical form: 'scheme://address:port/hierarchical_path'
    '''
    aurl = urlparse(url)
    addrl = aurl.netloc.split(':')
    self.addr = addrl[0], addrl[1]
    self.transport = aurl.scheme
    self.host_url = aurl
    if aurl.scheme == 'http':
        self.launch_actor('http', rpcactor.RPCDispatcher(url, self, 'rpc'))
    elif aurl.scheme == 'amqp':
        self.launch_actor('amqp', rpcactor.RPCDispatcher(url, self, 'rabbit'))
[ "def", "load_transport", "(", "self", ",", "url", ")", ":", "aurl", "=", "urlparse", "(", "url", ")", "addrl", "=", "aurl", ".", "netloc", ".", "split", "(", "':'", ")", "self", ".", "addr", "=", "addrl", "[", "0", "]", ",", "addrl", "[", "1", "]", "self", ".", "transport", "=", "aurl", ".", "scheme", "self", ".", "host_url", "=", "aurl", "if", "aurl", ".", "scheme", "==", "'http'", ":", "self", ".", "launch_actor", "(", "'http'", ",", "rpcactor", ".", "RPCDispatcher", "(", "url", ",", "self", ",", "'rpc'", ")", ")", "elif", "aurl", ".", "scheme", "==", "'amqp'", ":", "self", ".", "launch_actor", "(", "'amqp'", ",", "rpcactor", ".", "RPCDispatcher", "(", "url", ",", "self", ",", "'rabbit'", ")", ")" ]
For remote communication. Sets the communication dispatcher of the host at the address and port specified. The scheme must be http if using an XMLRPC dispatcher, or amqp for RabbitMQ communications. This method is internal. Automatically called when creating the host. :param str. url: URL where to bind the host. Must be provided in the typical form: 'scheme://address:port/hierarchical_path'
[ "For", "remote", "communication", ".", "Sets", "the", "communication", "dispatcher", "of", "the", "host", "at", "the", "address", "and", "port", "specified", "." ]
python
train
davenquinn/Attitude
attitude/orientation/pca.py
https://github.com/davenquinn/Attitude/blob/2ce97b9aba0aa5deedc6617c2315e07e6396d240/attitude/orientation/pca.py#L297-L307
def angular_errors(self, degrees=True):
    """
    Minimum and maximum angular errors
    corresponding to 1st and 2nd axes
    of PCA distribution.
    """
    hyp_axes = self.method(self)
    v = angular_errors(hyp_axes)
    if degrees:
        v = N.degrees(v)
    return tuple(v)
[ "def", "angular_errors", "(", "self", ",", "degrees", "=", "True", ")", ":", "hyp_axes", "=", "self", ".", "method", "(", "self", ")", "v", "=", "angular_errors", "(", "hyp_axes", ")", "if", "degrees", ":", "v", "=", "N", ".", "degrees", "(", "v", ")", "return", "tuple", "(", "v", ")" ]
Minimum and maximum angular errors corresponding to 1st and 2nd axes of PCA distribution.
[ "Minimum", "and", "maximum", "angular", "errors", "corresponding", "to", "1st", "and", "2nd", "axes", "of", "PCA", "distribution", "." ]
python
train
dnanexus/dx-toolkit
src/python/dxpy/api.py
https://github.com/dnanexus/dx-toolkit/blob/74befb53ad90fcf902d8983ae6d74580f402d619/src/python/dxpy/api.py#L21-L27
def analysis_describe(object_id, input_params={}, always_retry=True, **kwargs):
    """
    Invokes the /analysis-xxxx/describe API method.

    For more info, see: https://wiki.dnanexus.com/API-Specification-v1.0.0/Workflows-and-Analyses#API-method%3A-%2Fanalysis-xxxx%2Fdescribe
    """
    return DXHTTPRequest('/%s/describe' % object_id, input_params, always_retry=always_retry, **kwargs)
[ "def", "analysis_describe", "(", "object_id", ",", "input_params", "=", "{", "}", ",", "always_retry", "=", "True", ",", "*", "*", "kwargs", ")", ":", "return", "DXHTTPRequest", "(", "'/%s/describe'", "%", "object_id", ",", "input_params", ",", "always_retry", "=", "always_retry", ",", "*", "*", "kwargs", ")" ]
Invokes the /analysis-xxxx/describe API method. For more info, see: https://wiki.dnanexus.com/API-Specification-v1.0.0/Workflows-and-Analyses#API-method%3A-%2Fanalysis-xxxx%2Fdescribe
[ "Invokes", "the", "/", "analysis", "-", "xxxx", "/", "describe", "API", "method", "." ]
python
train
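A brief usage sketch; the analysis ID keeps the documented analysis-xxxx placeholder shape, and the fields read from the response are assumptions about the describe payload.

import dxpy

desc = dxpy.api.analysis_describe('analysis-xxxx')  # placeholder ID
print(desc.get('state'), desc.get('name'))  # assumed response fields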
lorien/grab
grab/proxylist.py
https://github.com/lorien/grab/blob/8b301db2a08c830245b61c589e58af6234f4db79/grab/proxylist.py#L32-L51
def parse_proxy_line(line):
    """
    Parse proxy details from the raw text line.

    The text line could be in one of the following formats:
    * host:port
    * host:port:username:password
    """

    line = line.strip()
    match = RE_SIMPLE_PROXY.search(line)
    if match:
        return match.group(1), match.group(2), None, None

    match = RE_AUTH_PROXY.search(line)
    if match:
        host, port, user, pwd = match.groups()
        return host, port, user, pwd

    raise InvalidProxyLine('Invalid proxy line: %s' % line)
[ "def", "parse_proxy_line", "(", "line", ")", ":", "line", "=", "line", ".", "strip", "(", ")", "match", "=", "RE_SIMPLE_PROXY", ".", "search", "(", "line", ")", "if", "match", ":", "return", "match", ".", "group", "(", "1", ")", ",", "match", ".", "group", "(", "2", ")", ",", "None", ",", "None", "match", "=", "RE_AUTH_PROXY", ".", "search", "(", "line", ")", "if", "match", ":", "host", ",", "port", ",", "user", ",", "pwd", "=", "match", ".", "groups", "(", ")", "return", "host", ",", "port", ",", "user", ",", "pwd", "raise", "InvalidProxyLine", "(", "'Invalid proxy line: %s'", "%", "line", ")" ]
Parse proxy details from the raw text line. The text line could be in one of the following formats: * host:port * host:port:username:password
[ "Parse", "proxy", "details", "from", "the", "raw", "text", "line", "." ]
python
train
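The record does not include grab's actual regexes, so this sketch supplies stand-ins with the same group layout plus a stub exception, and assumes parse_proxy_line from the record is pasted into the same module.

import re

# Plausible stand-ins, not grab's real patterns.
RE_SIMPLE_PROXY = re.compile(r'^([^:]+):(\d+)$')
RE_AUTH_PROXY = re.compile(r'^([^:]+):(\d+):([^:]+):([^:]+)$')

class InvalidProxyLine(Exception):
    """Stub for grab's error type."""

print(parse_proxy_line('10.0.0.1:8080'))            # ('10.0.0.1', '8080', None, None)
print(parse_proxy_line('10.0.0.1:8080:user:pass'))  # ('10.0.0.1', '8080', 'user', 'pass')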
areebbeigh/profanityfilter
profanityfilter/profanityfilter.py
https://github.com/areebbeigh/profanityfilter/blob/f7e1c1bb1b7aea401e0d09219610cc690acd5476/profanityfilter/profanityfilter.py#L105-L117
def censor(self, input_text):
    """Returns input_text with any profane words censored."""
    bad_words = self.get_profane_words()
    res = input_text

    for word in bad_words:
        # Apply word boundaries to the bad word
        regex_string = r'{0}' if self._no_word_boundaries else r'\b{0}\b'
        regex_string = regex_string.format(word)

        regex = re.compile(regex_string, re.IGNORECASE)
        res = regex.sub(self._censor_char * len(word), res)

    return res
[ "def", "censor", "(", "self", ",", "input_text", ")", ":", "bad_words", "=", "self", ".", "get_profane_words", "(", ")", "res", "=", "input_text", "for", "word", "in", "bad_words", ":", "# Apply word boundaries to the bad word", "regex_string", "=", "r'{0}'", "if", "self", ".", "_no_word_boundaries", "else", "r'\\b{0}\\b'", "regex_string", "=", "regex_string", ".", "format", "(", "word", ")", "regex", "=", "re", ".", "compile", "(", "regex_string", ",", "re", ".", "IGNORECASE", ")", "res", "=", "regex", ".", "sub", "(", "self", ".", "_censor_char", "*", "len", "(", "word", ")", ",", "res", ")", "return", "res" ]
Returns input_text with any profane words censored.
[ "Returns", "input_text", "with", "any", "profane", "words", "censored", "." ]
python
train
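The censoring loop depends only on re, so it restates cleanly outside the class. A standalone sketch of the same length-preserving substitution:

import re

def censor_words(text, bad_words, censor_char='*', no_word_boundaries=False):
    """Free-function restatement of censor() above (sketch)."""
    for word in bad_words:
        pattern = r'{0}' if no_word_boundaries else r'\b{0}\b'
        regex = re.compile(pattern.format(word), re.IGNORECASE)
        # Each hit is replaced by len(word) censor characters.
        text = regex.sub(censor_char * len(word), text)
    return text

print(censor_words('Darn it, DARN!', ['darn']))  # '**** it, ****!'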
trailofbits/manticore
scripts/binaryninja/manticore_viz/__init__.py
https://github.com/trailofbits/manticore/blob/54c5a15b1119c523ae54c09972413e8b97f11629/scripts/binaryninja/manticore_viz/__init__.py#L175-L184
def viz_live_trace(view):
    """
    Given a Manticore trace file, highlight the basic blocks.
    """
    tv = TraceVisualizer(view, None, live=True)
    if tv.workspace is None:
        tv.workspace = get_workspace()
    # update due to singleton in case we are called after a clear
    tv.live_update = True
    tv.visualize()
[ "def", "viz_live_trace", "(", "view", ")", ":", "tv", "=", "TraceVisualizer", "(", "view", ",", "None", ",", "live", "=", "True", ")", "if", "tv", ".", "workspace", "is", "None", ":", "tv", ".", "workspace", "=", "get_workspace", "(", ")", "# update due to singleton in case we are called after a clear", "tv", ".", "live_update", "=", "True", "tv", ".", "visualize", "(", ")" ]
Given a Manticore trace file, highlight the basic blocks.
[ "Given", "a", "Manticore", "trace", "file", "highlight", "the", "basic", "blocks", "." ]
python
valid
ingolemo/python-lenses
lenses/hooks/hook_funcs.py
https://github.com/ingolemo/python-lenses/blob/a3a6ed0a31f6674451e542e7380a8aa16e6f8edf/lenses/hooks/hook_funcs.py#L121-L155
def setattr(self, name, value):
    # type: (Any, Any, Any) -> Any
    '''Takes an object, a string, and a value and produces a new object
    that is a copy of the original but with the attribute called
    ``name`` set to ``value``.

    The following equality should hold for your definition:

    .. code-block:: python

        setattr(obj, 'attr', obj.attr) == obj

    This function is used by many lenses (particularly GetattrLens) to
    set attributes on states even when those states do not ordinarily
    support ``setattr``. This function is designed to have a similar
    signature as python's built-in ``setattr`` except that it returns
    a new object that has the attribute set rather than mutating the
    object in place.

    It's what enables the ``lens.some_attribute`` functionality.

    The corresponding method call for this hook is
    ``obj._lens_setattr(name, value)``.

    The default implementation makes a copy of the object using
    ``copy.copy`` and then mutates the new object by calling python's
    built in ``setattr`` on it.
    '''
    try:
        self._lens_setattr
    except AttributeError:
        selfcopy = copy.copy(self)
        builtin_setattr(selfcopy, name, value)
        return selfcopy
    else:
        return self._lens_setattr(name, value)
[ "def", "setattr", "(", "self", ",", "name", ",", "value", ")", ":", "# type: (Any, Any, Any) -> Any", "try", ":", "self", ".", "_lens_setattr", "except", "AttributeError", ":", "selfcopy", "=", "copy", ".", "copy", "(", "self", ")", "builtin_setattr", "(", "selfcopy", ",", "name", ",", "value", ")", "return", "selfcopy", "else", ":", "return", "self", ".", "_lens_setattr", "(", "name", ",", "value", ")" ]
Takes an object, a string, and a value and produces a new object
that is a copy of the original but with the attribute called
``name`` set to ``value``.

The following equality should hold for your definition:

.. code-block:: python

    setattr(obj, 'attr', obj.attr) == obj

This function is used by many lenses (particularly GetattrLens) to
set attributes on states even when those states do not ordinarily
support ``setattr``. This function is designed to have a similar
signature to python's built-in ``setattr`` except that it returns a
new object that has the attribute set rather than mutating the
object in place.

It's what enables the ``lens.some_attribute`` functionality.

The corresponding method call for this hook is
``obj._lens_setattr(name, value)``.

The default implementation makes a copy of the object using
``copy.copy`` and then mutates the new object by calling python's
built-in ``setattr`` on it.
[ "Takes", "an", "object", "a", "string", "and", "a", "value", "and", "produces", "a", "new", "object", "that", "is", "a", "copy", "of", "the", "original", "but", "with", "the", "attribute", "called", "name", "set", "to", "value", "." ]
python
test
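The default copy-then-mutate path of the hook is easy to exercise on its own; a small sketch (the Point class and the immutable_setattr name are illustrative, not part of the lenses API):

import copy

class Point(object):
    def __init__(self, x, y):
        self.x, self.y = x, y

def immutable_setattr(obj, name, value):
    # The default path of the hook above: copy first, then mutate the copy.
    new_obj = copy.copy(obj)
    setattr(new_obj, name, value)
    return new_obj

p1 = Point(1, 2)
p2 = immutable_setattr(p1, 'x', 10)
assert (p1.x, p2.x) == (1, 10)  # the original object is untouched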
seung-lab/cloud-volume
cloudvolume/txrx.py
https://github.com/seung-lab/cloud-volume/blob/d2fd4500333f1bc3cd3e3919a8b649cec5d8e214/cloudvolume/txrx.py#L263-L271
def check_grid_aligned(vol, img, offset):
    """Returns (is_aligned, img bounds Bbox, nearest bbox inflated to grid aligned)"""
    shape = Vec(*img.shape)[:3]
    offset = Vec(*offset)[:3]
    bounds = Bbox(offset, shape + offset)
    alignment_check = bounds.expand_to_chunk_size(vol.underlying, vol.voxel_offset)
    alignment_check = Bbox.clamp(alignment_check, vol.bounds)
    is_aligned = np.all(alignment_check.minpt == bounds.minpt) \
        and np.all(alignment_check.maxpt == bounds.maxpt)
    return (is_aligned, bounds, alignment_check)
[ "def", "check_grid_aligned", "(", "vol", ",", "img", ",", "offset", ")", ":", "shape", "=", "Vec", "(", "*", "img", ".", "shape", ")", "[", ":", "3", "]", "offset", "=", "Vec", "(", "*", "offset", ")", "[", ":", "3", "]", "bounds", "=", "Bbox", "(", "offset", ",", "shape", "+", "offset", ")", "alignment_check", "=", "bounds", ".", "expand_to_chunk_size", "(", "vol", ".", "underlying", ",", "vol", ".", "voxel_offset", ")", "alignment_check", "=", "Bbox", ".", "clamp", "(", "alignment_check", ",", "vol", ".", "bounds", ")", "is_aligned", "=", "np", ".", "all", "(", "alignment_check", ".", "minpt", "==", "bounds", ".", "minpt", ")", "and", "np", ".", "all", "(", "alignment_check", ".", "maxpt", "==", "bounds", ".", "maxpt", ")", "return", "(", "is_aligned", ",", "bounds", ",", "alignment_check", ")" ]
Returns (is_aligned, img bounds Bbox, nearest bbox inflated to grid aligned)
[ "Returns", "(", "is_aligned", "img", "bounds", "Bbox", "nearest", "bbox", "inflated", "to", "grid", "aligned", ")" ]
python
train
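The alignment test boils down to: inflate the requested bounds outward to the chunk grid and see whether that changed anything. A one-dimensional sketch of the inflation step (my own helper, not cloud-volume's API; real Bbox objects also carry a voxel offset):

def expand_to_chunk_size(lo, hi, chunk):
    # Snap [lo, hi) outward so both ends land on multiples of `chunk`.
    return (lo // chunk) * chunk, -(-hi // chunk) * chunk

assert expand_to_chunk_size(64, 128, 64) == (64, 128)  # already aligned
assert expand_to_chunk_size(60, 130, 64) == (0, 192)   # inflated outward

When the inflated interval equals the input, the request was grid aligned.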
mrstephenneal/dirutility
dirutility/walk/walk.py
https://github.com/mrstephenneal/dirutility/blob/339378659e2d7e09c53acfc51c5df745bb0cd517/dirutility/walk/walk.py#L227-L235
def folders(self):
    """Return list of folders in root directory"""
    for directory in self.directory:
        for path in os.listdir(directory):
            full_path = os.path.join(directory, path)
            if os.path.isdir(full_path):
                if not path.startswith('.'):
                    self.filepaths.append(full_path)
    return self._get_filepaths()
[ "def", "folders", "(", "self", ")", ":", "for", "directory", "in", "self", ".", "directory", ":", "for", "path", "in", "os", ".", "listdir", "(", "directory", ")", ":", "full_path", "=", "os", ".", "path", ".", "join", "(", "directory", ",", "path", ")", "if", "os", ".", "path", ".", "isdir", "(", "full_path", ")", ":", "if", "not", "path", ".", "startswith", "(", "'.'", ")", ":", "self", ".", "filepaths", ".", "append", "(", "full_path", ")", "return", "self", ".", "_get_filepaths", "(", ")" ]
Return list of folders in root directory
[ "Return", "list", "of", "folders", "in", "root", "directory" ]
python
train
BerkeleyAutomation/perception
perception/primesense_sensor.py
https://github.com/BerkeleyAutomation/perception/blob/03d9b37dd6b66896cdfe173905c9413c8c3c5df6/perception/primesense_sensor.py#L67-L73
def color_intrinsics(self):
    """:obj:`CameraIntrinsics` : The camera intrinsics for the primesense color camera.
    """
    return CameraIntrinsics(self._ir_frame,
                            PrimesenseSensor.FOCAL_X, PrimesenseSensor.FOCAL_Y,
                            PrimesenseSensor.CENTER_X, PrimesenseSensor.CENTER_Y,
                            height=PrimesenseSensor.DEPTH_IM_HEIGHT,
                            width=PrimesenseSensor.DEPTH_IM_WIDTH)
[ "def", "color_intrinsics", "(", "self", ")", ":", "return", "CameraIntrinsics", "(", "self", ".", "_ir_frame", ",", "PrimesenseSensor", ".", "FOCAL_X", ",", "PrimesenseSensor", ".", "FOCAL_Y", ",", "PrimesenseSensor", ".", "CENTER_X", ",", "PrimesenseSensor", ".", "CENTER_Y", ",", "height", "=", "PrimesenseSensor", ".", "DEPTH_IM_HEIGHT", ",", "width", "=", "PrimesenseSensor", ".", "DEPTH_IM_WIDTH", ")" ]
:obj:`CameraIntrinsics` : The camera intrinsics for the primesense color camera.
[ ":", "obj", ":", "CameraIntrinsics", ":", "The", "camera", "intrinsics", "for", "the", "primesense", "color", "camera", "." ]
python
train
esheldon/fitsio
fitsio/fitslib.py
https://github.com/esheldon/fitsio/blob/a6f07919f457a282fe240adad9d2c30906b71a15/fitsio/fitslib.py#L1053-L1090
def _append_hdu_info(self, ext):
    """
    internal routine to append info for the indicated extension
    """

    # raised IOError if not found
    hdu_type = self._FITS.movabs_hdu(ext+1)

    if hdu_type == IMAGE_HDU:
        hdu = ImageHDU(self._FITS, ext, **self.keys)
    elif hdu_type == BINARY_TBL:
        hdu = TableHDU(self._FITS, ext, **self.keys)
    elif hdu_type == ASCII_TBL:
        hdu = AsciiTableHDU(self._FITS, ext, **self.keys)
    else:
        mess = ("extension %s is of unknown type %s "
                "this is probably a bug")
        mess = mess % (ext, hdu_type)
        raise IOError(mess)

    self.hdu_list.append(hdu)
    self.hdu_map[ext] = hdu

    extname = hdu.get_extname()
    if not self.case_sensitive:
        extname = extname.lower()
    if extname != '':
        # this will guarantee we default to *first* version,
        # if version is not requested, using __getitem__
        if extname not in self.hdu_map:
            self.hdu_map[extname] = hdu

        ver = hdu.get_extver()
        if ver > 0:
            key = '%s-%s' % (extname, ver)
            self.hdu_map[key] = hdu
[ "def", "_append_hdu_info", "(", "self", ",", "ext", ")", ":", "# raised IOError if not found", "hdu_type", "=", "self", ".", "_FITS", ".", "movabs_hdu", "(", "ext", "+", "1", ")", "if", "hdu_type", "==", "IMAGE_HDU", ":", "hdu", "=", "ImageHDU", "(", "self", ".", "_FITS", ",", "ext", ",", "*", "*", "self", ".", "keys", ")", "elif", "hdu_type", "==", "BINARY_TBL", ":", "hdu", "=", "TableHDU", "(", "self", ".", "_FITS", ",", "ext", ",", "*", "*", "self", ".", "keys", ")", "elif", "hdu_type", "==", "ASCII_TBL", ":", "hdu", "=", "AsciiTableHDU", "(", "self", ".", "_FITS", ",", "ext", ",", "*", "*", "self", ".", "keys", ")", "else", ":", "mess", "=", "(", "\"extension %s is of unknown type %s \"", "\"this is probably a bug\"", ")", "mess", "=", "mess", "%", "(", "ext", ",", "hdu_type", ")", "raise", "IOError", "(", "mess", ")", "self", ".", "hdu_list", ".", "append", "(", "hdu", ")", "self", ".", "hdu_map", "[", "ext", "]", "=", "hdu", "extname", "=", "hdu", ".", "get_extname", "(", ")", "if", "not", "self", ".", "case_sensitive", ":", "extname", "=", "extname", ".", "lower", "(", ")", "if", "extname", "!=", "''", ":", "# this will guarantee we default to *first* version,", "# if version is not requested, using __getitem__", "if", "extname", "not", "in", "self", ".", "hdu_map", ":", "self", ".", "hdu_map", "[", "extname", "]", "=", "hdu", "ver", "=", "hdu", ".", "get_extver", "(", ")", "if", "ver", ">", "0", ":", "key", "=", "'%s-%s'", "%", "(", "extname", ",", "ver", ")", "self", ".", "hdu_map", "[", "key", "]", "=", "hdu" ]
internal routine to append info for the indicated extension
[ "internal", "routine" ]
python
train
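The interesting part is the extension-name bookkeeping: the bare name always resolves to the first version seen, while "name-version" keys address specific versions. A toy sketch of that mapping (string stand-ins instead of fitsio HDU objects):

hdu_map = {}
for ext, (extname, ver) in enumerate([('sci', 1), ('sci', 2), ('err', 1)]):
    hdu = 'hdu%d' % ext           # stand-in for a real HDU instance
    hdu_map[ext] = hdu
    if extname not in hdu_map:    # first version wins for the bare name
        hdu_map[extname] = hdu
    hdu_map['%s-%s' % (extname, ver)] = hdu

assert hdu_map['sci'] == 'hdu0'    # defaults to the first version
assert hdu_map['sci-2'] == 'hdu1'  # explicit version lookup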
piglei/uwsgi-sloth
uwsgi_sloth/models.py
https://github.com/piglei/uwsgi-sloth/blob/2834ac5ed17d89ca5f19151c649ac610f6f37bd1/uwsgi_sloth/models.py#L65-L84
def merge_requests_data_to(to, food={}):
    """Merge a small analyzed result into a big one; this function will
    modify the original ``to``"""
    if not to:
        to.update(food)

    to['requests_counter']['normal'] += food['requests_counter']['normal']
    to['requests_counter']['slow'] += food['requests_counter']['slow']
    to['total_slow_duration'] += food['total_slow_duration']
    for group_name, urls in food['data_details'].items():
        if group_name not in to['data_details']:
            to['data_details'][group_name] = urls
        else:
            to_urls = to['data_details'][group_name]
            to_urls['duration_agr_data'] = to_urls['duration_agr_data'].merge_with(
                urls['duration_agr_data'])

            # Merge urls data
            merge_urls_data_to(to_urls['urls'], urls['urls'])
[ "def", "merge_requests_data_to", "(", "to", ",", "food", "=", "{", "}", ")", ":", "if", "not", "to", ":", "to", ".", "update", "(", "food", ")", "to", "[", "'requests_counter'", "]", "[", "'normal'", "]", "+=", "food", "[", "'requests_counter'", "]", "[", "'normal'", "]", "to", "[", "'requests_counter'", "]", "[", "'slow'", "]", "+=", "food", "[", "'requests_counter'", "]", "[", "'slow'", "]", "to", "[", "'total_slow_duration'", "]", "+=", "food", "[", "'total_slow_duration'", "]", "for", "group_name", ",", "urls", "in", "food", "[", "'data_details'", "]", ".", "items", "(", ")", ":", "if", "group_name", "not", "in", "to", "[", "'data_details'", "]", ":", "to", "[", "'data_details'", "]", "[", "group_name", "]", "=", "urls", "else", ":", "to_urls", "=", "to", "[", "'data_details'", "]", "[", "group_name", "]", "to_urls", "[", "'duration_agr_data'", "]", "=", "to_urls", "[", "'duration_agr_data'", "]", ".", "merge_with", "(", "urls", "[", "'duration_agr_data'", "]", ")", "# Merge urls data", "merge_urls_data_to", "(", "to_urls", "[", "'urls'", "]", ",", "urls", "[", "'urls'", "]", ")" ]
Merge a small analyzed result into a big one; this function will modify the original ``to``
[ "Merge", "a", "small", "analyzed", "result", "to", "a", "big", "one", "this", "function", "will", "modify", "the", "original", "to" ]
python
train
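One subtlety worth flagging, as a hedged reading of the code rather than a confirmed bug: when ``to`` is empty, ``to.update(food)`` leaves both dicts sharing the same nested counter objects, so the ``+=`` lines that follow appear to add ``food``'s counts a second time; the mutable ``food={}`` default is also shared across calls. A sketch of a seeding step that sidesteps both issues:

import copy

def seed_or_merge(to, food=None):
    food = {} if food is None else food    # avoid the shared {} default
    if not to:
        to.update(copy.deepcopy(food))     # deep copy breaks the aliasing
        return
    to['requests_counter']['normal'] += food['requests_counter']['normal']
    to['requests_counter']['slow'] += food['requests_counter']['slow']
    to['total_slow_duration'] += food['total_slow_duration']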
delph-in/pydelphin
delphin/mrs/query.py
https://github.com/delph-in/pydelphin/blob/7bd2cd63ab7cf74803e1d6547b9ebc014b382abd/delphin/mrs/query.py#L183-L205
def select_icons(xmrs, left=None, relation=None, right=None):
    """
    Return the list of matching ICONS for *xmrs*.

    :class:`~delphin.mrs.components.IndividualConstraint` objects for
    *xmrs* match if their `left` matches *left*, `relation` matches
    *relation*, and `right` matches *right*. The *left*, *relation*,
    and *right* filters are ignored if they are `None`.

    Args:
        xmrs (:class:`~delphin.mrs.xmrs.Xmrs`): semantic structure to
            query
        left (str, optional): left variable to match
        relation (str, optional): individual constraint relation to
            match
        right (str, optional): right variable to match
    Returns:
        list: matching ICONS
    """
    icmatch = lambda ic: (
        (left is None or ic.left == left) and
        (relation is None or ic.relation == relation) and
        (right is None or ic.right == right))
    return list(filter(icmatch, xmrs.icons()))
[ "def", "select_icons", "(", "xmrs", ",", "left", "=", "None", ",", "relation", "=", "None", ",", "right", "=", "None", ")", ":", "icmatch", "=", "lambda", "ic", ":", "(", "(", "left", "is", "None", "or", "ic", ".", "left", "==", "left", ")", "and", "(", "relation", "is", "None", "or", "ic", ".", "relation", "==", "relation", ")", "and", "(", "right", "is", "None", "or", "ic", ".", "right", "==", "right", ")", ")", "return", "list", "(", "filter", "(", "icmatch", ",", "xmrs", ".", "icons", "(", ")", ")", ")" ]
Return the list of matching ICONS for *xmrs*.

:class:`~delphin.mrs.components.IndividualConstraint` objects for
*xmrs* match if their `left` matches *left*, `relation` matches
*relation*, and `right` matches *right*. The *left*, *relation*,
and *right* filters are ignored if they are `None`.

Args:
    xmrs (:class:`~delphin.mrs.xmrs.Xmrs`): semantic structure to query
    left (str, optional): left variable to match
    relation (str, optional): individual constraint relation to match
    right (str, optional): right variable to match
Returns:
    list: matching ICONS
[ "Return", "the", "list", "of", "matching", "ICONS", "for", "*", "xmrs", "*", "." ]
python
train
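The None-means-skip filter pattern is easy to reproduce with plain tuples (the IC namedtuple and the sample values are stand-ins for IndividualConstraint objects):

from collections import namedtuple

IC = namedtuple('IC', 'left relation right')
icons = [IC('e2', 'info-str', 'e5'), IC('e3', 'topic', 'e7')]

left = None            # a None filter is skipped, as in select_icons()
relation = 'topic'
right = None
matches = [ic for ic in icons
           if (left is None or ic.left == left)
           and (relation is None or ic.relation == relation)
           and (right is None or ic.right == right)]
assert matches == [IC('e3', 'topic', 'e7')]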
edx/pa11ycrawler
pa11ycrawler/pipelines/__init__.py
https://github.com/edx/pa11ycrawler/blob/fc672d4524463bc050ade4c7c97801c0d5bf8c9e/pa11ycrawler/pipelines/__init__.py#L27-L39
def is_sequence_start_page(self, url):
    """
    Does this URL represent the first page in a section sequence?
    E.g. /courses/{coursename}/courseware/{block_id}/{section_id}/1
    This will return the same page as the pattern
    /courses/{coursename}/courseware/{block_id}/{section_id}.
    """
    return (
        len(url.path.segments) == 6 and
        url.path.segments[0] == 'courses' and
        url.path.segments[2] == 'courseware' and
        url.path.segments[5] == '1'
    )
[ "def", "is_sequence_start_page", "(", "self", ",", "url", ")", ":", "return", "(", "len", "(", "url", ".", "path", ".", "segments", ")", "==", "6", "and", "url", ".", "path", ".", "segments", "[", "0", "]", "==", "'courses'", "and", "url", ".", "path", ".", "segments", "[", "2", "]", "==", "'courseware'", "and", "url", ".", "path", ".", "segments", "[", "5", "]", "==", "'1'", ")" ]
Does this URL represent the first page in a section sequence?
E.g. /courses/{coursename}/courseware/{block_id}/{section_id}/1
This will return the same page as the pattern
/courses/{coursename}/courseware/{block_id}/{section_id}.
[ "Does", "this", "URL", "represent", "the", "first", "page", "in", "a", "section", "sequence?", "E", ".", "g", ".", "/", "courses", "/", "{", "coursename", "}", "/", "courseware", "/", "{", "block_id", "}", "/", "{", "section_id", "}", "/", "1", "This", "will", "return", "the", "same", "page", "as", "the", "pattern", "/", "courses", "/", "{", "coursename", "}", "/", "courseware", "/", "{", "block_id", "}", "/", "{", "section_id", "}", "." ]
python
train
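A rough standalone check of the same predicate over raw path segments (the method above operates on a URL object whose .path.segments attribute the crawler supplies; splitting the path string by hand is an approximation):

segments = 'courses/DemoX/courseware/block1/section1/1'.split('/')
is_start = (
    len(segments) == 6
    and segments[0] == 'courses'
    and segments[2] == 'courseware'
    and segments[5] == '1'
)
assert is_start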
LordGaav/python-chaos
chaos/amqp/exchange.py
https://github.com/LordGaav/python-chaos/blob/52cd29a6fd15693ee1e53786b93bcb23fbf84ddd/chaos/amqp/exchange.py#L74-L104
def publish(self, message, properties=None, mandatory=False):
    """
    Publish a message to an AMQP exchange.

    Parameters
    ----------
    message: string
        Message to publish.
    properties: dict
        Properties to set on message. This parameter is optional, but
        if set, at least the following options must be set:

        content_type: string - what content_type to specify, default
            is 'text/plain'.
        delivery_mode: int - what delivery_mode to use. By default
            messages are not persistent, but this can be set by
            specifying PERSISTENT_MESSAGE.

        The following options are also available:

        routing_key: string - what routing_key to use. MUST be set if
            this was not set during __init__.
        exchange: string - what exchange to use. MUST be set if this
            was not set during __init__.
    mandatory: boolean
        If set to True, the mandatory bit will be set on the published
        message.

    Returns
    -------
    Depending on the mode of the Channel, the return value can signify
    different things:

    basic_Confirm is active:
        True means that the message has been delivered to a queue,
        False means it hasn't.
    mandatory bit was set on message:
        True means that the message has been delivered to a consumer,
        False means that it has been returned.
    No special bit or mode has been set:
        None is returned.
    """
    return publish_message(self.channel, self.exchange_name,
                           self.default_routing_key, message, properties,
                           mandatory)
[ "def", "publish", "(", "self", ",", "message", ",", "properties", "=", "None", ",", "mandatory", "=", "False", ")", ":", "return", "publish_message", "(", "self", ".", "channel", ",", "self", ".", "exchange_name", ",", "self", ".", "default_routing_key", ",", "message", ",", "properties", ",", "mandatory", ")" ]
Publish a message to an AMQP exchange.

Parameters
----------
message: string
    Message to publish.
properties: dict
    Properties to set on message. This parameter is optional, but if
    set, at least the following options must be set:

    content_type: string - what content_type to specify, default is
        'text/plain'.
    delivery_mode: int - what delivery_mode to use. By default
        messages are not persistent, but this can be set by
        specifying PERSISTENT_MESSAGE.

    The following options are also available:

    routing_key: string - what routing_key to use. MUST be set if
        this was not set during __init__.
    exchange: string - what exchange to use. MUST be set if this was
        not set during __init__.
mandatory: boolean
    If set to True, the mandatory bit will be set on the published
    message.

Returns
-------
Depending on the mode of the Channel, the return value can signify
different things:

basic_Confirm is active:
    True means that the message has been delivered to a queue,
    False means it hasn't.
mandatory bit was set on message:
    True means that the message has been delivered to a consumer,
    False means that it has been returned.
No special bit or mode has been set:
    None is returned.
[ "Publish", "a", "message", "to", "an", "AMQP", "exchange", "." ]
python
train
quantumlib/Cirq
cirq/protocols/resolve_parameters.py
https://github.com/quantumlib/Cirq/blob/0827da80dd7880e5b923eb69407e980ed9bc0bd2/cirq/protocols/resolve_parameters.py#L40-L62
def is_parameterized(val: Any) -> bool: """Returns whether the object is parameterized with any Symbols. A value is parameterized when it has an `_is_parameterized_` method and that method returns a truthy value, or if the value is an instance of sympy.Basic. Returns: True if the gate has any unresolved Symbols and False otherwise. If no implementation of the magic method above exists or if that method returns NotImplemented, this will default to False. """ if isinstance(val, sympy.Basic): return True getter = getattr(val, '_is_parameterized_', None) result = NotImplemented if getter is None else getter() if result is not NotImplemented: return result else: return False
[ "def", "is_parameterized", "(", "val", ":", "Any", ")", "->", "bool", ":", "if", "isinstance", "(", "val", ",", "sympy", ".", "Basic", ")", ":", "return", "True", "getter", "=", "getattr", "(", "val", ",", "'_is_parameterized_'", ",", "None", ")", "result", "=", "NotImplemented", "if", "getter", "is", "None", "else", "getter", "(", ")", "if", "result", "is", "not", "NotImplemented", ":", "return", "result", "else", ":", "return", "False" ]
Returns whether the object is parameterized with any Symbols.

A value is parameterized when it has an `_is_parameterized_` method
and that method returns a truthy value, or if the value is an
instance of sympy.Basic.

Returns:
    True if the gate has any unresolved Symbols and False otherwise.
    If no implementation of the magic method above exists or if that
    method returns NotImplemented, this will default to False.
[ "Returns", "whether", "the", "object", "is", "parameterized", "with", "any", "Symbols", "." ]
python
train
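A small usage sketch, assuming is_parameterized from the snippet above is in scope and sympy is installed (the MyGate class is illustrative, not a Cirq type):

import sympy

class MyGate:
    def __init__(self, exponent):
        self.exponent = exponent

    def _is_parameterized_(self):
        # The protocol hook: truthy when the exponent is still symbolic.
        return isinstance(self.exponent, sympy.Basic)

assert is_parameterized(sympy.Symbol('t'))          # sympy values qualify
assert is_parameterized(MyGate(sympy.Symbol('t')))  # hook returns True
assert not is_parameterized(MyGate(0.5))            # resolved, so False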
Faylixe/pygame_vkeyboard
pygame_vkeyboard/vkeyboard.py
https://github.com/Faylixe/pygame_vkeyboard/blob/72753a47b4d1d8bf22c9c51ca877aef742481d2a/pygame_vkeyboard/vkeyboard.py#L323-L334
def add_key(self, key, first=False):
    """Adds the given key to this row.

    :param key: Key to be added to this row.
    :param first: Boolean flag that indicates if key is added at the
        beginning or at the end.
    """
    if first:
        self.keys = [key] + self.keys
    else:
        self.keys.append(key)
    if isinstance(key, VSpaceKey):
        self.space = key
[ "def", "add_key", "(", "self", ",", "key", ",", "first", "=", "False", ")", ":", "if", "first", ":", "self", ".", "keys", "=", "[", "key", "]", "+", "self", ".", "keys", "else", ":", "self", ".", "keys", ".", "append", "(", "key", ")", "if", "isinstance", "(", "key", ",", "VSpaceKey", ")", ":", "self", ".", "space", "=", "key" ]
Adds the given key to this row.

:param key: Key to be added to this row.
:param first: Boolean flag that indicates if key is added at the
    beginning or at the end.
[ "Adds", "the", "given", "key", "to", "this", "row", "." ]
python
train
crytic/slither
slither/solc_parsing/variables/event_variable.py
https://github.com/crytic/slither/blob/04c147f7e50223c6af458ca430befae747ccd259/slither/solc_parsing/variables/event_variable.py#L7-L18
def _analyze_variable_attributes(self, attributes):
    """
    Analyze event variable attributes
    :param attributes: The event variable attributes to parse.
    :return: None
    """
    # Check for the indexed attribute
    if 'indexed' in attributes:
        self._indexed = attributes['indexed']

    super(EventVariableSolc, self)._analyze_variable_attributes(attributes)
[ "def", "_analyze_variable_attributes", "(", "self", ",", "attributes", ")", ":", "# Check for the indexed attribute", "if", "'indexed'", "in", "attributes", ":", "self", ".", "_indexed", "=", "attributes", "[", "'indexed'", "]", "super", "(", "EventVariableSolc", ",", "self", ")", ".", "_analyze_variable_attributes", "(", "attributes", ")" ]
Analyze event variable attributes
:param attributes: The event variable attributes to parse.
:return: None
[ "Analyze", "event", "variable", "attributes", ":", "param", "attributes", ":", "The", "event", "variable", "attributes", "to", "parse", ".", ":", "return", ":", "None" ]
python
train
TissueMAPS/TmDeploy
elasticluster/elasticluster/cluster.py
https://github.com/TissueMAPS/TmDeploy/blob/f891b4ffb21431988bc4a063ae871da3bf284a45/elasticluster/elasticluster/cluster.py#L1107-L1132
def is_alive(self):
    """Checks if the current node is up and running in the cloud. It
    only checks the status provided by the cloud interface. Therefore a
    node might be running, but not yet ready to ssh into it.
    """
    running = False
    if not self.instance_id:
        return False

    try:
        log.debug("Getting information for instance %s",
                  self.instance_id)
        running = self._cloud_provider.is_instance_running(
            self.instance_id)
    except Exception as ex:
        log.debug("Ignoring error while looking for vm id %s: %s",
                  self.instance_id, str(ex))
    if running:
        log.debug("node `%s` (instance id %s) is up and running",
                  self.name, self.instance_id)
        self.update_ips()
    else:
        log.debug("node `%s` (instance id `%s`) still building...",
                  self.name, self.instance_id)

    return running
[ "def", "is_alive", "(", "self", ")", ":", "running", "=", "False", "if", "not", "self", ".", "instance_id", ":", "return", "False", "try", ":", "log", ".", "debug", "(", "\"Getting information for instance %s\"", ",", "self", ".", "instance_id", ")", "running", "=", "self", ".", "_cloud_provider", ".", "is_instance_running", "(", "self", ".", "instance_id", ")", "except", "Exception", "as", "ex", ":", "log", ".", "debug", "(", "\"Ignoring error while looking for vm id %s: %s\"", ",", "self", ".", "instance_id", ",", "str", "(", "ex", ")", ")", "if", "running", ":", "log", ".", "debug", "(", "\"node `%s` (instance id %s) is up and running\"", ",", "self", ".", "name", ",", "self", ".", "instance_id", ")", "self", ".", "update_ips", "(", ")", "else", ":", "log", ".", "debug", "(", "\"node `%s` (instance id `%s`) still building...\"", ",", "self", ".", "name", ",", "self", ".", "instance_id", ")", "return", "running" ]
Checks if the current node is up and running in the cloud. It only checks the status provided by the cloud interface. Therefore a node might be running, but not yet ready to ssh into it.
[ "Checks", "if", "the", "current", "node", "is", "up", "and", "running", "in", "the", "cloud", ".", "It", "only", "checks", "the", "status", "provided", "by", "the", "cloud", "interface", ".", "Therefore", "a", "node", "might", "be", "running", "but", "not", "yet", "ready", "to", "ssh", "into", "it", "." ]
python
train
datadotworld/data.world-py
datadotworld/client/_swagger/apis/datasets_api.py
https://github.com/datadotworld/data.world-py/blob/ffaeb115f358731ab0b805b0c43b7ff2e3cf0a77/datadotworld/client/_swagger/apis/datasets_api.py#L519-L545
def delete_files_and_sync_sources(self, owner, id, name, **kwargs):
    """
    Delete files

    Delete one or more files from a dataset by their name, including
    files added via URL.

    **Batching** Note that the `name` parameter can be included
    multiple times in the query string, once for each file that is to
    be deleted together in a single request.

    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please define a `callback` function
    to be invoked when receiving the response.
    >>> def callback_function(response):
    >>>     pprint(response)
    >>>
    >>> thread = api.delete_files_and_sync_sources(owner, id, name, callback=callback_function)

    :param callback function: The callback function
        for asynchronous request. (optional)
    :param str owner: User name and unique identifier of the creator of a dataset or project. For example, in the URL: [https://data.world/jonloyens/an-intro-to-dataworld-dataset](https://data.world/jonloyens/an-intro-to-dataworld-dataset), jonloyens is the unique identifier of the owner. (required)
    :param str id: Dataset unique identifier. For example, in the URL:[https://data.world/jonloyens/an-intro-to-dataworld-dataset](https://data.world/jonloyens/an-intro-to-dataworld-dataset), an-intro-to-dataworld-dataset is the unique identifier of the dataset. (required)
    :param list[str] name: Names of files to be deleted. Multiple can be provided in a single request by repeating the query string parameter name as many times as necessary. (required)
    :return: SuccessMessage
             If the method is called asynchronously,
             returns the request thread.
    """
    kwargs['_return_http_data_only'] = True
    if kwargs.get('callback'):
        return self.delete_files_and_sync_sources_with_http_info(owner, id, name, **kwargs)
    else:
        (data) = self.delete_files_and_sync_sources_with_http_info(owner, id, name, **kwargs)
        return data
[ "def", "delete_files_and_sync_sources", "(", "self", ",", "owner", ",", "id", ",", "name", ",", "*", "*", "kwargs", ")", ":", "kwargs", "[", "'_return_http_data_only'", "]", "=", "True", "if", "kwargs", ".", "get", "(", "'callback'", ")", ":", "return", "self", ".", "delete_files_and_sync_sources_with_http_info", "(", "owner", ",", "id", ",", "name", ",", "*", "*", "kwargs", ")", "else", ":", "(", "data", ")", "=", "self", ".", "delete_files_and_sync_sources_with_http_info", "(", "owner", ",", "id", ",", "name", ",", "*", "*", "kwargs", ")", "return", "data" ]
Delete files

Delete one or more files from a dataset by their name, including files
added via URL.

**Batching** Note that the `name` parameter can be included multiple
times in the query string, once for each file that is to be deleted
together in a single request.

This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>>     pprint(response)
>>>
>>> thread = api.delete_files_and_sync_sources(owner, id, name, callback=callback_function)

:param callback function: The callback function for asynchronous request. (optional)
:param str owner: User name and unique identifier of the creator of a dataset or project. For example, in the URL: [https://data.world/jonloyens/an-intro-to-dataworld-dataset](https://data.world/jonloyens/an-intro-to-dataworld-dataset), jonloyens is the unique identifier of the owner. (required)
:param str id: Dataset unique identifier. For example, in the URL:[https://data.world/jonloyens/an-intro-to-dataworld-dataset](https://data.world/jonloyens/an-intro-to-dataworld-dataset), an-intro-to-dataworld-dataset is the unique identifier of the dataset. (required)
:param list[str] name: Names of files to be deleted. Multiple can be provided in a single request by repeating the query string parameter name as many times as necessary. (required)
:return: SuccessMessage
         If the method is called asynchronously,
         returns the request thread.
[ "Delete", "files", "Delete", "one", "or", "more", "files", "from", "a", "dataset", "by", "their", "name", "including", "files", "added", "via", "URL", ".", "**", "Batching", "**", "Note", "that", "the", "name", "parameter", "can", "be", "include", "multiple", "times", "in", "the", "query", "string", "once", "for", "each", "file", "that", "is", "to", "be", "deleted", "together", "in", "a", "single", "request", ".", "This", "method", "makes", "a", "synchronous", "HTTP", "request", "by", "default", ".", "To", "make", "an", "asynchronous", "HTTP", "request", "please", "define", "a", "callback", "function", "to", "be", "invoked", "when", "receiving", "the", "response", ".", ">>>", "def", "callback_function", "(", "response", ")", ":", ">>>", "pprint", "(", "response", ")", ">>>", ">>>", "thread", "=", "api", ".", "delete_files_and_sync_sources", "(", "owner", "id", "name", "callback", "=", "callback_function", ")" ]
python
train
googlesamples/assistant-sdk-python
google-assistant-sdk/googlesamples/assistant/grpc/devicetool.py
https://github.com/googlesamples/assistant-sdk-python/blob/84995692f35be8e085de8dfa7032039a13ae3fab/google-assistant-sdk/googlesamples/assistant/grpc/devicetool.py#L62-L73
def pretty_print_model(devicemodel):
    """Prints out a device model in the terminal by parsing dict."""
    PRETTY_PRINT_MODEL = """Device Model ID: %(deviceModelId)s
        Project ID: %(projectId)s
        Device Type: %(deviceType)s"""
    logging.info(PRETTY_PRINT_MODEL % devicemodel)
    if 'traits' in devicemodel:
        for trait in devicemodel['traits']:
            logging.info(' Trait %s' % trait)
    else:
        logging.info('No traits')
    logging.info('')
[ "def", "pretty_print_model", "(", "devicemodel", ")", ":", "PRETTY_PRINT_MODEL", "=", "\"\"\"Device Model ID: %(deviceModelId)s\n Project ID: %(projectId)s\n Device Type: %(deviceType)s\"\"\"", "logging", ".", "info", "(", "PRETTY_PRINT_MODEL", "%", "devicemodel", ")", "if", "'traits'", "in", "devicemodel", ":", "for", "trait", "in", "devicemodel", "[", "'traits'", "]", ":", "logging", ".", "info", "(", "' Trait %s'", "%", "trait", ")", "else", ":", "logging", ".", "info", "(", "'No traits'", ")", "logging", ".", "info", "(", "''", ")" ]
Prints out a device model in the terminal by parsing dict.
[ "Prints", "out", "a", "device", "model", "in", "the", "terminal", "by", "parsing", "dict", "." ]
python
train
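The formatting relies on Python's mapping-key %(name)s interpolation against the model dict. A usage sketch with made-up field values (logging must be configured for the INFO lines to show):

import logging
logging.basicConfig(level=logging.INFO, format='%(message)s')

devicemodel = {
    'deviceModelId': 'demo-model',  # illustrative values, not real IDs
    'projectId': 'demo-project',
    'deviceType': 'action.devices.types.LIGHT',
    'traits': ['action.devices.traits.OnOff'],
}
pretty_print_model(devicemodel)
# Device Model ID: demo-model
#     Project ID: demo-project
#     Device Type: action.devices.types.LIGHT
#  Trait action.devices.traits.OnOff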
juicer/juicer
juicer/common/Cart.py
https://github.com/juicer/juicer/blob/0c9f0fd59e293d45df6b46e81f675d33221c600d/juicer/common/Cart.py#L99-L119
def load(self, json_file):
    """
    Build a cart from a json file
    """
    cart_file = os.path.join(CART_LOCATION, json_file)

    try:
        cart_body = juicer.utils.read_json_document(cart_file)
    except IOError as e:
        juicer.utils.Log.log_error('an error occurred while accessing %s:' %
                                   cart_file)
        raise JuicerError(e.message)

    self.cart_name = cart_body['_id']

    if cart_body['current_env'] == '':
        self.current_env = juicer.utils.get_login_info()[1]['start_in']
    else:
        self.current_env = cart_body['current_env']

    for repo, items in cart_body['repos_items'].iteritems():
        self.add_repo(repo, items)
[ "def", "load", "(", "self", ",", "json_file", ")", ":", "cart_file", "=", "os", ".", "path", ".", "join", "(", "CART_LOCATION", ",", "json_file", ")", "try", ":", "cart_body", "=", "juicer", ".", "utils", ".", "read_json_document", "(", "cart_file", ")", "except", "IOError", "as", "e", ":", "juicer", ".", "utils", ".", "Log", ".", "log_error", "(", "'an error occured while accessing %s:'", "%", "cart_file", ")", "raise", "JuicerError", "(", "e", ".", "message", ")", "self", ".", "cart_name", "=", "cart_body", "[", "'_id'", "]", "if", "cart_body", "[", "'current_env'", "]", "==", "''", ":", "self", ".", "current_env", "=", "juicer", ".", "utils", ".", "get_login_info", "(", ")", "[", "1", "]", "[", "'start_in'", "]", "else", ":", "self", ".", "current_env", "=", "cart_body", "[", "'current_env'", "]", "for", "repo", ",", "items", "in", "cart_body", "[", "'repos_items'", "]", ".", "iteritems", "(", ")", ":", "self", ".", "add_repo", "(", "repo", ",", "items", ")" ]
Build a cart from a json file
[ "Build", "a", "cart", "from", "a", "json", "file" ]
python
train
GoogleCloudPlatform/appengine-pipelines
python/src/pipeline/util.py
https://github.com/GoogleCloudPlatform/appengine-pipelines/blob/277394648dac3e8214677af898935d07399ac8e1/python/src/pipeline/util.py#L136-L152
def is_generator_function(obj):
    """Return true if the object is a user-defined generator function.

    Generator function objects provide the same attributes as functions.
    See isfunction.__doc__ for the attributes listing.

    Adapted from Python 2.6.

    Args:
      obj: an object to test.

    Returns:
      true if the object is generator function.
    """
    CO_GENERATOR = 0x20
    return bool(((inspect.isfunction(obj) or inspect.ismethod(obj)) and
                 obj.func_code.co_flags & CO_GENERATOR))
[ "def", "is_generator_function", "(", "obj", ")", ":", "CO_GENERATOR", "=", "0x20", "return", "bool", "(", "(", "(", "inspect", ".", "isfunction", "(", "obj", ")", "or", "inspect", ".", "ismethod", "(", "obj", ")", ")", "and", "obj", ".", "func_code", ".", "co_flags", "&", "CO_GENERATOR", ")", ")" ]
Return true if the object is a user-defined generator function.

Generator function objects provide the same attributes as functions.
See isfunction.__doc__ for the attributes listing.

Adapted from Python 2.6.

Args:
  obj: an object to test.

Returns:
  true if the object is generator function.
[ "Return", "true", "if", "the", "object", "is", "a", "user", "-", "defined", "generator", "function", "." ]
python
train
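The snippet is Python 2 code (obj.func_code); on Python 3 the same flag lives on __code__, and the stdlib exposes the identical test as inspect.isgeneratorfunction. A quick demonstration of the flag check:

import inspect

def gen():
    yield 1

def fn():
    return 1

CO_GENERATOR = 0x20
assert gen.__code__.co_flags & CO_GENERATOR        # generator bit is set
assert not (fn.__code__.co_flags & CO_GENERATOR)   # plain function
assert inspect.isgeneratorfunction(gen) and not inspect.isgeneratorfunction(fn)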
numenta/htmresearch
projects/sequence_prediction/reberGrammar/reberSequencePrediction_HMM.py
https://github.com/numenta/htmresearch/blob/70c096b09a577ea0432c3f3bfff4442d4871b7aa/projects/sequence_prediction/reberGrammar/reberSequencePrediction_HMM.py#L143-L175
def runExperiment():
    """
    Experiment 1: Calculate error rate as a function of training sequence numbers
    :return:
    """
    trainSeqN = [5, 10, 20, 50, 100, 200]
    rptPerCondition = 20
    correctRateAll = np.zeros((len(trainSeqN), rptPerCondition))
    missRateAll = np.zeros((len(trainSeqN), rptPerCondition))
    fpRateAll = np.zeros((len(trainSeqN), rptPerCondition))
    for i in xrange(len(trainSeqN)):
        for rpt in xrange(rptPerCondition):
            numTrainSequence = trainSeqN[i]
            correctRate, missRate, fpRate = runSingleExperiment(numTrainSequence=numTrainSequence)
            correctRateAll[i, rpt] = correctRate
            missRateAll[i, rpt] = missRate
            fpRateAll[i, rpt] = fpRate

    plt.figure()
    plt.subplot(2, 2, 1)
    plt.semilogx(trainSeqN, 100 * np.mean(correctRateAll, 1), '-*')
    plt.xlabel(' Training Sequence Number')
    plt.ylabel(' Hit Rate - Best Match (%)')
    plt.subplot(2, 2, 2)
    plt.semilogx(trainSeqN, 100 * np.mean(missRateAll, 1), '-*')
    plt.xlabel(' Training Sequence Number')
    plt.ylabel(' Miss Rate (%)')
    plt.subplot(2, 2, 3)
    plt.semilogx(trainSeqN, 100 * np.mean(fpRateAll, 1), '-*')
    plt.xlabel(' Training Sequence Number')
    plt.ylabel(' False Positive Rate (%)')
    plt.savefig('result/ReberSequence_HMMperformance.pdf')
    plt.show()
[ "def", "runExperiment", "(", ")", ":", "trainSeqN", "=", "[", "5", ",", "10", ",", "20", ",", "50", ",", "100", ",", "200", "]", "rptPerCondition", "=", "20", "correctRateAll", "=", "np", ".", "zeros", "(", "(", "len", "(", "trainSeqN", ")", ",", "rptPerCondition", ")", ")", "missRateAll", "=", "np", ".", "zeros", "(", "(", "len", "(", "trainSeqN", ")", ",", "rptPerCondition", ")", ")", "fpRateAll", "=", "np", ".", "zeros", "(", "(", "len", "(", "trainSeqN", ")", ",", "rptPerCondition", ")", ")", "for", "i", "in", "xrange", "(", "len", "(", "trainSeqN", ")", ")", ":", "for", "rpt", "in", "xrange", "(", "rptPerCondition", ")", ":", "numTrainSequence", "=", "trainSeqN", "[", "i", "]", "correctRate", ",", "missRate", ",", "fpRate", "=", "runSingleExperiment", "(", "numTrainSequence", "=", "numTrainSequence", ")", "correctRateAll", "[", "i", ",", "rpt", "]", "=", "correctRate", "missRateAll", "[", "i", ",", "rpt", "]", "=", "missRate", "fpRateAll", "[", "i", ",", "rpt", "]", "=", "fpRate", "plt", ".", "figure", "(", ")", "plt", ".", "subplot", "(", "2", ",", "2", ",", "1", ")", "plt", ".", "semilogx", "(", "trainSeqN", ",", "100", "*", "np", ".", "mean", "(", "correctRateAll", ",", "1", ")", ",", "'-*'", ")", "plt", ".", "xlabel", "(", "' Training Sequence Number'", ")", "plt", ".", "ylabel", "(", "' Hit Rate - Best Match (%)'", ")", "plt", ".", "subplot", "(", "2", ",", "2", ",", "2", ")", "plt", ".", "semilogx", "(", "trainSeqN", ",", "100", "*", "np", ".", "mean", "(", "missRateAll", ",", "1", ")", ",", "'-*'", ")", "plt", ".", "xlabel", "(", "' Training Sequence Number'", ")", "plt", ".", "ylabel", "(", "' Miss Rate (%)'", ")", "plt", ".", "subplot", "(", "2", ",", "2", ",", "3", ")", "plt", ".", "semilogx", "(", "trainSeqN", ",", "100", "*", "np", ".", "mean", "(", "fpRateAll", ",", "1", ")", ",", "'-*'", ")", "plt", ".", "xlabel", "(", "' Training Sequence Number'", ")", "plt", ".", "ylabel", "(", "' False Positive Rate (%)'", ")", "plt", ".", "savefig", "(", "'result/ReberSequence_HMMperformance.pdf'", ")", "plt", ".", "show", "(", ")" ]
Experiment 1: Calculate error rate as a function of training sequence numbers :return:
[ "Experiment", "1", ":", "Calculate", "error", "rate", "as", "a", "function", "of", "training", "sequence", "numbers", ":", "return", ":" ]
python
train
django-parler/django-parler
parler/views.py
https://github.com/django-parler/django-parler/blob/11ae4af5e8faddb74c69c848870122df4006a54e/parler/views.py#L205-L212
def get_object(self, queryset=None):
    """
    Assign the language for the retrieved object.
    """
    object = super(LanguageChoiceMixin, self).get_object(queryset)
    if isinstance(object, TranslatableModelMixin):
        object.set_current_language(self.get_language(), initialize=True)
    return object
[ "def", "get_object", "(", "self", ",", "queryset", "=", "None", ")", ":", "object", "=", "super", "(", "LanguageChoiceMixin", ",", "self", ")", ".", "get_object", "(", "queryset", ")", "if", "isinstance", "(", "object", ",", "TranslatableModelMixin", ")", ":", "object", ".", "set_current_language", "(", "self", ".", "get_language", "(", ")", ",", "initialize", "=", "True", ")", "return", "object" ]
Assign the language for the retrieved object.
[ "Assign", "the", "language", "for", "the", "retrieved", "object", "." ]
python
train
confirm/ansibleci
ansibleci/config.py
https://github.com/confirm/ansibleci/blob/6a53ae8c4a4653624977e146092422857f661b8f/ansibleci/config.py#L61-L67
def add_module(self, module):
    '''
    Adds configuration parameters from a Python module.
    '''
    for key, value in module.__dict__.iteritems():
        if key[0:2] != '__':
            self.__setattr__(attr=key, value=value)
[ "def", "add_module", "(", "self", ",", "module", ")", ":", "for", "key", ",", "value", "in", "module", ".", "__dict__", ".", "iteritems", "(", ")", ":", "if", "key", "[", "0", ":", "2", "]", "!=", "'__'", ":", "self", ".", "__setattr__", "(", "attr", "=", "key", ",", "value", "=", "value", ")" ]
Adds configuration parameters from a Python module.
[ "Adds", "configuration", "parameters", "from", "a", "Python", "module", "." ]
python
train
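Only dunder names are filtered by the key[0:2] != '__' test; single-underscore names pass through. A standalone sketch using a synthetic module (the names settings, TIMEOUT, and _internal are made up for illustration):

import types

settings = types.ModuleType('settings')
settings.TIMEOUT = 30
settings._internal = 'kept'   # single underscore is not filtered out

params = {k: v for k, v in vars(settings).items() if k[0:2] != '__'}
assert params == {'TIMEOUT': 30, '_internal': 'kept'}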
baliame/http-hmac-python
httphmac/v2.py
https://github.com/baliame/http-hmac-python/blob/9884c0cbfdb712f9f37080a8efbfdce82850785f/httphmac/v2.py#L226-L242
def check(self, request, response, secret):
    """Checks the response for the appropriate signature.
    Returns True if the signature matches the expected value.

    Keyword arguments:
    request -- A request object which can be consumed by this API.
    response -- A requests response object or compatible signed response object.
    secret -- The base64-encoded secret key for the HMAC authorization.
    """
    auth = request.get_header('Authorization')
    if auth == '':
        raise KeyError('Authorization header is required for the request.')
    ah = self.orig.parse_auth_headers(auth)
    act = response.headers['X-Server-Authorization-HMAC-SHA256']
    if act == '':
        raise KeyError('Response is missing the signature header X-Server-Authorization-HMAC-SHA256.')
    sig = self.sign(request, ah, response.text, secret)
    return sig == act
[ "def", "check", "(", "self", ",", "request", ",", "response", ",", "secret", ")", ":", "auth", "=", "request", ".", "get_header", "(", "'Authorization'", ")", "if", "auth", "==", "''", ":", "raise", "KeyError", "(", "'Authorization header is required for the request.'", ")", "ah", "=", "self", ".", "orig", ".", "parse_auth_headers", "(", "auth", ")", "act", "=", "response", ".", "headers", "[", "'X-Server-Authorization-HMAC-SHA256'", "]", "if", "act", "==", "''", ":", "raise", "KeyError", "(", "'Response is missing the signature header X-Server-Authorization-HMAC-SHA256.'", ")", "sig", "=", "self", ".", "sign", "(", "request", ",", "ah", ",", "response", ".", "text", ",", "secret", ")", "return", "sig", "==", "act" ]
Checks the response for the appropriate signature.
Returns True if the signature matches the expected value.

Keyword arguments:
request -- A request object which can be consumed by this API.
response -- A requests response object or compatible signed response object.
secret -- The base64-encoded secret key for the HMAC authorization.
[ "Checks", "the", "response", "for", "the", "appropriate", "signature", ".", "Returns", "True", "if", "the", "signature", "matches", "the", "expected", "value", "." ]
python
train
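For orientation, the primitive underneath sign() is a base64-encoded HMAC-SHA256 over a signable string; the sketch below shows only that primitive applied to a response body (the real v2 signable string also folds in nonce, timestamp, and header material, which is omitted here). One design note: the method above compares signatures with ==, whereas hmac.compare_digest would give a timing-safe comparison:

import base64
import hashlib
import hmac

def sign_body(body, secret_b64):
    # HMAC-SHA256 over the body; key and digest are base64-encoded.
    key = base64.b64decode(secret_b64)
    digest = hmac.new(key, body.encode('utf-8'), hashlib.sha256).digest()
    return base64.b64encode(digest).decode('utf-8')

secret = base64.b64encode(b'not-a-real-secret').decode('utf-8')
expected = sign_body('{"ok": true}', secret)
assert hmac.compare_digest(expected, sign_body('{"ok": true}', secret))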
GPflow/GPflow
gpflow/training/monitor.py
https://github.com/GPflow/GPflow/blob/549394f0b1b0696c7b521a065e49bdae6e7acf27/gpflow/training/monitor.py#L836-L854
def _eval_summary(self, context: MonitorContext, feed_dict: Optional[Dict]=None) -> None:
    """
    Evaluates the summary tensor and writes the result to the event file.
    :param context: Monitor context
    :param feed_dict: Input values dictionary to be provided to the `session.run`
    when evaluating the summary tensor.
    """
    if self._summary is None:
        raise RuntimeError('TensorBoard monitor task should set the Tensorflow.Summary object')
    if context.session is None:
        raise RuntimeError('To run a TensorBoard monitor task the TF session object'
                           ' must be provided when creating an instance of the Monitor')

    summary = context.session.run(self._summary, feed_dict=feed_dict)
    self._file_writer.add_summary(summary, context.global_step)
    if self._flush_immediately:
        self.flush()
[ "def", "_eval_summary", "(", "self", ",", "context", ":", "MonitorContext", ",", "feed_dict", ":", "Optional", "[", "Dict", "]", "=", "None", ")", "->", "None", ":", "if", "self", ".", "_summary", "is", "None", ":", "raise", "RuntimeError", "(", "'TensorBoard monitor task should set the Tensorflow.Summary object'", ")", "if", "context", ".", "session", "is", "None", ":", "raise", "RuntimeError", "(", "'To run a TensorBoard monitor task the TF session object'", "' must be provided when creating an instance of the Monitor'", ")", "summary", "=", "context", ".", "session", ".", "run", "(", "self", ".", "_summary", ",", "feed_dict", "=", "feed_dict", ")", "self", ".", "_file_writer", ".", "add_summary", "(", "summary", ",", "context", ".", "global_step", ")", "if", "self", ".", "_flush_immediately", ":", "self", ".", "flush", "(", ")" ]
Evaluates the summary tensor and writes the result to the event file.
:param context: Monitor context
:param feed_dict: Input values dictionary to be provided to the `session.run`
    when evaluating the summary tensor.
[ "Evaluates", "the", "summary", "tensor", "and", "writes", "the", "result", "to", "the", "event", "file", ".", ":", "param", "context", ":", "Monitor", "context", ":", "param", "feed_dict", ":", "Input", "values", "dictionary", "to", "be", "provided", "to", "the", "session", ".", "run", "when", "evaluating", "the", "summary", "tensor", "." ]
python
train
wglass/lighthouse
lighthouse/haproxy/control.py
https://github.com/wglass/lighthouse/blob/f4ce6550895acc31e433ede0c05d366718a3ffe5/lighthouse/haproxy/control.py#L97-L119
def get_info(self):
    """
    Parses the output of a "show info" HAProxy command and returns a
    simple dictionary of the results.
    """
    info_response = self.send_command("show info")
    if not info_response:
        return {}

    def convert_camel_case(string):
        return all_cap_re.sub(
            r'\1_\2',
            first_cap_re.sub(r'\1_\2', string)
        ).lower()

    return dict(
        (convert_camel_case(label), value)
        for label, value in [
            line.split(": ")
            for line in info_response.split("\n")
        ]
    )
[ "def", "get_info", "(", "self", ")", ":", "info_response", "=", "self", ".", "send_command", "(", "\"show info\"", ")", "if", "not", "info_response", ":", "return", "{", "}", "def", "convert_camel_case", "(", "string", ")", ":", "return", "all_cap_re", ".", "sub", "(", "r'\\1_\\2'", ",", "first_cap_re", ".", "sub", "(", "r'\\1_\\2'", ",", "string", ")", ")", ".", "lower", "(", ")", "return", "dict", "(", "(", "convert_camel_case", "(", "label", ")", ",", "value", ")", "for", "label", ",", "value", "in", "[", "line", ".", "split", "(", "\": \"", ")", "for", "line", "in", "info_response", ".", "split", "(", "\"\\n\"", ")", "]", ")" ]
Parses the output of a "show info" HAProxy command and returns a simple dictionary of the results.
[ "Parses", "the", "output", "of", "a", "show", "info", "HAProxy", "command", "and", "returns", "a", "simple", "dictionary", "of", "the", "results", "." ]
python
train
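first_cap_re and all_cap_re are module-level patterns not shown in the snippet; the conventional camel-to-snake regex pair below reproduces the behavior (an assumption about the module, not a verified copy). The sample labels are the kind "show info" emits:

import re

first_cap_re = re.compile(r'(.)([A-Z][a-z]+)')
all_cap_re = re.compile(r'([a-z0-9])([A-Z])')

def convert_camel_case(string):
    # Insert underscores before capitalized word boundaries, then lowercase.
    return all_cap_re.sub(r'\1_\2', first_cap_re.sub(r'\1_\2', string)).lower()

assert convert_camel_case('CurrConns') == 'curr_conns'
assert convert_camel_case('PipesUsed') == 'pipes_used'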
LLNL/scraper
scraper/tfs/__init__.py
https://github.com/LLNL/scraper/blob/881a316e4c04dfa5a9cf491b7c7f9f997a7c56ea/scraper/tfs/__init__.py#L54-L71
def create_tfs_core_client(url, token=None):
    """
    Create a core_client.py client for a Team Foundation Server Enterprise
    connection instance

    If token is not provided, will attempt to use the TFS_API_TOKEN
    environment variable if present.
    """
    if token is None:
        token = os.environ.get('TFS_API_TOKEN', None)

    tfs_connection = create_tfs_connection(url, token)
    tfs_client = tfs_connection.get_client('vsts.core.v4_1.core_client.CoreClient')
    if tfs_client is None:
        msg = 'Unable to connect to TFS Enterprise (%s) with provided token.'
        raise RuntimeError(msg, url)

    return tfs_client
[ "def", "create_tfs_core_client", "(", "url", ",", "token", "=", "None", ")", ":", "if", "token", "is", "None", ":", "token", "=", "os", ".", "environ", ".", "get", "(", "'TFS_API_TOKEN'", ",", "None", ")", "tfs_connection", "=", "create_tfs_connection", "(", "url", ",", "token", ")", "tfs_client", "=", "tfs_connection", ".", "get_client", "(", "'vsts.core.v4_1.core_client.CoreClient'", ")", "if", "tfs_client", "is", "None", ":", "msg", "=", "'Unable to connect to TFS Enterprise (%s) with provided token.'", "raise", "RuntimeError", "(", "msg", ",", "url", ")", "return", "tfs_client" ]
Create a core_client.py client for a Team Foundation Server Enterprise
connection instance

If token is not provided, will attempt to use the TFS_API_TOKEN
environment variable if present.
[ "Create", "a", "core_client", ".", "py", "client", "for", "a", "Team", "Foundation", "Server", "Enterprise", "connection", "instance" ]
python
test
stanfordnlp/stanza
stanza/text/vocab.py
https://github.com/stanfordnlp/stanza/blob/920c55d8eaa1e7105971059c66eb448a74c100d6/stanza/text/vocab.py#L153-L174
def _index2word(self):
    """Mapping from indices to words.

    WARNING: this may go out-of-date, because it is a copy, not a view into the Vocab.

    :return: a list of strings
    """
    # TODO(kelvinguu): it would be nice to just use `dict.viewkeys`, but unfortunately those are not indexable
    compute_index2word = lambda: self.keys()  # this works because self is an OrderedDict

    # create if it doesn't exist
    try:
        self._index2word_cache
    except AttributeError:
        self._index2word_cache = compute_index2word()

    # update if it is out of date
    if len(self._index2word_cache) != len(self):
        self._index2word_cache = compute_index2word()

    return self._index2word_cache
[ "def", "_index2word", "(", "self", ")", ":", "# TODO(kelvinguu): it would be nice to just use `dict.viewkeys`, but unfortunately those are not indexable", "compute_index2word", "=", "lambda", ":", "self", ".", "keys", "(", ")", "# this works because self is an OrderedDict", "# create if it doesn't exist", "try", ":", "self", ".", "_index2word_cache", "except", "AttributeError", ":", "self", ".", "_index2word_cache", "=", "compute_index2word", "(", ")", "# update if it is out of date", "if", "len", "(", "self", ".", "_index2word_cache", ")", "!=", "len", "(", "self", ")", ":", "self", ".", "_index2word_cache", "=", "compute_index2word", "(", ")", "return", "self", ".", "_index2word_cache" ]
Mapping from indices to words.

WARNING: this may go out-of-date, because it is a copy, not a view into the Vocab.

:return: a list of strings
[ "Mapping", "from", "indices", "to", "words", "." ]
python
train
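A toy version of the cache-by-length trick (a plain dict stands in for the OrderedDict-based Vocab; dicts keep insertion order on Python 3.7+). The honest limitation is visible here too: a same-size mutation would not invalidate the cache, which is exactly the "may go out-of-date" warning above:

class ToyVocab(dict):
    @property
    def index2word(self):
        try:
            self._cache
        except AttributeError:
            self._cache = list(self.keys())
        if len(self._cache) != len(self):   # rebuild when entries were added
            self._cache = list(self.keys())
        return self._cache

v = ToyVocab()
v['<unk>'] = 0
assert v.index2word == ['<unk>']
v['hello'] = 1
assert v.index2word == ['<unk>', 'hello']   # stale copy was refreshed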
znc-sistemas/django-municipios
municipios/utils/ibge.py
https://github.com/znc-sistemas/django-municipios/blob/bdaf56315af7e1db4e0005a241e9fd95271a7ce1/municipios/utils/ibge.py#L54-L120
def convert_shapefile(shapefilename, srid=4674):
    """
    shapefilename: uses the IBGE shapefile naming convention to determine
        whether the file contains UF (states) or Municípios (municipalities),
        e.g. 55UF2500GC_SIR.shp for UF and 55MU2500GC_SIR.shp for Municípios
    srid: 4674 (SIRGAS 2000 projection)
    """
    # /home/nando/Desktop/IBGE/2010/55MU2500GC_SIR.shp
    ds = DataSource(shapefilename)

    is_uf = shapefilename.upper().find('UF') != -1

    transform_coord = None
    if srid != SRID:
        transform_coord = CoordTransform(SpatialReference(srid), SpatialReference(SRID))

    if is_uf:
        model = UF
    else:
        model = Municipio

    ct = 0
    for f in ds[0]:
        # 3D para 2D se necessário
        if f.geom.coord_dim != 2:
            f.geom.coord_dim = 2

        # converte para MultiPolygon se necessário
        if isinstance(f.geom, Polygon):
            g = OGRGeometry(OGRGeomType('MultiPolygon'))
            g.add(f.geom)
        else:
            g = f.geom

        # transforma coordenadas se necessário
        if transform_coord:
            g.transform(transform_coord)

        # força 2D
        g.coord_dim = 2

        kwargs = {}
        if is_uf:
            kwargs['nome'] = capitalize_name(unicode(f.get(CAMPO_NOME_UF), 'latin1'))
            kwargs['geom'] = g.ewkt
            kwargs['id_ibge'] = f.get(CAMPO_GEOCODIGO_UF)
            kwargs['regiao'] = capitalize_name(unicode(f.get(CAMPO_REGIAO_UF), 'latin1'))
            kwargs['uf'] = UF_SIGLAS_DICT.get(kwargs['id_ibge'])
        else:
            kwargs['nome'] = capitalize_name(unicode(f.get(CAMPO_NOME_MU), 'latin1'))
            kwargs['geom'] = g.ewkt
            kwargs['id_ibge'] = f.get(CAMPO_GEOCODIGO_MU)
            kwargs['uf'] = UF.objects.get(pk=f.get(CAMPO_GEOCODIGO_MU)[:2])
            kwargs['uf_sigla'] = kwargs['uf'].uf
            kwargs['nome_abreviado'] = slugify(kwargs['nome'])
            # tenta corrigir nomes duplicados, são em torno de 242 nomes repetidos
            # adicionando a sigla do estado no final
            if Municipio.objects.filter(nome_abreviado=kwargs['nome_abreviado']).count() > 0:
                kwargs['nome_abreviado'] = u'%s-%s' % (kwargs['nome_abreviado'], kwargs['uf_sigla'].lower())
        instance = model(**kwargs)
        instance.save()
        ct += 1

    print(ct, (is_uf and "Unidades Federativas criadas" or "Municipios criados"))
[ "def", "convert_shapefile", "(", "shapefilename", ",", "srid", "=", "4674", ")", ":", "# /home/nando/Desktop/IBGE/2010/55MU2500GC_SIR.shp", "ds", "=", "DataSource", "(", "shapefilename", ")", "is_uf", "=", "shapefilename", ".", "upper", "(", ")", ".", "find", "(", "'UF'", ")", "!=", "-", "1", "transform_coord", "=", "None", "if", "srid", "!=", "SRID", ":", "transform_coord", "=", "CoordTransform", "(", "SpatialReference", "(", "srid", ")", ",", "SpatialReference", "(", "SRID", ")", ")", "if", "is_uf", ":", "model", "=", "UF", "else", ":", "model", "=", "Municipio", "ct", "=", "0", "for", "f", "in", "ds", "[", "0", "]", ":", "# 3D para 2D se necessário", "if", "f", ".", "geom", ".", "coord_dim", "!=", "2", ":", "f", ".", "geom", ".", "coord_dim", "=", "2", "# converte para MultiPolygon se necessário", "if", "isinstance", "(", "f", ".", "geom", ",", "Polygon", ")", ":", "g", "=", "OGRGeometry", "(", "OGRGeomType", "(", "'MultiPolygon'", ")", ")", "g", ".", "add", "(", "f", ".", "geom", ")", "else", ":", "g", "=", "f", ".", "geom", "# transforma coordenadas se necessário", "if", "transform_coord", ":", "g", ".", "transform", "(", "transform_coord", ")", "# força 2D", "g", ".", "coord_dim", "=", "2", "kwargs", "=", "{", "}", "if", "is_uf", ":", "kwargs", "[", "'nome'", "]", "=", "capitalize_name", "(", "unicode", "(", "f", ".", "get", "(", "CAMPO_NOME_UF", ")", ",", "'latin1'", ")", ")", "kwargs", "[", "'geom'", "]", "=", "g", ".", "ewkt", "kwargs", "[", "'id_ibge'", "]", "=", "f", ".", "get", "(", "CAMPO_GEOCODIGO_UF", ")", "kwargs", "[", "'regiao'", "]", "=", "capitalize_name", "(", "unicode", "(", "f", ".", "get", "(", "CAMPO_REGIAO_UF", ")", ",", "'latin1'", ")", ")", "kwargs", "[", "'uf'", "]", "=", "UF_SIGLAS_DICT", ".", "get", "(", "kwargs", "[", "'id_ibge'", "]", ")", "else", ":", "kwargs", "[", "'nome'", "]", "=", "capitalize_name", "(", "unicode", "(", "f", ".", "get", "(", "CAMPO_NOME_MU", ")", ",", "'latin1'", ")", ")", "kwargs", "[", "'geom'", "]", "=", "g", ".", "ewkt", "kwargs", "[", "'id_ibge'", "]", "=", "f", ".", "get", "(", "CAMPO_GEOCODIGO_MU", ")", "kwargs", "[", "'uf'", "]", "=", "UF", ".", "objects", ".", "get", "(", "pk", "=", "f", ".", "get", "(", "CAMPO_GEOCODIGO_MU", ")", "[", ":", "2", "]", ")", "kwargs", "[", "'uf_sigla'", "]", "=", "kwargs", "[", "'uf'", "]", ".", "uf", "kwargs", "[", "'nome_abreviado'", "]", "=", "slugify", "(", "kwargs", "[", "'nome'", "]", ")", "# tenta corrigir nomes duplicados, são em torno de 242 nomes repetidos", "# adicionando a sigla do estado no final", "if", "Municipio", ".", "objects", ".", "filter", "(", "nome_abreviado", "=", "kwargs", "[", "'nome_abreviado'", "]", ")", ".", "count", "(", ")", ">", "0", ":", "kwargs", "[", "'nome_abreviado'", "]", "=", "u'%s-%s'", "%", "(", "kwargs", "[", "'nome_abreviado'", "]", ",", "kwargs", "[", "'uf_sigla'", "]", ".", "lower", "(", ")", ")", "instance", "=", "model", "(", "*", "*", "kwargs", ")", "instance", ".", "save", "(", ")", "ct", "+=", "1", "print", "(", "ct", ",", "(", "is_uf", "and", "\"Unidades Federativas criadas\"", "or", "\"Municipios criados\"", ")", ")" ]
shapefilename: uses the IBGE shapefile naming convention to determine
    whether the file contains UF (states) or Municípios (municipalities),
    e.g. 55UF2500GC_SIR.shp for UF and 55MU2500GC_SIR.shp for Municípios
srid: 4674 (SIRGAS 2000 projection)
[ "shapefilename", ":", "considera", "nomenclatura", "de", "shapefile", "do", "IBGE", "para", "determinar", "se", "é", "UF", "ou", "Municípios", ".", "ex", ".", "55UF2500GC_SIR", ".", "shp", "para", "UF", "e", "55MU2500GC_SIR", ".", "shp", "para", "Municípios", "srid", ":", "4674", "(", "Projeção", "SIRGAS", "2000", ")" ]
python
train
thombashi/pytablereader
pytablereader/sqlite/core.py
https://github.com/thombashi/pytablereader/blob/bc3c057a2cc775bcce690e0e9019c2907b638101/pytablereader/sqlite/core.py#L40-L68
def load(self):
    """
    Extract tabular data as |TableData| instances from a SQLite database
    file. |load_source_desc_file|

    :return:
        Loaded table data iterator.
        |load_table_name_desc|

        ===================  ==============================================
        Format specifier     Value after the replacement
        ===================  ==============================================
        ``%(filename)s``     |filename_desc|
        ``%(key)s``          ``%(format_name)s%(format_id)s``
        ``%(format_name)s``  ``"sqlite"``
        ``%(format_id)s``    |format_id_desc|
        ``%(global_id)s``    |global_id|
        ===================  ==============================================
    :rtype: |TableData| iterator
    :raises pytablereader.DataError:
        If the SQLite database file data is invalid or empty.
    """
    self._validate()

    formatter = SqliteTableFormatter(self.source)
    formatter.accept(self)

    return formatter.to_table_data()
[ "def", "load", "(", "self", ")", ":", "self", ".", "_validate", "(", ")", "formatter", "=", "SqliteTableFormatter", "(", "self", ".", "source", ")", "formatter", ".", "accept", "(", "self", ")", "return", "formatter", ".", "to_table_data", "(", ")" ]
Extract tabular data as |TableData| instances from a SQLite database file. |load_source_desc_file| :return: Loaded table data iterator. |load_table_name_desc| =================== ============================================== Format specifier Value after the replacement =================== ============================================== ``%(filename)s`` |filename_desc| ``%(key)s`` ``%(format_name)s%(format_id)s`` ``%(format_name)s`` ``"sqlite"`` ``%(format_id)s`` |format_id_desc| ``%(global_id)s`` |global_id| =================== ============================================== :rtype: |TableData| iterator :raises pytablereader.DataError: If the SQLite database file data is invalid or empty.
[ "Extract", "tabular", "data", "as", "|TableData|", "instances", "from", "a", "SQLite", "database", "file", ".", "|load_source_desc_file|" ]
python
train
MacHu-GWU/pymongo_mate-project
pymongo_mate/pkg/pandas_mate/sql_io.py
https://github.com/MacHu-GWU/pymongo_mate-project/blob/be53170c2db54cb705b9e548d32ef26c773ff7f3/pymongo_mate/pkg/pandas_mate/sql_io.py#L16-L52
def smart_insert(df, table, engine, minimal_size=5):
    """An optimized Insert strategy.

    A method for storing all of the good rows of a large DataFrame into
    the database even when an IntegrityError occurs.
    """
    from sqlalchemy.exc import IntegrityError

    try:
        table_name = table.name
    except:
        table_name = table

    # 首先进行尝试bulk insert
    try:
        df.to_sql(table_name, engine, index=False, if_exists="append")
    # 失败了
    except IntegrityError:
        # 分析数据量
        n = df.shape[0]
        # 如果数据条数多于一定数量
        if n >= minimal_size ** 2:
            # 则进行分包
            n_chunk = math.floor(math.sqrt(n))
            for sub_df in grouper_df(df, n_chunk):
                smart_insert(
                    sub_df, table_name, engine, minimal_size)
        # 否则则一条条地逐条插入
        else:
            for sub_df in grouper_df(df, 1):
                try:
                    sub_df.to_sql(
                        table_name, engine, index=False, if_exists="append")
                except IntegrityError:
                    pass
[ "def", "smart_insert", "(", "df", ",", "table", ",", "engine", ",", "minimal_size", "=", "5", ")", ":", "from", "sqlalchemy", ".", "exc", "import", "IntegrityError", "try", ":", "table_name", "=", "table", ".", "name", "except", ":", "table_name", "=", "table", "# 首先进行尝试bulk insert", "try", ":", "df", ".", "to_sql", "(", "table_name", ",", "engine", ",", "index", "=", "False", ",", "if_exists", "=", "\"append\"", ")", "# 失败了", "except", "IntegrityError", ":", "# 分析数据量", "n", "=", "df", ".", "shape", "[", "0", "]", "# 如果数据条数多于一定数量", "if", "n", ">=", "minimal_size", "**", "2", ":", "# 则进行分包", "n_chunk", "=", "math", ".", "floor", "(", "math", ".", "sqrt", "(", "n", ")", ")", "for", "sub_df", "in", "grouper_df", "(", "df", ",", "n_chunk", ")", ":", "smart_insert", "(", "sub_df", ",", "table_name", ",", "engine", ",", "minimal_size", ")", "# 否则则一条条地逐条插入", "else", ":", "for", "sub_df", "in", "grouper_df", "(", "df", ",", "1", ")", ":", "try", ":", "sub_df", ".", "to_sql", "(", "table_name", ",", "engine", ",", "index", "=", "False", ",", "if_exists", "=", "\"append\"", ")", "except", "IntegrityError", ":", "pass" ]
An optimized Insert strategy. Stores as many rows of a large DataFrame as possible even when some of them raise an IntegrityError, by falling back to ever smaller inserts.
[ "An", "optimized", "Insert", "strategy", "." ]
python
train
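A runnable sketch of the fallback behaviour using an in-memory SQLite database; the table layout and frames are made up, while smart_insert and its import path come from the record.

import pandas as pd
import sqlalchemy as sa
from pymongo_mate.pkg.pandas_mate.sql_io import smart_insert

engine = sa.create_engine("sqlite:///:memory:")
with engine.begin() as conn:
    conn.execute(sa.text(
        "CREATE TABLE events (id INTEGER PRIMARY KEY, value INTEGER)"))

df1 = pd.DataFrame({"id": range(100), "value": range(100)})
df1.to_sql("events", engine, index=False, if_exists="append")

# 50 of these rows collide with df1 on the primary key; a plain to_sql()
# would fail wholesale, while smart_insert still lands the 50 fresh rows.
df2 = pd.DataFrame({"id": range(50, 150), "value": range(50, 150)})
smart_insert(df2, "events", engine, minimal_size=5)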
gem/oq-engine
openquake/hazardlib/gsim/dowrickrhoades_2005.py
https://github.com/gem/oq-engine/blob/8294553a0b8aba33fd96437a35065d03547d0040/openquake/hazardlib/gsim/dowrickrhoades_2005.py#L98-L120
def _compute_mean(self, C, mag, rrup, hypo_depth, delta_R, delta_S,
                  delta_V, delta_I, vs30):
    """
    Compute MMI Intensity Value as per Equation in Table 5 and
    Table 7 page 198.
    """
    # mean is calculated for all the 4 classes using the same equation.
    # For DowrickRhoades2005SSlab, the coefficients which don't appear in
    # the Model 3 equation are assigned to zero
    mean = (C['A1'] + (C['A2'] + C['A2R'] * delta_R + C['A2V'] * delta_V) *
            mag + (C['A3'] + C['A3S'] * delta_S + C['A3V'] * delta_V) *
            np.log10(np.power((rrup**3 + C['d']**3), 1.0 / 3.0)) +
            C['A4'] * hypo_depth + C['A5'] * delta_I)

    # Get S site class term
    S = self._get_site_class(vs30, mean)

    # Add S amplification term to mean value
    mean = mean + S

    return mean
[ "def", "_compute_mean", "(", "self", ",", "C", ",", "mag", ",", "rrup", ",", "hypo_depth", ",", "delta_R", ",", "delta_S", ",", "delta_V", ",", "delta_I", ",", "vs30", ")", ":", "# mean is calculated for all the 4 classes using the same equation.", "# For DowrickRhoades2005SSlab, the coefficients which don't appear in", "# Model 3 equationare assigned to zero", "mean", "=", "(", "C", "[", "'A1'", "]", "+", "(", "C", "[", "'A2'", "]", "+", "C", "[", "'A2R'", "]", "*", "delta_R", "+", "C", "[", "'A2V'", "]", "*", "delta_V", ")", "*", "mag", "+", "(", "C", "[", "'A3'", "]", "+", "C", "[", "'A3S'", "]", "*", "delta_S", "+", "C", "[", "'A3V'", "]", "*", "delta_V", ")", "*", "np", ".", "log10", "(", "np", ".", "power", "(", "(", "rrup", "**", "3", "+", "C", "[", "'d'", "]", "**", "3", ")", ",", "1.0", "/", "3.0", ")", ")", "+", "C", "[", "'A4'", "]", "*", "hypo_depth", "+", "C", "[", "'A5'", "]", "*", "delta_I", ")", "# Get S site class term", "S", "=", "self", ".", "_get_site_class", "(", "vs30", ",", "mean", ")", "# Add S amplification term to mean value", "mean", "=", "mean", "+", "S", "return", "mean" ]
Compute MMI Intensity Value as per Equation in Table 5 and Table 7 page 198.
[ "Compute", "MMI", "Intensity", "Value", "as", "per", "Equation", "in", "Table", "5", "and", "Table", "7", "pag", "198", "." ]
python
train
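A standalone rendering of the equation above, with placeholder coefficients (not the published Dowrick and Rhoades values) and the vs30-dependent site-class term omitted.

import numpy as np

# Placeholder coefficients, purely illustrative
C = {'A1': 4.4, 'A2': 1.3, 'A2R': 0.0, 'A2V': 0.0, 'A3': -3.8,
     'A3S': 0.0, 'A3V': 0.0, 'A4': 0.01, 'A5': 0.0, 'd': 10.0}
mag, hypo_depth = 6.5, 15.0
rrup = np.array([20.0, 50.0])  # rupture distances in km
delta_R = delta_S = delta_V = delta_I = 0.0

mmi = (C['A1'] + (C['A2'] + C['A2R'] * delta_R + C['A2V'] * delta_V) * mag
       + (C['A3'] + C['A3S'] * delta_S + C['A3V'] * delta_V)
       * np.log10(np.power(rrup ** 3 + C['d'] ** 3, 1.0 / 3.0))
       + C['A4'] * hypo_depth + C['A5'] * delta_I)
print(mmi)  # the real method then adds the site amplification term S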
maxcountryman/flask-uploads
flask_uploads.py
https://github.com/maxcountryman/flask-uploads/blob/dc24fa0c53d605876e5b4502cadffdf1a4345b1d/flask_uploads.py#L345-L358
def path(self, filename, folder=None): """ This returns the absolute path of a file uploaded to this set. It doesn't actually check whether said file exists. :param filename: The filename to return the path for. :param folder: The subfolder within the upload set previously used to save to. """ if folder is not None: target_folder = os.path.join(self.config.destination, folder) else: target_folder = self.config.destination return os.path.join(target_folder, filename)
[ "def", "path", "(", "self", ",", "filename", ",", "folder", "=", "None", ")", ":", "if", "folder", "is", "not", "None", ":", "target_folder", "=", "os", ".", "path", ".", "join", "(", "self", ".", "config", ".", "destination", ",", "folder", ")", "else", ":", "target_folder", "=", "self", ".", "config", ".", "destination", "return", "os", ".", "path", ".", "join", "(", "target_folder", ",", "filename", ")" ]
This returns the absolute path of a file uploaded to this set. It doesn't actually check whether said file exists. :param filename: The filename to return the path for. :param folder: The subfolder within the upload set previously used to save to.
[ "This", "returns", "the", "absolute", "path", "of", "a", "file", "uploaded", "to", "this", "set", ".", "It", "doesn", "t", "actually", "check", "whether", "said", "file", "exists", "." ]
python
test
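Since path() is a pure string join, its behaviour can be shown without a configured Flask app; the destination below stands in for self.config.destination.

import os

destination = '/var/uploads/photos'  # stand-in for the upload set's destination
folder, filename = 'user42', 'avatar.png'

target_folder = os.path.join(destination, folder) if folder else destination
print(os.path.join(target_folder, filename))
# /var/uploads/photos/user42/avatar.png (POSIX); no check that the file exists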
tino/pyFirmata
pyfirmata/util.py
https://github.com/tino/pyFirmata/blob/05881909c4d7c4e808e9ed457144670b2136706e/pyfirmata/util.py#L161-L225
def pin_list_to_board_dict(pinlist):
    """
    Capability Response codes:
        INPUT:  0, 1
        OUTPUT: 1, 1
        ANALOG: 2, 10
        PWM:    3, 8
        SERVO:  4, 14
        I2C:    6, 1
    """
    board_dict = {
        "digital": [],
        "analog": [],
        "pwm": [],
        "servo": [],  # 2.2 specs
        # 'i2c': [],  # 2.3 specs
        "disabled": [],
    }
    for i, pin in enumerate(pinlist):
        pin.pop()  # removes the 0x79 on end
        if not pin:
            board_dict["disabled"] += [i]
            board_dict["digital"] += [i]
            continue

        for j, _ in enumerate(pin):
            # Iterate over evens
            if j % 2 == 0:
                # Slicing past the end is safe, e.g. range(10)[5:50]
                if pin[j:j + 4] == [0, 1, 1, 1]:
                    board_dict["digital"] += [i]

                if pin[j:j + 2] == [2, 10]:
                    board_dict["analog"] += [i]

                if pin[j:j + 2] == [3, 8]:
                    board_dict["pwm"] += [i]

                if pin[j:j + 2] == [4, 14]:
                    board_dict["servo"] += [i]

                # Disable I2C
                if pin[j:j + 2] == [6, 1]:
                    pass

    # We have to deal with analog pins:
    # - (14, 15, 16, 17, 18, 19)
    # + (0, 1, 2, 3, 4, 5)
    diff = set(board_dict["digital"]) - set(board_dict["analog"])
    board_dict["analog"] = [n for n, _ in enumerate(board_dict["analog"])]

    # Digital pin problems:
    # - (2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15)
    # + (0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13)

    board_dict["digital"] = [n for n, _ in enumerate(diff)]
    # Based on lib Arduino 0017
    board_dict["servo"] = board_dict["digital"]

    # Turn lists into tuples
    # Using dict for Python 2.6 compatibility
    board_dict = dict([
        (key, tuple(value))
        for key, value
        in board_dict.items()
    ])

    return board_dict
[ "def", "pin_list_to_board_dict", "(", "pinlist", ")", ":", "board_dict", "=", "{", "\"digital\"", ":", "[", "]", ",", "\"analog\"", ":", "[", "]", ",", "\"pwm\"", ":", "[", "]", ",", "\"servo\"", ":", "[", "]", ",", "# 2.2 specs", "# 'i2c': [], # 2.3 specs", "\"disabled\"", ":", "[", "]", ",", "}", "for", "i", ",", "pin", "in", "enumerate", "(", "pinlist", ")", ":", "pin", ".", "pop", "(", ")", "# removes the 0x79 on end", "if", "not", "pin", ":", "board_dict", "[", "\"disabled\"", "]", "+=", "[", "i", "]", "board_dict", "[", "\"digital\"", "]", "+=", "[", "i", "]", "continue", "for", "j", ",", "_", "in", "enumerate", "(", "pin", ")", ":", "# Iterate over evens", "if", "j", "%", "2", "==", "0", ":", "# This is safe. try: range(10)[5:50]", "if", "pin", "[", "j", ":", "j", "+", "4", "]", "==", "[", "0", ",", "1", ",", "1", ",", "1", "]", ":", "board_dict", "[", "\"digital\"", "]", "+=", "[", "i", "]", "if", "pin", "[", "j", ":", "j", "+", "2", "]", "==", "[", "2", ",", "10", "]", ":", "board_dict", "[", "\"analog\"", "]", "+=", "[", "i", "]", "if", "pin", "[", "j", ":", "j", "+", "2", "]", "==", "[", "3", ",", "8", "]", ":", "board_dict", "[", "\"pwm\"", "]", "+=", "[", "i", "]", "if", "pin", "[", "j", ":", "j", "+", "2", "]", "==", "[", "4", ",", "14", "]", ":", "board_dict", "[", "\"servo\"", "]", "+=", "[", "i", "]", "# Desable I2C", "if", "pin", "[", "j", ":", "j", "+", "2", "]", "==", "[", "6", ",", "1", "]", ":", "pass", "# We have to deal with analog pins:", "# - (14, 15, 16, 17, 18, 19)", "# + (0, 1, 2, 3, 4, 5)", "diff", "=", "set", "(", "board_dict", "[", "\"digital\"", "]", ")", "-", "set", "(", "board_dict", "[", "\"analog\"", "]", ")", "board_dict", "[", "\"analog\"", "]", "=", "[", "n", "for", "n", ",", "_", "in", "enumerate", "(", "board_dict", "[", "\"analog\"", "]", ")", "]", "# Digital pin problems:", "# - (2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15)", "# + (0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13)", "board_dict", "[", "\"digital\"", "]", "=", "[", "n", "for", "n", ",", "_", "in", "enumerate", "(", "diff", ")", "]", "# Based on lib Arduino 0017", "board_dict", "[", "\"servo\"", "]", "=", "board_dict", "[", "\"digital\"", "]", "# Turn lists into tuples", "# Using dict for Python 2.6 compatibility", "board_dict", "=", "dict", "(", "[", "(", "key", ",", "tuple", "(", "value", ")", ")", "for", "key", ",", "value", "in", "board_dict", ".", "items", "(", ")", "]", ")", "return", "board_dict" ]
Capability Response codes: INPUT: 0, 1 OUTPUT: 1, 1 ANALOG: 2, 10 PWM: 3, 8 SERVO: 4, 14 I2C: 6, 1
[ "Capability", "Response", "codes", ":", "INPUT", ":", "0", "1", "OUTPUT", ":", "1", "1", "ANALOG", ":", "2", "10", "PWM", ":", "3", "8", "SERV0", ":", "4", "14", "I2C", ":", "6", "1" ]
python
train
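A hypothetical three-pin capability response, encoded with the codes from the docstring above (each pin report terminated by 0x79), assuming pin_list_to_board_dict from the record is in scope.

pinlist = [
    [0, 1, 1, 1, 2, 10, 0x79],  # digital input/output + analog
    [0, 1, 1, 1, 3, 8, 0x79],   # digital input/output + PWM
    [0x79],                      # reports nothing -> disabled
]
print(pin_list_to_board_dict(pinlist))
# {'digital': (0, 1), 'analog': (0,), 'pwm': (1,), 'servo': (0, 1),
#  'disabled': (2,)}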
Leeps-Lab/otree-redwood
otree_redwood/models.py
https://github.com/Leeps-Lab/otree-redwood/blob/59212f61a256ef77e0a9ed392ff497ea83ee6245/otree_redwood/models.py#L135-L142
def _on_disconnect(self, participant): """Trigger the :meth:`when_player_disconnects` callback.""" player = None for p in self.get_players(): if p.participant == participant: player = p break self.when_player_disconnects(player)
[ "def", "_on_disconnect", "(", "self", ",", "participant", ")", ":", "player", "=", "None", "for", "p", "in", "self", ".", "get_players", "(", ")", ":", "if", "p", ".", "participant", "==", "participant", ":", "player", "=", "p", "break", "self", ".", "when_player_disconnects", "(", "player", ")" ]
Trigger the :meth:`when_player_disconnects` callback.
[ "Trigger", "the", ":", "meth", ":", "when_player_disconnects", "callback", "." ]
python
train
rossant/ipymd
ipymd/core/prompt.py
https://github.com/rossant/ipymd/blob/d87c9ebc59d67fe78b0139ee00e0e5307682e303/ipymd/core/prompt.py#L231-L252
def create_prompt(prompt):
    """Create a prompt manager.

    Parameters
    ----------

    prompt : str or class deriving from BasePromptManager
        The prompt name ('python' or 'ipython') or a custom PromptManager
        class.

    """
    if prompt is None:
        prompt = 'python'
    if prompt == 'python':
        prompt = PythonPromptManager
    elif prompt == 'ipython':
        prompt = IPythonPromptManager
    # Instantiate the class.
    if isinstance(prompt, BasePromptManager):
        return prompt
    else:
        return prompt()
[ "def", "create_prompt", "(", "prompt", ")", ":", "if", "prompt", "is", "None", ":", "prompt", "=", "'python'", "if", "prompt", "==", "'python'", ":", "prompt", "=", "PythonPromptManager", "elif", "prompt", "==", "'ipython'", ":", "prompt", "=", "IPythonPromptManager", "# Instanciate the class.", "if", "isinstance", "(", "prompt", ",", "BasePromptManager", ")", ":", "return", "prompt", "else", ":", "return", "prompt", "(", ")" ]
Create a prompt manager. Parameters ---------- prompt : str or class deriving from BasePromptManager The prompt name ('python' or 'ipython') or a custom PromptManager class.
[ "Create", "a", "prompt", "manager", "." ]
python
train
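Usage sketch. Note the asymmetry: an instance passed in is returned as-is, while a name (or None) resolves to a manager class that gets instantiated.

from ipymd.core.prompt import create_prompt

create_prompt(None)             # same as create_prompt('python')
pm = create_prompt('ipython')   # an IPythonPromptManager instance
assert create_prompt(pm) is pm  # instances pass straight through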
phaethon/kamene
kamene/utils.py
https://github.com/phaethon/kamene/blob/11d4064844f4f68ac5d7546f5633ac7d02082914/kamene/utils.py#L1003-L1007
def wireshark(pktlist, *args): """Run wireshark on a list of packets""" fname = get_temp_file() wrpcap(fname, pktlist) subprocess.Popen([conf.prog.wireshark, "-r", fname] + list(args))
[ "def", "wireshark", "(", "pktlist", ",", "*", "args", ")", ":", "fname", "=", "get_temp_file", "(", ")", "wrpcap", "(", "fname", ",", "pktlist", ")", "subprocess", ".", "Popen", "(", "[", "conf", ".", "prog", ".", "wireshark", ",", "\"-r\"", ",", "fname", "]", "+", "list", "(", "args", ")", ")" ]
Run wireshark on a list of packets
[ "Run", "wireshark", "on", "a", "list", "of", "packets" ]
python
train
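Usage sketch, assuming kamene mirrors scapy's all-in-one namespace and that Wireshark is installed; capturing usually needs elevated privileges.

from kamene.all import sniff, wireshark

pkts = sniff(count=10)  # grab a few packets (typically needs root)
wireshark(pkts, "-n")   # extra args are appended to the wireshark call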
DataONEorg/d1_python
gmn/src/d1_gmn/app/views/create.py
https://github.com/DataONEorg/d1_python/blob/3ac4d4f3ca052d3e8641a6a329cab526c8ddcb0d/gmn/src/d1_gmn/app/views/create.py#L35-L83
def create_sciobj(request, sysmeta_pyxb): """Create object file and database entries for a new native locally stored (non- proxied) science object. This method takes a request object and is only called from the views that handle: - MNStorage.create() - MNStorage.update() Various sanity checking is performed. Raises D1 exceptions that are returned directly to the client. Adds create event to the event log. Preconditions: - None. This method should check everything. Postconditions: - A new file containing sciobj bytes, and models (database rows) for the newly added object. """ pid = d1_common.xml.get_req_val(sysmeta_pyxb.identifier) set_mn_controlled_values(request, sysmeta_pyxb, is_modification=False) d1_gmn.app.views.assert_db.is_valid_pid_for_create(pid) d1_gmn.app.views.assert_sysmeta.sanity(request, sysmeta_pyxb) if _is_proxy_sciobj(request): sciobj_url = _get_sciobj_proxy_url(request) _sanity_check_proxy_url(sciobj_url) else: sciobj_url = d1_gmn.app.sciobj_store.get_rel_sciobj_file_url_by_pid(pid) if not _is_proxy_sciobj(request): if d1_gmn.app.resource_map.is_resource_map_sysmeta_pyxb(sysmeta_pyxb): _create_resource_map(pid, request, sysmeta_pyxb, sciobj_url) else: _save_sciobj_bytes_from_request(request, pid) d1_gmn.app.scimeta.assert_valid(sysmeta_pyxb, pid) d1_gmn.app.sysmeta.create_or_update(sysmeta_pyxb, sciobj_url) d1_gmn.app.event_log.create( d1_common.xml.get_req_val(sysmeta_pyxb.identifier), request, timestamp=d1_common.date_time.normalize_datetime_to_utc( sysmeta_pyxb.dateUploaded ), )
[ "def", "create_sciobj", "(", "request", ",", "sysmeta_pyxb", ")", ":", "pid", "=", "d1_common", ".", "xml", ".", "get_req_val", "(", "sysmeta_pyxb", ".", "identifier", ")", "set_mn_controlled_values", "(", "request", ",", "sysmeta_pyxb", ",", "is_modification", "=", "False", ")", "d1_gmn", ".", "app", ".", "views", ".", "assert_db", ".", "is_valid_pid_for_create", "(", "pid", ")", "d1_gmn", ".", "app", ".", "views", ".", "assert_sysmeta", ".", "sanity", "(", "request", ",", "sysmeta_pyxb", ")", "if", "_is_proxy_sciobj", "(", "request", ")", ":", "sciobj_url", "=", "_get_sciobj_proxy_url", "(", "request", ")", "_sanity_check_proxy_url", "(", "sciobj_url", ")", "else", ":", "sciobj_url", "=", "d1_gmn", ".", "app", ".", "sciobj_store", ".", "get_rel_sciobj_file_url_by_pid", "(", "pid", ")", "if", "not", "_is_proxy_sciobj", "(", "request", ")", ":", "if", "d1_gmn", ".", "app", ".", "resource_map", ".", "is_resource_map_sysmeta_pyxb", "(", "sysmeta_pyxb", ")", ":", "_create_resource_map", "(", "pid", ",", "request", ",", "sysmeta_pyxb", ",", "sciobj_url", ")", "else", ":", "_save_sciobj_bytes_from_request", "(", "request", ",", "pid", ")", "d1_gmn", ".", "app", ".", "scimeta", ".", "assert_valid", "(", "sysmeta_pyxb", ",", "pid", ")", "d1_gmn", ".", "app", ".", "sysmeta", ".", "create_or_update", "(", "sysmeta_pyxb", ",", "sciobj_url", ")", "d1_gmn", ".", "app", ".", "event_log", ".", "create", "(", "d1_common", ".", "xml", ".", "get_req_val", "(", "sysmeta_pyxb", ".", "identifier", ")", ",", "request", ",", "timestamp", "=", "d1_common", ".", "date_time", ".", "normalize_datetime_to_utc", "(", "sysmeta_pyxb", ".", "dateUploaded", ")", ",", ")" ]
Create object file and database entries for a new native locally stored (non- proxied) science object. This method takes a request object and is only called from the views that handle: - MNStorage.create() - MNStorage.update() Various sanity checking is performed. Raises D1 exceptions that are returned directly to the client. Adds create event to the event log. Preconditions: - None. This method should check everything. Postconditions: - A new file containing sciobj bytes, and models (database rows) for the newly added object.
[ "Create", "object", "file", "and", "database", "entries", "for", "a", "new", "native", "locally", "stored", "(", "non", "-", "proxied", ")", "science", "object", "." ]
python
train
Erotemic/utool
utool/util_str.py
https://github.com/Erotemic/utool/blob/3b27e1f4e6e6fb23cd8744af7b7195b57d99e03a/utool/util_str.py#L2941-L2967
def highlight_multi_regex(str_, pat_to_color, reflags=0):
    """
    FIXME: Use pygments instead.

    Patterns must be mutually exclusive.
    """
    #import colorama
    # from colorama import Fore, Style
    #color = Fore.MAGENTA
    # color = Fore.RED
    #match = re.search(pat, str_, flags=reflags)
    colored = str_

    to_replace = []
    for pat, color in pat_to_color.items():
        matches = list(re.finditer(pat, str_, flags=reflags))

        for match in matches:
            start = match.start()
            end = match.end()
            to_replace.append((end, start, color))

    for tup in reversed(sorted(to_replace)):
        end, start, color = tup
        colored_part = color_text(colored[start:end], color)
        colored = colored[:start] + colored_part + colored[end:]

    return colored
[ "def", "highlight_multi_regex", "(", "str_", ",", "pat_to_color", ",", "reflags", "=", "0", ")", ":", "#import colorama", "# from colorama import Fore, Style", "#color = Fore.MAGENTA", "# color = Fore.RED", "#match = re.search(pat, str_, flags=reflags)", "colored", "=", "str_", "to_replace", "=", "[", "]", "for", "pat", ",", "color", "in", "pat_to_color", ".", "items", "(", ")", ":", "matches", "=", "list", "(", "re", ".", "finditer", "(", "pat", ",", "str_", ",", "flags", "=", "reflags", ")", ")", "for", "match", "in", "matches", ":", "start", "=", "match", ".", "start", "(", ")", "end", "=", "match", ".", "end", "(", ")", "to_replace", ".", "append", "(", "(", "end", ",", "start", ",", "color", ")", ")", "for", "tup", "in", "reversed", "(", "sorted", "(", "to_replace", ")", ")", ":", "end", ",", "start", ",", "color", "=", "tup", "colored_part", "=", "color_text", "(", "colored", "[", "start", ":", "end", "]", ",", "color", ")", "colored", "=", "colored", "[", ":", "start", "]", "+", "colored_part", "+", "colored", "[", "end", ":", "]", "return", "colored" ]
FIXME: Use pygments instead. Patterns must be mutually exclusive.
[ "FIXME", "Use", "pygments", "instead", ".", "must", "be", "mututally", "exclusive" ]
python
train
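Usage sketch; the color names are assumed to be ones color_text understands, and per the docstring the patterns must not overlap.

import re
from utool.util_str import highlight_multi_regex

log_line = 'ERROR in step 3, warning in step 7'
pat_to_color = {
    r'ERROR\w*': 'red',       # color names assumed, not verified
    r'WARNING\w*': 'yellow',
}
print(highlight_multi_regex(log_line, pat_to_color, reflags=re.IGNORECASE))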
lsbardel/python-stdnet
stdnet/backends/redisb/client/prefixed.py
https://github.com/lsbardel/python-stdnet/blob/78db5320bdedc3f28c5e4f38cda13a4469e35db7/stdnet/backends/redisb/client/prefixed.py#L122-L125
def execute_command(self, cmnd, *args, **options): "Execute a command and return a parsed response" args, options = self.preprocess_command(cmnd, *args, **options) return self.client.execute_command(cmnd, *args, **options)
[ "def", "execute_command", "(", "self", ",", "cmnd", ",", "*", "args", ",", "*", "*", "options", ")", ":", "args", ",", "options", "=", "self", ".", "preprocess_command", "(", "cmnd", ",", "*", "args", ",", "*", "*", "options", ")", "return", "self", ".", "client", ".", "execute_command", "(", "cmnd", ",", "*", "args", ",", "*", "*", "options", ")" ]
Execute a command and return a parsed response
[ "Execute", "a", "command", "and", "return", "a", "parsed", "response" ]
python
train
inveniosoftware-contrib/invenio-classifier
requirements.py
https://github.com/inveniosoftware-contrib/invenio-classifier/blob/3c758cf34dca6bf0548e7da5de34e5f72e3b255e/requirements.py#L59-L101
def parse_pip_file(path):
    """Parse pip requirements file."""
    # requirement lines sorted by importance
    # also collect other pip commands
    rdev = dict()
    rnormal = []
    stuff = []

    try:
        with open(path) as f:
            for line in f:
                line = line.strip()

                # see https://pip.readthedocs.org/en/1.1/requirements.html
                if line.startswith('-e'):
                    # devel requirement
                    splitted = line.split('#egg=')
                    rdev[splitted[1].lower()] = line

                elif line.startswith('-r'):
                    # recursive file command
                    splitted = re.split('-r\\s+', line)
                    subrdev, subrnormal, substuff = parse_pip_file(splitted[1])
                    for k, v in subrdev.items():
                        if k not in rdev:
                            rdev[k] = v
                    rnormal.extend(subrnormal)
                    stuff.extend(substuff)
                elif line.startswith('-'):
                    # another special command we don't recognize
                    stuff.append(line)
                else:
                    # ordinary requirement, similar to those used in setup.py
                    rnormal.append(line)
    except IOError:
        print(
            'Warning: could not parse requirements file "{}"!'.format(path),
            file=sys.stderr
        )

    return rdev, rnormal, stuff
[ "def", "parse_pip_file", "(", "path", ")", ":", "# requirement lines sorted by importance", "# also collect other pip commands", "rdev", "=", "dict", "(", ")", "rnormal", "=", "[", "]", "stuff", "=", "[", "]", "try", ":", "with", "open", "(", "path", ")", "as", "f", ":", "for", "line", "in", "f", ":", "line", "=", "line", ".", "strip", "(", ")", "# see https://pip.readthedocs.org/en/1.1/requirements.html", "if", "line", ".", "startswith", "(", "'-e'", ")", ":", "# devel requirement", "splitted", "=", "line", ".", "split", "(", "'#egg='", ")", "rdev", "[", "splitted", "[", "1", "]", ".", "lower", "(", ")", "]", "=", "line", "elif", "line", ".", "startswith", "(", "'-r'", ")", ":", "# recursive file command", "splitted", "=", "re", ".", "split", "(", "'-r\\\\s+'", ",", "line", ")", "subrdev", ",", "subrnormal", ",", "substuff", "=", "parse_pip_file", "(", "splitted", "[", "1", "]", ")", "for", "k", ",", "v", "in", "subrdev", ".", "iteritems", "(", ")", ":", "if", "k", "not", "in", "rdev", ":", "rdev", "[", "k", "]", "=", "v", "rnormal", ".", "extend", "(", "subrnormal", ")", "result", ".", "extend", "(", "substuff", ")", "elif", "line", ".", "startswith", "(", "'-'", ")", ":", "# another special command we don't recognize", "stuff", ".", "append", "(", "line", ")", "else", ":", "# ordenary requirement, similary to them used in setup.py", "rnormal", ".", "append", "(", "line", ")", "except", "IOError", ":", "print", "(", "'Warning: could not parse requirements file \"{}\"!'", ",", "file", "=", "sys", ".", "stderr", ")", "return", "rdev", ",", "rnormal", ",", "stuff" ]
Parse pip requirements file.
[ "Parse", "pip", "requirements", "file", "." ]
python
train
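A self-contained check of the three buckets, assuming parse_pip_file from the record above is in scope; the file contents are made up.

with open('reqs.txt', 'w') as f:
    f.write('-e git+https://example.org/pkg.git#egg=MyPkg\n')
    f.write('--no-index\n')
    f.write('requests>=2.0\n')

rdev, rnormal, stuff = parse_pip_file('reqs.txt')
print(rdev)     # {'mypkg': '-e git+https://example.org/pkg.git#egg=MyPkg'}
print(rnormal)  # ['requests>=2.0']
print(stuff)    # ['--no-index']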
coleifer/walrus
walrus/models.py
https://github.com/coleifer/walrus/blob/82bf15a6613487b5b5fefeb488f186d7e0106547/walrus/models.py#L752-L767
def query_delete(cls, expression=None): """ Delete model instances matching the given expression (if specified). If no expression is provided, then all model instances will be deleted. :param expression: A boolean expression to filter by. """ if expression is not None: executor = Executor(cls.__database__) result = executor.execute(expression) else: result = cls._query.all_index() for hash_id in result: cls.load(hash_id, convert_key=False).delete()
[ "def", "query_delete", "(", "cls", ",", "expression", "=", "None", ")", ":", "if", "expression", "is", "not", "None", ":", "executor", "=", "Executor", "(", "cls", ".", "__database__", ")", "result", "=", "executor", ".", "execute", "(", "expression", ")", "else", ":", "result", "=", "cls", ".", "_query", ".", "all_index", "(", ")", "for", "hash_id", "in", "result", ":", "cls", ".", "load", "(", "hash_id", ",", "convert_key", "=", "False", ")", ".", "delete", "(", ")" ]
Delete model instances matching the given expression (if specified). If no expression is provided, then all model instances will be deleted. :param expression: A boolean expression to filter by.
[ "Delete", "model", "instances", "matching", "the", "given", "expression", "(", "if", "specified", ")", ".", "If", "no", "expression", "is", "provided", "then", "all", "model", "instances", "will", "be", "deleted", "." ]
python
train
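Usage sketch, assuming a Redis server on the default port; the model is made up, and the field is indexed so the boolean expression can be evaluated.

from walrus import Database, Model, TextField

db = Database()  # localhost:6379 by default

class Note(Model):
    __database__ = db
    content = TextField(index=True)

Note.create(content='keep me')
Note.create(content='drop me')
Note.query_delete(Note.content == 'drop me')  # only the matching instance
Note.query_delete()                           # no expression: delete all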
smarie/python-valid8
valid8/entry_points_annotations.py
https://github.com/smarie/python-valid8/blob/5e15d1de11602933c5114eb9f73277ad91d97800/valid8/entry_points_annotations.py#L666-L696
def decorate_several_with_validation(func, _out_=None, # type: ValidationFuncs none_policy=None, # type: int **validation_funcs # type: ValidationFuncs ): # type: (...) -> Callable """ This method is equivalent to applying `decorate_with_validation` once for each of the provided arguments of the function `func` as well as output `_out_`. validation_funcs keyword arguments are validation functions for each arg name. Note that this method is less flexible than decorate_with_validation since * it does not allow to associate a custom error message or error type with each validation. * the none_policy is the same for all inputs and outputs :param func: :param _out_: :param validation_funcs: :param none_policy: :return: a function decorated with validation for all of the listed arguments and output if provided. """ # add validation for output if provided if _out_ is not None: func = decorate_with_validation(func, _OUT_KEY, _out_, none_policy=none_policy) # add validation for each of the listed arguments for att_name, att_validation_funcs in validation_funcs.items(): func = decorate_with_validation(func, att_name, att_validation_funcs, none_policy=none_policy) return func
[ "def", "decorate_several_with_validation", "(", "func", ",", "_out_", "=", "None", ",", "# type: ValidationFuncs", "none_policy", "=", "None", ",", "# type: int", "*", "*", "validation_funcs", "# type: ValidationFuncs", ")", ":", "# type: (...) -> Callable", "# add validation for output if provided", "if", "_out_", "is", "not", "None", ":", "func", "=", "decorate_with_validation", "(", "func", ",", "_OUT_KEY", ",", "_out_", ",", "none_policy", "=", "none_policy", ")", "# add validation for each of the listed arguments", "for", "att_name", ",", "att_validation_funcs", "in", "validation_funcs", ".", "items", "(", ")", ":", "func", "=", "decorate_with_validation", "(", "func", ",", "att_name", ",", "att_validation_funcs", ",", "none_policy", "=", "none_policy", ")", "return", "func" ]
This method is equivalent to applying `decorate_with_validation` once for each of the provided arguments of the function `func` as well as output `_out_`. validation_funcs keyword arguments are validation functions for each arg name. Note that this method is less flexible than decorate_with_validation since * it does not allow to associate a custom error message or error type with each validation. * the none_policy is the same for all inputs and outputs :param func: :param _out_: :param validation_funcs: :param none_policy: :return: a function decorated with validation for all of the listed arguments and output if provided.
[ "This", "method", "is", "equivalent", "to", "applying", "decorate_with_validation", "once", "for", "each", "of", "the", "provided", "arguments", "of", "the", "function", "func", "as", "well", "as", "output", "_out_", ".", "validation_funcs", "keyword", "arguments", "are", "validation", "functions", "for", "each", "arg", "name", "." ]
python
train
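Usage sketch; plain callables returning a truthy/falsy value serve as validation functions, and the exact exception raised on failure is left to valid8.

from valid8.entry_points_annotations import decorate_several_with_validation

def area(width, height):
    return width * height

area = decorate_several_with_validation(
    area,
    _out_=lambda v: v > 0,
    width=lambda x: x > 0,
    height=lambda x: x > 0,
)
area(2, 3)   # passes all three checks
# area(-1, 3) would raise a validation error on `width`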
gwpy/gwpy
gwpy/signal/filter_design.py
https://github.com/gwpy/gwpy/blob/7a92b917e7dd2d99b15895293a1fa1d66cdb210a/gwpy/signal/filter_design.py#L500-L566
def bandpass(flow, fhigh, sample_rate, fstop=None, gpass=2, gstop=30, type='iir', **kwargs): """Design a band-pass filter for the given cutoff frequencies Parameters ---------- flow : `float` lower corner frequency of pass band fhigh : `float` upper corner frequency of pass band sample_rate : `float` sampling rate of target data fstop : `tuple` of `float`, optional `(low, high)` edge-frequencies of stop band gpass : `float`, optional, default: 2 the maximum loss in the passband (dB) gstop : `float`, optional, default: 30 the minimum attenuation in the stopband (dB) type : `str`, optional, default: ``'iir'`` the filter type, either ``'iir'`` or ``'fir'`` **kwargs other keyword arguments are passed directly to :func:`~scipy.signal.iirdesign` or :func:`~scipy.signal.firwin` Returns ------- filter the formatted filter. the output format for an IIR filter depends on the input arguments, default is a tuple of `(zeros, poles, gain)` Notes ----- By default a digital filter is returned, meaning the zeros and poles are given in the Z-domain in units of radians/sample. Examples -------- To create a band-pass filter for 100-1000 Hz for 4096 Hz-sampled data: >>> from gwpy.signal.filter_design import bandpass >>> bp = bandpass(100, 1000, 4096) To view the filter, you can use the `~gwpy.plot.BodePlot`: >>> from gwpy.plot import BodePlot >>> plot = BodePlot(bp, sample_rate=4096) >>> plot.show() """ sample_rate = _as_float(sample_rate) flow = _as_float(flow) fhigh = _as_float(fhigh) if fstop is None: fstop = (flow * 2/3., min(fhigh * 1.5, sample_rate/2.)) fstop = (_as_float(fstop[0]), _as_float(fstop[1])) if type == 'iir': return _design_iir((flow, fhigh), fstop, sample_rate, gpass, gstop, **kwargs) return _design_fir((flow, fhigh), fstop, sample_rate, gpass, gstop, pass_zero=False, **kwargs)
[ "def", "bandpass", "(", "flow", ",", "fhigh", ",", "sample_rate", ",", "fstop", "=", "None", ",", "gpass", "=", "2", ",", "gstop", "=", "30", ",", "type", "=", "'iir'", ",", "*", "*", "kwargs", ")", ":", "sample_rate", "=", "_as_float", "(", "sample_rate", ")", "flow", "=", "_as_float", "(", "flow", ")", "fhigh", "=", "_as_float", "(", "fhigh", ")", "if", "fstop", "is", "None", ":", "fstop", "=", "(", "flow", "*", "2", "/", "3.", ",", "min", "(", "fhigh", "*", "1.5", ",", "sample_rate", "/", "2.", ")", ")", "fstop", "=", "(", "_as_float", "(", "fstop", "[", "0", "]", ")", ",", "_as_float", "(", "fstop", "[", "1", "]", ")", ")", "if", "type", "==", "'iir'", ":", "return", "_design_iir", "(", "(", "flow", ",", "fhigh", ")", ",", "fstop", ",", "sample_rate", ",", "gpass", ",", "gstop", ",", "*", "*", "kwargs", ")", "return", "_design_fir", "(", "(", "flow", ",", "fhigh", ")", ",", "fstop", ",", "sample_rate", ",", "gpass", ",", "gstop", ",", "pass_zero", "=", "False", ",", "*", "*", "kwargs", ")" ]
Design a band-pass filter for the given cutoff frequencies Parameters ---------- flow : `float` lower corner frequency of pass band fhigh : `float` upper corner frequency of pass band sample_rate : `float` sampling rate of target data fstop : `tuple` of `float`, optional `(low, high)` edge-frequencies of stop band gpass : `float`, optional, default: 2 the maximum loss in the passband (dB) gstop : `float`, optional, default: 30 the minimum attenuation in the stopband (dB) type : `str`, optional, default: ``'iir'`` the filter type, either ``'iir'`` or ``'fir'`` **kwargs other keyword arguments are passed directly to :func:`~scipy.signal.iirdesign` or :func:`~scipy.signal.firwin` Returns ------- filter the formatted filter. the output format for an IIR filter depends on the input arguments, default is a tuple of `(zeros, poles, gain)` Notes ----- By default a digital filter is returned, meaning the zeros and poles are given in the Z-domain in units of radians/sample. Examples -------- To create a band-pass filter for 100-1000 Hz for 4096 Hz-sampled data: >>> from gwpy.signal.filter_design import bandpass >>> bp = bandpass(100, 1000, 4096) To view the filter, you can use the `~gwpy.plot.BodePlot`: >>> from gwpy.plot import BodePlot >>> plot = BodePlot(bp, sample_rate=4096) >>> plot.show()
[ "Design", "a", "band", "-", "pass", "filter", "for", "the", "given", "cutoff", "frequencies" ]
python
train
helixyte/everest
everest/ini.py
https://github.com/helixyte/everest/blob/70c9b93c3061db5cb62428349d18b8fb8566411b/everest/ini.py#L32-L44
def options(self, parser, env=None): """ Adds command-line options for this plugin. """ if env is None: env = os.environ env_opt_name = 'NOSE_%s' % self.__dest_opt_name.upper() parser.add_option("--%s" % self.__opt_name, dest=self.__dest_opt_name, type="string", default=env.get(env_opt_name), help=".ini file providing the environment for the " "test web application.")
[ "def", "options", "(", "self", ",", "parser", ",", "env", "=", "None", ")", ":", "if", "env", "is", "None", ":", "env", "=", "os", ".", "environ", "env_opt_name", "=", "'NOSE_%s'", "%", "self", ".", "__dest_opt_name", ".", "upper", "(", ")", "parser", ".", "add_option", "(", "\"--%s\"", "%", "self", ".", "__opt_name", ",", "dest", "=", "self", ".", "__dest_opt_name", ",", "type", "=", "\"string\"", ",", "default", "=", "env", ".", "get", "(", "env_opt_name", ")", ",", "help", "=", "\".ini file providing the environment for the \"", "\"test web application.\"", ")" ]
Adds command-line options for this plugin.
[ "Adds", "command", "-", "line", "options", "for", "this", "plugin", "." ]
python
train
BlueBrain/NeuroM
neurom/morphmath.py
https://github.com/BlueBrain/NeuroM/blob/254bb73535b20053d175bc4725bade662177d12b/neurom/morphmath.py#L316-L325
def segment_area(seg): '''Compute the surface area of a segment. Approximated as a conical frustum. Does not include the surface area of the bounding circles. ''' r0 = seg[0][COLS.R] r1 = seg[1][COLS.R] h2 = point_dist2(seg[0], seg[1]) return math.pi * (r0 + r1) * math.sqrt((r0 - r1) ** 2 + h2)
[ "def", "segment_area", "(", "seg", ")", ":", "r0", "=", "seg", "[", "0", "]", "[", "COLS", ".", "R", "]", "r1", "=", "seg", "[", "1", "]", "[", "COLS", ".", "R", "]", "h2", "=", "point_dist2", "(", "seg", "[", "0", "]", ",", "seg", "[", "1", "]", ")", "return", "math", ".", "pi", "*", "(", "r0", "+", "r1", ")", "*", "math", ".", "sqrt", "(", "(", "r0", "-", "r1", ")", "**", "2", "+", "h2", ")" ]
Compute the surface area of a segment. Approximated as a conical frustum. Does not include the surface area of the bounding circles.
[ "Compute", "the", "surface", "area", "of", "a", "segment", "." ]
python
train
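A standalone check of the frustum formula, with made-up end points and radii; COLS.R above is just the radius column of each point.

import math

p0, r0 = (0.0, 0.0, 0.0), 2.0
p1, r1 = (0.0, 0.0, 3.0), 1.0
h2 = sum((a - b) ** 2 for a, b in zip(p0, p1))  # squared distance = 9.0
area = math.pi * (r0 + r1) * math.sqrt((r0 - r1) ** 2 + h2)
print(area)  # pi * 3 * sqrt(10), about 29.8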
sebdah/dynamic-dynamodb
dynamic_dynamodb/calculators.py
https://github.com/sebdah/dynamic-dynamodb/blob/bfd0ca806b1c3301e724696de90ef0f973410493/dynamic_dynamodb/calculators.py#L118-L151
def decrease_writes_in_units(
        current_provisioning, units, min_provisioned_writes, log_tag):
    """ Decrease ``current_provisioning`` by ``units`` units

    :type current_provisioning: int
    :param current_provisioning: The current provisioning
    :type units: int
    :param units: How many units should we decrease with
    :type min_provisioned_writes: int
    :param min_provisioned_writes: Configured min provisioned writes
    :type log_tag: str
    :param log_tag: Prefix for the log
    :returns: int -- New provisioning value
    """
    updated_provisioning = int(current_provisioning) - int(units)
    min_provisioned_writes = __get_min_writes(
        current_provisioning,
        min_provisioned_writes,
        log_tag)

    if updated_provisioning < min_provisioned_writes:
        logger.info(
            '{0} - Reached provisioned writes min limit: {1:d}'.format(
                log_tag,
                int(min_provisioned_writes)))

        return min_provisioned_writes

    logger.debug(
        '{0} - Write provisioning will be decreased to {1:d} units'.format(
            log_tag,
            int(updated_provisioning)))

    return updated_provisioning
[ "def", "decrease_writes_in_units", "(", "current_provisioning", ",", "units", ",", "min_provisioned_writes", ",", "log_tag", ")", ":", "updated_provisioning", "=", "int", "(", "current_provisioning", ")", "-", "int", "(", "units", ")", "min_provisioned_writes", "=", "__get_min_writes", "(", "current_provisioning", ",", "min_provisioned_writes", ",", "log_tag", ")", "if", "updated_provisioning", "<", "min_provisioned_writes", ":", "logger", ".", "info", "(", "'{0} - Reached provisioned writes min limit: {1:d}'", ".", "format", "(", "log_tag", ",", "int", "(", "min_provisioned_writes", ")", ")", ")", "return", "min_provisioned_writes", "logger", ".", "debug", "(", "'{0} - Write provisioning will be decreased to {1:d} units'", ".", "format", "(", "log_tag", ",", "int", "(", "updated_provisioning", ")", ")", ")", "return", "updated_provisioning" ]
Decrease ``current_provisioning`` by ``units`` units :type current_provisioning: int :param current_provisioning: The current provisioning :type units: int :param units: How many units should we decrease with :type min_provisioned_writes: int :param min_provisioned_writes: Configured min provisioned writes :type log_tag: str :param log_tag: Prefix for the log :returns: int -- New provisioning value
[ "Decrease", "the", "current_provisioning", "with", "units", "units" ]
python
train
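The arithmetic without the logging, where the configured floor stands in for the __get_min_writes result.

current, units, floor = 100, 30, 50
updated = current - units          # 70, above the floor, returned as-is
print(max(updated, floor))         # 70
print(max(current - 70, floor))    # 50: a 70-unit decrease clamps to the floor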
luckydonald/pytgbot
pytgbot/api_types/receivable/payments.py
https://github.com/luckydonald/pytgbot/blob/67f4b5a1510d4583d40b5477e876b1ef0eb8971b/pytgbot/api_types/receivable/payments.py#L544-L561
def to_array(self): """ Serializes this SuccessfulPayment to a dictionary. :return: dictionary representation of this object. :rtype: dict """ array = super(SuccessfulPayment, self).to_array() array['currency'] = u(self.currency) # py2: type unicode, py3: type str array['total_amount'] = int(self.total_amount) # type int array['invoice_payload'] = u(self.invoice_payload) # py2: type unicode, py3: type str array['telegram_payment_charge_id'] = u(self.telegram_payment_charge_id) # py2: type unicode, py3: type str array['provider_payment_charge_id'] = u(self.provider_payment_charge_id) # py2: type unicode, py3: type str if self.shipping_option_id is not None: array['shipping_option_id'] = u(self.shipping_option_id) # py2: type unicode, py3: type str if self.order_info is not None: array['order_info'] = self.order_info.to_array() # type OrderInfo return array
[ "def", "to_array", "(", "self", ")", ":", "array", "=", "super", "(", "SuccessfulPayment", ",", "self", ")", ".", "to_array", "(", ")", "array", "[", "'currency'", "]", "=", "u", "(", "self", ".", "currency", ")", "# py2: type unicode, py3: type str", "array", "[", "'total_amount'", "]", "=", "int", "(", "self", ".", "total_amount", ")", "# type int", "array", "[", "'invoice_payload'", "]", "=", "u", "(", "self", ".", "invoice_payload", ")", "# py2: type unicode, py3: type str", "array", "[", "'telegram_payment_charge_id'", "]", "=", "u", "(", "self", ".", "telegram_payment_charge_id", ")", "# py2: type unicode, py3: type str", "array", "[", "'provider_payment_charge_id'", "]", "=", "u", "(", "self", ".", "provider_payment_charge_id", ")", "# py2: type unicode, py3: type str", "if", "self", ".", "shipping_option_id", "is", "not", "None", ":", "array", "[", "'shipping_option_id'", "]", "=", "u", "(", "self", ".", "shipping_option_id", ")", "# py2: type unicode, py3: type str", "if", "self", ".", "order_info", "is", "not", "None", ":", "array", "[", "'order_info'", "]", "=", "self", ".", "order_info", ".", "to_array", "(", ")", "# type OrderInfo", "return", "array" ]
Serializes this SuccessfulPayment to a dictionary. :return: dictionary representation of this object. :rtype: dict
[ "Serializes", "this", "SuccessfulPayment", "to", "a", "dictionary", "." ]
python
train
Shinichi-Nakagawa/pitchpx
pitchpx/game/inning.py
https://github.com/Shinichi-Nakagawa/pitchpx/blob/5747402a0b3416f5e910b479e100df858f0b6440/pitchpx/game/inning.py#L551-L572
def _inning_events(self, soup, inning_number, inning_id, hit_location):
    """
    Inning Events.
    :param soup: BeautifulSoup object
    :param inning_number: Inning Number
    :param inning_id: Inning Id (0: home, 1: away)
    :param hit_location: Hit location data (dict)
    """
    # at bat(batter box data) & pitching data
    out_ct = 0
    for ab in soup.find_all('atbat'):
        # plate appearance data(pa)
        at_bat = AtBat.pa(ab, self.game, self.players.rosters,
                          inning_number, inning_id, out_ct, hit_location)
        # pitching data
        pitching_stats = self._get_pitch(ab, at_bat)
        # at bat(pa result)
        pa_result = AtBat.result(ab, at_bat, pitching_stats)
        at_bat.update(pa_result)
        self.atbats.append(at_bat)
        self.pitches.extend(pitching_stats)
        # out count
        out_ct = at_bat['event_outs_ct']
[ "def", "_inning_events", "(", "self", ",", "soup", ",", "inning_number", ",", "inning_id", ",", "hit_location", ")", ":", "# at bat(batter box data) & pitching data", "out_ct", "=", "0", "for", "ab", "in", "soup", ".", "find_all", "(", "'atbat'", ")", ":", "# plate appearance data(pa)", "at_bat", "=", "AtBat", ".", "pa", "(", "ab", ",", "self", ".", "game", ",", "self", ".", "players", ".", "rosters", ",", "inning_number", ",", "inning_id", ",", "out_ct", ",", "hit_location", ")", "# pitching data", "pitching_stats", "=", "self", ".", "_get_pitch", "(", "ab", ",", "at_bat", ")", "# at bat(pa result)", "pa_result", "=", "AtBat", ".", "result", "(", "ab", ",", "at_bat", ",", "pitching_stats", ")", "at_bat", ".", "update", "(", "pa_result", ")", "self", ".", "atbats", ".", "append", "(", "at_bat", ")", "self", ".", "pitches", ".", "extend", "(", "pitching_stats", ")", "# out count", "out_ct", "=", "at_bat", "[", "'event_outs_ct'", "]" ]
Inning Events. :param soup: Beautifulsoup object :param inning_number: Inning Number :param inning_id: Inning Id(0:home, 1:away) :param hit_location: Hitlocation data(dict)
[ "Inning", "Events", ".", ":", "param", "soup", ":", "Beautifulsoup", "object", ":", "param", "inning_number", ":", "Inning", "Number", ":", "param", "inning_id", ":", "Inning", "Id", "(", "0", ":", "home", "1", ":", "away", ")", ":", "param", "hit_location", ":", "Hitlocation", "data", "(", "dict", ")" ]
python
train
phoebe-project/phoebe2
phoebe/parameters/parameters.py
https://github.com/phoebe-project/phoebe2/blob/e64b8be683977064e2d55dd1b3ac400f64c3e379/phoebe/parameters/parameters.py#L3053-L3067
def save(self, filename, incl_uniqueid=False):
    """
    Save the Parameter to a JSON-formatted ASCII file

    :parameter str filename: relative or full path to the file
    :return: filename
    :rtype: str
    """
    filename = os.path.expanduser(filename)
    f = open(filename, 'w')
    json.dump(self.to_json(incl_uniqueid=incl_uniqueid), f,
              sort_keys=True, indent=0, separators=(',', ': '))
    f.close()

    return filename
[ "def", "save", "(", "self", ",", "filename", ",", "incl_uniqueid", "=", "False", ")", ":", "filename", "=", "os", ".", "path", ".", "expanduser", "(", "filename", ")", "f", "=", "open", "(", "filename", ",", "'w'", ")", "json", ".", "dump", "(", "self", ".", "to_json", "(", "incl_uniqueid", "=", "incl_uniqueid", ")", ",", "f", ",", "sort_keys", "=", "True", ",", "indent", "=", "0", ",", "separators", "=", "(", "','", ",", "': '", ")", ")", "f", ".", "close", "(", ")", "return", "filename" ]
Save the Parameter to a JSON-formatted ASCII file :parameter str filename: relative or full path to the file :return: filename :rtype: str
[ "Save", "the", "Parameter", "to", "a", "JSON", "-", "formatted", "ASCII", "file" ]
python
train
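Usage sketch against a default PHOEBE bundle; the qualifier/component pair is just one plausible parameter to pull.

import phoebe

b = phoebe.default_binary()
p = b.get_parameter(qualifier='teff', component='primary')
print(p.save('teff.json'))  # '~' in the path would be expanded automatically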