Dataset schema:
  repo              string (length 7-55)
  path              string (length 4-223)
  url               string (length 87-315)
  code              string (length 75-104k)
  code_tokens       list
  docstring         string (length 1-46.9k)
  docstring_tokens  list
  language          string (1 class: python)
  partition         string (3 classes: train / valid / test)
  avg_line_len      float64 (7.91-980)
ray-project/ray
python/ray/monitor.py
https://github.com/ray-project/ray/blob/4eade036a0505e244c976f36aaa2d64386b5129b/python/ray/monitor.py#L102-L135
def xray_heartbeat_batch_handler(self, unused_channel, data):
    """Handle an xray heartbeat batch message from Redis."""
    gcs_entries = ray.gcs_utils.GcsTableEntry.GetRootAsGcsTableEntry(data, 0)
    heartbeat_data = gcs_entries.Entries(0)
    message = (ray.gcs_utils.HeartbeatBatchTableData.
               GetRootAsHeartbeatBatchTableData(heartbeat_data, 0))

    for j in range(message.BatchLength()):
        heartbeat_message = message.Batch(j)

        num_resources = heartbeat_message.ResourcesAvailableLabelLength()
        static_resources = {}
        dynamic_resources = {}
        for i in range(num_resources):
            dyn = heartbeat_message.ResourcesAvailableLabel(i)
            static = heartbeat_message.ResourcesTotalLabel(i)
            dynamic_resources[dyn] = (
                heartbeat_message.ResourcesAvailableCapacity(i))
            static_resources[static] = (
                heartbeat_message.ResourcesTotalCapacity(i))

        # Update the load metrics for this raylet.
        client_id = ray.utils.binary_to_hex(heartbeat_message.ClientId())
        ip = self.raylet_id_to_ip_map.get(client_id)
        if ip:
            self.load_metrics.update(ip, static_resources, dynamic_resources)
        else:
            logger.warning(
                "Monitor: "
                "could not find ip for client {}".format(client_id))
Handle an xray heartbeat batch message from Redis.
python
train
44.088235
mitsei/dlkit
dlkit/aws_adapter/osid/queries.py
https://github.com/mitsei/dlkit/blob/445f968a175d61c8d92c0f617a3c17dc1dc7c584/dlkit/aws_adapter/osid/queries.py#L119-L132
def _match_maximum_decimal(self, match_key, decimal_value, match):
    """Matches a maximum decimal value"""
    if decimal_value is None:
        raise NullArgument()
    if match is None:
        match = True
    if match:
        ltegt = '$lte'
    else:
        ltegt = '$gt'
    if match_key in self._query_terms:
        self._query_terms[match_key][ltegt] = decimal_value
    else:
        self._query_terms[match_key] = {ltegt: decimal_value}
Matches a maximum decimal value
python
train
34.928571
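A short illustration of the update pattern above. This is a hedged, standalone sketch (the apply_max helper and the terms dict are hypothetical, not part of dlkit); it shows how successive calls compose a single Mongo-style range filter under one key:

    def apply_max(query_terms, match_key, decimal_value, match=True):
        # '$lte' keeps values <= bound; '$gt' excludes them, as in the method above
        op = '$lte' if match else '$gt'
        query_terms.setdefault(match_key, {})[op] = decimal_value

    terms = {}
    apply_max(terms, 'score', 80.0, match=True)   # match values <= 80
    apply_max(terms, 'score', 10.0, match=False)  # exclude values <= 10
    print(terms)  # {'score': {'$lte': 80.0, '$gt': 10.0}}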
lesscpy/lesscpy
lesscpy/lessc/scope.py
https://github.com/lesscpy/lesscpy/blob/51e392fb4a3cd4ccfb6175e0e42ce7d2f6b78126/lesscpy/lessc/scope.py#L91-L107
def variables(self, name):
    """Search for variable by name. Searches scope top down

    Args:
        name (string): Search term
    Returns:
        Variable object OR False
    """
    if isinstance(name, tuple):
        name = name[0]
    if name.startswith('@{'):
        name = '@' + name[2:-1]
    i = len(self)
    while i >= 0:
        i -= 1
        if name in self[i]['__variables__']:
            return self[i]['__variables__'][name]
    return False
Search for variable by name. Searches scope top down Args: name (string): Search term Returns: Variable object OR False
python
valid
30.176471
pjuren/pyokit
src/pyokit/statistics/beta.py
https://github.com/pjuren/pyokit/blob/fddae123b5d817daa39496183f19c000d9c3791f/src/pyokit/statistics/beta.py#L84-L103
def reg_incomplete_beta(a, b, x):
    """
    Incomplete beta function; code translated from: Numerical Recipes in C.

    :param a: a > 0
    :param b: b > 0
    :param x: 0 <= x <= 1.
    """
    if (x == 0):
        return 0
    elif (x == 1):
        return 1
    else:
        lbeta = (math.lgamma(a + b) - math.lgamma(a) - math.lgamma(b) +
                 a * math.log(x) + b * math.log(1 - x))
        if (x < (a + 1) / (a + b + 2)):
            return math.exp(lbeta) * __contfractbeta(a, b, x) / a
        else:
            return 1 - math.exp(lbeta) * __contfractbeta(b, a, 1 - x) / b
Incomplete beta function; code translated from: Numerical Recipes in C. :param a: a > 0 :param b: b > 0 :param x: 0 <= x <= 1.
python
train
26.15
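Two quick sanity checks for the function above, assuming reg_incomplete_beta and its __contfractbeta helper are importable from pyokit; both follow from standard identities of the regularized incomplete beta function:

    # I_x(1, 1) reduces to the uniform CDF, so the result is x itself.
    print(reg_incomplete_beta(1, 1, 0.3))  # ~0.3
    # Symmetry: I_x(a, b) + I_{1-x}(b, a) = 1.
    print(reg_incomplete_beta(2, 5, 0.4) + reg_incomplete_beta(5, 2, 0.6))  # ~1.0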
JdeRobot/base
src/drivers/MAVLinkServer/MAVProxy/mavproxy.py
https://github.com/JdeRobot/base/blob/303b18992785b2fe802212f2d758a60873007f1f/src/drivers/MAVLinkServer/MAVProxy/mavproxy.py#L122-L127
def add_input(cmd, immediate=False):
    '''add some command input to be processed'''
    if immediate:
        process_stdin(cmd)
    else:
        mpstate.input_queue.put(cmd)
add some command input to be processed
python
train
28.666667
mikedh/trimesh
trimesh/path/entities.py
https://github.com/mikedh/trimesh/blob/25e059bf6d4caa74f62ffd58ce4f61a90ee4e518/trimesh/path/entities.py#L427-L440
def explode(self):
    """
    If the current Line entity consists of multiple lines,
    break it up into n Line entities.

    Returns
    ----------
    exploded: (n,) Line entities
    """
    points = np.column_stack((
        self.points,
        self.points)).ravel()[1:-1].reshape((-1, 2))
    exploded = [Line(i) for i in points]
    return exploded
If the current Line entity consists of multiple lines, break it up into n Line entities. Returns ---------- exploded: (n,) Line entities
python
train
27.928571
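The index-pairing trick in explode is easy to see in isolation. A minimal numpy-only sketch (no trimesh required):

    import numpy as np

    # Duplicate each index, trim the first and last entries, and regroup:
    # [0, 1, 2, 3] -> [0, 0, 1, 1, 2, 2, 3, 3] -> [0, 1, 1, 2, 2, 3]
    # -> [[0, 1], [1, 2], [2, 3]], one index pair per output Line.
    points = np.array([0, 1, 2, 3])
    pairs = np.column_stack((points, points)).ravel()[1:-1].reshape((-1, 2))
    print(pairs)  # [[0 1] [1 2] [2 3]]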
opencast/pyCA
pyca/ingest.py
https://github.com/opencast/pyCA/blob/c89b168d4780d157e1b3f7676628c1b131956a88/pyca/ingest.py#L42-L105
def ingest(event):
    '''Ingest a finished recording to the Opencast server.
    '''
    # Update status
    set_service_status(Service.INGEST, ServiceStatus.BUSY)
    notify.notify('STATUS=Uploading')
    recording_state(event.uid, 'uploading')
    update_event_status(event, Status.UPLOADING)

    # Select ingest service
    # The ingest service to use is selected at random from the available
    # ingest services to ensure that not every capture agent uses the same
    # service at the same time
    service = config('service-ingest')
    service = service[randrange(0, len(service))]
    logger.info('Selecting ingest service to use: ' + service)

    # create mediapackage
    logger.info('Creating new mediapackage')
    mediapackage = http_request(service + '/createMediaPackage')

    # extract workflow_def, workflow_config and add DC catalogs
    prop = 'org.opencastproject.capture.agent.properties'
    dcns = 'http://www.opencastproject.org/xsd/1.0/dublincore/'
    for attachment in event.get_data().get('attach'):
        data = attachment.get('data')
        if attachment.get('x-apple-filename') == prop:
            workflow_def, workflow_config = get_config_params(data)

        # Check for dublincore catalogs
        elif attachment.get('fmttype') == 'application/xml' and dcns in data:
            name = attachment.get('x-apple-filename', '').rsplit('.', 1)[0]
            logger.info('Adding %s DC catalog' % name)
            fields = [('mediaPackage', mediapackage),
                      ('flavor', 'dublincore/%s' % name),
                      ('dublinCore', data.encode('utf-8'))]
            mediapackage = http_request(service + '/addDCCatalog', fields)

    # add track
    for (flavor, track) in event.get_tracks():
        logger.info('Adding track ({0} -> {1})'.format(flavor, track))
        track = track.encode('ascii', 'ignore')
        fields = [('mediaPackage', mediapackage),
                  ('flavor', flavor),
                  ('BODY1', (pycurl.FORM_FILE, track))]
        mediapackage = http_request(service + '/addTrack', fields)

    # ingest
    logger.info('Ingest recording')
    fields = [('mediaPackage', mediapackage)]
    if workflow_def:
        fields.append(('workflowDefinitionId', workflow_def))
    if event.uid:
        fields.append(('workflowInstanceId',
                       event.uid.encode('ascii', 'ignore')))
    fields += workflow_config
    mediapackage = http_request(service + '/ingest', fields)

    # Update status
    recording_state(event.uid, 'upload_finished')
    update_event_status(event, Status.FINISHED_UPLOADING)
    notify.notify('STATUS=Running')
    set_service_status_immediate(Service.INGEST, ServiceStatus.IDLE)

    logger.info('Finished ingest')
Ingest a finished recording to the Opencast server.
python
test
41.6875
abseil/abseil-py
absl/logging/__init__.py
https://github.com/abseil/abseil-py/blob/9d73fdaa23a6b6726aa5731390f388c0c6250ee5/absl/logging/__init__.py#L284-L304
def set_stderrthreshold(s):
    """Sets the stderr threshold to the value passed in.

    Args:
      s: str|int, valid string values are case-insensitive 'debug',
          'info', 'warning', 'error', and 'fatal'; valid integer values are
          logging.DEBUG|INFO|WARNING|ERROR|FATAL.

    Raises:
        ValueError: Raised when s is an invalid value.
    """
    if s in converter.ABSL_LEVELS:
        FLAGS.stderrthreshold = converter.ABSL_LEVELS[s]
    elif isinstance(s, str) and s.upper() in converter.ABSL_NAMES:
        FLAGS.stderrthreshold = s
    else:
        raise ValueError(
            'set_stderrthreshold only accepts integer absl logging level '
            'from -3 to 1, or case-insensitive string values '
            "'debug', 'info', 'warning', 'error', and 'fatal'. "
            'But found "{}" ({}).'.format(s, type(s)))
Sets the stderr threshold to the value passed in. Args: s: str|int, valid string values are case-insensitive 'debug', 'info', 'warning', 'error', and 'fatal'; valid integer values are logging.DEBUG|INFO|WARNING|ERROR|FATAL. Raises: ValueError: Raised when s is an invalid value.
python
train
37.285714
pymupdf/PyMuPDF
examples/colordbRGB.py
https://github.com/pymupdf/PyMuPDF/blob/917f2d83482510e26ba0ff01fd2392c26f3a8e90/examples/colordbRGB.py#L24-L27
def sortkey(x):
    """Return '001002003' for (colorname, 1, 2, 3)"""
    k = str(x[1]).zfill(3) + str(x[2]).zfill(3) + str(x[3]).zfill(3)
    return k
Return '001002003' for (colorname, 1, 2, 3)
python
train
37
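Example usage, with the function repeated so the snippet runs standalone; the color table is made up for illustration:

    def sortkey(x):
        """Return '001002003' for (colorname, 1, 2, 3)"""
        return str(x[1]).zfill(3) + str(x[2]).zfill(3) + str(x[3]).zfill(3)

    colors = [("white", 255, 255, 255), ("black", 0, 0, 0), ("gray", 128, 128, 128)]
    print(sorted(colors, key=sortkey))
    # [('black', 0, 0, 0), ('gray', 128, 128, 128), ('white', 255, 255, 255)]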
fake-name/ChromeController
ChromeController/Generator/Generated.py
https://github.com/fake-name/ChromeController/blob/914dd136184e8f1165c7aa6ef30418aaf10c61f0/ChromeController/Generator/Generated.py#L5170-L5195
def ServiceWorker_deliverPushMessage(self, origin, registrationId, data):
    """
    Function path: ServiceWorker.deliverPushMessage
        Domain: ServiceWorker
        Method name: deliverPushMessage

        Parameters:
            Required arguments:
                'origin' (type: string) -> No description
                'registrationId' (type: string) -> No description
                'data' (type: string) -> No description
        No return value.
    """
    assert isinstance(origin, (str,)), \
        "Argument 'origin' must be of type '['str']'. Received type: '%s'" % type(origin)
    assert isinstance(registrationId, (str,)), \
        "Argument 'registrationId' must be of type '['str']'. Received type: '%s'" % type(registrationId)
    assert isinstance(data, (str,)), \
        "Argument 'data' must be of type '['str']'. Received type: '%s'" % type(data)
    subdom_funcs = self.synchronous_command('ServiceWorker.deliverPushMessage',
        origin=origin, registrationId=registrationId, data=data)
    return subdom_funcs
Function path: ServiceWorker.deliverPushMessage Domain: ServiceWorker Method name: deliverPushMessage Parameters: Required arguments: 'origin' (type: string) -> No description 'registrationId' (type: string) -> No description 'data' (type: string) -> No description No return value.
python
train
37
libtcod/python-tcod
tcod/noise.py
https://github.com/libtcod/python-tcod/blob/8ba10c5cfb813eaf3e834de971ba2d6acb7838e4/tcod/noise.py#L162-L196
def sample_mgrid(self, mgrid: np.array) -> np.array:
    """Sample a mesh-grid array and return the result.

    The :any:`sample_ogrid` method performs better as there is a lot of
    overhead when working with large mesh-grids.

    Args:
        mgrid (numpy.ndarray): A mesh-grid array of points to sample.
            A contiguous array of type `numpy.float32` is preferred.

    Returns:
        numpy.ndarray: An array of sampled points.

            This array has the shape: ``mgrid.shape[:-1]``.
            The ``dtype`` is `numpy.float32`.
    """
    mgrid = np.ascontiguousarray(mgrid, np.float32)
    if mgrid.shape[0] != self.dimensions:
        raise ValueError(
            "mgrid.shape[0] must equal self.dimensions, "
            "%r[0] != %r" % (mgrid.shape, self.dimensions)
        )
    out = np.ndarray(mgrid.shape[1:], np.float32)
    if mgrid.shape[1:] != out.shape:
        raise ValueError(
            "mgrid.shape[1:] must equal out.shape, "
            "%r[1:] != %r" % (mgrid.shape, out.shape)
        )
    lib.NoiseSampleMeshGrid(
        self._tdl_noise_c,
        out.size,
        ffi.cast("float*", mgrid.ctypes.data),
        ffi.cast("float*", out.ctypes.data),
    )
    return out
Sample a mesh-grid array and return the result. The :any:`sample_ogrid` method performs better as there is a lot of overhead when working with large mesh-grids. Args: mgrid (numpy.ndarray): A mesh-grid array of points to sample. A contiguous array of type `numpy.float32` is preferred. Returns: numpy.ndarray: An array of sampled points. This array has the shape: ``mgrid.shape[:-1]``. The ``dtype`` is `numpy.float32`.
python
train
37.4
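A sketch of the expected input layout. Only the numpy part is exercised here; the Noise construction is left commented since it needs a python-tcod install, and dimensions=2 is an assumption for the example:

    import numpy as np

    # np.mgrid yields shape (dimensions, *grid_shape), matching the
    # mgrid.shape[0] == self.dimensions check in sample_mgrid.
    mgrid = np.mgrid[0:4, 0:4].astype(np.float32) * 0.25  # shape (2, 4, 4)
    # import tcod.noise
    # noise = tcod.noise.Noise(dimensions=2)
    # samples = noise.sample_mgrid(mgrid)  # shape (4, 4), dtype float32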
SheffieldML/GPyOpt
GPyOpt/acquisitions/MPI_mcmc.py
https://github.com/SheffieldML/GPyOpt/blob/255539dc5927819ca701e44fe3d76cd4864222fa/GPyOpt/acquisitions/MPI_mcmc.py#L41-L59
def _compute_acq_withGradients(self, x):
    """
    Integrated Expected Improvement and its derivative
    """
    means, stds, dmdxs, dsdxs = self.model.predict_withGradients(x)
    fmins = self.model.get_fmin()
    f_acqu = None
    df_acqu = None
    for m, s, fmin, dmdx, dsdx in zip(means, stds, fmins, dmdxs, dsdxs):
        phi, Phi, u = get_quantiles(self.jitter, fmin, m, s)
        f = Phi
        df = -(phi/s) * (dmdx + dsdx * u)
        if f_acqu is None:
            f_acqu = f
            df_acqu = df
        else:
            f_acqu += f
            df_acqu += df
    return f_acqu/(len(means)), df_acqu/(len(means))
Integrated Expected Improvement and its derivative
python
train
36.157895
edx/opaque-keys
opaque_keys/edx/asides.py
https://github.com/edx/opaque-keys/blob/9807168660c12e0551c8fdd58fd1bc6b0bcb0a54/opaque_keys/edx/asides.py#L76-L87
def _decode_v2(value):
    """
    Decode ':' and '$' characters encoded by `_encode`.
    """
    if re.search(r'(?<!\$):', value):
        raise ValueError("Unescaped ':' in the encoded string")

    decode_colons = value.replace('$:', ':')

    if re.search(r'(?<!\$)(\$\$)*\$([^$]|\Z)', decode_colons):
        raise ValueError("Unescaped '$' in encoded string")

    return decode_colons.replace('$$', '$')
Decode ':' and '$' characters encoded by `_encode`.
python
train
33.25
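The matching encoder is not included in this record, so the round-trip below uses a plausible re-implementation of `_encode` ('$' doubled, ':' escaped as '$:'); only _decode_v2 above is the real code, and it is assumed to be in scope along with the re module:

    def _encode_v2(value):  # hypothetical inverse, for illustration only
        return value.replace('$', '$$').replace(':', '$:')

    encoded = _encode_v2('a:b$c')
    print(encoded)              # a$:b$$c
    print(_decode_v2(encoded))  # a:b$c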
limodou/uliweb
uliweb/lib/werkzeug/script.py
https://github.com/limodou/uliweb/blob/34472f25e4bc0b954a35346672f94e84ef18b076/uliweb/lib/werkzeug/script.py#L195-L219
def print_usage(actions):
    """Print the usage information. (Help screen)"""
    actions = actions.items()
    actions.sort()
    print('usage: %s <action> [<options>]' % basename(sys.argv[0]))
    print('       %s --help' % basename(sys.argv[0]))
    print()
    print('actions:')
    for name, (func, doc, arguments) in actions:
        print('  %s:' % name)
        for line in doc.splitlines():
            print('    %s' % line)
        if arguments:
            print()
        for arg, shortcut, default, argtype in arguments:
            if isinstance(default, bool):
                print('    %s' % (
                    (shortcut and '-%s, ' % shortcut or '') + '--' + arg
                ))
            else:
                print('    %-30s%-10s%s' % (
                    (shortcut and '-%s, ' % shortcut or '') + '--' + arg,
                    argtype, default
                ))
        print()
Print the usage information. (Help screen)
python
train
35.6
happyleavesaoc/python-snapcast
snapcast/client/gstreamer.py
https://github.com/happyleavesaoc/python-snapcast/blob/9b3c483358677327c7fd6d0666bf474c19d87f19/snapcast/client/gstreamer.py#L59-L61
def push(self, buf):
    """ Push a buffer into the source. """
    self._src.emit('push-buffer', Gst.Buffer.new_wrapped(buf))
Push a buffer into the source.
python
train
44
Yelp/kafka-utils
kafka_utils/util/monitoring.py
https://github.com/Yelp/kafka-utils/blob/cdb4d64308f3079ee0873250bf7b34d0d94eca50/kafka_utils/util/monitoring.py#L124-L147
def get_watermark_for_topic(
    kafka_client,
    topic,
):
    """This method:
        * refreshes metadata for the kafka client
        * fetches watermarks

    :param kafka_client: KafkaToolClient instance
    :param topic: the topic
    :returns: dict <topic>: [ConsumerPartitionOffsets]
    """
    # Refresh client metadata. We do not use the topic list, because we
    # don't want to accidentally create the topic if it does not exist.
    # If Kafka is unavailable, let's retry loading client metadata
    try:
        kafka_client.load_metadata_for_topics()
    except KafkaUnavailableError:
        kafka_client.load_metadata_for_topics()

    watermarks = get_topics_watermarks(
        kafka_client, [topic]
    )

    return watermarks
This method: * refreshes metadata for the kafka client * fetches watermarks :param kafka_client: KafkaToolClient instance :param topic: the topic :returns: dict <topic>: [ConsumerPartitionOffsets]
python
train
30.291667
Thermondo/viewflow-extensions
viewflow_extensions/views.py
https://github.com/Thermondo/viewflow-extensions/blob/5d2bbfe28ced7dda3e6832b96ea031c1b871053e/viewflow_extensions/views.py#L47-L51
def save_task(self):
    """Transition to save the task and return to ``ASSIGNED`` state."""
    task = self.request.activation.task
    task.status = STATUS.ASSIGNED
    task.save()
Transition to save the task and return to ``ASSIGNED`` state.
python
train
38.8
DataBiosphere/dsub
dsub/lib/dsub_util.py
https://github.com/DataBiosphere/dsub/blob/443ce31daa6023dc2fd65ef2051796e19d18d5a7/dsub/lib/dsub_util.py#L55-L76
def replace_print(fileobj=sys.stderr):
    """Sys.out replacer, by default with stderr.

    Use it like this:
      with replace_print_with(fileobj):
        print "hello"  # writes to the file
        print "done"   # prints to stdout

    Args:
      fileobj: a file object to replace stdout.

    Yields:
      The printer.
    """
    printer = _Printer(fileobj)

    previous_stdout = sys.stdout
    sys.stdout = printer
    try:
        yield printer
    finally:
        sys.stdout = previous_stdout
Sys.out replacer, by default with stderr. Use it like this: with replace_print_with(fileobj): print "hello" # writes to the file print "done" # prints to stdout Args: fileobj: a file object to replace stdout. Yields: The printer.
python
valid
19.954545
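replace_print is a generator, so it is presumably wrapped with @contextlib.contextmanager in dsub. A self-contained analogue of the same stdout-swapping pattern (the names here are made up, and a plain StringIO stands in for _Printer):

    import contextlib
    import io
    import sys

    @contextlib.contextmanager
    def redirect_stdout_to(fileobj):
        previous_stdout = sys.stdout
        sys.stdout = fileobj
        try:
            yield fileobj
        finally:
            sys.stdout = previous_stdout  # always restored, even on error

    buf = io.StringIO()
    with redirect_stdout_to(buf):
        print("hello")           # captured in buf
    print("done")                # back on the real stdout
    print(repr(buf.getvalue()))  # 'hello\n'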
dereneaton/ipyrad
ipyrad/analysis/raxml.py
https://github.com/dereneaton/ipyrad/blob/5eeb8a178160f45faf71bf47cec4abe998a575d1/ipyrad/analysis/raxml.py#L151-L207
def run(self,
        ipyclient=None,
        quiet=False,
        force=False,
        block=False,
        ):
    """
    Submits raxml job to run. If no ipyclient object is provided then
    the function will block until the raxml run is finished. If an
    ipyclient is provided then the job is sent to a remote engine and an
    asynchronous result object is returned which can be queried or awaited
    until it finishes.

    Parameters
    -----------
    ipyclient:
        Not yet supported...
    quiet:
        suppress print statements
    force:
        overwrite existing results files with this job name.
    block:
        will block progress in notebook until job finishes, even if job
        is running on a remote ipyclient.
    """
    ## stop before trying in raxml
    if force:
        for key, oldfile in self.trees:
            if os.path.exists(oldfile):
                os.remove(oldfile)
    if os.path.exists(self.trees.info):
        print("Error: set a new name for this job or use Force flag.\nFile exists: {}"\
              .format(self.trees.info))
        return

    ## TODO: add a progress bar tracker here. It could even read it from
    ## the info file that is being written.
    ## submit it
    if not ipyclient:
        proc = _call_raxml(self._command_list)
        self.stdout = proc[0]
        self.stderr = proc[1]
    else:
        ## find all hosts and submit job to the host with most available engines
        lbview = ipyclient.load_balanced_view()
        self.async = lbview.apply(_call_raxml, self._command_list)

    ## initiate random seed
    if not quiet:
        if not ipyclient:
            ## look for errors
            if "Overall execution time" not in self.stdout:
                print("Error in raxml run\n" + self.stdout)
            else:
                print("job {} finished successfully".format(self.params.n))
        else:
            print("job {} submitted to cluster".format(self.params.n))
Submits raxml job to run. If no ipyclient object is provided then the function will block until the raxml run is finished. If an ipyclient is provided then the job is sent to a remote engine and an asynchronous result object is returned which can be queried or awaited until it finishes. Parameters ----------- ipyclient: Not yet supported... quiet: suppress print statements force: overwrite existing results files with this job name. block: will block progress in notebook until job finishes, even if job is running on a remote ipyclient.
python
valid
37.035088
phaethon/kamene
kamene/utils6.py
https://github.com/phaethon/kamene/blob/11d4064844f4f68ac5d7546f5633ac7d02082914/kamene/utils6.py#L400-L428
def in6_getLocalUniquePrefix():
    """
    Returns a pseudo-randomly generated Local Unique prefix. Function
    follows recommendation of Section 3.2.2 of RFC 4193 for prefix
    generation.
    """
    # Extracted from RFC 1305 (NTP) :
    # NTP timestamps are represented as a 64-bit unsigned fixed-point number,
    # in seconds relative to 0h on 1 January 1900. The integer part is in the
    # first 32 bits and the fraction part in the last 32 bits.

    # epoch = (1900, 1, 1, 0, 0, 0, 5, 1, 0)
    # x = time.time()
    # from time import gmtime, strftime, gmtime, mktime
    # delta = mktime(gmtime(0)) - mktime(self.epoch)
    # x = x-delta

    tod = time.time()  # time of day. Will bother with epoch later
    i = int(tod)
    j = int((tod - i)*(2**32))
    tod = struct.pack("!II", i, j)
    # TODO: Add some check regarding system address gathering
    rawmac = get_if_raw_hwaddr(conf.iface6)
    mac = b":".join(map(lambda x: b"%.02x" % ord(x), list(rawmac)))
    # construct modified EUI-64 ID
    eui64 = inet_pton(socket.AF_INET6, '::' + in6_mactoifaceid(mac))[8:]
    import sha
    globalid = sha.new(tod + eui64).digest()[:5]
    return inet_ntop(socket.AF_INET6, b'\xfd' + globalid + b'\x00'*10)
Returns a pseudo-randomly generated Local Unique prefix. Function follows recommendation of Section 3.2.2 of RFC 4193 for prefix generation.
python
train
41.103448
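The `sha` module imported above is Python 2-only; on Python 3 the last steps would use hashlib instead. A standalone sketch of the RFC 4193 prefix construction with stand-in inputs (kamene derives eui64 from the interface MAC, which is faked here):

    import hashlib
    import struct
    import time

    i = int(time.time())
    tod = struct.pack("!II", i, 0)                    # NTP-style timestamp stand-in
    eui64 = b"\x02\x00\x00\xff\xfe\x00\x00\x01"       # fake modified EUI-64 ID
    globalid = hashlib.sha1(tod + eui64).digest()[:5]
    prefix = b'\xfd' + globalid                       # fd00::/8 Unique Local prefix
    print(prefix.hex())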
twitterdev/search-tweets-python
searchtweets/result_stream.py
https://github.com/twitterdev/search-tweets-python/blob/7875afb4f3ee125a9fdcf2e50b5ae761da5f46b5/searchtweets/result_stream.py#L276-L308
def collect_results(rule, max_results=500, result_stream_args=None):
    """
    Utility function to quickly get a list of tweets from a ``ResultStream``
    without keeping the object around. Requires your args to be configured
    prior to using.

    Args:
        rule (str): valid powertrack rule for your account, preferably
            generated by the `gen_rule_payload` function.
        max_results (int): maximum number of tweets or counts to return from
            the API / underlying ``ResultStream`` object.
        result_stream_args (dict): configuration dict that has connection
            information for a ``ResultStream`` object.

    Returns:
        list of results

    Example:
        >>> from searchtweets import collect_results
        >>> tweets = collect_results(rule,
                                     max_results=500,
                                     result_stream_args=search_args)
    """
    if result_stream_args is None:
        logger.error("This function requires a configuration dict for the "
                     "inner ResultStream object.")
        raise KeyError

    rs = ResultStream(rule_payload=rule,
                      max_results=max_results,
                      **result_stream_args)
    return list(rs.stream())
Utility function to quickly get a list of tweets from a ``ResultStream`` without keeping the object around. Requires your args to be configured prior to using. Args: rule (str): valid powertrack rule for your account, preferably generated by the `gen_rule_payload` function. max_results (int): maximum number of tweets or counts to return from the API / underlying ``ResultStream`` object. result_stream_args (dict): configuration dict that has connection information for a ``ResultStream`` object. Returns: list of results Example: >>> from searchtweets import collect_results >>> tweets = collect_results(rule, max_results=500, result_stream_args=search_args)
python
train
37.424242
KxSystems/pyq
src/pyq/magic.py
https://github.com/KxSystems/pyq/blob/ad7b807abde94615a7344aaa930bb01fb1552cc5/src/pyq/magic.py#L23-L38
def logical_lines(lines):
    """Merge lines into chunks according to q rules"""
    if isinstance(lines, string_types):
        lines = StringIO(lines)
    buf = []
    for line in lines:
        if buf and not line.startswith(' '):
            chunk = ''.join(buf).strip()
            if chunk:
                yield chunk
            buf[:] = []
        buf.append(line)
    chunk = ''.join(buf).strip()
    if chunk:
        yield chunk
Merge lines into chunks according to q rules
python
train
26.625
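Example of the chunking rule, assuming logical_lines and its StringIO/string_types helpers from pyq.magic are in scope: q continuation lines begin with a space, so the indented line folds into the preceding chunk.

    src = "f:{x+y}\n  +1\ng:{x*y}\n"
    print(list(logical_lines(src)))
    # ['f:{x+y}\n  +1', 'g:{x*y}']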
saltstack/salt
salt/minion.py
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/minion.py#L2380-L2393
def _mine_send(self, tag, data):
    '''
    Send mine data to the master
    '''
    channel = salt.transport.client.ReqChannel.factory(self.opts)
    data['tok'] = self.tok
    try:
        ret = channel.send(data)
        return ret
    except SaltReqTimeoutError:
        log.warning('Unable to send mine data to master.')
        return None
    finally:
        channel.close()
Send mine data to the master
python
train
30.142857
wbond/asn1crypto
asn1crypto/core.py
https://github.com/wbond/asn1crypto/blob/ecda20176f55d37021cbca1f6da9083a8e491197/asn1crypto/core.py#L953-L968
def dump(self, force=False):
    """
    Encodes the value using DER

    :param force:
        If the encoded contents already exist, clear them and regenerate
        to ensure they are in DER format instead of BER format

    :return:
        A byte string of the DER-encoded value
    """
    if self._parsed is None:
        self.parse()
    return self._parsed[0].dump(force=force)
Encodes the value using DER :param force: If the encoded contents already exist, clear them and regenerate to ensure they are in DER format instead of BER format :return: A byte string of the DER-encoded value
python
train
26.125
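For a concrete feel of the dump contract, a minimal example with a simple asn1crypto type (this uses core.Integer rather than the parsed wrapper class the method above belongs to):

    from asn1crypto import core

    der = core.Integer(128).dump()
    # tag 0x02, length 2, big-endian two's complement (leading 0x00 keeps 0x80 positive)
    print(der.hex())  # 02020080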
python-openxml/python-docx
docx/styles/styles.py
https://github.com/python-openxml/python-docx/blob/6756f6cd145511d3eb6d1d188beea391b1ddfd53/docx/styles/styles.py#L141-L153
def _get_style_id_from_style(self, style, style_type):
    """
    Return the id of *style*, or |None| if it is the default style of
    *style_type*. Raises |ValueError| if style is not of *style_type*.
    """
    if style.type != style_type:
        raise ValueError(
            "assigned style is type %s, need type %s" %
            (style.type, style_type)
        )
    if style == self.default(style_type):
        return None
    return style.style_id
Return the id of *style*, or |None| if it is the default style of *style_type*. Raises |ValueError| if style is not of *style_type*.
python
train
38.230769
twilio/twilio-python
twilio/rest/monitor/v1/__init__.py
https://github.com/twilio/twilio-python/blob/c867895f55dcc29f522e6e8b8868d0d18483132f/twilio/rest/monitor/v1/__init__.py#L29-L35
def alerts(self):
    """
    :rtype: twilio.rest.monitor.v1.alert.AlertList
    """
    if self._alerts is None:
        self._alerts = AlertList(self)
    return self._alerts
:rtype: twilio.rest.monitor.v1.alert.AlertList
python
train
27.714286
mar10/wsgidav
wsgidav/fs_dav_provider.py
https://github.com/mar10/wsgidav/blob/cec0d84222fc24bea01be1cea91729001963f172/wsgidav/fs_dav_provider.py#L278-L287
def delete(self):
    """Remove this resource or collection (recursive).

    See DAVResource.delete()
    """
    if self.provider.readonly:
        raise DAVError(HTTP_FORBIDDEN)
    shutil.rmtree(self._file_path, ignore_errors=False)
    self.remove_all_properties(True)
    self.remove_all_locks(True)
Remove this resource or collection (recursive). See DAVResource.delete()
python
valid
32.8
boriel/zxbasic
arch/zx48k/optimizer.py
https://github.com/boriel/zxbasic/blob/23b28db10e41117805bdb3c0f78543590853b132/arch/zx48k/optimizer.py#L537-L546
def eq(self, r1, r2):
    """ True if values of r1 and r2 registers are equal
    """
    if not is_register(r1) or not is_register(r2):
        return False

    if self.regs[r1] is None or self.regs[r2] is None:  # HINT: This's been never USED??
        return False

    return self.regs[r1] == self.regs[r2]
True if values of r1 and r2 registers are equal
python
train
33
tcalmant/ipopo
pelix/internals/registry.py
https://github.com/tcalmant/ipopo/blob/2f9ae0c44cd9c34ef1a9d50837b3254e75678eb1/pelix/internals/registry.py#L904-L963
def _filter_with_hooks(self, svc_event, listeners):
    """
    Filters listeners with EventListenerHooks

    :param svc_event: ServiceEvent being triggered
    :param listeners: Listeners to filter
    :return: A list of listeners with hook references
    """
    svc_ref = svc_event.get_service_reference()
    # Get EventListenerHooks service refs from registry
    hook_refs = self._registry.find_service_references(
        SERVICE_EVENT_LISTENER_HOOK
    )
    # only do something if there are some hook_refs
    if hook_refs:
        # Associate bundle context to hooks
        ctx_listeners = {}
        for listener in listeners:
            context = listener.bundle_context
            ctx_listeners.setdefault(context, []).append(listener)

        # Convert the dictionary to a shrinkable one,
        # with shrinkable lists of listeners
        shrinkable_ctx_listeners = ShrinkableMap(
            {
                context: ShrinkableList(value)
                for context, value in ctx_listeners.items()
            }
        )

        for hook_ref in hook_refs:
            if not svc_ref == hook_ref:
                # Get the bundle of the hook service
                hook_bundle = hook_ref.get_bundle()
                # lookup service from registry
                hook_svc = self._registry.get_service(hook_bundle, hook_ref)
                if hook_svc is not None:
                    # call event method of the hook service,
                    # pass in svc_event and shrinkable_ctx_listeners
                    # (which can be modified by hook)
                    try:
                        hook_svc.event(svc_event, shrinkable_ctx_listeners)
                    except:
                        self._logger.exception(
                            "Error calling EventListenerHook"
                        )
                    finally:
                        # Clean up the service
                        self._registry.unget_service(hook_bundle, hook_ref)

        # Convert the shrinkable_ctx_listeners back to a list of listeners
        # before returning
        ret_listeners = set()
        for bnd_listeners in shrinkable_ctx_listeners.values():
            ret_listeners.update(bnd_listeners)

        return ret_listeners

    # No hook ref
    return listeners
Filters listeners with EventListenerHooks :param svc_event: ServiceEvent being triggered :param listeners: Listeners to filter :return: A list of listeners with hook references
python
train
41.033333
raphaelvallat/pingouin
pingouin/external/tabulate.py
https://github.com/raphaelvallat/pingouin/blob/58b19fa4fffbfe09d58b456e3926a148249e4d9b/pingouin/external/tabulate.py#L772-L781
def _prepend_row_index(rows, index):
    """Add a left-most index column."""
    if index is None or index is False:
        return rows
    if len(index) != len(rows):
        print('index=', index)
        print('rows=', rows)
        raise ValueError('index must be as long as the number of data rows')
    rows = [[v] + list(row) for v, row in zip(index, rows)]
    return rows
Add a left-most index column.
python
train
37.2
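Example, using a trimmed copy of the function above (debug prints dropped) so the snippet runs standalone:

    def _prepend_row_index(rows, index):
        if index is None or index is False:
            return rows
        if len(index) != len(rows):
            raise ValueError('index must be as long as the number of data rows')
        return [[v] + list(row) for v, row in zip(index, rows)]

    print(_prepend_row_index([[1, 2], [3, 4]], ['a', 'b']))
    # [['a', 1, 2], ['b', 3, 4]]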
developersociety/django-glitter
glitter/publisher/models.py
https://github.com/developersociety/django-glitter/blob/2c0280ec83afee80deee94ee3934fc54239c2e87/glitter/publisher/models.py#L89-L104
def _unpublish(self):
    """
    Process an unpublish action on the related object, returns a boolean
    if a change is made.

    Only objects with a current active version will be updated.
    """
    obj = self.content_object
    actioned = False

    # Only update if needed
    if obj.current_version is not None:
        obj.current_version = None
        obj.save(update_fields=['current_version'])
        actioned = True

    return actioned
[ "def", "_unpublish", "(", "self", ")", ":", "obj", "=", "self", ".", "content_object", "actioned", "=", "False", "# Only update if needed", "if", "obj", ".", "current_version", "is", "not", "None", ":", "obj", ".", "current_version", "=", "None", "obj", ".", "save", "(", "update_fields", "=", "[", "'current_version'", "]", ")", "actioned", "=", "True", "return", "actioned" ]
Process an unpublish action on the related object, returning a boolean
        indicating whether a change was made.

        Only objects with a current active version will be updated.
[ "Process", "an", "unpublish", "action", "on", "the", "related", "object", "returns", "a", "boolean", "if", "a", "change", "is", "made", "." ]
python
train
30.0625
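The method above needs no database to follow; a minimal sketch with a hypothetical stub standing in for the Django content object (save() here is a no-op placeholder):

class StubContent:
    """Hypothetical stand-in for the related content object."""
    def __init__(self, current_version):
        self.current_version = current_version

    def save(self, update_fields=None):
        pass  # a real Django model would persist the change here

def unpublish(obj):
    # Same logic as _unpublish above: only act when a version is active.
    if obj.current_version is not None:
        obj.current_version = None
        obj.save(update_fields=['current_version'])
        return True
    return False

print(unpublish(StubContent(current_version='v3')))  # True: version cleared
print(unpublish(StubContent(current_version=None)))  # False: nothing to do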
apple/turicreate
deps/src/libxml2-2.9.1/python/libxml2.py
https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/deps/src/libxml2-2.9.1/python/libxml2.py#L7218-L7225
def validatePopElement(self, doc, elem, qname): """Pop the element end from the validation stack. """ if doc is None: doc__o = None else: doc__o = doc._o if elem is None: elem__o = None else: elem__o = elem._o ret = libxml2mod.xmlValidatePopElement(self._o, doc__o, elem__o, qname) return ret
[ "def", "validatePopElement", "(", "self", ",", "doc", ",", "elem", ",", "qname", ")", ":", "if", "doc", "is", "None", ":", "doc__o", "=", "None", "else", ":", "doc__o", "=", "doc", ".", "_o", "if", "elem", "is", "None", ":", "elem__o", "=", "None", "else", ":", "elem__o", "=", "elem", ".", "_o", "ret", "=", "libxml2mod", ".", "xmlValidatePopElement", "(", "self", ".", "_o", ",", "doc__o", ",", "elem__o", ",", "qname", ")", "return", "ret" ]
Pop the element end from the validation stack.
[ "Pop", "the", "element", "end", "from", "the", "validation", "stack", "." ]
python
train
42.625
tensorflow/cleverhans
examples/nips17_adversarial_competition/dev_toolkit/run_attacks_and_defenses.py
https://github.com/tensorflow/cleverhans/blob/97488e215760547b81afc53f5e5de8ba7da5bd98/examples/nips17_adversarial_competition/dev_toolkit/run_attacks_and_defenses.py#L321-L325
def save_target_classes(self, filename):
    """Saves target classes for all dataset images into given file."""
    with open(filename, 'w') as f:
      for k, v in self._target_classes.items():
        f.write('{0}.png,{1}\n'.format(k, v))
[ "def", "save_target_classes", "(", "self", ",", "filename", ")", ":", "with", "open", "(", "filename", ",", "'w'", ")", "as", "f", ":", "for", "k", ",", "v", "in", "self", ".", "_target_classes", ".", "items", "(", ")", ":", "f", ".", "write", "(", "'{0}.png,{1}\\n'", ".", "format", "(", "k", ",", "v", ")", ")" ]
Saves target classes for all dataset images into given file.
[ "Saves", "target", "classed", "for", "all", "dataset", "images", "into", "given", "file", "." ]
python
train
47.2
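The output format is one '<image>.png,<class>' line per entry; a self-contained sketch using a hypothetical stub in place of the report class above:

import os
import tempfile

class StubReport:
    """Hypothetical holder for the _target_classes mapping used above."""
    def __init__(self, target_classes):
        self._target_classes = target_classes

    def save_target_classes(self, filename):
        with open(filename, 'w') as f:
            for k, v in self._target_classes.items():
                f.write('{0}.png,{1}\n'.format(k, v))

path = os.path.join(tempfile.mkdtemp(), 'target_classes.csv')
StubReport({'img0': 12, 'img1': 7}).save_target_classes(path)
print(open(path).read())
# img0.png,12
# img1.png,7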
dmwm/DBS
Server/Python/src/dbs/web/DBSWriterModel.py
https://github.com/dmwm/DBS/blob/9619bafce3783b3e77f0415f8f9a258e33dd1e6f/Server/Python/src/dbs/web/DBSWriterModel.py#L254-L283
def insertBulkBlock(self):
        """
        API to insert a bulk block

        :param blockDump: Output of the block dump command
        :type blockDump: dict

        """
        try:
            body = request.body.read()
            indata = cjson.decode(body)
            if (indata.get("file_parent_list", []) and indata.get("dataset_parent_list", [])):
                dbsExceptionHandler("dbsException-invalid-input2", "insertBulkBlock: dataset and file parentages cannot be in the input at the same time",
                                    self.logger.exception, "insertBulkBlock: dataset and file parentages cannot be in the input at the same time.")
            indata = validateJSONInputNoCopy("blockBulk", indata)
            self.dbsBlockInsert.putBlock(indata)
        except cjson.DecodeError as dc:
            dbsExceptionHandler("dbsException-invalid-input2", "Wrong format/data from insert BulkBlock input",  self.logger.exception, str(dc))
        except dbsException as de:
            dbsExceptionHandler(de.eCode, de.message, self.logger.exception, de.message)
        except HTTPError as he:
            raise he
        except Exception as ex:
            #illegal variable name/number
            if str(ex).find("ORA-01036") != -1:
                dbsExceptionHandler("dbsException-invalid-input2", "illegal variable name/number from input",
                                    self.logger.exception, str(ex))
            else:
                sError = "DBSWriterModel/insertBulkBlock. %s\n. Exception trace: \n %s" \
                        % (ex, traceback.format_exc())
                dbsExceptionHandler('dbsException-server-error', dbsExceptionCode['dbsException-server-error'], self.logger.exception, sError)
[ "def", "insertBulkBlock", "(", "self", ")", ":", "try", ":", "body", "=", "request", ".", "body", ".", "read", "(", ")", "indata", "=", "cjson", ".", "decode", "(", "body", ")", "if", "(", "indata", ".", "get", "(", "\"file_parent_list\"", ",", "[", "]", ")", "and", "indata", ".", "get", "(", "\"dataset_parent_list\"", ",", "[", "]", ")", ")", ":", "dbsExceptionHandler", "(", "\"dbsException-invalid-input2\"", ",", "\"insertBulkBlock: dataset and file parentages cannot be in the input at the same time\"", ",", "self", ".", "logger", ".", "exception", ",", "\"insertBulkBlock: datset and file parentages cannot be in the input at the same time.\"", ")", "indata", "=", "validateJSONInputNoCopy", "(", "\"blockBulk\"", ",", "indata", ")", "self", ".", "dbsBlockInsert", ".", "putBlock", "(", "indata", ")", "except", "cjson", ".", "DecodeError", "as", "dc", ":", "dbsExceptionHandler", "(", "\"dbsException-invalid-input2\"", ",", "\"Wrong format/data from insert BulkBlock input\"", ",", "self", ".", "logger", ".", "exception", ",", "str", "(", "dc", ")", ")", "except", "dbsException", "as", "de", ":", "dbsExceptionHandler", "(", "de", ".", "eCode", ",", "de", ".", "message", ",", "self", ".", "logger", ".", "exception", ",", "de", ".", "message", ")", "except", "HTTPError", "as", "he", ":", "raise", "he", "except", "Exception", "as", "ex", ":", "#illegal variable name/number", "if", "str", "(", "ex", ")", ".", "find", "(", "\"ORA-01036\"", ")", "!=", "-", "1", ":", "dbsExceptionHandler", "(", "\"dbsException-invalid-input2\"", ",", "\"illegal variable name/number from input\"", ",", "self", ".", "logger", ".", "exception", ",", "str", "(", "ex", ")", ")", "else", ":", "sError", "=", "\"DBSWriterModel/insertBulkBlock. %s\\n. Exception trace: \\n %s\"", "%", "(", "ex", ",", "traceback", ".", "format_exc", "(", ")", ")", "dbsExceptionHandler", "(", "'dbsException-server-error'", ",", "dbsExceptionCode", "[", "'dbsException-server-error'", "]", ",", "self", ".", "logger", ".", "exception", ",", "sError", ")" ]
API to insert a bulk block :param blockDump: Output of the block dump command :type blockDump: dict
[ "API", "to", "insert", "a", "bulk", "block" ]
python
train
55.733333
saltstack/salt
salt/modules/serverdensity_device.py
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/serverdensity_device.py#L76-L110
def create(name, **params): ''' Function to create device in Server Density. For more info, see the `API docs`__. .. __: https://apidocs.serverdensity.com/Inventory/Devices/Creating CLI Example: .. code-block:: bash salt '*' serverdensity_device.create lama salt '*' serverdensity_device.create rich_lama group=lama_band installedRAM=32768 ''' log.debug('Server Density params: %s', params) params = _clean_salt_variables(params) params['name'] = name api_response = requests.post( 'https://api.serverdensity.io/inventory/devices/', params={'token': get_sd_auth('api_token')}, data=params ) log.debug('Server Density API Response: %s', api_response) log.debug('Server Density API Response content: %s', api_response.content) if api_response.status_code == 200: try: return salt.utils.json.loads(api_response.content) except ValueError: log.error('Could not parse API Response content: %s', api_response.content) raise CommandExecutionError( 'Failed to create, API Response: {0}'.format(api_response) ) else: return None
[ "def", "create", "(", "name", ",", "*", "*", "params", ")", ":", "log", ".", "debug", "(", "'Server Density params: %s'", ",", "params", ")", "params", "=", "_clean_salt_variables", "(", "params", ")", "params", "[", "'name'", "]", "=", "name", "api_response", "=", "requests", ".", "post", "(", "'https://api.serverdensity.io/inventory/devices/'", ",", "params", "=", "{", "'token'", ":", "get_sd_auth", "(", "'api_token'", ")", "}", ",", "data", "=", "params", ")", "log", ".", "debug", "(", "'Server Density API Response: %s'", ",", "api_response", ")", "log", ".", "debug", "(", "'Server Density API Response content: %s'", ",", "api_response", ".", "content", ")", "if", "api_response", ".", "status_code", "==", "200", ":", "try", ":", "return", "salt", ".", "utils", ".", "json", ".", "loads", "(", "api_response", ".", "content", ")", "except", "ValueError", ":", "log", ".", "error", "(", "'Could not parse API Response content: %s'", ",", "api_response", ".", "content", ")", "raise", "CommandExecutionError", "(", "'Failed to create, API Response: {0}'", ".", "format", "(", "api_response", ")", ")", "else", ":", "return", "None" ]
Function to create device in Server Density. For more info, see the `API docs`__. .. __: https://apidocs.serverdensity.com/Inventory/Devices/Creating CLI Example: .. code-block:: bash salt '*' serverdensity_device.create lama salt '*' serverdensity_device.create rich_lama group=lama_band installedRAM=32768
[ "Function", "to", "create", "device", "in", "Server", "Density", ".", "For", "more", "info", "see", "the", "API", "docs", "__", "." ]
python
train
33.771429
jobovy/galpy
galpy/potential/HenonHeilesPotential.py
https://github.com/jobovy/galpy/blob/9c5b9fe65d58835624dffe432be282060918ee08/galpy/potential/HenonHeilesPotential.py#L41-L56
def _evaluate(self,R,phi=0.,t=0.): """ NAME: _evaluate PURPOSE: evaluate the potential at R,phi,t INPUT: R - Galactocentric cylindrical radius phi - azimuth t - time OUTPUT: Phi(R,phi,t) HISTORY: 2017-10-16 - Written - Bovy (UofT) """ return 0.5*R*R*(1.+2./3.*R*numpy.sin(3.*phi))
[ "def", "_evaluate", "(", "self", ",", "R", ",", "phi", "=", "0.", ",", "t", "=", "0.", ")", ":", "return", "0.5", "*", "R", "*", "R", "*", "(", "1.", "+", "2.", "/", "3.", "*", "R", "*", "numpy", ".", "sin", "(", "3.", "*", "phi", ")", ")" ]
NAME: _evaluate PURPOSE: evaluate the potential at R,phi,t INPUT: R - Galactocentric cylindrical radius phi - azimuth t - time OUTPUT: Phi(R,phi,t) HISTORY: 2017-10-16 - Written - Bovy (UofT)
[ "NAME", ":", "_evaluate", "PURPOSE", ":", "evaluate", "the", "potential", "at", "R", "phi", "t", "INPUT", ":", "R", "-", "Galactocentric", "cylindrical", "radius", "phi", "-", "azimuth", "t", "-", "time", "OUTPUT", ":", "Phi", "(", "R", "phi", "t", ")", "HISTORY", ":", "2017", "-", "10", "-", "16", "-", "Written", "-", "Bovy", "(", "UofT", ")" ]
python
train
25.375
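The body implements the planar Henon-Heiles form Phi(R, phi) = (R**2 / 2) * (1 + (2/3) * R * sin(3 * phi)) in galpy's natural units; a standalone check of the same expression:

import numpy

def henon_heiles(R, phi=0.):
    # Same expression as _evaluate above.
    return 0.5 * R * R * (1. + 2. / 3. * R * numpy.sin(3. * phi))

print(henon_heiles(1.0, 0.0))             # 0.5 (sin term vanishes)
print(henon_heiles(1.0, numpy.pi / 6.0))  # 0.5 * (1 + 2/3) = 0.8333...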
deepmind/pysc2
pysc2/lib/renderer_human.py
https://github.com/deepmind/pysc2/blob/df4cc4b00f07a2242be9ba153d4a7f4ad2017897/pysc2/lib/renderer_human.py#L1079-L1094
def draw_commands(self, surf): """Draw the list of available commands.""" past_abilities = {act.ability for act in self._past_actions if act.ability} for y, cmd in enumerate(sorted(self._abilities( lambda c: c.name != "Smart"), key=lambda c: c.name), start=2): if self._queued_action and cmd == self._queued_action: color = colors.green elif self._queued_hotkey and cmd.hotkey.startswith(self._queued_hotkey): color = colors.green * 0.75 elif cmd.ability_id in past_abilities: color = colors.red else: color = colors.yellow hotkey = cmd.hotkey[0:3] # truncate "escape" -> "esc" surf.write_screen(self._font_large, color, (0.2, y), hotkey) surf.write_screen(self._font_large, color, (3, y), cmd.name)
[ "def", "draw_commands", "(", "self", ",", "surf", ")", ":", "past_abilities", "=", "{", "act", ".", "ability", "for", "act", "in", "self", ".", "_past_actions", "if", "act", ".", "ability", "}", "for", "y", ",", "cmd", "in", "enumerate", "(", "sorted", "(", "self", ".", "_abilities", "(", "lambda", "c", ":", "c", ".", "name", "!=", "\"Smart\"", ")", ",", "key", "=", "lambda", "c", ":", "c", ".", "name", ")", ",", "start", "=", "2", ")", ":", "if", "self", ".", "_queued_action", "and", "cmd", "==", "self", ".", "_queued_action", ":", "color", "=", "colors", ".", "green", "elif", "self", ".", "_queued_hotkey", "and", "cmd", ".", "hotkey", ".", "startswith", "(", "self", ".", "_queued_hotkey", ")", ":", "color", "=", "colors", ".", "green", "*", "0.75", "elif", "cmd", ".", "ability_id", "in", "past_abilities", ":", "color", "=", "colors", ".", "red", "else", ":", "color", "=", "colors", ".", "yellow", "hotkey", "=", "cmd", ".", "hotkey", "[", "0", ":", "3", "]", "# truncate \"escape\" -> \"esc\"", "surf", ".", "write_screen", "(", "self", ".", "_font_large", ",", "color", ",", "(", "0.2", ",", "y", ")", ",", "hotkey", ")", "surf", ".", "write_screen", "(", "self", ".", "_font_large", ",", "color", ",", "(", "3", ",", "y", ")", ",", "cmd", ".", "name", ")" ]
Draw the list of available commands.
[ "Draw", "the", "list", "of", "available", "commands", "." ]
python
train
48.6875
Xion/taipan
taipan/functional/combinators.py
https://github.com/Xion/taipan/blob/f333f0287c8bd0915182c7d5308e5f05ef0cca78/taipan/functional/combinators.py#L35-L41
def flip(f):
    """Flip the order of positional arguments of given function."""
    ensure_callable(f)

    result = lambda *args, **kwargs: f(*reversed(args), **kwargs)
    functools.update_wrapper(result, f, ('__name__', '__module__'))
    return result
[ "def", "flip", "(", "f", ")", ":", "ensure_callable", "(", "f", ")", "result", "=", "lambda", "*", "args", ",", "*", "*", "kwargs", ":", "f", "(", "*", "reversed", "(", "args", ")", ",", "*", "*", "kwargs", ")", "functools", ".", "update_wrapper", "(", "result", ",", "f", ",", "(", "'__name__'", ",", "'__module__'", ")", ")", "return", "result" ]
Flip the order of positional arguments of given function.
[ "Flip", "the", "order", "of", "positonal", "arguments", "of", "given", "function", "." ]
python
train
35.571429
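A short usage sketch, assuming taipan is installed (the import path follows the record above):

from taipan.functional.combinators import flip

def divide(a, b):
    return a / b

rdiv = flip(divide)
print(divide(10, 2))   # 5.0
print(rdiv(10, 2))     # 0.2 -- positional arguments are reversed
print(rdiv.__name__)   # 'divide' -- metadata copied by functools.update_wrapper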
saltstack/salt
salt/modules/k8s.py
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/k8s.py#L777-L811
def delete_secret(namespace, name, apiserver_url=None, force=True):
    '''
    .. versionadded:: 2016.3.0

    Delete kubernetes secret in the defined namespace. Namespace is the mandatory parameter as well as name.

    CLI Example:

    .. code-block:: bash

        salt '*' k8s.delete_secret namespace_name secret_name

        salt '*' k8s.delete_secret namespace_name secret_name http://kube-master.cluster.local

    '''
    ret = {'name': name, 'result': True, 'comment': '', 'changes': {}}

    # Try to get kubernetes master
    apiserver_url = _guess_apiserver(apiserver_url)
    if apiserver_url is None:
        return False

    # we need namespace to delete secret in it
    if not _get_namespaces(apiserver_url, namespace):
        return {'name': name,
                'result': False,
                'comment': "Namespace doesn't exist, can't delete anything there",
                'changes': {}}

    url = "{0}/api/v1/namespaces/{1}/secrets/{2}".format(apiserver_url,
                                                         namespace, name)
    res = http.query(url, method='DELETE')
    if res.get('body'):
        ret['comment'] = "Removed secret {0} in {1} namespace".format(name,
                                                                      namespace)
    return ret
[ "def", "delete_secret", "(", "namespace", ",", "name", ",", "apiserver_url", "=", "None", ",", "force", "=", "True", ")", ":", "ret", "=", "{", "'name'", ":", "name", ",", "'result'", ":", "True", ",", "'comment'", ":", "''", ",", "'changes'", ":", "{", "}", "}", "# Try to get kubernetes master", "apiserver_url", "=", "_guess_apiserver", "(", "apiserver_url", ")", "if", "apiserver_url", "is", "None", ":", "return", "False", "# we need namespace to delete secret in it", "if", "not", "_get_namespaces", "(", "apiserver_url", ",", "namespace", ")", ":", "return", "{", "'name'", ":", "name", ",", "'result'", ":", "False", ",", "'comment'", ":", "\"Namespace doesn't exists, can't delete anything there\"", ",", "'changes'", ":", "{", "}", "}", "url", "=", "\"{0}/api/v1/namespaces/{1}/secrets/{2}\"", ".", "format", "(", "apiserver_url", ",", "namespace", ",", "name", ")", "res", "=", "http", ".", "query", "(", "url", ",", "method", "=", "'DELETE'", ")", "if", "res", ".", "get", "(", "'body'", ")", ":", "ret", "[", "'comment'", "]", "=", "\"Removed secret {0} in {1} namespace\"", ".", "format", "(", "name", ",", "namespace", ")", "return", "ret" ]
.. versionadded:: 2016.3.0 Delete kubernetes secret in the defined namespace. Namespace is the mandatory parameter as well as name. CLI Example: .. code-block:: bash salt '*' k8s.delete_secret namespace_name secret_name salt '*' k8s.delete_secret namespace_name secret_name http://kube-master.cluster.local
[ "..", "versionadded", "::", "2016", ".", "3", ".", "0" ]
python
train
35.828571
brocade/pynos
pynos/versions/ver_6/ver_6_0_1/yang/brocade_fc_auth.py
https://github.com/brocade/pynos/blob/bd8a34e98f322de3fc06750827d8bbc3a0c00380/pynos/versions/ver_6/ver_6_0_1/yang/brocade_fc_auth.py#L12-L24
def fcsp_sa_fcsp_auth_proto_auth_type(self, **kwargs): """Auto Generated Code """ config = ET.Element("config") fcsp_sa = ET.SubElement(config, "fcsp-sa", xmlns="urn:brocade.com:mgmt:brocade-fc-auth") fcsp = ET.SubElement(fcsp_sa, "fcsp") auth = ET.SubElement(fcsp, "auth") proto = ET.SubElement(auth, "proto") auth_type = ET.SubElement(proto, "auth-type") auth_type.text = kwargs.pop('auth_type') callback = kwargs.pop('callback', self._callback) return callback(config)
[ "def", "fcsp_sa_fcsp_auth_proto_auth_type", "(", "self", ",", "*", "*", "kwargs", ")", ":", "config", "=", "ET", ".", "Element", "(", "\"config\"", ")", "fcsp_sa", "=", "ET", ".", "SubElement", "(", "config", ",", "\"fcsp-sa\"", ",", "xmlns", "=", "\"urn:brocade.com:mgmt:brocade-fc-auth\"", ")", "fcsp", "=", "ET", ".", "SubElement", "(", "fcsp_sa", ",", "\"fcsp\"", ")", "auth", "=", "ET", ".", "SubElement", "(", "fcsp", ",", "\"auth\"", ")", "proto", "=", "ET", ".", "SubElement", "(", "auth", ",", "\"proto\"", ")", "auth_type", "=", "ET", ".", "SubElement", "(", "proto", ",", "\"auth-type\"", ")", "auth_type", ".", "text", "=", "kwargs", ".", "pop", "(", "'auth_type'", ")", "callback", "=", "kwargs", ".", "pop", "(", "'callback'", ",", "self", ".", "_callback", ")", "return", "callback", "(", "config", ")" ]
Auto Generated Code
[ "Auto", "Generated", "Code" ]
python
train
42.153846
twisted/mantissa
xmantissa/webtheme.py
https://github.com/twisted/mantissa/blob/53e5502aba23ce99be78b27f923a276593033fe8/xmantissa/webtheme.py#L78-L86
def getInstalledThemes(self, store):
        """
        Collect themes from all offerings installed on this store, or (if called
        multiple times) return the previously collected list.
        """
        if store not in self._getInstalledThemesCache:
            self._getInstalledThemesCache[store] = (self.
                _realGetInstalledThemes(store))
        return self._getInstalledThemesCache[store]
[ "def", "getInstalledThemes", "(", "self", ",", "store", ")", ":", "if", "not", "store", "in", "self", ".", "_getInstalledThemesCache", ":", "self", ".", "_getInstalledThemesCache", "[", "store", "]", "=", "(", "self", ".", "_realGetInstalledThemes", "(", "store", ")", ")", "return", "self", ".", "_getInstalledThemesCache", "[", "store", "]" ]
Collect themes from all offerings installed on this store, or (if called multiple times) return the previously collected list.
[ "Collect", "themes", "from", "all", "offerings", "installed", "on", "this", "store", "or", "(", "if", "called", "multiple", "times", ")", "return", "the", "previously", "collected", "list", "." ]
python
train
49
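The caching idiom above (compute once per key, then serve from a dict) is easy to isolate; a generic sketch that does not use Mantissa's actual classes:

class ThemeCollector:
    """Hypothetical per-store memoisation, mirroring the pattern above."""
    def __init__(self, expensive_lookup):
        self._expensive_lookup = expensive_lookup
        self._cache = {}

    def get_installed_themes(self, store):
        if store not in self._cache:
            self._cache[store] = self._expensive_lookup(store)
        return self._cache[store]

collector = ThemeCollector(lambda store: ['base', store + '-theme'])
print(collector.get_installed_themes('shop'))  # computed on first call
print(collector.get_installed_themes('shop'))  # served from the cache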
Robin8Put/pmes
ams/utils/tornado_components/mongo.py
https://github.com/Robin8Put/pmes/blob/338bec94162098f05b75bad035417317e1252fd2/ams/utils/tornado_components/mongo.py#L62-L85
async def insert(self, **kwargs):
		"""
		Accepts request object, retrieves data
		from its body and creates a new account.
		"""
		if kwargs:
			# Create autoincrement for account
			pk = await self.autoincrement()
			kwargs.update({"id": pk})
			# Create account with received data and autoincrement
			await self.collection.insert_one(kwargs)
			row = await self.collection.find_one({"id": pk})
		else:
			row = None
		if row:
			return {i:row[i] for i in row if i != "_id"}
		else:
			return {"error":500, "reason":"Not created"}
[ "async", "def", "insert", "(", "self", ",", "*", "*", "kwargs", ")", ":", "if", "kwargs", ":", "# Create autoincrement for account", "pk", "=", "await", "self", ".", "autoincrement", "(", ")", "kwargs", ".", "update", "(", "{", "\"id\"", ":", "pk", "}", ")", "# Create account with received data and autoincrement", "await", "self", ".", "collection", ".", "insert_one", "(", "kwargs", ")", "row", "=", "await", "self", ".", "collection", ".", "find_one", "(", "{", "\"id\"", ":", "pk", "}", ")", "else", ":", "row", "=", "None", "if", "row", ":", "return", "{", "i", ":", "row", "[", "i", "]", "for", "i", "in", "row", "if", "i", "!=", "\"_id\"", "}", "else", ":", "return", "{", "\"error\"", ":", "500", ",", "\"reason\"", ":", "\"Not created\"", "}" ]
Accepts request object, retrieves data
		from its body and creates a new account.
[ "Accepts", "request", "object", "retrieves", "data", "from", "the", "one", "s", "body", "and", "creates", "new", "account", "." ]
python
train
22
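The flow above (allocate an autoincrement id, insert, read the row back, hide Mongo's internal _id) can be exercised without a MongoDB server; a hypothetical in-memory stand-in:

import asyncio

class InMemoryTable:
    """Hypothetical stand-in for the Motor-backed wrapper above."""
    def __init__(self):
        self._rows = []

    async def autoincrement(self):
        return len(self._rows) + 1

    async def insert(self, **kwargs):
        if not kwargs:
            return {"error": 500, "reason": "Not created"}
        pk = await self.autoincrement()
        kwargs.update({"id": pk, "_id": object()})  # fake ObjectId
        self._rows.append(kwargs)
        return {i: kwargs[i] for i in kwargs if i != "_id"}

print(asyncio.run(InMemoryTable().insert(email="a@b.c")))
# {'email': 'a@b.c', 'id': 1}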
PiotrDabkowski/Js2Py
js2py/internals/operations.py
https://github.com/PiotrDabkowski/Js2Py/blob/c0fa43f5679cf91ca8986c5747fcb07a433dc584/js2py/internals/operations.py#L283-L289
def in_op(self, other): '''checks if self is in other''' if not is_object(other): raise MakeError( 'TypeError', "You can\'t use 'in' operator to search in non-objects") return other.has_property(to_string(self))
[ "def", "in_op", "(", "self", ",", "other", ")", ":", "if", "not", "is_object", "(", "other", ")", ":", "raise", "MakeError", "(", "'TypeError'", ",", "\"You can\\'t use 'in' operator to search in non-objects\"", ")", "return", "other", ".", "has_property", "(", "to_string", "(", "self", ")", ")" ]
checks if self is in other
[ "checks", "if", "self", "is", "in", "other" ]
python
valid
35.571429
hotdoc/hotdoc
hotdoc/extensions/c/clang/cindex.py
https://github.com/hotdoc/hotdoc/blob/1067cdc8482b585b364a38fb52ca5d904e486280/hotdoc/extensions/c/clang/cindex.py#L2026-L2037
def element_count(self): """Retrieve the number of elements in this type. Returns an int. If the Type is not an array or vector, this raises. """ result = conf.lib.clang_getNumElements(self) if result < 0: raise Exception('Type does not have elements.') return result
[ "def", "element_count", "(", "self", ")", ":", "result", "=", "conf", ".", "lib", ".", "clang_getNumElements", "(", "self", ")", "if", "result", "<", "0", ":", "raise", "Exception", "(", "'Type does not have elements.'", ")", "return", "result" ]
Retrieve the number of elements in this type. Returns an int. If the Type is not an array or vector, this raises.
[ "Retrieve", "the", "number", "of", "elements", "in", "this", "type", "." ]
python
train
27.25
MIT-LCP/wfdb-python
wfdb/io/_signal.py
https://github.com/MIT-LCP/wfdb-python/blob/cc8c9e9e44f10af961b7a9d8ae03708b31ac8a8c/wfdb/io/_signal.py#L516-L622
def calc_adc_params(self):
        """
        Compute appropriate adc_gain and baseline parameters for adc
        conversion, given the physical signal and the fmts.

        Returns
        -------
        adc_gains : list
            List of calculated `adc_gain` values for each channel.
        baselines : list
            List of calculated `baseline` values for each channel.

        Notes
        -----
        This is the mapping equation:
            `(digital - baseline) / adc_gain = physical`
            `physical * adc_gain + baseline = digital`

        The original WFDB library stores `baseline` as int32.
        Constrain abs(adc_gain) <= 2**31 == 2147483648

        This function does carefully deal with overflow for calculated
        int32 `baseline` values, but does not consider over/underflow
        for calculated float `adc_gain` values.

        """
        adc_gains = []
        baselines = []

        if np.where(np.isinf(self.p_signal))[0].size:
            raise ValueError('Signal contains inf. Cannot perform adc.')

        # min and max ignoring nans, unless whole channel is nan.
        # Should suppress warning message.
        minvals = np.nanmin(self.p_signal, axis=0)
        maxvals = np.nanmax(self.p_signal, axis=0)

        for ch in range(np.shape(self.p_signal)[1]):
            # Get the minimum and maximum (valid) storage values
            dmin, dmax = _digi_bounds(self.fmt[ch])
            # add 1 because the lowest value is used to store nans
            dmin = dmin + 1

            pmin = minvals[ch]
            pmax = maxvals[ch]

            # Figure out digital samples used to store physical samples

            # If the entire signal is nan, gain/baseline won't be used
            if np.isnan(pmin):
                adc_gain = 1
                baseline = 1
            # If the signal is just one value, store one digital value.
            elif pmin == pmax:
                if pmin == 0:
                    adc_gain = 1
                    baseline = 1
                else:
                    # All digital values are +1 or -1. Keep adc_gain > 0
                    adc_gain = abs(1 / pmin)
                    baseline = 0
            # Regular varied signal case.
            else:
                # The equation is: p = (d - b) / g

                # Approximately, pmax maps to dmax, and pmin maps to
                # dmin. Gradient will be equal to, or close to
                # delta(d) / delta(p), since intercept baseline has
                # to be an integer.

                # Constraint: baseline must be between +/- 2**31
                adc_gain = (dmax-dmin) / (pmax-pmin)
                baseline = dmin - adc_gain*pmin

                # Make adjustments for baseline to be an integer
                # This up/down round logic of baseline is to ensure
                # there is no overshoot of dmax. Now pmax will map
                # to dmax or dmax-1 which is also fine.
                if pmin > 0:
                    baseline = int(np.ceil(baseline))
                else:
                    baseline = int(np.floor(baseline))

                # After baseline is set, adjust gain correspondingly. Set
                # the gain to map pmin to dmin, and p==0 to baseline.
                # In the case where pmin == 0 and dmin == baseline,
                # adc_gain is already correct. Avoid dividing by 0.
                if dmin != baseline:
                    adc_gain = (dmin - baseline) / pmin

                # Remap signal if baseline exceeds boundaries.
                # This may happen if pmax < 0
                if baseline > MAX_I32:
                    # pmin maps to dmin, baseline maps to 2**31 - 1
                    # pmax will map to a lower value than before
                    adc_gain = (MAX_I32 - dmin) / abs(pmin)
                    baseline = MAX_I32
                # This may happen if pmin > 0
                elif baseline < MIN_I32:
                    # pmax maps to dmax, baseline maps to -2**31 + 1
                    adc_gain = (dmax - MIN_I32) / pmax
                    baseline = MIN_I32

            adc_gains.append(adc_gain)
            baselines.append(baseline)

        return (adc_gains, baselines)
[ "def", "calc_adc_params", "(", "self", ")", ":", "adc_gains", "=", "[", "]", "baselines", "=", "[", "]", "if", "np", ".", "where", "(", "np", ".", "isinf", "(", "self", ".", "p_signal", ")", ")", "[", "0", "]", ".", "size", ":", "raise", "ValueError", "(", "'Signal contains inf. Cannot perform adc.'", ")", "# min and max ignoring nans, unless whole channel is nan.", "# Should suppress warning message.", "minvals", "=", "np", ".", "nanmin", "(", "self", ".", "p_signal", ",", "axis", "=", "0", ")", "maxvals", "=", "np", ".", "nanmax", "(", "self", ".", "p_signal", ",", "axis", "=", "0", ")", "for", "ch", "in", "range", "(", "np", ".", "shape", "(", "self", ".", "p_signal", ")", "[", "1", "]", ")", ":", "# Get the minimum and maximum (valid) storage values", "dmin", ",", "dmax", "=", "_digi_bounds", "(", "self", ".", "fmt", "[", "ch", "]", ")", "# add 1 because the lowest value is used to store nans", "dmin", "=", "dmin", "+", "1", "pmin", "=", "minvals", "[", "ch", "]", "pmax", "=", "maxvals", "[", "ch", "]", "# Figure out digital samples used to store physical samples", "# If the entire signal is nan, gain/baseline won't be used", "if", "pmin", "==", "np", ".", "nan", ":", "adc_gain", "=", "1", "baseline", "=", "1", "# If the signal is just one value, store one digital value.", "elif", "pmin", "==", "pmax", ":", "if", "pmin", "==", "0", ":", "adc_gain", "=", "1", "baseline", "=", "1", "else", ":", "# All digital values are +1 or -1. Keep adc_gain > 0", "adc_gain", "=", "abs", "(", "1", "/", "pmin", ")", "baseline", "=", "0", "# Regular varied signal case.", "else", ":", "# The equation is: p = (d - b) / g", "# Approximately, pmax maps to dmax, and pmin maps to", "# dmin. Gradient will be equal to, or close to", "# delta(d) / delta(p), since intercept baseline has", "# to be an integer.", "# Constraint: baseline must be between +/- 2**31", "adc_gain", "=", "(", "dmax", "-", "dmin", ")", "/", "(", "pmax", "-", "pmin", ")", "baseline", "=", "dmin", "-", "adc_gain", "*", "pmin", "# Make adjustments for baseline to be an integer", "# This up/down round logic of baseline is to ensure", "# there is no overshoot of dmax. Now pmax will map", "# to dmax or dmax-1 which is also fine.", "if", "pmin", ">", "0", ":", "baseline", "=", "int", "(", "np", ".", "ceil", "(", "baseline", ")", ")", "else", ":", "baseline", "=", "int", "(", "np", ".", "floor", "(", "baseline", ")", ")", "# After baseline is set, adjust gain correspondingly.Set", "# the gain to map pmin to dmin, and p==0 to baseline.", "# In the case where pmin == 0 and dmin == baseline,", "# adc_gain is already correct. Avoid dividing by 0.", "if", "dmin", "!=", "baseline", ":", "adc_gain", "=", "(", "dmin", "-", "baseline", ")", "/", "pmin", "# Remap signal if baseline exceeds boundaries.", "# This may happen if pmax < 0", "if", "baseline", ">", "MAX_I32", ":", "# pmin maps to dmin, baseline maps to 2**31 - 1", "# pmax will map to a lower value than before", "adc_gain", "=", "(", "MAX_I32", ")", "-", "dmin", "/", "abs", "(", "pmin", ")", "baseline", "=", "MAX_I32", "# This may happen if pmin > 0", "elif", "baseline", "<", "MIN_I32", ":", "# pmax maps to dmax, baseline maps to -2**31 + 1", "adc_gain", "=", "(", "dmax", "-", "MIN_I32", ")", "/", "pmax", "baseline", "=", "MIN_I32", "adc_gains", ".", "append", "(", "adc_gain", ")", "baselines", ".", "append", "(", "baseline", ")", "return", "(", "adc_gains", ",", "baselines", ")" ]
Compute appropriate adc_gain and baseline parameters for adc
        conversion, given the physical signal and the fmts.

        Returns
        -------
        adc_gains : list
            List of calculated `adc_gain` values for each channel.
        baselines : list
            List of calculated `baseline` values for each channel.

        Notes
        -----
        This is the mapping equation:
            `(digital - baseline) / adc_gain = physical`
            `physical * adc_gain + baseline = digital`

        The original WFDB library stores `baseline` as int32.
        Constrain abs(adc_gain) <= 2**31 == 2147483648

        This function does carefully deal with overflow for calculated
        int32 `baseline` values, but does not consider over/underflow
        for calculated float `adc_gain` values.
[ "Compute", "appropriate", "adc_gain", "and", "baseline", "parameters", "for", "adc", "conversion", "given", "the", "physical", "signal", "and", "the", "fmts", "." ]
python
train
38.345794
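A worked example of the core mapping, using the formulas from the regular-signal branch above (the digital bounds are those of a hypothetical 12-bit format with the lowest code reserved for nan):

import numpy as np

dmin, dmax = -2047, 2047   # usable digital range (-2048 reserved for nan)
pmin, pmax = -5.0, 5.0     # physical extremes of the channel

adc_gain = (dmax - dmin) / (pmax - pmin)   # 409.4
baseline = dmin - adc_gain * pmin          # 0.0
baseline = int(np.ceil(baseline)) if pmin > 0 else int(np.floor(baseline))

# Round trip: physical -> digital -> physical
physical = np.array([-5.0, 0.0, 5.0])
digital = physical * adc_gain + baseline
print(digital)                          # [-2047.     0.  2047.]
print((digital - baseline) / adc_gain)  # [-5.  0.  5.]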
dossier/dossier.label
dossier/label/combined_store.py
https://github.com/dossier/dossier.label/blob/d445e56b02ffd91ad46b0872cfbff62b9afef7ec/dossier/label/combined_store.py#L40-L54
def get_related_flat(self, content_id, min_strength=None): '''Follow coreference relationships to get full related graph. This differs from ``get_related_coref_relationships`` in that it returns a flat list of all identifiers found through the coreference layer of indirection. :rtype: list of identifiers ''' rel_id_to_idents = self.get_related_coref_relationships( content_id, min_strength=min_strength) flat_list = [] for val in rel_id_to_idents.values(): flat_list.extend(val) return flat_list
[ "def", "get_related_flat", "(", "self", ",", "content_id", ",", "min_strength", "=", "None", ")", ":", "rel_id_to_idents", "=", "self", ".", "get_related_coref_relationships", "(", "content_id", ",", "min_strength", "=", "min_strength", ")", "flat_list", "=", "[", "]", "for", "val", "in", "rel_id_to_idents", ".", "values", "(", ")", ":", "flat_list", ".", "extend", "(", "val", ")", "return", "flat_list" ]
Follow coreference relationships to get full related graph. This differs from ``get_related_coref_relationships`` in that it returns a flat list of all identifiers found through the coreference layer of indirection. :rtype: list of identifiers
[ "Follow", "coreference", "relationships", "to", "get", "full", "related", "graph", "." ]
python
train
39.266667
digidotcom/python-devicecloud
devicecloud/monitor.py
https://github.com/digidotcom/python-devicecloud/blob/32529684a348a7830a269c32601604c78036bcb8/devicecloud/monitor.py#L213-L263
def create_http_monitor(self, topics, transport_url, transport_token=None, transport_method='PUT', connect_timeout=0, response_timeout=0, batch_size=1, batch_duration=0, compression='none', format_type='json'): """Creates a HTTP Monitor instance in Device Cloud for a given list of topics :param topics: a string list of topics (e.g. ['DeviceCore[U]', 'FileDataCore']). :param transport_url: URL of the customer web server. :param transport_token: Credentials for basic authentication in the following format: username:password :param transport_method: HTTP method to use for sending data: PUT or POST. The default is PUT. :param connect_timeout: A value of 0 means use the system default of 5000 (5 seconds). :param response_timeout: A value of 0 means use the system default of 5000 (5 seconds). :param batch_size: How many Msgs received before sending data. :param batch_duration: How long to wait before sending batch if it does not exceed batch_size. :param compression: Compression value (i.e. 'gzip'). :param format_type: What format server should send data in (i.e. 'xml' or 'json'). Returns an object of the created Monitor """ monitor_xml = """\ <Monitor> <monTopic>{topics}</monTopic> <monBatchSize>{batch_size}</monBatchSize> <monFormatType>{format_type}</monFormatType> <monTransportType>http</monTransportType> <monTransportUrl>{transport_url}</monTransportUrl> <monTransportToken>{transport_token}</monTransportToken> <monTransportMethod>{transport_method}</monTransportMethod> <monConnectTimeout>{connect_timeout}</monConnectTimeout> <monResponseTimeout>{response_timeout}</monResponseTimeout> <monCompression>{compression}</monCompression> </Monitor> """.format( topics=','.join(topics), transport_url=transport_url, transport_token=transport_token, transport_method=transport_method, connect_timeout=connect_timeout, response_timeout=response_timeout, batch_size=batch_size, batch_duration=batch_duration, format_type=format_type, compression=compression, ) monitor_xml = textwrap.dedent(monitor_xml) response = self._conn.post("/ws/Monitor", monitor_xml) location = ET.fromstring(response.text).find('.//location').text monitor_id = int(location.split('/')[-1]) return HTTPDeviceCloudMonitor(self._conn, monitor_id)
[ "def", "create_http_monitor", "(", "self", ",", "topics", ",", "transport_url", ",", "transport_token", "=", "None", ",", "transport_method", "=", "'PUT'", ",", "connect_timeout", "=", "0", ",", "response_timeout", "=", "0", ",", "batch_size", "=", "1", ",", "batch_duration", "=", "0", ",", "compression", "=", "'none'", ",", "format_type", "=", "'json'", ")", ":", "monitor_xml", "=", "\"\"\"\\\n <Monitor>\n <monTopic>{topics}</monTopic>\n <monBatchSize>{batch_size}</monBatchSize>\n <monFormatType>{format_type}</monFormatType>\n <monTransportType>http</monTransportType>\n <monTransportUrl>{transport_url}</monTransportUrl>\n <monTransportToken>{transport_token}</monTransportToken>\n <monTransportMethod>{transport_method}</monTransportMethod>\n <monConnectTimeout>{connect_timeout}</monConnectTimeout>\n <monResponseTimeout>{response_timeout}</monResponseTimeout>\n <monCompression>{compression}</monCompression>\n </Monitor>\n \"\"\"", ".", "format", "(", "topics", "=", "','", ".", "join", "(", "topics", ")", ",", "transport_url", "=", "transport_url", ",", "transport_token", "=", "transport_token", ",", "transport_method", "=", "transport_method", ",", "connect_timeout", "=", "connect_timeout", ",", "response_timeout", "=", "response_timeout", ",", "batch_size", "=", "batch_size", ",", "batch_duration", "=", "batch_duration", ",", "format_type", "=", "format_type", ",", "compression", "=", "compression", ",", ")", "monitor_xml", "=", "textwrap", ".", "dedent", "(", "monitor_xml", ")", "response", "=", "self", ".", "_conn", ".", "post", "(", "\"/ws/Monitor\"", ",", "monitor_xml", ")", "location", "=", "ET", ".", "fromstring", "(", "response", ".", "text", ")", ".", "find", "(", "'.//location'", ")", ".", "text", "monitor_id", "=", "int", "(", "location", ".", "split", "(", "'/'", ")", "[", "-", "1", "]", ")", "return", "HTTPDeviceCloudMonitor", "(", "self", ".", "_conn", ",", "monitor_id", ")" ]
Creates a HTTP Monitor instance in Device Cloud for a given list of topics :param topics: a string list of topics (e.g. ['DeviceCore[U]', 'FileDataCore']). :param transport_url: URL of the customer web server. :param transport_token: Credentials for basic authentication in the following format: username:password :param transport_method: HTTP method to use for sending data: PUT or POST. The default is PUT. :param connect_timeout: A value of 0 means use the system default of 5000 (5 seconds). :param response_timeout: A value of 0 means use the system default of 5000 (5 seconds). :param batch_size: How many Msgs received before sending data. :param batch_duration: How long to wait before sending batch if it does not exceed batch_size. :param compression: Compression value (i.e. 'gzip'). :param format_type: What format server should send data in (i.e. 'xml' or 'json'). Returns an object of the created Monitor
[ "Creates", "a", "HTTP", "Monitor", "instance", "in", "Device", "Cloud", "for", "a", "given", "list", "of", "topics" ]
python
train
52.352941
bkg/django-spillway
spillway/query.py
https://github.com/bkg/django-spillway/blob/c488a62642430b005f1e0d4a19e160d8d5964b67/spillway/query.py#L122-L157
def tile(self, bbox, z=0, format=None, clip=True): """Returns a GeoQuerySet intersecting a tile boundary. Arguments: bbox -- tile extent as geometry Keyword args: z -- tile zoom level used as basis for geometry simplification format -- vector tile format as str (pbf, geojson) clip -- clip geometries to tile boundary as boolean """ # Tile grid uses 3857, but GeoJSON coordinates should be in 4326. tile_srid = 3857 bbox = getattr(bbox, 'geos', bbox) clone = filter_geometry(self, intersects=bbox) field = clone.geo_field srid = field.srid sql = field.name try: tilew = self.tilewidths[z] except IndexError: tilew = self.tilewidths[-1] if bbox.srid != srid: bbox = bbox.transform(srid, clone=True) # Estimate tile width in degrees instead of meters. if bbox.srs.geographic: p = geos.Point(tilew, tilew, srid=tile_srid) p.transform(srid) tilew = p.x if clip: bufbox = bbox.buffer(tilew) sql = geofn.Intersection(sql, bufbox.envelope) sql = SimplifyPreserveTopology(sql, tilew) if format == 'pbf': return clone.pbf(bbox, geo_col=sql) sql = geofn.Transform(sql, 4326) return clone.annotate(**{format: sql})
[ "def", "tile", "(", "self", ",", "bbox", ",", "z", "=", "0", ",", "format", "=", "None", ",", "clip", "=", "True", ")", ":", "# Tile grid uses 3857, but GeoJSON coordinates should be in 4326.", "tile_srid", "=", "3857", "bbox", "=", "getattr", "(", "bbox", ",", "'geos'", ",", "bbox", ")", "clone", "=", "filter_geometry", "(", "self", ",", "intersects", "=", "bbox", ")", "field", "=", "clone", ".", "geo_field", "srid", "=", "field", ".", "srid", "sql", "=", "field", ".", "name", "try", ":", "tilew", "=", "self", ".", "tilewidths", "[", "z", "]", "except", "IndexError", ":", "tilew", "=", "self", ".", "tilewidths", "[", "-", "1", "]", "if", "bbox", ".", "srid", "!=", "srid", ":", "bbox", "=", "bbox", ".", "transform", "(", "srid", ",", "clone", "=", "True", ")", "# Estimate tile width in degrees instead of meters.", "if", "bbox", ".", "srs", ".", "geographic", ":", "p", "=", "geos", ".", "Point", "(", "tilew", ",", "tilew", ",", "srid", "=", "tile_srid", ")", "p", ".", "transform", "(", "srid", ")", "tilew", "=", "p", ".", "x", "if", "clip", ":", "bufbox", "=", "bbox", ".", "buffer", "(", "tilew", ")", "sql", "=", "geofn", ".", "Intersection", "(", "sql", ",", "bufbox", ".", "envelope", ")", "sql", "=", "SimplifyPreserveTopology", "(", "sql", ",", "tilew", ")", "if", "format", "==", "'pbf'", ":", "return", "clone", ".", "pbf", "(", "bbox", ",", "geo_col", "=", "sql", ")", "sql", "=", "geofn", ".", "Transform", "(", "sql", ",", "4326", ")", "return", "clone", ".", "annotate", "(", "*", "*", "{", "format", ":", "sql", "}", ")" ]
Returns a GeoQuerySet intersecting a tile boundary. Arguments: bbox -- tile extent as geometry Keyword args: z -- tile zoom level used as basis for geometry simplification format -- vector tile format as str (pbf, geojson) clip -- clip geometries to tile boundary as boolean
[ "Returns", "a", "GeoQuerySet", "intersecting", "a", "tile", "boundary", "." ]
python
train
38.25
OCHA-DAP/hdx-python-api
src/hdx/data/dataset.py
https://github.com/OCHA-DAP/hdx-python-api/blob/212440f54f73805826a16db77dbcb6033b18a313/src/hdx/data/dataset.py#L333-L353
def check_required_fields(self, ignore_fields=list(), allow_no_resources=False): # type: (List[str], bool) -> None """Check that metadata for dataset and its resources is complete. The parameter ignore_fields should be set if required to any fields that should be ignored for the particular operation. Args: ignore_fields (List[str]): Fields to ignore. Default is []. allow_no_resources (bool): Whether to allow no resources. Defaults to False. Returns: None """ if self.is_requestable(): self._check_required_fields('dataset-requestable', ignore_fields) else: self._check_required_fields('dataset', ignore_fields) if len(self.resources) == 0 and not allow_no_resources: raise HDXError('There are no resources! Please add at least one resource!') for resource in self.resources: ignore_fields = ['package_id'] resource.check_required_fields(ignore_fields=ignore_fields)
[ "def", "check_required_fields", "(", "self", ",", "ignore_fields", "=", "list", "(", ")", ",", "allow_no_resources", "=", "False", ")", ":", "# type: (List[str], bool) -> None", "if", "self", ".", "is_requestable", "(", ")", ":", "self", ".", "_check_required_fields", "(", "'dataset-requestable'", ",", "ignore_fields", ")", "else", ":", "self", ".", "_check_required_fields", "(", "'dataset'", ",", "ignore_fields", ")", "if", "len", "(", "self", ".", "resources", ")", "==", "0", "and", "not", "allow_no_resources", ":", "raise", "HDXError", "(", "'There are no resources! Please add at least one resource!'", ")", "for", "resource", "in", "self", ".", "resources", ":", "ignore_fields", "=", "[", "'package_id'", "]", "resource", ".", "check_required_fields", "(", "ignore_fields", "=", "ignore_fields", ")" ]
Check that metadata for dataset and its resources is complete. The parameter ignore_fields should be set if required to any fields that should be ignored for the particular operation. Args: ignore_fields (List[str]): Fields to ignore. Default is []. allow_no_resources (bool): Whether to allow no resources. Defaults to False. Returns: None
[ "Check", "that", "metadata", "for", "dataset", "and", "its", "resources", "is", "complete", ".", "The", "parameter", "ignore_fields", "should", "be", "set", "if", "required", "to", "any", "fields", "that", "should", "be", "ignored", "for", "the", "particular", "operation", "." ]
python
train
49.857143
django-salesforce/django-salesforce
salesforce/dbapi/driver.py
https://github.com/django-salesforce/django-salesforce/blob/6fd5643dba69d49c5881de50875cf90204a8f808/salesforce/dbapi/driver.py#L159-L172
def make_session(self): """Authenticate and get the name of assigned SFDC data server""" with connect_lock: if self._sf_session is None: sf_session = requests.Session() # TODO configurable class Salesforce***Auth sf_session.auth = SalesforcePasswordAuth(db_alias=self.alias, settings_dict=self.settings_dict) sf_instance_url = sf_session.auth.instance_url sf_requests_adapter = HTTPAdapter(max_retries=get_max_retries()) sf_session.mount(sf_instance_url, sf_requests_adapter) # Additional headers work, but the same are added automatically by "requests' package. # sf_session.header = {'accept-encoding': 'gzip, deflate', 'connection': 'keep-alive'} # TODO self._sf_session = sf_session
[ "def", "make_session", "(", "self", ")", ":", "with", "connect_lock", ":", "if", "self", ".", "_sf_session", "is", "None", ":", "sf_session", "=", "requests", ".", "Session", "(", ")", "# TODO configurable class Salesforce***Auth", "sf_session", ".", "auth", "=", "SalesforcePasswordAuth", "(", "db_alias", "=", "self", ".", "alias", ",", "settings_dict", "=", "self", ".", "settings_dict", ")", "sf_instance_url", "=", "sf_session", ".", "auth", ".", "instance_url", "sf_requests_adapter", "=", "HTTPAdapter", "(", "max_retries", "=", "get_max_retries", "(", ")", ")", "sf_session", ".", "mount", "(", "sf_instance_url", ",", "sf_requests_adapter", ")", "# Additional headers work, but the same are added automatically by \"requests' package.", "# sf_session.header = {'accept-encoding': 'gzip, deflate', 'connection': 'keep-alive'} # TODO", "self", ".", "_sf_session", "=", "sf_session" ]
Authenticate and get the name of assigned SFDC data server
[ "Authenticate", "and", "get", "the", "name", "of", "assigned", "SFDC", "data", "server" ]
python
train
64.428571
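The session setup above follows the standard requests pattern: attach an auth object once, then mount a retry-capable HTTPAdapter on the instance URL prefix. A generic hedged sketch (URL, credentials and retry count are placeholders):

import requests
from requests.adapters import HTTPAdapter

session = requests.Session()
session.auth = ('user', 'password')  # the record uses a SalesforcePasswordAuth object instead

# Retries apply only to requests whose URL starts with this prefix.
session.mount('https://example.my.salesforce.com',
              HTTPAdapter(max_retries=3))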
PedalPi/PluginsManager
pluginsmanager/observer/host_observer/host_observer.py
https://github.com/PedalPi/PluginsManager/blob/2dcc9f6a79b48e9c9be82efffd855352fa15c5c7/pluginsmanager/observer/host_observer/host_observer.py#L120-L127
def _load_params_of(self, effect):
        """
        Called only when an effect has been created.

        Param changes call
        :meth:`~pluginsmanager.observer.host_observer.host_observer.HostObserver.on_param_value_changed()`
        """
        for param in effect.params:
            if param.value != param.default:
                self._set_param_value(param)
[ "def", "_load_params_of", "(", "self", ",", "effect", ")", ":", "for", "param", "in", "effect", ".", "params", ":", "if", "param", ".", "value", "!=", "param", ".", "default", ":", "self", ".", "_set_param_value", "(", "param", ")" ]
Called only when an effect has been created.

        Param changes call
        :meth:`~pluginsmanager.observer.host_observer.host_observer.HostObserver.on_param_value_changed()`
[ "Called", "only", "when", "a", "effect", "has", "created", "Param", "changes", "calls", ":", "meth", ":", "~pluginsmanager", ".", "observer", ".", "host_observer", ".", "host_observer", ".", "HostObserver", ".", "on_param_value_changed", "()" ]
python
train
43.75
vstconsulting/vstutils
vstutils/utils.py
https://github.com/vstconsulting/vstutils/blob/3d6d140c2463952dc9835a4e40caf758468b3049/vstutils/utils.py#L671-L680
def get_object(self, name, obj):
        """
        :param name: -- string name of backend
        :type name: str
        :param obj: -- model object
        :type obj: django.db.models.Model
        :return: backend object
        :rtype: object
        """
        return self[name](obj, **self.opts(name))
[ "def", "get_object", "(", "self", ",", "name", ",", "obj", ")", ":", "return", "self", "[", "name", "]", "(", "obj", ",", "*", "*", "self", ".", "opts", "(", "name", ")", ")" ]
:param name: -- string name of backend
        :type name: str
        :param obj: -- model object
        :type obj: django.db.models.Model
        :return: backend object
        :rtype: object
[ ":", "param", "name", ":", "--", "string", "name", "of", "backend", ":", "param", "name", ":", "str", ":", "param", "obj", ":", "--", "model", "object", ":", "type", "obj", ":", "django", ".", "db", ".", "models", ".", "Model", ":", "return", ":", "backend", "object", ":", "rtype", ":", "object" ]
python
train
30.2
micropython/micropython
examples/switch.py
https://github.com/micropython/micropython/blob/8031b7a25c21fb864fe9dd1fa40740030be66c11/examples/switch.py#L27-L42
def run_loop(leds=all_leds): """ Start the loop. :param `leds`: Which LEDs to light up upon switch press. :type `leds`: sequence of LED objects """ print('Loop started.\nPress Ctrl+C to break out of the loop.') while 1: try: if switch(): [led.on() for led in leds] else: [led.off() for led in leds] except OSError: # VCPInterrupt # Ctrl+C in interpreter mode. break
[ "def", "run_loop", "(", "leds", "=", "all_leds", ")", ":", "print", "(", "'Loop started.\\nPress Ctrl+C to break out of the loop.'", ")", "while", "1", ":", "try", ":", "if", "switch", "(", ")", ":", "[", "led", ".", "on", "(", ")", "for", "led", "in", "leds", "]", "else", ":", "[", "led", ".", "off", "(", ")", "for", "led", "in", "leds", "]", "except", "OSError", ":", "# VCPInterrupt # Ctrl+C in interpreter mode.", "break" ]
Start the loop. :param `leds`: Which LEDs to light up upon switch press. :type `leds`: sequence of LED objects
[ "Start", "the", "loop", "." ]
python
train
28.9375
alpacahq/pylivetrader
pylivetrader/loader.py
https://github.com/alpacahq/pylivetrader/blob/fd328b6595428c0789d9f218df34623f83a02b8b/pylivetrader/loader.py#L72-L79
def translate(script): '''translate zipline script into pylivetrader script. ''' tree = ast.parse(script) ZiplineImportVisitor().visit(tree) return astor.to_source(tree)
[ "def", "translate", "(", "script", ")", ":", "tree", "=", "ast", ".", "parse", "(", "script", ")", "ZiplineImportVisitor", "(", ")", ".", "visit", "(", "tree", ")", "return", "astor", ".", "to_source", "(", "tree", ")" ]
translate zipline script into pylivetrader script.
[ "translate", "zipline", "script", "into", "pylivetrader", "script", "." ]
python
train
23
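translate() is a parse -> transform -> re-emit pipeline; a hedged standalone sketch of the same idea (RenameZiplineImports below is a simplified stand-in for pylivetrader's ZiplineImportVisitor, and astor is assumed to be installed):

import ast
import astor

class RenameZiplineImports(ast.NodeTransformer):
    """Rewrite 'zipline...' modules to 'pylivetrader...' in from-imports."""
    def visit_ImportFrom(self, node):
        if node.module and node.module.startswith('zipline'):
            node.module = node.module.replace('zipline', 'pylivetrader', 1)
        return node

tree = ast.parse("from zipline.api import order, symbol\n")
RenameZiplineImports().visit(tree)
print(astor.to_source(tree))
# from pylivetrader.api import order, symbol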
martinpitt/python-dbusmock
dbusmock/templates/upower.py
https://github.com/martinpitt/python-dbusmock/blob/26f65f78bc0ed347233f699a8d6ee0e6880e7eb0/dbusmock/templates/upower.py#L197-L234
def SetupDisplayDevice(self, type, state, percentage, energy, energy_full, energy_rate, time_to_empty, time_to_full, is_present, icon_name, warning_level): '''Convenience method to configure DisplayDevice properties This calls Set() for all properties that the DisplayDevice is defined to have, and is shorter if you have to completely set it up instead of changing just one or two properties. This is only available when mocking the 1.0 API. ''' if not self.api1: raise dbus.exceptions.DBusException( 'SetupDisplayDevice() can only be used with the 1.0 API', name=MOCK_IFACE + '.APIVersion') display_props = mockobject.objects[self.p_display_dev] display_props.Set(DEVICE_IFACE, 'Type', dbus.UInt32(type)) display_props.Set(DEVICE_IFACE, 'State', dbus.UInt32(state)) display_props.Set(DEVICE_IFACE, 'Percentage', percentage) display_props.Set(DEVICE_IFACE, 'Energy', energy) display_props.Set(DEVICE_IFACE, 'EnergyFull', energy_full) display_props.Set(DEVICE_IFACE, 'EnergyRate', energy_rate) display_props.Set(DEVICE_IFACE, 'TimeToEmpty', dbus.Int64(time_to_empty)) display_props.Set(DEVICE_IFACE, 'TimeToFull', dbus.Int64(time_to_full)) display_props.Set(DEVICE_IFACE, 'IsPresent', is_present) display_props.Set(DEVICE_IFACE, 'IconName', icon_name) display_props.Set(DEVICE_IFACE, 'WarningLevel', dbus.UInt32(warning_level))
[ "def", "SetupDisplayDevice", "(", "self", ",", "type", ",", "state", ",", "percentage", ",", "energy", ",", "energy_full", ",", "energy_rate", ",", "time_to_empty", ",", "time_to_full", ",", "is_present", ",", "icon_name", ",", "warning_level", ")", ":", "if", "not", "self", ".", "api1", ":", "raise", "dbus", ".", "exceptions", ".", "DBusException", "(", "'SetupDisplayDevice() can only be used with the 1.0 API'", ",", "name", "=", "MOCK_IFACE", "+", "'.APIVersion'", ")", "display_props", "=", "mockobject", ".", "objects", "[", "self", ".", "p_display_dev", "]", "display_props", ".", "Set", "(", "DEVICE_IFACE", ",", "'Type'", ",", "dbus", ".", "UInt32", "(", "type", ")", ")", "display_props", ".", "Set", "(", "DEVICE_IFACE", ",", "'State'", ",", "dbus", ".", "UInt32", "(", "state", ")", ")", "display_props", ".", "Set", "(", "DEVICE_IFACE", ",", "'Percentage'", ",", "percentage", ")", "display_props", ".", "Set", "(", "DEVICE_IFACE", ",", "'Energy'", ",", "energy", ")", "display_props", ".", "Set", "(", "DEVICE_IFACE", ",", "'EnergyFull'", ",", "energy_full", ")", "display_props", ".", "Set", "(", "DEVICE_IFACE", ",", "'EnergyRate'", ",", "energy_rate", ")", "display_props", ".", "Set", "(", "DEVICE_IFACE", ",", "'TimeToEmpty'", ",", "dbus", ".", "Int64", "(", "time_to_empty", ")", ")", "display_props", ".", "Set", "(", "DEVICE_IFACE", ",", "'TimeToFull'", ",", "dbus", ".", "Int64", "(", "time_to_full", ")", ")", "display_props", ".", "Set", "(", "DEVICE_IFACE", ",", "'IsPresent'", ",", "is_present", ")", "display_props", ".", "Set", "(", "DEVICE_IFACE", ",", "'IconName'", ",", "icon_name", ")", "display_props", ".", "Set", "(", "DEVICE_IFACE", ",", "'WarningLevel'", ",", "dbus", ".", "UInt32", "(", "warning_level", ")", ")" ]
Convenience method to configure DisplayDevice properties This calls Set() for all properties that the DisplayDevice is defined to have, and is shorter if you have to completely set it up instead of changing just one or two properties. This is only available when mocking the 1.0 API.
[ "Convenience", "method", "to", "configure", "DisplayDevice", "properties" ]
python
train
43.868421
jleinonen/pytmatrix
pytmatrix/orientation.py
https://github.com/jleinonen/pytmatrix/blob/8803507fe5332786feab105fa74acf63e7121718/pytmatrix/orientation.py#L78-L117
def orient_averaged_adaptive(tm):
    """Compute the T-matrix using variable orientation scatterers.

    This method uses a very slow adaptive routine and should mainly be used
    for reference purposes. Uses the set particle orientation PDF, ignoring
    the alpha and beta attributes.

    Args:
        tm: TMatrix (or descendant) instance

    Returns:
        The amplitude (S) and phase (Z) matrices.
    """
    S = np.zeros((2,2), dtype=complex)
    Z = np.zeros((4,4))

    def Sfunc(beta, alpha, i, j, real):
        (S_ang, Z_ang) = tm.get_SZ_single(alpha=alpha, beta=beta)
        s = S_ang[i,j].real if real else S_ang[i,j].imag
        return s * tm.or_pdf(beta)

    ind = range(2)
    for i in ind:
        for j in ind:
            S.real[i,j] = dblquad(Sfunc, 0.0, 360.0,
                lambda x: 0.0, lambda x: 180.0, (i,j,True))[0]/360.0
            S.imag[i,j] = dblquad(Sfunc, 0.0, 360.0,
                lambda x: 0.0, lambda x: 180.0, (i,j,False))[0]/360.0

    def Zfunc(beta, alpha, i, j):
        (S_ang, Z_ang) = tm.get_SZ_single(alpha=alpha, beta=beta)
        return Z_ang[i,j] * tm.or_pdf(beta)

    ind = range(4)
    for i in ind:
        for j in ind:
            Z[i,j] = dblquad(Zfunc, 0.0, 360.0,
                lambda x: 0.0, lambda x: 180.0, (i,j))[0]/360.0

    return (S, Z)
[ "def", "orient_averaged_adaptive", "(", "tm", ")", ":", "S", "=", "np", ".", "zeros", "(", "(", "2", ",", "2", ")", ",", "dtype", "=", "complex", ")", "Z", "=", "np", ".", "zeros", "(", "(", "4", ",", "4", ")", ")", "def", "Sfunc", "(", "beta", ",", "alpha", ",", "i", ",", "j", ",", "real", ")", ":", "(", "S_ang", ",", "Z_ang", ")", "=", "tm", ".", "get_SZ_single", "(", "alpha", "=", "alpha", ",", "beta", "=", "beta", ")", "s", "=", "S_ang", "[", "i", ",", "j", "]", ".", "real", "if", "real", "else", "S_ang", "[", "i", ",", "j", "]", ".", "imag", "return", "s", "*", "tm", ".", "or_pdf", "(", "beta", ")", "ind", "=", "range", "(", "2", ")", "for", "i", "in", "ind", ":", "for", "j", "in", "ind", ":", "S", ".", "real", "[", "i", ",", "j", "]", "=", "dblquad", "(", "Sfunc", ",", "0.0", ",", "360.0", ",", "lambda", "x", ":", "0.0", ",", "lambda", "x", ":", "180.0", ",", "(", "i", ",", "j", ",", "True", ")", ")", "[", "0", "]", "/", "360.0", "S", ".", "imag", "[", "i", ",", "j", "]", "=", "dblquad", "(", "Sfunc", ",", "0.0", ",", "360.0", ",", "lambda", "x", ":", "0.0", ",", "lambda", "x", ":", "180.0", ",", "(", "i", ",", "j", ",", "False", ")", ")", "[", "0", "]", "/", "360.0", "def", "Zfunc", "(", "beta", ",", "alpha", ",", "i", ",", "j", ")", ":", "(", "S_and", ",", "Z_ang", ")", "=", "tm", ".", "get_SZ_single", "(", "alpha", "=", "alpha", ",", "beta", "=", "beta", ")", "return", "Z_ang", "[", "i", ",", "j", "]", "*", "tm", ".", "or_pdf", "(", "beta", ")", "ind", "=", "range", "(", "4", ")", "for", "i", "in", "ind", ":", "for", "j", "in", "ind", ":", "Z", "[", "i", ",", "j", "]", "=", "dblquad", "(", "Zfunc", ",", "0.0", ",", "360.0", ",", "lambda", "x", ":", "0.0", ",", "lambda", "x", ":", "180.0", ",", "(", "i", ",", "j", ")", ")", "[", "0", "]", "/", "360.0", "return", "(", "S", ",", "Z", ")" ]
Compute the T-matrix using variable orientation scatterers. This method uses a very slow adaptive routine and should mainly be used for reference purposes. Uses the set particle orientation PDF, ignoring the alpha and beta attributes. Args: tm: TMatrix (or descendant) instance Returns: The amplitude (S) and phase (Z) matrices.
[ "Compute", "the", "T", "-", "matrix", "using", "variable", "orientation", "scatterers", ".", "This", "method", "uses", "a", "very", "slow", "adaptive", "routine", "and", "should", "mainly", "be", "used", "for", "reference", "purposes", ".", "Uses", "the", "set", "particle", "orientation", "PDF", "ignoring", "the", "alpha", "and", "beta", "attributes", "." ]
python
train
32.675
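In equation form, the double quadrature above computes an orientation average weighted by the particle orientation PDF p(beta) and averaged uniformly over alpha (the code integrates the real and imaginary parts of S separately); in LaTeX:

\bar{S}_{ij} = \frac{1}{360^\circ} \int_{0}^{360^\circ}\!\!\int_{0}^{180^\circ}
    S_{ij}(\alpha, \beta)\, p(\beta)\, \mathrm{d}\beta\, \mathrm{d}\alpha,
\qquad
\bar{Z}_{ij} = \frac{1}{360^\circ} \int_{0}^{360^\circ}\!\!\int_{0}^{180^\circ}
    Z_{ij}(\alpha, \beta)\, p(\beta)\, \mathrm{d}\beta\, \mathrm{d}\alpha.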
googleapis/dialogflow-python-client-v2
samples/knowledge_base_management.py
https://github.com/googleapis/dialogflow-python-client-v2/blob/8c9c8709222efe427b76c9c8fcc04a0c4a0760b5/samples/knowledge_base_management.py#L52-L69
def create_knowledge_base(project_id, display_name): """Creates a Knowledge base. Args: project_id: The GCP project linked with the agent. display_name: The display name of the Knowledge base.""" import dialogflow_v2beta1 as dialogflow client = dialogflow.KnowledgeBasesClient() project_path = client.project_path(project_id) knowledge_base = dialogflow.types.KnowledgeBase( display_name=display_name) response = client.create_knowledge_base(project_path, knowledge_base) print('Knowledge Base created:\n') print('Display Name: {}\n'.format(response.display_name)) print('Knowledge ID: {}\n'.format(response.name))
[ "def", "create_knowledge_base", "(", "project_id", ",", "display_name", ")", ":", "import", "dialogflow_v2beta1", "as", "dialogflow", "client", "=", "dialogflow", ".", "KnowledgeBasesClient", "(", ")", "project_path", "=", "client", ".", "project_path", "(", "project_id", ")", "knowledge_base", "=", "dialogflow", ".", "types", ".", "KnowledgeBase", "(", "display_name", "=", "display_name", ")", "response", "=", "client", ".", "create_knowledge_base", "(", "project_path", ",", "knowledge_base", ")", "print", "(", "'Knowledge Base created:\\n'", ")", "print", "(", "'Display Name: {}\\n'", ".", "format", "(", "response", ".", "display_name", ")", ")", "print", "(", "'Knowledge ID: {}\\n'", ".", "format", "(", "response", ".", "name", ")", ")" ]
Creates a Knowledge base. Args: project_id: The GCP project linked with the agent. display_name: The display name of the Knowledge base.
[ "Creates", "a", "Knowledge", "base", "." ]
python
train
36.944444
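A minimal invocation sketch for the sample above; the project id and display name are placeholders, and working Google application credentials with Dialogflow access are assumed in the environment.

# Both arguments are illustrative values, not real resources.
create_knowledge_base('my-gcp-project', 'FAQ knowledge base')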
Alignak-monitoring/alignak
alignak/external_command.py
https://github.com/Alignak-monitoring/alignak/blob/f3c145207e83159b799d3714e4241399c7740a64/alignak/external_command.py#L2333-L2346
def disable_servicegroup_passive_host_checks(self, servicegroup): """Disable passive host checks for a servicegroup Format of the line that triggers function call:: DISABLE_SERVICEGROUP_PASSIVE_HOST_CHECKS;<servicegroup_name> :param servicegroup: servicegroup to disable :type servicegroup: alignak.objects.servicegroup.Servicegroup :return: None """ for service_id in servicegroup.get_services(): if service_id in self.daemon.services: host_id = self.daemon.services[service_id].host self.disable_passive_host_checks(self.daemon.hosts[host_id])
[ "def", "disable_servicegroup_passive_host_checks", "(", "self", ",", "servicegroup", ")", ":", "for", "service_id", "in", "servicegroup", ".", "get_services", "(", ")", ":", "if", "service_id", "in", "self", ".", "daemon", ".", "services", ":", "host_id", "=", "self", ".", "daemon", ".", "services", "[", "service_id", "]", ".", "host", "self", ".", "disable_passive_host_checks", "(", "self", ".", "daemon", ".", "hosts", "[", "host_id", "]", ")" ]
Disable passive host checks for a servicegroup Format of the line that triggers function call:: DISABLE_SERVICEGROUP_PASSIVE_HOST_CHECKS;<servicegroup_name> :param servicegroup: servicegroup to disable :type servicegroup: alignak.objects.servicegroup.Servicegroup :return: None
[ "Disable", "passive", "host", "checks", "for", "a", "servicegroup", "Format", "of", "the", "line", "that", "triggers", "function", "call", "::" ]
python
train
45.857143
honeynet/beeswarm
beeswarm/drones/honeypot/honeypot.py
https://github.com/honeynet/beeswarm/blob/db51ea0bc29f631c3e3b5312b479ac9d5e31079a/beeswarm/drones/honeypot/honeypot.py#L181-L193
def prepare_environment(work_dir): """ Performs a few maintenance tasks before the Honeypot is run. Copies the data directory, and the config file to the cwd. The config file copied here is overwritten if the __init__ method is called with a configuration URL. :param work_dir: The directory to copy files to. """ package_directory = os.path.dirname(os.path.abspath(beeswarm.__file__)) logger.info('Copying data files to workdir.') shutil.copytree(os.path.join(package_directory, 'drones/honeypot/data'), os.path.join(work_dir, 'data/'), ignore=Honeypot._ignore_copy_files)
[ "def", "prepare_environment", "(", "work_dir", ")", ":", "package_directory", "=", "os", ".", "path", ".", "dirname", "(", "os", ".", "path", ".", "abspath", "(", "beeswarm", ".", "__file__", ")", ")", "logger", ".", "info", "(", "'Copying data files to workdir.'", ")", "shutil", ".", "copytree", "(", "os", ".", "path", ".", "join", "(", "package_directory", ",", "'drones/honeypot/data'", ")", ",", "os", ".", "path", ".", "join", "(", "work_dir", ",", "'data/'", ")", ",", "ignore", "=", "Honeypot", ".", "_ignore_copy_files", ")" ]
Performs a few maintenance tasks before the Honeypot is run. Copies the data directory, and the config file to the cwd. The config file copied here is overwritten if the __init__ method is called with a configuration URL. :param work_dir: The directory to copy files to.
[ "Performs", "a", "few", "maintenance", "tasks", "before", "the", "Honeypot", "is", "run", ".", "Copies", "the", "data", "directory", "and", "the", "config", "file", "to", "the", "cwd", ".", "The", "config", "file", "copied", "here", "is", "overwritten", "if", "the", "__init__", "method", "is", "called", "with", "a", "configuration", "URL", "." ]
python
train
51.615385
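A self-contained sketch of the same shutil.copytree-with-ignore pattern; the paths and ignore patterns are illustrative (the original uses a custom _ignore_copy_files callable).

import os
import shutil

def copy_data_files(package_dir, work_dir):
    # Copy a package's data tree into the working directory, skipping
    # compiled files. copytree() requires that the destination does not
    # already exist.
    shutil.copytree(os.path.join(package_dir, 'data'),
                    os.path.join(work_dir, 'data'),
                    ignore=shutil.ignore_patterns('*.pyc', '__pycache__'))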
ffcalculator/fantasydata-python
fantasy_data/FantasyData.py
https://github.com/ffcalculator/fantasydata-python/blob/af90cac1e80d8356cffaa80621ee513201f6c661/fantasy_data/FantasyData.py#L89-L102
def get_schedules_for_season(self, season, season_type="REG"): """ Game schedule for a specified season. """ try: season = int(season) if season_type not in ["REG", "PRE", "POST"]: raise ValueError except (ValueError, TypeError): raise FantasyDataError('Error: Invalid method parameters') season_param = "{0}{1}".format(season, season_type) result = self._method_call("Schedules/{season}", "stats", season=season_param) return result
[ "def", "get_schedules_for_season", "(", "self", ",", "season", ",", "season_type", "=", "\"REG\"", ")", ":", "try", ":", "season", "=", "int", "(", "season", ")", "if", "season_type", "not", "in", "[", "\"REG\"", ",", "\"PRE\"", ",", "\"POST\"", "]", ":", "raise", "ValueError", "except", "(", "ValueError", ",", "TypeError", ")", ":", "raise", "FantasyDataError", "(", "'Error: Invalid method parameters'", ")", "season_param", "=", "\"{0}{1}\"", ".", "format", "(", "season", ",", "season_type", ")", "result", "=", "self", ".", "_method_call", "(", "\"Schedules/{season}\"", ",", "\"stats\"", ",", "season", "=", "season_param", ")", "return", "result" ]
Game schedule for a specified season.
[ "Game", "schedule", "for", "a", "specified", "season", "." ]
python
train
38.357143
rwl/pylon
pylon/opf.py
https://github.com/rwl/pylon/blob/916514255db1ae1661406f0283df756baf960d14/pylon/opf.py#L216-L224
def _pwl1_to_poly(self, generators): """ Converts single-block piecewise-linear costs into linear polynomial. """ for g in generators: if (g.pcost_model == PW_LINEAR) and (len(g.p_cost) == 2): g.pwl_to_poly() return generators
[ "def", "_pwl1_to_poly", "(", "self", ",", "generators", ")", ":", "for", "g", "in", "generators", ":", "if", "(", "g", ".", "pcost_model", "==", "PW_LINEAR", ")", "and", "(", "len", "(", "g", ".", "p_cost", ")", "==", "2", ")", ":", "g", ".", "pwl_to_poly", "(", ")", "return", "generators" ]
Converts single-block piecewise-linear costs into linear polynomial.
[ "Converts", "single", "-", "block", "piecewise", "-", "linear", "costs", "into", "linear", "polynomial", "." ]
python
train
31.888889
androguard/androguard
androguard/core/bytecodes/apk.py
https://github.com/androguard/androguard/blob/984c0d981be2950cf0451e484f7b0d4d53bc4911/androguard/core/bytecodes/apk.py#L2084-L2104
def ensure_final_value(packageName, arsc, value): """Ensure incoming value is always the value, not the resid androguard will sometimes return the Android "resId" aka Resource ID instead of the actual value. This checks whether the value is actually a resId, then performs the Android Resource lookup as needed. """ if value: returnValue = value if value[0] == '@': # TODO: @packagename:DEADBEEF is not supported here! try: # can be a literal value or a resId res_id = int('0x' + value[1:], 16) res_id = arsc.get_id(packageName, res_id)[1] returnValue = arsc.get_string(packageName, res_id)[1] except (ValueError, TypeError): pass return returnValue return ''
[ "def", "ensure_final_value", "(", "packageName", ",", "arsc", ",", "value", ")", ":", "if", "value", ":", "returnValue", "=", "value", "if", "value", "[", "0", "]", "==", "'@'", ":", "# TODO: @packagename:DEADBEEF is not supported here!", "try", ":", "# can be a literal value or a resId", "res_id", "=", "int", "(", "'0x'", "+", "value", "[", "1", ":", "]", ",", "16", ")", "res_id", "=", "arsc", ".", "get_id", "(", "packageName", ",", "res_id", ")", "[", "1", "]", "returnValue", "=", "arsc", ".", "get_string", "(", "packageName", ",", "res_id", ")", "[", "1", "]", "except", "(", "ValueError", ",", "TypeError", ")", ":", "pass", "return", "returnValue", "return", "''" ]
Ensure incoming value is always the value, not the resid androguard will sometimes return the Android "resId" aka Resource ID instead of the actual value. This checks whether the value is actually a resId, then performs the Android Resource lookup as needed.
[ "Ensure", "incoming", "value", "is", "always", "the", "value", "not", "the", "resid" ]
python
train
38.047619
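A short illustration of the '@<hex resId>' branch above; the resource reference is made up.

value = '@7f040001'   # made-up reference of the '@<hex resId>' form
if value[0] == '@':
    res_id = int('0x' + value[1:], 16)
    print(hex(res_id))  # 0x7f040001 -> would then be resolved via the ARSC tables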
wummel/linkchecker
third_party/dnspython/dns/tokenizer.py
https://github.com/wummel/linkchecker/blob/c2ce810c3fb00b895a841a7be6b2e78c64e7b042/third_party/dnspython/dns/tokenizer.py#L267-L285
def skip_whitespace(self): """Consume input until a non-whitespace character is encountered. The non-whitespace character is then ungotten, and the number of whitespace characters consumed is returned. If the tokenizer is in multiline mode, then newlines are whitespace. @rtype: int """ skipped = 0 while True: c = self._get_char() if c != ' ' and c != '\t': if (c != '\n') or not self.multiline: self._unget_char(c) return skipped skipped += 1
[ "def", "skip_whitespace", "(", "self", ")", ":", "skipped", "=", "0", "while", "True", ":", "c", "=", "self", ".", "_get_char", "(", ")", "if", "c", "!=", "' '", "and", "c", "!=", "'\\t'", ":", "if", "(", "c", "!=", "'\\n'", ")", "or", "not", "self", ".", "multiline", ":", "self", ".", "_unget_char", "(", "c", ")", "return", "skipped", "skipped", "+=", "1" ]
Consume input until a non-whitespace character is encountered. The non-whitespace character is then ungotten, and the number of whitespace characters consumed is returned. If the tokenizer is in multiline mode, then newlines are whitespace. @rtype: int
[ "Consume", "input", "until", "a", "non", "-", "whitespace", "character", "is", "encountered", "." ]
python
train
30.842105
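A self-contained toy showing the unget-based whitespace skipper above; the one-deep unget buffer is an assumed simplification of dnspython's real tokenizer.

class MiniTokenizer:
    """Toy character source with one-deep unget, mirroring the pattern above."""

    def __init__(self, text, multiline=False):
        self.text = iter(text)
        self.ungotten = None
        self.multiline = multiline

    def _get_char(self):
        if self.ungotten is not None:
            c, self.ungotten = self.ungotten, None
            return c
        return next(self.text, '')

    def _unget_char(self, c):
        self.ungotten = c

    def skip_whitespace(self):
        skipped = 0
        while True:
            c = self._get_char()
            if c != ' ' and c != '\t':
                if (c != '\n') or not self.multiline:
                    self._unget_char(c)
                    return skipped
            skipped += 1

t = MiniTokenizer('   abc')
print(t.skip_whitespace(), t._get_char())  # 3 a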
cuducos/getgist
getgist/__init__.py
https://github.com/cuducos/getgist/blob/c70a0a9353eca43360b82c759d1e1514ec265d3b/getgist/__init__.py#L21-L30
def output(self, message, color=None):
        """ A helper to be used like print() or click's secho()
        tunneling all the outputs to sys.stdout or sys.stderr
        :param message: (str)
        :param color: (str) check click.secho() documentation
        :return: (None) prints to sys.stdout or sys.stderr
        """
        output_to = stderr if color == "red" else stdout
        secho(self.indent(message), fg=color, file=output_to)
[ "def", "output", "(", "self", ",", "message", ",", "color", "=", "None", ")", ":", "output_to", "=", "stderr", "if", "color", "==", "\"red\"", "else", "stdout", "secho", "(", "self", ".", "indent", "(", "message", ")", ",", "fg", "=", "color", ",", "file", "=", "output_to", ")" ]
A helper to be used like print() or click's secho()
        tunneling all the outputs to sys.stdout or sys.stderr
        :param message: (str)
        :param color: (str) check click.secho() documentation
        :return: (None) prints to sys.stdout or sys.stderr
[ "A", "helper", "to", "used", "like", "print", "()", "or", "click", "s", "secho", "()", "tunneling", "all", "the", "outputs", "to", "sys", ".", "stdout", "or", "sys", ".", "stderr", ":", "param", "message", ":", "(", "str", ")", ":", "param", "color", ":", "(", "str", ")", "check", "click", ".", "secho", "()", "documentation", ":", "return", ":", "(", "None", ")", "prints", "to", "sys", ".", "stdout", "or", "sys", ".", "stderr" ]
python
train
44.2
namuyan/nem-ed25519
nem_ed25519/utils.py
https://github.com/namuyan/nem-ed25519/blob/4f506a5335eb860a4cf1d102f76fcad93f9a55fc/nem_ed25519/utils.py#L190-L202
def scalarmult_B(e): """ Implements scalarmult(B, e) more efficiently. """ # scalarmult(B, l) is the identity e %= L P = IDENT for i in range(253): if e & 1: P = edwards_add(P=P, Q=Bpow[i]) e //= 2 assert e == 0, e return P
[ "def", "scalarmult_B", "(", "e", ")", ":", "# scalarmult(B, l) is the identity", "e", "%=", "L", "P", "=", "IDENT", "for", "i", "in", "range", "(", "253", ")", ":", "if", "e", "&", "1", ":", "P", "=", "edwards_add", "(", "P", "=", "P", ",", "Q", "=", "Bpow", "[", "i", "]", ")", "e", "//=", "2", "assert", "e", "==", "0", ",", "e", "return", "P" ]
Implements scalarmult(B, e) more efficiently.
[ "Implements", "scalarmult", "(", "B", "e", ")", "more", "efficiently", "." ]
python
train
21.153846
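A toy double-and-add illustration of the bit-scan loop above, using integer addition mod q in place of edwards_add and Bpow[i] = 2**i * B; the modulus is illustrative, not the ed25519 group order L.

# Toy double-and-add over integers mod q: group "addition" is + mod q,
# the base point B is just a number, and Bpow[i] holds 2**i * B.
q = 2**31 - 1          # illustrative modulus
B = 9
Bpow = [B]
for _ in range(252):
    Bpow.append((Bpow[-1] + Bpow[-1]) % q)    # "doubling"

def scalarmult_B_toy(e):
    e %= q
    P = 0                                     # identity of the additive group
    for i in range(253):
        if e & 1:
            P = (P + Bpow[i]) % q             # "edwards_add"
        e //= 2
    return P

assert scalarmult_B_toy(12345) == (12345 * B) % q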
progrium/skypipe
skypipe/cloud.py
https://github.com/progrium/skypipe/blob/6162610a1876282ff1cc8eeca6c8669b8f605482/skypipe/cloud.py#L65-L84
def setup_dotcloud_account(cli): """Gets user/pass for dotcloud, performs auth, and stores keys""" client = RESTClient(endpoint=cli.client.endpoint) client.authenticator = NullAuth() urlmap = client.get('/auth/discovery').item username = cli.prompt('dotCloud email') password = cli.prompt('Password', noecho=True) credential = {'token_url': urlmap.get('token'), 'key': CLIENT_KEY, 'secret': CLIENT_SECRET} try: token = cli.authorize_client(urlmap.get('token'), credential, username, password) except Exception as e: cli.die('Username and password do not match. Try again.') token['url'] = credential['token_url'] config = GlobalConfig() config.data = {'token': token} config.save() cli.global_config = GlobalConfig() # reload cli.setup_auth() cli.get_keys()
[ "def", "setup_dotcloud_account", "(", "cli", ")", ":", "client", "=", "RESTClient", "(", "endpoint", "=", "cli", ".", "client", ".", "endpoint", ")", "client", ".", "authenticator", "=", "NullAuth", "(", ")", "urlmap", "=", "client", ".", "get", "(", "'/auth/discovery'", ")", ".", "item", "username", "=", "cli", ".", "prompt", "(", "'dotCloud email'", ")", "password", "=", "cli", ".", "prompt", "(", "'Password'", ",", "noecho", "=", "True", ")", "credential", "=", "{", "'token_url'", ":", "urlmap", ".", "get", "(", "'token'", ")", ",", "'key'", ":", "CLIENT_KEY", ",", "'secret'", ":", "CLIENT_SECRET", "}", "try", ":", "token", "=", "cli", ".", "authorize_client", "(", "urlmap", ".", "get", "(", "'token'", ")", ",", "credential", ",", "username", ",", "password", ")", "except", "Exception", "as", "e", ":", "cli", ".", "die", "(", "'Username and password do not match. Try again.'", ")", "token", "[", "'url'", "]", "=", "credential", "[", "'token_url'", "]", "config", "=", "GlobalConfig", "(", ")", "config", ".", "data", "=", "{", "'token'", ":", "token", "}", "config", ".", "save", "(", ")", "cli", ".", "global_config", "=", "GlobalConfig", "(", ")", "# reload", "cli", ".", "setup_auth", "(", ")", "cli", ".", "get_keys", "(", ")" ]
Gets user/pass for dotcloud, performs auth, and stores keys
[ "Gets", "user", "/", "pass", "for", "dotcloud", "performs", "auth", "and", "stores", "keys" ]
python
train
41.35
SeabornGames/Table
seaborn_table/table.py
https://github.com/SeabornGames/Table/blob/0c474ef2fb00db0e7cf47e8af91e3556c2e7485a/seaborn_table/table.py#L368-L394
def rst_to_obj(cls, file_path=None, text='', columns=None,
                   remove_empty_rows=True, key_on=None,
                   deliminator=' ', eval_cells=True):
        """
        This will convert an rst file or text to a seaborn table
        :param file_path: str of the path to the file
        :param text: str of the rst text
        :param columns: list of str of columns to use
        :param remove_empty_rows: bool if True will remove empty rows
        :param key_on: list of str of columns to key on
        :param deliminator: str to use as a deliminator
        :param eval_cells: bool if True will try to evaluate numbers
        :return: SeabornTable
        """
        text = cls._get_lines(file_path, text)
        if len(text) == 1:
            text = text[0].split('\r')
        for i in [-1, 2, 0]:
            if not text[i].replace('=', '').strip():
                text.pop(i)  # get rid of bar
        lines = [row.split() for row in text]
        list_of_list = cls._merge_quoted_cells(lines, deliminator,
                                               remove_empty_rows, eval_cells,
                                               excel_boolean=False)
        return cls.list_to_obj(list_of_list, key_on=key_on, columns=columns)
[ "def", "rst_to_obj", "(", "cls", ",", "file_path", "=", "None", ",", "text", "=", "''", ",", "columns", "=", "None", ",", "remove_empty_rows", "=", "True", ",", "key_on", "=", "None", ",", "deliminator", "=", "' '", ",", "eval_cells", "=", "True", ")", ":", "text", "=", "cls", ".", "_get_lines", "(", "file_path", ",", "text", ")", "if", "len", "(", "text", ")", "==", "1", ":", "text", "=", "text", "[", "0", "]", ".", "split", "(", "'\\r'", ")", "for", "i", "in", "[", "-", "1", ",", "2", ",", "0", "]", ":", "if", "not", "text", "[", "i", "]", ".", "replace", "(", "'='", ",", "''", ")", ".", "strip", "(", ")", ":", "text", ".", "pop", "(", "i", ")", "# get rid of bar", "lines", "=", "[", "row", ".", "split", "(", ")", "for", "row", "in", "text", "]", "list_of_list", "=", "cls", ".", "_merge_quoted_cells", "(", "lines", ",", "deliminator", ",", "remove_empty_rows", ",", "eval_cells", ",", "excel_boolean", "=", "False", ")", "return", "cls", ".", "list_to_obj", "(", "list_of_list", ",", "key_on", "=", "key_on", ",", "columns", "=", "columns", ")" ]
This will convert an rst file or text to a seaborn table
        :param file_path: str of the path to the file
        :param text: str of the rst text
        :param columns: list of str of columns to use
        :param remove_empty_rows: bool if True will remove empty rows
        :param key_on: list of str of columns to key on
        :param deliminator: str to use as a deliminator
        :param eval_cells: bool if True will try to evaluate numbers
        :return: SeabornTable
[ "This", "will", "convert", "a", "rst", "file", "or", "text", "to", "a", "seaborn", "table", ":", "param", "file_path", ":", "str", "of", "the", "path", "to", "the", "file", ":", "param", "text", ":", "str", "of", "the", "csv", "text", ":", "param", "columns", ":", "list", "of", "str", "of", "columns", "to", "use", ":", "param", "remove_empty_rows", ":", "bool", "if", "True", "will", "remove", "empty", "rows", ":", "param", "key_on", ":", "list", "of", "str", "of", "columns", "to", "key", "on", ":", "param", "deliminator", ":", "str", "to", "use", "as", "a", "deliminator", ":", "param", "eval_cells", ":", "bool", "if", "True", "will", "try", "to", "evaluate", "numbers", ":", "return", ":", "SeabornTable" ]
python
train
45.888889
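A runnable sketch of the bar-stripping and row-splitting steps above on a tiny reStructuredText simple table (the _merge_quoted_cells step is omitted).

# Drop the '=' bar lines, then whitespace-split each remaining row.
text = """\
===== =====
name  score
===== =====
ann   3
bob   7
===== =====
""".splitlines()

for i in [-1, 2, 0]:              # bars sit on the last, third and first lines
    if not text[i].replace('=', '').strip():
        text.pop(i)

rows = [row.split() for row in text]
print(rows)  # [['name', 'score'], ['ann', '3'], ['bob', '7']]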
stanfordnlp/stanza
stanza/monitoring/progress.py
https://github.com/stanfordnlp/stanza/blob/920c55d8eaa1e7105971059c66eb448a74c100d6/stanza/monitoring/progress.py#L83-L88
def end_task(self): ''' Remove the current task from the stack. ''' self.progress(self.task_stack[-1].size) self.task_stack.pop()
[ "def", "end_task", "(", "self", ")", ":", "self", ".", "progress", "(", "self", ".", "task_stack", "[", "-", "1", "]", ".", "size", ")", "self", ".", "task_stack", ".", "pop", "(", ")" ]
Remove the current task from the stack.
[ "Remove", "the", "current", "task", "from", "the", "stack", "." ]
python
train
27.333333
mabuchilab/QNET
src/qnet/algebra/core/circuit_algebra.py
https://github.com/mabuchilab/QNET/blob/cc20d26dad78691d34c67173e5cd67dcac94208a/src/qnet/algebra/core/circuit_algebra.py#L1677-L1698
def prepare_adiabatic_limit(slh, k=None): """Prepare the adiabatic elimination on an SLH object Args: slh: The SLH object to take the limit for k: The scaling parameter $k \rightarrow \infty$. The default is a positive symbol 'k' Returns: tuple: The objects ``Y, A, B, F, G, N`` necessary to compute the limiting system. """ if k is None: k = symbols('k', positive=True) Ld = slh.L.dag() LdL = (Ld * slh.L)[0, 0] K = (-LdL / 2 + I * slh.H).expand().simplify_scalar() N = slh.S.dag() B, A, Y = K.series_expand(k, 0, 2) G, F = Ld.series_expand(k, 0, 1) return Y, A, B, F, G, N
[ "def", "prepare_adiabatic_limit", "(", "slh", ",", "k", "=", "None", ")", ":", "if", "k", "is", "None", ":", "k", "=", "symbols", "(", "'k'", ",", "positive", "=", "True", ")", "Ld", "=", "slh", ".", "L", ".", "dag", "(", ")", "LdL", "=", "(", "Ld", "*", "slh", ".", "L", ")", "[", "0", ",", "0", "]", "K", "=", "(", "-", "LdL", "/", "2", "+", "I", "*", "slh", ".", "H", ")", ".", "expand", "(", ")", ".", "simplify_scalar", "(", ")", "N", "=", "slh", ".", "S", ".", "dag", "(", ")", "B", ",", "A", ",", "Y", "=", "K", ".", "series_expand", "(", "k", ",", "0", ",", "2", ")", "G", ",", "F", "=", "Ld", ".", "series_expand", "(", "k", ",", "0", ",", "1", ")", "return", "Y", ",", "A", ",", "B", ",", "F", ",", "G", ",", "N" ]
Prepare the adiabatic elimination on an SLH object Args: slh: The SLH object to take the limit for k: The scaling parameter $k \rightarrow \infty$. The default is a positive symbol 'k' Returns: tuple: The objects ``Y, A, B, F, G, N`` necessary to compute the limiting system.
[ "Prepare", "the", "adiabatic", "elimination", "on", "an", "SLH", "object" ]
python
train
29.818182
bhmm/bhmm
bhmm/hmm/generic_sampled_hmm.py
https://github.com/bhmm/bhmm/blob/9804d18c2ddb684fb4d90b544cc209617a89ca9a/bhmm/hmm/generic_sampled_hmm.py#L185-L190
def eigenvectors_right_samples(self): r""" Samples of the right eigenvectors of the hidden transition matrix """ res = np.empty((self.nsamples, self.nstates, self.nstates), dtype=config.dtype) for i in range(self.nsamples): res[i, :, :] = self._sampled_hmms[i].eigenvectors_right return res
[ "def", "eigenvectors_right_samples", "(", "self", ")", ":", "res", "=", "np", ".", "empty", "(", "(", "self", ".", "nsamples", ",", "self", ".", "nstates", ",", "self", ".", "nstates", ")", ",", "dtype", "=", "config", ".", "dtype", ")", "for", "i", "in", "range", "(", "self", ".", "nsamples", ")", ":", "res", "[", "i", ",", ":", ",", ":", "]", "=", "self", ".", "_sampled_hmms", "[", "i", "]", ".", "eigenvectors_right", "return", "res" ]
r""" Samples of the right eigenvectors of the hidden transition matrix
[ "r", "Samples", "of", "the", "right", "eigenvectors", "of", "the", "hidden", "transition", "matrix" ]
python
train
54.833333
choderalab/pymbar
examples/heat-capacity/heat-capacity.py
https://github.com/choderalab/pymbar/blob/69d1f0ff680e9ac1c6a51a5a207ea28f3ed86740/examples/heat-capacity/heat-capacity.py#L110-L131
def read_simulation_temps(pathname,NumTemps): """Reads in the various temperatures from each TEMP#/simul.output file by knowing beforehand the total number of temperatures (parameter at top) """ print("--Reading temperatures from %s/..." % pathname) # Initialize return variable temps_from_file = numpy.zeros(NumTemps, numpy.float64) for k in range(NumTemps): infile = open(os.path.join(pathname,'TEMP'+ str(k), 'simul'+str(k)+'.output'), 'r') lines = infile.readlines() infile.close() for line in lines: if (line[0:11] == 'Temperature'): vals = line.split(':') break temps_from_file[k] = float(vals[1]) return temps_from_file
[ "def", "read_simulation_temps", "(", "pathname", ",", "NumTemps", ")", ":", "print", "(", "\"--Reading temperatures from %s/...\"", "%", "pathname", ")", "# Initialize return variable", "temps_from_file", "=", "numpy", ".", "zeros", "(", "NumTemps", ",", "numpy", ".", "float64", ")", "for", "k", "in", "range", "(", "NumTemps", ")", ":", "infile", "=", "open", "(", "os", ".", "path", ".", "join", "(", "pathname", ",", "'TEMP'", "+", "str", "(", "k", ")", ",", "'simul'", "+", "str", "(", "k", ")", "+", "'.output'", ")", ",", "'r'", ")", "lines", "=", "infile", ".", "readlines", "(", ")", "infile", ".", "close", "(", ")", "for", "line", "in", "lines", ":", "if", "(", "line", "[", "0", ":", "11", "]", "==", "'Temperature'", ")", ":", "vals", "=", "line", ".", "split", "(", "':'", ")", "break", "temps_from_file", "[", "k", "]", "=", "float", "(", "vals", "[", "1", "]", ")", "return", "temps_from_file" ]
Reads in the various temperatures from each TEMP#/simul.output file by knowing beforehand the total number of temperatures (parameter at top)
[ "Reads", "in", "the", "various", "temperatures", "from", "each", "TEMP#", "/", "simul", ".", "output", "file", "by", "knowing", "beforehand", "the", "total", "number", "of", "temperatures", "(", "parameter", "at", "top", ")" ]
python
train
33.181818
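A minimal sketch of the 'Temperature:' line scan above, run on in-memory lines instead of simul output files.

lines = ['Some header', 'Temperature: 300.0', 'Steps: 1000']
for line in lines:
    if line[0:11] == 'Temperature':
        vals = line.split(':')
        break
print(float(vals[1]))  # 300.0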
Trax-air/swagger-parser
swagger_parser/swagger_parser.py
https://github.com/Trax-air/swagger-parser/blob/d97f962a417e76320c59c33dcb223e4373e516d5/swagger_parser/swagger_parser.py#L317-L330
def _example_from_allof(self, prop_spec): """Get the examples from an allOf section. Args: prop_spec: property specification you want an example of. Returns: An example dict """ example_dict = {} for definition in prop_spec['allOf']: update = self.get_example_from_prop_spec(definition, True) example_dict.update(update) return example_dict
[ "def", "_example_from_allof", "(", "self", ",", "prop_spec", ")", ":", "example_dict", "=", "{", "}", "for", "definition", "in", "prop_spec", "[", "'allOf'", "]", ":", "update", "=", "self", ".", "get_example_from_prop_spec", "(", "definition", ",", "True", ")", "example_dict", ".", "update", "(", "update", ")", "return", "example_dict" ]
Get the examples from an allOf section. Args: prop_spec: property specification you want an example of. Returns: An example dict
[ "Get", "the", "examples", "from", "an", "allOf", "section", "." ]
python
train
30.928571
idmillington/layout
layout/rl_utils.py
https://github.com/idmillington/layout/blob/c452d1d7a74c9a74f7639c1b49e2a41c4e354bb5/layout/rl_utils.py#L98-L127
def draw_polygon( self, *pts, close_path=True, stroke=None, stroke_width=1, stroke_dash=None, fill=None ) -> None: """Draws the given polygon.""" c = self.c c.saveState() if stroke is not None: c.setStrokeColorRGB(*stroke) c.setLineWidth(stroke_width) c.setDash(stroke_dash) if fill is not None: c.setFillColorRGB(*fill) p = c.beginPath() fn = p.moveTo for x,y in zip(*[iter(pts)]*2): fn(x, y) fn = p.lineTo if close_path: p.close() c.drawPath(p, stroke=(stroke is not None), fill=(fill is not None)) c.restoreState()
[ "def", "draw_polygon", "(", "self", ",", "*", "pts", ",", "close_path", "=", "True", ",", "stroke", "=", "None", ",", "stroke_width", "=", "1", ",", "stroke_dash", "=", "None", ",", "fill", "=", "None", ")", "->", "None", ":", "c", "=", "self", ".", "c", "c", ".", "saveState", "(", ")", "if", "stroke", "is", "not", "None", ":", "c", ".", "setStrokeColorRGB", "(", "*", "stroke", ")", "c", ".", "setLineWidth", "(", "stroke_width", ")", "c", ".", "setDash", "(", "stroke_dash", ")", "if", "fill", "is", "not", "None", ":", "c", ".", "setFillColorRGB", "(", "*", "fill", ")", "p", "=", "c", ".", "beginPath", "(", ")", "fn", "=", "p", ".", "moveTo", "for", "x", ",", "y", "in", "zip", "(", "*", "[", "iter", "(", "pts", ")", "]", "*", "2", ")", ":", "fn", "(", "x", ",", "y", ")", "fn", "=", "p", ".", "lineTo", "if", "close_path", ":", "p", ".", "close", "(", ")", "c", ".", "drawPath", "(", "p", ",", "stroke", "=", "(", "stroke", "is", "not", "None", ")", ",", "fill", "=", "(", "fill", "is", "not", "None", ")", ")", "c", ".", "restoreState", "(", ")" ]
Draws the given polygon.
[ "Draws", "the", "given", "polygon", "." ]
python
train
25.3
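A standalone sketch of the same beginPath/moveTo/lineTo pattern on a plain reportlab canvas; the coordinates, colors, and output filename are illustrative assumptions.

from reportlab.pdfgen import canvas

c = canvas.Canvas('triangle.pdf')        # output filename is illustrative
c.saveState()
c.setStrokeColorRGB(0.0, 0.0, 0.0)
c.setLineWidth(1)
c.setFillColorRGB(0.9, 0.2, 0.2)

pts = (100, 100, 300, 100, 200, 250)     # flat x, y pairs, as in draw_polygon
p = c.beginPath()
fn = p.moveTo
for x, y in zip(*[iter(pts)] * 2):       # pair up the flat coordinate list
    fn(x, y)
    fn = p.lineTo
p.close()
c.drawPath(p, stroke=1, fill=1)
c.restoreState()
c.save()                                  # writes triangle.pdf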
blockstack/blockstack-core
blockstack/blockstackd.py
https://github.com/blockstack/blockstack-core/blob/1dcfdd39b152d29ce13e736a6a1a0981401a0505/blockstack/blockstackd.py#L2614-L2638
def server_shutdown(server_state): """ Shut down server subsystems. Remove PID file. """ set_running( False ) # stop API servers rpc_stop(server_state) api_stop(server_state) # stop atlas node server_atlas_shutdown(server_state) # stopping GC gc_stop() # clear PID file try: if os.path.exists(server_state['pid_file']): os.unlink(server_state['pid_file']) except: pass return True
[ "def", "server_shutdown", "(", "server_state", ")", ":", "set_running", "(", "False", ")", "# stop API servers", "rpc_stop", "(", "server_state", ")", "api_stop", "(", "server_state", ")", "# stop atlas node", "server_atlas_shutdown", "(", "server_state", ")", "# stopping GC", "gc_stop", "(", ")", "# clear PID file", "try", ":", "if", "os", ".", "path", ".", "exists", "(", "server_state", "[", "'pid_file'", "]", ")", ":", "os", ".", "unlink", "(", "server_state", "[", "'pid_file'", "]", ")", "except", ":", "pass", "return", "True" ]
Shut down server subsystems. Remove PID file.
[ "Shut", "down", "server", "subsystems", ".", "Remove", "PID", "file", "." ]
python
train
18.12
hyperledger/indy-plenum
stp_zmq/authenticator.py
https://github.com/hyperledger/indy-plenum/blob/dcd144e238af7f17a869ffc9412f13dc488b7020/stp_zmq/authenticator.py#L88-L93
def start(self): """Start ZAP authentication""" super().start() self.__poller = zmq.asyncio.Poller() self.__poller.register(self.zap_socket, zmq.POLLIN) self.__task = asyncio.ensure_future(self.__handle_zap())
[ "def", "start", "(", "self", ")", ":", "super", "(", ")", ".", "start", "(", ")", "self", ".", "__poller", "=", "zmq", ".", "asyncio", ".", "Poller", "(", ")", "self", ".", "__poller", ".", "register", "(", "self", ".", "zap_socket", ",", "zmq", ".", "POLLIN", ")", "self", ".", "__task", "=", "asyncio", ".", "ensure_future", "(", "self", ".", "__handle_zap", "(", ")", ")" ]
Start ZAP authentication
[ "Start", "ZAP", "authentication" ]
python
train
40.666667
manns/pyspread
pyspread/src/gui/_toolbars.py
https://github.com/manns/pyspread/blob/0e2fd44c2e0f06605efc3058c20a43a8c1f9e7e0/pyspread/src/gui/_toolbars.py#L148-L152
def OnTool(self, event): """Toolbar event handler""" msgtype = self.ids_msgs[event.GetId()] post_command_event(self, msgtype)
[ "def", "OnTool", "(", "self", ",", "event", ")", ":", "msgtype", "=", "self", ".", "ids_msgs", "[", "event", ".", "GetId", "(", ")", "]", "post_command_event", "(", "self", ",", "msgtype", ")" ]
Toolbar event handler
[ "Toolbar", "event", "handler" ]
python
train
29.2
refinery29/chassis
chassis/services/dependency_injection/__init__.py
https://github.com/refinery29/chassis/blob/1238d5214cbb8f3e1fe7c0dc2fa72f45bf085192/chassis/services/dependency_injection/__init__.py#L264-L269
def _replace_scalar(self, scalar): """ Replace scalar name with scalar value """ if not is_arg_scalar(scalar): return scalar name = scalar[1:] return self.get_scalar_value(name)
[ "def", "_replace_scalar", "(", "self", ",", "scalar", ")", ":", "if", "not", "is_arg_scalar", "(", "scalar", ")", ":", "return", "scalar", "name", "=", "scalar", "[", "1", ":", "]", "return", "self", ".", "get_scalar_value", "(", "name", ")" ]
Replace scalar name with scalar value
[ "Replace", "scalar", "name", "with", "scalar", "value" ]
python
train
36
oauthlib/oauthlib
oauthlib/oauth2/rfc6749/clients/base.py
https://github.com/oauthlib/oauthlib/blob/30321dd3c0ca784d3508a1970cf90d9f76835c79/oauthlib/oauth2/rfc6749/clients/base.py#L425-L446
def prepare_refresh_body(self, body='', refresh_token=None, scope=None, **kwargs): """Prepare an access token request, using a refresh token. If the authorization server issued a refresh token to the client, the client makes a refresh request to the token endpoint by adding the following parameters using the "application/x-www-form-urlencoded" format in the HTTP request entity-body: grant_type REQUIRED. Value MUST be set to "refresh_token". refresh_token REQUIRED. The refresh token issued to the client. scope OPTIONAL. The scope of the access request as described by Section 3.3. The requested scope MUST NOT include any scope not originally granted by the resource owner, and if omitted is treated as equal to the scope originally granted by the resource owner. """ refresh_token = refresh_token or self.refresh_token return prepare_token_request(self.refresh_token_key, body=body, scope=scope, refresh_token=refresh_token, **kwargs)
[ "def", "prepare_refresh_body", "(", "self", ",", "body", "=", "''", ",", "refresh_token", "=", "None", ",", "scope", "=", "None", ",", "*", "*", "kwargs", ")", ":", "refresh_token", "=", "refresh_token", "or", "self", ".", "refresh_token", "return", "prepare_token_request", "(", "self", ".", "refresh_token_key", ",", "body", "=", "body", ",", "scope", "=", "scope", ",", "refresh_token", "=", "refresh_token", ",", "*", "*", "kwargs", ")" ]
Prepare an access token request, using a refresh token. If the authorization server issued a refresh token to the client, the client makes a refresh request to the token endpoint by adding the following parameters using the "application/x-www-form-urlencoded" format in the HTTP request entity-body: grant_type REQUIRED. Value MUST be set to "refresh_token". refresh_token REQUIRED. The refresh token issued to the client. scope OPTIONAL. The scope of the access request as described by Section 3.3. The requested scope MUST NOT include any scope not originally granted by the resource owner, and if omitted is treated as equal to the scope originally granted by the resource owner.
[ "Prepare", "an", "access", "token", "request", "using", "a", "refresh", "token", "." ]
python
train
52.818182
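A usage sketch of the method above via oauthlib's WebApplicationClient, which inherits it from the base Client; the token values are placeholders, and the parameter order in the resulting body may vary.

from oauthlib.oauth2 import WebApplicationClient

# prepare_refresh_body() only builds the urlencoded request body;
# it does not talk to the authorization server.
client = WebApplicationClient('my-client-id',
                              refresh_token='stored-refresh-token')
body = client.prepare_refresh_body(scope='read')
print(body)  # e.g. grant_type=refresh_token&refresh_token=stored-refresh-token&scope=read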
bwhite/hadoopy
hadoopy/_freeze.py
https://github.com/bwhite/hadoopy/blob/ff39b4e6d4e6efaf1f571cf0f2c0e0d7ab28c2d6/hadoopy/_freeze.py#L70-L86
def _md5_file(fn, block_size=1048576):
    """Builds the MD5 of a file block by block

    Args:
        fn: File path
        block_size: Size of the blocks to consider (default 1048576)

    Returns:
        File MD5
    """
    h = hashlib.md5()
    with open(fn, 'rb') as fp:
        d = 1
        while d:
            d = fp.read(block_size)
            h.update(d)
    return h.hexdigest()
[ "def", "_md5_file", "(", "fn", ",", "block_size", "=", "1048576", ")", ":", "h", "=", "hashlib", ".", "md5", "(", ")", "with", "open", "(", "fn", ")", "as", "fp", ":", "d", "=", "1", "while", "d", ":", "d", "=", "fp", ".", "read", "(", "block_size", ")", "h", ".", "update", "(", "d", ")", "return", "h", ".", "hexdigest", "(", ")" ]
Builds the MD5 of a file block by block Args: fn: File path block_size: Size of the blocks to consider (default 1048576) Returns: File MD5
[ "Builds", "the", "MD5", "of", "a", "file", "block", "by", "block" ]
python
train
21.941176
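A Python 3 variant of the same block-wise hashing idea, using binary mode and an iter() sentinel instead of priming the loop with a dummy value.

import hashlib

def md5_file(fn, block_size=1048576):
    # Read fixed-size binary chunks until the b'' sentinel is returned.
    h = hashlib.md5()
    with open(fn, 'rb') as fp:
        for block in iter(lambda: fp.read(block_size), b''):
            h.update(block)
    return h.hexdigest()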
UCBerkeleySETI/blimpy
blimpy/sigproc.py
https://github.com/UCBerkeleySETI/blimpy/blob/b8822d3e3e911944370d84371a91fa0c29e9772e/blimpy/sigproc.py#L143-L157
def is_filterbank(filename): """ Open file and confirm if it is a filterbank file or not. """ with open(filename, 'rb') as fh: is_fil = True # Check this is a blimpy file try: keyword, value, idx = read_next_header_keyword(fh) try: assert keyword == b'HEADER_START' except AssertionError: is_fil = False except KeyError: is_fil = False return is_fil
[ "def", "is_filterbank", "(", "filename", ")", ":", "with", "open", "(", "filename", ",", "'rb'", ")", "as", "fh", ":", "is_fil", "=", "True", "# Check this is a blimpy file", "try", ":", "keyword", ",", "value", ",", "idx", "=", "read_next_header_keyword", "(", "fh", ")", "try", ":", "assert", "keyword", "==", "b'HEADER_START'", "except", "AssertionError", ":", "is_fil", "=", "False", "except", "KeyError", ":", "is_fil", "=", "False", "return", "is_fil" ]
Open file and confirm if it is a filterbank file or not.
[ "Open", "file", "and", "confirm", "if", "it", "is", "a", "filterbank", "file", "or", "not", "." ]
python
test
30.933333
googledatalab/pydatalab
datalab/storage/_bucket.py
https://github.com/googledatalab/pydatalab/blob/d9031901d5bca22fe0d5925d204e6698df9852e1/datalab/storage/_bucket.py#L134-L144
def item(self, key): """Retrieves an Item object for the specified key in this bucket. The item need not exist. Args: key: the key of the item within the bucket. Returns: An Item instance representing the specified key. """ return _item.Item(self._name, key, context=self._context)
[ "def", "item", "(", "self", ",", "key", ")", ":", "return", "_item", ".", "Item", "(", "self", ".", "_name", ",", "key", ",", "context", "=", "self", ".", "_context", ")" ]
Retrieves an Item object for the specified key in this bucket. The item need not exist. Args: key: the key of the item within the bucket. Returns: An Item instance representing the specified key.
[ "Retrieves", "an", "Item", "object", "for", "the", "specified", "key", "in", "this", "bucket", "." ]
python
train
28.090909
saltstack/salt
salt/modules/win_service.py
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/win_service.py#L425-L444
def get_all(): ''' Return all installed services Returns: list: Returns a list of all services on the system. CLI Example: .. code-block:: bash salt '*' service.get_all ''' services = _get_services() ret = set() for service in services: ret.add(service['ServiceName']) return sorted(ret)
[ "def", "get_all", "(", ")", ":", "services", "=", "_get_services", "(", ")", "ret", "=", "set", "(", ")", "for", "service", "in", "services", ":", "ret", ".", "add", "(", "service", "[", "'ServiceName'", "]", ")", "return", "sorted", "(", "ret", ")" ]
Return all installed services Returns: list: Returns a list of all services on the system. CLI Example: .. code-block:: bash salt '*' service.get_all
[ "Return", "all", "installed", "services" ]
python
train
16.9
wakatime/wakatime
wakatime/packages/pygments/formatters/img.py
https://github.com/wakatime/wakatime/blob/74519ace04e8472f3a3993269963732b9946a01d/wakatime/packages/pygments/formatters/img.py#L527-L555
def format(self, tokensource, outfile): """ Format ``tokensource``, an iterable of ``(tokentype, tokenstring)`` tuples and write it into ``outfile``. This implementation calculates where it should draw each token on the pixmap, then calculates the required pixmap size and draws the items. """ self._create_drawables(tokensource) self._draw_line_numbers() im = Image.new( 'RGB', self._get_image_size(self.maxcharno, self.maxlineno), self.background_color ) self._paint_line_number_bg(im) draw = ImageDraw.Draw(im) # Highlight if self.hl_lines: x = self.image_pad + self.line_number_width - self.line_number_pad + 1 recth = self._get_line_height() rectw = im.size[0] - x for linenumber in self.hl_lines: y = self._get_line_y(linenumber - 1) draw.rectangle([(x, y), (x + rectw, y + recth)], fill=self.hl_color) for pos, value, font, kw in self.drawables: draw.text(pos, value, font=font, **kw) im.save(outfile, self.image_format.upper())
[ "def", "format", "(", "self", ",", "tokensource", ",", "outfile", ")", ":", "self", ".", "_create_drawables", "(", "tokensource", ")", "self", ".", "_draw_line_numbers", "(", ")", "im", "=", "Image", ".", "new", "(", "'RGB'", ",", "self", ".", "_get_image_size", "(", "self", ".", "maxcharno", ",", "self", ".", "maxlineno", ")", ",", "self", ".", "background_color", ")", "self", ".", "_paint_line_number_bg", "(", "im", ")", "draw", "=", "ImageDraw", ".", "Draw", "(", "im", ")", "# Highlight", "if", "self", ".", "hl_lines", ":", "x", "=", "self", ".", "image_pad", "+", "self", ".", "line_number_width", "-", "self", ".", "line_number_pad", "+", "1", "recth", "=", "self", ".", "_get_line_height", "(", ")", "rectw", "=", "im", ".", "size", "[", "0", "]", "-", "x", "for", "linenumber", "in", "self", ".", "hl_lines", ":", "y", "=", "self", ".", "_get_line_y", "(", "linenumber", "-", "1", ")", "draw", ".", "rectangle", "(", "[", "(", "x", ",", "y", ")", ",", "(", "x", "+", "rectw", ",", "y", "+", "recth", ")", "]", ",", "fill", "=", "self", ".", "hl_color", ")", "for", "pos", ",", "value", ",", "font", ",", "kw", "in", "self", ".", "drawables", ":", "draw", ".", "text", "(", "pos", ",", "value", ",", "font", "=", "font", ",", "*", "*", "kw", ")", "im", ".", "save", "(", "outfile", ",", "self", ".", "image_format", ".", "upper", "(", ")", ")" ]
Format ``tokensource``, an iterable of ``(tokentype, tokenstring)`` tuples and write it into ``outfile``. This implementation calculates where it should draw each token on the pixmap, then calculates the required pixmap size and draws the items.
[ "Format", "tokensource", "an", "iterable", "of", "(", "tokentype", "tokenstring", ")", "tuples", "and", "write", "it", "into", "outfile", "." ]
python
train
41.206897
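A hedged usage sketch of pygments' ImageFormatter, whose format() method is shown above; requires Pillow, and the snippet and output filename are illustrative.

from pygments import highlight
from pygments.lexers import PythonLexer
from pygments.formatters import ImageFormatter   # needs Pillow installed

code = "def add(a, b):\n    return a + b\n"
# hl_lines feeds the highlight-rectangle branch of format() above
formatter = ImageFormatter(hl_lines=[2])
with open('snippet.png', 'wb') as out:
    highlight(code, PythonLexer(), formatter, out)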
CalebBell/thermo
thermo/eos.py
https://github.com/CalebBell/thermo/blob/3857ed023a3e64fd3039a32d53576c24990ef1c3/thermo/eos.py#L1118-L1141
def Trebble_Bishnoi(self, T, full=True, quick=True): r'''Method to calculate `a_alpha` and its first and second derivatives according to Trebble and Bishnoi (1987) [1]_. Returns `a_alpha`, `da_alpha_dT`, and `d2a_alpha_dT2`. See `GCEOS.a_alpha_and_derivatives` for more documentation. One coefficient needed. .. math:: \alpha = e^{c_{1} \left(- \frac{T}{Tc} + 1\right)} References ---------- .. [1] Trebble, M. A., and P. R. Bishnoi. "Development of a New Four- Parameter Cubic Equation of State." Fluid Phase Equilibria 35, no. 1 (September 1, 1987): 1-18. doi:10.1016/0378-3812(87)80001-8. ''' c1 = self.alpha_function_coeffs T, Tc, a = self.T, self.Tc, self.a a_alpha = a*exp(c1*(-T/Tc + 1)) if not full: return a_alpha else: da_alpha_dT = a*-c1*exp(c1*(-T/Tc + 1))/Tc d2a_alpha_dT2 = a*c1**2*exp(-c1*(T/Tc - 1))/Tc**2 return a_alpha, da_alpha_dT, d2a_alpha_dT2
[ "def", "Trebble_Bishnoi", "(", "self", ",", "T", ",", "full", "=", "True", ",", "quick", "=", "True", ")", ":", "c1", "=", "self", ".", "alpha_function_coeffs", "T", ",", "Tc", ",", "a", "=", "self", ".", "T", ",", "self", ".", "Tc", ",", "self", ".", "a", "a_alpha", "=", "a", "*", "exp", "(", "c1", "*", "(", "-", "T", "/", "Tc", "+", "1", ")", ")", "if", "not", "full", ":", "return", "a_alpha", "else", ":", "da_alpha_dT", "=", "a", "*", "-", "c1", "*", "exp", "(", "c1", "*", "(", "-", "T", "/", "Tc", "+", "1", ")", ")", "/", "Tc", "d2a_alpha_dT2", "=", "a", "*", "c1", "**", "2", "*", "exp", "(", "-", "c1", "*", "(", "T", "/", "Tc", "-", "1", ")", ")", "/", "Tc", "**", "2", "return", "a_alpha", ",", "da_alpha_dT", ",", "d2a_alpha_dT2" ]
r'''Method to calculate `a_alpha` and its first and second derivatives according to Trebble and Bishnoi (1987) [1]_. Returns `a_alpha`, `da_alpha_dT`, and `d2a_alpha_dT2`. See `GCEOS.a_alpha_and_derivatives` for more documentation. One coefficient needed. .. math:: \alpha = e^{c_{1} \left(- \frac{T}{Tc} + 1\right)} References ---------- .. [1] Trebble, M. A., and P. R. Bishnoi. "Development of a New Four- Parameter Cubic Equation of State." Fluid Phase Equilibria 35, no. 1 (September 1, 1987): 1-18. doi:10.1016/0378-3812(87)80001-8.
[ "r", "Method", "to", "calculate", "a_alpha", "and", "its", "first", "and", "second", "derivatives", "according", "to", "Trebble", "and", "Bishnoi", "(", "1987", ")", "[", "1", "]", "_", ".", "Returns", "a_alpha", "da_alpha_dT", "and", "d2a_alpha_dT2", ".", "See", "GCEOS", ".", "a_alpha_and_derivatives", "for", "more", "documentation", ".", "One", "coefficient", "needed", ".", "..", "math", "::", "\\", "alpha", "=", "e^", "{", "c_", "{", "1", "}", "\\", "left", "(", "-", "\\", "frac", "{", "T", "}", "{", "Tc", "}", "+", "1", "\\", "right", ")", "}" ]
python
valid
43.666667
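A worked numeric check of the alpha function above: evaluate a_alpha and its analytic first derivative for made-up constants (not fitted values for any real fluid) and compare against a central finite difference.

from math import exp

a, c1, Tc, T = 1.2, 0.75, 500.0, 400.0   # illustrative constants

a_alpha = a * exp(c1 * (-T / Tc + 1))
da_alpha_dT = a * -c1 * exp(c1 * (-T / Tc + 1)) / Tc

# Central finite difference should agree closely with the analytic form
h = 1e-6
fd = (a * exp(c1 * (-(T + h) / Tc + 1))
      - a * exp(c1 * (-(T - h) / Tc + 1))) / (2 * h)
assert abs(da_alpha_dT - fd) < 1e-8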
penguinmenac3/starttf
starttf/estimators/tf_estimator.py
https://github.com/penguinmenac3/starttf/blob/f4086489d169757c0504e822165db2fea534b944/starttf/estimators/tf_estimator.py#L78-L201
def easy_train_and_evaluate(hyper_params, Model=None, create_loss=None,
                            training_data=None, validation_data=None,
                            inline_plotting=False, session_config=None, log_suffix=None, continue_training=False,
                            continue_with_specific_checkpointpath=None):
    """
    Train and evaluate your model without any boilerplate code.

    1) Write your data using the starttf.tfrecords.autorecords.write_data method.
    2) Create your hyper parameter file containing all required fields and then load it using
        the starttf.utils.hyper_params.load_params method.
        Minimal Sample Hyperparams File:
        {"train": {
            "learning_rate": {
                "type": "const",
                "start_value": 0.001
            },
            "optimizer": {
                "type": "adam"
            },
            "batch_size": 1024,
            "iters": 10000,
            "summary_iters": 100,
            "checkpoint_path": "checkpoints/mnist",
            "tf_records_path": "data/.records/mnist"
            }
        }
    3) Pass everything required to this method and that's it.
    :param hyper_params: The hyper parameters object loaded via starttf.utils.hyper_params.load_params
    :param Model: A keras model.
    :param create_loss: A create_loss function like that in starttf.examples.mnist.loss.
    :param inline_plotting: When you are using jupyter notebooks you can tell it to plot the loss directly inside the notebook.
    :param continue_training: Bool, continue last training in the checkpoint path specified in the hyper parameters.
    :param session_config: A configuration for the session.
    :param log_suffix: A suffix for the log folder, so you can remember what was special about the run.
    :return:
    """
    time_stamp = datetime.datetime.fromtimestamp(time.time()).strftime('%Y-%m-%d_%H.%M.%S')
    chkpt_path = hyper_params.train.checkpoint_path + "/" + time_stamp
    if log_suffix is not None:
        chkpt_path = chkpt_path + "_" + log_suffix

    if session_config is None:
        session_config = get_default_config()

    if continue_with_specific_checkpointpath:
        chkpt_path = hyper_params.train.checkpoint_path + "/" + continue_with_specific_checkpointpath
        print("Continue with checkpoint: {}".format(chkpt_path))
    elif continue_training:
        chkpts = sorted([name for name in os.listdir(hyper_params.train.checkpoint_path)])
        chkpt_path = hyper_params.train.checkpoint_path + "/" + chkpts[-1]
        print("Latest found checkpoint: {}".format(chkpt_path))

    if not os.path.exists(chkpt_path):
        os.makedirs(chkpt_path)

    # If hyperparam config is used, load and save code
    if Model is None:
        model_backup = os.path.join(chkpt_path, "model.py")
        copyfile(hyper_params["arch"]["model"].replace(".", os.sep), model_backup)
        arch_model = __import__(hyper_params["arch"]["model"], fromlist=["Model"])
        Model = arch_model.Model
    if create_loss is None:
        loss_backup = os.path.join(chkpt_path, "loss.py")
        copyfile(hyper_params["arch"]["loss"].replace(".", os.sep), loss_backup)
        arch_loss = __import__(hyper_params["arch"]["loss"], fromlist=["create_loss"])
        create_loss = arch_loss.create_loss

    # Load training data
    print("Load data")
    if training_data is None:
        training_data = create_input_fn(os.path.join(hyper_params.train.tf_records_path, PHASE_TRAIN),
                                        hyper_params.train.batch_size)
    if validation_data is None:
        validation_data = create_input_fn(os.path.join(hyper_params.train.tf_records_path, PHASE_VALIDATION),
                                          hyper_params.train.batch_size)

    # Write hyper parameters to be able to track what config you had. 
with open(chkpt_path + "/hyperparameters.json", "w") as json_file: json_file.write(json.dumps(hyper_params.to_dict(), indent=4, sort_keys=True)) estimator_spec = create_tf_estimator_spec(chkpt_path, Model, create_loss, inline_plotting) # Create a run configuration config = None if hyper_params.train.get("distributed", False): distribution = tf.contrib.distribute.MirroredStrategy() config = tf.estimator.RunConfig(model_dir=chkpt_path, save_summary_steps=hyper_params.train.summary_steps, train_distribute=distribution, save_checkpoints_steps=hyper_params.train.save_checkpoint_steps, keep_checkpoint_max=hyper_params.train.keep_checkpoint_max, keep_checkpoint_every_n_hours=1) else: config = tf.estimator.RunConfig(session_config=session_config, model_dir=chkpt_path, save_summary_steps=hyper_params.train.summary_steps, save_checkpoints_steps=hyper_params.train.save_checkpoint_steps, keep_checkpoint_max=hyper_params.train.keep_checkpoint_max, keep_checkpoint_every_n_hours=1) # Create the estimator. estimator = None if hyper_params.train.get("warm_start_checkpoint", None) is not None: warm_start_dir = hyper_params.train.warm_start_checkpoint estimator = tf.estimator.Estimator(estimator_spec, config=config, warm_start_from=warm_start_dir, params=hyper_params) else: estimator = tf.estimator.Estimator(estimator_spec, config=config, params=hyper_params) # Specify training and actually train. throttle_secs = hyper_params.train.get("throttle_secs", 120) train_spec = tf.estimator.TrainSpec(input_fn=training_data, max_steps=hyper_params.train.steps) eval_spec = tf.estimator.EvalSpec(input_fn=validation_data, throttle_secs=throttle_secs) print("Start training") tf.estimator.train_and_evaluate(estimator, train_spec, eval_spec) return estimator
[ "def", "easy_train_and_evaluate", "(", "hyper_params", ",", "Model", "=", "None", ",", "create_loss", "=", "None", ",", "training_data", "=", "None", ",", "validation_data", "=", "None", ",", "inline_plotting", "=", "False", ",", "session_config", "=", "None", ",", "log_suffix", "=", "None", ",", "continue_training", "=", "False", ",", "continue_with_specific_checkpointpath", "=", "None", ")", ":", "time_stamp", "=", "datetime", ".", "datetime", ".", "fromtimestamp", "(", "time", ".", "time", "(", ")", ")", ".", "strftime", "(", "'%Y-%m-%d_%H.%M.%S'", ")", "chkpt_path", "=", "hyper_params", ".", "train", ".", "checkpoint_path", "+", "\"/\"", "+", "time_stamp", "if", "log_suffix", "is", "not", "None", ":", "chkpt_path", "=", "chkpt_path", "+", "\"_\"", "+", "log_suffix", "if", "session_config", "is", "None", ":", "session_config", "=", "get_default_config", "(", ")", "if", "continue_with_specific_checkpointpath", ":", "chkpt_path", "=", "hyper_params", ".", "train", ".", "checkpoint_path", "+", "\"/\"", "+", "continue_with_specific_checkpointpath", "print", "(", "\"Continue with checkpoint: {}\"", ".", "format", "(", "chkpt_path", ")", ")", "elif", "continue_training", ":", "chkpts", "=", "sorted", "(", "[", "name", "for", "name", "in", "os", ".", "listdir", "(", "hyper_params", ".", "train", ".", "checkpoint_path", ")", "]", ")", "chkpt_path", "=", "hyper_params", ".", "train", ".", "checkpoint_path", "+", "\"/\"", "+", "chkpts", "[", "-", "1", "]", "print", "(", "\"Latest found checkpoint: {}\"", ".", "format", "(", "chkpt_path", ")", ")", "if", "not", "os", ".", "path", ".", "exists", "(", "chkpt_path", ")", ":", "os", ".", "makedirs", "(", "chkpt_path", ")", "# If hyperparam config is used, load and save code", "if", "Model", "is", "None", ":", "model_backup", "=", "os", ".", "path", ".", "join", "(", "chkpt_path", ",", "\"model.py\"", ")", "copyfile", "(", "hyperparams", "[", "\"arch\"", "]", "[", "\"model\"", "]", ".", "replace", "(", "\".\"", ",", "os", ".", "sep", ")", ",", "model_backup", ")", "arch_model", "=", "__import__", "(", "hyperparams", "[", "\"arch\"", "]", "[", "\"model\"", "]", ",", "fromlist", "=", "[", "\"Model\"", "]", ")", "Model", "=", "arch_model", ".", "Model", "if", "create_loss", "is", "None", ":", "loss_backup", "=", "os", ".", "path", ".", "join", "(", "chkpt_path", ",", "\"loss.py\"", ")", "copyfile", "(", "hyperparams", "[", "\"arch\"", "]", "[", "\"loss\"", "]", ".", "replace", "(", "\".\"", ",", "os", ".", "sep", ")", ",", "loss_backup", ")", "arch_loss", "=", "__import__", "(", "hyperparams", "[", "\"arch\"", "]", "[", "\"loss\"", "]", ",", "fromlist", "=", "[", "\"create_loss\"", "]", ")", "create_loss", "=", "arch_loss", ".", "create_loss", "# Load training data", "print", "(", "\"Load data\"", ")", "if", "training_data", "is", "None", ":", "training_data", "=", "create_input_fn", "(", "os", ".", "path", ".", "join", "(", "hyper_params", ".", "train", ".", "tf_records_path", ",", "PHASE_TRAIN", ")", ",", "hyper_params", ".", "train", ".", "batch_size", ")", "if", "validation_data", "is", "None", ":", "validation_data", "=", "create_input_fn", "(", "os", ".", "path", ".", "join", "(", "hyper_params", ".", "train", ".", "tf_records_path", ",", "PHASE_VALIDATION", ")", ",", "hyper_params", ".", "train", ".", "batch_size", ")", "# Write hyper parameters to be able to track what config you had.", "with", "open", "(", "chkpt_path", "+", "\"/hyperparameters.json\"", ",", "\"w\"", ")", "as", "json_file", ":", "json_file", ".", "write", "(", "json", ".", 
"dumps", "(", "hyper_params", ".", "to_dict", "(", ")", ",", "indent", "=", "4", ",", "sort_keys", "=", "True", ")", ")", "estimator_spec", "=", "create_tf_estimator_spec", "(", "chkpt_path", ",", "Model", ",", "create_loss", ",", "inline_plotting", ")", "# Create a run configuration", "config", "=", "None", "if", "hyper_params", ".", "train", ".", "get", "(", "\"distributed\"", ",", "False", ")", ":", "distribution", "=", "tf", ".", "contrib", ".", "distribute", ".", "MirroredStrategy", "(", ")", "config", "=", "tf", ".", "estimator", ".", "RunConfig", "(", "model_dir", "=", "chkpt_path", ",", "save_summary_steps", "=", "hyper_params", ".", "train", ".", "summary_steps", ",", "train_distribute", "=", "distribution", ",", "save_checkpoints_steps", "=", "hyper_params", ".", "train", ".", "save_checkpoint_steps", ",", "keep_checkpoint_max", "=", "hyper_params", ".", "train", ".", "keep_checkpoint_max", ",", "keep_checkpoint_every_n_hours", "=", "1", ")", "else", ":", "config", "=", "tf", ".", "estimator", ".", "RunConfig", "(", "session_config", "=", "session_config", ",", "model_dir", "=", "chkpt_path", ",", "save_summary_steps", "=", "hyper_params", ".", "train", ".", "summary_steps", ",", "save_checkpoints_steps", "=", "hyper_params", ".", "train", ".", "save_checkpoint_steps", ",", "keep_checkpoint_max", "=", "hyper_params", ".", "train", ".", "keep_checkpoint_max", ",", "keep_checkpoint_every_n_hours", "=", "1", ")", "# Create the estimator.", "estimator", "=", "None", "if", "hyper_params", ".", "train", ".", "get", "(", "\"warm_start_checkpoint\"", ",", "None", ")", "is", "not", "None", ":", "warm_start_dir", "=", "hyper_params", ".", "train", ".", "warm_start_checkpoint", "estimator", "=", "tf", ".", "estimator", ".", "Estimator", "(", "estimator_spec", ",", "config", "=", "config", ",", "warm_start_from", "=", "warm_start_dir", ",", "params", "=", "hyper_params", ")", "else", ":", "estimator", "=", "tf", ".", "estimator", ".", "Estimator", "(", "estimator_spec", ",", "config", "=", "config", ",", "params", "=", "hyper_params", ")", "# Specify training and actually train.", "throttle_secs", "=", "hyper_params", ".", "train", ".", "get", "(", "\"throttle_secs\"", ",", "120", ")", "train_spec", "=", "tf", ".", "estimator", ".", "TrainSpec", "(", "input_fn", "=", "training_data", ",", "max_steps", "=", "hyper_params", ".", "train", ".", "steps", ")", "eval_spec", "=", "tf", ".", "estimator", ".", "EvalSpec", "(", "input_fn", "=", "validation_data", ",", "throttle_secs", "=", "throttle_secs", ")", "print", "(", "\"Start training\"", ")", "tf", ".", "estimator", ".", "train_and_evaluate", "(", "estimator", ",", "train_spec", ",", "eval_spec", ")", "return", "estimator" ]
Train and evaluate your model without any boilerplate code.

    1) Write your data using the starttf.tfrecords.autorecords.write_data method.
    2) Create your hyper parameter file containing all required fields and then load it using
        the starttf.utils.hyper_params.load_params method.
        Minimal Sample Hyperparams File:
        {"train": {
            "learning_rate": {
                "type": "const",
                "start_value": 0.001
            },
            "optimizer": {
                "type": "adam"
            },
            "batch_size": 1024,
            "iters": 10000,
            "summary_iters": 100,
            "checkpoint_path": "checkpoints/mnist",
            "tf_records_path": "data/.records/mnist"
            }
        }
    3) Pass everything required to this method and that's it.
    :param hyper_params: The hyper parameters object loaded via starttf.utils.hyper_params.load_params
    :param Model: A keras model.
    :param create_loss: A create_loss function like that in starttf.examples.mnist.loss.
    :param inline_plotting: When you are using jupyter notebooks you can tell it to plot the loss directly inside the notebook.
    :param continue_training: Bool, continue last training in the checkpoint path specified in the hyper parameters.
    :param session_config: A configuration for the session.
    :param log_suffix: A suffix for the log folder, so you can remember what was special about the run.
    :return:
[ "Train", "and", "evaluate", "your", "model", "without", "any", "boilerplate", "code", "." ]
python
train
50.991935
tipsi/aiozk
aiozk/protocol/part.py
https://github.com/tipsi/aiozk/blob/96d2f543de248c6d993b5bfe6621167dd1eb8223/aiozk/protocol/part.py#L59-L74
def parse(cls, buff, offset): """ Given a buffer and offset, returns the parsed value and new offset. Calls `parse()` on the given buffer for each sub-part in order and creates a new instance with the results. """ values = {} for name, part in cls.parts: value, new_offset = part.parse(buff, offset) values[name] = value offset = new_offset return cls(**values), offset
[ "def", "parse", "(", "cls", ",", "buff", ",", "offset", ")", ":", "values", "=", "{", "}", "for", "name", ",", "part", "in", "cls", ".", "parts", ":", "value", ",", "new_offset", "=", "part", ".", "parse", "(", "buff", ",", "offset", ")", "values", "[", "name", "]", "=", "value", "offset", "=", "new_offset", "return", "cls", "(", "*", "*", "values", ")", ",", "offset" ]
Given a buffer and offset, returns the parsed value and new offset. Calls `parse()` on the given buffer for each sub-part in order and creates a new instance with the results.
[ "Given", "a", "buffer", "and", "offset", "returns", "the", "parsed", "value", "and", "new", "offset", "." ]
python
train
28.625
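A self-contained toy of the composite parse() pattern above: fixed-width integer parts threaded through a running offset.

import struct

class Int:
    """Toy fixed-width part: a big-endian 32-bit integer."""
    @staticmethod
    def parse(buff, offset):
        value = struct.unpack_from('>i', buff, offset)[0]
        return value, offset + 4

class Pair:
    parts = (('first', Int), ('second', Int))

    def __init__(self, **values):
        self.__dict__.update(values)

    @classmethod
    def parse(cls, buff, offset):
        # Same shape as the method above: each sub-part parses in order
        # and advances the offset for the next one.
        values = {}
        for name, part in cls.parts:
            value, new_offset = part.parse(buff, offset)
            values[name] = value
            offset = new_offset
        return cls(**values), offset

pair, end = Pair.parse(struct.pack('>ii', 7, 42), 0)
print(pair.first, pair.second, end)  # 7 42 8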
tyiannak/pyAudioAnalysis
pyAudioAnalysis/audioSegmentation.py
https://github.com/tyiannak/pyAudioAnalysis/blob/e3da991e7247492deba50648a4c7c0f41e684af4/pyAudioAnalysis/audioSegmentation.py#L990-L1021
def speakerDiarizationEvaluateScript(folder_name, ldas): ''' This function prints the cluster purity and speaker purity for each WAV file stored in a provided directory (.SEGMENT files are needed as ground-truth) ARGUMENTS: - folder_name: the full path of the folder where the WAV and SEGMENT (ground-truth) files are stored - ldas: a list of LDA dimensions (0 for no LDA) ''' types = ('*.wav', ) wavFilesList = [] for files in types: wavFilesList.extend(glob.glob(os.path.join(folder_name, files))) wavFilesList = sorted(wavFilesList) # get number of unique speakers per file (from ground-truth) N = [] for wav_file in wavFilesList: gt_file = wav_file.replace('.wav', '.segments'); if os.path.isfile(gt_file): [seg_start, seg_end, seg_labs] = readSegmentGT(gt_file) N.append(len(list(set(seg_labs)))) else: N.append(-1) for l in ldas: print("LDA = {0:d}".format(l)) for i, wav_file in enumerate(wavFilesList): speakerDiarization(wav_file, N[i], 2.0, 0.2, 0.05, l, plot_res=False) print
[ "def", "speakerDiarizationEvaluateScript", "(", "folder_name", ",", "ldas", ")", ":", "types", "=", "(", "'*.wav'", ",", ")", "wavFilesList", "=", "[", "]", "for", "files", "in", "types", ":", "wavFilesList", ".", "extend", "(", "glob", ".", "glob", "(", "os", ".", "path", ".", "join", "(", "folder_name", ",", "files", ")", ")", ")", "wavFilesList", "=", "sorted", "(", "wavFilesList", ")", "# get number of unique speakers per file (from ground-truth) ", "N", "=", "[", "]", "for", "wav_file", "in", "wavFilesList", ":", "gt_file", "=", "wav_file", ".", "replace", "(", "'.wav'", ",", "'.segments'", ")", "if", "os", ".", "path", ".", "isfile", "(", "gt_file", ")", ":", "[", "seg_start", ",", "seg_end", ",", "seg_labs", "]", "=", "readSegmentGT", "(", "gt_file", ")", "N", ".", "append", "(", "len", "(", "list", "(", "set", "(", "seg_labs", ")", ")", ")", ")", "else", ":", "N", ".", "append", "(", "-", "1", ")", "for", "l", "in", "ldas", ":", "print", "(", "\"LDA = {0:d}\"", ".", "format", "(", "l", ")", ")", "for", "i", ",", "wav_file", "in", "enumerate", "(", "wavFilesList", ")", ":", "speakerDiarization", "(", "wav_file", ",", "N", "[", "i", "]", ",", "2.0", ",", "0.2", ",", "0.05", ",", "l", ",", "plot_res", "=", "False", ")", "print" ]
This function prints the cluster purity and speaker purity for each WAV file stored in a provided directory (.SEGMENT files are needed as ground-truth) ARGUMENTS: - folder_name: the full path of the folder where the WAV and SEGMENT (ground-truth) files are stored - ldas: a list of LDA dimensions (0 for no LDA)
[ "This", "function", "prints", "the", "cluster", "purity", "and", "speaker", "purity", "for", "each", "WAV", "file", "stored", "in", "a", "provided", "directory", "(", ".", "SEGMENT", "files", "are", "needed", "as", "ground", "-", "truth", ")", "ARGUMENTS", ":", "-", "folder_name", ":", "the", "full", "path", "of", "the", "folder", "where", "the", "WAV", "and", "SEGMENT", "(", "ground", "-", "truth", ")", "files", "are", "stored", "-", "ldas", ":", "a", "list", "of", "LDA", "dimensions", "(", "0", "for", "no", "LDA", ")" ]
python
train
38.375
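A minimal invocation sketch for the evaluation script above; the directory and LDA dimensions are placeholders. Each .wav file in the folder must sit next to a same-named .segments ground-truth file.

from pyAudioAnalysis import audioSegmentation as aS

# Evaluate diarization once without LDA (0) and once with a 35-dim LDA projection.
aS.speakerDiarizationEvaluateScript("data/diarization/", [0, 35])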
underworldcode/stripy
stripy-src/stripy/cartesian.py
https://github.com/underworldcode/stripy/blob/d4c3480c3e58c88489ded695eadbe7cd5bf94b48/stripy-src/stripy/cartesian.py#L129-L134
def _is_collinear(self, x, y): """ Checks if first three points are collinear """ pts = np.column_stack([x[:3], y[:3], np.ones(3)]) return np.linalg.det(pts) == 0.0
[ "def", "_is_collinear", "(", "self", ",", "x", ",", "y", ")", ":", "pts", "=", "np", ".", "column_stack", "(", "[", "x", "[", ":", "3", "]", ",", "y", "[", ":", "3", "]", ",", "np", ".", "ones", "(", "3", ")", "]", ")", "return", "np", ".", "linalg", ".", "det", "(", "pts", ")", "==", "0.0" ]
Checks if first three points are collinear
[ "Checks", "if", "first", "three", "points", "are", "collinear" ]
python
train
33.166667
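The determinant above equals twice the signed area of the triangle spanned by the three points, so it vanishes exactly when they are collinear. A standalone sketch; note the exact == 0.0 comparison mirrors the source and can misfire on inexact floating-point inputs, where a tolerance such as np.isclose would be the usual hedge.

import numpy as np

def is_collinear(x, y):
    # det of [[x0, y0, 1], [x1, y1, 1], [x2, y2, 1]] == 2 * signed triangle area
    pts = np.column_stack([x[:3], y[:3], np.ones(3)])
    return np.linalg.det(pts) == 0.0

print(is_collinear(np.array([0.0, 1.0, 2.0]), np.array([0.0, 1.0, 2.0])))  # True: points on y = x
print(is_collinear(np.array([0.0, 1.0, 2.0]), np.array([0.0, 1.0, 4.0])))  # False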
developmentseed/landsat-util
landsat/image.py
https://github.com/developmentseed/landsat-util/blob/92dc81771ddaa64a8a9124a89a6516b52485374b/landsat/image.py#L161-L176
def _unzip(self, src, dst, scene, force_unzip=False): """ Unzip tar files """ self.output("Unzipping %s - It might take some time" % scene, normal=True, arrow=True) try: # check if file is already unzipped, skip if isdir(dst) and not force_unzip: self.output('%s is already unzipped.' % scene, normal=True, color='green', indent=1) return else: tar = tarfile.open(src, 'r') tar.extractall(path=dst) tar.close() except tarfile.ReadError: check_create_folder(dst) subprocess.check_call(['tar', '-xf', src, '-C', dst])
[ "def", "_unzip", "(", "self", ",", "src", ",", "dst", ",", "scene", ",", "force_unzip", "=", "False", ")", ":", "self", ".", "output", "(", "\"Unzipping %s - It might take some time\"", "%", "scene", ",", "normal", "=", "True", ",", "arrow", "=", "True", ")", "try", ":", "# check if file is already unzipped, skip", "if", "isdir", "(", "dst", ")", "and", "not", "force_unzip", ":", "self", ".", "output", "(", "'%s is already unzipped.'", "%", "scene", ",", "normal", "=", "True", ",", "color", "=", "'green'", ",", "indent", "=", "1", ")", "return", "else", ":", "tar", "=", "tarfile", ".", "open", "(", "src", ",", "'r'", ")", "tar", ".", "extractall", "(", "path", "=", "dst", ")", "tar", ".", "close", "(", ")", "except", "tarfile", ".", "ReadError", ":", "check_create_folder", "(", "dst", ")", "subprocess", ".", "check_call", "(", "[", "'tar'", ",", "'-xf'", ",", "src", ",", "'-C'", ",", "dst", "]", ")" ]
Unzip tar files
[ "Unzip", "tar", "files" ]
python
train
42.0625
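A standalone sketch of the same fallback strategy, minus the class plumbing: extract with Python's tarfile, and shell out to the system tar when the archive can't be read. Paths are placeholders.

import os
import subprocess
import tarfile

def unzip(src, dst, force_unzip=False):
    if os.path.isdir(dst) and not force_unzip:
        return  # already extracted, skip like the method above
    try:
        with tarfile.open(src, "r") as tar:
            tar.extractall(path=dst)
    except tarfile.ReadError:
        # Mirror the check_create_folder + subprocess fallback above.
        os.makedirs(dst, exist_ok=True)
        subprocess.check_call(["tar", "-xf", src, "-C", dst])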
cloud-custodian/cloud-custodian
c7n/tags.py
https://github.com/cloud-custodian/cloud-custodian/blob/52ef732eb3d7bc939d1579faf519314814695c08/c7n/tags.py#L758-L776
def process_transform(self, tag_value, resource_set): """ Transform tag value - Collect value from tag - Transform Tag value - Assign new value for key """ self.log.info("Transforming tag value on %s instances" % ( len(resource_set))) key = self.data.get('key') c = utils.local_session(self.manager.session_factory).client('ec2') self.create_tag( c, [r[self.id_key] for r in resource_set if len( r.get('Tags', [])) < 50], key, tag_value)
[ "def", "process_transform", "(", "self", ",", "tag_value", ",", "resource_set", ")", ":", "self", ".", "log", ".", "info", "(", "\"Transforming tag value on %s instances\"", "%", "(", "len", "(", "resource_set", ")", ")", ")", "key", "=", "self", ".", "data", ".", "get", "(", "'key'", ")", "c", "=", "utils", ".", "local_session", "(", "self", ".", "manager", ".", "session_factory", ")", ".", "client", "(", "'ec2'", ")", "self", ".", "create_tag", "(", "c", ",", "[", "r", "[", "self", ".", "id_key", "]", "for", "r", "in", "resource_set", "if", "len", "(", "r", ".", "get", "(", "'Tags'", ",", "[", "]", ")", ")", "<", "50", "]", ",", "key", ",", "tag_value", ")" ]
Transform tag value - Collect value from tag - Transform Tag value - Assign new value for key
[ "Transform", "tag", "value" ]
python
train
29.789474
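Outside of c7n, the create_tag call above boils down to EC2's CreateTags API, batched over resources that still have room under AWS's 50-tags-per-resource limit. A hedged boto3 sketch; the InstanceId key, region, and tag values are illustrative assumptions.

import boto3

def tag_resources(resource_set, key, value, region="us-east-1"):
    ec2 = boto3.client("ec2", region_name=region)
    # Mirror the filter above: skip resources already at the 50-tag limit.
    ids = [r["InstanceId"] for r in resource_set if len(r.get("Tags", [])) < 50]
    if ids:
        ec2.create_tags(Resources=ids, Tags=[{"Key": key, "Value": value}])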
nerdvegas/rez
src/rez/resolved_context.py
https://github.com/nerdvegas/rez/blob/1d3b846d53b5b5404edfe8ddb9083f9ceec8c5e7/src/rez/resolved_context.py#L566-L571
def read_from_buffer(cls, buf, identifier_str=None): """Load the context from a buffer.""" try: return cls._read_from_buffer(buf, identifier_str) except Exception as e: cls._load_error(e, identifier_str)
[ "def", "read_from_buffer", "(", "cls", ",", "buf", ",", "identifier_str", "=", "None", ")", ":", "try", ":", "return", "cls", ".", "_read_from_buffer", "(", "buf", ",", "identifier_str", ")", "except", "Exception", "as", "e", ":", "cls", ".", "_load_error", "(", "e", ",", "identifier_str", ")" ]
Load the context from a buffer.
[ "Load", "the", "context", "from", "a", "buffer", "." ]
python
train
41
vinci1it2000/schedula
schedula/utils/alg.py
https://github.com/vinci1it2000/schedula/blob/addb9fd685be81544b796c51383ac00a31543ce9/schedula/utils/alg.py#L75-L105
def get_unused_node_id(graph, initial_guess='unknown', _format='{}<%d>'): """ Finds an unused node id in `graph`. :param graph: A directed graph. :type graph: networkx.classes.digraph.DiGraph :param initial_guess: Initial node id guess. :type initial_guess: str, optional :param _format: Format to generate the new node id if the given is already used. :type _format: str, optional :return: An unused node id. :rtype: str """ has_node = graph.has_node # Namespace shortcut for speed. n = counter() # Counter. node_id_format = _format.format(initial_guess) # Node id format. node_id = initial_guess # Initial guess. while has_node(node_id): # Check if node id is used. node_id = node_id_format % n() # Guess. return node_id
[ "def", "get_unused_node_id", "(", "graph", ",", "initial_guess", "=", "'unknown'", ",", "_format", "=", "'{}<%d>'", ")", ":", "has_node", "=", "graph", ".", "has_node", "# Namespace shortcut for speed.", "n", "=", "counter", "(", ")", "# Counter.", "node_id_format", "=", "_format", ".", "format", "(", "initial_guess", ")", "# Node id format.", "node_id", "=", "initial_guess", "# Initial guess.", "while", "has_node", "(", "node_id", ")", ":", "# Check if node id is used.", "node_id", "=", "node_id_format", "%", "n", "(", ")", "# Guess.", "return", "node_id" ]
Finds an unused node id in `graph`. :param graph: A directed graph. :type graph: networkx.classes.digraph.DiGraph :param initial_guess: Initial node id guess. :type initial_guess: str, optional :param _format: Format to generate the new node id if the given is already used. :type _format: str, optional :return: An unused node id. :rtype: str
[ "Finds", "an", "unused", "node", "id", "in", "graph", "." ]
python
train
26.258065
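A runnable sketch of the probing loop above. itertools.count stands in for schedula's counter helper, which may start numbering differently.

import itertools
import networkx as nx

def get_unused_node_id(graph, initial_guess="unknown", _format="{}<%d>"):
    n = itertools.count().__next__  # stand-in for schedula's counter()
    node_id_format = _format.format(initial_guess)
    node_id = initial_guess
    while graph.has_node(node_id):  # probe until an unused id is found
        node_id = node_id_format % n()
    return node_id

g = nx.DiGraph()
g.add_nodes_from(["unknown", "unknown<0>"])
print(get_unused_node_id(g))  # -> unknown<1>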
ArduPilot/MAVProxy
MAVProxy/modules/lib/wxhorizon_ui.py
https://github.com/ArduPilot/MAVProxy/blob/f50bdeff33064876f7dc8dc4683d278ff47f75d5/MAVProxy/modules/lib/wxhorizon_ui.py#L422-L429
def updateWPText(self): '''Updates the current waypoint and distance to it.''' self.wpText.set_position((self.leftPos+(1.5*self.vertSize/10.0),0.97-(1.5*self.vertSize)+(0.5*self.vertSize/10.0))) self.wpText.set_size(self.fontSize) if type(self.nextWPTime) is str: self.wpText.set_text('%.f/%.f\n(%.f m, ~ s)' % (self.currentWP,self.finalWP,self.wpDist)) else: self.wpText.set_text('%.f/%.f\n(%.f m, %.f s)' % (self.currentWP,self.finalWP,self.wpDist,self.nextWPTime))
[ "def", "updateWPText", "(", "self", ")", ":", "self", ".", "wpText", ".", "set_position", "(", "(", "self", ".", "leftPos", "+", "(", "1.5", "*", "self", ".", "vertSize", "/", "10.0", ")", ",", "0.97", "-", "(", "1.5", "*", "self", ".", "vertSize", ")", "+", "(", "0.5", "*", "self", ".", "vertSize", "/", "10.0", ")", ")", ")", "self", ".", "wpText", ".", "set_size", "(", "self", ".", "fontSize", ")", "if", "type", "(", "self", ".", "nextWPTime", ")", "is", "str", ":", "self", ".", "wpText", ".", "set_text", "(", "'%.f/%.f\\n(%.f m, ~ s)'", "%", "(", "self", ".", "currentWP", ",", "self", ".", "finalWP", ",", "self", ".", "wpDist", ")", ")", "else", ":", "self", ".", "wpText", ".", "set_text", "(", "'%.f/%.f\\n(%.f m, %.f s)'", "%", "(", "self", ".", "currentWP", ",", "self", ".", "finalWP", ",", "self", ".", "wpDist", ",", "self", ".", "nextWPTime", ")", ")" ]
Updates the current waypoint and distance to it.
[ "Updates", "the", "current", "waypoint", "and", "distance", "to", "it", "." ]
python
train
65.625
akissa/sachannelupdate
sachannelupdate/base.py
https://github.com/akissa/sachannelupdate/blob/a1c3c3d86b874f9c92c2407e2608963165d3ae98/sachannelupdate/base.py#L187-L193
def queue_files(dirpath, queue): """Add files in a directory to a queue""" for root, _, files in os.walk(os.path.abspath(dirpath)): if not files: continue for filename in files: queue.put(os.path.join(root, filename))
[ "def", "queue_files", "(", "dirpath", ",", "queue", ")", ":", "for", "root", ",", "_", ",", "files", "in", "os", ".", "walk", "(", "os", ".", "path", ".", "abspath", "(", "dirpath", ")", ")", ":", "if", "not", "files", ":", "continue", "for", "filename", "in", "files", ":", "queue", ".", "put", "(", "os", ".", "path", ".", "join", "(", "root", ",", "filename", ")", ")" ]
Add files in a directory to a queue
[ "Add", "files", "in", "a", "directory", "to", "a", "queue" ]
python
train
37
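A quick usage sketch for queue_files above, draining the result through a standard queue; the directory is a placeholder.

import queue

q = queue.Queue()
queue_files("/var/log", q)  # placeholder directory; queue_files is defined above
while not q.empty():
    print(q.get())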
openstack/networking-cisco
networking_cisco/ml2_drivers/ucsm/ucsm_network_driver.py
https://github.com/openstack/networking-cisco/blob/aa58a30aec25b86f9aa5952b0863045975debfa9/networking_cisco/ml2_drivers/ucsm/ucsm_network_driver.py#L792-L844
def _remove_vlan_from_all_sp_templates(self, handle, vlan_id, ucsm_ip): """Deletes VLAN config from all SP Templates that have it.""" sp_template_info_list = ( CONF.ml2_cisco_ucsm.ucsms[ucsm_ip].sp_template_list.values()) vlan_name = self.make_vlan_name(vlan_id) virtio_port_list = ( CONF.ml2_cisco_ucsm.ucsms[ucsm_ip].ucsm_virtio_eth_ports) try: # sp_template_info_list is a list of tuples. # Each tuple is of the form : # (ucsm_ip, sp_template_path, sp_template) for sp_template_info in sp_template_info_list: sp_template_path = sp_template_info.path sp_template = sp_template_info.name sp_template_full_path = (sp_template_path + const.SP_TEMPLATE_PREFIX + sp_template) obj = handle.query_dn(sp_template_full_path) if not obj: LOG.error('UCS Manager network driver could not ' 'find Service Profile template %s', sp_template_full_path) continue eth_port_paths = ["%s%s" % (sp_template_full_path, ep) for ep in virtio_port_list] for eth_port_path in eth_port_paths: eth = handle.query_dn(eth_port_path) if eth: vlan_path = (eth_port_path + const.VLAN_PATH_PREFIX + vlan_name) vlan = handle.query_dn(vlan_path) if vlan: # Found vlan config. Now remove it. handle.remove_mo(vlan) else: LOG.debug('UCS Manager network driver did not ' 'find VLAN %s at %s', vlan_name, eth_port_path) else: LOG.debug('UCS Manager network driver did not ' 'find ethernet port at %s', eth_port_path) handle.commit() return True except Exception as e: # Raise a Neutron exception. Include a description of # the original exception. raise cexc.UcsmConfigDeleteFailed(config=vlan_id, ucsm_ip=ucsm_ip, exc=e)
[ "def", "_remove_vlan_from_all_sp_templates", "(", "self", ",", "handle", ",", "vlan_id", ",", "ucsm_ip", ")", ":", "sp_template_info_list", "=", "(", "CONF", ".", "ml2_cisco_ucsm", ".", "ucsms", "[", "ucsm_ip", "]", ".", "sp_template_list", ".", "values", "(", ")", ")", "vlan_name", "=", "self", ".", "make_vlan_name", "(", "vlan_id", ")", "virtio_port_list", "=", "(", "CONF", ".", "ml2_cisco_ucsm", ".", "ucsms", "[", "ucsm_ip", "]", ".", "ucsm_virtio_eth_ports", ")", "try", ":", "# sp_template_info_list is a list of tuples.", "# Each tuple is of the form :", "# (ucsm_ip, sp_template_path, sp_template)", "for", "sp_template_info", "in", "sp_template_info_list", ":", "sp_template_path", "=", "sp_template_info", ".", "path", "sp_template", "=", "sp_template_info", ".", "name", "sp_template_full_path", "=", "(", "sp_template_path", "+", "const", ".", "SP_TEMPLATE_PREFIX", "+", "sp_template", ")", "obj", "=", "handle", ".", "query_dn", "(", "sp_template_full_path", ")", "if", "not", "obj", ":", "LOG", ".", "error", "(", "'UCS Manager network driver could not '", "'find Service Profile template %s'", ",", "sp_template_full_path", ")", "continue", "eth_port_paths", "=", "[", "\"%s%s\"", "%", "(", "sp_template_full_path", ",", "ep", ")", "for", "ep", "in", "virtio_port_list", "]", "for", "eth_port_path", "in", "eth_port_paths", ":", "eth", "=", "handle", ".", "query_dn", "(", "eth_port_path", ")", "if", "eth", ":", "vlan_path", "=", "(", "eth_port_path", "+", "const", ".", "VLAN_PATH_PREFIX", "+", "vlan_name", ")", "vlan", "=", "handle", ".", "query_dn", "(", "vlan_path", ")", "if", "vlan", ":", "# Found vlan config. Now remove it.", "handle", ".", "remove_mo", "(", "vlan", ")", "else", ":", "LOG", ".", "debug", "(", "'UCS Manager network driver did not '", "'find VLAN %s at %s'", ",", "vlan_name", ",", "eth_port_path", ")", "else", ":", "LOG", ".", "debug", "(", "'UCS Manager network driver did not '", "'find ethernet port at %s'", ",", "eth_port_path", ")", "handle", ".", "commit", "(", ")", "return", "True", "except", "Exception", "as", "e", ":", "# Raise a Neutron exception. Include a description of", "# the original exception.", "raise", "cexc", ".", "UcsmConfigDeleteFailed", "(", "config", "=", "vlan_id", ",", "ucsm_ip", "=", "ucsm_ip", ",", "exc", "=", "e", ")" ]
Deletes VLAN config from all SP Templates that have it.
[ "Deletes", "VLAN", "config", "from", "all", "SP", "Templates", "that", "have", "it", "." ]
python
train
45.396226
PinLin/KCOJ_api
KCOJ_api/api.py
https://github.com/PinLin/KCOJ_api/blob/64f6ef0f9e64dc1efd692cbe6d5738ee7cfb78ec/KCOJ_api/api.py#L294-L304
def get_notices(self): """ [deprecated] It is recommended to use `get_notice()` and `get_notice_content()` instead. """ result = [] # Fetch the bulletin board notice list for date, title in self.get_notice().items(): content = self.get_notice_content(date) result.append([date, title, content]) # Return the result return result
[ "def", "get_notices", "(", "self", ")", ":", "result", "=", "[", "]", "# 取得公布欄訊息列表", "for", "date", ",", "title", "in", "self", ".", "get_notice", "(", ")", ".", "items", "(", ")", ":", "content", "=", "self", ".", "get_notice_content", "(", "date", ")", "result", ".", "append", "(", "[", "date", ",", "title", ",", "content", "]", ")", "# 回傳結果", "return", "result" ]
[deprecated] It is recommended to use `get_notice()` and `get_notice_content()` instead.
[ "[", "deprecated", "]", "建議使用方法", "get_notice", "()", "及", "get_notice_content", "()" ]
python
train
30.636364
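Per the deprecation note, the same [date, title, content] triples can be built from the two recommended methods. api below is a hypothetical instance of the class above.

notices = []
for date, title in api.get_notice().items():
    content = api.get_notice_content(date)
    notices.append([date, title, content])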
taskcluster/taskcluster-client.py
taskcluster/auth.py
https://github.com/taskcluster/taskcluster-client.py/blob/bcc95217f8bf80bed2ae5885a19fa0035da7ebc9/taskcluster/auth.py#L230-L243
def listRoleIds(self, *args, **kwargs): """ List Role IDs If no limit is given, the roleIds of all roles are returned. Since this list may become long, callers can use the `limit` and `continuationToken` query arguments to page through the responses. This method gives output: ``v1/list-role-ids-response.json#`` This method is ``stable`` """ return self._makeApiCall(self.funcinfo["listRoleIds"], *args, **kwargs)
[ "def", "listRoleIds", "(", "self", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "return", "self", ".", "_makeApiCall", "(", "self", ".", "funcinfo", "[", "\"listRoleIds\"", "]", ",", "*", "args", ",", "*", "*", "kwargs", ")" ]
List Role IDs If no limit is given, the roleIds of all roles are returned. Since this list may become long, callers can use the `limit` and `continuationToken` query arguments to page through the responses. This method gives output: ``v1/list-role-ids-response.json#`` This method is ``stable``
[ "List", "Role", "IDs" ]
python
train
34.071429
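A hedged paging sketch: walk all role ids using the limit and continuationToken query arguments the docstring describes. The rootUrl is a placeholder, and the roleIds/continuationToken field names follow the referenced response schema; verify option passing against your client version.

import taskcluster

auth = taskcluster.Auth({"rootUrl": "https://tc.example.com"})  # placeholder rootUrl
role_ids, token = [], None
while True:
    kwargs = {"limit": 100}
    if token:
        kwargs["continuationToken"] = token
    result = auth.listRoleIds(**kwargs)
    role_ids.extend(result["roleIds"])
    token = result.get("continuationToken")
    if not token:  # no more pages
        break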
davedoesdev/dxf
dxf/__init__.py
https://github.com/davedoesdev/dxf/blob/63fad55e0f0086e5f6d3511670db1ef23b5298f6/dxf/__init__.py#L378-L435
def push_blob(self, filename=None, progress=None, data=None, digest=None, check_exists=True): # pylint: disable=too-many-arguments """ Upload a file to the registry and return its (SHA-256) hash. The registry is content-addressable so the file's content (aka blob) can be retrieved later by passing the hash to :meth:`pull_blob`. :param filename: File to upload. :type filename: str :param data: Data to upload if ``filename`` isn't given. The data is uploaded in chunks and you must also pass ``digest``. :type data: Generator or iterator :param digest: Hash of the data to be uploaded in ``data``, if specified. :type digest: str (hex-encoded SHA-256, prefixed by ``sha256:``) :param progress: Optional function to call as the upload progresses. The function will be called with the hash of the file's content (or ``digest``), the blob just read from the file (or chunk from ``data``) and if ``filename`` is specified the total size of the file. :type progress: function(dgst, chunk, size) :param check_exists: Whether to check if a blob with the same hash already exists in the registry. If so, it won't be uploaded again. :type check_exists: bool :rtype: str :returns: Hash of file's content. """ if filename is None: dgst = digest else: dgst = hash_file(filename) if check_exists: try: self._request('head', 'blobs/' + dgst) return dgst except requests.exceptions.HTTPError as ex: # pylint: disable=no-member if ex.response.status_code != requests.codes.not_found: raise r = self._request('post', 'blobs/uploads/') upload_url = r.headers['Location'] url_parts = list(urlparse.urlparse(upload_url)) query = urlparse.parse_qs(url_parts[4]) query.update({'digest': dgst}) url_parts[4] = urlencode(query, True) url_parts[0] = 'http' if self._insecure else 'https' upload_url = urlparse.urlunparse(url_parts) if filename is None: data = _ReportingChunks(dgst, data, progress) if progress else data self._base_request('put', upload_url, data=data) else: with open(filename, 'rb') as f: data = _ReportingFile(dgst, f, progress) if progress else f self._base_request('put', upload_url, data=data) return dgst
[ "def", "push_blob", "(", "self", ",", "filename", "=", "None", ",", "progress", "=", "None", ",", "data", "=", "None", ",", "digest", "=", "None", ",", "check_exists", "=", "True", ")", ":", "# pylint: disable=too-many-arguments", "if", "filename", "is", "None", ":", "dgst", "=", "digest", "else", ":", "dgst", "=", "hash_file", "(", "filename", ")", "if", "check_exists", ":", "try", ":", "self", ".", "_request", "(", "'head'", ",", "'blobs/'", "+", "dgst", ")", "return", "dgst", "except", "requests", ".", "exceptions", ".", "HTTPError", "as", "ex", ":", "# pylint: disable=no-member", "if", "ex", ".", "response", ".", "status_code", "!=", "requests", ".", "codes", ".", "not_found", ":", "raise", "r", "=", "self", ".", "_request", "(", "'post'", ",", "'blobs/uploads/'", ")", "upload_url", "=", "r", ".", "headers", "[", "'Location'", "]", "url_parts", "=", "list", "(", "urlparse", ".", "urlparse", "(", "upload_url", ")", ")", "query", "=", "urlparse", ".", "parse_qs", "(", "url_parts", "[", "4", "]", ")", "query", ".", "update", "(", "{", "'digest'", ":", "dgst", "}", ")", "url_parts", "[", "4", "]", "=", "urlencode", "(", "query", ",", "True", ")", "url_parts", "[", "0", "]", "=", "'http'", "if", "self", ".", "_insecure", "else", "'https'", "upload_url", "=", "urlparse", ".", "urlunparse", "(", "url_parts", ")", "if", "filename", "is", "None", ":", "data", "=", "_ReportingChunks", "(", "dgst", ",", "data", ",", "progress", ")", "if", "progress", "else", "data", "self", ".", "_base_request", "(", "'put'", ",", "upload_url", ",", "data", "=", "data", ")", "else", ":", "with", "open", "(", "filename", ",", "'rb'", ")", "as", "f", ":", "data", "=", "_ReportingFile", "(", "dgst", ",", "f", ",", "progress", ")", "if", "progress", "else", "f", "self", ".", "_base_request", "(", "'put'", ",", "upload_url", ",", "data", "=", "data", ")", "return", "dgst" ]
Upload a file to the registry and return its (SHA-256) hash. The registry is content-addressable so the file's content (aka blob) can be retrieved later by passing the hash to :meth:`pull_blob`. :param filename: File to upload. :type filename: str :param data: Data to upload if ``filename`` isn't given. The data is uploaded in chunks and you must also pass ``digest``. :type data: Generator or iterator :param digest: Hash of the data to be uploaded in ``data``, if specified. :type digest: str (hex-encoded SHA-256, prefixed by ``sha256:``) :param progress: Optional function to call as the upload progresses. The function will be called with the hash of the file's content (or ``digest``), the blob just read from the file (or chunk from ``data``) and if ``filename`` is specified the total size of the file. :type progress: function(dgst, chunk, size) :param check_exists: Whether to check if a blob with the same hash already exists in the registry. If so, it won't be uploaded again. :type check_exists: bool :rtype: str :returns: Hash of file's content.
[ "Upload", "a", "file", "to", "the", "registry", "and", "return", "its", "(", "SHA", "-", "256", ")", "hash", "." ]
python
train
44.534483
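A hedged client-side sketch for push_blob: authenticate against a registry, push a file, and keep the returned digest for a later pull_blob. Host, repository, credentials and filename are placeholders.

from dxf import DXF

d = DXF("registry.example.com", "myorg/myrepo")  # placeholder host and repo
d.authenticate("user", "password", actions=["push", "pull"])
dgst = d.push_blob("layer.tar.gz")
print(dgst)  # sha256:... digest, usable with pull_blob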
Alignak-monitoring/alignak
alignak/objects/contact.py
https://github.com/Alignak-monitoring/alignak/blob/f3c145207e83159b799d3714e4241399c7740a64/alignak/objects/contact.py#L322-L347
def get_notification_commands(self, notifways, n_type, command_name=False): """Get notification commands for object type :param notifways: list of alignak.objects.NotificationWay objects :type notifways: NotificationWays :param n_type: object type (host or service) :type n_type: string :param command_name: True to update the inner property with the name of the command, False to update with the Command objects list :type command_name: bool :return: command list :rtype: list[alignak.objects.command.Command] """ res = [] for notifway_id in self.notificationways: notifway = notifways[notifway_id] res.extend(notifway.get_notification_commands(n_type)) # Update inner notification commands property with command name or command if command_name: setattr(self, n_type + '_notification_commands', [c.get_name() for c in res]) else: setattr(self, n_type + '_notification_commands', res) return res
[ "def", "get_notification_commands", "(", "self", ",", "notifways", ",", "n_type", ",", "command_name", "=", "False", ")", ":", "res", "=", "[", "]", "for", "notifway_id", "in", "self", ".", "notificationways", ":", "notifway", "=", "notifways", "[", "notifway_id", "]", "res", ".", "extend", "(", "notifway", ".", "get_notification_commands", "(", "n_type", ")", ")", "# Update inner notification commands property with command name or command", "if", "command_name", ":", "setattr", "(", "self", ",", "n_type", "+", "'_notification_commands'", ",", "[", "c", ".", "get_name", "(", ")", "for", "c", "in", "res", "]", ")", "else", ":", "setattr", "(", "self", ",", "n_type", "+", "'_notification_commands'", ",", "res", ")", "return", "res" ]
Get notification commands for object type :param notifways: list of alignak.objects.NotificationWay objects :type notifways: NotificationWays :param n_type: object type (host or service) :type n_type: string :param command_name: True to update the inner property with the name of the command, False to update with the Command objects list :type command_name: bool :return: command list :rtype: list[alignak.objects.command.Command]
[ "Get", "notification", "commands", "for", "object", "type" ]
python
train
41.5
a1ezzz/wasp-general
wasp_general/task/scheduler/scheduler.py
https://github.com/a1ezzz/wasp-general/blob/1029839d33eb663f8dec76c1c46754d53c1de4a9/wasp_general/task/scheduler/scheduler.py#L507-L525
def check(self): """ Check if there are records that are ready to start and return them if there are any :return: tuple of WScheduleRecord or None (if there are no tasks to start) """ if self.__next_start is not None: utc_now = utc_datetime() if utc_now >= self.__next_start: result = [] for task_source in self.__next_sources: records = task_source.has_records() if records is not None: result.extend(records) self.__update_all() if len(result) > 0: return tuple(result)
[ "def", "check", "(", "self", ")", ":", "if", "self", ".", "__next_start", "is", "not", "None", ":", "utc_now", "=", "utc_datetime", "(", ")", "if", "utc_now", ">=", "self", ".", "__next_start", ":", "result", "=", "[", "]", "for", "task_source", "in", "self", ".", "__next_sources", ":", "records", "=", "task_source", ".", "has_records", "(", ")", "if", "records", "is", "not", "None", ":", "result", ".", "extend", "(", "records", ")", "self", ".", "__update_all", "(", ")", "if", "len", "(", "result", ")", ">", "0", ":", "return", "tuple", "(", "result", ")" ]
Check if there are records that are ready to start and return them if there are any :return: tuple of WScheduleRecord or None (if there are no tasks to start)
[ "Check", "if", "there", "are", "records", "that", "are", "ready", "to", "start", "and", "return", "them", "if", "there", "are", "any" ]
python
train
26.736842
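A generic polling sketch built only on the check() contract above: it returns a tuple of due records, or None when nothing is ready. The sources object and dispatch() helper are hypothetical placeholders.

import time

while True:
    records = sources.check()  # hypothetical source-group instance
    if records:
        for record in records:
            dispatch(record)  # placeholder for real task submission
    time.sleep(0.1)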