Columns:
  text          string (lengths 89 to 104k)
  code_tokens   list
  avg_line_len  float64 (7.91 to 980)
  score         float64 (0 to 630)

def record_manifest(self):
    """
    Called after a deployment to record any data necessary to detect changes
    for a future deployment.
    """
    manifest = super(GitTrackerSatchel, self).record_manifest()
    manifest[CURRENT_COMMIT] = self.get_current_commit()
    return manifest
[ "def", "record_manifest", "(", "self", ")", ":", "manifest", "=", "super", "(", "GitTrackerSatchel", ",", "self", ")", ".", "record_manifest", "(", ")", "manifest", "[", "CURRENT_COMMIT", "]", "=", "self", ".", "get_current_commit", "(", ")", "return", "manifest" ]
avg_line_len: 38.75    score: 15.75

def items(self):
    """Get an iter of VenvDirs and VenvFiles within the directory."""
    contents = self.paths
    contents = (
        VenvFile(path.path) if path.is_file else VenvDir(path.path)
        for path in contents
    )
    return contents
[ "def", "items", "(", "self", ")", ":", "contents", "=", "self", ".", "paths", "contents", "=", "(", "VenvFile", "(", "path", ".", "path", ")", "if", "path", ".", "is_file", "else", "VenvDir", "(", "path", ".", "path", ")", "for", "path", "in", "contents", ")", "return", "contents" ]
avg_line_len: 34.125    score: 17.75

def clip_by_extent(layer, extent):
    """Clip a raster using a bounding box using processing.

    Issue https://github.com/inasafe/inasafe/issues/3183

    :param layer: The layer to clip.
    :type layer: QgsRasterLayer

    :param extent: The extent.
    :type extent: QgsRectangle

    :return: Clipped layer.
    :rtype: QgsRasterLayer

    .. versionadded:: 4.0
    """
    parameters = dict()

    # noinspection PyBroadException
    try:
        output_layer_name = quick_clip_steps['output_layer_name']
        output_layer_name = output_layer_name % layer.keywords['layer_purpose']

        output_raster = unique_filename(suffix='.tif', dir=temp_dir())

        # We make one pixel size buffer on the extent to cover every pixels.
        # See https://github.com/inasafe/inasafe/issues/3655
        pixel_size_x = layer.rasterUnitsPerPixelX()
        pixel_size_y = layer.rasterUnitsPerPixelY()
        buffer_size = max(pixel_size_x, pixel_size_y)
        extent = extent.buffered(buffer_size)

        if is_raster_y_inverted(layer):
            # The raster is Y inverted. We need to switch Y min and Y max.
            bbox = [
                str(extent.xMinimum()),
                str(extent.xMaximum()),
                str(extent.yMaximum()),
                str(extent.yMinimum())
            ]
        else:
            # The raster is normal.
            bbox = [
                str(extent.xMinimum()),
                str(extent.xMaximum()),
                str(extent.yMinimum()),
                str(extent.yMaximum())
            ]

        # These values are all from the processing algorithm.
        # https://github.com/qgis/QGIS/blob/master/python/plugins/processing/
        # algs/gdal/ClipByExtent.py
        # Please read the file to know these parameters.
        parameters['INPUT'] = layer.source()
        parameters['NO_DATA'] = ''
        parameters['PROJWIN'] = ','.join(bbox)
        parameters['DATA_TYPE'] = 5
        parameters['COMPRESS'] = 4
        parameters['JPEGCOMPRESSION'] = 75
        parameters['ZLEVEL'] = 6
        parameters['PREDICTOR'] = 1
        parameters['TILED'] = False
        parameters['BIGTIFF'] = 0
        parameters['TFW'] = False
        parameters['EXTRA'] = ''
        parameters['OUTPUT'] = output_raster

        initialize_processing()

        feedback = create_processing_feedback()
        context = create_processing_context(feedback=feedback)
        result = processing.run(
            "gdal:cliprasterbyextent", parameters, context=context)

        if result is None:
            raise ProcessingInstallationError

        clipped = QgsRasterLayer(result['OUTPUT'], output_layer_name)

        # We transfer keywords to the output.
        clipped.keywords = layer.keywords.copy()
        clipped.keywords['title'] = output_layer_name
        check_layer(clipped)
    except Exception as e:
        # This step clip_raster_by_extent was nice to speedup the analysis.
        # As we got an exception because the layer is invalid, we are not going
        # to stop the analysis. We will return the original raster layer.
        # It will take more processing time until we clip the vector layer.
        # Check https://github.com/inasafe/inasafe/issues/4026 why we got some
        # exceptions with this step.
        LOGGER.exception(parameters)
        LOGGER.exception(
            'Error from QGIS clip raster by extent. Please check the QGIS '
            'logs too !')
        LOGGER.info(
            'Even if we got an exception, we are continuing the analysis. The '
            'layer was not clipped.')
        LOGGER.exception(str(e))
        LOGGER.exception(get_error_message(e).to_text())
        clipped = layer

    return clipped
[ "def", "clip_by_extent", "(", "layer", ",", "extent", ")", ":", "parameters", "=", "dict", "(", ")", "# noinspection PyBroadException", "try", ":", "output_layer_name", "=", "quick_clip_steps", "[", "'output_layer_name'", "]", "output_layer_name", "=", "output_layer_name", "%", "layer", ".", "keywords", "[", "'layer_purpose'", "]", "output_raster", "=", "unique_filename", "(", "suffix", "=", "'.tif'", ",", "dir", "=", "temp_dir", "(", ")", ")", "# We make one pixel size buffer on the extent to cover every pixels.", "# See https://github.com/inasafe/inasafe/issues/3655", "pixel_size_x", "=", "layer", ".", "rasterUnitsPerPixelX", "(", ")", "pixel_size_y", "=", "layer", ".", "rasterUnitsPerPixelY", "(", ")", "buffer_size", "=", "max", "(", "pixel_size_x", ",", "pixel_size_y", ")", "extent", "=", "extent", ".", "buffered", "(", "buffer_size", ")", "if", "is_raster_y_inverted", "(", "layer", ")", ":", "# The raster is Y inverted. We need to switch Y min and Y max.", "bbox", "=", "[", "str", "(", "extent", ".", "xMinimum", "(", ")", ")", ",", "str", "(", "extent", ".", "xMaximum", "(", ")", ")", ",", "str", "(", "extent", ".", "yMaximum", "(", ")", ")", ",", "str", "(", "extent", ".", "yMinimum", "(", ")", ")", "]", "else", ":", "# The raster is normal.", "bbox", "=", "[", "str", "(", "extent", ".", "xMinimum", "(", ")", ")", ",", "str", "(", "extent", ".", "xMaximum", "(", ")", ")", ",", "str", "(", "extent", ".", "yMinimum", "(", ")", ")", ",", "str", "(", "extent", ".", "yMaximum", "(", ")", ")", "]", "# These values are all from the processing algorithm.", "# https://github.com/qgis/QGIS/blob/master/python/plugins/processing/", "# algs/gdal/ClipByExtent.py", "# Please read the file to know these parameters.", "parameters", "[", "'INPUT'", "]", "=", "layer", ".", "source", "(", ")", "parameters", "[", "'NO_DATA'", "]", "=", "''", "parameters", "[", "'PROJWIN'", "]", "=", "','", ".", "join", "(", "bbox", ")", "parameters", "[", "'DATA_TYPE'", "]", "=", "5", "parameters", "[", "'COMPRESS'", "]", "=", "4", "parameters", "[", "'JPEGCOMPRESSION'", "]", "=", "75", "parameters", "[", "'ZLEVEL'", "]", "=", "6", "parameters", "[", "'PREDICTOR'", "]", "=", "1", "parameters", "[", "'TILED'", "]", "=", "False", "parameters", "[", "'BIGTIFF'", "]", "=", "0", "parameters", "[", "'TFW'", "]", "=", "False", "parameters", "[", "'EXTRA'", "]", "=", "''", "parameters", "[", "'OUTPUT'", "]", "=", "output_raster", "initialize_processing", "(", ")", "feedback", "=", "create_processing_feedback", "(", ")", "context", "=", "create_processing_context", "(", "feedback", "=", "feedback", ")", "result", "=", "processing", ".", "run", "(", "\"gdal:cliprasterbyextent\"", ",", "parameters", ",", "context", "=", "context", ")", "if", "result", "is", "None", ":", "raise", "ProcessingInstallationError", "clipped", "=", "QgsRasterLayer", "(", "result", "[", "'OUTPUT'", "]", ",", "output_layer_name", ")", "# We transfer keywords to the output.", "clipped", ".", "keywords", "=", "layer", ".", "keywords", ".", "copy", "(", ")", "clipped", ".", "keywords", "[", "'title'", "]", "=", "output_layer_name", "check_layer", "(", "clipped", ")", "except", "Exception", "as", "e", ":", "# This step clip_raster_by_extent was nice to speedup the analysis.", "# As we got an exception because the layer is invalid, we are not going", "# to stop the analysis. 
We will return the original raster layer.", "# It will take more processing time until we clip the vector layer.", "# Check https://github.com/inasafe/inasafe/issues/4026 why we got some", "# exceptions with this step.", "LOGGER", ".", "exception", "(", "parameters", ")", "LOGGER", ".", "exception", "(", "'Error from QGIS clip raster by extent. Please check the QGIS '", "'logs too !'", ")", "LOGGER", ".", "info", "(", "'Even if we got an exception, we are continuing the analysis. The '", "'layer was not clipped.'", ")", "LOGGER", ".", "exception", "(", "str", "(", "e", ")", ")", "LOGGER", ".", "exception", "(", "get_error_message", "(", "e", ")", ".", "to_text", "(", ")", ")", "clipped", "=", "layer", "return", "clipped" ]
avg_line_len: 34.904762    score: 18.085714

def handle(self, **options):
    """
    Command handler.
    """
    self.flushdb = options.get('flushdb')
    self._pre_tasks()
    self._create_users()
    self._create_badges()
    self._create_awards()
[ "def", "handle", "(", "self", ",", "*", "*", "options", ")", ":", "self", ".", "flushdb", "=", "options", ".", "get", "(", "'flushdb'", ")", "self", ".", "_pre_tasks", "(", ")", "self", ".", "_create_users", "(", ")", "self", ".", "_create_badges", "(", ")", "self", ".", "_create_awards", "(", ")" ]
avg_line_len: 25.555556    score: 9.111111

def process(self, input_data, topic=None, **kwargs):
    """
    Splits tuple received from PacketHandler into packet UID and packet
    message. Decodes packet and inserts into database backend. Logs any
    exceptions raised.

    Params:
        input_data: message received from inbound stream through PacketHandler
        topic:      name of inbound stream message received from
        **kwargs:   any args required for connecting to the backend
    """
    try:
        split = input_data[1:-1].split(',', 1)
        uid, pkt = int(split[0]), split[1]
        defn = self.packet_dict[uid]
        decoded = tlm.Packet(defn, data=bytearray(pkt))
        self.dbconn.insert(decoded, **kwargs)
    except Exception as e:
        log.error('Data archival failed with error: {}.'.format(e))
[ "def", "process", "(", "self", ",", "input_data", ",", "topic", "=", "None", ",", "*", "*", "kwargs", ")", ":", "try", ":", "split", "=", "input_data", "[", "1", ":", "-", "1", "]", ".", "split", "(", "','", ",", "1", ")", "uid", ",", "pkt", "=", "int", "(", "split", "[", "0", "]", ")", ",", "split", "[", "1", "]", "defn", "=", "self", ".", "packet_dict", "[", "uid", "]", "decoded", "=", "tlm", ".", "Packet", "(", "defn", ",", "data", "=", "bytearray", "(", "pkt", ")", ")", "self", ".", "dbconn", ".", "insert", "(", "decoded", ",", "*", "*", "kwargs", ")", "except", "Exception", "as", "e", ":", "log", ".", "error", "(", "'Data archival failed with error: {}.'", ".", "format", "(", "e", ")", ")" ]
avg_line_len: 44.473684    score: 18.894737

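The split logic above can be exercised in isolation. A minimal sketch, assuming the inbound message is a string of the form "(uid,payload)" as implied by the slicing and single-split above; the sample values are hypothetical:

# Hypothetical standalone demo of the UID/payload split used in process().
input_data = '(1234,0x01 0x02 0x03)'    # assumed wire format: "(uid,payload)"
split = input_data[1:-1].split(',', 1)  # strip parens, split on first comma only
uid, pkt = int(split[0]), split[1]
print(uid)  # 1234
print(pkt)  # '0x01 0x02 0x03'
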
def encode_uvarint(n, data):
    '''encodes integer into variable-length format into data.'''
    if n < 0:
        raise ValueError('only support positive integer')
    while True:
        this_byte = n & 127
        n >>= 7
        if n == 0:
            data.append(this_byte)
            break
        data.append(this_byte | 128)
[ "def", "encode_uvarint", "(", "n", ",", "data", ")", ":", "if", "n", "<", "0", ":", "raise", "ValueError", "(", "'only support positive integer'", ")", "while", "True", ":", "this_byte", "=", "n", "&", "127", "n", ">>=", "7", "if", "n", "==", "0", ":", "data", ".", "append", "(", "this_byte", ")", "break", "data", ".", "append", "(", "this_byte", "|", "128", ")" ]
avg_line_len: 29.454545    score: 18

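A quick round-trip check of the varint encoder above; the expected bytes follow the base-128 scheme the loop implements (low 7 bits first, high bit set as the continuation flag):

buf = bytearray()
encode_uvarint(300, buf)
# 300 = 0b10_0101100 -> 0b0101100 with continuation bit (0xAC), then 0b10 (0x02)
assert bytes(buf) == b'\xac\x02'
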
def failed_update(self, exception):
    """Update cluster state given a failed MetadataRequest."""
    f = None
    with self._lock:
        if self._future:
            f = self._future
            self._future = None
    if f:
        f.failure(exception)
    self._last_refresh_ms = time.time() * 1000
[ "def", "failed_update", "(", "self", ",", "exception", ")", ":", "f", "=", "None", "with", "self", ".", "_lock", ":", "if", "self", ".", "_future", ":", "f", "=", "self", ".", "_future", "self", ".", "_future", "=", "None", "if", "f", ":", "f", ".", "failure", "(", "exception", ")", "self", ".", "_last_refresh_ms", "=", "time", ".", "time", "(", ")", "*", "1000" ]
avg_line_len: 33.1    score: 11.5

def _make_sj_out_dict(fns, jxns=None, define_sample_name=None):
    """Read multiple sj_outs, return dict with keys as sample names and values
    as sj_out dataframes.

    Parameters
    ----------
    fns : list of strs of filenames or file handles
        List of filename of the SJ.out.tab files to read in

    jxns : set
        If provided, only keep junctions in this set.

    define_sample_name : function that takes string as input
        Function mapping filename to sample name. For instance, you may have
        the sample name in the path and use a regex to extract it. The sample
        names will be used as the column names. If this is not provided, the
        columns will be named as the input files.

    Returns
    -------
    sj_outD : dict
        Dict whose keys are sample names and values are sj_out dataframes

    """
    if define_sample_name == None:
        define_sample_name = lambda x: x
    else:
        assert len(set([define_sample_name(x) for x in fns])) == len(fns)

    sj_outD = dict()
    for fn in fns:
        sample = define_sample_name(fn)
        df = read_sj_out_tab(fn)

        # Remove any junctions that don't have any uniquely mapped junction
        # reads. Even if a junction passes the cutoff in other samples, we are
        # only concerned with unique counts.
        df = df[df.unique_junction_reads > 0]

        index = (df.chrom + ':' + df.start.astype(str) + '-' +
                 df.end.astype(str))
        assert len(index) == len(set(index))
        df.index = index

        # If jxns is provided, only keep those.
        if jxns:
            df = df.ix[set(df.index) & jxns]

        sj_outD[sample] = df
    return sj_outD
[ "def", "_make_sj_out_dict", "(", "fns", ",", "jxns", "=", "None", ",", "define_sample_name", "=", "None", ")", ":", "if", "define_sample_name", "==", "None", ":", "define_sample_name", "=", "lambda", "x", ":", "x", "else", ":", "assert", "len", "(", "set", "(", "[", "define_sample_name", "(", "x", ")", "for", "x", "in", "fns", "]", ")", ")", "==", "len", "(", "fns", ")", "sj_outD", "=", "dict", "(", ")", "for", "fn", "in", "fns", ":", "sample", "=", "define_sample_name", "(", "fn", ")", "df", "=", "read_sj_out_tab", "(", "fn", ")", "# Remove any junctions that don't have any uniquely mapped junction", "# reads. Even if a junction passes the cutoff in other samples, we are", "# only concerned with unique counts.", "df", "=", "df", "[", "df", ".", "unique_junction_reads", ">", "0", "]", "index", "=", "(", "df", ".", "chrom", "+", "':'", "+", "df", ".", "start", ".", "astype", "(", "str", ")", "+", "'-'", "+", "df", ".", "end", ".", "astype", "(", "str", ")", ")", "assert", "len", "(", "index", ")", "==", "len", "(", "set", "(", "index", ")", ")", "df", ".", "index", "=", "index", "# If jxns is provided, only keep those.", "if", "jxns", ":", "df", "=", "df", ".", "ix", "[", "set", "(", "df", ".", "index", ")", "&", "jxns", "]", "sj_outD", "[", "sample", "]", "=", "df", "return", "sj_outD" ]
avg_line_len: 35.382979    score: 21.319149

def contiguous_slice(in1):
    """
    This function unpads an array on the GPU in such a way as to make it
    contiguous.

    INPUTS:
    in1  (no default):  Array containing data which has been padded.

    OUTPUTS:
    gpu_out1            Array containing unpadded, contiguous data.
    """
    ker = SourceModule("""
        __global__ void contiguous_slice_ker(float *in1, float *out1)
        {
            const int len = gridDim.x*blockDim.x;
            const int col = (blockDim.x * blockIdx.x + threadIdx.x);
            const int row = (blockDim.y * blockIdx.y + threadIdx.y);
            const int tid2 = col + len*row;

            const int first_idx = len/4;
            const int last_idx = (3*len)/4;

            const int out_idx = (col-first_idx)+(row-first_idx)*(len/2);

            if (((col>=first_idx)&(row>=first_idx))&((col<last_idx)&(row<last_idx)))
            { out1[out_idx] = in1[tid2]; }
        }
        """, keep=True)

    gpu_out1 = gpuarray.empty([in1.shape[0]/2, in1.shape[1]/2], np.float32)

    contiguous_slice_ker = ker.get_function("contiguous_slice_ker")
    contiguous_slice_ker(in1, gpu_out1,
                         block=(32, 32, 1),
                         grid=(int(in1.shape[1]//32), int(in1.shape[0]//32)))

    return gpu_out1
[ "def", "contiguous_slice", "(", "in1", ")", ":", "ker", "=", "SourceModule", "(", "\"\"\"\n __global__ void contiguous_slice_ker(float *in1, float *out1)\n {\n const int len = gridDim.x*blockDim.x;\n const int col = (blockDim.x * blockIdx.x + threadIdx.x);\n const int row = (blockDim.y * blockIdx.y + threadIdx.y);\n const int tid2 = col + len*row;\n\n const int first_idx = len/4;\n const int last_idx = (3*len)/4;\n\n const int out_idx = (col-first_idx)+(row-first_idx)*(len/2);\n\n if (((col>=first_idx)&(row>=first_idx))&((col<last_idx)&(row<last_idx)))\n { out1[out_idx] = in1[tid2]; }\n\n }\n \"\"\"", ",", "keep", "=", "True", ")", "gpu_out1", "=", "gpuarray", ".", "empty", "(", "[", "in1", ".", "shape", "[", "0", "]", "/", "2", ",", "in1", ".", "shape", "[", "1", "]", "/", "2", "]", ",", "np", ".", "float32", ")", "contiguous_slice_ker", "=", "ker", ".", "get_function", "(", "\"contiguous_slice_ker\"", ")", "contiguous_slice_ker", "(", "in1", ",", "gpu_out1", ",", "block", "=", "(", "32", ",", "32", ",", "1", ")", ",", "grid", "=", "(", "int", "(", "in1", ".", "shape", "[", "1", "]", "//", "32", ")", ",", "int", "(", "in1", ".", "shape", "[", "0", "]", "//", "32", ")", ")", ")", "return", "gpu_out1" ]
avg_line_len: 39.305556    score: 30.583333

def _makeResult(self):
    """Return a Result that doesn't print dots.

    Nose's ResultProxy will wrap it, and other plugins can still print
    stuff---but without smashing into our progress bar, care of
    ProgressivePlugin's stderr/out wrapping.

    """
    return ProgressiveResult(self._cwd,
                             self._totalTests,
                             self.stream,
                             config=self.config)
[ "def", "_makeResult", "(", "self", ")", ":", "return", "ProgressiveResult", "(", "self", ".", "_cwd", ",", "self", ".", "_totalTests", ",", "self", ".", "stream", ",", "config", "=", "self", ".", "config", ")" ]
avg_line_len: 38.583333    score: 16.416667

def update(self, key, value):
    """Updates the map by updating the value of a key

    :type key: :class: '~opencensus.tags.tag_key.TagKey'
    :param key: A tag key to be updated

    :type value: :class: '~opencensus.tags.tag_value.TagValue'
    :param value: The value to update the key to in the map
    """
    if key in self.map:
        self.map[key] = value
[ "def", "update", "(", "self", ",", "key", ",", "value", ")", ":", "if", "key", "in", "self", ".", "map", ":", "self", ".", "map", "[", "key", "]", "=", "value" ]
avg_line_len: 32.416667    score: 18.583333

def error_handler(_, err, arg):
    """Update the mutable integer `arg` with the error code."""
    arg.value = err.error
    return libnl.handlers.NL_STOP
[ "def", "error_handler", "(", "_", ",", "err", ",", "arg", ")", ":", "arg", ".", "value", "=", "err", ".", "error", "return", "libnl", ".", "handlers", ".", "NL_STOP" ]
avg_line_len: 38    score: 7.75

def get_event_times(self):
    """
    Return event times of Fault, Breaker and other timed events

    Returns
    -------
    list
        A sorted list of event times
    """
    times = []

    times.extend(self.Breaker.get_times())

    for model in self.__dict__['Event'].all_models:
        times.extend(self.__dict__[model].get_times())

    if times:
        times = sorted(list(set(times)))

    return times
[ "def", "get_event_times", "(", "self", ")", ":", "times", "=", "[", "]", "times", ".", "extend", "(", "self", ".", "Breaker", ".", "get_times", "(", ")", ")", "for", "model", "in", "self", ".", "__dict__", "[", "'Event'", "]", ".", "all_models", ":", "times", ".", "extend", "(", "self", ".", "__dict__", "[", "model", "]", ".", "get_times", "(", ")", ")", "if", "times", ":", "times", "=", "sorted", "(", "list", "(", "set", "(", "times", ")", ")", ")", "return", "times" ]
avg_line_len: 22.75    score: 21.35

def add_group_user(self, group_id, user_id):
    """
    Adds an existing user to a group.

    :param group_id: The unique ID of the group.
    :type group_id: ``str``

    :param user_id: The unique ID of the user.
    :type user_id: ``str``

    """
    data = {
        "id": user_id
    }

    response = self._perform_request(
        url='/um/groups/%s/users' % group_id,
        method='POST',
        data=json.dumps(data))

    return response
[ "def", "add_group_user", "(", "self", ",", "group_id", ",", "user_id", ")", ":", "data", "=", "{", "\"id\"", ":", "user_id", "}", "response", "=", "self", ".", "_perform_request", "(", "url", "=", "'/um/groups/%s/users'", "%", "group_id", ",", "method", "=", "'POST'", ",", "data", "=", "json", ".", "dumps", "(", "data", ")", ")", "return", "response" ]
avg_line_len: 24.52381    score: 17.190476

def variables(self) -> tuple:
    """Variables."""
    try:
        assert self._variables is not None
    except (AssertionError, AttributeError):
        self._variables = [self[n] for n in self.variable_names]
    finally:
        return tuple(self._variables)
[ "def", "variables", "(", "self", ")", "->", "tuple", ":", "try", ":", "assert", "self", ".", "_variables", "is", "not", "None", "except", "(", "AssertionError", ",", "AttributeError", ")", ":", "self", ".", "_variables", "=", "[", "self", "[", "n", "]", "for", "n", "in", "self", ".", "variable_names", "]", "finally", ":", "return", "tuple", "(", "self", ".", "_variables", ")" ]
avg_line_len: 35.5    score: 13.25

def _score(estimator, X_test, y_test, scorer):
    """Compute the score of an estimator on a given test set."""
    if y_test is None:
        score = scorer(estimator, X_test)
    else:
        score = scorer(estimator, X_test, y_test)
    if not isinstance(score, numbers.Number):
        raise ValueError("scoring must return a number, got %s (%s) instead."
                         % (str(score), type(score)))
    return score
[ "def", "_score", "(", "estimator", ",", "X_test", ",", "y_test", ",", "scorer", ")", ":", "if", "y_test", "is", "None", ":", "score", "=", "scorer", "(", "estimator", ",", "X_test", ")", "else", ":", "score", "=", "scorer", "(", "estimator", ",", "X_test", ",", "y_test", ")", "if", "not", "isinstance", "(", "score", ",", "numbers", ".", "Number", ")", ":", "raise", "ValueError", "(", "\"scoring must return a number, got %s (%s) instead.\"", "%", "(", "str", "(", "score", ")", ",", "type", "(", "score", ")", ")", ")", "return", "score" ]
avg_line_len: 42.2    score: 14.4

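A small self-contained check of _score above; the estimator and scorer here are hypothetical stand-ins for scikit-learn objects, assuming only the scorer(estimator, X, y) calling convention the function relies on:

import numbers  # required by _score's isinstance check

class ConstantModel:
    """Hypothetical estimator exposing the minimal interface a scorer needs."""
    def score(self, X, y):
        return 0.75

accuracy_like = lambda est, X, y: est.score(X, y)
print(_score(ConstantModel(), [[0], [1]], [0, 1], accuracy_like))  # 0.75

bad_scorer = lambda est, X, y: "oops"
# _score(ConstantModel(), [[0]], [0], bad_scorer)  # would raise ValueError
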
def to_query(self, fields=None):
    """ Return a Query for this Table.

    Args:
      fields: the fields to return. If None, all fields will be returned.
          This can be a string which will be injected into the Query after
          SELECT, or a list of field names.

    Returns:
      A Query object that will return the specified fields from the records
      in the Table.
    """
    # Do import here to avoid top-level circular dependencies.
    from . import _query
    if fields is None:
        fields = '*'
    elif isinstance(fields, list):
        fields = ','.join(fields)
    return _query.Query('SELECT %s FROM %s' % (fields, self._repr_sql_()),
                        context=self._context)
[ "def", "to_query", "(", "self", ",", "fields", "=", "None", ")", ":", "# Do import here to avoid top-level circular dependencies.", "from", ".", "import", "_query", "if", "fields", "is", "None", ":", "fields", "=", "'*'", "elif", "isinstance", "(", "fields", ",", "list", ")", ":", "fields", "=", "','", ".", "join", "(", "fields", ")", "return", "_query", ".", "Query", "(", "'SELECT %s FROM %s'", "%", "(", "fields", ",", "self", ".", "_repr_sql_", "(", ")", ")", ",", "context", "=", "self", ".", "_context", ")" ]
avg_line_len: 38.588235    score: 26.294118

def decode_sent_msg(pref, message, pretty=False):
    """decode_sent_msg: Return a string of the decoded message
    """
    newline = "\n" if pretty else " "
    indent = " " if pretty else ""
    start = newline + indent
    out = []
    out.append("%s%s{%sSEQNUM: %d," % (pref, newline, start,
                                       message[Const.W_SEQ]))
    out.append("%sCOMPRESSION: %d," % (start, message[Const.W_COMPRESSION]))
    out.append("%sHASH: %s...," % (start, str(
        binascii.b2a_hex(message[Const.W_HASH]).decode('ascii'))[:10]))
    out.append("%sMESSAGE:%s{%sCLIENTREF: %s," % (
        start, start, start + indent,
        message[Const.W_MESSAGE][Const.M_CLIENTREF]))
    out.append("%sRESOURCE: %s," % (
        start + indent, R_TYPES[message[Const.W_MESSAGE][Const.M_RESOURCE]]))
    out.append("%sTYPE: %s," % (
        start + indent, C_TYPES[message[Const.W_MESSAGE][Const.M_TYPE]]))
    out.append("%sACTION: %s," % (
        start + indent, message[Const.W_MESSAGE][Const.M_ACTION]))
    if Const.M_RANGE in message[Const.W_MESSAGE]:
        out.append("%sRANGE: %s," % (
            start + indent, message[Const.W_MESSAGE][Const.M_RANGE]))
    out.append("%sPAYLOAD: %s%s}%s}" % (
        start + indent, message[Const.W_MESSAGE][Const.M_PAYLOAD],
        start, newline))
    return ''.join(out)
[ "def", "decode_sent_msg", "(", "pref", ",", "message", ",", "pretty", "=", "False", ")", ":", "newline", "=", "\"\\n\"", "if", "pretty", "else", "\" \"", "indent", "=", "\" \"", "if", "pretty", "else", "\"\"", "start", "=", "newline", "+", "indent", "out", "=", "[", "]", "out", ".", "append", "(", "\"%s%s{%sSEQNUM: %d,\"", "%", "(", "pref", ",", "newline", ",", "start", ",", "message", "[", "Const", ".", "W_SEQ", "]", ")", ")", "out", ".", "append", "(", "\"%sCOMPRESSION: %d,\"", "%", "(", "start", ",", "message", "[", "Const", ".", "W_COMPRESSION", "]", ")", ")", "out", ".", "append", "(", "\"%sHASH: %s...,\"", "%", "(", "start", ",", "str", "(", "binascii", ".", "b2a_hex", "(", "message", "[", "Const", ".", "W_HASH", "]", ")", ".", "decode", "(", "'ascii'", ")", ")", "[", ":", "10", "]", ")", ")", "out", ".", "append", "(", "\"%sMESSAGE:%s{%sCLIENTREF: %s,\"", "%", "(", "start", ",", "start", ",", "start", "+", "indent", ",", "message", "[", "Const", ".", "W_MESSAGE", "]", "[", "Const", ".", "M_CLIENTREF", "]", ")", ")", "out", ".", "append", "(", "\"%sRESOURCE: %s,\"", "%", "(", "start", "+", "indent", ",", "R_TYPES", "[", "message", "[", "Const", ".", "W_MESSAGE", "]", "[", "Const", ".", "M_RESOURCE", "]", "]", ")", ")", "out", ".", "append", "(", "\"%sTYPE: %s,\"", "%", "(", "start", "+", "indent", ",", "C_TYPES", "[", "message", "[", "Const", ".", "W_MESSAGE", "]", "[", "Const", ".", "M_TYPE", "]", "]", ")", ")", "out", ".", "append", "(", "\"%sACTION: %s,\"", "%", "(", "start", "+", "indent", ",", "message", "[", "Const", ".", "W_MESSAGE", "]", "[", "Const", ".", "M_ACTION", "]", ")", ")", "if", "Const", ".", "M_RANGE", "in", "message", "[", "Const", ".", "W_MESSAGE", "]", ":", "out", ".", "append", "(", "\"%sRANGE: %s,\"", "%", "(", "start", "+", "indent", ",", "message", "[", "Const", ".", "W_MESSAGE", "]", "[", "Const", ".", "M_RANGE", "]", ")", ")", "out", ".", "append", "(", "\"%sPAYLOAD: %s%s}%s}\"", "%", "(", "start", "+", "indent", ",", "message", "[", "Const", ".", "W_MESSAGE", "]", "[", "Const", ".", "M_PAYLOAD", "]", ",", "start", ",", "newline", ")", ")", "return", "''", ".", "join", "(", "out", ")" ]
avg_line_len: 59.47619    score: 33.619048

def get_value(self) -> Decimal:
    """ Returns the current value of stocks """
    quantity = self.get_quantity()
    price = self.get_last_available_price()
    if not price:
        # raise ValueError("no price found for", self.full_symbol)
        return Decimal(0)

    value = quantity * price.value
    return value
[ "def", "get_value", "(", "self", ")", "->", "Decimal", ":", "quantity", "=", "self", ".", "get_quantity", "(", ")", "price", "=", "self", ".", "get_last_available_price", "(", ")", "if", "not", "price", ":", "# raise ValueError(\"no price found for\", self.full_symbol)", "return", "Decimal", "(", "0", ")", "value", "=", "quantity", "*", "price", ".", "value", "return", "value" ]
avg_line_len: 34.5    score: 14

def fingerprint(self):
    """Returns a fingerprint for the identity of the task.

    A task fingerprint is composed of the options the task is currently
    running under. Useful for invalidating unchanging targets being executed
    beneath changing task options that affect outputted artifacts.

    A task's fingerprint is only valid after the task has been fully
    initialized.
    """
    hasher = sha1()
    hasher.update(self.stable_name().encode('utf-8'))
    hasher.update(self._options_fingerprint(self.options_scope).encode('utf-8'))
    hasher.update(self.implementation_version_str().encode('utf-8'))
    for dep in self.subsystem_closure_iter():
        hasher.update(self._options_fingerprint(dep.options_scope).encode('utf-8'))
    return hasher.hexdigest() if PY3 else hasher.hexdigest().decode('utf-8')
[ "def", "fingerprint", "(", "self", ")", ":", "hasher", "=", "sha1", "(", ")", "hasher", ".", "update", "(", "self", ".", "stable_name", "(", ")", ".", "encode", "(", "'utf-8'", ")", ")", "hasher", ".", "update", "(", "self", ".", "_options_fingerprint", "(", "self", ".", "options_scope", ")", ".", "encode", "(", "'utf-8'", ")", ")", "hasher", ".", "update", "(", "self", ".", "implementation_version_str", "(", ")", ".", "encode", "(", "'utf-8'", ")", ")", "for", "dep", "in", "self", ".", "subsystem_closure_iter", "(", ")", ":", "hasher", ".", "update", "(", "self", ".", "_options_fingerprint", "(", "dep", ".", "options_scope", ")", ".", "encode", "(", "'utf-8'", ")", ")", "return", "hasher", ".", "hexdigest", "(", ")", "if", "PY3", "else", "hasher", ".", "hexdigest", "(", ")", ".", "decode", "(", "'utf-8'", ")" ]
avg_line_len: 50.1875    score: 26

def SA_tank(D, L, sideA=None, sideB=None, sideA_a=0, sideB_a=0,
            sideA_f=None, sideA_k=None, sideB_f=None, sideB_k=None,
            full_output=False):
    r'''Calculates the surface area of a cylindrical tank with optional heads.
    In the degenerate case of being provided with only `D` and `L`, provides
    the surface area of a cylinder.

    Parameters
    ----------
    D : float
        Diameter of the cylindrical section of the tank, [m]
    L : float
        Length of the main cylindrical section of the tank, [m]
    sideA : string, optional
        The left (or bottom for vertical) head of the tank's type; one of
        [None, 'conical', 'ellipsoidal', 'torispherical', 'guppy',
        'spherical'].
    sideB : string, optional
        The right (or top for vertical) head of the tank's type; one of
        [None, 'conical', 'ellipsoidal', 'torispherical', 'guppy',
        'spherical'].
    sideA_a : float, optional
        The distance the head as specified by sideA extends down or to the
        left from the main cylindrical section, [m]
    sideB_a : float, optional
        The distance the head as specified by sideB extends up or to the
        right from the main cylindrical section, [m]
    sideA_f : float, optional
        Dish-radius parameter for side A; fD = dish radius [1/m]
    sideA_k : float, optional
        knuckle-radius parameter for side A; kD = knuckle radius [1/m]
    sideB_f : float, optional
        Dish-radius parameter for side B; fD = dish radius [1/m]
    sideB_k : float, optional
        knuckle-radius parameter for side B; kD = knuckle radius [1/m]

    Returns
    -------
    SA : float
        Surface area of the tank [m^2]
    areas : tuple, only returned if full_output == True
        (sideA_SA, sideB_SA, lateral_SA)

    Other Parameters
    ----------------
    full_output : bool, optional
        Returns a tuple of (sideA_SA, sideB_SA, lateral_SA) if True

    Examples
    --------
    Cylinder, Spheroid, Long Cones, and spheres. All checked.

    >>> SA_tank(D=2, L=2)
    18.84955592153876
    >>> SA_tank(D=1., L=0, sideA='ellipsoidal', sideA_a=2, sideB='ellipsoidal',
    ... sideB_a=2)
    28.480278854014387
    >>> SA_tank(D=1., L=5, sideA='conical', sideA_a=2, sideB='conical',
    ... sideB_a=2)
    22.18452243965656
    >>> SA_tank(D=1., L=5, sideA='spherical', sideA_a=0.5, sideB='spherical',
    ... sideB_a=0.5)
    18.84955592153876
    '''
    # Side A
    if sideA == 'conical':
        sideA_SA = SA_conical_head(D=D, a=sideA_a)
    elif sideA == 'ellipsoidal':
        sideA_SA = SA_ellipsoidal_head(D=D, a=sideA_a)
    elif sideA == 'guppy':
        sideA_SA = SA_guppy_head(D=D, a=sideA_a)
    elif sideA == 'spherical':
        sideA_SA = SA_partial_sphere(D=D, h=sideA_a)
    elif sideA == 'torispherical':
        sideA_SA = SA_torispheroidal(D=D, fd=sideA_f, fk=sideA_k)
    else:
        sideA_SA = pi/4*D**2  # Circle
    # Side B
    if sideB == 'conical':
        sideB_SA = SA_conical_head(D=D, a=sideB_a)
    elif sideB == 'ellipsoidal':
        sideB_SA = SA_ellipsoidal_head(D=D, a=sideB_a)
    elif sideB == 'guppy':
        sideB_SA = SA_guppy_head(D=D, a=sideB_a)
    elif sideB == 'spherical':
        sideB_SA = SA_partial_sphere(D=D, h=sideB_a)
    elif sideB == 'torispherical':
        sideB_SA = SA_torispheroidal(D=D, fd=sideB_f, fk=sideB_k)
    else:
        sideB_SA = pi/4*D**2  # Circle

    lateral_SA = pi*D*L

    SA = sideA_SA + sideB_SA + lateral_SA
    if full_output:
        return SA, (sideA_SA, sideB_SA, lateral_SA)
    else:
        return SA
[ "def", "SA_tank", "(", "D", ",", "L", ",", "sideA", "=", "None", ",", "sideB", "=", "None", ",", "sideA_a", "=", "0", ",", "sideB_a", "=", "0", ",", "sideA_f", "=", "None", ",", "sideA_k", "=", "None", ",", "sideB_f", "=", "None", ",", "sideB_k", "=", "None", ",", "full_output", "=", "False", ")", ":", "# Side A", "if", "sideA", "==", "'conical'", ":", "sideA_SA", "=", "SA_conical_head", "(", "D", "=", "D", ",", "a", "=", "sideA_a", ")", "elif", "sideA", "==", "'ellipsoidal'", ":", "sideA_SA", "=", "SA_ellipsoidal_head", "(", "D", "=", "D", ",", "a", "=", "sideA_a", ")", "elif", "sideA", "==", "'guppy'", ":", "sideA_SA", "=", "SA_guppy_head", "(", "D", "=", "D", ",", "a", "=", "sideA_a", ")", "elif", "sideA", "==", "'spherical'", ":", "sideA_SA", "=", "SA_partial_sphere", "(", "D", "=", "D", ",", "h", "=", "sideA_a", ")", "elif", "sideA", "==", "'torispherical'", ":", "sideA_SA", "=", "SA_torispheroidal", "(", "D", "=", "D", ",", "fd", "=", "sideA_f", ",", "fk", "=", "sideA_k", ")", "else", ":", "sideA_SA", "=", "pi", "/", "4", "*", "D", "**", "2", "# Circle", "# Side B", "if", "sideB", "==", "'conical'", ":", "sideB_SA", "=", "SA_conical_head", "(", "D", "=", "D", ",", "a", "=", "sideB_a", ")", "elif", "sideB", "==", "'ellipsoidal'", ":", "sideB_SA", "=", "SA_ellipsoidal_head", "(", "D", "=", "D", ",", "a", "=", "sideB_a", ")", "elif", "sideB", "==", "'guppy'", ":", "sideB_SA", "=", "SA_guppy_head", "(", "D", "=", "D", ",", "a", "=", "sideB_a", ")", "elif", "sideB", "==", "'spherical'", ":", "sideB_SA", "=", "SA_partial_sphere", "(", "D", "=", "D", ",", "h", "=", "sideB_a", ")", "elif", "sideB", "==", "'torispherical'", ":", "sideB_SA", "=", "SA_torispheroidal", "(", "D", "=", "D", ",", "fd", "=", "sideB_f", ",", "fk", "=", "sideB_k", ")", "else", ":", "sideB_SA", "=", "pi", "/", "4", "*", "D", "**", "2", "# Circle", "lateral_SA", "=", "pi", "*", "D", "*", "L", "SA", "=", "sideA_SA", "+", "sideB_SA", "+", "lateral_SA", "if", "full_output", ":", "return", "SA", ",", "(", "sideA_SA", ",", "sideB_SA", ",", "lateral_SA", ")", "else", ":", "return", "SA" ]
avg_line_len: 36.291667    score: 21

def request(self, action, data={}, headers={}, method='GET'):
    """
    Append the REST headers to every request
    """
    headers = {
        "Authorization": "Bearer " + self.token,
        "Content-Type": "application/json",
        "X-Version": "1",
        "Accept": "application/json"
    }

    return Transport.request(self, action, data, headers, method)
[ "def", "request", "(", "self", ",", "action", ",", "data", "=", "{", "}", ",", "headers", "=", "{", "}", ",", "method", "=", "'GET'", ")", ":", "headers", "=", "{", "\"Authorization\"", ":", "\"Bearer \"", "+", "self", ".", "token", ",", "\"Content-Type\"", ":", "\"application/json\"", ",", "\"X-Version\"", ":", "\"1\"", ",", "\"Accept\"", ":", "\"application/json\"", "}", "return", "Transport", ".", "request", "(", "self", ",", "action", ",", "data", ",", "headers", ",", "method", ")" ]
avg_line_len: 33    score: 15

def sparse_var(X):
    '''
    Compute column-wise variance of a sparse matrix.

    :param X: sparse matrix
    :return: array of per-column variances
    '''
    Xc = X.copy()
    Xc.data **= 2
    return np.array(Xc.mean(axis=0) - np.power(X.mean(axis=0), 2))[0]
[ "def", "sparse_var", "(", "X", ")", ":", "Xc", "=", "X", ".", "copy", "(", ")", "Xc", ".", "data", "**=", "2", "return", "np", ".", "array", "(", "Xc", ".", "mean", "(", "axis", "=", "0", ")", "-", "np", ".", "power", "(", "X", ".", "mean", "(", "axis", "=", "0", ")", ",", "2", ")", ")", "[", "0", "]" ]
avg_line_len: 20.555556    score: 25.888889

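The E[x^2] - E[x]^2 identity used by sparse_var above can be verified against a dense computation; a minimal sketch, assuming a SciPy CSR matrix as input:

import numpy as np
from scipy import sparse

X = sparse.csr_matrix([[1., 0., 2.],
                       [0., 3., 0.]])
print(sparse_var(X))                # per-column variance: [0.25 2.25 1.  ]
print(np.var(X.toarray(), axis=0))  # dense equivalent, same values
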
def properties_for(self, index):
    """
    Returns a list of properties, such that each entry in the list
    corresponds to the element of the index given.

    Example:
        let properties: 'one':[1,2,3,4], 'two':[3,5,6]

        >>> properties_for([2,3,5])
        [['one'], ['one', 'two'], ['two']]
    """
    return vectorize(lambda i: [prop for prop in self.properties()
                                if i in self[prop]],
                     otypes=[list])(index)
[ "def", "properties_for", "(", "self", ",", "index", ")", ":", "return", "vectorize", "(", "lambda", "i", ":", "[", "prop", "for", "prop", "in", "self", ".", "properties", "(", ")", "if", "i", "in", "self", "[", "prop", "]", "]", ",", "otypes", "=", "[", "list", "]", ")", "(", "index", ")" ]
avg_line_len: 36.5    score: 20.833333

def unseal(self, data, return_options=False):
    '''Unseal data'''
    data = self._remove_magic(data)
    data = urlsafe_nopadding_b64decode(data)
    options = self._read_header(data)
    data = self._add_magic(data)
    data = self._unsign_data(data, options)
    data = self._remove_magic(data)
    data = self._remove_header(data, options)
    data = self._decrypt_data(data, options)
    data = self._decompress_data(data, options)
    data = self._unserialize_data(data, options)
    if return_options:
        return data, options
    else:
        return data
[ "def", "unseal", "(", "self", ",", "data", ",", "return_options", "=", "False", ")", ":", "data", "=", "self", ".", "_remove_magic", "(", "data", ")", "data", "=", "urlsafe_nopadding_b64decode", "(", "data", ")", "options", "=", "self", ".", "_read_header", "(", "data", ")", "data", "=", "self", ".", "_add_magic", "(", "data", ")", "data", "=", "self", ".", "_unsign_data", "(", "data", ",", "options", ")", "data", "=", "self", ".", "_remove_magic", "(", "data", ")", "data", "=", "self", ".", "_remove_header", "(", "data", ",", "options", ")", "data", "=", "self", ".", "_decrypt_data", "(", "data", ",", "options", ")", "data", "=", "self", ".", "_decompress_data", "(", "data", ",", "options", ")", "data", "=", "self", ".", "_unserialize_data", "(", "data", ",", "options", ")", "if", "return_options", ":", "return", "data", ",", "options", "else", ":", "return", "data" ]
avg_line_len: 34.111111    score: 12.666667

def empty_like(array, dtype=None, keepmeta=True):
    """Create an empty array with the same shape and type as the input array.

    Args:
        array (xarray.DataArray): The shape and data-type of it define
            these same attributes of the output array.
        dtype (data-type, optional): If specified, this function overrides
            the data-type of the output array.
        keepmeta (bool, optional): Whether *coords, attrs, and name of the
            input array are kept in the output one. Default is True.

    Returns:
        array (decode.array): Decode array without initializing entries.

    """
    if keepmeta:
        return dc.empty(array.shape, dtype,
                        tcoords=array.dca.tcoords,
                        chcoords=array.dca.chcoords,
                        scalarcoords=array.dca.scalarcoords,
                        attrs=array.attrs,
                        name=array.name)
    else:
        return dc.empty(array.shape, dtype)
[ "def", "empty_like", "(", "array", ",", "dtype", "=", "None", ",", "keepmeta", "=", "True", ")", ":", "if", "keepmeta", ":", "return", "dc", ".", "empty", "(", "array", ".", "shape", ",", "dtype", ",", "tcoords", "=", "array", ".", "dca", ".", "tcoords", ",", "chcoords", "=", "array", ".", "dca", ".", "chcoords", ",", "scalarcoords", "=", "array", ".", "dca", ".", "scalarcoords", ",", "attrs", "=", "array", ".", "attrs", ",", "name", "=", "array", ".", "name", ")", "else", ":", "return", "dc", ".", "empty", "(", "array", ".", "shape", ",", "dtype", ")" ]
avg_line_len: 42.142857    score: 23.238095

def inserir(self, name, id_equipment_type, id_model, id_group,
            maintenance=False):
    """Inserts a new Equipment and returns its identifier.

    Besides inserting the equipment, networkAPI also associates the equipment
    with the given group.

    :param name: Equipment name. String with a minimum 3 and maximum of 30
        characters
    :param id_equipment_type: Identifier of the Equipment Type. Integer value
        and greater than zero.
    :param id_model: Identifier of the Model. Integer value and greater than
        zero.
    :param id_group: Identifier of the Group. Integer value and greater than
        zero.

    :return: Dictionary with the following structure:

    ::

        {'equipamento': {'id': < id_equipamento >},
         'equipamento_grupo': {'id': < id_grupo_equipamento >}}

    :raise InvalidParameterError: The identifier of Equipment type, model,
        group or name is null and invalid.
    :raise TipoEquipamentoNaoExisteError: Equipment Type not registered.
    :raise ModeloEquipamentoNaoExisteError: Model not registered.
    :raise GrupoEquipamentoNaoExisteError: Group not registered.
    :raise EquipamentoError: Equipment with a duplicate name, or equipment in
        the group "Equipamentos Orquestração" can only be created with type
        "Servidor Virtual".
    :raise DataBaseError: Networkapi failed to access the database.
    :raise XMLError: Networkapi failed to generate the XML response.
    """
    equip_map = dict()
    equip_map['id_tipo_equipamento'] = id_equipment_type
    equip_map['id_modelo'] = id_model
    equip_map['nome'] = name
    equip_map['id_grupo'] = id_group
    equip_map['maintenance'] = maintenance

    code, xml = self.submit(
        {'equipamento': equip_map}, 'POST', 'equipamento/')

    return self.response(code, xml)
[ "def", "inserir", "(", "self", ",", "name", ",", "id_equipment_type", ",", "id_model", ",", "id_group", ",", "maintenance", "=", "False", ")", ":", "equip_map", "=", "dict", "(", ")", "equip_map", "[", "'id_tipo_equipamento'", "]", "=", "id_equipment_type", "equip_map", "[", "'id_modelo'", "]", "=", "id_model", "equip_map", "[", "'nome'", "]", "=", "name", "equip_map", "[", "'id_grupo'", "]", "=", "id_group", "equip_map", "[", "'maintenance'", "]", "=", "maintenance", "code", ",", "xml", "=", "self", ".", "submit", "(", "{", "'equipamento'", ":", "equip_map", "}", ",", "'POST'", ",", "'equipamento/'", ")", "return", "self", ".", "response", "(", "code", ",", "xml", ")" ]
avg_line_len: 45.658537    score: 28.585366

async def workerTypeStats(self, *args, **kwargs):
    """
    Look up the resource stats for a workerType

    Return an object which has a generic state description.
    This only contains counts of instances.

    This method gives output: ``v1/worker-type-resources.json#``

    This method is ``experimental``
    """
    return await self._makeApiCall(self.funcinfo["workerTypeStats"],
                                   *args, **kwargs)
[ "async", "def", "workerTypeStats", "(", "self", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "return", "await", "self", ".", "_makeApiCall", "(", "self", ".", "funcinfo", "[", "\"workerTypeStats\"", "]", ",", "*", "args", ",", "*", "*", "kwargs", ")" ]
avg_line_len: 35    score: 26.666667

def Update(self, size):
    '''
    Custom wrapper calculator to account for the increased size of the _msg
    widget after being inlined with the wx.CheckBox
    '''
    if self._msg is None:
        return
    help_msg = self._msg
    width, height = size
    content_area = int((width / 3) * .70)

    wiggle_room = range(int(content_area - content_area * .05),
                        int(content_area + content_area * .05))
    if help_msg.Size[0] not in wiggle_room:
        self._msg.SetLabel(self._msg.GetLabelText().replace('\n', ' '))
        self._msg.Wrap(content_area)
[ "def", "Update", "(", "self", ",", "size", ")", ":", "if", "self", ".", "_msg", "is", "None", ":", "return", "help_msg", "=", "self", ".", "_msg", "width", ",", "height", "=", "size", "content_area", "=", "int", "(", "(", "width", "/", "3", ")", "*", ".70", ")", "wiggle_room", "=", "range", "(", "int", "(", "content_area", "-", "content_area", "*", ".05", ")", ",", "int", "(", "content_area", "+", "content_area", "*", ".05", ")", ")", "if", "help_msg", ".", "Size", "[", "0", "]", "not", "in", "wiggle_room", ":", "self", ".", "_msg", ".", "SetLabel", "(", "self", ".", "_msg", ".", "GetLabelText", "(", ")", ".", "replace", "(", "'\\n'", ",", "' '", ")", ")", "self", ".", "_msg", ".", "Wrap", "(", "content_area", ")" ]
avg_line_len: 33.8125    score: 20.3125

def get_dataset(ds, dataDir, removecompressed=1):
    """
    A function which attempts to download and uncompress the latest version
    of an openfmri.org dataset.

    PARAMETERS

    :ds: dataset number of the openfMRI.org dataset (integer) without zero
        padding. I.e. can just be 212 (doesn't need to be 000212).
    :dataDir: where to save the data. Will get saved in
        'dataDir/openfmri/ds000XXX'.
    :removecompressed: delete compressed data once unzipped. 1=yes. 0=no.

    NOTES

    There is no "default" way to download data from openfMRI so this solution
    is a little hacky. It may not be a universal function and it is best to
    verify that all necessary data has been downloaded.
    """
    # Convert input ds to string in case it is put in via function
    ds = str(ds)
    # The final character of the dataset can be a letter
    lettersuffix = ''
    if re.search('[A-Za-z]$', ds):
        lettersuffix = ds[-1]
        ds = ds[:-1]
    openfMRI_dataset_string = '{0:06d}'.format(int(ds)) + lettersuffix
    # Some datasets include
    try:
        os.mkdir(dataDir)
    except:
        pass
    datasetDir = os.path.join(dataDir, 'openfmri/')
    try:
        os.mkdir(datasetDir)
    except:
        pass
    openfMRI_url = 'https://openfmri.org/dataset/ds' + openfMRI_dataset_string + '/'
    r = urlopen(openfMRI_url).read()
    soup = BeautifulSoup(r, 'lxml')
    # Isolate only the links from the latest revision. The text "data
    # associated with revision". If the website changes its static text,
    # this needs to be changed.
    unformatted_soup = soup.prettify()
    firstOccurance = unformatted_soup.find('Data Associated with Revision')
    secondOccurancce = unformatted_soup[firstOccurance + 1:].find(
        'Data Associated with Revision')
    # If there is only one "Data Associated..." (i.e. only one revision) this
    # returns -1. This should be kept. Otherwise add on the firstOccurance
    # index.
    if secondOccurancce != -1:
        secondOccurancce += firstOccurance
    # The latest links are confined within this part of the text
    soup_latestversion = BeautifulSoup(
        unformatted_soup[firstOccurance:secondOccurancce], 'lxml')

    # Loop through all links and download files
    filelist = []
    for a in soup_latestversion.find_all('a', href=True):
        # This assumes that all files include ds....
        if re.search('ds[A-Za-z_0-9.-]*$', a['href']):
            filename_start = re.search('ds[A-Za-z_0-9.-]*$', a['href']).start()
            filelist.append(a['href'][filename_start:])
            print('Downloading: ' + a['href'][filename_start:])
            urlretrieve(a['href'], datasetDir + a['href'][filename_start:])
    print('--- Download complete ---')
    for f in filelist:
        untar_or_unzip(datasetDir, f)
    print('--- Uncompressing complete ---')
    if removecompressed == 1:
        for f in filelist:
            print('Clean up. Deleting: ' + f)
            os.remove(datasetDir + f)
        print('--- Clean up complete ---')
    print('NOTE: It is best to verify manually that all the correct data has '
          'been downloaded and uncompressed correctly. \n If data is used in '
          'any publication, see openfmri.org about how to appropriately '
          'cite/credit the data.')
    print('--- Script complete ---')
[ "def", "get_dataset", "(", "ds", ",", "dataDir", ",", "removecompressed", "=", "1", ")", ":", "#Convert input ds to string incase it is put in via function", "ds", "=", "str", "(", "ds", ")", "#The final character of the dataset can be a letter", "lettersuffix", "=", "''", "if", "re", ".", "search", "(", "'[A-Za-z]$'", ",", "ds", ")", ":", "lettersuffix", "=", "ds", "[", "-", "1", "]", "ds", "=", "ds", "[", ":", "-", "1", "]", "openfMRI_dataset_string", "=", "'{0:06d}'", ".", "format", "(", "int", "(", "ds", ")", ")", "+", "lettersuffix", "#Some datasets include", "try", ":", "os", ".", "mkdir", "(", "dataDir", ")", "except", ":", "pass", "datasetDir", "=", "os", ".", "path", ".", "join", "(", "dataDir", ",", "'openfmri/'", ")", "try", ":", "os", ".", "mkdir", "(", "datasetDir", ")", "except", ":", "pass", "openfMRI_url", "=", "'https://openfmri.org/dataset/ds'", "+", "openfMRI_dataset_string", "+", "'/'", "r", "=", "urlopen", "(", "openfMRI_url", ")", ".", "read", "(", ")", "soup", "=", "BeautifulSoup", "(", "r", ",", "'lxml'", ")", "#Isolate only the links from the latest revision. The text \"data associated with revision\". If the website changes its static text, this needs to be changed", "unformatted_soup", "=", "soup", ".", "prettify", "(", ")", "firstOccurance", "=", "unformatted_soup", ".", "find", "(", "'Data Associated with Revision'", ")", "secondOccurancce", "=", "unformatted_soup", "[", "firstOccurance", "+", "1", ":", "]", ".", "find", "(", "'Data Associated with Revision'", ")", "#If there is only one \"Data Associated...\" (i.e. only one revision) this returns -1. This should be kept. Otherwise add on the firstOccurance index", "if", "secondOccurancce", "!=", "-", "1", ":", "secondOccurancce", "+=", "firstOccurance", "#The latest links are confined within this part of the text", "soup_latestversion", "=", "BeautifulSoup", "(", "unformatted_soup", "[", "firstOccurance", ":", "secondOccurancce", "]", ",", "'lxml'", ")", "# Loop through all links and dowload files", "filelist", "=", "[", "]", "for", "a", "in", "soup_latestversion", ".", "find_all", "(", "'a'", ",", "href", "=", "True", ")", ":", "#This assumes that all files include ds....", "if", "re", ".", "search", "(", "'ds[A-Za-z_0-9.-]*$'", ",", "a", "[", "'href'", "]", ")", ":", "filename_start", "=", "re", ".", "search", "(", "'ds[A-Za-z_0-9.-]*$'", ",", "a", "[", "'href'", "]", ")", ".", "start", "(", ")", "filelist", ".", "append", "(", "a", "[", "'href'", "]", "[", "filename_start", ":", "]", ")", "print", "(", "'Downloading: '", "+", "a", "[", "'href'", "]", "[", "filename_start", ":", "]", ")", "urlretrieve", "(", "a", "[", "'href'", "]", ",", "datasetDir", "+", "a", "[", "'href'", "]", "[", "filename_start", ":", "]", ")", "print", "(", "'--- Download complete ---'", ")", "for", "f", "in", "filelist", ":", "untar_or_unzip", "(", "datasetDir", ",", "f", ")", "print", "(", "'--- Uncompressing complete ---'", ")", "if", "removecompressed", "==", "1", ":", "for", "f", "in", "filelist", ":", "print", "(", "'Clean up. Deleting: '", "+", "f", ")", "os", ".", "remove", "(", "datasetDir", "+", "f", ")", "print", "(", "'--- Clean up complete ---'", ")", "print", "(", "'NOTE: It is best to verify manually that all the correct data has been downloaded and uncompressed correctly. \\n If data is used in any publication, see openfmri.org about how to appropriately cite/credit the data.'", ")", "print", "(", "'--- Script complete ---'", ")" ]
avg_line_len: 44.9    score: 30.585714

def notimplemented(func):
    """Raises a NotImplementedError if the function is called."""
    def newfunc(*args, **kwargs):
        """Raise NotImplementedError"""
        co = func.func_code
        attrs = (co.co_name, co.co_filename, co.co_firstlineno)
        raise NotImplementedError("function %s at %s:%d is not implemented" % attrs)
    return update_func_meta(newfunc, func)
[ "def", "notimplemented", "(", "func", ")", ":", "def", "newfunc", "(", "*", "args", ",", "*", "*", "kwargs", ")", ":", "\"\"\"Raise NotImplementedError\"\"\"", "co", "=", "func", ".", "func_code", "attrs", "=", "(", "co", ".", "co_name", ",", "co", ".", "co_filename", ",", "co", ".", "co_firstlineno", ")", "raise", "NotImplementedError", "(", "\"function %s at %s:%d is not implemented\"", "%", "attrs", ")", "return", "update_func_meta", "(", "newfunc", ",", "func", ")" ]
avg_line_len: 47.5    score: 12.75

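The sample above is Python 2 (func.func_code, and the library's own update_func_meta helper); a hedged Python 3 sketch of the same idea, substituting functools.wraps for update_func_meta:

import functools

def notimplemented_py3(func):
    """Raise NotImplementedError if the decorated function is called."""
    @functools.wraps(func)  # stands in for update_func_meta here
    def newfunc(*args, **kwargs):
        co = func.__code__  # func_code was renamed __code__ in Python 3
        raise NotImplementedError(
            "function %s at %s:%d is not implemented"
            % (co.co_name, co.co_filename, co.co_firstlineno))
    return newfunc

@notimplemented_py3
def planned_feature():
    pass

# planned_feature()  # raises NotImplementedError with name, file and line
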
def scale_xy(self, x=0, y=None):
    """Scale element separately across the two axes x and y.
    If y is not provided, it is assumed equal to x (according to the W3
    specification).

    Parameters
    ----------
    x : float
        x-axis scaling factor. To scale down x < 1, scale up x > 1.
    y : (optional) float
        y-axis scaling factor. To scale down y < 1, scale up y > 1.

    """
    # Default y to x so the "%f %f" format below always receives two floats,
    # matching the documented behaviour.
    self.root.set("transform", "%s scale(%f %f)" % (
        self.root.get("transform") or '', x, y if y is not None else x))
[ "def", "scale_xy", "(", "self", ",", "x", "=", "0", ",", "y", "=", "None", ")", ":", "self", ".", "root", ".", "set", "(", "\"transform\"", ",", "\"%s scale(%f %f)\"", "%", "(", "self", ".", "root", ".", "get", "(", "\"transform\"", ")", "or", "''", ",", "x", ",", "y", "if", "y", "is", "not", "None", "else", "''", ")", ")" ]
avg_line_len: 37.5    score: 19.9375

def get_banks_by_assessment_taken(self, assessment_taken_id):
    """Gets the list of ``Banks`` mapped to an ``AssessmentTaken``.

    arg:    assessment_taken_id (osid.id.Id): ``Id`` of an
            ``AssessmentTaken``
    return: (osid.assessment.BankList) - list of banks
    raise:  NotFound - ``assessment_taken_id`` is not found
    raise:  NullArgument - ``assessment_taken_id`` is ``null``
    raise:  OperationFailed - unable to complete request
    raise:  PermissionDenied - authorization failure occurred
    *compliance: mandatory -- This method must be implemented.*

    """
    # Implemented from template for
    # osid.resource.ResourceBinSession.get_bins_by_resource
    mgr = self._get_provider_manager('ASSESSMENT', local=True)
    lookup_session = mgr.get_bank_lookup_session(proxy=self._proxy)
    return lookup_session.get_banks_by_ids(
        self.get_bank_ids_by_assessment_taken(assessment_taken_id))
[ "def", "get_banks_by_assessment_taken", "(", "self", ",", "assessment_taken_id", ")", ":", "# Implemented from template for", "# osid.resource.ResourceBinSession.get_bins_by_resource", "mgr", "=", "self", ".", "_get_provider_manager", "(", "'ASSESSMENT'", ",", "local", "=", "True", ")", "lookup_session", "=", "mgr", ".", "get_bank_lookup_session", "(", "proxy", "=", "self", ".", "_proxy", ")", "return", "lookup_session", ".", "get_banks_by_ids", "(", "self", ".", "get_bank_ids_by_assessment_taken", "(", "assessment_taken_id", ")", ")" ]
avg_line_len: 51.368421    score: 20.315789

def c2r(self):
    """Get real matrix from complex one suitable for solving complex linear
    system with real solver.

    For matrix :math:`M(i_1,j_1,\\ldots,i_d,j_d) = \\Re M + i\\Im M` returns
    (d+1)-dimensional matrix
    :math:`\\tilde{M}(i_1,j_1,\\ldots,i_d,j_d,i_{d+1},j_{d+1})` of form
    :math:`\\begin{bmatrix}\\Re M & -\\Im M \\\\ \\Im M & \\Re M
    \\end{bmatrix}`. This function is useful for solving complex linear
    system :math:`\\mathcal{A}X = B` with real solver by transforming it
    into

    .. math::
       \\begin{bmatrix}\\Re\\mathcal{A} & -\\Im\\mathcal{A} \\\\
                       \\Im\\mathcal{A} & \\Re\\mathcal{A}
       \\end{bmatrix}
       \\begin{bmatrix}\\Re X \\\\ \\Im X\\end{bmatrix} =
       \\begin{bmatrix}\\Re B \\\\ \\Im B\\end{bmatrix}.

    """
    return matrix(a=self.tt.__complex_op('M'),
                  n=_np.concatenate((self.n, [2])),
                  m=_np.concatenate((self.m, [2])))
[ "def", "c2r", "(", "self", ")", ":", "return", "matrix", "(", "a", "=", "self", ".", "tt", ".", "__complex_op", "(", "'M'", ")", ",", "n", "=", "_np", ".", "concatenate", "(", "(", "self", ".", "n", ",", "[", "2", "]", ")", ")", ",", "m", "=", "_np", ".", "concatenate", "(", "(", "self", ".", "m", ",", "[", "2", "]", ")", ")", ")" ]
avg_line_len: 52.777778    score: 30.944444

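The block-matrix identity in the c2r docstring can be demonstrated without the tt machinery; a plain NumPy sketch (the names here are illustrative, not part of the tt API):

import numpy as np

rng = np.random.default_rng(0)
A = rng.standard_normal((3, 3)) + 1j * rng.standard_normal((3, 3))
b = rng.standard_normal(3) + 1j * rng.standard_normal(3)

# [[Re A, -Im A], [Im A, Re A]] acting on [Re x; Im x] equals [Re b; Im b]
A_real = np.block([[A.real, -A.imag],
                   [A.imag,  A.real]])
b_real = np.concatenate([b.real, b.imag])

x_real = np.linalg.solve(A_real, b_real)
x = x_real[:3] + 1j * x_real[3:]
print(np.allclose(A @ x, b))  # True: the real solve recovers the complex solution
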
def get_edgestore_handle(
    client: arango.client.ArangoClient,
    username=None,
    password=None,
    edgestore_db_name: str = edgestore_db_name,
    edgestore_edges_name: str = edgestore_edges_name,
    edgestore_nodes_name: str = edgestore_nodes_name,
    edgestore_pipeline_name: str = edgestore_pipeline_name,
    edgestore_pipeline_stats_name: str = edgestore_pipeline_stats_name,
    edgestore_pipeline_errors_name: str = edgestore_pipeline_errors_name,
) -> arango.database.StandardDatabase:
    """Get Edgestore arangodb database handle

    Args:
        client (arango.client.ArangoClient): Description
        username (None, optional): Description
        password (None, optional): Description
        edgestore_db_name (str, optional): Description
        edgestore_edges_name (str, optional): Description
        edgestore_nodes_name (str, optional): Description

    Returns:
        arango.database.StandardDatabase: Description
    """
    (username, password) = get_user_creds(username, password)

    sys_db = client.db("_system", username=username, password=password)

    # Create a new database named "edgestore"
    try:
        if username and password:
            edgestore_db = sys_db.create_database(
                name=edgestore_db_name,
                users=[{"username": username, "password": password, "active": True}],
            )
        else:
            edgestore_db = sys_db.create_database(name=edgestore_db_name)
    except arango.exceptions.DatabaseCreateError:
        if username and password:
            edgestore_db = client.db(
                edgestore_db_name, username=username, password=password
            )
        else:
            edgestore_db = client.db(edgestore_db_name)

    # TODO - add a skiplist index for _from? or _key? to be able to do paging?

    # has_collection function doesn't seem to be working
    # if not edgestore_db.has_collection(edgestore_nodes_name):
    try:
        nodes = edgestore_db.create_collection(
            edgestore_nodes_name, index_bucket_count=64
        )
        nodes.add_hash_index(fields=["name"], unique=False)
        nodes.add_hash_index(
            fields=["components"], unique=False
        )  # add subject/object components as node properties
    except Exception:
        pass

    # if not edgestore_db.has_collection(edgestore_edges_name):
    try:
        edges = edgestore_db.create_collection(
            edgestore_edges_name, edge=True, index_bucket_count=64
        )
        edges.add_hash_index(fields=["relation"], unique=False)
        edges.add_hash_index(fields=["edge_types"], unique=False)
        edges.add_hash_index(fields=["nanopub_id"], unique=False)
        edges.add_hash_index(fields=["metadata.project"], unique=False)
        edges.add_hash_index(fields=["annotations[*].id"], unique=False)
    except Exception:
        pass

    # if not edgestore_db.has_collection(edgestore_pipeline_name):
    try:
        edgestore_db.create_collection(edgestore_pipeline_name)
    except Exception:
        pass

    try:
        edgestore_db.create_collection(edgestore_pipeline_errors_name)
    except Exception:
        pass

    try:
        edgestore_db.create_collection(edgestore_pipeline_stats_name)
    except arango.exceptions.CollectionCreateError as e:
        pass

    return edgestore_db
[ "def", "get_edgestore_handle", "(", "client", ":", "arango", ".", "client", ".", "ArangoClient", ",", "username", "=", "None", ",", "password", "=", "None", ",", "edgestore_db_name", ":", "str", "=", "edgestore_db_name", ",", "edgestore_edges_name", ":", "str", "=", "edgestore_edges_name", ",", "edgestore_nodes_name", ":", "str", "=", "edgestore_nodes_name", ",", "edgestore_pipeline_name", ":", "str", "=", "edgestore_pipeline_name", ",", "edgestore_pipeline_stats_name", ":", "str", "=", "edgestore_pipeline_stats_name", ",", "edgestore_pipeline_errors_name", ":", "str", "=", "edgestore_pipeline_errors_name", ",", ")", "->", "arango", ".", "database", ".", "StandardDatabase", ":", "(", "username", ",", "password", ")", "=", "get_user_creds", "(", "username", ",", "password", ")", "sys_db", "=", "client", ".", "db", "(", "\"_system\"", ",", "username", "=", "username", ",", "password", "=", "password", ")", "# Create a new database named \"edgestore\"", "try", ":", "if", "username", "and", "password", ":", "edgestore_db", "=", "sys_db", ".", "create_database", "(", "name", "=", "edgestore_db_name", ",", "users", "=", "[", "{", "\"username\"", ":", "username", ",", "\"password\"", ":", "password", ",", "\"active\"", ":", "True", "}", "]", ",", ")", "else", ":", "edgestore_db", "=", "sys_db", ".", "create_database", "(", "name", "=", "edgestore_db_name", ")", "except", "arango", ".", "exceptions", ".", "DatabaseCreateError", ":", "if", "username", "and", "password", ":", "edgestore_db", "=", "client", ".", "db", "(", "edgestore_db_name", ",", "username", "=", "username", ",", "password", "=", "password", ")", "else", ":", "edgestore_db", "=", "client", ".", "db", "(", "edgestore_db_name", ")", "# TODO - add a skiplist index for _from? or _key? to be able to do paging?", "# has_collection function doesn't seem to be working", "# if not edgestore_db.has_collection(edgestore_nodes_name):", "try", ":", "nodes", "=", "edgestore_db", ".", "create_collection", "(", "edgestore_nodes_name", ",", "index_bucket_count", "=", "64", ")", "nodes", ".", "add_hash_index", "(", "fields", "=", "[", "\"name\"", "]", ",", "unique", "=", "False", ")", "nodes", ".", "add_hash_index", "(", "fields", "=", "[", "\"components\"", "]", ",", "unique", "=", "False", ")", "# add subject/object components as node properties", "except", "Exception", ":", "pass", "# if not edgestore_db.has_collection(edgestore_edges_name):", "try", ":", "edges", "=", "edgestore_db", ".", "create_collection", "(", "edgestore_edges_name", ",", "edge", "=", "True", ",", "index_bucket_count", "=", "64", ")", "edges", ".", "add_hash_index", "(", "fields", "=", "[", "\"relation\"", "]", ",", "unique", "=", "False", ")", "edges", ".", "add_hash_index", "(", "fields", "=", "[", "\"edge_types\"", "]", ",", "unique", "=", "False", ")", "edges", ".", "add_hash_index", "(", "fields", "=", "[", "\"nanopub_id\"", "]", ",", "unique", "=", "False", ")", "edges", ".", "add_hash_index", "(", "fields", "=", "[", "\"metadata.project\"", "]", ",", "unique", "=", "False", ")", "edges", ".", "add_hash_index", "(", "fields", "=", "[", "\"annotations[*].id\"", "]", ",", "unique", "=", "False", ")", "except", "Exception", ":", "pass", "# if not edgestore_db.has_collection(edgestore_pipeline_name):", "try", ":", "edgestore_db", ".", "create_collection", "(", "edgestore_pipeline_name", ")", "except", "Exception", ":", "pass", "try", ":", "edgestore_db", ".", "create_collection", "(", "edgestore_pipeline_errors_name", ")", "except", "Exception", ":", "pass", "try", ":", 
"edgestore_db", ".", "create_collection", "(", "edgestore_pipeline_stats_name", ")", "except", "arango", ".", "exceptions", ".", "CollectionCreateError", "as", "e", ":", "pass", "return", "edgestore_db" ]
36.155556
22.8
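A minimal usage sketch for get_edgestore_handle above; the host URL and credentials are hypothetical placeholders, and it assumes a recent python-arango release where ArangoClient takes a hosts URL.

import arango

# Hypothetical connection details; adjust for a real ArangoDB instance.
client = arango.ArangoClient(hosts="http://localhost:8529")
# Creates the database and collections on first call, then returns a
# StandardDatabase handle ready for queries.
edgestore_db = get_edgestore_handle(client, username="root", password="secret")
print(edgestore_db.collections())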
def getheaderAnim(self, im): """ getheaderAnim(im) Get animation header. To replace PIL's getheader()[0] """ bb = "GIF89a" bb += intToBin(im.size[0]) bb += intToBin(im.size[1]) bb += "\x87\x00\x00" return bb
[ "def", "getheaderAnim", "(", "self", ",", "im", ")", ":", "bb", "=", "\"GIF89a\"", "bb", "+=", "intToBin", "(", "im", ".", "size", "[", "0", "]", ")", "bb", "+=", "intToBin", "(", "im", ".", "size", "[", "1", "]", ")", "bb", "+=", "\"\\x87\\x00\\x00\"", "return", "bb" ]
23.818182
16.181818
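The method above leans on an intToBin helper that is not shown in this row; a plausible sketch, assuming it packs a 16-bit little-endian integer as the GIF format requires (this images2gif-era code targets Python 2, where struct.pack returns str):

import struct

def intToBin(i):
    # GIF stores 16-bit values least-significant byte first.
    return struct.pack("<H", i)

# For a 320x200 canvas the header is "GIF89a" + width + height + flags,
# where 0x87 sets the global color table flag with a 256-entry table.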
def rsys2dot(rsys, tex=False, rprefix='r', rref0=1, nodeparams='[label="{}",shape=diamond]', colors=('maroon', 'darkgreen'), penwidths=None, include_inactive=True): """ Returns a list of lines of a DOT (graph description language) formatted graph. Parameters ========== rsys: ReactionSystem tex: bool (default False) If set True, output will be LaTeX formatted (Substances need to have latex_name attribute set) rprefix: string Reaction enumeration prefix, default: r rref0: integer Reaction enumeration initial counter value, default: 1 nodeparams: string DOT formatted param list, default: [label={} shape=diamond] Returns ======= list of lines of the DOT representation of the graph. """ lines = ['digraph "' + str(rsys.name) + '" {\n'] ind = ' ' # indentation if penwidths is None: penwidths = [1.0]*rsys.nr categories = rsys.categorize_substances(checks=()) def add_substance(key): fc = 'black' if key in categories['depleted']: fc = colors[0] if key in categories['accumulated']: fc = colors[1] label = ('$%s$' if tex else '%s') % getattr(rsys.substances[key], 'latex_name' if tex else 'name') lines.append(ind + '"{key}" [fontcolor={fc} label="{lbl}"];\n'.format(key=key, fc=fc, lbl=label)) for sk in rsys.substances: add_substance(sk) def add_vertex(key, num, reac, penwidth): snum = str(num) if num > 1 else '' fmt = ','.join( ['label="{}"'.format(snum)] + (['penwidth={}'.format(penwidth)] if penwidth != 1 else []) ) lines.append(ind + '"{}" -> "{}" [color={},fontcolor={},{}];\n'.format( *((key, rid, colors[0], colors[0], fmt) if reac else (rid, key, colors[1], colors[1], fmt)) )) if include_inactive: reac_stoichs = rsys.all_reac_stoichs() prod_stoichs = rsys.all_prod_stoichs() else: reac_stoichs = rsys.active_reac_stoichs() prod_stoichs = rsys.active_prod_stoichs() for ri, rxn in enumerate(rsys.rxns): rid = rprefix + str(ri+rref0) lines.append(ind + '{') lines.append(ind*2 + 'node ' + nodeparams.format(rxn.name or rid)) lines.append(ind*2 + rid) lines.append(ind + '}\n') for idx, key in enumerate(rsys.substances): num = reac_stoichs[ri, idx] if num == 0: continue add_vertex(key, num, True, penwidths[ri]) for idx, key in enumerate(rsys.substances): num = prod_stoichs[ri, idx] if num == 0: continue add_vertex(key, num, False, penwidths[ri]) lines.append('}\n') return lines
[ "def", "rsys2dot", "(", "rsys", ",", "tex", "=", "False", ",", "rprefix", "=", "'r'", ",", "rref0", "=", "1", ",", "nodeparams", "=", "'[label=\"{}\",shape=diamond]'", ",", "colors", "=", "(", "'maroon'", ",", "'darkgreen'", ")", ",", "penwidths", "=", "None", ",", "include_inactive", "=", "True", ")", ":", "lines", "=", "[", "'digraph \"'", "+", "str", "(", "rsys", ".", "name", ")", "+", "'\" {\\n'", "]", "ind", "=", "' '", "# indentation", "if", "penwidths", "is", "None", ":", "penwidths", "=", "[", "1.0", "]", "*", "rsys", ".", "nr", "categories", "=", "rsys", ".", "categorize_substances", "(", "checks", "=", "(", ")", ")", "def", "add_substance", "(", "key", ")", ":", "fc", "=", "'black'", "if", "key", "in", "categories", "[", "'depleted'", "]", ":", "fc", "=", "colors", "[", "0", "]", "if", "key", "in", "categories", "[", "'accumulated'", "]", ":", "fc", "=", "colors", "[", "1", "]", "label", "=", "(", "'$%s$'", "if", "tex", "else", "'%s'", ")", "%", "getattr", "(", "rsys", ".", "substances", "[", "key", "]", ",", "'latex_name'", "if", "tex", "else", "'name'", ")", "lines", ".", "append", "(", "ind", "+", "'\"{key}\" [fontcolor={fc} label=\"{lbl}\"];\\n'", ".", "format", "(", "key", "=", "key", ",", "fc", "=", "fc", ",", "lbl", "=", "label", ")", ")", "for", "sk", "in", "rsys", ".", "substances", ":", "add_substance", "(", "sk", ")", "def", "add_vertex", "(", "key", ",", "num", ",", "reac", ",", "penwidth", ")", ":", "snum", "=", "str", "(", "num", ")", "if", "num", ">", "1", "else", "''", "fmt", "=", "','", ".", "join", "(", "[", "'label=\"{}\"'", ".", "format", "(", "snum", ")", "]", "+", "(", "[", "'penwidth={}'", ".", "format", "(", "penwidth", ")", "]", "if", "penwidth", "!=", "1", "else", "[", "]", ")", ")", "lines", ".", "append", "(", "ind", "+", "'\"{}\" -> \"{}\" [color={},fontcolor={},{}];\\n'", ".", "format", "(", "*", "(", "(", "key", ",", "rid", ",", "colors", "[", "0", "]", ",", "colors", "[", "0", "]", ",", "fmt", ")", "if", "reac", "else", "(", "rid", ",", "key", ",", "colors", "[", "1", "]", ",", "colors", "[", "1", "]", ",", "fmt", ")", ")", ")", ")", "if", "include_inactive", ":", "reac_stoichs", "=", "rsys", ".", "all_reac_stoichs", "(", ")", "prod_stoichs", "=", "rsys", ".", "all_prod_stoichs", "(", ")", "else", ":", "reac_stoichs", "=", "rsys", ".", "active_reac_stoichs", "(", ")", "prod_stoichs", "=", "rsys", ".", "active_prod_stoichs", "(", ")", "for", "ri", ",", "rxn", "in", "enumerate", "(", "rsys", ".", "rxns", ")", ":", "rid", "=", "rprefix", "+", "str", "(", "ri", "+", "rref0", ")", "lines", ".", "append", "(", "ind", "+", "'{'", ")", "lines", ".", "append", "(", "ind", "*", "2", "+", "'node '", "+", "nodeparams", ".", "format", "(", "rxn", ".", "name", "or", "rid", ")", ")", "lines", ".", "append", "(", "ind", "*", "2", "+", "rid", ")", "lines", ".", "append", "(", "ind", "+", "'}\\n'", ")", "for", "idx", ",", "key", "in", "enumerate", "(", "rsys", ".", "substances", ")", ":", "num", "=", "reac_stoichs", "[", "ri", ",", "idx", "]", "if", "num", "==", "0", ":", "continue", "add_vertex", "(", "key", ",", "num", ",", "True", ",", "penwidths", "[", "ri", "]", ")", "for", "idx", ",", "key", "in", "enumerate", "(", "rsys", ".", "substances", ")", ":", "num", "=", "prod_stoichs", "[", "ri", ",", "idx", "]", "if", "num", "==", "0", ":", "continue", "add_vertex", "(", "key", ",", "num", ",", "False", ",", "penwidths", "[", "ri", "]", ")", "lines", ".", "append", "(", "'}\\n'", ")", "return", "lines" ]
34.797468
19.835443
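A hypothetical end-to-end use of rsys2dot, assuming a ReactionSystem instance named rsys and the Graphviz dot binary on PATH:

import subprocess

lines = rsys2dot(rsys, tex=False)
with open("reactions.dot", "w") as fh:
    fh.writelines(lines)
# Render the graph; any Graphviz output format works here.
subprocess.check_call(["dot", "-Tpng", "reactions.dot", "-o", "reactions.png"])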
def save(name, data, rc_file='~/.odoorpcrc'): """Save the `data` session configuration under the name `name` in the `rc_file` file. >>> import odoorpc >>> odoorpc.session.save( ... 'foo', ... {'type': 'ODOO', 'host': 'localhost', 'protocol': 'jsonrpc', ... 'port': 8069, 'timeout': 120, 'database': 'db_name', ... 'user': 'admin', 'passwd': 'password'}) # doctest: +SKIP .. doctest:: :hide: >>> import odoorpc >>> session = '%s_session' % DB >>> odoorpc.session.save( ... session, ... {'type': 'ODOO', 'host': HOST, 'protocol': PROTOCOL, ... 'port': PORT, 'timeout': 120, 'database': DB, ... 'user': USER, 'passwd': PWD}) """ conf = ConfigParser() conf.read([os.path.expanduser(rc_file)]) if not conf.has_section(name): conf.add_section(name) for key in data: value = data[key] conf.set(name, key, str(value)) with open(os.path.expanduser(rc_file), 'w') as file_: os.chmod(os.path.expanduser(rc_file), stat.S_IREAD | stat.S_IWRITE) conf.write(file_)
[ "def", "save", "(", "name", ",", "data", ",", "rc_file", "=", "'~/.odoorpcrc'", ")", ":", "conf", "=", "ConfigParser", "(", ")", "conf", ".", "read", "(", "[", "os", ".", "path", ".", "expanduser", "(", "rc_file", ")", "]", ")", "if", "not", "conf", ".", "has_section", "(", "name", ")", ":", "conf", ".", "add_section", "(", "name", ")", "for", "key", "in", "data", ":", "value", "=", "data", "[", "key", "]", "conf", ".", "set", "(", "name", ",", "key", ",", "str", "(", "value", ")", ")", "with", "open", "(", "os", ".", "path", ".", "expanduser", "(", "rc_file", ")", ",", "'w'", ")", "as", "file_", ":", "os", ".", "chmod", "(", "os", ".", "path", ".", "expanduser", "(", "rc_file", ")", ",", "stat", ".", "S_IREAD", "|", "stat", ".", "S_IWRITE", ")", "conf", ".", "write", "(", "file_", ")" ]
35.09375
17.5
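For reference, the rc file written by save is plain INI readable by ConfigParser; a session stored as 'foo' with the values from the docstring would look roughly like this:

# ~/.odoorpcrc (owner read/write only, set via os.chmod above)
# [foo]
# type = ODOO
# host = localhost
# protocol = jsonrpc
# port = 8069
# timeout = 120
# database = db_name
# user = admin
# passwd = password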
def find_first_TP(self): """ Find the first TP of the TPAGB phase and return the model number at its LHe maximum. Parameters ---------- """ star_mass = self.get('star_mass') he_lumi = self.get('log_LHe') h_lumi = self.get('log_LH') mx2_bot = self.get('mx2_bot')*star_mass try: h1_boundary_mass = self.get('h1_boundary_mass') he4_boundary_mass = self.get('he4_boundary_mass') except: try: h1_boundary_mass = self.get('he_core_mass') he4_boundary_mass = self.get('c_core_mass') except: pass TP_bot=np.array(self.get('conv_mx2_bot'))*np.array(self.get('star_mass')) TP_top=np.array(self.get('conv_mx2_top'))*np.array(self.get('star_mass')) lum_array=[] activate=False models=[] pdcz_size=[] for i in range(len(h1_boundary_mass)): if (h1_boundary_mass[i]-he4_boundary_mass[i] <0.2) and (he4_boundary_mass[i]>0.2): if (mx2_bot[i]>he4_boundary_mass[i]) and (he_lumi[i]>h_lumi[i]): if TP_top[i]>he4_boundary_mass[i]: pdcz_size.append(TP_top[i]-TP_bot[i]) activate=True lum_array.append(he_lumi[i]) models.append(i) #print(TP_bot[i],TP_top[i]) if (activate == True) and (he_lumi[i]<h_lumi[i]): #if fake tp if max(pdcz_size)<1e-5: activate=False lum_array=[] models=[] print('fake tp') else: break t0_model = models[np.argmax(lum_array)] return t0_model
[ "def", "find_first_TP", "(", "self", ")", ":", "star_mass", "=", "self", ".", "get", "(", "'star_mass'", ")", "he_lumi", "=", "self", ".", "get", "(", "'log_LHe'", ")", "h_lumi", "=", "self", ".", "get", "(", "'log_LH'", ")", "mx2_bot", "=", "self", ".", "get", "(", "'mx2_bot'", ")", "*", "star_mass", "try", ":", "h1_boundary_mass", "=", "self", ".", "get", "(", "'h1_boundary_mass'", ")", "he4_boundary_mass", "=", "self", ".", "get", "(", "'he4_boundary_mass'", ")", "except", ":", "try", ":", "h1_boundary_mass", "=", "self", ".", "get", "(", "'he_core_mass'", ")", "he4_boundary_mass", "=", "self", ".", "get", "(", "'c_core_mass'", ")", "except", ":", "pass", "TP_bot", "=", "np", ".", "array", "(", "self", ".", "get", "(", "'conv_mx2_bot'", ")", ")", "*", "np", ".", "array", "(", "self", ".", "get", "(", "'star_mass'", ")", ")", "TP_top", "=", "np", ".", "array", "(", "self", ".", "get", "(", "'conv_mx2_top'", ")", ")", "*", "np", ".", "array", "(", "self", ".", "get", "(", "'star_mass'", ")", ")", "lum_array", "=", "[", "]", "activate", "=", "False", "models", "=", "[", "]", "pdcz_size", "=", "[", "]", "for", "i", "in", "range", "(", "len", "(", "h1_boundary_mass", ")", ")", ":", "if", "(", "h1_boundary_mass", "[", "i", "]", "-", "he4_boundary_mass", "[", "i", "]", "<", "0.2", ")", "and", "(", "he4_boundary_mass", "[", "i", "]", ">", "0.2", ")", ":", "if", "(", "mx2_bot", "[", "i", "]", ">", "he4_boundary_mass", "[", "i", "]", ")", "and", "(", "he_lumi", "[", "i", "]", ">", "h_lumi", "[", "i", "]", ")", ":", "if", "TP_top", "[", "i", "]", ">", "he4_boundary_mass", "[", "i", "]", ":", "pdcz_size", ".", "append", "(", "TP_top", "[", "i", "]", "-", "TP_bot", "[", "i", "]", ")", "activate", "=", "True", "lum_array", ".", "append", "(", "he_lumi", "[", "i", "]", ")", "models", ".", "append", "(", "i", ")", "#print(TP_bot[i],TP_top[i])", "if", "(", "activate", "==", "True", ")", "and", "(", "he_lumi", "[", "i", "]", "<", "h_lumi", "[", "i", "]", ")", ":", "#if fake tp", "if", "max", "(", "pdcz_size", ")", "<", "1e-5", ":", "active", "=", "False", "lum_array", "=", "[", "]", "models", "=", "[", "]", "print", "(", "'fake tp'", ")", "else", ":", "break", "t0_model", "=", "models", "[", "np", ".", "argmax", "(", "lum_array", ")", "]", "return", "t0_model" ]
37.596154
19.596154
def roster(self, year): """Returns the roster table for the given year. :year: The year for which we want the roster; defaults to current year. :returns: A DataFrame containing roster information for that year. """ doc = self.get_year_doc('{}_roster'.format(year)) roster_table = doc('table#games_played_team') df = sportsref.utils.parse_table(roster_table) starter_table = doc('table#starters') if not starter_table.empty: start_df = sportsref.utils.parse_table(starter_table) start_df = start_df.dropna(axis=0, subset=['position']) starters = start_df.set_index('position').player_id df['is_starter'] = df.player_id.isin(starters) df['starting_pos'] = df.player_id.map( lambda pid: (starters[starters == pid].index[0] if pid in starters.values else None) ) return df
[ "def", "roster", "(", "self", ",", "year", ")", ":", "doc", "=", "self", ".", "get_year_doc", "(", "'{}_roster'", ".", "format", "(", "year", ")", ")", "roster_table", "=", "doc", "(", "'table#games_played_team'", ")", "df", "=", "sportsref", ".", "utils", ".", "parse_table", "(", "roster_table", ")", "starter_table", "=", "doc", "(", "'table#starters'", ")", "if", "not", "starter_table", ".", "empty", ":", "start_df", "=", "sportsref", ".", "utils", ".", "parse_table", "(", "starter_table", ")", "start_df", "=", "start_df", ".", "dropna", "(", "axis", "=", "0", ",", "subset", "=", "[", "'position'", "]", ")", "starters", "=", "start_df", ".", "set_index", "(", "'position'", ")", ".", "player_id", "df", "[", "'is_starter'", "]", "=", "df", ".", "player_id", ".", "isin", "(", "starters", ")", "df", "[", "'starting_pos'", "]", "=", "df", ".", "player_id", ".", "map", "(", "lambda", "pid", ":", "(", "starters", "[", "starters", "==", "pid", "]", ".", "index", "[", "0", "]", "if", "pid", "in", "starters", ".", "values", "else", "None", ")", ")", "return", "df" ]
47.35
19.25
def __format_filters(filters): """ Format filters for the API query (mapping each key to filter[<filter-name>]) :param filters: dict: may be None; filters for the query :return: the formatted filters, or None """ if filters is not None: for k in list(filters): if 'filter[' not in k: filters['filter[{}]'.format(k)] = filters.pop(k) return filters
[ "def", "__format_filters", "(", "filters", ")", ":", "if", "filters", "is", "not", "None", ":", "for", "k", "in", "filters", ":", "if", "'filter['", "not", "in", "k", ":", "filters", "[", "'filter[{}]'", ".", "format", "(", "k", ")", "]", "=", "filters", ".", "pop", "(", "k", ")", "return", "filters" ]
34.833333
14.666667
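A quick worked example of the key rewrite, calling the helper directly for illustration; keys that already contain 'filter[' pass through untouched:

filters = {'status': 'open', 'filter[kind]': 'bug'}
# -> {'filter[status]': 'open', 'filter[kind]': 'bug'}
print(__format_filters(filters))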
def data_to_binary(self): """ :return: bytes """ return bytes([ COMMAND_CODE, int(self.temp_type), int(self.temp) ])
[ "def", "data_to_binary", "(", "self", ")", ":", "return", "bytes", "(", "[", "COMMAND_CODE", ",", "int", "(", "self", ".", "temp_type", ")", ",", "int", "(", "self", ".", "temp", ")", "]", ")" ]
20.444444
13.111111
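A worked example of the packet body, with a hypothetical COMMAND_CODE of 0xE0:

# With COMMAND_CODE = 0xE0, temp_type = 1 and temp = 22,
# data_to_binary() returns bytes([0xE0, 1, 22]) == b'\xe0\x01\x16'.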
def _serialize_model_helper(self, model, field_dict=None): """ A recursive function for serializing a model into a JSON-ready format. """ field_dict = field_dict or self.dot_field_list_to_dict() if model is None: return None if isinstance(model, Query): model = model.all() if isinstance(model, (list, set)): return [self.serialize_model(m, field_dict=field_dict) for m in model] model_dict = {} for name, sub in six.iteritems(field_dict): value = getattr(model, name) if sub: value = self.serialize_model(value, field_dict=sub) model_dict[name] = value return model_dict
[ "def", "_serialize_model_helper", "(", "self", ",", "model", ",", "field_dict", "=", "None", ")", ":", "field_dict", "=", "field_dict", "or", "self", ".", "dot_field_list_to_dict", "(", ")", "if", "model", "is", "None", ":", "return", "None", "if", "isinstance", "(", "model", ",", "Query", ")", ":", "model", "=", "model", ".", "all", "(", ")", "if", "isinstance", "(", "model", ",", "(", "list", ",", "set", ")", ")", ":", "return", "[", "self", ".", "serialize_model", "(", "m", ",", "field_dict", "=", "field_dict", ")", "for", "m", "in", "model", "]", "model_dict", "=", "{", "}", "for", "name", ",", "sub", "in", "six", ".", "iteritems", "(", "field_dict", ")", ":", "value", "=", "getattr", "(", "model", ",", "name", ")", "if", "sub", ":", "value", "=", "self", ".", "serialize_model", "(", "value", ",", "field_dict", "=", "sub", ")", "model_dict", "[", "name", "]", "=", "value", "return", "model_dict" ]
33.136364
16.590909
def decode(self): "Decode self.buffer, populate instance variables, and return self." buflen = len(self.buffer) tftpassert(buflen >= 4, "malformed ERR packet, too short") log.debug("Decoding ERR packet, length %s bytes", buflen) if buflen == 4: log.debug("Allowing this affront to the RFC of a 4-byte packet") fmt = b"!HH" log.debug("Decoding ERR packet with fmt: %s", fmt) self.opcode, self.errorcode = struct.unpack(fmt, self.buffer) else: log.debug("Good ERR packet > 4 bytes") fmt = b"!HH%dsx" % (len(self.buffer) - 5) log.debug("Decoding ERR packet with fmt: %s", fmt) self.opcode, self.errorcode, self.errmsg = struct.unpack(fmt, self.buffer) log.error("ERR packet - errorcode: %d, message: %s" % (self.errorcode, self.errmsg)) return self
[ "def", "decode", "(", "self", ")", ":", "buflen", "=", "len", "(", "self", ".", "buffer", ")", "tftpassert", "(", "buflen", ">=", "4", ",", "\"malformed ERR packet, too short\"", ")", "log", ".", "debug", "(", "\"Decoding ERR packet, length %s bytes\"", ",", "buflen", ")", "if", "buflen", "==", "4", ":", "log", ".", "debug", "(", "\"Allowing this affront to the RFC of a 4-byte packet\"", ")", "fmt", "=", "b\"!HH\"", "log", ".", "debug", "(", "\"Decoding ERR packet with fmt: %s\"", ",", "fmt", ")", "self", ".", "opcode", ",", "self", ".", "errorcode", "=", "struct", ".", "unpack", "(", "fmt", ",", "self", ".", "buffer", ")", "else", ":", "log", ".", "debug", "(", "\"Good ERR packet > 4 bytes\"", ")", "fmt", "=", "b\"!HH%dsx\"", "%", "(", "len", "(", "self", ".", "buffer", ")", "-", "5", ")", "log", ".", "debug", "(", "\"Decoding ERR packet with fmt: %s\"", ",", "fmt", ")", "self", ".", "opcode", ",", "self", ".", "errorcode", ",", "self", ".", "errmsg", "=", "struct", ".", "unpack", "(", "fmt", ",", "self", ".", "buffer", ")", "log", ".", "error", "(", "\"ERR packet - errorcode: %d, message: %s\"", "%", "(", "self", ".", "errorcode", ",", "self", ".", "errmsg", ")", ")", "return", "self" ]
51.65
22.75
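A worked example of both decode paths, assuming the standard TFTP ERROR opcode 5 and the wire layout opcode(2) + errorcode(2) + message + NUL:

import struct

short_pkt = struct.pack(b"!HH", 5, 1)  # degenerate 4-byte ERR packet
opcode, errorcode = struct.unpack(b"!HH", short_pkt)

msg = b"File not found"
full_pkt = struct.pack(b"!HH%dsx" % len(msg), 5, 1, msg)
fmt = b"!HH%dsx" % (len(full_pkt) - 5)
opcode, errorcode, errmsg = struct.unpack(fmt, full_pkt)  # errmsg == msg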
def addResource(self, key, filePath, text): """ The add resource operation allows the administrator to add a file resource, for example, the organization's logo or custom banner. The resource can be used by any member of the organization. File resources use storage space from your quota and are scanned for viruses. Inputs: key - The name the resource should be stored under. filePath - path of file to upload text - Some text to be written (for example, JSON or JavaScript) directly to the resource from a web client. """ url = self.root + "/addresource" params = { "f": "json", "token" : self._securityHandler.token, "key" : key, "text" : text } files = {} files['file'] = filePath res = self._post(url=url, param_dict=params, files=files, securityHandler=self._securityHandler, proxy_url=self._proxy_url, proxy_port=self._proxy_port) return res
[ "def", "addResource", "(", "self", ",", "key", ",", "filePath", ",", "text", ")", ":", "url", "=", "self", ".", "root", "+", "\"/addresource\"", "params", "=", "{", "\"f\"", ":", "\"json\"", ",", "\"token\"", ":", "self", ".", "_securityHandler", ".", "token", ",", "\"key\"", ":", "key", ",", "\"text\"", ":", "text", "}", "files", "=", "{", "}", "files", "[", "'file'", "]", "=", "filePath", "res", "=", "self", ".", "_post", "(", "url", "=", "url", ",", "param_dict", "=", "params", ",", "files", "=", "files", ",", "securityHandler", "=", "self", ".", "_securityHandler", ",", "proxy_url", "=", "self", ".", "_proxy_url", ",", "proxy_port", "=", "self", ".", "_proxy_port", ")", "return", "res" ]
37.032258
18.451613
def validate(cls, cpf): u""" Validate the CPF (Brazilian taxpayer ID). >>> CPF.validate(58119443659) True >>> CPF.validate(58119443650) False >>> CPF.validate('58119443659') True >>> CPF.validate('581.194.436-59') True """ if cpf is None: return False cpf = CPF.clean(cpf) def mod11(value): return (value % 11) % 10 dig1 = mod11(sum([(i + 1) * int(cpf[i]) for i in range(0, 9)])) dig2 = mod11(sum([i * int(cpf[i]) for i in range(1, 10)])) return cpf[-2:] == '{0}{1}'.format(dig1, dig2)
[ "def", "validate", "(", "cls", ",", "cpf", ")", ":", "if", "cpf", "is", "None", ":", "return", "False", "cpf", "=", "CPF", ".", "clean", "(", "cpf", ")", "def", "mod11", "(", "value", ")", ":", "return", "(", "value", "%", "11", ")", "%", "10", "dig1", "=", "mod11", "(", "sum", "(", "[", "(", "i", "+", "1", ")", "*", "int", "(", "cpf", "[", "i", "]", ")", "for", "i", "in", "range", "(", "0", ",", "9", ")", "]", ")", ")", "dig2", "=", "mod11", "(", "sum", "(", "[", "i", "*", "int", "(", "cpf", "[", "i", "]", ")", "for", "i", "in", "range", "(", "1", ",", "10", ")", "]", ")", ")", "return", "cpf", "[", "-", "2", ":", "]", "==", "'{0}{1}'", ".", "format", "(", "dig1", ",", "dig2", ")" ]
23.92
19.64
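The check-digit arithmetic behind the doctests above, worked out for 58119443659 (first nine digits 5 8 1 1 9 4 4 3 6):

# dig1 = ((1*5 + 2*8 + 3*1 + 4*1 + 5*9 + 6*4 + 7*4 + 8*3 + 9*6) % 11) % 10
#      = (203 % 11) % 10 = 5
# dig2 uses digits 2..10, which includes dig1:
#      ((1*8 + 2*1 + 3*1 + 4*9 + 5*4 + 6*4 + 7*3 + 8*6 + 9*5) % 11) % 10
#      = (207 % 11) % 10 = 9
# The trailing '59' matches '{dig1}{dig2}', so validate returns True.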
def parse_arg(arg): """ Parses arguments for convenience. Argument can be a csv list ('a,b,c'), a string, a list, a tuple. Returns a list. """ # handle string input if type(arg) == str: arg = arg.strip() # parse csv as tickers and create children if ',' in arg: arg = arg.split(',') arg = [x.strip() for x in arg] # assume single string - create single item list else: arg = [arg] return arg
[ "def", "parse_arg", "(", "arg", ")", ":", "# handle string input", "if", "type", "(", "arg", ")", "==", "str", ":", "arg", "=", "arg", ".", "strip", "(", ")", "# parse csv as tickers and create children", "if", "','", "in", "arg", ":", "arg", "=", "arg", ".", "split", "(", "','", ")", "arg", "=", "[", "x", ".", "strip", "(", ")", "for", "x", "in", "arg", "]", "# assume single string - create single item list", "else", ":", "arg", "=", "[", "arg", "]", "return", "arg" ]
25.421053
16.684211
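Behaviour at a glance (non-string inputs such as lists fall through unchanged):

# parse_arg('spy')           -> ['spy']
# parse_arg(' spy, msft ')   -> ['spy', 'msft']
# parse_arg(['spy', 'msft']) -> ['spy', 'msft']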
def _view_clear_queue(self, queue_type, queue_id): """Remove a queue from SharQ based on the queue_type and queue_id.""" response = { 'status': 'failure' } try: request_data = json.loads(request.data) except Exception, e: response['message'] = e.message return jsonify(**response), 400 request_data.update({ 'queue_type': queue_type, 'queue_id': queue_id }) try: response = self.sq.clear_queue(**request_data) except Exception, e: response['message'] = e.message return jsonify(**response), 400 return jsonify(**response)
[ "def", "_view_clear_queue", "(", "self", ",", "queue_type", ",", "queue_id", ")", ":", "response", "=", "{", "'status'", ":", "'failure'", "}", "try", ":", "request_data", "=", "json", ".", "loads", "(", "request", ".", "data", ")", "except", "Exception", ",", "e", ":", "response", "[", "'message'", "]", "=", "e", ".", "message", "return", "jsonify", "(", "*", "*", "response", ")", ",", "400", "request_data", ".", "update", "(", "{", "'queue_type'", ":", "queue_type", ",", "'queue_id'", ":", "queue_id", "}", ")", "try", ":", "response", "=", "self", ".", "sq", ".", "clear_queue", "(", "*", "*", "request_data", ")", "except", "Exception", ",", "e", ":", "response", "[", "'message'", "]", "=", "e", ".", "message", "return", "jsonify", "(", "*", "*", "response", ")", ",", "400", "return", "jsonify", "(", "*", "*", "response", ")" ]
31.681818
14.590909
def datetime_f(dttm): """Formats datetime to take less room when it is recent""" if dttm: dttm = dttm.isoformat() now_iso = datetime.now().isoformat() if now_iso[:10] == dttm[:10]: dttm = dttm[11:] elif now_iso[:4] == dttm[:4]: dttm = dttm[5:] return '<nobr>{}</nobr>'.format(dttm)
[ "def", "datetime_f", "(", "dttm", ")", ":", "if", "dttm", ":", "dttm", "=", "dttm", ".", "isoformat", "(", ")", "now_iso", "=", "datetime", ".", "now", "(", ")", ".", "isoformat", "(", ")", "if", "now_iso", "[", ":", "10", "]", "==", "dttm", "[", ":", "10", "]", ":", "dttm", "=", "dttm", "[", "11", ":", "]", "elif", "now_iso", "[", ":", "4", "]", "==", "dttm", "[", ":", "4", "]", ":", "dttm", "=", "dttm", "[", "5", ":", "]", "return", "'<nobr>{}</nobr>'", ".", "format", "(", "dttm", ")" ]
34
9.2
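Example outputs, assuming "now" is 2019-03-07:

# datetime(2019, 3, 7, 9, 30) -> '<nobr>09:30:00</nobr>'            (same day)
# datetime(2019, 1, 2, 9, 30) -> '<nobr>01-02T09:30:00</nobr>'      (same year)
# datetime(2018, 1, 2, 9, 30) -> '<nobr>2018-01-02T09:30:00</nobr>'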
def _merge_simple_selectors(a, b): """Merge two simple selectors, for the purposes of the LCS algorithm below. In practice this returns the more specific selector if one is a subset of the other, else it returns None. """ # TODO what about combinators if a.is_superset_of(b): return b elif b.is_superset_of(a): return a else: return None
[ "def", "_merge_simple_selectors", "(", "a", ",", "b", ")", ":", "# TODO what about combinators", "if", "a", ".", "is_superset_of", "(", "b", ")", ":", "return", "b", "elif", "b", ".", "is_superset_of", "(", "a", ")", ":", "return", "a", "else", ":", "return", "None" ]
29.384615
16.769231
def _prune_hit(hit, model): """ Check whether a document should be pruned. This method uses the SearchDocumentManagerMixin.in_search_queryset method to determine whether a 'hit' (search document) should be pruned from an index, and if so it returns the hit as a Django object (id=hit_id). Args: hit: dict object that represents a document, as returned from the scan_index function (contains the object id and index). model: the Django model (not object) from which the document was derived. Used to get the correct model manager and bulk action. Returns: an object of type model, with id=hit_id. NB this is not the object itself, which by definition may not exist in the underlying database, but a temporary object with the document id - which is enough to create a 'delete' action. """ hit_id = hit["_id"] hit_index = hit["_index"] if model.objects.in_search_queryset(hit_id, index=hit_index): logger.debug( "%s with id=%s exists in the '%s' index queryset.", model, hit_id, hit_index ) return None else: logger.debug( "%s with id=%s does not exist in the '%s' index queryset and will be pruned.", model, hit_id, hit_index, ) # we don't need the full obj for a delete action, just the id. # (the object itself may not even exist.) return model(pk=hit_id)
[ "def", "_prune_hit", "(", "hit", ",", "model", ")", ":", "hit_id", "=", "hit", "[", "\"_id\"", "]", "hit_index", "=", "hit", "[", "\"_index\"", "]", "if", "model", ".", "objects", ".", "in_search_queryset", "(", "hit_id", ",", "index", "=", "hit_index", ")", ":", "logger", ".", "debug", "(", "\"%s with id=%s exists in the '%s' index queryset.\"", ",", "model", ",", "hit_id", ",", "hit_index", ")", "return", "None", "else", ":", "logger", ".", "debug", "(", "\"%s with id=%s does not exist in the '%s' index queryset and will be pruned.\"", ",", "model", ",", "hit_id", ",", "hit_index", ",", ")", "# we don't need the full obj for a delete action, just the id.", "# (the object itself may not even exist.)", "return", "model", "(", "pk", "=", "hit_id", ")" ]
38.421053
26.263158
def callgrind(self, out, filename=None, commandline=None, relative_path=False): """ Dump statistics in callgrind format. Contains: - per-line hit count, time and time-per-hit - call associations (call tree) Note: hit count is not inclusive, in that it is not the sum of all hits inside that call. Time unit: microsecond (1e-6 second). out (file-ish opened for writing) Destination of callgrind profiling data. filename (str, collection of str) If provided, dump stats for given source file(s) only. By default, list for all known files. commandline (anything with __str__) If provided, will be output as the command line used to generate this profiling data. relative_path (bool) When True, absolute elements are stripped from path. Useful when maintaining several copies of source trees with their own profiling result, so kcachegrind does not look in system-wide files which may not match with profiled code. """ print(u'# callgrind format', file=out) print(u'version: 1', file=out) print(u'creator: pprofile', file=out) print(u'event: usphit :microseconds/hit', file=out) print(u'events: hits microseconds usphit', file=out) if commandline is not None: print(u'cmd:', commandline, file=out) file_dict = self._mergeFileTiming() if relative_path: convertPath = _relpath else: convertPath = lambda x: x if os.path.sep != "/": # qCacheGrind (windows build) needs at least one UNIX separator # in path to find the file. Adapt here even if this is probably # more of a qCacheGrind issue... convertPath = lambda x, cascade=convertPath: cascade( '/'.join(x.split(os.path.sep)) ) code_to_name_dict = {} homonym_counter = {} def getCodeName(filename, code): # Tracks code objects globally, because callee information needs # to be consistent across files. # Inside a file, grants unique names to each code object. try: return code_to_name_dict[code] except KeyError: name = code.co_name + ':%i' % code.co_firstlineno key = (filename, name) homonym_count = homonym_counter.get(key, 0) if homonym_count: name += '_%i' % homonym_count homonym_counter[key] = homonym_count + 1 code_to_name_dict[code] = name return name for current_file in self._getFileNameList(filename, may_sort=False): file_timing = file_dict[current_file] print(u'fl=%s' % convertPath(current_file), file=out) # When a local callable is created and immediately executed, this # loop would start a new "fn=" section but would not end it before # emitting "cfn=" lines, making the callee appear as not being # called by interrupted "fn=" section. # So dispatch all functions in a first pass, and build # uninterrupted sections in a second pass. # Note: cost line is a list just to be mutable. A single item is # expected. func_dict = defaultdict(lambda: defaultdict(lambda: ([], []))) for lineno, code, hits, duration in file_timing.iterHits(): func_dict[getCodeName(current_file, code)][lineno][0].append( (hits, int(duration * 1000000)), ) for ( lineno, caller, call_hits, call_duration, callee_file, callee, ) in file_timing.iterCalls(): call_ticks = int(call_duration * 1000000) func_call_list = func_dict[ getCodeName(current_file, caller) ][lineno][1] append = func_call_list.append append(u'cfl=' + convertPath(callee_file)) append(u'cfn=' + getCodeName(callee_file, callee)) append(u'calls=%i %i' % (call_hits, callee.co_firstlineno)) append(u'%i %i %i %i' % (lineno, call_hits, call_ticks, call_ticks // call_hits)) for func_name, line_dict in func_dict.iteritems(): print(u'fn=%s' % func_name, file=out) for lineno, (func_hit_list, func_call_list) in sorted(line_dict.iteritems()): if func_hit_list: # Multiple function objects may "reside" on the same # line of the same file (same global dict). # Sum these up and produce a single cachegrind event. hits = sum(x for x, _ in func_hit_list) ticks = sum(x for _, x in func_hit_list) print( u'%i %i %i %i' % ( lineno, hits, ticks, ticks // hits, ), file=out, ) for line in func_call_list: print(line, file=out)
[ "def", "callgrind", "(", "self", ",", "out", ",", "filename", "=", "None", ",", "commandline", "=", "None", ",", "relative_path", "=", "False", ")", ":", "print", "(", "u'# callgrind format'", ",", "file", "=", "out", ")", "print", "(", "u'version: 1'", ",", "file", "=", "out", ")", "print", "(", "u'creator: pprofile'", ",", "file", "=", "out", ")", "print", "(", "u'event: usphit :microseconds/hit'", ",", "file", "=", "out", ")", "print", "(", "u'events: hits microseconds usphit'", ",", "file", "=", "out", ")", "if", "commandline", "is", "not", "None", ":", "print", "(", "u'cmd:'", ",", "commandline", ",", "file", "=", "out", ")", "file_dict", "=", "self", ".", "_mergeFileTiming", "(", ")", "if", "relative_path", ":", "convertPath", "=", "_relpath", "else", ":", "convertPath", "=", "lambda", "x", ":", "x", "if", "os", ".", "path", ".", "sep", "!=", "\"/\"", ":", "# qCacheGrind (windows build) needs at least one UNIX separator", "# in path to find the file. Adapt here even if this is probably", "# more of a qCacheGrind issue...", "convertPath", "=", "lambda", "x", ",", "cascade", "=", "convertPath", ":", "cascade", "(", "'/'", ".", "join", "(", "x", ".", "split", "(", "os", ".", "path", ".", "sep", ")", ")", ")", "code_to_name_dict", "=", "{", "}", "homonym_counter", "=", "{", "}", "def", "getCodeName", "(", "filename", ",", "code", ")", ":", "# Tracks code objects globally, because callee information needs", "# to be consistent accross files.", "# Inside a file, grants unique names to each code object.", "try", ":", "return", "code_to_name_dict", "[", "code", "]", "except", "KeyError", ":", "name", "=", "code", ".", "co_name", "+", "':%i'", "%", "code", ".", "co_firstlineno", "key", "=", "(", "filename", ",", "name", ")", "homonym_count", "=", "homonym_counter", ".", "get", "(", "key", ",", "0", ")", "if", "homonym_count", ":", "name", "+=", "'_%i'", "%", "homonym_count", "homonym_counter", "[", "key", "]", "=", "homonym_count", "+", "1", "code_to_name_dict", "[", "code", "]", "=", "name", "return", "name", "for", "current_file", "in", "self", ".", "_getFileNameList", "(", "filename", ",", "may_sort", "=", "False", ")", ":", "file_timing", "=", "file_dict", "[", "current_file", "]", "print", "(", "u'fl=%s'", "%", "convertPath", "(", "current_file", ")", ",", "file", "=", "out", ")", "# When a local callable is created an immediately executed, this", "# loop would start a new \"fn=\" section but would not end it before", "# emitting \"cfn=\" lines, making the callee appear as not being", "# called by interrupted \"fn=\" section.", "# So dispatch all functions in a first pass, and build", "# uninterrupted sections in a second pass.", "# Note: cost line is a list just to be mutable. 
A single item is", "# expected.", "func_dict", "=", "defaultdict", "(", "lambda", ":", "defaultdict", "(", "lambda", ":", "(", "[", "]", ",", "[", "]", ")", ")", ")", "for", "lineno", ",", "code", ",", "hits", ",", "duration", "in", "file_timing", ".", "iterHits", "(", ")", ":", "func_dict", "[", "getCodeName", "(", "current_file", ",", "code", ")", "]", "[", "lineno", "]", "[", "0", "]", ".", "append", "(", "(", "hits", ",", "int", "(", "duration", "*", "1000000", ")", ")", ",", ")", "for", "(", "lineno", ",", "caller", ",", "call_hits", ",", "call_duration", ",", "callee_file", ",", "callee", ",", ")", "in", "file_timing", ".", "iterCalls", "(", ")", ":", "call_ticks", "=", "int", "(", "call_duration", "*", "1000000", ")", "func_call_list", "=", "func_dict", "[", "getCodeName", "(", "current_file", ",", "caller", ")", "]", "[", "lineno", "]", "[", "1", "]", "append", "=", "func_call_list", ".", "append", "append", "(", "u'cfl='", "+", "convertPath", "(", "callee_file", ")", ")", "append", "(", "u'cfn='", "+", "getCodeName", "(", "callee_file", ",", "callee", ")", ")", "append", "(", "u'calls=%i %i'", "%", "(", "call_hits", ",", "callee", ".", "co_firstlineno", ")", ")", "append", "(", "u'%i %i %i %i'", "%", "(", "lineno", ",", "call_hits", ",", "call_ticks", ",", "call_ticks", "//", "call_hits", ")", ")", "for", "func_name", ",", "line_dict", "in", "func_dict", ".", "iteritems", "(", ")", ":", "print", "(", "u'fn=%s'", "%", "func_name", ",", "file", "=", "out", ")", "for", "lineno", ",", "(", "func_hit_list", ",", "func_call_list", ")", "in", "sorted", "(", "line_dict", ".", "iteritems", "(", ")", ")", ":", "if", "func_hit_list", ":", "# Multiple function objects may \"reside\" on the same", "# line of the same file (same global dict).", "# Sum these up and produce a single cachegrind event.", "hits", "=", "sum", "(", "x", "for", "x", ",", "_", "in", "func_hit_list", ")", "ticks", "=", "sum", "(", "x", "for", "_", ",", "x", "in", "func_hit_list", ")", "print", "(", "u'%i %i %i %i'", "%", "(", "lineno", ",", "hits", ",", "ticks", ",", "ticks", "//", "hits", ",", ")", ",", "file", "=", "out", ",", ")", "for", "line", "in", "func_call_list", ":", "print", "(", "line", ",", "file", "=", "out", ")" ]
48.954545
16.518182
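A hypothetical end-to-end run with pprofile, whose Profile objects expose this method and act as context managers; the output file name follows the kcachegrind convention:

import pprofile

profiler = pprofile.Profile()
with profiler:
    sum(i * i for i in range(10000))
with open("cachegrind.out.demo", "w") as out:
    profiler.callgrind(out, relative_path=True)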
def assertion(func): """Extend sure with a custom assertion method.""" func = assertionmethod(func) setattr(AssertionBuilder, func.__name__, func) return func
[ "def", "assertion", "(", "func", ")", ":", "func", "=", "assertionmethod", "(", "func", ")", "setattr", "(", "AssertionBuilder", ",", "func", ".", "__name__", ",", "func", ")", "return", "func" ]
34
12.6
def collect_info(self): """ Collect info about the connection and fill the info dictionary. """ try: info = {} res = self._send_request('GET', "/") info['server'] = {} info['server']['name'] = res['name'] info['server']['version'] = res['version'] info['allinfo'] = res info['status'] = self.cluster.status() info['aliases'] = self.indices.aliases() self.info = info return True except: self.info = {} return False
[ "def", "collect_info", "(", "self", ")", ":", "try", ":", "info", "=", "{", "}", "res", "=", "self", ".", "_send_request", "(", "'GET'", ",", "\"/\"", ")", "info", "[", "'server'", "]", "=", "{", "}", "info", "[", "'server'", "]", "[", "'name'", "]", "=", "res", "[", "'name'", "]", "info", "[", "'server'", "]", "[", "'version'", "]", "=", "res", "[", "'version'", "]", "info", "[", "'allinfo'", "]", "=", "res", "info", "[", "'status'", "]", "=", "self", ".", "cluster", ".", "status", "(", ")", "info", "[", "'aliases'", "]", "=", "self", ".", "indices", ".", "aliases", "(", ")", "self", ".", "info", "=", "info", "return", "True", "except", ":", "self", ".", "info", "=", "{", "}", "return", "False" ]
32.277778
13.722222
def get_interpolated(self, target, extent): """ Return a new vector that has been moved towards the given target by the given extent. The extent should be between 0 and 1. """ result = self.copy() result.interpolate(target, extent) return result
[ "def", "get_interpolated", "(", "self", ",", "target", ",", "extent", ")", ":", "result", "=", "self", ".", "copy", "(", ")", "result", ".", "interpolate", "(", "target", ",", "extent", ")", "return", "result" ]
47
6
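The companion interpolate() method is not shown in this row; a minimal sketch, under the assumption (not taken from the source) that it does plain linear interpolation between self and the target:

def interpolate(self, target, extent):
    # Move this vector toward target by the given fraction (0 to 1).
    self.x += (target.x - self.x) * extent
    self.y += (target.y - self.y) * extent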
def _ReadPropertySet(self, property_set): """Reads properties from a property set. Args: property_set (pyolecf.property_set): OLECF property set. """ # Combine the values of multiple property sections # but do not override properties that are already set. for property_section in property_set.sections: if property_section.class_identifier != self._CLASS_IDENTIFIER: continue for property_value in property_section.properties: property_name = self._PROPERTY_NAMES.get( property_value.identifier, None) if not property_name: property_name = '0x{0:04}'.format(property_value.identifier) value = self._GetValueAsObject(property_value) if self._PROPERTY_VALUE_MAPPINGS: value_callback_name = self._PROPERTY_VALUE_MAPPINGS.get( property_name, None) if value_callback_name: value_callback_method = getattr(self, value_callback_name, None) if value_callback_method: value = value_callback_method(value) if property_name in self._DATE_TIME_PROPERTIES: properties_dict = self.date_time_properties value = dfdatetime_filetime.Filetime(timestamp=value) else: properties_dict = self._properties if property_name not in properties_dict: properties_dict[property_name] = value
[ "def", "_ReadPropertySet", "(", "self", ",", "property_set", ")", ":", "# Combine the values of multiple property sections", "# but do not override properties that are already set.", "for", "property_section", "in", "property_set", ".", "sections", ":", "if", "property_section", ".", "class_identifier", "!=", "self", ".", "_CLASS_IDENTIFIER", ":", "continue", "for", "property_value", "in", "property_section", ".", "properties", ":", "property_name", "=", "self", ".", "_PROPERTY_NAMES", ".", "get", "(", "property_value", ".", "identifier", ",", "None", ")", "if", "not", "property_name", ":", "property_name", "=", "'0x{0:04}'", ".", "format", "(", "property_value", ".", "identifier", ")", "value", "=", "self", ".", "_GetValueAsObject", "(", "property_value", ")", "if", "self", ".", "_PROPERTY_VALUE_MAPPINGS", ":", "value_callback_name", "=", "self", ".", "_PROPERTY_VALUE_MAPPINGS", ".", "get", "(", "property_name", ",", "None", ")", "if", "value_callback_name", ":", "value_callback_method", "=", "getattr", "(", "self", ",", "value_callback_name", ",", "None", ")", "if", "value_callback_method", ":", "value", "=", "value_callback_method", "(", "value", ")", "if", "property_name", "in", "self", ".", "_DATE_TIME_PROPERTIES", ":", "properties_dict", "=", "self", ".", "date_time_properties", "value", "=", "dfdatetime_filetime", ".", "Filetime", "(", "timestamp", "=", "value", ")", "else", ":", "properties_dict", "=", "self", ".", "_properties", "if", "property_name", "not", "in", "properties_dict", ":", "properties_dict", "[", "property_name", "]", "=", "value" ]
39.228571
17.714286
def from_thread(cls,thread): """Convert a vanilla thread object into an instance of this class. This method "upgrades" a vanilla thread object to an instance of this extended class. You might need to call this if you obtain a reference to a thread by some means other than (a) creating it, or (b) from the methods of the threading2 module. """ new_classes = [] for new_cls in cls.__mro__: if new_cls not in thread.__class__.__mro__: new_classes.append(new_cls) if isinstance(thread,cls): pass elif issubclass(cls,thread.__class__): thread.__class__ = cls else: class UpgradedThread(thread.__class__,cls): pass thread.__class__ = UpgradedThread for new_cls in new_classes: if hasattr(new_cls,"_upgrade_thread"): new_cls._upgrade_thread(thread) return thread
[ "def", "from_thread", "(", "cls", ",", "thread", ")", ":", "new_classes", "=", "[", "]", "for", "new_cls", "in", "cls", ".", "__mro__", ":", "if", "new_cls", "not", "in", "thread", ".", "__class__", ".", "__mro__", ":", "new_classes", ".", "append", "(", "new_cls", ")", "if", "isinstance", "(", "thread", ",", "cls", ")", ":", "pass", "elif", "issubclass", "(", "cls", ",", "thread", ".", "__class__", ")", ":", "thread", ".", "__class__", "=", "cls", "else", ":", "class", "UpgradedThread", "(", "thread", ".", "__class__", ",", "cls", ")", ":", "pass", "thread", ".", "__class__", "=", "UpgradedThread", "for", "new_cls", "in", "new_classes", ":", "if", "hasattr", "(", "new_cls", ",", "\"_upgrade_thread\"", ")", ":", "new_cls", ".", "_upgrade_thread", "(", "thread", ")", "return", "thread" ]
40
14.791667
def set_shaders(self, vert, frag): """ This function takes care of setting the shading code and compiling+linking it into a working program object that is ready to use. """ self._linked = False # Create temporary shader objects vert_handle = gl.glCreateShader(gl.GL_VERTEX_SHADER) frag_handle = gl.glCreateShader(gl.GL_FRAGMENT_SHADER) # For both vertex and fragment shader: set source, compile, check for code, handle, type_ in [(vert, vert_handle, 'vertex'), (frag, frag_handle, 'fragment')]: gl.glShaderSource(handle, code) gl.glCompileShader(handle) status = gl.glGetShaderParameter(handle, gl.GL_COMPILE_STATUS) if not status: errors = gl.glGetShaderInfoLog(handle) errormsg = self._get_error(code, errors, 4) raise RuntimeError("Shader compilation error in %s:\n%s" % (type_ + ' shader', errormsg)) # Attach shaders gl.glAttachShader(self._handle, vert_handle) gl.glAttachShader(self._handle, frag_handle) # Link the program and check gl.glLinkProgram(self._handle) if not gl.glGetProgramParameter(self._handle, gl.GL_LINK_STATUS): raise RuntimeError('Program linking error:\n%s' % gl.glGetProgramInfoLog(self._handle)) # Now we can remove the shaders. We no longer need them and it # frees up precious GPU memory: # http://gamedev.stackexchange.com/questions/47910 gl.glDetachShader(self._handle, vert_handle) gl.glDetachShader(self._handle, frag_handle) gl.glDeleteShader(vert_handle) gl.glDeleteShader(frag_handle) # Now we know what variables will be used by the program self._unset_variables = self._get_active_attributes_and_uniforms() self._handles = {} self._known_invalid = set() self._linked = True
[ "def", "set_shaders", "(", "self", ",", "vert", ",", "frag", ")", ":", "self", ".", "_linked", "=", "False", "# Create temporary shader objects", "vert_handle", "=", "gl", ".", "glCreateShader", "(", "gl", ".", "GL_VERTEX_SHADER", ")", "frag_handle", "=", "gl", ".", "glCreateShader", "(", "gl", ".", "GL_FRAGMENT_SHADER", ")", "# For both vertex and fragment shader: set source, compile, check", "for", "code", ",", "handle", ",", "type_", "in", "[", "(", "vert", ",", "vert_handle", ",", "'vertex'", ")", ",", "(", "frag", ",", "frag_handle", ",", "'fragment'", ")", "]", ":", "gl", ".", "glShaderSource", "(", "handle", ",", "code", ")", "gl", ".", "glCompileShader", "(", "handle", ")", "status", "=", "gl", ".", "glGetShaderParameter", "(", "handle", ",", "gl", ".", "GL_COMPILE_STATUS", ")", "if", "not", "status", ":", "errors", "=", "gl", ".", "glGetShaderInfoLog", "(", "handle", ")", "errormsg", "=", "self", ".", "_get_error", "(", "code", ",", "errors", ",", "4", ")", "raise", "RuntimeError", "(", "\"Shader compilation error in %s:\\n%s\"", "%", "(", "type_", "+", "' shader'", ",", "errormsg", ")", ")", "# Attach shaders", "gl", ".", "glAttachShader", "(", "self", ".", "_handle", ",", "vert_handle", ")", "gl", ".", "glAttachShader", "(", "self", ".", "_handle", ",", "frag_handle", ")", "# Link the program and check", "gl", ".", "glLinkProgram", "(", "self", ".", "_handle", ")", "if", "not", "gl", ".", "glGetProgramParameter", "(", "self", ".", "_handle", ",", "gl", ".", "GL_LINK_STATUS", ")", ":", "raise", "RuntimeError", "(", "'Program linking error:\\n%s'", "%", "gl", ".", "glGetProgramInfoLog", "(", "self", ".", "_handle", ")", ")", "# Now we can remove the shaders. We no longer need them and it", "# frees up precious GPU memory:", "# http://gamedev.stackexchange.com/questions/47910", "gl", ".", "glDetachShader", "(", "self", ".", "_handle", ",", "vert_handle", ")", "gl", ".", "glDetachShader", "(", "self", ".", "_handle", ",", "frag_handle", ")", "gl", ".", "glDeleteShader", "(", "vert_handle", ")", "gl", ".", "glDeleteShader", "(", "frag_handle", ")", "# Now we know what variables will be used by the program", "self", ".", "_unset_variables", "=", "self", ".", "_get_active_attributes_and_uniforms", "(", ")", "self", ".", "_handles", "=", "{", "}", "self", ".", "_known_invalid", "=", "set", "(", ")", "self", ".", "_linked", "=", "True" ]
50.225
16.15
def get_percentage_lattice_parameter_changes(self): """ Returns the percentage lattice parameter changes. Returns: A dict of the percentage change in lattice parameter, e.g., {'a': 0.012, 'b': 0.021, 'c': -0.031} implies a change of 1.2%, 2.1% and -3.1% in the a, b and c lattice parameters respectively. """ initial_latt = self.initial.lattice final_latt = self.final.lattice d = {l: getattr(final_latt, l) / getattr(initial_latt, l) - 1 for l in ["a", "b", "c"]} return d
[ "def", "get_percentage_lattice_parameter_changes", "(", "self", ")", ":", "initial_latt", "=", "self", ".", "initial", ".", "lattice", "final_latt", "=", "self", ".", "final", ".", "lattice", "d", "=", "{", "l", ":", "getattr", "(", "final_latt", ",", "l", ")", "/", "getattr", "(", "initial_latt", ",", "l", ")", "-", "1", "for", "l", "in", "[", "\"a\"", ",", "\"b\"", ",", "\"c\"", "]", "}", "return", "d" ]
41
18.142857
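Worked example: if a relaxes from 3.000 to 3.036 Angstrom while b and c stay fixed, the method returns {'a': 0.012, 'b': 0.0, 'c': 0.0}, matching the docstring's percentage convention:

# 3.036 / 3.000 - 1 == 0.012, i.e. a +1.2% change in the a parameter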
def response_to_dict(self): """Return the output/response as a dict: JSON responses are parsed directly, while XML responses are converted to JSON via xmltodict. Returns: data (dict): JSON data from the output/response """ try: return json.loads(self.incidents_data.text) except Exception: return json.loads(json.dumps(xmltodict.parse( self.incidents_data.text)))
[ "def", "response_to_dict", "(", "self", ")", ":", "try", ":", "return", "json", ".", "loads", "(", "self", ".", "incidents_data", ".", "text", ")", "except", "Exception", ":", "return", "json", ".", "loads", "(", "json", ".", "dumps", "(", "xmltodict", ".", "parse", "(", "self", ".", "incidents_data", ".", "text", ")", ")", ")" ]
36.615385
17.846154
def add_data(self, minimum_address, maximum_address, data, overwrite): """Add given data to this segment. The added data must be adjacent to the current segment data, otherwise an exception is thrown. """ if minimum_address == self.maximum_address: self.maximum_address = maximum_address self.data += data elif maximum_address == self.minimum_address: self.minimum_address = minimum_address self.data = data + self.data elif (overwrite and minimum_address < self.maximum_address and maximum_address > self.minimum_address): self_data_offset = minimum_address - self.minimum_address # Prepend data. if self_data_offset < 0: self_data_offset *= -1 self.data = data[:self_data_offset] + self.data del data[:self_data_offset] self.minimum_address = minimum_address # Overwrite overlapping part. self_data_left = len(self.data) - self_data_offset if len(data) <= self_data_left: self.data[self_data_offset:self_data_offset + len(data)] = data data = bytearray() else: self.data[self_data_offset:] = data[:self_data_left] data = data[self_data_left:] # Append data. if len(data) > 0: self.data += data self.maximum_address = maximum_address else: raise AddDataError( 'data added to a segment must be adjacent to or overlapping ' 'with the original segment data')
[ "def", "add_data", "(", "self", ",", "minimum_address", ",", "maximum_address", ",", "data", ",", "overwrite", ")", ":", "if", "minimum_address", "==", "self", ".", "maximum_address", ":", "self", ".", "maximum_address", "=", "maximum_address", "self", ".", "data", "+=", "data", "elif", "maximum_address", "==", "self", ".", "minimum_address", ":", "self", ".", "minimum_address", "=", "minimum_address", "self", ".", "data", "=", "data", "+", "self", ".", "data", "elif", "(", "overwrite", "and", "minimum_address", "<", "self", ".", "maximum_address", "and", "maximum_address", ">", "self", ".", "minimum_address", ")", ":", "self_data_offset", "=", "minimum_address", "-", "self", ".", "minimum_address", "# Prepend data.", "if", "self_data_offset", "<", "0", ":", "self_data_offset", "*=", "-", "1", "self", ".", "data", "=", "data", "[", ":", "self_data_offset", "]", "+", "self", ".", "data", "del", "data", "[", ":", "self_data_offset", "]", "self", ".", "minimum_address", "=", "minimum_address", "# Overwrite overlapping part.", "self_data_left", "=", "len", "(", "self", ".", "data", ")", "-", "self_data_offset", "if", "len", "(", "data", ")", "<=", "self_data_left", ":", "self", ".", "data", "[", "self_data_offset", ":", "self_data_offset", "+", "len", "(", "data", ")", "]", "=", "data", "data", "=", "bytearray", "(", ")", "else", ":", "self", ".", "data", "[", "self_data_offset", ":", "]", "=", "data", "[", ":", "self_data_left", "]", "data", "=", "data", "[", "self_data_left", ":", "]", "# Append data.", "if", "len", "(", "data", ")", ">", "0", ":", "self", ".", "data", "+=", "data", "self", ".", "maximum_address", "=", "maximum_address", "else", ":", "raise", "AddDataError", "(", "'data added to a segment must be adjacent to or overlapping '", "'with the original segment data'", ")" ]
39.642857
17.738095
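Worked example of the append branch, for a hypothetical segment holding b'\x01\x02\x03\x04' over [0x100, 0x104):

# add_data(0x104, 0x106, bytearray(b'\x05\x06'), overwrite=False)
# minimum_address stays 0x100, maximum_address becomes 0x106,
# and data grows in place to b'\x01\x02\x03\x04\x05\x06'.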
def calculate_rate(phone_number, address_country_code=None, address_exception=None): """ Calculates the VAT rate based on a telephone number :param phone_number: The string phone number, in international format with leading + :param address_country_code: The user's country_code, as detected from billing_address or declared_residence. This prevents an UndefinitiveError from being raised. :param address_exception: The user's exception name, as detected from billing_address or declared_residence. This prevents an UndefinitiveError from being raised. :raises: ValueError - error with phone number provided UndefinitiveError - when no address_country_code and address_exception are provided and the phone number area code matching isn't specific enough :return: A tuple of (Decimal percentage rate, country code, exception name [or None]) """ if not phone_number: raise ValueError('No phone number provided') if not isinstance(phone_number, str_cls): raise ValueError('Phone number is not a string') phone_number = phone_number.strip() phone_number = re.sub('[^+0-9]', '', phone_number) if not phone_number or phone_number[0] != '+': raise ValueError('Phone number is not in international format with a leading +') phone_number = phone_number[1:] if not phone_number: raise ValueError('Phone number does not appear to contain any digits') country_code = _lookup_country_code(phone_number) if not country_code: raise ValueError('Phone number does not appear to be a valid international phone number') if country_code in CALLING_CODE_EXCEPTIONS: for info in CALLING_CODE_EXCEPTIONS[country_code]: if not re.match(info['regex'], phone_number): continue mapped_country = info['country_code'] mapped_name = info['name'] if not info['definitive']: if address_country_code is None: raise UndefinitiveError('It is not possible to determine the users VAT rates based on the information provided') if address_country_code != mapped_country: continue if address_exception != info['name']: continue rate = rates.BY_COUNTRY[mapped_country]['exceptions'][mapped_name] return (rate, mapped_country, mapped_name) if country_code not in rates.BY_COUNTRY: return (Decimal('0.0'), country_code, None) return (rates.BY_COUNTRY[country_code]['rate'], country_code, None)
[ "def", "calculate_rate", "(", "phone_number", ",", "address_country_code", "=", "None", ",", "address_exception", "=", "None", ")", ":", "if", "not", "phone_number", ":", "raise", "ValueError", "(", "'No phone number provided'", ")", "if", "not", "isinstance", "(", "phone_number", ",", "str_cls", ")", ":", "raise", "ValueError", "(", "'Phone number is not a string'", ")", "phone_number", "=", "phone_number", ".", "strip", "(", ")", "phone_number", "=", "re", ".", "sub", "(", "'[^+0-9]'", ",", "''", ",", "phone_number", ")", "if", "not", "phone_number", "or", "phone_number", "[", "0", "]", "!=", "'+'", ":", "raise", "ValueError", "(", "'Phone number is not in international format with a leading +'", ")", "phone_number", "=", "phone_number", "[", "1", ":", "]", "if", "not", "phone_number", ":", "raise", "ValueError", "(", "'Phone number does not appear to contain any digits'", ")", "country_code", "=", "_lookup_country_code", "(", "phone_number", ")", "if", "not", "country_code", ":", "raise", "ValueError", "(", "'Phone number does not appear to be a valid international phone number'", ")", "if", "country_code", "in", "CALLING_CODE_EXCEPTIONS", ":", "for", "info", "in", "CALLING_CODE_EXCEPTIONS", "[", "country_code", "]", ":", "if", "not", "re", ".", "match", "(", "info", "[", "'regex'", "]", ",", "phone_number", ")", ":", "continue", "mapped_country", "=", "info", "[", "'country_code'", "]", "mapped_name", "=", "info", "[", "'name'", "]", "if", "not", "info", "[", "'definitive'", "]", ":", "if", "address_country_code", "is", "None", ":", "raise", "UndefinitiveError", "(", "'It is not possible to determine the users VAT rates based on the information provided'", ")", "if", "address_country_code", "!=", "mapped_country", ":", "continue", "if", "address_exception", "!=", "info", "[", "'name'", "]", ":", "continue", "rate", "=", "rates", ".", "BY_COUNTRY", "[", "mapped_country", "]", "[", "'exceptions'", "]", "[", "mapped_name", "]", "return", "(", "rate", ",", "mapped_country", ",", "mapped_name", ")", "if", "country_code", "not", "in", "rates", ".", "BY_COUNTRY", ":", "return", "(", "Decimal", "(", "'0.0'", ")", ",", "country_code", ",", "None", ")", "return", "(", "rates", ".", "BY_COUNTRY", "[", "country_code", "]", "[", "'rate'", "]", ",", "country_code", ",", "None", ")" ]
36.788732
26.985915
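A hypothetical call, assuming the rates table maps 'GB' to a 20% standard rate:

# calculate_rate('+44 20 7946 0958') -> (Decimal('0.20'), 'GB', None)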
def name(self): """ Return the string associated with the tag name """ if self.m_name == -1 or (self.m_event != START_TAG and self.m_event != END_TAG): return '' return self.sb[self.m_name]
[ "def", "name", "(", "self", ")", ":", "if", "self", ".", "m_name", "==", "-", "1", "or", "(", "self", ".", "m_event", "!=", "START_TAG", "and", "self", ".", "m_event", "!=", "END_TAG", ")", ":", "return", "''", "return", "self", ".", "sb", "[", "self", ".", "m_name", "]" ]
29.5
19
def handle_response(self, msg, address): """Deal with incoming response packets. All answers are held in the cache, and listeners are notified.""" now = current_time_millis() sigs = [] precache = [] for record in msg.answers: if isinstance(record, DNSSignature): sigs.append(record) else: precache.append(record) for e in precache: for s in sigs: if self.verify(e, s): # print "DNS: %s verified with %s" % (e,s) if self.adaptive and e.type == _TYPE_A: if e.address == '\x00\x00\x00\x00': e.address = socket.inet_aton(address) if e in self.cache.entries(): if e.is_expired(now): for i in self.hooks: try: i.remove(e) except: pass self.cache.remove(e) self.cache.remove(s) else: entry = self.cache.get(e) sig = self.cache.get(s) if (entry is not None) and (sig is not None): for i in self.hooks: try: i.update(e) except: pass entry.reset_ttl(e) sig.reset_ttl(s) else: e.rrsig = s self.cache.add(e) self.cache.add(s) for i in self.hooks: try: i.add(e) except: pass precache.remove(e) sigs.remove(s) self.update_record(now, record) if self.bypass: for e in precache: if e in self.cache.entries(): if e.is_expired(now): for i in self.hooks: try: i.remove(e) except: pass self.cache.remove(e) else: entry = self.cache.get(e) if (entry is not None): for i in self.hooks: try: i.update(e) except: pass entry.reset_ttl(e) else: self.cache.add(e) for i in self.hooks: try: i.add(e) except: pass self.update_record(now, record)
[ "def", "handle_response", "(", "self", ",", "msg", ",", "address", ")", ":", "now", "=", "current_time_millis", "(", ")", "sigs", "=", "[", "]", "precache", "=", "[", "]", "for", "record", "in", "msg", ".", "answers", ":", "if", "isinstance", "(", "record", ",", "DNSSignature", ")", ":", "sigs", ".", "append", "(", "record", ")", "else", ":", "precache", ".", "append", "(", "record", ")", "for", "e", "in", "precache", ":", "for", "s", "in", "sigs", ":", "if", "self", ".", "verify", "(", "e", ",", "s", ")", ":", "# print \"DNS: %s verified with %s\" % (e,s)", "if", "self", ".", "adaptive", "and", "e", ".", "type", "==", "_TYPE_A", ":", "if", "e", ".", "address", "==", "'\\x00\\x00\\x00\\x00'", ":", "e", ".", "address", "=", "socket", ".", "inet_aton", "(", "address", ")", "if", "e", "in", "self", ".", "cache", ".", "entries", "(", ")", ":", "if", "e", ".", "is_expired", "(", "now", ")", ":", "for", "i", "in", "self", ".", "hooks", ":", "try", ":", "i", ".", "remove", "(", "e", ")", "except", ":", "pass", "self", ".", "cache", ".", "remove", "(", "e", ")", "self", ".", "cache", ".", "remove", "(", "s", ")", "else", ":", "entry", "=", "self", ".", "cache", ".", "get", "(", "e", ")", "sig", "=", "self", ".", "cache", ".", "get", "(", "s", ")", "if", "(", "entry", "is", "not", "None", ")", "and", "(", "sig", "is", "not", "None", ")", ":", "for", "i", "in", "self", ".", "hooks", ":", "try", ":", "i", ".", "update", "(", "e", ")", "except", ":", "pass", "entry", ".", "reset_ttl", "(", "e", ")", "sig", ".", "reset_ttl", "(", "s", ")", "else", ":", "e", ".", "rrsig", "=", "s", "self", ".", "cache", ".", "add", "(", "e", ")", "self", ".", "cache", ".", "add", "(", "s", ")", "for", "i", "in", "self", ".", "hooks", ":", "try", ":", "i", ".", "add", "(", "e", ")", "except", ":", "pass", "precache", ".", "remove", "(", "e", ")", "sigs", ".", "remove", "(", "s", ")", "self", ".", "update_record", "(", "now", ",", "record", ")", "if", "self", ".", "bypass", ":", "for", "e", "in", "precache", ":", "if", "e", "in", "self", ".", "cache", ".", "entries", "(", ")", ":", "if", "e", ".", "is_expired", "(", "now", ")", ":", "for", "i", "in", "self", ".", "hooks", ":", "try", ":", "i", ".", "remove", "(", "e", ")", "except", ":", "pass", "self", ".", "cache", ".", "remove", "(", "e", ")", "else", ":", "entry", "=", "self", ".", "cache", ".", "get", "(", "e", ")", "if", "(", "entry", "is", "not", "None", ")", ":", "for", "i", "in", "self", ".", "hooks", ":", "try", ":", "i", ".", "update", "(", "e", ")", "except", ":", "pass", "entry", ".", "reset_ttl", "(", "e", ")", "else", ":", "self", ".", "cache", ".", "add", "(", "e", ")", "for", "i", "in", "self", ".", "hooks", ":", "try", ":", "i", ".", "add", "(", "e", ")", "except", ":", "pass", "self", ".", "update_record", "(", "now", ",", "record", ")" ]
38.882353
11.741176
def dispatch(self, request, *args, **kwargs):
    """Check whether the user is allowed to use this view."""
    if False:  # TODO do permission check based on Model; a sketch follows below
        raise PermissionDenied
    return super().dispatch(request, *args, **kwargs)
[ "def", "dispatch", "(", "self", ",", "request", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "if", "False", ":", "# TODO do permission check based on Model", "raise", "PermissionDenied", "return", "super", "(", ")", ".", "dispatch", "(", "request", ",", "*", "args", ",", "*", "*", "kwargs", ")" ]
47.8
9.6
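A minimal sketch of how the permission TODO above might be filled in, assuming a Django-style class-based view; the `model` attribute and the generated permission codename are assumptions, not part of the original.

# Hypothetical completion of the TODO above, assuming a Django-style
# class-based view with a `model` attribute; the codename follows
# Django's default 'app_label.view_model' convention.
def dispatch(self, request, *args, **kwargs):
    perm = '%s.view_%s' % (self.model._meta.app_label,
                           self.model._meta.model_name)
    if not request.user.has_perm(perm):
        raise PermissionDenied
    return super().dispatch(request, *args, **kwargs)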
def reboot(name, **kwargs):
    '''
    Reboot a domain via ACPI request

    :param name: domain name
    :param connection: libvirt connection URI, overriding defaults

        .. versionadded:: 2019.2.0
    :param username: username to connect with, overriding defaults

        .. versionadded:: 2019.2.0
    :param password: password to connect with, overriding defaults

        .. versionadded:: 2019.2.0

    CLI Example:

    .. code-block:: bash

        salt '*' virt.reboot <domain>
    '''
    conn = __get_conn(**kwargs)

    ret = _get_domain(conn, name).reboot(libvirt.VIR_DOMAIN_REBOOT_DEFAULT) == 0
    conn.close()
    return ret
[ "def", "reboot", "(", "name", ",", "*", "*", "kwargs", ")", ":", "conn", "=", "__get_conn", "(", "*", "*", "kwargs", ")", "ret", "=", "_get_domain", "(", "conn", ",", "name", ")", ".", "reboot", "(", "libvirt", ".", "VIR_DOMAIN_REBOOT_DEFAULT", ")", "==", "0", "conn", ".", "close", "(", ")", "return", "ret" ]
24.88
24.56
def wf_info(workflow_path):
    """
    Returns the version of the file and the file extension.

    Assumes that the file path points directly to the file, i.e. it ends with a
    valid file extension. Supports checking local files as well as files at
    http:// and https:// locations. Files at these remote locations are
    recreated locally to enable our approach to version checking, then removed
    after version is extracted.
    """

    supported_formats = ['py', 'wdl', 'cwl']
    file_type = workflow_path.lower().split('.')[-1]  # Grab the file extension
    workflow_path = workflow_path if ':' in workflow_path else 'file://' + workflow_path

    if file_type in supported_formats:
        if workflow_path.startswith('file://'):
            version = get_version(file_type, workflow_path[7:])
        elif workflow_path.startswith('https://') or workflow_path.startswith('http://'):
            # If file not local go fetch it.
            html = urlopen(workflow_path).read()
            local_loc = os.path.join(os.getcwd(), 'fetchedFromRemote.' + file_type)
            with open(local_loc, 'w') as f:
                f.write(html.decode())
            version = wf_info('file://' + local_loc)[0]  # Don't take the file_type here, found it above.
            os.remove(local_loc)  # TODO: Find a way to avoid recreating file before version determination.
        else:
            raise NotImplementedError('Unsupported workflow file location: {}. Must be local or HTTP(S).'.format(workflow_path))
    else:
        raise TypeError('Unsupported workflow type: .{}. Must be {}.'.format(file_type, '.py, .cwl, or .wdl'))
    return version, file_type.upper()
[ "def", "wf_info", "(", "workflow_path", ")", ":", "supported_formats", "=", "[", "'py'", ",", "'wdl'", ",", "'cwl'", "]", "file_type", "=", "workflow_path", ".", "lower", "(", ")", ".", "split", "(", "'.'", ")", "[", "-", "1", "]", "# Grab the file extension", "workflow_path", "=", "workflow_path", "if", "':'", "in", "workflow_path", "else", "'file://'", "+", "workflow_path", "if", "file_type", "in", "supported_formats", ":", "if", "workflow_path", ".", "startswith", "(", "'file://'", ")", ":", "version", "=", "get_version", "(", "file_type", ",", "workflow_path", "[", "7", ":", "]", ")", "elif", "workflow_path", ".", "startswith", "(", "'https://'", ")", "or", "workflow_path", ".", "startswith", "(", "'http://'", ")", ":", "# If file not local go fetch it.", "html", "=", "urlopen", "(", "workflow_path", ")", ".", "read", "(", ")", "local_loc", "=", "os", ".", "path", ".", "join", "(", "os", ".", "getcwd", "(", ")", ",", "'fetchedFromRemote.'", "+", "file_type", ")", "with", "open", "(", "local_loc", ",", "'w'", ")", "as", "f", ":", "f", ".", "write", "(", "html", ".", "decode", "(", ")", ")", "version", "=", "wf_info", "(", "'file://'", "+", "local_loc", ")", "[", "0", "]", "# Don't take the file_type here, found it above.", "os", ".", "remove", "(", "local_loc", ")", "# TODO: Find a way to avoid recreating file before version determination.", "else", ":", "raise", "NotImplementedError", "(", "'Unsupported workflow file location: {}. Must be local or HTTP(S).'", ".", "format", "(", "workflow_path", ")", ")", "else", ":", "raise", "TypeError", "(", "'Unsupported workflow type: .{}. Must be {}.'", ".", "format", "(", "file_type", ",", "'.py, .cwl, or .wdl'", ")", ")", "return", "version", ",", "file_type", ".", "upper", "(", ")" ]
56.241379
32.172414
def oauth_authenticate(client_id, user, require_existing_link=False): """Authenticate an oauth authorized callback. :param client_id: The client id. :param user: A user instance. :param require_existing_link: If ``True``, check if remote account exists. (Default: ``False``) :returns: ``True`` if the user is successfully authenticated. """ # Authenticate via the access token (access token used to get user_id) if not requires_confirmation(user): after_this_request(_commit) if login_user(user, remember=False): if require_existing_link: account = RemoteAccount.get(user.id, client_id) if account is None: logout_user() return False return True return False
[ "def", "oauth_authenticate", "(", "client_id", ",", "user", ",", "require_existing_link", "=", "False", ")", ":", "# Authenticate via the access token (access token used to get user_id)", "if", "not", "requires_confirmation", "(", "user", ")", ":", "after_this_request", "(", "_commit", ")", "if", "login_user", "(", "user", ",", "remember", "=", "False", ")", ":", "if", "require_existing_link", ":", "account", "=", "RemoteAccount", ".", "get", "(", "user", ".", "id", ",", "client_id", ")", "if", "account", "is", "None", ":", "logout_user", "(", ")", "return", "False", "return", "True", "return", "False" ]
39.8
14.3
def _get_subcounters(self, elapsed):
    """
    Args:
        elapsed(float): Time since started.

    Returns:
        :py:class:`tuple`: list of subcounters and dictionary of additional fields

    Each subcounter in the list will be in a tuple of (subcounter, percentage)
    Fields in the dictionary are addressed in the Format documentation of this class
    """

    fields = {}
    subcounters = []

    for num, subcounter in enumerate(self._subcounters, 1):

        if self.total:
            subPercentage = subcounter.count / float(self.total)
        else:
            subPercentage = 0.0

        # Save as tuple: (subcounter, percentage)
        subcounters.append((subcounter, subPercentage))

        # Set fields
        fields['percentage_{0}'.format(num)] = subPercentage * 100
        fields['count_{0}'.format(num)] = subcounter.count

        if subcounter.all_fields:

            iterations = abs(subcounter.count - subcounter.start_count)

            if elapsed:
                # Use float to force to float in Python 2
                rate = fields['rate_{0}'.format(num)] = iterations / float(elapsed)
            else:
                rate = fields['rate_{0}'.format(num)] = 0.0

            if self.total == 0:
                fields['eta_{0}'.format(num)] = u'00:00'

            elif rate:
                fields['eta_{0}'.format(num)] = _format_time((self.total - iterations) / rate)

            else:
                fields['eta_{0}'.format(num)] = u'?'

    return subcounters, fields
[ "def", "_get_subcounters", "(", "self", ",", "elapsed", ")", ":", "fields", "=", "{", "}", "subcounters", "=", "[", "]", "for", "num", ",", "subcounter", "in", "enumerate", "(", "self", ".", "_subcounters", ",", "1", ")", ":", "if", "self", ".", "total", ":", "subPercentage", "=", "subcounter", ".", "count", "/", "float", "(", "self", ".", "total", ")", "else", ":", "subPercentage", "=", "0.0", "# Save in tuple: count, percentage, color", "subcounters", ".", "append", "(", "(", "subcounter", ",", "subPercentage", ")", ")", "# Set fields", "fields", "[", "'percentage_{0}'", ".", "format", "(", "num", ")", "]", "=", "subPercentage", "*", "100", "fields", "[", "'count_{0}'", ".", "format", "(", "num", ")", "]", "=", "subcounter", ".", "count", "if", "subcounter", ".", "all_fields", ":", "interations", "=", "abs", "(", "subcounter", ".", "count", "-", "subcounter", ".", "start_count", ")", "if", "elapsed", ":", "# Use float to force to float in Python 2", "rate", "=", "fields", "[", "'rate_{0}'", ".", "format", "(", "num", ")", "]", "=", "interations", "/", "float", "(", "elapsed", ")", "else", ":", "rate", "=", "fields", "[", "'rate_{0}'", ".", "format", "(", "num", ")", "]", "=", "0.0", "if", "self", ".", "total", "==", "0", ":", "fields", "[", "'eta_{0}'", ".", "format", "(", "num", ")", "]", "=", "u'00:00'", "elif", "rate", ":", "fields", "[", "'eta_{0}'", ".", "format", "(", "num", ")", "]", "=", "_format_time", "(", "(", "self", ".", "total", "-", "interations", ")", "/", "rate", ")", "else", ":", "fields", "[", "'eta_{0}'", ".", "format", "(", "num", ")", "]", "=", "u'?'", "return", "subcounters", ",", "fields" ]
34.340426
25.744681
def threaded(callback, listargs):
    """ Returns the result of <callback> for each set of \*args in listargs. Each
        call to <callback> runs concurrently in its own separate thread.

        Parameters:
            callback (func): Callback function to apply to each set of \*args.
            listargs (list): List of lists; \*args to pass each thread.
    """
    threads, results = [], []
    job_is_done_event = Event()
    for args in listargs:
        args += [results, len(results)]
        results.append(None)
        threads.append(Thread(target=callback, args=args,
                              kwargs=dict(job_is_done_event=job_is_done_event)))
        threads[-1].daemon = True  # setDaemon() is deprecated
        threads[-1].start()
    while not job_is_done_event.is_set():
        if all([not t.is_alive() for t in threads]):
            break
        time.sleep(0.05)
    return [r for r in results if r is not None]
[ "def", "threaded", "(", "callback", ",", "listargs", ")", ":", "threads", ",", "results", "=", "[", "]", ",", "[", "]", "job_is_done_event", "=", "Event", "(", ")", "for", "args", "in", "listargs", ":", "args", "+=", "[", "results", ",", "len", "(", "results", ")", "]", "results", ".", "append", "(", "None", ")", "threads", ".", "append", "(", "Thread", "(", "target", "=", "callback", ",", "args", "=", "args", ",", "kwargs", "=", "dict", "(", "job_is_done_event", "=", "job_is_done_event", ")", ")", ")", "threads", "[", "-", "1", "]", ".", "setDaemon", "(", "True", ")", "threads", "[", "-", "1", "]", ".", "start", "(", ")", "while", "not", "job_is_done_event", ".", "is_set", "(", ")", ":", "if", "all", "(", "[", "not", "t", ".", "is_alive", "(", ")", "for", "t", "in", "threads", "]", ")", ":", "break", "time", ".", "sleep", "(", "0.05", ")", "return", "[", "r", "for", "r", "in", "results", "if", "r", "is", "not", "None", "]" ]
39.681818
18.454545
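A hedged usage sketch for threaded() above: each element of listargs is a list of positional arguments, and the callback additionally receives the shared results list, its slot index, and a job_is_done_event keyword. The fetch callback here is hypothetical.

def fetch(url, results, index, job_is_done_event=None):
    results[index] = len(url)  # stand-in for real work

values = threaded(fetch, [['http://a.example'], ['http://b.example']])
# -> [16, 16]; None results (threads that never wrote) are filtered out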
def in_global_expression(self):
    """Return True when at the top level of an expression, i.e. not inside
    any parentheses, brackets, braces, or quotes."""
    return (self.parenthesis_count == 0 and
            self.curly_bracket_count == 0 and
            self.square_bracket_count == 0 and
            not self.in_single_quote and
            not self.in_double_quote)
[ "def", "in_global_expression", "(", "self", ")", ":", "return", "(", "self", ".", "parenthesis_count", "==", "0", "and", "self", ".", "curly_bracket_count", "==", "0", "and", "self", ".", "square_bracket_count", "==", "0", "and", "not", "self", ".", "in_single_quote", "and", "not", "self", ".", "in_double_quote", ")" ]
55.2
18
def update_queue(self): """Update queue""" started = 0 for parent_id, threadlist in list(self.started_threads.items()): still_running = [] for thread in threadlist: if thread.isFinished(): end_callback = self.end_callbacks.pop(id(thread)) if thread.results is not None: # The thread was executed successfully end_callback(thread.results) thread.setParent(None) thread = None else: still_running.append(thread) started += 1 threadlist = None if still_running: self.started_threads[parent_id] = still_running else: self.started_threads.pop(parent_id) logger.debug("Updating queue:") logger.debug(" started: %d" % started) logger.debug(" pending: %d" % len(self.pending_threads)) if self.pending_threads and started < self.max_simultaneous_threads: thread, parent_id = self.pending_threads.pop(0) thread.finished.connect(self.update_queue) threadlist = self.started_threads.get(parent_id, []) self.started_threads[parent_id] = threadlist+[thread] logger.debug("===>starting: %r" % thread) thread.start()
[ "def", "update_queue", "(", "self", ")", ":", "started", "=", "0", "for", "parent_id", ",", "threadlist", "in", "list", "(", "self", ".", "started_threads", ".", "items", "(", ")", ")", ":", "still_running", "=", "[", "]", "for", "thread", "in", "threadlist", ":", "if", "thread", ".", "isFinished", "(", ")", ":", "end_callback", "=", "self", ".", "end_callbacks", ".", "pop", "(", "id", "(", "thread", ")", ")", "if", "thread", ".", "results", "is", "not", "None", ":", "# The thread was executed successfully\r", "end_callback", "(", "thread", ".", "results", ")", "thread", ".", "setParent", "(", "None", ")", "thread", "=", "None", "else", ":", "still_running", ".", "append", "(", "thread", ")", "started", "+=", "1", "threadlist", "=", "None", "if", "still_running", ":", "self", ".", "started_threads", "[", "parent_id", "]", "=", "still_running", "else", ":", "self", ".", "started_threads", ".", "pop", "(", "parent_id", ")", "logger", ".", "debug", "(", "\"Updating queue:\"", ")", "logger", ".", "debug", "(", "\" started: %d\"", "%", "started", ")", "logger", ".", "debug", "(", "\" pending: %d\"", "%", "len", "(", "self", ".", "pending_threads", ")", ")", "if", "self", ".", "pending_threads", "and", "started", "<", "self", ".", "max_simultaneous_threads", ":", "thread", ",", "parent_id", "=", "self", ".", "pending_threads", ".", "pop", "(", "0", ")", "thread", ".", "finished", ".", "connect", "(", "self", ".", "update_queue", ")", "threadlist", "=", "self", ".", "started_threads", ".", "get", "(", "parent_id", ",", "[", "]", ")", "self", ".", "started_threads", "[", "parent_id", "]", "=", "threadlist", "+", "[", "thread", "]", "logger", ".", "debug", "(", "\"===>starting: %r\"", "%", "thread", ")", "thread", ".", "start", "(", ")" ]
46.032258
15.096774
def block_idxmat_sets(idxmat, b):
    """Reshapes idxmat into the idx vectors for the training set and validation set

    Parameters:
    -----------
    idxmat : ndarray
        Matrix with N shuffled row indices assigned to
        K blocks/columns from the oxyba.block_idxmat_shuffle
        function

    b : int
        The id of the current validation block b=[0,1,...,K-1]

    Returns:
    --------
    idx_train : ndarray
        Vector with row indices of the current training blocks,
        i.e. all blocks [0,1,...,K-1] except the b-th block.
        The vector contains int(N/K)*(K-1) elements.

    idx_valid : ndarray
        Vector with row indices of the current validation block "b".
        The vector contains int(N/K) elements.

    Example:
    --------
        K = idxmat.shape[1]
        for b in range(K):
            idx_train, idx_valid = block_idxmat_sets(idxmat, b)
            ...
    """
    import numpy as np
    idx_train = idxmat[:, [c for c in range(idxmat.shape[1]) if c != b]]
    idx_train = idx_train.reshape((np.prod(idx_train.shape),))
    return idx_train, idxmat[:, b]
[ "def", "block_idxmat_sets", "(", "idxmat", ",", "b", ")", ":", "import", "numpy", "as", "np", "idx_train", "=", "idxmat", "[", ":", ",", "[", "c", "for", "c", "in", "range", "(", "idxmat", ".", "shape", "[", "1", "]", ")", "if", "c", "is", "not", "b", "]", "]", "idx_train", "=", "idx_train", ".", "reshape", "(", "(", "np", ".", "prod", "(", "idx_train", ".", "shape", ")", ",", ")", ")", "return", "idx_train", ",", "idxmat", "[", ":", ",", "b", "]" ]
31.764706
22.529412
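A small self-contained sketch of block_idxmat_sets() above; the 4x3 index matrix stands in for the output of oxyba.block_idxmat_shuffle, which is only assumed here.

import numpy as np
idxmat = np.arange(12).reshape(4, 3)  # N=12 row indices in K=3 blocks
for b in range(idxmat.shape[1]):
    idx_train, idx_valid = block_idxmat_sets(idxmat, b)
    # idx_train holds the 8 indices of the other two blocks,
    # idx_valid the 4 indices of column b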
def restore_state(self, system): """Called after unpickling to restore some attributes manually.""" Impl.restore_state(self, system) BaseSpaceContainerImpl.restore_state(self, system) mapping = {} for node in self.cellgraph: if isinstance(node, tuple): name, key = node else: name, key = node, None cells = self.get_object(name) mapping[node] = get_node(cells, key, None) self.cellgraph = nx.relabel_nodes(self.cellgraph, mapping)
[ "def", "restore_state", "(", "self", ",", "system", ")", ":", "Impl", ".", "restore_state", "(", "self", ",", "system", ")", "BaseSpaceContainerImpl", ".", "restore_state", "(", "self", ",", "system", ")", "mapping", "=", "{", "}", "for", "node", "in", "self", ".", "cellgraph", ":", "if", "isinstance", "(", "node", ",", "tuple", ")", ":", "name", ",", "key", "=", "node", "else", ":", "name", ",", "key", "=", "node", ",", "None", "cells", "=", "self", ".", "get_object", "(", "name", ")", "mapping", "[", "node", "]", "=", "get_node", "(", "cells", ",", "key", ",", "None", ")", "self", ".", "cellgraph", "=", "nx", ".", "relabel_nodes", "(", "self", ".", "cellgraph", ",", "mapping", ")" ]
39
11.857143
def EnableNetworkInterfaces( self, interfaces, logger, dhclient_script=None): """Enable the list of network interfaces. Args: interfaces: list of string, the output device names to enable. logger: logger object, used to write to SysLog and serial port. dhclient_script: string, the path to a dhclient script used by dhclient. """ helpers.CallDhclient(interfaces, logger, dhclient_script=dhclient_script)
[ "def", "EnableNetworkInterfaces", "(", "self", ",", "interfaces", ",", "logger", ",", "dhclient_script", "=", "None", ")", ":", "helpers", ".", "CallDhclient", "(", "interfaces", ",", "logger", ",", "dhclient_script", "=", "dhclient_script", ")" ]
43.5
22.9
def _validate_default_privileges(object_type, defprivs, defprivileges): ''' Validate the supplied privileges ''' if object_type != 'group': _defperms = [_DEFAULT_PRIVILEGES_MAP[defperm] for defperm in _DEFAULT_PRIVILEGE_TYPE_MAP[object_type]] _defperms.append('ALL') if object_type not in _DEFAULT_PRIVILEGES_OBJECTS: raise SaltInvocationError( 'Invalid object_type: {0} provided'.format(object_type)) if not set(defprivs).issubset(set(_defperms)): raise SaltInvocationError( 'Invalid default privilege(s): {0} provided for object {1}'.format( defprivileges, object_type)) else: if defprivileges: raise SaltInvocationError( 'The default privileges option should not ' 'be set for object_type group')
[ "def", "_validate_default_privileges", "(", "object_type", ",", "defprivs", ",", "defprivileges", ")", ":", "if", "object_type", "!=", "'group'", ":", "_defperms", "=", "[", "_DEFAULT_PRIVILEGES_MAP", "[", "defperm", "]", "for", "defperm", "in", "_DEFAULT_PRIVILEGE_TYPE_MAP", "[", "object_type", "]", "]", "_defperms", ".", "append", "(", "'ALL'", ")", "if", "object_type", "not", "in", "_DEFAULT_PRIVILEGES_OBJECTS", ":", "raise", "SaltInvocationError", "(", "'Invalid object_type: {0} provided'", ".", "format", "(", "object_type", ")", ")", "if", "not", "set", "(", "defprivs", ")", ".", "issubset", "(", "set", "(", "_defperms", ")", ")", ":", "raise", "SaltInvocationError", "(", "'Invalid default privilege(s): {0} provided for object {1}'", ".", "format", "(", "defprivileges", ",", "object_type", ")", ")", "else", ":", "if", "defprivileges", ":", "raise", "SaltInvocationError", "(", "'The default privileges option should not '", "'be set for object_type group'", ")" ]
39.636364
19.727273
def gradients(ys, xs, grad_ys=None): """Compute gradients in dtf. Args: ys: a list of Tensors xs: a list of Tensors grad_ys: an optional list of Tensors Returns: grad_xs: a list of Tensors """ graph = ys[0].graph if not grad_ys: grad_ys = [Constant(y.mesh, 1.0, y.shape, y.dtype).outputs[0] for y in ys] # figure out what Tensors are downstream of xs downstream = set(xs) for op in graph.operations: if op.has_gradient: if set(op.inputs) & downstream: downstream |= set(op.outputs) tensor_to_gradient = dict(zip(ys, grad_ys)) for op in graph.operations[::-1]: grad_outputs = [tensor_to_gradient.get(out) for out in op.outputs] if op.has_gradient and any(grad_outputs) and (set(op.inputs) & downstream): with tf.variable_scope(op.name + "/gradients"): input_grads = op.gradient(grad_outputs) for inp, grad in zip(op.inputs, input_grads): if inp in downstream and grad is not None: if inp in tensor_to_gradient: tensor_to_gradient[inp] += grad else: tensor_to_gradient[inp] = grad return [tensor_to_gradient.get(x, None) for x in xs]
[ "def", "gradients", "(", "ys", ",", "xs", ",", "grad_ys", "=", "None", ")", ":", "graph", "=", "ys", "[", "0", "]", ".", "graph", "if", "not", "grad_ys", ":", "grad_ys", "=", "[", "Constant", "(", "y", ".", "mesh", ",", "1.0", ",", "y", ".", "shape", ",", "y", ".", "dtype", ")", ".", "outputs", "[", "0", "]", "for", "y", "in", "ys", "]", "# figure out what Tensors are downstream of xs", "downstream", "=", "set", "(", "xs", ")", "for", "op", "in", "graph", ".", "operations", ":", "if", "op", ".", "has_gradient", ":", "if", "set", "(", "op", ".", "inputs", ")", "&", "downstream", ":", "downstream", "|=", "set", "(", "op", ".", "outputs", ")", "tensor_to_gradient", "=", "dict", "(", "zip", "(", "ys", ",", "grad_ys", ")", ")", "for", "op", "in", "graph", ".", "operations", "[", ":", ":", "-", "1", "]", ":", "grad_outputs", "=", "[", "tensor_to_gradient", ".", "get", "(", "out", ")", "for", "out", "in", "op", ".", "outputs", "]", "if", "op", ".", "has_gradient", "and", "any", "(", "grad_outputs", ")", "and", "(", "set", "(", "op", ".", "inputs", ")", "&", "downstream", ")", ":", "with", "tf", ".", "variable_scope", "(", "op", ".", "name", "+", "\"/gradients\"", ")", ":", "input_grads", "=", "op", ".", "gradient", "(", "grad_outputs", ")", "for", "inp", ",", "grad", "in", "zip", "(", "op", ".", "inputs", ",", "input_grads", ")", ":", "if", "inp", "in", "downstream", "and", "grad", "is", "not", "None", ":", "if", "inp", "in", "tensor_to_gradient", ":", "tensor_to_gradient", "[", "inp", "]", "+=", "grad", "else", ":", "tensor_to_gradient", "[", "inp", "]", "=", "grad", "return", "[", "tensor_to_gradient", ".", "get", "(", "x", ",", "None", ")", "for", "x", "in", "xs", "]" ]
35
15.090909
def generate_permutations(elements, n):
    """
    Heap's algorithm for generating all n! permutations in a list
    https://en.wikipedia.org/wiki/Heap%27s_algorithm

    Note: the same list object is yielded each time, mutated in place;
    copy it if the permutations need to be kept.
    """
    c = [0] * n
    yield elements
    i = 0
    while i < n:
        if c[i] < i:
            if i % 2 == 0:
                elements[0], elements[i] = elements[i], elements[0]
            else:
                elements[c[i]], elements[i] = elements[i], elements[c[i]]
            yield elements
            c[i] += 1
            i = 0
        else:
            c[i] = 0
            i += 1
[ "def", "generate_permutations", "(", "elements", ",", "n", ")", ":", "c", "=", "[", "0", "]", "*", "n", "yield", "elements", "i", "=", "0", "while", "i", "<", "n", ":", "if", "c", "[", "i", "]", "<", "i", ":", "if", "i", "%", "2", "==", "0", ":", "elements", "[", "0", "]", ",", "elements", "[", "i", "]", "=", "elements", "[", "i", "]", ",", "elements", "[", "0", "]", "else", ":", "elements", "[", "c", "[", "i", "]", "]", ",", "elements", "[", "i", "]", "=", "elements", "[", "i", "]", ",", "elements", "[", "c", "[", "i", "]", "]", "yield", "elements", "c", "[", "i", "]", "+=", "1", "i", "=", "0", "else", ":", "c", "[", "i", "]", "=", "0", "i", "+=", "1" ]
26
20.095238
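Since generate_permutations() above yields its working list in place, each yield should be copied; a quick check:

perms = [list(p) for p in generate_permutations([1, 2, 3], 3)]
assert len(perms) == 6  # 3! permutations, each copied out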
def go_to_line(self, line=None):
    """Go to line dialog"""
    if line is not None:
        # When this method is called from the file switcher, a line
        # number is specified, so there is no need for the dialog.
        self.get_current_editor().go_to_line(line)
    else:
        if self.data:
            self.get_current_editor().exec_gotolinedialog()
[ "def", "go_to_line", "(", "self", ",", "line", "=", "None", ")", ":", "if", "line", "is", "not", "None", ":", "# When this method is called from the flileswitcher, a line\r", "# number is specified, so there is no need for the dialog.\r", "self", ".", "get_current_editor", "(", ")", ".", "go_to_line", "(", "line", ")", "else", ":", "if", "self", ".", "data", ":", "self", ".", "get_current_editor", "(", ")", ".", "exec_gotolinedialog", "(", ")" ]
43.888889
17.666667
def checkCompatibleParams(initialParams, laterParams): """ Check a later set of BLAST parameters against those originally found. @param initialParams: A C{dict} with the originally encountered BLAST parameter settings. @param laterParams: A C{dict} with BLAST parameter settings encountered later. @return: A C{str} summary of the parameter differences if the parameter sets differ, else C{None}. """ # Note that although the params contains a 'date', its value is empty # (as far as I've seen). This could become an issue one day if it # becomes non-empty and differs between JSON files that we cat # together. In that case we may need to be more specific in our params # compatible checking. err = [] for param in initialParams: if param in laterParams: if (param not in VARIABLE_PARAMS and initialParams[param] != laterParams[param]): err.append( '\tParam %r initial value %r differs from ' 'later value %r' % (param, initialParams[param], laterParams[param])) else: err.append('\t%r found in initial parameters, not found ' 'in later parameters' % param) for param in laterParams: if param not in initialParams: err.append('\t%r found in later parameters, not seen in ' 'initial parameters' % param) return 'Summary of differences:\n%s' % '\n'.join(err) if err else None
[ "def", "checkCompatibleParams", "(", "initialParams", ",", "laterParams", ")", ":", "# Note that although the params contains a 'date', its value is empty", "# (as far as I've seen). This could become an issue one day if it", "# becomes non-empty and differs between JSON files that we cat", "# together. In that case we may need to be more specific in our params", "# compatible checking.", "err", "=", "[", "]", "for", "param", "in", "initialParams", ":", "if", "param", "in", "laterParams", ":", "if", "(", "param", "not", "in", "VARIABLE_PARAMS", "and", "initialParams", "[", "param", "]", "!=", "laterParams", "[", "param", "]", ")", ":", "err", ".", "append", "(", "'\\tParam %r initial value %r differs from '", "'later value %r'", "%", "(", "param", ",", "initialParams", "[", "param", "]", ",", "laterParams", "[", "param", "]", ")", ")", "else", ":", "err", ".", "append", "(", "'\\t%r found in initial parameters, not found '", "'in later parameters'", "%", "param", ")", "for", "param", "in", "laterParams", ":", "if", "param", "not", "in", "initialParams", ":", "err", ".", "append", "(", "'\\t%r found in later parameters, not seen in '", "'initial parameters'", "%", "param", ")", "return", "'Summary of differences:\\n%s'", "%", "'\\n'", ".", "join", "(", "err", ")", "if", "err", "else", "None" ]
44.257143
21.971429
def get_event_tags_from_dn(dn): """ This grabs the event tags from the dn designator. They look like this: uni/tn-DataDog/ap-DtDg-AP1-EcommerceApp/epg-DtDg-Ecomm/HDl2IngrPktsAg1h """ tags = [] node = get_node_from_dn(dn) if node: tags.append("node:" + node) app = get_app_from_dn(dn) if app: tags.append("app:" + app) bd = get_bd_from_dn(dn) if bd: tags.append("bd:" + bd) cep = get_cep_from_dn(dn) if cep: tags.append("mac:" + cep) ip = get_ip_from_dn(dn) if ip: tags.append("ip:" + ip) epg = get_epg_from_dn(dn) if epg: tags.append("epg:" + epg) return tags
[ "def", "get_event_tags_from_dn", "(", "dn", ")", ":", "tags", "=", "[", "]", "node", "=", "get_node_from_dn", "(", "dn", ")", "if", "node", ":", "tags", ".", "append", "(", "\"node:\"", "+", "node", ")", "app", "=", "get_app_from_dn", "(", "dn", ")", "if", "app", ":", "tags", ".", "append", "(", "\"app:\"", "+", "app", ")", "bd", "=", "get_bd_from_dn", "(", "dn", ")", "if", "bd", ":", "tags", ".", "append", "(", "\"bd:\"", "+", "bd", ")", "cep", "=", "get_cep_from_dn", "(", "dn", ")", "if", "cep", ":", "tags", ".", "append", "(", "\"mac:\"", "+", "cep", ")", "ip", "=", "get_ip_from_dn", "(", "dn", ")", "if", "ip", ":", "tags", ".", "append", "(", "\"ip:\"", "+", "ip", ")", "epg", "=", "get_epg_from_dn", "(", "dn", ")", "if", "epg", ":", "tags", ".", "append", "(", "\"epg:\"", "+", "epg", ")", "return", "tags" ]
26.2
16.68
def get_domain_and_name(self, domain_or_name):
    """
    Given a ``str`` or :class:`boto.sdb.domain.Domain`, return a
    ``tuple`` with the following members (in order):

        * An instance of :class:`boto.sdb.domain.Domain` for the requested
          domain
        * The domain's name as a ``str``

    :type domain_or_name: ``str`` or :class:`boto.sdb.domain.Domain`
    :param domain_or_name: The domain or domain name to get the domain
        and name for.

    :raises: :class:`boto.exception.SDBResponseError` when an invalid
        domain name is specified.

    :rtype: tuple
    :return: A ``tuple`` with contents outlined as per above.
    """
    if (isinstance(domain_or_name, Domain)):
        return (domain_or_name, domain_or_name.name)
    else:
        return (self.get_domain(domain_or_name), domain_or_name)
[ "def", "get_domain_and_name", "(", "self", ",", "domain_or_name", ")", ":", "if", "(", "isinstance", "(", "domain_or_name", ",", "Domain", ")", ")", ":", "return", "(", "domain_or_name", ",", "domain_or_name", ".", "name", ")", "else", ":", "return", "(", "self", ".", "get_domain", "(", "domain_or_name", ")", ",", "domain_or_name", ")" ]
40.434783
20.347826
def parse_cli_args(): """parse args from the CLI and return a dict""" parser = argparse.ArgumentParser(description='2048 in your terminal') parser.add_argument('--mode', dest='mode', type=str, default=None, help='colors mode (dark or light)') parser.add_argument('--az', dest='azmode', action='store_true', help='Use the letters a-z instead of numbers') parser.add_argument('--resume', dest='resume', action='store_true', help='restart the game from where you left') parser.add_argument('-v', '--version', action='store_true') parser.add_argument('-r', '--rules', action='store_true') return vars(parser.parse_args())
[ "def", "parse_cli_args", "(", ")", ":", "parser", "=", "argparse", ".", "ArgumentParser", "(", "description", "=", "'2048 in your terminal'", ")", "parser", ".", "add_argument", "(", "'--mode'", ",", "dest", "=", "'mode'", ",", "type", "=", "str", ",", "default", "=", "None", ",", "help", "=", "'colors mode (dark or light)'", ")", "parser", ".", "add_argument", "(", "'--az'", ",", "dest", "=", "'azmode'", ",", "action", "=", "'store_true'", ",", "help", "=", "'Use the letters a-z instead of numbers'", ")", "parser", ".", "add_argument", "(", "'--resume'", ",", "dest", "=", "'resume'", ",", "action", "=", "'store_true'", ",", "help", "=", "'restart the game from where you left'", ")", "parser", ".", "add_argument", "(", "'-v'", ",", "'--version'", ",", "action", "=", "'store_true'", ")", "parser", ".", "add_argument", "(", "'-r'", ",", "'--rules'", ",", "action", "=", "'store_true'", ")", "return", "vars", "(", "parser", ".", "parse_args", "(", ")", ")" ]
59.166667
22.083333
def set_all_pattern_variables(self, patternnumber, \ sp0, ti0, sp1, ti1, sp2, ti2, sp3, ti3, sp4, ti4, sp5, ti5, sp6, ti6, sp7, ti7, \ actual_step, additional_cycles, link_pattern): """Set all variables for a given pattern at one time. Args: * patternnumber (integer): 0-7 * sp[*n*] (float): setpoint value for step *n* * ti[*n*] (integer??): step time for step *n*, 0-900 * actual_step (int): ? * additional_cycles(int): ? * link_pattern(int): ? """ _checkPatternNumber(patternnumber) self.set_pattern_step_setpoint(patternnumber, 0, sp0) self.set_pattern_step_setpoint(patternnumber, 1, sp1) self.set_pattern_step_setpoint(patternnumber, 2, sp2) self.set_pattern_step_setpoint(patternnumber, 3, sp3) self.set_pattern_step_setpoint(patternnumber, 4, sp4) self.set_pattern_step_setpoint(patternnumber, 5, sp5) self.set_pattern_step_setpoint(patternnumber, 6, sp6) self.set_pattern_step_setpoint(patternnumber, 7, sp7) self.set_pattern_step_time( patternnumber, 0, ti0) self.set_pattern_step_time( patternnumber, 1, ti1) self.set_pattern_step_time( patternnumber, 2, ti2) self.set_pattern_step_time( patternnumber, 3, ti3) self.set_pattern_step_time( patternnumber, 4, ti4) self.set_pattern_step_time( patternnumber, 5, ti5) self.set_pattern_step_time( patternnumber, 6, ti6) self.set_pattern_step_time( patternnumber, 7, ti7) self.set_pattern_additional_cycles(patternnumber, additional_cycles) self.set_pattern_link_topattern( patternnumber, link_pattern) self.set_pattern_actual_step( patternnumber, actual_step)
[ "def", "set_all_pattern_variables", "(", "self", ",", "patternnumber", ",", "sp0", ",", "ti0", ",", "sp1", ",", "ti1", ",", "sp2", ",", "ti2", ",", "sp3", ",", "ti3", ",", "sp4", ",", "ti4", ",", "sp5", ",", "ti5", ",", "sp6", ",", "ti6", ",", "sp7", ",", "ti7", ",", "actual_step", ",", "additional_cycles", ",", "link_pattern", ")", ":", "_checkPatternNumber", "(", "patternnumber", ")", "self", ".", "set_pattern_step_setpoint", "(", "patternnumber", ",", "0", ",", "sp0", ")", "self", ".", "set_pattern_step_setpoint", "(", "patternnumber", ",", "1", ",", "sp1", ")", "self", ".", "set_pattern_step_setpoint", "(", "patternnumber", ",", "2", ",", "sp2", ")", "self", ".", "set_pattern_step_setpoint", "(", "patternnumber", ",", "3", ",", "sp3", ")", "self", ".", "set_pattern_step_setpoint", "(", "patternnumber", ",", "4", ",", "sp4", ")", "self", ".", "set_pattern_step_setpoint", "(", "patternnumber", ",", "5", ",", "sp5", ")", "self", ".", "set_pattern_step_setpoint", "(", "patternnumber", ",", "6", ",", "sp6", ")", "self", ".", "set_pattern_step_setpoint", "(", "patternnumber", ",", "7", ",", "sp7", ")", "self", ".", "set_pattern_step_time", "(", "patternnumber", ",", "0", ",", "ti0", ")", "self", ".", "set_pattern_step_time", "(", "patternnumber", ",", "1", ",", "ti1", ")", "self", ".", "set_pattern_step_time", "(", "patternnumber", ",", "2", ",", "ti2", ")", "self", ".", "set_pattern_step_time", "(", "patternnumber", ",", "3", ",", "ti3", ")", "self", ".", "set_pattern_step_time", "(", "patternnumber", ",", "4", ",", "ti4", ")", "self", ".", "set_pattern_step_time", "(", "patternnumber", ",", "5", ",", "ti5", ")", "self", ".", "set_pattern_step_time", "(", "patternnumber", ",", "6", ",", "ti6", ")", "self", ".", "set_pattern_step_time", "(", "patternnumber", ",", "7", ",", "ti7", ")", "self", ".", "set_pattern_additional_cycles", "(", "patternnumber", ",", "additional_cycles", ")", "self", ".", "set_pattern_link_topattern", "(", "patternnumber", ",", "link_pattern", ")", "self", ".", "set_pattern_actual_step", "(", "patternnumber", ",", "actual_step", ")" ]
51.914286
19.514286
def refresh(self):
    """ Even though blocks never change, this method freshly obtains the
        block's contents from the API.
    """
    block = self.blockchain.rpc.get_block(self.identifier)
    if not block:
        raise BlockDoesNotExistsException
    super(Block, self).__init__(
        block, blockchain_instance=self.blockchain, use_cache=self._use_cache
    )
[ "def", "refresh", "(", "self", ")", ":", "block", "=", "self", ".", "blockchain", ".", "rpc", ".", "get_block", "(", "self", ".", "identifier", ")", "if", "not", "block", ":", "raise", "BlockDoesNotExistsException", "super", "(", "Block", ",", "self", ")", ".", "__init__", "(", "block", ",", "blockchain_instance", "=", "self", ".", "blockchain", ",", "use_cache", "=", "self", ".", "_use_cache", ")" ]
39.9
14.4
def shape(self):
    """Total spaces per axis, computed recursively.

    The recursion ends at the first level that does not have a shape.

    Examples
    --------
    >>> r2, r3 = odl.rn(2), odl.rn(3)
    >>> pspace = odl.ProductSpace(r2, r3)
    >>> pspace.shape
    (2,)
    >>> pspace2 = odl.ProductSpace(pspace, 3)
    >>> pspace2.shape
    (3, 2)

    If the space is a "pure" product space, shape recurses all the way
    into the components:

    >>> r2_2 = odl.ProductSpace(r2, 3)
    >>> r2_2.shape
    (3, 2)
    """
    if len(self) == 0:
        return ()
    elif self.is_power_space:
        try:
            sub_shape = self[0].shape
        except AttributeError:
            sub_shape = ()
    else:
        sub_shape = ()

    return (len(self),) + sub_shape
[ "def", "shape", "(", "self", ")", ":", "if", "len", "(", "self", ")", "==", "0", ":", "return", "(", ")", "elif", "self", ".", "is_power_space", ":", "try", ":", "sub_shape", "=", "self", "[", "0", "]", ".", "shape", "except", "AttributeError", ":", "sub_shape", "=", "(", ")", "else", ":", "sub_shape", "=", "(", ")", "return", "(", "len", "(", "self", ")", ",", ")", "+", "sub_shape" ]
25.909091
18.757576
def encodeUcs2(text): """ UCS2 text encoding algorithm Encodes the specified text string into UCS2-encoded bytes. :param text: the text string to encode :return: A bytearray containing the string encoded in UCS2 encoding :rtype: bytearray """ result = bytearray() for b in map(ord, text): result.append(b >> 8) result.append(b & 0xFF) return result
[ "def", "encodeUcs2", "(", "text", ")", ":", "result", "=", "bytearray", "(", ")", "for", "b", "in", "map", "(", "ord", ",", "text", ")", ":", "result", ".", "append", "(", "b", ">>", "8", ")", "result", ".", "append", "(", "b", "&", "0xFF", ")", "return", "result" ]
26.733333
18.133333
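For example, encodeUcs2() above turns each character into a big-endian 16-bit code unit:

encodeUcs2(u'hi')  # -> bytearray(b'\x00h\x00i')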
def to_datetime(jdc): '''Return a datetime for the input floating point Julian Day Count''' year, month, day = gregorian.from_jd(jdc) # in jdc: 0.0 = noon, 0.5 = midnight # the 0.5 changes it to 0.0 = midnight, 0.5 = noon frac = (jdc + 0.5) % 1 hours = int(24 * frac) mfrac = frac * 24 - hours mins = int(60 * round(mfrac, 6)) sfrac = mfrac * 60 - mins secs = int(60 * round(sfrac, 6)) msfrac = sfrac * 60 - secs # down to ms, which are 1/1000 of a second ms = int(1000 * round(msfrac, 6)) return datetime(year, month, day, int(hours), int(mins), int(secs), int(ms), tzinfo=utc)
[ "def", "to_datetime", "(", "jdc", ")", ":", "year", ",", "month", ",", "day", "=", "gregorian", ".", "from_jd", "(", "jdc", ")", "# in jdc: 0.0 = noon, 0.5 = midnight", "# the 0.5 changes it to 0.0 = midnight, 0.5 = noon", "frac", "=", "(", "jdc", "+", "0.5", ")", "%", "1", "hours", "=", "int", "(", "24", "*", "frac", ")", "mfrac", "=", "frac", "*", "24", "-", "hours", "mins", "=", "int", "(", "60", "*", "round", "(", "mfrac", ",", "6", ")", ")", "sfrac", "=", "mfrac", "*", "60", "-", "mins", "secs", "=", "int", "(", "60", "*", "round", "(", "sfrac", ",", "6", ")", ")", "msfrac", "=", "sfrac", "*", "60", "-", "secs", "# down to ms, which are 1/1000 of a second", "ms", "=", "int", "(", "1000", "*", "round", "(", "msfrac", ",", "6", ")", ")", "return", "datetime", "(", "year", ",", "month", ",", "day", ",", "int", "(", "hours", ")", ",", "int", "(", "mins", ")", ",", "int", "(", "secs", ")", ",", "int", "(", "ms", ")", ",", "tzinfo", "=", "utc", ")" ]
28.181818
21.818182
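A sanity check for to_datetime() above, assuming the gregorian module it relies on is available: the J2000.0 epoch, JD 2451545.0, falls at noon.

to_datetime(2451545.0)
# -> datetime(2000, 1, 1, 12, 0, tzinfo=utc)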
def _add_text_size_ngrams(self, text_id, size, ngrams): """Adds `ngrams`, that are of size `size`, to the data store. The added `ngrams` are associated with `text_id`. :param text_id: database ID of text associated with `ngrams` :type text_id: `int` :param size: size of n-grams :type size: `int` :param ngrams: n-grams to be added :type ngrams: `collections.Counter` """ unique_ngrams = len(ngrams) self._logger.info('Adding {} unique {}-grams'.format( unique_ngrams, size)) parameters = [[text_id, ngram, size, count] for ngram, count in ngrams.items()] with self._conn: self._conn.execute(constants.INSERT_TEXT_HAS_NGRAM_SQL, [text_id, size, unique_ngrams]) self._conn.executemany(constants.INSERT_NGRAM_SQL, parameters)
[ "def", "_add_text_size_ngrams", "(", "self", ",", "text_id", ",", "size", ",", "ngrams", ")", ":", "unique_ngrams", "=", "len", "(", "ngrams", ")", "self", ".", "_logger", ".", "info", "(", "'Adding {} unique {}-grams'", ".", "format", "(", "unique_ngrams", ",", "size", ")", ")", "parameters", "=", "[", "[", "text_id", ",", "ngram", ",", "size", ",", "count", "]", "for", "ngram", ",", "count", "in", "ngrams", ".", "items", "(", ")", "]", "with", "self", ".", "_conn", ":", "self", ".", "_conn", ".", "execute", "(", "constants", ".", "INSERT_TEXT_HAS_NGRAM_SQL", ",", "[", "text_id", ",", "size", ",", "unique_ngrams", "]", ")", "self", ".", "_conn", ".", "executemany", "(", "constants", ".", "INSERT_NGRAM_SQL", ",", "parameters", ")" ]
40.818182
17.090909
def pltexp(iverts, delta): """ Expand a triangular plate by a specified amount. The expanded plate is co-planar with, and has the same orientation as, the original. The centroids of the two plates coincide. https://naif.jpl.nasa.gov/pub/naif/toolkit_docs/C/cspice/pltexp_c.html :param iverts: Vertices of the plate to be expanded. :type iverts: 3x3-Element Array of floats :param delta: Fraction by which the plate is to be expanded. :type delta: double :return: Vertices of the expanded plate. :rtype: 3x3-Element Array of floats """ iverts = stypes.toDoubleMatrix(iverts) delta = ctypes.c_double(delta) overts = stypes.emptyDoubleMatrix() libspice.pltexp_c(iverts, delta, overts) return stypes.cMatrixToNumpy(overts)
[ "def", "pltexp", "(", "iverts", ",", "delta", ")", ":", "iverts", "=", "stypes", ".", "toDoubleMatrix", "(", "iverts", ")", "delta", "=", "ctypes", ".", "c_double", "(", "delta", ")", "overts", "=", "stypes", ".", "emptyDoubleMatrix", "(", ")", "libspice", ".", "pltexp_c", "(", "iverts", ",", "delta", ",", "overts", ")", "return", "stypes", ".", "cMatrixToNumpy", "(", "overts", ")" ]
39.1
13.5
def _get_action_strings(cls, possible_actions: List[List[ProductionRule]], action_indices: Dict[int, List[List[int]]]) -> List[List[List[str]]]: """ Takes a list of possible actions and indices of decoded actions into those possible actions for a batch and returns sequences of action strings. We assume ``action_indices`` is a dict mapping batch indices to k-best decoded sequence lists. """ all_action_strings: List[List[List[str]]] = [] batch_size = len(possible_actions) for i in range(batch_size): batch_actions = possible_actions[i] batch_best_sequences = action_indices[i] if i in action_indices else [] # This will append an empty list to ``all_action_strings`` if ``batch_best_sequences`` # is empty. action_strings = [[batch_actions[rule_id][0] for rule_id in sequence] for sequence in batch_best_sequences] all_action_strings.append(action_strings) return all_action_strings
[ "def", "_get_action_strings", "(", "cls", ",", "possible_actions", ":", "List", "[", "List", "[", "ProductionRule", "]", "]", ",", "action_indices", ":", "Dict", "[", "int", ",", "List", "[", "List", "[", "int", "]", "]", "]", ")", "->", "List", "[", "List", "[", "List", "[", "str", "]", "]", "]", ":", "all_action_strings", ":", "List", "[", "List", "[", "List", "[", "str", "]", "]", "]", "=", "[", "]", "batch_size", "=", "len", "(", "possible_actions", ")", "for", "i", "in", "range", "(", "batch_size", ")", ":", "batch_actions", "=", "possible_actions", "[", "i", "]", "batch_best_sequences", "=", "action_indices", "[", "i", "]", "if", "i", "in", "action_indices", "else", "[", "]", "# This will append an empty list to ``all_action_strings`` if ``batch_best_sequences``", "# is empty.", "action_strings", "=", "[", "[", "batch_actions", "[", "rule_id", "]", "[", "0", "]", "for", "rule_id", "in", "sequence", "]", "for", "sequence", "in", "batch_best_sequences", "]", "all_action_strings", ".", "append", "(", "action_strings", ")", "return", "all_action_strings" ]
57.736842
25.105263
def _search(self, searchstring): """ search users @str searchstring: search string @rtype: dict, {<user>: {<attr>: <value>}} """ if searchstring is None: return {} ret = {} for b in self.backends: tmp = self.backends[b].search(searchstring) for u in tmp: if u not in ret: ret[u] = {} self._merge_user_attrs(tmp[u], ret[u], b) return ret
[ "def", "_search", "(", "self", ",", "searchstring", ")", ":", "if", "searchstring", "is", "None", ":", "return", "{", "}", "ret", "=", "{", "}", "for", "b", "in", "self", ".", "backends", ":", "tmp", "=", "self", ".", "backends", "[", "b", "]", ".", "search", "(", "searchstring", ")", "for", "u", "in", "tmp", ":", "if", "u", "not", "in", "ret", ":", "ret", "[", "u", "]", "=", "{", "}", "self", ".", "_merge_user_attrs", "(", "tmp", "[", "u", "]", ",", "ret", "[", "u", "]", ",", "b", ")", "return", "ret" ]
31.6
10.866667
def findfirst(f, coll):
    """Return the first element of coll for which f is falsy (dropwhile
    skips elements while f is truthy), otherwise None."""
    result = list(dropwhile(f, coll))
    return result[0] if result else None
[ "def", "findfirst", "(", "f", ",", "coll", ")", ":", "result", "=", "list", "(", "dropwhile", "(", "f", ",", "coll", ")", ")", "return", "result", "[", "0", "]", "if", "result", "else", "None" ]
40.75
4.5
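Given the dropwhile semantics noted above, the predicate for findfirst() describes elements to skip:

findfirst(lambda x: x < 3, [1, 2, 4, 1])  # -> 4, the first element where f is falsy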
def _is_missing_tags_strict(self): """ Return whether missing_tags is set to strict. """ val = self.missing_tags if val == MissingTags.strict: return True elif val == MissingTags.ignore: return False raise Exception("Unsupported 'missing_tags' value: %s" % repr(val))
[ "def", "_is_missing_tags_strict", "(", "self", ")", ":", "val", "=", "self", ".", "missing_tags", "if", "val", "==", "MissingTags", ".", "strict", ":", "return", "True", "elif", "val", "==", "MissingTags", ".", "ignore", ":", "return", "False", "raise", "Exception", "(", "\"Unsupported 'missing_tags' value: %s\"", "%", "repr", "(", "val", ")", ")" ]
26
16.923077
def _get_choice_attribute(self, value): """Get a choice attribute for the given value. Parameters ---------- value: ? The value for which we want a choice attribute. Returns ------- An instance of a class based on ``ChoiceAttributeMixin`` for the given value. Raises ------ ValueError If the value is None, as we cannot really subclass NoneType. """ if value is None: raise ValueError('Using `None` in a `Choices` object is not supported. You may ' 'use an empty string.') return create_choice_attribute(self.ChoiceAttributeMixin, value, self)
[ "def", "_get_choice_attribute", "(", "self", ",", "value", ")", ":", "if", "value", "is", "None", ":", "raise", "ValueError", "(", "'Using `None` in a `Choices` object is not supported. You may '", "'use an empty string.'", ")", "return", "create_choice_attribute", "(", "self", ".", "ChoiceAttributeMixin", ",", "value", ",", "self", ")" ]
28.958333
26.916667
async def close(self): """Close this transaction. If this transaction is the base transaction in a begin/commit nesting, the transaction will rollback(). Otherwise, the method returns. This is used to cancel a Transaction without affecting the scope of an enclosing transaction. """ if not self._parent._is_active: return if self._parent is self: await self.rollback() else: self._is_active = False
[ "async", "def", "close", "(", "self", ")", ":", "if", "not", "self", ".", "_parent", ".", "_is_active", ":", "return", "if", "self", ".", "_parent", "is", "self", ":", "await", "self", ".", "rollback", "(", ")", "else", ":", "self", ".", "_is_active", "=", "False" ]
31.375
17.5625
def Record(self, value): """Records given value.""" self.sum += value self.count += 1 pos = bisect.bisect(self.bins, value) - 1 if pos < 0: pos = 0 elif pos == len(self.bins): pos = len(self.bins) - 1 self.heights[pos] += 1
[ "def", "Record", "(", "self", ",", "value", ")", ":", "self", ".", "sum", "+=", "value", "self", ".", "count", "+=", "1", "pos", "=", "bisect", ".", "bisect", "(", "self", ".", "bins", ",", "value", ")", "-", "1", "if", "pos", "<", "0", ":", "pos", "=", "0", "elif", "pos", "==", "len", "(", "self", ".", "bins", ")", ":", "pos", "=", "len", "(", "self", ".", "bins", ")", "-", "1", "self", ".", "heights", "[", "pos", "]", "+=", "1" ]
21.166667
18.833333
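The bin lookup in Record() above is plain bisect arithmetic; stand-alone:

import bisect
bins = [0, 10, 20]
bisect.bisect(bins, 5) - 1   # -> 0, the first bin
bisect.bisect(bins, 25) - 1  # -> 2, the last bin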
def convert(self, value, view): """Check that the value is a string and matches the pattern. """ if isinstance(value, BASESTRING): if self.pattern and not self.regex.match(value): self.fail( u"must match the pattern {0}".format(self.pattern), view ) return value else: self.fail(u'must be a string', view, True)
[ "def", "convert", "(", "self", ",", "value", ",", "view", ")", ":", "if", "isinstance", "(", "value", ",", "BASESTRING", ")", ":", "if", "self", ".", "pattern", "and", "not", "self", ".", "regex", ".", "match", "(", "value", ")", ":", "self", ".", "fail", "(", "u\"must match the pattern {0}\"", ".", "format", "(", "self", ".", "pattern", ")", ",", "view", ")", "return", "value", "else", ":", "self", ".", "fail", "(", "u'must be a string'", ",", "view", ",", "True", ")" ]
36.666667
14.25
def is_valid_interval(self, lower, upper): """Return False if [lower:upper] is not a valid subitems interval. If it is, then returns a tuple of (lower index, upper index)""" try: lower_idx = self.data.index(lower) upper_idx = self.data.index(upper) return (lower_idx, upper_idx) if lower_idx <= upper_idx else False except ValueError: return False
[ "def", "is_valid_interval", "(", "self", ",", "lower", ",", "upper", ")", ":", "try", ":", "lower_idx", "=", "self", ".", "data", ".", "index", "(", "lower", ")", "upper_idx", "=", "self", ".", "data", ".", "index", "(", "upper", ")", "return", "(", "lower_idx", ",", "upper_idx", ")", "if", "lower_idx", "<=", "upper_idx", "else", "False", "except", "ValueError", ":", "return", "False" ]
46.555556
12.222222
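The same interval check as is_valid_interval() above, on a plain list for illustration:

data = ['a', 'b', 'c']
lo, hi = data.index('a'), data.index('c')
result = (lo, hi) if lo <= hi else False  # -> (0, 2); reversed bounds give False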
def _get_comments_count(session, group_or_user_id, wall_id): """ https://vk.com/dev/wall.getComments """ response = session.fetch("wall.getComments", count=100, owner_id=group_or_user_id, post_id=wall_id) return response.get('count')
[ "def", "_get_comments_count", "(", "session", ",", "group_or_user_id", ",", "wall_id", ")", ":", "response", "=", "session", ".", "fetch", "(", "\"wall.getComments\"", ",", "count", "=", "100", ",", "owner_id", "=", "group_or_user_id", ",", "post_id", "=", "wall_id", ")", "return", "response", ".", "get", "(", "'count'", ")" ]
44.666667
15.666667