Columns: text (string, lengths 89 to 104k), code_tokens (list), avg_line_len (float64, 7.91 to 980), score (float64, 0 to 630)
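A minimal sketch of how a dump with this schema could be loaded and inspected, assuming the records are exported as JSON Lines with the column names above; the file name code_samples.jsonl and the score threshold are hypothetical.

```python
import pandas as pd

# Load the dump: one JSON record per line with the columns
# text, code_tokens, avg_line_len and score.
df = pd.read_json("code_samples.jsonl", lines=True)

# Compare the observed ranges with the column summary above.
print(df["text"].str.len().agg(["min", "max"]))
print(df[["avg_line_len", "score"]].describe())

# Keep only higher-scoring samples for downstream use (threshold is arbitrary).
high_score = df[df["score"] > 100]
print(len(high_score), "records with score above 100")
```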
def transform(self, X): """ After the fit step, it is known which features are relevant, Only extract those from the time series handed in with the function :func:`~set_timeseries_container`. If filter_only_tsfresh_features is False, also delete the irrelevant, already present features in the data frame. :param X: the data sample to add the relevant (and delete the irrelevant) features to. :type X: pandas.DataFrame or numpy.array :return: a data sample with the same information as X, but with added relevant time series features and deleted irrelevant information (only if filter_only_tsfresh_features is False). :rtype: pandas.DataFrame """ if self.feature_selector.relevant_features is None: raise RuntimeError("You have to call fit before.") if self.timeseries_container is None: raise RuntimeError("You have to provide a time series using the set_timeseries_container function before.") self.feature_extractor.set_timeseries_container(self.timeseries_container) relevant_time_series_features = set(self.feature_selector.relevant_features) - set(pd.DataFrame(X).columns) relevant_extraction_settings = from_columns(relevant_time_series_features) # Set imputing strategy impute_function = partial(impute_dataframe_range, col_to_max=self.col_to_max, col_to_min=self.col_to_min, col_to_median=self.col_to_median) relevant_feature_extractor = FeatureAugmenter(kind_to_fc_parameters=relevant_extraction_settings, default_fc_parameters={}, column_id=self.feature_extractor.column_id, column_sort=self.feature_extractor.column_sort, column_kind=self.feature_extractor.column_kind, column_value=self.feature_extractor.column_value, chunksize=self.feature_extractor.chunksize, n_jobs=self.feature_extractor.n_jobs, show_warnings=self.feature_extractor.show_warnings, disable_progressbar=self.feature_extractor.disable_progressbar, impute_function=impute_function, profile=self.feature_extractor.profile, profiling_filename=self.feature_extractor.profiling_filename, profiling_sorting=self.feature_extractor.profiling_sorting) relevant_feature_extractor.set_timeseries_container(self.feature_extractor.timeseries_container) X_augmented = relevant_feature_extractor.transform(X) if self.filter_only_tsfresh_features: return X_augmented.copy().loc[:, self.feature_selector.relevant_features + X.columns.tolist()] else: return X_augmented.copy().loc[:, self.feature_selector.relevant_features]
[ "def", "transform", "(", "self", ",", "X", ")", ":", "if", "self", ".", "feature_selector", ".", "relevant_features", "is", "None", ":", "raise", "RuntimeError", "(", "\"You have to call fit before.\"", ")", "if", "self", ".", "timeseries_container", "is", "None", ":", "raise", "RuntimeError", "(", "\"You have to provide a time series using the set_timeseries_container function before.\"", ")", "self", ".", "feature_extractor", ".", "set_timeseries_container", "(", "self", ".", "timeseries_container", ")", "relevant_time_series_features", "=", "set", "(", "self", ".", "feature_selector", ".", "relevant_features", ")", "-", "set", "(", "pd", ".", "DataFrame", "(", "X", ")", ".", "columns", ")", "relevant_extraction_settings", "=", "from_columns", "(", "relevant_time_series_features", ")", "# Set imputing strategy", "impute_function", "=", "partial", "(", "impute_dataframe_range", ",", "col_to_max", "=", "self", ".", "col_to_max", ",", "col_to_min", "=", "self", ".", "col_to_min", ",", "col_to_median", "=", "self", ".", "col_to_median", ")", "relevant_feature_extractor", "=", "FeatureAugmenter", "(", "kind_to_fc_parameters", "=", "relevant_extraction_settings", ",", "default_fc_parameters", "=", "{", "}", ",", "column_id", "=", "self", ".", "feature_extractor", ".", "column_id", ",", "column_sort", "=", "self", ".", "feature_extractor", ".", "column_sort", ",", "column_kind", "=", "self", ".", "feature_extractor", ".", "column_kind", ",", "column_value", "=", "self", ".", "feature_extractor", ".", "column_value", ",", "chunksize", "=", "self", ".", "feature_extractor", ".", "chunksize", ",", "n_jobs", "=", "self", ".", "feature_extractor", ".", "n_jobs", ",", "show_warnings", "=", "self", ".", "feature_extractor", ".", "show_warnings", ",", "disable_progressbar", "=", "self", ".", "feature_extractor", ".", "disable_progressbar", ",", "impute_function", "=", "impute_function", ",", "profile", "=", "self", ".", "feature_extractor", ".", "profile", ",", "profiling_filename", "=", "self", ".", "feature_extractor", ".", "profiling_filename", ",", "profiling_sorting", "=", "self", ".", "feature_extractor", ".", "profiling_sorting", ")", "relevant_feature_extractor", ".", "set_timeseries_container", "(", "self", ".", "feature_extractor", ".", "timeseries_container", ")", "X_augmented", "=", "relevant_feature_extractor", ".", "transform", "(", "X", ")", "if", "self", ".", "filter_only_tsfresh_features", ":", "return", "X_augmented", ".", "copy", "(", ")", ".", "loc", "[", ":", ",", "self", ".", "feature_selector", ".", "relevant_features", "+", "X", ".", "columns", ".", "tolist", "(", ")", "]", "else", ":", "return", "X_augmented", ".", "copy", "(", ")", ".", "loc", "[", ":", ",", "self", ".", "feature_selector", ".", "relevant_features", "]" ]
63.692308
44.076923
def formats(self, value): """ Setter for **self.__formats** attribute. :param value: Attribute value. :type value: FormatsTree """ if value is not None: assert type(value) is FormatsTree, "'{0}' attribute: '{1}' type is not 'FormatsTree'!".format( "formats", value) self.__formats = value
[ "def", "formats", "(", "self", ",", "value", ")", ":", "if", "value", "is", "not", "None", ":", "assert", "type", "(", "value", ")", "is", "FormatsTree", ",", "\"'{0}' attribute: '{1}' type is not 'FormatsTree'!\"", ".", "format", "(", "\"formats\"", ",", "value", ")", "self", ".", "__formats", "=", "value" ]
30.25
17.25
async def executescript(self, sql_script: str) -> None: """Execute a user script.""" await self._execute(self._cursor.executescript, sql_script)
[ "async", "def", "executescript", "(", "self", ",", "sql_script", ":", "str", ")", "->", "None", ":", "await", "self", ".", "_execute", "(", "self", ".", "_cursor", ".", "executescript", ",", "sql_script", ")" ]
52.666667
14
def simxGetObjectParent(clientID, childObjectHandle, operationMode): ''' Please have a look at the function description/documentation in the V-REP user manual ''' parentObjectHandle = ct.c_int() return c_GetObjectParent(clientID, childObjectHandle, ct.byref(parentObjectHandle), operationMode), parentObjectHandle.value
[ "def", "simxGetObjectParent", "(", "clientID", ",", "childObjectHandle", ",", "operationMode", ")", ":", "parentObjectHandle", "=", "ct", ".", "c_int", "(", ")", "return", "c_GetObjectParent", "(", "clientID", ",", "childObjectHandle", ",", "ct", ".", "byref", "(", "parentObjectHandle", ")", ",", "operationMode", ")", ",", "parentObjectHandle", ".", "value" ]
47.714286
39.428571
def load_weight(weight_file: str, weight_name: str, weight_file_cache: Dict[str, Dict]) -> mx.nd.NDArray: """ Load weight from a file or the cache if it was loaded before. :param weight_file: Weight file. :param weight_name: Weight name. :param weight_file_cache: Cache of loaded files. :return: Loaded weight. """ logger.info('Loading input weight file: %s', weight_file) if weight_file.endswith(".npy"): return np.load(weight_file) elif weight_file.endswith(".npz"): if weight_file not in weight_file_cache: weight_file_cache[weight_file] = np.load(weight_file) return weight_file_cache[weight_file][weight_name] else: if weight_file not in weight_file_cache: weight_file_cache[weight_file] = mx.nd.load(weight_file) return weight_file_cache[weight_file]['arg:%s' % weight_name].asnumpy()
[ "def", "load_weight", "(", "weight_file", ":", "str", ",", "weight_name", ":", "str", ",", "weight_file_cache", ":", "Dict", "[", "str", ",", "Dict", "]", ")", "->", "mx", ".", "nd", ".", "NDArray", ":", "logger", ".", "info", "(", "'Loading input weight file: %s'", ",", "weight_file", ")", "if", "weight_file", ".", "endswith", "(", "\".npy\"", ")", ":", "return", "np", ".", "load", "(", "weight_file", ")", "elif", "weight_file", ".", "endswith", "(", "\".npz\"", ")", ":", "if", "weight_file", "not", "in", "weight_file_cache", ":", "weight_file_cache", "[", "weight_file", "]", "=", "np", ".", "load", "(", "weight_file", ")", "return", "weight_file_cache", "[", "weight_file", "]", "[", "weight_name", "]", "else", ":", "if", "weight_file", "not", "in", "weight_file_cache", ":", "weight_file_cache", "[", "weight_file", "]", "=", "mx", ".", "nd", ".", "load", "(", "weight_file", ")", "return", "weight_file_cache", "[", "weight_file", "]", "[", "'arg:%s'", "%", "weight_name", "]", ".", "asnumpy", "(", ")" ]
41.318182
14.954545
def _check_for_supported_vendor(self, profile): """Checks if the port belongs to a supported vendor. Returns True for supported_pci_devs. """ vendor_info = profile.get('pci_vendor_info') if not vendor_info: return False if vendor_info not in self.supported_pci_devs: return False return True
[ "def", "_check_for_supported_vendor", "(", "self", ",", "profile", ")", ":", "vendor_info", "=", "profile", ".", "get", "(", "'pci_vendor_info'", ")", "if", "not", "vendor_info", ":", "return", "False", "if", "vendor_info", "not", "in", "self", ".", "supported_pci_devs", ":", "return", "False", "return", "True" ]
32.909091
13
def acquire(self): """ Attempt to acquire the lock every `delay` seconds until the lock is acquired or until `timeout` has expired. Raises FileLockTimeout if the timeout is exceeded. Errors opening the lock file (other than if it exists) are passed through. """ self.lock = retry_call( self._attempt, retries=float('inf'), trap=zc.lockfile.LockError, cleanup=functools.partial(self._check_timeout, timing.Stopwatch()), )
[ "def", "acquire", "(", "self", ")", ":", "self", ".", "lock", "=", "retry_call", "(", "self", ".", "_attempt", ",", "retries", "=", "float", "(", "'inf'", ")", ",", "trap", "=", "zc", ".", "lockfile", ".", "LockError", ",", "cleanup", "=", "functools", ".", "partial", "(", "self", ".", "_check_timeout", ",", "timing", ".", "Stopwatch", "(", ")", ")", ",", ")" ]
32.9375
19.1875
def send(self, s): """ Send data to the channel. Returns the number of bytes sent, or 0 if the channel stream is closed. Applications are responsible for checking that all data has been sent: if only some of the data was transmitted, the application needs to attempt delivery of the remaining data. :param str s: data to send :return: number of bytes actually sent, as an `int` :raises socket.timeout: if no data could be sent before the timeout set by `settimeout`. """ m = Message() m.add_byte(cMSG_CHANNEL_DATA) m.add_int(self.remote_chanid) return self._send(s, m)
[ "def", "send", "(", "self", ",", "s", ")", ":", "m", "=", "Message", "(", ")", "m", ".", "add_byte", "(", "cMSG_CHANNEL_DATA", ")", "m", ".", "add_int", "(", "self", ".", "remote_chanid", ")", "return", "self", ".", "_send", "(", "s", ",", "m", ")" ]
35.736842
22.052632
def set_group_add_request(self, *, flag, type, approve=True, reason=None): """ Handle group join requests and group member invitations ------------ :param str flag: flag of the join request (obtained from the reported event data) :param str type: `add` or `invite`, the request type (must match the `sub_type` field of the reported event) :param bool approve: whether to approve the request/invitation :param str reason: reason for rejection (only effective when rejecting) :return: None :rtype: None """ return super().__getattr__('set_group_add_request') \ (flag=flag, type=type, approve=approve, reason=reason)
[ "def", "set_group_add_request", "(", "self", ",", "*", ",", "flag", ",", "type", ",", "approve", "=", "True", ",", "reason", "=", "None", ")", ":", "return", "super", "(", ")", ".", "__getattr__", "(", "'set_group_add_request'", ")", "(", "flag", "=", "flag", ",", "type", "=", "type", ",", "approve", "=", "approve", ",", "reason", "=", "reason", ")" ]
33.533333
18.733333
def get_users(self, channel=None): """get list of users and channel access information (helper) :param channel: number [1:7] :return: name: (str) uid: (int) channel: (int) access: callback (bool) link_auth (bool) ipmi_msg (bool) privilege_level: (str)[callback, user, operator, administrator, proprietary, no_access] """ if channel is None: channel = self.get_network_channel() names = {} max_ids = self.get_channel_max_user_count(channel) for uid in range(1, max_ids + 1): name = self.get_user_name(uid=uid) if name is not None: names[uid] = self.get_user(uid=uid, channel=channel) return names
[ "def", "get_users", "(", "self", ",", "channel", "=", "None", ")", ":", "if", "channel", "is", "None", ":", "channel", "=", "self", ".", "get_network_channel", "(", ")", "names", "=", "{", "}", "max_ids", "=", "self", ".", "get_channel_max_user_count", "(", "channel", ")", "for", "uid", "in", "range", "(", "1", ",", "max_ids", "+", "1", ")", ":", "name", "=", "self", ".", "get_user_name", "(", "uid", "=", "uid", ")", "if", "name", "is", "not", "None", ":", "names", "[", "uid", "]", "=", "self", ".", "get_user", "(", "uid", "=", "uid", ",", "channel", "=", "channel", ")", "return", "names" ]
33.92
15.8
def get_version_of_tools(): """ get versions of tools reactor is using (specified in constants.TOOLS_USED) :returns list of dicts, [{"name": "docker-py", "version": "1.2.3"}, ...] """ response = [] for tool in TOOLS_USED: pkg_name = tool["pkg_name"] try: tool_module = import_module(pkg_name) except ImportError as ex: logger.warning("can't import module %s: %r", pkg_name, ex) else: version = getattr(tool_module, "__version__", None) if version is None: logger.warning("tool %s doesn't have __version__", pkg_name) else: response.append({ "name": tool.get("display_name", pkg_name), "version": version, "path": tool_module.__file__, }) return response
[ "def", "get_version_of_tools", "(", ")", ":", "response", "=", "[", "]", "for", "tool", "in", "TOOLS_USED", ":", "pkg_name", "=", "tool", "[", "\"pkg_name\"", "]", "try", ":", "tool_module", "=", "import_module", "(", "pkg_name", ")", "except", "ImportError", "as", "ex", ":", "logger", ".", "warning", "(", "\"can't import module %s: %r\"", ",", "pkg_name", ",", "ex", ")", "else", ":", "version", "=", "getattr", "(", "tool_module", ",", "\"__version__\"", ",", "None", ")", "if", "version", "is", "None", ":", "logger", ".", "warning", "(", "\"tool %s doesn't have __version__\"", ",", "pkg_name", ")", "else", ":", "response", ".", "append", "(", "{", "\"name\"", ":", "tool", ".", "get", "(", "\"display_name\"", ",", "pkg_name", ")", ",", "\"version\"", ":", "version", ",", "\"path\"", ":", "tool_module", ".", "__file__", ",", "}", ")", "return", "response" ]
35.791667
18.458333
def read(self, max_length): """ Reads data from the TLS-wrapped socket :param max_length: The number of bytes to read - output may be less than this :raises: socket.socket - when a non-TLS socket error occurs oscrypto.errors.TLSError - when a TLS-related error occurs oscrypto.errors.TLSDisconnectError - when the connection disconnects oscrypto.errors.TLSGracefulDisconnectError - when the remote end gracefully closed the connection ValueError - when any of the parameters contain an invalid value TypeError - when any of the parameters are of the wrong type OSError - when an error is returned by the OS crypto library :return: A byte string of the data read """ if not isinstance(max_length, int_types): raise TypeError(pretty_message( ''' max_length must be an integer, not %s ''', type_name(max_length) )) if self._session_context is None: # Even if the session is closed, we can use # buffered data to respond to read requests if self._decrypted_bytes != b'': output = self._decrypted_bytes self._decrypted_bytes = b'' return output self._raise_closed() buffered_length = len(self._decrypted_bytes) # If we already have enough buffered data, just use that if buffered_length >= max_length: output = self._decrypted_bytes[0:max_length] self._decrypted_bytes = self._decrypted_bytes[max_length:] return output # Don't block if we have buffered data available, since it is ok to # return less than the max_length if buffered_length > 0 and not self.select_read(0): output = self._decrypted_bytes self._decrypted_bytes = b'' return output # Only read enough to get the requested amount when # combined with buffered data to_read = max_length - len(self._decrypted_bytes) read_buffer = buffer_from_bytes(to_read) processed_pointer = new(Security, 'size_t *') result = Security.SSLRead( self._session_context, read_buffer, to_read, processed_pointer ) if self._exception is not None: exception = self._exception self._exception = None raise exception if result and result not in set([SecurityConst.errSSLWouldBlock, SecurityConst.errSSLClosedGraceful]): handle_sec_error(result, TLSError) if result and result == SecurityConst.errSSLClosedGraceful: self._gracefully_closed = True self._shutdown(False) self._raise_closed() bytes_read = deref(processed_pointer) output = self._decrypted_bytes + bytes_from_buffer(read_buffer, bytes_read) self._decrypted_bytes = output[max_length:] return output[0:max_length]
[ "def", "read", "(", "self", ",", "max_length", ")", ":", "if", "not", "isinstance", "(", "max_length", ",", "int_types", ")", ":", "raise", "TypeError", "(", "pretty_message", "(", "'''\n max_length must be an integer, not %s\n '''", ",", "type_name", "(", "max_length", ")", ")", ")", "if", "self", ".", "_session_context", "is", "None", ":", "# Even if the session is closed, we can use", "# buffered data to respond to read requests", "if", "self", ".", "_decrypted_bytes", "!=", "b''", ":", "output", "=", "self", ".", "_decrypted_bytes", "self", ".", "_decrypted_bytes", "=", "b''", "return", "output", "self", ".", "_raise_closed", "(", ")", "buffered_length", "=", "len", "(", "self", ".", "_decrypted_bytes", ")", "# If we already have enough buffered data, just use that", "if", "buffered_length", ">=", "max_length", ":", "output", "=", "self", ".", "_decrypted_bytes", "[", "0", ":", "max_length", "]", "self", ".", "_decrypted_bytes", "=", "self", ".", "_decrypted_bytes", "[", "max_length", ":", "]", "return", "output", "# Don't block if we have buffered data available, since it is ok to", "# return less than the max_length", "if", "buffered_length", ">", "0", "and", "not", "self", ".", "select_read", "(", "0", ")", ":", "output", "=", "self", ".", "_decrypted_bytes", "self", ".", "_decrypted_bytes", "=", "b''", "return", "output", "# Only read enough to get the requested amount when", "# combined with buffered data", "to_read", "=", "max_length", "-", "len", "(", "self", ".", "_decrypted_bytes", ")", "read_buffer", "=", "buffer_from_bytes", "(", "to_read", ")", "processed_pointer", "=", "new", "(", "Security", ",", "'size_t *'", ")", "result", "=", "Security", ".", "SSLRead", "(", "self", ".", "_session_context", ",", "read_buffer", ",", "to_read", ",", "processed_pointer", ")", "if", "self", ".", "_exception", "is", "not", "None", ":", "exception", "=", "self", ".", "_exception", "self", ".", "_exception", "=", "None", "raise", "exception", "if", "result", "and", "result", "not", "in", "set", "(", "[", "SecurityConst", ".", "errSSLWouldBlock", ",", "SecurityConst", ".", "errSSLClosedGraceful", "]", ")", ":", "handle_sec_error", "(", "result", ",", "TLSError", ")", "if", "result", "and", "result", "==", "SecurityConst", ".", "errSSLClosedGraceful", ":", "self", ".", "_gracefully_closed", "=", "True", "self", ".", "_shutdown", "(", "False", ")", "self", ".", "_raise_closed", "(", ")", "bytes_read", "=", "deref", "(", "processed_pointer", ")", "output", "=", "self", ".", "_decrypted_bytes", "+", "bytes_from_buffer", "(", "read_buffer", ",", "bytes_read", ")", "self", ".", "_decrypted_bytes", "=", "output", "[", "max_length", ":", "]", "return", "output", "[", "0", ":", "max_length", "]" ]
37.341463
19.731707
def getWindowByPID(self, pid, order=0): """ Returns a handle for the first window that matches the provided PID """ if pid <= 0: return None EnumWindowsProc = ctypes.WINFUNCTYPE( ctypes.c_bool, ctypes.POINTER(ctypes.c_int), ctypes.py_object) def callback(hwnd, context): if ctypes.windll.user32.IsWindowVisible(hwnd): pid = ctypes.c_ulong() ctypes.windll.user32.GetWindowThreadProcessId(hwnd, ctypes.byref(pid)) if context["pid"] == int(pid.value) and not context["handle"]: if context["order"] > 0: context["order"] -= 1 else: context["handle"] = hwnd return True data = {"pid": pid, "handle": None, "order": order} ctypes.windll.user32.EnumWindows(EnumWindowsProc(callback), ctypes.py_object(data)) return data["handle"]
[ "def", "getWindowByPID", "(", "self", ",", "pid", ",", "order", "=", "0", ")", ":", "if", "pid", "<=", "0", ":", "return", "None", "EnumWindowsProc", "=", "ctypes", ".", "WINFUNCTYPE", "(", "ctypes", ".", "c_bool", ",", "ctypes", ".", "POINTER", "(", "ctypes", ".", "c_int", ")", ",", "ctypes", ".", "py_object", ")", "def", "callback", "(", "hwnd", ",", "context", ")", ":", "if", "ctypes", ".", "windll", ".", "user32", ".", "IsWindowVisible", "(", "hwnd", ")", ":", "pid", "=", "ctypes", ".", "c_ulong", "(", ")", "ctypes", ".", "windll", ".", "user32", ".", "GetWindowThreadProcessId", "(", "hwnd", ",", "ctypes", ".", "byref", "(", "pid", ")", ")", "if", "context", "[", "\"pid\"", "]", "==", "int", "(", "pid", ".", "value", ")", "and", "not", "context", "[", "\"handle\"", "]", ":", "if", "context", "[", "\"order\"", "]", ">", "0", ":", "context", "[", "\"order\"", "]", "-=", "1", "else", ":", "context", "[", "\"handle\"", "]", "=", "hwnd", "return", "True", "data", "=", "{", "\"pid\"", ":", "pid", ",", "\"handle\"", ":", "None", ",", "\"order\"", ":", "order", "}", "ctypes", ".", "windll", ".", "user32", ".", "EnumWindows", "(", "EnumWindowsProc", "(", "callback", ")", ",", "ctypes", ".", "py_object", "(", "data", ")", ")", "return", "data", "[", "\"handle\"", "]" ]
46
14.619048
def sset_loop(args): ''' Loop over all sample sets in a workspace, performing a func ''' # Ensure that the requested action is a valid fiss_cmd fiss_func = __cmd_to_func(args.action) if not fiss_func: eprint("invalid FISS cmd '" + args.action + "'") return 1 # First get the sample set names r = fapi.get_entities(args.project, args.workspace, "sample_set") fapi._check_response_code(r, 200) sample_sets = [entity['name'] for entity in r.json()] args.entity_type = "sample_set" for sset in sample_sets: print('\n# {0}::{1}/{2} {3}'.format(args.project, args.workspace, sset, args.action)) args.entity = sset # Note how this code is similar to how args.func is called in # main so it may make sense to try to a common method for both try: result = fiss_func(args) except Exception as e: status = __pretty_print_fc_exception(e) if not args.keep_going: return status printToCLI(result) return 0
[ "def", "sset_loop", "(", "args", ")", ":", "# Ensure that the requested action is a valid fiss_cmd", "fiss_func", "=", "__cmd_to_func", "(", "args", ".", "action", ")", "if", "not", "fiss_func", ":", "eprint", "(", "\"invalid FISS cmd '\"", "+", "args", ".", "action", "+", "\"'\"", ")", "return", "1", "# First get the sample set names", "r", "=", "fapi", ".", "get_entities", "(", "args", ".", "project", ",", "args", ".", "workspace", ",", "\"sample_set\"", ")", "fapi", ".", "_check_response_code", "(", "r", ",", "200", ")", "sample_sets", "=", "[", "entity", "[", "'name'", "]", "for", "entity", "in", "r", ".", "json", "(", ")", "]", "args", ".", "entity_type", "=", "\"sample_set\"", "for", "sset", "in", "sample_sets", ":", "print", "(", "'\\n# {0}::{1}/{2} {3}'", ".", "format", "(", "args", ".", "project", ",", "args", ".", "workspace", ",", "sset", ",", "args", ".", "action", ")", ")", "args", ".", "entity", "=", "sset", "# Note how this code is similar to how args.func is called in", "# main so it may make sense to try to a common method for both", "try", ":", "result", "=", "fiss_func", "(", "args", ")", "except", "Exception", "as", "e", ":", "status", "=", "__pretty_print_fc_exception", "(", "e", ")", "if", "not", "args", ".", "keep_going", ":", "return", "status", "printToCLI", "(", "result", ")", "return", "0" ]
35.933333
20
def clean(self, text, **kwargs): """Create a more clean, but still user-facing version of an instance of the type.""" text = stringify(text) if text is not None: return self.clean_text(text, **kwargs)
[ "def", "clean", "(", "self", ",", "text", ",", "*", "*", "kwargs", ")", ":", "text", "=", "stringify", "(", "text", ")", "if", "text", "is", "not", "None", ":", "return", "self", ".", "clean_text", "(", "text", ",", "*", "*", "kwargs", ")" ]
39.833333
6.666667
def check(self): """check whether all attributes are set and have the right dtype""" for name, valItem, dtype in self.values: val = valItem.text() if dtype: try: val = dtype(val) except: msgBox = QtWidgets.QMessageBox() msgBox.setText( 'attribute %s has not the right dtype(%s)' % (name, str(dtype))) msgBox.exec_() self.args[name] = val self.accept()
[ "def", "check", "(", "self", ")", ":", "for", "name", ",", "valItem", ",", "dtype", "in", "self", ".", "values", ":", "val", "=", "valItem", ".", "text", "(", ")", "if", "dtype", ":", "try", ":", "val", "=", "dtype", "(", "val", ")", "except", ":", "msgBox", "=", "QtWidgets", ".", "QMessageBox", "(", ")", "msgBox", ".", "setText", "(", "'attribute %s has not the right dtype(%s)'", "%", "(", "name", ",", "str", "(", "dtype", ")", ")", ")", "msgBox", ".", "exec_", "(", ")", "self", ".", "args", "[", "name", "]", "=", "val", "self", ".", "accept", "(", ")" ]
38.266667
11.666667
def plot(self, xmin=-1, xmax=1, center=0, resolution_outside=20, resolution_inside=200): """ Return arrays x, y for plotting the Heaviside function H(x-`center`) on [`xmin`, `xmax`]. For the exact Heaviside function, ``x = [xmin, center, xmax]; y = [0, 0, 1]``, while for the smoothed version, the ``x`` array is computed on basis of the `eps` parameter, with `resolution_outside` intervals on each side of the smoothed region and `resolution_inside` intervals in the smoothed region. """ if self.eps == 0: return [xmin, center, xmax], [0, 0, xmax] else: n = float(resolution_inside)/self.eps x = np.concatenate(( np.linspace(xmin, center-self.eps, resolution_outside+1), np.linspace(center-self.eps, center+self.eps, n+1), np.linspace(center+self.eps, xmax, resolution_outside+1))) y = self(x) return x, y
[ "def", "plot", "(", "self", ",", "xmin", "=", "-", "1", ",", "xmax", "=", "1", ",", "center", "=", "0", ",", "resolution_outside", "=", "20", ",", "resolution_inside", "=", "200", ")", ":", "if", "self", ".", "eps", "==", "0", ":", "return", "[", "xmin", ",", "center", ",", "xmax", "]", ",", "[", "0", ",", "0", ",", "xmax", "]", "else", ":", "n", "=", "float", "(", "resolution_inside", ")", "/", "self", ".", "eps", "x", "=", "np", ".", "concatenate", "(", "(", "np", ".", "linspace", "(", "xmin", ",", "center", "-", "self", ".", "eps", ",", "resolution_outside", "+", "1", ")", ",", "np", ".", "linspace", "(", "center", "-", "self", ".", "eps", ",", "center", "+", "self", ".", "eps", ",", "n", "+", "1", ")", ",", "np", ".", "linspace", "(", "center", "+", "self", ".", "eps", ",", "xmax", ",", "resolution_outside", "+", "1", ")", ")", ")", "y", "=", "self", "(", "x", ")", "return", "x", ",", "y" ]
45.545455
17
def unset(self, key): """ Delete object indexed by <key> """ try: try: self.bucket.delete(key) except couchbase.exception.MemcachedError, inst: if str(inst) == "Memcached error #1: Not found": # for some reason the py cb client raises an error when # a key isnt found, instead we just want a none value. return else: raise except: raise except: raise
[ "def", "unset", "(", "self", ",", "key", ")", ":", "try", ":", "try", ":", "self", ".", "bucket", ".", "delete", "(", "key", ")", "except", "couchbase", ".", "exception", ".", "MemcachedError", ",", "inst", ":", "if", "str", "(", "inst", ")", "==", "\"Memcached error #1: Not found\"", ":", "# for some reason the py cb client raises an error when", "# a key isnt found, instead we just want a none value.", "return", "else", ":", "raise", "except", ":", "raise", "except", ":", "raise" ]
23.736842
21.315789
def drag_drop_cb(self, viewer, urls): """Punt drag-drops to the ginga shell. """ channel = self.fv.get_current_channel() if channel is None: return self.fv.open_uris(urls, chname=channel.name, bulk_add=True) return True
[ "def", "drag_drop_cb", "(", "self", ",", "viewer", ",", "urls", ")", ":", "channel", "=", "self", ".", "fv", ".", "get_current_channel", "(", ")", "if", "channel", "is", "None", ":", "return", "self", ".", "fv", ".", "open_uris", "(", "urls", ",", "chname", "=", "channel", ".", "name", ",", "bulk_add", "=", "True", ")", "return", "True" ]
34
11.625
def validate(style, value, vectorized=True): """ Validates a style and associated value. Arguments --------- style: str The style to validate (e.g. 'color', 'size' or 'marker') value: The style value to validate vectorized: bool Whether validator should allow vectorized setting Returns ------- valid: boolean or None If validation is supported returns boolean, otherwise None """ validator = get_validator(style) if validator is None: return None if isinstance(value, (np.ndarray, list)) and vectorized: return all(validator(v) for v in value) try: valid = validator(value) return False if valid == False else True except: return False
[ "def", "validate", "(", "style", ",", "value", ",", "vectorized", "=", "True", ")", ":", "validator", "=", "get_validator", "(", "style", ")", "if", "validator", "is", "None", ":", "return", "None", "if", "isinstance", "(", "value", ",", "(", "np", ".", "ndarray", ",", "list", ")", ")", "and", "vectorized", ":", "return", "all", "(", "validator", "(", "v", ")", "for", "v", "in", "value", ")", "try", ":", "valid", "=", "validator", "(", "value", ")", "return", "False", "if", "valid", "==", "False", "else", "True", "except", ":", "return", "False" ]
26.535714
18.678571
def galcenrect_to_XYZ_jac(*args,**kwargs): """ NAME: galcenrect_to_XYZ_jac PURPOSE: calculate the Jacobian of the Galactocentric rectangular to Galactic coordinates INPUT: X,Y,Z- Galactocentric rectangular coordinates vX, vY, vZ- Galactocentric rectangular velocities if 6 inputs: X,Y,Z,vX,vY,vZ if 3: X,Y,Z Xsun - cylindrical distance to the GC Zsun - Sun's height above the midplane OUTPUT: jacobian d(galcen.)/d(Galactic) HISTORY: 2013-12-09 - Written - Bovy (IAS) """ Xsun= kwargs.get('Xsun',1.) dgc= nu.sqrt(Xsun**2.+kwargs.get('Zsun',0.)**2.) costheta, sintheta= Xsun/dgc, kwargs.get('Zsun',0.)/dgc out= sc.zeros((6,6)) out[0,0]= -costheta out[0,2]= -sintheta out[1,1]= 1. out[2,0]= -nu.sign(Xsun)*sintheta out[2,2]= nu.sign(Xsun)*costheta if len(args) == 3: return out[:3,:3] out[3,3]= -costheta out[3,5]= -sintheta out[4,4]= 1. out[5,3]= -nu.sign(Xsun)*sintheta out[5,5]= nu.sign(Xsun)*costheta return out
[ "def", "galcenrect_to_XYZ_jac", "(", "*", "args", ",", "*", "*", "kwargs", ")", ":", "Xsun", "=", "kwargs", ".", "get", "(", "'Xsun'", ",", "1.", ")", "dgc", "=", "nu", ".", "sqrt", "(", "Xsun", "**", "2.", "+", "kwargs", ".", "get", "(", "'Zsun'", ",", "0.", ")", "**", "2.", ")", "costheta", ",", "sintheta", "=", "Xsun", "/", "dgc", ",", "kwargs", ".", "get", "(", "'Zsun'", ",", "0.", ")", "/", "dgc", "out", "=", "sc", ".", "zeros", "(", "(", "6", ",", "6", ")", ")", "out", "[", "0", ",", "0", "]", "=", "-", "costheta", "out", "[", "0", ",", "2", "]", "=", "-", "sintheta", "out", "[", "1", ",", "1", "]", "=", "1.", "out", "[", "2", ",", "0", "]", "=", "-", "nu", ".", "sign", "(", "Xsun", ")", "*", "sintheta", "out", "[", "2", ",", "2", "]", "=", "nu", ".", "sign", "(", "Xsun", ")", "*", "costheta", "if", "len", "(", "args", ")", "==", "3", ":", "return", "out", "[", ":", "3", ",", ":", "3", "]", "out", "[", "3", ",", "3", "]", "=", "-", "costheta", "out", "[", "3", ",", "5", "]", "=", "-", "sintheta", "out", "[", "4", ",", "4", "]", "=", "1.", "out", "[", "5", ",", "3", "]", "=", "-", "nu", ".", "sign", "(", "Xsun", ")", "*", "sintheta", "out", "[", "5", ",", "5", "]", "=", "nu", ".", "sign", "(", "Xsun", ")", "*", "costheta", "return", "out" ]
21.854167
21.645833
def check_compressed_file_type(filepath): """Check if filename is a compressed file supported by the tool. This function uses magic numbers (first four bytes) to determine the type of the file. Supported types are 'gz' and 'bz2'. When the filetype is not supported, the function returns `None`. :param filepath: path to the file :returns: 'gz' or 'bz2'; `None` if the type is not supported """ def compressed_file_type(content): magic_dict = { b'\x1f\x8b\x08': 'gz', b'\x42\x5a\x68': 'bz2', b'PK\x03\x04': 'zip' } for magic, filetype in magic_dict.items(): if content.startswith(magic): return filetype return None with open(filepath, mode='rb') as f: magic_number = f.read(4) return compressed_file_type(magic_number)
[ "def", "check_compressed_file_type", "(", "filepath", ")", ":", "def", "compressed_file_type", "(", "content", ")", ":", "magic_dict", "=", "{", "b'\\x1f\\x8b\\x08'", ":", "'gz'", ",", "b'\\x42\\x5a\\x68'", ":", "'bz2'", ",", "b'PK\\x03\\x04'", ":", "'zip'", "}", "for", "magic", ",", "filetype", "in", "magic_dict", ".", "items", "(", ")", ":", "if", "content", ".", "startswith", "(", "magic", ")", ":", "return", "filetype", "return", "None", "with", "open", "(", "filepath", ",", "mode", "=", "'rb'", ")", "as", "f", ":", "magic_number", "=", "f", ".", "read", "(", "4", ")", "return", "compressed_file_type", "(", "magic_number", ")" ]
31.185185
17.37037
def _jobs(): ''' Return the currently configured jobs. ''' response = salt.utils.http.query( "{0}/scheduler/jobs".format(_base_url()), decode_type='json', decode=True, ) jobs = {} for job in response['dict']: jobs[job.pop('name')] = job return jobs
[ "def", "_jobs", "(", ")", ":", "response", "=", "salt", ".", "utils", ".", "http", ".", "query", "(", "\"{0}/scheduler/jobs\"", ".", "format", "(", "_base_url", "(", ")", ")", ",", "decode_type", "=", "'json'", ",", "decode", "=", "True", ",", ")", "jobs", "=", "{", "}", "for", "job", "in", "response", "[", "'dict'", "]", ":", "jobs", "[", "job", ".", "pop", "(", "'name'", ")", "]", "=", "job", "return", "jobs" ]
23.076923
18.461538
def percentage(self, percentage): """ Sets the percentage of this OrderLineItemTax. The percentage of the tax, as a string representation of a decimal number. A value of `7.25` corresponds to a percentage of 7.25%. :param percentage: The percentage of this OrderLineItemTax. :type: str """ if percentage is None: raise ValueError("Invalid value for `percentage`, must not be `None`") if len(percentage) > 10: raise ValueError("Invalid value for `percentage`, length must be less than `10`") self._percentage = percentage
[ "def", "percentage", "(", "self", ",", "percentage", ")", ":", "if", "percentage", "is", "None", ":", "raise", "ValueError", "(", "\"Invalid value for `percentage`, must not be `None`\"", ")", "if", "len", "(", "percentage", ")", ">", "10", ":", "raise", "ValueError", "(", "\"Invalid value for `percentage`, length must be less than `10`\"", ")", "self", ".", "_percentage", "=", "percentage" ]
40.4
26.933333
def load_config_file(self): """Parse configuration file and get config values.""" config_parser = SafeConfigParser() config_parser.read(self.CONFIG_FILE) if config_parser.has_section('handlers'): self._config['handlers_package'] = config_parser.get('handlers', 'package') if config_parser.has_section('auth'): self._config['consumer_key'] = config_parser.get('auth', 'consumer_key') self._config['consumer_secret'] = config_parser.get('auth', 'consumer_secret') self._config['token_key'] = config_parser.get('auth', 'token_key') self._config['token_secret'] = config_parser.get('auth', 'token_secret') if config_parser.has_section('stream'): self._config['user_stream'] = config_parser.get('stream', 'user_stream').lower() == 'true' else: self._config['user_stream'] = False if config_parser.has_option('general', 'min_seconds_between_errors'): self._config['min_seconds_between_errors'] = config_parser.get('general', 'min_seconds_between_errors') if config_parser.has_option('general', 'sleep_seconds_on_consecutive_errors'): self._config['sleep_seconds_on_consecutive_errors'] = config_parser.get( 'general', 'sleep_seconds_on_consecutive_errors')
[ "def", "load_config_file", "(", "self", ")", ":", "config_parser", "=", "SafeConfigParser", "(", ")", "config_parser", ".", "read", "(", "self", ".", "CONFIG_FILE", ")", "if", "config_parser", ".", "has_section", "(", "'handlers'", ")", ":", "self", ".", "_config", "[", "'handlers_package'", "]", "=", "config_parser", ".", "get", "(", "'handlers'", ",", "'package'", ")", "if", "config_parser", ".", "has_section", "(", "'auth'", ")", ":", "self", ".", "_config", "[", "'consumer_key'", "]", "=", "config_parser", ".", "get", "(", "'auth'", ",", "'consumer_key'", ")", "self", ".", "_config", "[", "'consumer_secret'", "]", "=", "config_parser", ".", "get", "(", "'auth'", ",", "'consumer_secret'", ")", "self", ".", "_config", "[", "'token_key'", "]", "=", "config_parser", ".", "get", "(", "'auth'", ",", "'token_key'", ")", "self", ".", "_config", "[", "'token_secret'", "]", "=", "config_parser", ".", "get", "(", "'auth'", ",", "'token_secret'", ")", "if", "config_parser", ".", "has_section", "(", "'stream'", ")", ":", "self", ".", "_config", "[", "'user_stream'", "]", "=", "config_parser", ".", "get", "(", "'stream'", ",", "'user_stream'", ")", ".", "lower", "(", ")", "==", "'true'", "else", ":", "self", ".", "_config", "[", "'user_stream'", "]", "=", "False", "if", "config_parser", ".", "has_option", "(", "'general'", ",", "'min_seconds_between_errors'", ")", ":", "self", ".", "_config", "[", "'min_seconds_between_errors'", "]", "=", "config_parser", ".", "get", "(", "'general'", ",", "'min_seconds_between_errors'", ")", "if", "config_parser", ".", "has_option", "(", "'general'", ",", "'sleep_seconds_on_consecutive_errors'", ")", ":", "self", ".", "_config", "[", "'sleep_seconds_on_consecutive_errors'", "]", "=", "config_parser", ".", "get", "(", "'general'", ",", "'sleep_seconds_on_consecutive_errors'", ")" ]
53.08
31.44
def contains_array(store, path=None): """Return True if the store contains an array at the given logical path.""" path = normalize_storage_path(path) prefix = _path_to_prefix(path) key = prefix + array_meta_key return key in store
[ "def", "contains_array", "(", "store", ",", "path", "=", "None", ")", ":", "path", "=", "normalize_storage_path", "(", "path", ")", "prefix", "=", "_path_to_prefix", "(", "path", ")", "key", "=", "prefix", "+", "array_meta_key", "return", "key", "in", "store" ]
40.833333
5.666667
def plot_residual(self, x, y1, y2, label1='Raw data', label2='Fit/theory', xlabel=None, ylabel=None, show_legend=True, **kws): """plot after clearing current plot """ panel = self.get_panel('top') panel.plot(x, y1, label=label1, **kws) panel = self.get_panel('top') panel.oplot(x, y2, label=label2, ylabel=ylabel, show_legend=show_legend, **kws) panel = self.get_panel('bottom') panel.plot(x, (y2-y1), ylabel='residual', show_legend=False, **kws) if xlabel is not None: self.xlabel = xlabel if self.xlabel is not None: self.panel_bot.set_xlabel(self.xlabel)
[ "def", "plot_residual", "(", "self", ",", "x", ",", "y1", ",", "y2", ",", "label1", "=", "'Raw data'", ",", "label2", "=", "'Fit/theory'", ",", "xlabel", "=", "None", ",", "ylabel", "=", "None", ",", "show_legend", "=", "True", ",", "*", "*", "kws", ")", ":", "panel", "=", "self", ".", "get_panel", "(", "'top'", ")", "panel", ".", "plot", "(", "x", ",", "y1", ",", "label", "=", "label1", ",", "*", "*", "kws", ")", "panel", "=", "self", ".", "get_panel", "(", "'top'", ")", "panel", ".", "oplot", "(", "x", ",", "y2", ",", "label", "=", "label2", ",", "ylabel", "=", "ylabel", ",", "show_legend", "=", "show_legend", ",", "*", "*", "kws", ")", "panel", "=", "self", ".", "get_panel", "(", "'bottom'", ")", "panel", ".", "plot", "(", "x", ",", "(", "y2", "-", "y1", ")", ",", "ylabel", "=", "'residual'", ",", "show_legend", "=", "False", ",", "*", "*", "kws", ")", "if", "xlabel", "is", "not", "None", ":", "self", ".", "xlabel", "=", "xlabel", "if", "self", ".", "xlabel", "is", "not", "None", ":", "self", ".", "panel_bot", ".", "set_xlabel", "(", "self", ".", "xlabel", ")" ]
42.1875
16.875
def extract_cookiejar(self): """ Extract cookies that pycurl instance knows. Returns `CookieJar` object. """ # Example of line: # www.google.com\tFALSE\t/accounts/\tFALSE\t0' # \tGoogleAccountsLocale_session\ten # Fields: # * domain # * whether or not all machines under that domain can # read the cookie's information. # * path # * Secure Flag: whether or not a secure connection (HTTPS) # is required to read the cookie. # * exp. timestamp # * name # * value cookiejar = CookieJar() for line in self.curl.getinfo(pycurl.INFO_COOKIELIST): values = line.split('\t') domain = values[0].lower() if domain.startswith('#httponly_'): domain = domain.replace('#httponly_', '') httponly = True else: httponly = False # old # cookies[values[-2]] = values[-1] # new cookie = create_cookie( name=values[5], value=values[6], domain=domain, path=values[2], secure=values[3] == "TRUE", expires=int(values[4]) if values[4] else None, httponly=httponly, ) cookiejar.set_cookie(cookie) return cookiejar
[ "def", "extract_cookiejar", "(", "self", ")", ":", "# Example of line:", "# www.google.com\\tFALSE\\t/accounts/\\tFALSE\\t0'", "# \\tGoogleAccountsLocale_session\\ten", "# Fields:", "# * domain", "# * whether or not all machines under that domain can", "# read the cookie's information.", "# * path", "# * Secure Flag: whether or not a secure connection (HTTPS)", "# is required to read the cookie.", "# * exp. timestamp", "# * name", "# * value", "cookiejar", "=", "CookieJar", "(", ")", "for", "line", "in", "self", ".", "curl", ".", "getinfo", "(", "pycurl", ".", "INFO_COOKIELIST", ")", ":", "values", "=", "line", ".", "split", "(", "'\\t'", ")", "domain", "=", "values", "[", "0", "]", ".", "lower", "(", ")", "if", "domain", ".", "startswith", "(", "'#httponly_'", ")", ":", "domain", "=", "domain", ".", "replace", "(", "'#httponly_'", ",", "''", ")", "httponly", "=", "True", "else", ":", "httponly", "=", "False", "# old", "# cookies[values[-2]] = values[-1]", "# new", "cookie", "=", "create_cookie", "(", "name", "=", "values", "[", "5", "]", ",", "value", "=", "values", "[", "6", "]", ",", "domain", "=", "domain", ",", "path", "=", "values", "[", "2", "]", ",", "secure", "=", "values", "[", "3", "]", "==", "\"TRUE\"", ",", "expires", "=", "int", "(", "values", "[", "4", "]", ")", "if", "values", "[", "4", "]", "else", "None", ",", "httponly", "=", "httponly", ",", ")", "cookiejar", ".", "set_cookie", "(", "cookie", ")", "return", "cookiejar" ]
32.232558
13.627907
def depth_first_iter(self, self_first=True): """ Iterate over nodes below this node, optionally yielding children before self. """ if self_first: yield self for child in list(self.children): for i in child.depth_first_iter(self_first): yield i if not self_first: yield self
[ "def", "depth_first_iter", "(", "self", ",", "self_first", "=", "True", ")", ":", "if", "self_first", ":", "yield", "self", "for", "child", "in", "list", "(", "self", ".", "children", ")", ":", "for", "i", "in", "child", ".", "depth_first_iter", "(", "self_first", ")", ":", "yield", "i", "if", "not", "self_first", ":", "yield", "self" ]
30.833333
14.333333
def _alter_umask(self): """Temporarily alter umask to custom setting, if applicable""" if self.umask is None: yield # nothing to do else: prev_umask = os.umask(self.umask) try: yield finally: os.umask(prev_umask)
[ "def", "_alter_umask", "(", "self", ")", ":", "if", "self", ".", "umask", "is", "None", ":", "yield", "# nothing to do", "else", ":", "prev_umask", "=", "os", ".", "umask", "(", "self", ".", "umask", ")", "try", ":", "yield", "finally", ":", "os", ".", "umask", "(", "prev_umask", ")" ]
30.8
13.2
def _build_verb_statement_mapping(): """Build the mapping between ISI verb strings and INDRA statement classes. Looks up the INDRA statement class name, if any, in a resource file, and resolves this class name to a class. Returns ------- verb_to_statement_type : dict Dictionary mapping verb name to an INDRA statement class """ path_this = os.path.dirname(os.path.abspath(__file__)) map_path = os.path.join(path_this, 'isi_verb_to_indra_statement_type.tsv') with open(map_path, 'r') as f: first_line = True verb_to_statement_type = {} for line in f: if not first_line: line = line[:-1] tokens = line.split('\t') if len(tokens) == 2 and len(tokens[1]) > 0: verb = tokens[0] s_type = tokens[1] try: statement_class = getattr(ist, s_type) verb_to_statement_type[verb] = statement_class except Exception: pass else: first_line = False return verb_to_statement_type
[ "def", "_build_verb_statement_mapping", "(", ")", ":", "path_this", "=", "os", ".", "path", ".", "dirname", "(", "os", ".", "path", ".", "abspath", "(", "__file__", ")", ")", "map_path", "=", "os", ".", "path", ".", "join", "(", "path_this", ",", "'isi_verb_to_indra_statement_type.tsv'", ")", "with", "open", "(", "map_path", ",", "'r'", ")", "as", "f", ":", "first_line", "=", "True", "verb_to_statement_type", "=", "{", "}", "for", "line", "in", "f", ":", "if", "not", "first_line", ":", "line", "=", "line", "[", ":", "-", "1", "]", "tokens", "=", "line", ".", "split", "(", "'\\t'", ")", "if", "len", "(", "tokens", ")", "==", "2", "and", "len", "(", "tokens", "[", "1", "]", ")", ">", "0", ":", "verb", "=", "tokens", "[", "0", "]", "s_type", "=", "tokens", "[", "1", "]", "try", ":", "statement_class", "=", "getattr", "(", "ist", ",", "s_type", ")", "verb_to_statement_type", "[", "verb", "]", "=", "statement_class", "except", "Exception", ":", "pass", "else", ":", "first_line", "=", "False", "return", "verb_to_statement_type" ]
35.875
15.96875
def create_option( self, name, value, label, selected, index, subindex=None, attrs=None): """Patch to use nicer ids.""" index = str(index) if subindex is None else "%s%s%s" % ( index, self.id_separator, subindex) if attrs is None: attrs = {} option_attrs = self.build_attrs( self.attrs, attrs) if self.option_inherits_attrs else {} if selected: option_attrs.update(self.checked_attribute) if 'id' in option_attrs: if self.use_nice_ids: option_attrs['id'] = "%s%s%s" % ( option_attrs['id'], self.id_separator, slugify(label.lower()) ) else: option_attrs['id'] = self.id_for_label( option_attrs['id'], index) return { 'name': name, 'value': value, 'label': label, 'selected': selected, 'index': index, 'attrs': option_attrs, 'type': self.input_type, 'template_name': self.option_template_name, 'wrap_label': True, }
[ "def", "create_option", "(", "self", ",", "name", ",", "value", ",", "label", ",", "selected", ",", "index", ",", "subindex", "=", "None", ",", "attrs", "=", "None", ")", ":", "index", "=", "str", "(", "index", ")", "if", "subindex", "is", "None", "else", "\"%s%s%s\"", "%", "(", "index", ",", "self", ".", "id_separator", ",", "subindex", ")", "if", "attrs", "is", "None", ":", "attrs", "=", "{", "}", "option_attrs", "=", "self", ".", "build_attrs", "(", "self", ".", "attrs", ",", "attrs", ")", "if", "self", ".", "option_inherits_attrs", "else", "{", "}", "if", "selected", ":", "option_attrs", ".", "update", "(", "self", ".", "checked_attribute", ")", "if", "'id'", "in", "option_attrs", ":", "if", "self", ".", "use_nice_ids", ":", "option_attrs", "[", "'id'", "]", "=", "\"%s%s%s\"", "%", "(", "option_attrs", "[", "'id'", "]", ",", "self", ".", "id_separator", ",", "slugify", "(", "label", ".", "lower", "(", ")", ")", ")", "else", ":", "option_attrs", "[", "'id'", "]", "=", "self", ".", "id_for_label", "(", "option_attrs", "[", "'id'", "]", ",", "index", ")", "return", "{", "'name'", ":", "name", ",", "'value'", ":", "value", ",", "'label'", ":", "label", ",", "'selected'", ":", "selected", ",", "'index'", ":", "index", ",", "'attrs'", ":", "option_attrs", ",", "'type'", ":", "self", ".", "input_type", ",", "'template_name'", ":", "self", ".", "option_template_name", ",", "'wrap_label'", ":", "True", ",", "}" ]
35.909091
12.181818
def get_data(filename, subset, url): """Get a dataset from a url with local caching. Parameters ---------- filename : str Name of the file, for caching. subset : str To what subset the file belongs (e.g. 'ray_transform'). Each subset is saved in a separate subfolder. url : str url to the dataset online. Returns ------- dataset : dict Dictionary containing the dataset. """ # check if this data set has been already downloaded data_dir = join(get_data_dir(), subset) if not exists(data_dir): os.makedirs(data_dir) filename = join(data_dir, filename) # if the file does not exist, download it if not exists(filename): print('data {}/{} not in local storage, downloading from {}' ''.format(subset, filename, url)) # open the url of the data with contextlib.closing(urlopen(url)) as data_url: # store downloaded file locally with open(filename, 'w+b') as storage_file: copyfileobj(data_url, storage_file) # load dataset file with open(filename, 'rb') as storage_file: data_dict = io.loadmat(storage_file) return data_dict
[ "def", "get_data", "(", "filename", ",", "subset", ",", "url", ")", ":", "# check if this data set has been already downloaded", "data_dir", "=", "join", "(", "get_data_dir", "(", ")", ",", "subset", ")", "if", "not", "exists", "(", "data_dir", ")", ":", "os", ".", "makedirs", "(", "data_dir", ")", "filename", "=", "join", "(", "data_dir", ",", "filename", ")", "# if the file does not exist, download it", "if", "not", "exists", "(", "filename", ")", ":", "print", "(", "'data {}/{} not in local storage, downloading from {}'", "''", ".", "format", "(", "subset", ",", "filename", ",", "url", ")", ")", "# open the url of the data", "with", "contextlib", ".", "closing", "(", "urlopen", "(", "url", ")", ")", "as", "data_url", ":", "# store downloaded file locally", "with", "open", "(", "filename", ",", "'w+b'", ")", "as", "storage_file", ":", "copyfileobj", "(", "data_url", ",", "storage_file", ")", "# load dataset file", "with", "open", "(", "filename", ",", "'rb'", ")", "as", "storage_file", ":", "data_dict", "=", "io", ".", "loadmat", "(", "storage_file", ")", "return", "data_dict" ]
29.292683
17.804878
def get_data_xls(file_name, file_contents=None, on_demand=False): ''' Loads the old excel format files. New format files will automatically get loaded as well. Args: file_name: The name of the local file, or the holder for the extension type when the file_contents are supplied. file_contents: The file-like object holding contents of file_name. If left as None, then file_name is directly loaded. on_demand: Requests that a yielder be used in place of a full data copy. ''' def tuple_to_iso_date(tuple_date): ''' Turns a gregorian (year, month, day, hour, minute, nearest_second) into a standard YYYY-MM-DDTHH:MM:SS ISO date. If the date part is all zeros, it's assumed to be a time; if the time part is all zeros it's assumed to be a date; if all of it is zeros it's taken to be a time, specifically 00:00:00 (midnight). Note that datetimes of midnight will come back as date-only strings. A date of month=0 and day=0 is meaningless, so that part of the coercion is safe. For more on the hairy nature of Excel date/times see http://www.lexicon.net/sjmachin/xlrd.html ''' (y,m,d, hh,mm,ss) = tuple_date non_zero = lambda n: n!=0 date = "%04d-%02d-%02d" % (y,m,d) if list(filter(non_zero, (y,m,d))) else '' time = "T%02d:%02d:%02d" % (hh,mm,ss) if list(filter(non_zero, (hh,mm,ss))) or not date else '' return date+time def format_excel_val(book, val_type, value, want_tuple_date): '''Cleans up the incoming excel data''' # Data val_type Codes: # EMPTY 0 # TEXT 1 a Unicode string # NUMBER 2 float # DATE 3 float # BOOLEAN 4 int; 1 means TRUE, 0 means FALSE # ERROR 5 if val_type == 2: # TEXT if value == int(value): value = int(value) elif val_type == 3: # NUMBER datetuple = xlrd.xldate_as_tuple(value, book.datemode) value = datetuple if want_tuple_date else tuple_to_iso_date(datetuple) elif val_type == 5: # ERROR value = xlrd.error_text_from_code[value] return value def xlrd_xsl_to_array(file_name, file_contents=None): ''' Returns: A list of 2-D tables holding the converted cells of each sheet ''' book = xlrd.open_workbook(file_name, file_contents=file_contents, on_demand=on_demand) formatter = lambda t_v: format_excel_val(book, t_v[0], t_v[1], False) row_builder = lambda s, r: list(map(formatter, zip(s.row_types(r), s.row_values(r)))) data = [SheetYielder(book, index, row_builder) for index in range(book.nsheets)] if not on_demand: for sheet in data: sheet.load() book.release_resources() return data return xlrd_xsl_to_array(file_name, file_contents)
[ "def", "get_data_xls", "(", "file_name", ",", "file_contents", "=", "None", ",", "on_demand", "=", "False", ")", ":", "def", "tuple_to_iso_date", "(", "tuple_date", ")", ":", "'''\n Turns a gregorian (year, month, day, hour, minute, nearest_second) into a\n standard YYYY-MM-DDTHH:MM:SS ISO date. If the date part is all zeros, it's\n assumed to be a time; if the time part is all zeros it's assumed to be a date;\n if all of it is zeros it's taken to be a time, specifically 00:00:00 (midnight).\n\n Note that datetimes of midnight will come back as date-only strings. A date\n of month=0 and day=0 is meaningless, so that part of the coercion is safe.\n For more on the hairy nature of Excel date/times see\n http://www.lexicon.net/sjmachin/xlrd.html\n '''", "(", "y", ",", "m", ",", "d", ",", "hh", ",", "mm", ",", "ss", ")", "=", "tuple_date", "non_zero", "=", "lambda", "n", ":", "n", "!=", "0", "date", "=", "\"%04d-%02d-%02d\"", "%", "(", "y", ",", "m", ",", "d", ")", "if", "list", "(", "filter", "(", "non_zero", ",", "(", "y", ",", "m", ",", "d", ")", ")", ")", "else", "''", "time", "=", "\"T%02d:%02d:%02d\"", "%", "(", "hh", ",", "mm", ",", "ss", ")", "if", "list", "(", "filter", "(", "non_zero", ",", "(", "hh", ",", "mm", ",", "ss", ")", ")", ")", "or", "not", "date", "else", "''", "return", "date", "+", "time", "def", "format_excel_val", "(", "book", ",", "val_type", ",", "value", ",", "want_tuple_date", ")", ":", "'''Cleans up the incoming excel data'''", "# Data val_type Codes:", "# EMPTY 0", "# TEXT 1 a Unicode string", "# NUMBER 2 float", "# DATE 3 float", "# BOOLEAN 4 int; 1 means TRUE, 0 means FALSE", "# ERROR 5", "if", "val_type", "==", "2", ":", "# TEXT", "if", "value", "==", "int", "(", "value", ")", ":", "value", "=", "int", "(", "value", ")", "elif", "val_type", "==", "3", ":", "# NUMBER", "datetuple", "=", "xlrd", ".", "xldate_as_tuple", "(", "value", ",", "book", ".", "datemode", ")", "value", "=", "datetuple", "if", "want_tuple_date", "else", "tuple_to_iso_date", "(", "datetuple", ")", "elif", "val_type", "==", "5", ":", "# ERROR", "value", "=", "xlrd", ".", "error_text_from_code", "[", "value", "]", "return", "value", "def", "xlrd_xsl_to_array", "(", "file_name", ",", "file_contents", "=", "None", ")", ":", "'''\n Returns:\n A list of 2-D tables holding the converted cells of each sheet\n '''", "book", "=", "xlrd", ".", "open_workbook", "(", "file_name", ",", "file_contents", "=", "file_contents", ",", "on_demand", "=", "on_demand", ")", "formatter", "=", "lambda", "t_v", ":", "format_excel_val", "(", "book", ",", "t_v", "[", "0", "]", ",", "t_v", "[", "1", "]", ",", "False", ")", "row_builder", "=", "lambda", "s", ",", "r", ":", "list", "(", "map", "(", "formatter", ",", "zip", "(", "s", ".", "row_types", "(", "r", ")", ",", "s", ".", "row_values", "(", "r", ")", ")", ")", ")", "data", "=", "[", "SheetYielder", "(", "book", ",", "index", ",", "row_builder", ")", "for", "index", "in", "range", "(", "book", ".", "nsheets", ")", "]", "if", "not", "on_demand", ":", "for", "sheet", "in", "data", ":", "sheet", ".", "load", "(", ")", "book", ".", "release_resources", "(", ")", "return", "data", "return", "xlrd_xsl_to_array", "(", "file_name", ",", "file_contents", ")" ]
44.439394
26.287879
def discovery(self, discovery_address: Address) -> Discovery: """ Return a proxy to interact with the discovery. """ if not is_binary_address(discovery_address): raise ValueError('discovery_address must be a valid address') with self._discovery_creation_lock: if discovery_address not in self.address_to_discovery: self.address_to_discovery[discovery_address] = Discovery( jsonrpc_client=self.client, discovery_address=discovery_address, contract_manager=self.contract_manager, ) return self.address_to_discovery[discovery_address]
[ "def", "discovery", "(", "self", ",", "discovery_address", ":", "Address", ")", "->", "Discovery", ":", "if", "not", "is_binary_address", "(", "discovery_address", ")", ":", "raise", "ValueError", "(", "'discovery_address must be a valid address'", ")", "with", "self", ".", "_discovery_creation_lock", ":", "if", "discovery_address", "not", "in", "self", ".", "address_to_discovery", ":", "self", ".", "address_to_discovery", "[", "discovery_address", "]", "=", "Discovery", "(", "jsonrpc_client", "=", "self", ".", "client", ",", "discovery_address", "=", "discovery_address", ",", "contract_manager", "=", "self", ".", "contract_manager", ",", ")", "return", "self", ".", "address_to_discovery", "[", "discovery_address", "]" ]
47.714286
20.857143
def _add_q(self, q_object): """Add a Q-object to the current filter.""" self._criteria = self._criteria._combine(q_object, q_object.connector)
[ "def", "_add_q", "(", "self", ",", "q_object", ")", ":", "self", ".", "_criteria", "=", "self", ".", "_criteria", ".", "_combine", "(", "q_object", ",", "q_object", ".", "connector", ")" ]
52
17
def var(self): """ Variance value as a result of an uncertainty calculation """ mn = self.mean vr = np.mean((self._mcpts - mn) ** 2) return vr
[ "def", "var", "(", "self", ")", ":", "mn", "=", "self", ".", "mean", "vr", "=", "np", ".", "mean", "(", "(", "self", ".", "_mcpts", "-", "mn", ")", "**", "2", ")", "return", "vr" ]
26.285714
13.714286
def rnn(bptt, vocab_size, num_embed, nhid, num_layers, dropout, num_proj, batch_size): """ word embedding + LSTM Projected """ state_names = [] data = S.var('data') weight = S.var("encoder_weight", stype='row_sparse') embed = S.sparse.Embedding(data=data, weight=weight, input_dim=vocab_size, output_dim=num_embed, name='embed', sparse_grad=True) states = [] outputs = S.Dropout(embed, p=dropout) for i in range(num_layers): prefix = 'lstmp%d_' % i init_h = S.var(prefix + 'init_h', shape=(batch_size, num_proj), init=mx.init.Zero()) init_c = S.var(prefix + 'init_c', shape=(batch_size, nhid), init=mx.init.Zero()) state_names += [prefix + 'init_h', prefix + 'init_c'] lstmp = mx.gluon.contrib.rnn.LSTMPCell(nhid, num_proj, prefix=prefix) outputs, next_states = lstmp.unroll(bptt, outputs, begin_state=[init_h, init_c], \ layout='NTC', merge_outputs=True) outputs = S.Dropout(outputs, p=dropout) states += [S.stop_gradient(s) for s in next_states] outputs = S.reshape(outputs, shape=(-1, num_proj)) trainable_lstm_args = [] for arg in outputs.list_arguments(): if 'lstmp' in arg and 'init' not in arg: trainable_lstm_args.append(arg) return outputs, states, trainable_lstm_args, state_names
[ "def", "rnn", "(", "bptt", ",", "vocab_size", ",", "num_embed", ",", "nhid", ",", "num_layers", ",", "dropout", ",", "num_proj", ",", "batch_size", ")", ":", "state_names", "=", "[", "]", "data", "=", "S", ".", "var", "(", "'data'", ")", "weight", "=", "S", ".", "var", "(", "\"encoder_weight\"", ",", "stype", "=", "'row_sparse'", ")", "embed", "=", "S", ".", "sparse", ".", "Embedding", "(", "data", "=", "data", ",", "weight", "=", "weight", ",", "input_dim", "=", "vocab_size", ",", "output_dim", "=", "num_embed", ",", "name", "=", "'embed'", ",", "sparse_grad", "=", "True", ")", "states", "=", "[", "]", "outputs", "=", "S", ".", "Dropout", "(", "embed", ",", "p", "=", "dropout", ")", "for", "i", "in", "range", "(", "num_layers", ")", ":", "prefix", "=", "'lstmp%d_'", "%", "i", "init_h", "=", "S", ".", "var", "(", "prefix", "+", "'init_h'", ",", "shape", "=", "(", "batch_size", ",", "num_proj", ")", ",", "init", "=", "mx", ".", "init", ".", "Zero", "(", ")", ")", "init_c", "=", "S", ".", "var", "(", "prefix", "+", "'init_c'", ",", "shape", "=", "(", "batch_size", ",", "nhid", ")", ",", "init", "=", "mx", ".", "init", ".", "Zero", "(", ")", ")", "state_names", "+=", "[", "prefix", "+", "'init_h'", ",", "prefix", "+", "'init_c'", "]", "lstmp", "=", "mx", ".", "gluon", ".", "contrib", ".", "rnn", ".", "LSTMPCell", "(", "nhid", ",", "num_proj", ",", "prefix", "=", "prefix", ")", "outputs", ",", "next_states", "=", "lstmp", ".", "unroll", "(", "bptt", ",", "outputs", ",", "begin_state", "=", "[", "init_h", ",", "init_c", "]", ",", "layout", "=", "'NTC'", ",", "merge_outputs", "=", "True", ")", "outputs", "=", "S", ".", "Dropout", "(", "outputs", ",", "p", "=", "dropout", ")", "states", "+=", "[", "S", ".", "stop_gradient", "(", "s", ")", "for", "s", "in", "next_states", "]", "outputs", "=", "S", ".", "reshape", "(", "outputs", ",", "shape", "=", "(", "-", "1", ",", "num_proj", ")", ")", "trainable_lstm_args", "=", "[", "]", "for", "arg", "in", "outputs", ".", "list_arguments", "(", ")", ":", "if", "'lstmp'", "in", "arg", "and", "'init'", "not", "in", "arg", ":", "trainable_lstm_args", ".", "append", "(", "arg", ")", "return", "outputs", ",", "states", ",", "trainable_lstm_args", ",", "state_names" ]
52.807692
22.769231
def _check_kafka_disconnect(self): """Checks the kafka connection is still valid""" for node_id in self.consumer._client._conns: conn = self.consumer._client._conns[node_id] if conn.state == ConnectionStates.DISCONNECTED or \ conn.state == ConnectionStates.DISCONNECTING: self._spawn_kafka_connection_thread() break
[ "def", "_check_kafka_disconnect", "(", "self", ")", ":", "for", "node_id", "in", "self", ".", "consumer", ".", "_client", ".", "_conns", ":", "conn", "=", "self", ".", "consumer", ".", "_client", ".", "_conns", "[", "node_id", "]", "if", "conn", ".", "state", "==", "ConnectionStates", ".", "DISCONNECTED", "or", "conn", ".", "state", "==", "ConnectionStates", ".", "DISCONNECTING", ":", "self", ".", "_spawn_kafka_connection_thread", "(", ")", "break" ]
50
14.25
def send_voice_message(self, user_id, media_id):
        """
        Send a voice message.
        For details, see
        http://mp.weixin.qq.com/wiki/7/12a5a320ae96fecdf0e15cb06123de9f.html

        :param user_id: the user ID, i.e. the source of the WechatMessage you received
        :param media_id: the media ID of the voice message to send; it can be uploaded via :func:`upload_media`
        :return: the returned JSON data
        """
        return self.request.post(
            url='https://api.weixin.qq.com/cgi-bin/message/custom/send',
            data={
                'touser': user_id,
                'msgtype': 'voice',
                'voice': {
                    'media_id': media_id,
                },
            }
        )
[ "def", "send_voice_message", "(", "self", ",", "user_id", ",", "media_id", ")", ":", "return", "self", ".", "request", ".", "post", "(", "url", "=", "'https://api.weixin.qq.com/cgi-bin/message/custom/send'", ",", "data", "=", "{", "'touser'", ":", "user_id", ",", "'msgtype'", ":", "'voice'", ",", "'voice'", ":", "{", "'media_id'", ":", "media_id", ",", "}", ",", "}", ")" ]
34.444444
16.666667
def _run(self, tree): """ Run a query from a parse tree """ if tree.throttle: limiter = self._parse_throttle(tree.table, tree.throttle) self._query_rate_limit = limiter del tree["throttle"] return self._run(tree) if tree.action == "SELECT": return self._select(tree, self.allow_select_scan) elif tree.action == "SCAN": return self._scan(tree) elif tree.action == "DELETE": return self._delete(tree) elif tree.action == "UPDATE": return self._update(tree) elif tree.action == "CREATE": return self._create(tree) elif tree.action == "INSERT": return self._insert(tree) elif tree.action == "DROP": return self._drop(tree) elif tree.action == "ALTER": return self._alter(tree) elif tree.action == "DUMP": return self._dump(tree) elif tree.action == "LOAD": return self._load(tree) elif tree.action == "EXPLAIN": return self._explain(tree) elif tree.action == "ANALYZE": self._analyzing = True self.connection.default_return_capacity = True return self._run(tree[1]) else: raise SyntaxError("Unrecognized action '%s'" % tree.action)
[ "def", "_run", "(", "self", ",", "tree", ")", ":", "if", "tree", ".", "throttle", ":", "limiter", "=", "self", ".", "_parse_throttle", "(", "tree", ".", "table", ",", "tree", ".", "throttle", ")", "self", ".", "_query_rate_limit", "=", "limiter", "del", "tree", "[", "\"throttle\"", "]", "return", "self", ".", "_run", "(", "tree", ")", "if", "tree", ".", "action", "==", "\"SELECT\"", ":", "return", "self", ".", "_select", "(", "tree", ",", "self", ".", "allow_select_scan", ")", "elif", "tree", ".", "action", "==", "\"SCAN\"", ":", "return", "self", ".", "_scan", "(", "tree", ")", "elif", "tree", ".", "action", "==", "\"DELETE\"", ":", "return", "self", ".", "_delete", "(", "tree", ")", "elif", "tree", ".", "action", "==", "\"UPDATE\"", ":", "return", "self", ".", "_update", "(", "tree", ")", "elif", "tree", ".", "action", "==", "\"CREATE\"", ":", "return", "self", ".", "_create", "(", "tree", ")", "elif", "tree", ".", "action", "==", "\"INSERT\"", ":", "return", "self", ".", "_insert", "(", "tree", ")", "elif", "tree", ".", "action", "==", "\"DROP\"", ":", "return", "self", ".", "_drop", "(", "tree", ")", "elif", "tree", ".", "action", "==", "\"ALTER\"", ":", "return", "self", ".", "_alter", "(", "tree", ")", "elif", "tree", ".", "action", "==", "\"DUMP\"", ":", "return", "self", ".", "_dump", "(", "tree", ")", "elif", "tree", ".", "action", "==", "\"LOAD\"", ":", "return", "self", ".", "_load", "(", "tree", ")", "elif", "tree", ".", "action", "==", "\"EXPLAIN\"", ":", "return", "self", ".", "_explain", "(", "tree", ")", "elif", "tree", ".", "action", "==", "\"ANALYZE\"", ":", "self", ".", "_analyzing", "=", "True", "self", ".", "connection", ".", "default_return_capacity", "=", "True", "return", "self", ".", "_run", "(", "tree", "[", "1", "]", ")", "else", ":", "raise", "SyntaxError", "(", "\"Unrecognized action '%s'\"", "%", "tree", ".", "action", ")" ]
38.314286
7.714286
def logger(self): """:class:`logging.Logger` of this plotter""" try: return self.data.psy.logger.getChild(self.__class__.__name__) except AttributeError: name = '%s.%s' % (self.__module__, self.__class__.__name__) return logging.getLogger(name)
[ "def", "logger", "(", "self", ")", ":", "try", ":", "return", "self", ".", "data", ".", "psy", ".", "logger", ".", "getChild", "(", "self", ".", "__class__", ".", "__name__", ")", "except", "AttributeError", ":", "name", "=", "'%s.%s'", "%", "(", "self", ".", "__module__", ",", "self", ".", "__class__", ".", "__name__", ")", "return", "logging", ".", "getLogger", "(", "name", ")" ]
42.571429
18.142857
def components(self, visible=True): """ Return the component notes of chord :param bool visible: returns the name of notes if True else list of int :rtype: list[(str or int)] :return: component notes of chord """ if self._on: self._quality.append_on_chord(self.on, self.root) return self._quality.get_components(root=self._root, visible=visible)
[ "def", "components", "(", "self", ",", "visible", "=", "True", ")", ":", "if", "self", ".", "_on", ":", "self", ".", "_quality", ".", "append_on_chord", "(", "self", ".", "on", ",", "self", ".", "root", ")", "return", "self", ".", "_quality", ".", "get_components", "(", "root", "=", "self", ".", "_root", ",", "visible", "=", "visible", ")" ]
36.818182
19
def AddHeader(self, header, value): '''Add a header to send. ''' self.user_headers.append((header, value)) return self
[ "def", "AddHeader", "(", "self", ",", "header", ",", "value", ")", ":", "self", ".", "user_headers", ".", "append", "(", "(", "header", ",", "value", ")", ")", "return", "self" ]
29.2
14.4
def bytes2NativeString(x, encoding='utf-8'):
    """
    Convert C{bytes} to a native C{str}.

    On Python 3 and higher, str and bytes are not equivalent. In this case,
    decode the bytes, and return a native string.

    On Python 2 and lower, str and bytes are equivalent. In this case, just
    return the native string.

    @param x: a string of type C{bytes}
    @param encoding: an optional codec, default: 'utf-8'
    @return: a string of type C{str}
    """
    if isinstance(x, bytes) and str != bytes:
        return x.decode(encoding)
    return x
[ "def", "bytes2NativeString", "(", "x", ",", "encoding", "=", "'utf-8'", ")", ":", "if", "isinstance", "(", "x", ",", "bytes", ")", "and", "str", "!=", "bytes", ":", "return", "x", ".", "decode", "(", "encoding", ")", "return", "x" ]
29.473684
10.526316
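A quick sketch of the conversion rule described above, assuming Python 3 semantics (where str != bytes, so the bytes branch decodes); the function is copied from the row above so the checks run standalone, and the sample strings are made up.

def bytes2NativeString(x, encoding='utf-8'):
    # Copied from the row above for a self-contained check.
    if isinstance(x, bytes) and str != bytes:
        return x.decode(encoding)
    return x

assert bytes2NativeString(b'caf\xc3\xa9') == 'café'   # bytes are decoded with the given codec
assert bytes2NativeString('café') == 'café'           # a native str passes through unchanged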
def barh(*args, **kwargs):
    """
    Creates a bar plot, with white outlines and a fill color that defaults to
    the first teal-ish green in ColorBrewer's Set2. Optionally accepts
    grid='y' or grid='x' to draw a white grid over the bars,
    to show the scale. Almost like "erasing" some of the plot,
    but it adds more information!

    Can also add an annotation of the width of the barplots directly onto
    the bars with the `annotate` parameter, which can either be True,
    which will annotate the values, or a list of strings, which will annotate
    with the supplied strings.

    Can support stacked bars with the value of each stack shown on the stack
    (Added by Salil Banerjee)

    @param ax: matplotlib.axes instance
    @param top: Vector of values of where to put the top side of the bar
    @param width: Vector of values of the bar widths
    @param ytickabels: Vector of labels of the bar widths
    @param kwargs: Any additional arguments to matplotlib.bar()
    """
    ax, args, kwargs = maybe_get_ax(*args, **kwargs)
    kwargs.setdefault('color', set2[0])
    kwargs.setdefault('edgecolor', 'white')
    middle = 0.4 if 'width' not in kwargs else kwargs['width']/2.0

    # Check if data contains stacks
    stacked = kwargs.pop('stacked',False)
    # Check if stack text should be included
    stack_text = kwargs.pop('stack_text',False)
    # Get legend if available
    legend = kwargs.pop('legend',False)

    top = args[0]
    width = np.array(args[1])

    # Label each individual bar, if xticklabels is provided
    ytickabels = kwargs.pop('yticklabels', None)
    # left+0.4 is the center of the bar
    yticks = np.array(top) + middle

    # Whether or not to annotate each bar with the width value
    annotate = kwargs.pop('annotate', False)

    # If no grid specified, don't draw one.
    grid = kwargs.pop('grid', None)

    cmap = kwargs.pop('cmap', False)
    if cmap:
        kwargs['edgecolor'] = almost_black
        if not stacked:
            kwargs['color'] = getcolors(cmap, width, 0)

    # Check if stacked and plot data accordingly
    if stacked:
        num_stacks, num_data = width.shape
        left = np.zeros(num_data)
        for i in np.arange(num_stacks):
            lst = list(args)
            lst[1] = width[i]
            args = tuple(lst)
            if cmap:
                kwargs['color'] = getcolors(cmap, width[i], i)
            else:
                kwargs['color'] = set2[i]
            kwargs['left'] = left
            rectangles = ax.barh(*args, **kwargs)
            left += width[i]
    else:
        rectangles = ax.barh(*args, **kwargs)

    # add legend
    if isinstance(legend, collections.Iterable):
        ax.legend(legend,loc='upper center',bbox_to_anchor=(0.5,1.11), ncol=5)

    # add whitespace padding on left
    ymin, ymax = ax.get_ylim()
    ymin -= 0.2
    if stacked:
        ymax = num_data
    ax.set_ylim(ymin, ymax)

    # If there are negative counts, remove the bottom axes
    # and add a line at y=0
    if any(w < 0 for w in width.tolist()):
        axes_to_remove = ['top', 'right', 'bottom']
        ax.vlines(x=0, ymin=ymin, ymax=ymax, linewidths=0.75)
        #ax.hlines(y=0, xmin=xmin, xmax=xmax,
        #          linewidths=0.75)
    else:
        axes_to_remove = ['top', 'right']

    #Remove excess axes
    remove_chartjunk(ax, axes_to_remove, grid=grid)

    if stacked:
        data = width
        width = width.sum(axis=0)

    # Add the yticklabels if they are there
    if ytickabels is not None:
        ax.set_yticks(yticks)
        ax.set_yticklabels(ytickabels)

    if annotate or isinstance(annotate, collections.Iterable):
        annotate_xrange_factor = 0.050
        xmin, xmax = ax.get_xlim()
        xrange = xmax - xmin

        # Reset ymax and ymin so there's enough room to see the annotation of
        # the top-most
        if xmax > 0:
            xmax += xrange * 0.1
        if xmin < 0:
            xmin -= xrange * 0.1
        ax.set_xlim(xmin, xmax)
        xrange = xmax - xmin

        offset_ = xrange * annotate_xrange_factor
        if isinstance(annotate, collections.Iterable):
            annotations = map(str, annotate)
        else:
            annotations = ['%.3f' % w if type(w) is np.float_ else str(w)
                           for w in width]

        for y, w, annotation in zip(yticks, width, annotations):
            # Adjust the offset to account for negative bars
            offset = offset_ if w >= 0 else -1 * offset_

            # Finally, add the text to the axes
            ax.annotate(annotation, (w + offset, y),
                        verticalalignment='center',
                        horizontalalignment='center',
                        color=almost_black)

    # Text for each block of stack
    # This was partially inspired by the following article by Tableau software
    # http://www.tableausoftware.com/about/blog/2014/1/new-whitepaper-survey-data-less-ugly-more-understandable-27812
    if stack_text:
        left = np.zeros(num_data)
        max_w = max(width)
        for i in np.arange(num_stacks):
            for y, d, l in zip(yticks, data[i], left):
                if (d*100.0/max_w) > 2.0:
                    ax.text(l+d/2.0,y,d, ha='center', va='center', color=almost_black)
            left += data[i]
    return rectangles
[ "def", "barh", "(", "*", "args", ",", "*", "*", "kwargs", ")", ":", "ax", ",", "args", ",", "kwargs", "=", "maybe_get_ax", "(", "*", "args", ",", "*", "*", "kwargs", ")", "kwargs", ".", "setdefault", "(", "'color'", ",", "set2", "[", "0", "]", ")", "kwargs", ".", "setdefault", "(", "'edgecolor'", ",", "'white'", ")", "middle", "=", "0.4", "if", "'width'", "not", "in", "kwargs", "else", "kwargs", "[", "'width'", "]", "/", "2.0", "# Check if data contains stacks", "stacked", "=", "kwargs", ".", "pop", "(", "'stacked'", ",", "False", ")", "# Check if stack text should be included", "stack_text", "=", "kwargs", ".", "pop", "(", "'stack_text'", ",", "False", ")", "# Get legend if available", "legend", "=", "kwargs", ".", "pop", "(", "'legend'", ",", "False", ")", "top", "=", "args", "[", "0", "]", "width", "=", "np", ".", "array", "(", "args", "[", "1", "]", ")", "# Label each individual bar, if xticklabels is provided", "ytickabels", "=", "kwargs", ".", "pop", "(", "'yticklabels'", ",", "None", ")", "# left+0.4 is the center of the bar", "yticks", "=", "np", ".", "array", "(", "top", ")", "+", "middle", "# Whether or not to annotate each bar with the width value", "annotate", "=", "kwargs", ".", "pop", "(", "'annotate'", ",", "False", ")", "# If no grid specified, don't draw one.", "grid", "=", "kwargs", ".", "pop", "(", "'grid'", ",", "None", ")", "cmap", "=", "kwargs", ".", "pop", "(", "'cmap'", ",", "False", ")", "if", "cmap", ":", "kwargs", "[", "'edgecolor'", "]", "=", "almost_black", "if", "not", "stacked", ":", "kwargs", "[", "'color'", "]", "=", "getcolors", "(", "cmap", ",", "width", ",", "0", ")", "# Check if stacked and plot data accordingly", "if", "stacked", ":", "num_stacks", ",", "num_data", "=", "width", ".", "shape", "left", "=", "np", ".", "zeros", "(", "num_data", ")", "for", "i", "in", "np", ".", "arange", "(", "num_stacks", ")", ":", "lst", "=", "list", "(", "args", ")", "lst", "[", "1", "]", "=", "width", "[", "i", "]", "args", "=", "tuple", "(", "lst", ")", "if", "cmap", ":", "kwargs", "[", "'color'", "]", "=", "getcolors", "(", "cmap", ",", "width", "[", "i", "]", ",", "i", ")", "else", ":", "kwargs", "[", "'color'", "]", "=", "set2", "[", "i", "]", "kwargs", "[", "'left'", "]", "=", "left", "rectangles", "=", "ax", ".", "barh", "(", "*", "args", ",", "*", "*", "kwargs", ")", "left", "+=", "width", "[", "i", "]", "else", ":", "rectangles", "=", "ax", ".", "barh", "(", "*", "args", ",", "*", "*", "kwargs", ")", "# add legend", "if", "isinstance", "(", "legend", ",", "collections", ".", "Iterable", ")", ":", "ax", ".", "legend", "(", "legend", ",", "loc", "=", "'upper center'", ",", "bbox_to_anchor", "=", "(", "0.5", ",", "1.11", ")", ",", "ncol", "=", "5", ")", "# add whitespace padding on left", "ymin", ",", "ymax", "=", "ax", ".", "get_ylim", "(", ")", "ymin", "-=", "0.2", "if", "stacked", ":", "ymax", "=", "num_data", "ax", ".", "set_ylim", "(", "ymin", ",", "ymax", ")", "# If there are negative counts, remove the bottom axes", "# and add a line at y=0", "if", "any", "(", "w", "<", "0", "for", "w", "in", "width", ".", "tolist", "(", ")", ")", ":", "axes_to_remove", "=", "[", "'top'", ",", "'right'", ",", "'bottom'", "]", "ax", ".", "vlines", "(", "x", "=", "0", ",", "ymin", "=", "ymin", ",", "ymax", "=", "ymax", ",", "linewidths", "=", "0.75", ")", "#ax.hlines(y=0, xmin=xmin, xmax=xmax,", "# linewidths=0.75)", "else", ":", "axes_to_remove", "=", "[", "'top'", ",", "'right'", "]", "#Remove excess axes", "remove_chartjunk", "(", "ax", ",", "axes_to_remove", ",", 
"grid", "=", "grid", ")", "if", "stacked", ":", "data", "=", "width", "width", "=", "width", ".", "sum", "(", "axis", "=", "0", ")", "# Add the yticklabels if they are there", "if", "ytickabels", "is", "not", "None", ":", "ax", ".", "set_yticks", "(", "yticks", ")", "ax", ".", "set_yticklabels", "(", "ytickabels", ")", "if", "annotate", "or", "isinstance", "(", "annotate", ",", "collections", ".", "Iterable", ")", ":", "annotate_xrange_factor", "=", "0.050", "xmin", ",", "xmax", "=", "ax", ".", "get_xlim", "(", ")", "xrange", "=", "xmax", "-", "xmin", "# Reset ymax and ymin so there's enough room to see the annotation of", "# the top-most", "if", "xmax", ">", "0", ":", "xmax", "+=", "xrange", "*", "0.1", "if", "xmin", "<", "0", ":", "xmin", "-=", "xrange", "*", "0.1", "ax", ".", "set_xlim", "(", "xmin", ",", "xmax", ")", "xrange", "=", "xmax", "-", "xmin", "offset_", "=", "xrange", "*", "annotate_xrange_factor", "if", "isinstance", "(", "annotate", ",", "collections", ".", "Iterable", ")", ":", "annotations", "=", "map", "(", "str", ",", "annotate", ")", "else", ":", "annotations", "=", "[", "'%.3f'", "%", "w", "if", "type", "(", "w", ")", "is", "np", ".", "float_", "else", "str", "(", "w", ")", "for", "w", "in", "width", "]", "for", "y", ",", "w", ",", "annotation", "in", "zip", "(", "yticks", ",", "width", ",", "annotations", ")", ":", "# Adjust the offset to account for negative bars", "offset", "=", "offset_", "if", "w", ">=", "0", "else", "-", "1", "*", "offset_", "# Finally, add the text to the axes", "ax", ".", "annotate", "(", "annotation", ",", "(", "w", "+", "offset", ",", "y", ")", ",", "verticalalignment", "=", "'center'", ",", "horizontalalignment", "=", "'center'", ",", "color", "=", "almost_black", ")", "# Text for each block of stack", "# This was partially inspired by the following article by Tableau software", "# http://www.tableausoftware.com/about/blog/2014/1/new-whitepaper-survey-data-less-ugly-more-understandable-27812", "if", "stack_text", ":", "left", "=", "np", ".", "zeros", "(", "num_data", ")", "max_w", "=", "max", "(", "width", ")", "for", "i", "in", "np", ".", "arange", "(", "num_stacks", ")", ":", "for", "y", ",", "d", ",", "l", "in", "zip", "(", "yticks", ",", "data", "[", "i", "]", ",", "left", ")", ":", "if", "(", "d", "*", "100.0", "/", "max_w", ")", ">", "2.0", ":", "ax", ".", "text", "(", "l", "+", "d", "/", "2.0", ",", "y", ",", "d", ",", "ha", "=", "'center'", ",", "va", "=", "'center'", ",", "color", "=", "almost_black", ")", "left", "+=", "data", "[", "i", "]", "return", "rectangles" ]
35.22973
17.932432
def _initialize_cfg(self): """ Re-create the DiGraph """ self.kb.functions = FunctionManager(self.kb) self._jobs_to_analyze_per_function = defaultdict(set) self._completed_functions = set()
[ "def", "_initialize_cfg", "(", "self", ")", ":", "self", ".", "kb", ".", "functions", "=", "FunctionManager", "(", "self", ".", "kb", ")", "self", ".", "_jobs_to_analyze_per_function", "=", "defaultdict", "(", "set", ")", "self", ".", "_completed_functions", "=", "set", "(", ")" ]
25.666667
15.444444
def add_type_struct_or_union(self, name, interp, node): """Store the node with the name. When it is instantiated, the node itself will be handled. :name: name of the typedefd struct/union :node: the union/struct node :interp: the 010 interpreter """ self.add_type_class(name, StructUnionDef(name, interp, node))
[ "def", "add_type_struct_or_union", "(", "self", ",", "name", ",", "interp", ",", "node", ")", ":", "self", ".", "add_type_class", "(", "name", ",", "StructUnionDef", "(", "name", ",", "interp", ",", "node", ")", ")" ]
40
11.111111
def _iter_all_paths(start, end, rand=False, path=tuple()): """Iterate through all paths from start to end.""" path = path + (start, ) if start is end: yield path else: nodes = [start.lo, start.hi] if rand: # pragma: no cover random.shuffle(nodes) for node in nodes: if node is not None: yield from _iter_all_paths(node, end, rand, path)
[ "def", "_iter_all_paths", "(", "start", ",", "end", ",", "rand", "=", "False", ",", "path", "=", "tuple", "(", ")", ")", ":", "path", "=", "path", "+", "(", "start", ",", ")", "if", "start", "is", "end", ":", "yield", "path", "else", ":", "nodes", "=", "[", "start", ".", "lo", ",", "start", ".", "hi", "]", "if", "rand", ":", "# pragma: no cover", "random", ".", "shuffle", "(", "nodes", ")", "for", "node", "in", "nodes", ":", "if", "node", "is", "not", "None", ":", "yield", "from", "_iter_all_paths", "(", "node", ",", "end", ",", "rand", ",", "path", ")" ]
34.416667
13.916667
def waitStarted(self): """wait until name server is started.""" started = False while not started: if self.starter != None: started = self.starter.waitUntilStarted(0.5)
[ "def", "waitStarted", "(", "self", ")", ":", "started", "=", "False", "while", "not", "started", ":", "if", "self", ".", "starter", "!=", "None", ":", "started", "=", "self", ".", "starter", ".", "waitUntilStarted", "(", "0.5", ")" ]
35.833333
12.166667
def bbox2path(xmin, xmax, ymin, ymax): """Converts a bounding box 4-tuple to a Path object.""" b = Line(xmin + 1j*ymin, xmax + 1j*ymin) t = Line(xmin + 1j*ymax, xmax + 1j*ymax) r = Line(xmax + 1j*ymin, xmax + 1j*ymax) l = Line(xmin + 1j*ymin, xmin + 1j*ymax) return Path(b, r, t.reversed(), l.reversed())
[ "def", "bbox2path", "(", "xmin", ",", "xmax", ",", "ymin", ",", "ymax", ")", ":", "b", "=", "Line", "(", "xmin", "+", "1j", "*", "ymin", ",", "xmax", "+", "1j", "*", "ymin", ")", "t", "=", "Line", "(", "xmin", "+", "1j", "*", "ymax", ",", "xmax", "+", "1j", "*", "ymax", ")", "r", "=", "Line", "(", "xmax", "+", "1j", "*", "ymin", ",", "xmax", "+", "1j", "*", "ymax", ")", "l", "=", "Line", "(", "xmin", "+", "1j", "*", "ymin", ",", "xmin", "+", "1j", "*", "ymax", ")", "return", "Path", "(", "b", ",", "r", ",", "t", ".", "reversed", "(", ")", ",", "l", ".", "reversed", "(", ")", ")" ]
46
3.857143
def address_lookup(hypervisor, address_pool): """Retrieves a valid and available network IP address.""" address_pool = set(address_pool) active_addresses = set(active_network_addresses(hypervisor)) try: return random.choice(tuple(address_pool - active_addresses)) except IndexError: raise RuntimeError("All IP addresses are in use")
[ "def", "address_lookup", "(", "hypervisor", ",", "address_pool", ")", ":", "address_pool", "=", "set", "(", "address_pool", ")", "active_addresses", "=", "set", "(", "active_network_addresses", "(", "hypervisor", ")", ")", "try", ":", "return", "random", ".", "choice", "(", "tuple", "(", "address_pool", "-", "active_addresses", ")", ")", "except", "IndexError", ":", "raise", "RuntimeError", "(", "\"All IP addresses are in use\"", ")" ]
40.111111
18.666667
def __add_bgedge(self, bgedge, merge=True):
        """ Adds supplied :class:`bg.edge.BGEdge` object to current instance of :class:`BreakpointGraph`.

        Checks that vertices in supplied :class:`bg.edge.BGEdge` instance actually are present in current :class:`BreakpointGraph` if the **merge** option is provided. Otherwise a new edge is added to the current :class:`BreakpointGraph`.

        :param bgedge: instance of :class:`bg.edge.BGEdge`, information from which is to be added to current :class:`BreakpointGraph`
        :type bgedge: :class:`bg.edge.BGEdge`
        :param merge: a flag to merge supplied information from multi-color perspective into a first existing edge between two supplied vertices
        :type merge: ``Boolean``
        :return: ``None``, performs inplace changes
        """
        if bgedge.vertex1 in self.bg and bgedge.vertex2 in self.bg[bgedge.vertex1] and merge:
            key = min(self.bg[bgedge.vertex1][bgedge.vertex2].keys())
            self.bg[bgedge.vertex1][bgedge.vertex2][key]["attr_dict"]["multicolor"] += bgedge.multicolor
            self.bg[bgedge.vertex1][bgedge.vertex2][key]["attr_dict"]["data"] = {}
        else:
            self.bg.add_edge(bgedge.vertex1, bgedge.vertex2, attr_dict={"multicolor": deepcopy(bgedge.multicolor),
                                                                        "data": bgedge.data})
        self.cache_valid["overall_set_of_colors"] = False
[ "def", "__add_bgedge", "(", "self", ",", "bgedge", ",", "merge", "=", "True", ")", ":", "if", "bgedge", ".", "vertex1", "in", "self", ".", "bg", "and", "bgedge", ".", "vertex2", "in", "self", ".", "bg", "[", "bgedge", ".", "vertex1", "]", "and", "merge", ":", "key", "=", "min", "(", "self", ".", "bg", "[", "bgedge", ".", "vertex1", "]", "[", "bgedge", ".", "vertex2", "]", ".", "keys", "(", ")", ")", "self", ".", "bg", "[", "bgedge", ".", "vertex1", "]", "[", "bgedge", ".", "vertex2", "]", "[", "key", "]", "[", "\"attr_dict\"", "]", "[", "\"multicolor\"", "]", "+=", "bgedge", ".", "multicolor", "self", ".", "bg", "[", "bgedge", ".", "vertex1", "]", "[", "bgedge", ".", "vertex2", "]", "[", "key", "]", "[", "\"attr_dict\"", "]", "[", "\"data\"", "]", "=", "{", "}", "else", ":", "self", ".", "bg", ".", "add_edge", "(", "bgedge", ".", "vertex1", ",", "bgedge", ".", "vertex2", ",", "attr_dict", "=", "{", "\"multicolor\"", ":", "deepcopy", "(", "bgedge", ".", "multicolor", ")", ",", "\"data\"", ":", "bgedge", ".", "data", "}", ")", "self", ".", "cache_valid", "[", "\"overall_set_of_colors\"", "]", "=", "False" ]
74.842105
45.052632
def deserialize_by_field(value, field): """ Some types get serialized to JSON, as strings. If we know what they are supposed to be, we can deserialize them """ if isinstance(field, forms.DateTimeField): value = parse_datetime(value) elif isinstance(field, forms.DateField): value = parse_date(value) elif isinstance(field, forms.TimeField): value = parse_time(value) return value
[ "def", "deserialize_by_field", "(", "value", ",", "field", ")", ":", "if", "isinstance", "(", "field", ",", "forms", ".", "DateTimeField", ")", ":", "value", "=", "parse_datetime", "(", "value", ")", "elif", "isinstance", "(", "field", ",", "forms", ".", "DateField", ")", ":", "value", "=", "parse_date", "(", "value", ")", "elif", "isinstance", "(", "field", ",", "forms", ".", "TimeField", ")", ":", "value", "=", "parse_time", "(", "value", ")", "return", "value" ]
35.333333
7.833333
def _exec_cmd(self, command, **kwargs): """Create a new method as command has specific requirements. There is a handful of the TMSH global commands supported, so this method requires them as a parameter. :raises: InvalidCommand """ kwargs['command'] = command self._check_exclusive_parameters(**kwargs) requests_params = self._handle_requests_params(kwargs) session = self._meta_data['bigip']._meta_data['icr_session'] response = session.post( self._meta_data['uri'], json=kwargs, **requests_params) new_instance = self._stamp_out_core() new_instance._local_update(response.json()) if 'commandResult' in new_instance.__dict__: new_instance._check_command_result() return new_instance
[ "def", "_exec_cmd", "(", "self", ",", "command", ",", "*", "*", "kwargs", ")", ":", "kwargs", "[", "'command'", "]", "=", "command", "self", ".", "_check_exclusive_parameters", "(", "*", "*", "kwargs", ")", "requests_params", "=", "self", ".", "_handle_requests_params", "(", "kwargs", ")", "session", "=", "self", ".", "_meta_data", "[", "'bigip'", "]", ".", "_meta_data", "[", "'icr_session'", "]", "response", "=", "session", ".", "post", "(", "self", ".", "_meta_data", "[", "'uri'", "]", ",", "json", "=", "kwargs", ",", "*", "*", "requests_params", ")", "new_instance", "=", "self", ".", "_stamp_out_core", "(", ")", "new_instance", ".", "_local_update", "(", "response", ".", "json", "(", ")", ")", "if", "'commandResult'", "in", "new_instance", ".", "__dict__", ":", "new_instance", ".", "_check_command_result", "(", ")", "return", "new_instance" ]
38.238095
16.952381
def get_image_tags(self): """ Fetches image labels (repository / tags) from Docker. :return: A dictionary, with image name and tags as the key and the image id as value. :rtype: dict """ current_images = self.images() tags = {tag: i['Id'] for i in current_images for tag in i['RepoTags']} return tags
[ "def", "get_image_tags", "(", "self", ")", ":", "current_images", "=", "self", ".", "images", "(", ")", "tags", "=", "{", "tag", ":", "i", "[", "'Id'", "]", "for", "i", "in", "current_images", "for", "tag", "in", "i", "[", "'RepoTags'", "]", "}", "return", "tags" ]
35.6
21
def migration_creatr(migration_file, create, table): """Name of the migration file""" if not check(): click.echo(Fore.RED + 'ERROR: Ensure you are in a bast app to run the create:migration command') return migration = CreateMigration() if table is None: table = snake_case(migration_file) file = migration.create_file(snake_case(migration_file), table=table, create=create) click.echo(Fore.GREEN + 'Migration file created at %s' % file)
[ "def", "migration_creatr", "(", "migration_file", ",", "create", ",", "table", ")", ":", "if", "not", "check", "(", ")", ":", "click", ".", "echo", "(", "Fore", ".", "RED", "+", "'ERROR: Ensure you are in a bast app to run the create:migration command'", ")", "return", "migration", "=", "CreateMigration", "(", ")", "if", "table", "is", "None", ":", "table", "=", "snake_case", "(", "migration_file", ")", "file", "=", "migration", ".", "create_file", "(", "snake_case", "(", "migration_file", ")", ",", "table", "=", "table", ",", "create", "=", "create", ")", "click", ".", "echo", "(", "Fore", ".", "GREEN", "+", "'Migration file created at %s'", "%", "file", ")" ]
43.181818
24.090909
def _make_repr(class_name, *args, **kwargs):
    """
    Generate a repr string.

    Positional arguments should be the positional arguments used to
    construct the class. Keyword arguments should consist of tuples of
    the attribute value and default. If the value is the default, then
    it won't be rendered in the output.

    Here's an example::

        def __repr__(self):
            return make_repr('MyClass', 'foo', name=(self.name, None))

    The output of this would be something like ``MyClass('foo', name='Will')``.

    """
    arguments = [repr(arg) for arg in args]
    arguments.extend(
        "{}={!r}".format(name, value)
        for name, (value, default) in sorted(kwargs.items())
        if value != default
    )
    return "{}({})".format(class_name, ", ".join(arguments))
[ "def", "_make_repr", "(", "class_name", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "arguments", "=", "[", "repr", "(", "arg", ")", "for", "arg", "in", "args", "]", "arguments", ".", "extend", "(", "\"{}={!r}\"", ".", "format", "(", "name", ",", "value", ")", "for", "name", ",", "(", "value", ",", "default", ")", "in", "sorted", "(", "kwargs", ".", "items", "(", ")", ")", "if", "value", "!=", "default", ")", "return", "\"{}({})\"", ".", "format", "(", "class_name", ",", "\", \"", ".", "join", "(", "arguments", ")", ")" ]
31.44
20.88
def _split_generators(self, dl_manager): """Returns splits.""" # Download images and annotations that come in separate archives. # Note, that the extension of archives is .tar.gz even though the actual # archives format is uncompressed tar. dl_paths = dl_manager.download_and_extract({ "images": tfds.download.Resource( url=os.path.join(_BASE_URL, "images.tar.gz"), extract_method=tfds.download.ExtractMethod.TAR), "annotations": tfds.download.Resource( url=os.path.join(_BASE_URL, "annotations.tar.gz"), extract_method=tfds.download.ExtractMethod.TAR) }) images_path_dir = os.path.join(dl_paths["images"], "images") annotations_path_dir = os.path.join(dl_paths["annotations"], "annotations") # Setup train and test splits train_split = tfds.core.SplitGenerator( name="train", num_shards=_NUM_SHARDS, gen_kwargs={ "images_dir_path": images_path_dir, "images_list_file": os.path.join(annotations_path_dir, "trainval.txt"), }, ) test_split = tfds.core.SplitGenerator( name="test", num_shards=_NUM_SHARDS, gen_kwargs={ "images_dir_path": images_path_dir, "images_list_file": os.path.join(annotations_path_dir, "test.txt") }, ) return [train_split, test_split]
[ "def", "_split_generators", "(", "self", ",", "dl_manager", ")", ":", "# Download images and annotations that come in separate archives.", "# Note, that the extension of archives is .tar.gz even though the actual", "# archives format is uncompressed tar.", "dl_paths", "=", "dl_manager", ".", "download_and_extract", "(", "{", "\"images\"", ":", "tfds", ".", "download", ".", "Resource", "(", "url", "=", "os", ".", "path", ".", "join", "(", "_BASE_URL", ",", "\"images.tar.gz\"", ")", ",", "extract_method", "=", "tfds", ".", "download", ".", "ExtractMethod", ".", "TAR", ")", ",", "\"annotations\"", ":", "tfds", ".", "download", ".", "Resource", "(", "url", "=", "os", ".", "path", ".", "join", "(", "_BASE_URL", ",", "\"annotations.tar.gz\"", ")", ",", "extract_method", "=", "tfds", ".", "download", ".", "ExtractMethod", ".", "TAR", ")", "}", ")", "images_path_dir", "=", "os", ".", "path", ".", "join", "(", "dl_paths", "[", "\"images\"", "]", ",", "\"images\"", ")", "annotations_path_dir", "=", "os", ".", "path", ".", "join", "(", "dl_paths", "[", "\"annotations\"", "]", ",", "\"annotations\"", ")", "# Setup train and test splits", "train_split", "=", "tfds", ".", "core", ".", "SplitGenerator", "(", "name", "=", "\"train\"", ",", "num_shards", "=", "_NUM_SHARDS", ",", "gen_kwargs", "=", "{", "\"images_dir_path\"", ":", "images_path_dir", ",", "\"images_list_file\"", ":", "os", ".", "path", ".", "join", "(", "annotations_path_dir", ",", "\"trainval.txt\"", ")", ",", "}", ",", ")", "test_split", "=", "tfds", ".", "core", ".", "SplitGenerator", "(", "name", "=", "\"test\"", ",", "num_shards", "=", "_NUM_SHARDS", ",", "gen_kwargs", "=", "{", "\"images_dir_path\"", ":", "images_path_dir", ",", "\"images_list_file\"", ":", "os", ".", "path", ".", "join", "(", "annotations_path_dir", ",", "\"test.txt\"", ")", "}", ",", ")", "return", "[", "train_split", ",", "test_split", "]" ]
38.421053
18.605263
def reqfile(filepath): """Turns a text file into a list (one element per line)""" result = [] import re url_re = re.compile(".+:.+#egg=(.+)") with open(filepath, "r") as f: for line in f: line = line.strip() if not line or line.startswith("#"): continue mo = url_re.match(line) if mo is not None: line = mo.group(1) result.append(line) return result
[ "def", "reqfile", "(", "filepath", ")", ":", "result", "=", "[", "]", "import", "re", "url_re", "=", "re", ".", "compile", "(", "\".+:.+#egg=(.+)\"", ")", "with", "open", "(", "filepath", ",", "\"r\"", ")", "as", "f", ":", "for", "line", "in", "f", ":", "line", "=", "line", ".", "strip", "(", ")", "if", "not", "line", "or", "line", ".", "startswith", "(", "\"#\"", ")", ":", "continue", "mo", "=", "url_re", ".", "match", "(", "line", ")", "if", "mo", "is", "not", "None", ":", "line", "=", "mo", ".", "group", "(", "1", ")", "result", ".", "append", "(", "line", ")", "return", "result" ]
30.6
12.066667
def response(self, text, response_type='ephemeral', attachments=None): """Return a response with json format :param text: the text returned to the client :param response_type: optional. When `in_channel` is assigned, both the response message and the initial message typed by the user will be shared in the channel. When `ephemeral` is assigned, the response message will be visible only to the user that issued the command. :param attachments: optional. A list of additional messages for rich response. """ from flask import jsonify if attachments is None: attachments = [] data = { 'response_type': response_type, 'text': text, 'attachments': attachments, } return jsonify(**data)
[ "def", "response", "(", "self", ",", "text", ",", "response_type", "=", "'ephemeral'", ",", "attachments", "=", "None", ")", ":", "from", "flask", "import", "jsonify", "if", "attachments", "is", "None", ":", "attachments", "=", "[", "]", "data", "=", "{", "'response_type'", ":", "response_type", ",", "'text'", ":", "text", ",", "'attachments'", ":", "attachments", ",", "}", "return", "jsonify", "(", "*", "*", "data", ")" ]
41.541667
18.291667
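For illustration, the payload such a response serializes might look like the dict below; the text and response_type values are hypothetical, and only the three keys set by the method are shown.

# Hypothetical payload for response('Deploy finished', response_type='in_channel'):
payload = {
    'response_type': 'in_channel',  # shared with the whole channel, per the docstring
    'text': 'Deploy finished',
    'attachments': [],              # default when no attachments are passed
}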
def plot_channel_sweep(proxy, start_channel): ''' Parameters ---------- proxy : DMFControlBoard start_channel : int Channel number from which to start a channel sweep (should be a multiple of 40, e.g., 0, 40, 80). Returns ------- pandas.DataFrame See description of return of :func:`sweep_channels`. ''' test_loads = TEST_LOADS.copy() test_loads.index += start_channel results = sweep_channels(proxy, test_loads) normalized_measurements = (results['measured capacitance'] / results['expected capacitance']) fig, axis = plt.subplots(figsize=(10, 8)) axis.bar(normalized_measurements.index - 0.3, normalized_measurements, width=0.6, edgecolor='none', facecolor='limegreen') axis.set_xlim(left=test_loads.index.min() - 0.5, right=test_loads.index.max() + 0.5) axis.set_xlabel('channel') axis.set_ylabel(r'$\frac{C_{\tt{measured}}}{C_{\tt{expected}}}$', fontsize=28) return results
[ "def", "plot_channel_sweep", "(", "proxy", ",", "start_channel", ")", ":", "test_loads", "=", "TEST_LOADS", ".", "copy", "(", ")", "test_loads", ".", "index", "+=", "start_channel", "results", "=", "sweep_channels", "(", "proxy", ",", "test_loads", ")", "normalized_measurements", "=", "(", "results", "[", "'measured capacitance'", "]", "/", "results", "[", "'expected capacitance'", "]", ")", "fig", ",", "axis", "=", "plt", ".", "subplots", "(", "figsize", "=", "(", "10", ",", "8", ")", ")", "axis", ".", "bar", "(", "normalized_measurements", ".", "index", "-", "0.3", ",", "normalized_measurements", ",", "width", "=", "0.6", ",", "edgecolor", "=", "'none'", ",", "facecolor", "=", "'limegreen'", ")", "axis", ".", "set_xlim", "(", "left", "=", "test_loads", ".", "index", ".", "min", "(", ")", "-", "0.5", ",", "right", "=", "test_loads", ".", "index", ".", "max", "(", ")", "+", "0.5", ")", "axis", ".", "set_xlabel", "(", "'channel'", ")", "axis", ".", "set_ylabel", "(", "r'$\\frac{C_{\\tt{measured}}}{C_{\\tt{expected}}}$'", ",", "fontsize", "=", "28", ")", "return", "results" ]
35.62069
20.103448
def logging_level_verbosity(logging_verbosity):
  """Converts logging_verbosity into TensorFlow logging verbosity value

  Args:
    logging_verbosity: String value representing logging level: 'DEBUG', 'INFO',
      'WARN', 'ERROR', 'FATAL'
  """
  name_to_level = {
      'FATAL': tf.logging.FATAL,
      'ERROR': tf.logging.ERROR,
      'WARN': tf.logging.WARN,
      'INFO': tf.logging.INFO,
      'DEBUG': tf.logging.DEBUG
  }
  try:
    return name_to_level[logging_verbosity]
  except Exception as e:
    raise RuntimeError('Not supported logs verbosity (%s). Use one of %s.' %
                       (str(e), list(name_to_level)))
[ "def", "logging_level_verbosity", "(", "logging_verbosity", ")", ":", "name_to_level", "=", "{", "'FATAL'", ":", "tf", ".", "logging", ".", "FATAL", ",", "'ERROR'", ":", "tf", ".", "logging", ".", "ERROR", ",", "'WARN'", ":", "tf", ".", "logging", ".", "WARN", ",", "'INFO'", ":", "tf", ".", "logging", ".", "INFO", ",", "'DEBUG'", ":", "tf", ".", "logging", ".", "DEBUG", "}", "try", ":", "return", "name_to_level", "[", "logging_verbosity", "]", "except", "Exception", "as", "e", ":", "raise", "RuntimeError", "(", "'Not supported logs verbosity (%s). Use one of %s.'", "%", "(", "str", "(", "e", ")", ",", "list", "(", "name_to_level", ")", ")", ")" ]
29.95
19.15
def get_tags(): """get tags.""" tags = getattr(flask.g, 'bukudb', get_bukudb()).get_tag_all() result = { 'tags': tags[0] } if request.path.startswith('/api/'): res = jsonify(result) else: res = render_template('bukuserver/tags.html', result=result) return res
[ "def", "get_tags", "(", ")", ":", "tags", "=", "getattr", "(", "flask", ".", "g", ",", "'bukudb'", ",", "get_bukudb", "(", ")", ")", ".", "get_tag_all", "(", ")", "result", "=", "{", "'tags'", ":", "tags", "[", "0", "]", "}", "if", "request", ".", "path", ".", "startswith", "(", "'/api/'", ")", ":", "res", "=", "jsonify", "(", "result", ")", "else", ":", "res", "=", "render_template", "(", "'bukuserver/tags.html'", ",", "result", "=", "result", ")", "return", "res" ]
27.363636
20.363636
def for_model(self, fn): """Apply the given function to a single model replica. Returns: Result from applying the function. """ return ray.get(self.workers[0].for_model.remote(fn))
[ "def", "for_model", "(", "self", ",", "fn", ")", ":", "return", "ray", ".", "get", "(", "self", ".", "workers", "[", "0", "]", ".", "for_model", ".", "remote", "(", "fn", ")", ")" ]
31.285714
15.142857
def put(self, data, **kwargs): """Put data in GridFS as a new file. Equivalent to doing:: try: f = new_file(**kwargs) f.write(data) finally: f.close() `data` can be either an instance of :class:`str` (:class:`bytes` in python 3) or a file-like object providing a :meth:`read` method. If an `encoding` keyword argument is passed, `data` can also be a :class:`unicode` (:class:`str` in python 3) instance, which will be encoded as `encoding` before being written. Any keyword arguments will be passed through to the created file - see :meth:`~gridfs.grid_file.GridIn` for possible arguments. Returns the ``"_id"`` of the created file. If the ``"_id"`` of the file is manually specified, it must not already exist in GridFS. Otherwise :class:`~gridfs.errors.FileExists` is raised. :Parameters: - `data`: data to be written as a file. - `**kwargs` (optional): keyword arguments for file creation .. versionchanged:: 3.0 w=0 writes to GridFS are now prohibited. """ grid_file = GridIn(self.__collection, **kwargs) try: grid_file.write(data) finally: grid_file.close() return grid_file._id
[ "def", "put", "(", "self", ",", "data", ",", "*", "*", "kwargs", ")", ":", "grid_file", "=", "GridIn", "(", "self", ".", "__collection", ",", "*", "*", "kwargs", ")", "try", ":", "grid_file", ".", "write", "(", "data", ")", "finally", ":", "grid_file", ".", "close", "(", ")", "return", "grid_file", ".", "_id" ]
35
21.763158
def awd_lstm_lm_1150(dataset_name=None, vocab=None, pretrained=False, ctx=cpu(), root=os.path.join(get_home_dir(), 'models'), **kwargs): r"""3-layer LSTM language model with weight-drop, variational dropout, and tied weights. Embedding size is 400, and hidden layer size is 1150. Parameters ---------- dataset_name : str or None, default None The dataset name on which the pre-trained model is trained. Options are 'wikitext-2'. If specified, then the returned vocabulary is extracted from the training set of the dataset. If None, then vocab is required, for specifying embedding weight size, and is directly returned. The pre-trained model achieves 73.32/69.74 ppl on Val and Test of wikitext-2 respectively. vocab : gluonnlp.Vocab or None, default None Vocab object to be used with the language model. Required when dataset_name is not specified. pretrained : bool, default False Whether to load the pre-trained weights for model. ctx : Context, default CPU The context in which to load the pre-trained weights. root : str, default '$MXNET_HOME/models' Location for keeping the model parameters. MXNET_HOME defaults to '~/.mxnet'. Returns ------- gluon.Block, gluonnlp.Vocab """ predefined_args = {'embed_size': 400, 'hidden_size': 1150, 'mode': 'lstm', 'num_layers': 3, 'tie_weights': True, 'dropout': 0.4, 'weight_drop': 0.5, 'drop_h': 0.2, 'drop_i': 0.65, 'drop_e': 0.1} mutable_args = frozenset(['dropout', 'weight_drop', 'drop_h', 'drop_i', 'drop_e']) assert all((k not in kwargs or k in mutable_args) for k in predefined_args), \ 'Cannot override predefined model settings.' predefined_args.update(kwargs) return _get_rnn_model(AWDRNN, 'awd_lstm_lm_1150', dataset_name, vocab, pretrained, ctx, root, **predefined_args)
[ "def", "awd_lstm_lm_1150", "(", "dataset_name", "=", "None", ",", "vocab", "=", "None", ",", "pretrained", "=", "False", ",", "ctx", "=", "cpu", "(", ")", ",", "root", "=", "os", ".", "path", ".", "join", "(", "get_home_dir", "(", ")", ",", "'models'", ")", ",", "*", "*", "kwargs", ")", ":", "predefined_args", "=", "{", "'embed_size'", ":", "400", ",", "'hidden_size'", ":", "1150", ",", "'mode'", ":", "'lstm'", ",", "'num_layers'", ":", "3", ",", "'tie_weights'", ":", "True", ",", "'dropout'", ":", "0.4", ",", "'weight_drop'", ":", "0.5", ",", "'drop_h'", ":", "0.2", ",", "'drop_i'", ":", "0.65", ",", "'drop_e'", ":", "0.1", "}", "mutable_args", "=", "frozenset", "(", "[", "'dropout'", ",", "'weight_drop'", ",", "'drop_h'", ",", "'drop_i'", ",", "'drop_e'", "]", ")", "assert", "all", "(", "(", "k", "not", "in", "kwargs", "or", "k", "in", "mutable_args", ")", "for", "k", "in", "predefined_args", ")", ",", "'Cannot override predefined model settings.'", "predefined_args", ".", "update", "(", "kwargs", ")", "return", "_get_rnn_model", "(", "AWDRNN", ",", "'awd_lstm_lm_1150'", ",", "dataset_name", ",", "vocab", ",", "pretrained", ",", "ctx", ",", "root", ",", "*", "*", "predefined_args", ")" ]
46.043478
18.456522
def clone(self): """ Create a complete copy of self. :returns: A MaterialPackage that is identical to self. """ result = copy.copy(self) result.size_class_masses = copy.deepcopy(self.size_class_masses) return result
[ "def", "clone", "(", "self", ")", ":", "result", "=", "copy", ".", "copy", "(", "self", ")", "result", ".", "size_class_masses", "=", "copy", ".", "deepcopy", "(", "self", ".", "size_class_masses", ")", "return", "result" ]
26.4
18.6
def abs_horz_pos(self, amount):
        '''Calling this function sets the absolute print position for the
        next data; this is the position from the left margin.

        Args:
            amount: desired positioning. Can be a number from 0 to 2362.
                The actual positioning is calculated as (amount/60) inches
                from the left margin.
        Returns:
            None
        Raises:
            None
        '''
        n1 = amount%256
        n2 = amount/256
        self.send(chr(27)+'${n1}{n2}'.format(n1=chr(n1), n2=chr(n2)))
[ "def", "abs_horz_pos", "(", "self", ",", "amount", ")", ":", "n1", "=", "amount", "%", "256", "n2", "=", "amount", "/", "256", "self", ".", "send", "(", "chr", "(", "27", ")", "+", "'${n1}{n2}'", ".", "format", "(", "n1", "=", "chr", "(", "n1", ")", ",", "n2", "=", "chr", "(", "n2", ")", ")", ")" ]
35.866667
26.266667
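A worked example of the byte math above, with a hypothetical amount; integer division is assumed, as under Python 2 where this printer code appears to originate.

amount = 600                    # 600/60 = 10 inches from the left margin
n1, n2 = amount % 256, amount // 256
assert (n1, n2) == (88, 2)      # ESC '$' is followed by these two bytes (low byte, high byte)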
def kill_conditional_comments(self, doc): """ IE conditional comments basically embed HTML that the parser doesn't normally see. We can't allow anything like that, so we'll kill any comments that could be conditional. """ bad = [] self._kill_elements( doc, lambda el: _conditional_comment_re.search(el.text), etree.Comment)
[ "def", "kill_conditional_comments", "(", "self", ",", "doc", ")", ":", "bad", "=", "[", "]", "self", ".", "_kill_elements", "(", "doc", ",", "lambda", "el", ":", "_conditional_comment_re", ".", "search", "(", "el", ".", "text", ")", ",", "etree", ".", "Comment", ")" ]
39.5
15.3
def plot_cells(cell_1, cell_2, cell_3): """Plots three cells""" fig, ((ax1, ax2, ax3)) = plt.subplots(1, 3, figsize=(12, 5)) for ax in [ax1, ax2, ax3]: ax.grid(False) ax.set_xticks([]) ax.set_yticks([]) ax1.set_title("Type 1") ax1.imshow(cell_1) ax2.set_title("Type 2") ax2.imshow(cell_2) ax3.set_title("Type 3") ax3.imshow(cell_3) return ax1, ax2, ax3
[ "def", "plot_cells", "(", "cell_1", ",", "cell_2", ",", "cell_3", ")", ":", "fig", ",", "(", "(", "ax1", ",", "ax2", ",", "ax3", ")", ")", "=", "plt", ".", "subplots", "(", "1", ",", "3", ",", "figsize", "=", "(", "12", ",", "5", ")", ")", "for", "ax", "in", "[", "ax1", ",", "ax2", ",", "ax3", "]", ":", "ax", ".", "grid", "(", "False", ")", "ax", ".", "set_xticks", "(", "[", "]", ")", "ax", ".", "set_yticks", "(", "[", "]", ")", "ax1", ".", "set_title", "(", "\"Type 1\"", ")", "ax1", ".", "imshow", "(", "cell_1", ")", "ax2", ".", "set_title", "(", "\"Type 2\"", ")", "ax2", ".", "imshow", "(", "cell_2", ")", "ax3", ".", "set_title", "(", "\"Type 3\"", ")", "ax3", ".", "imshow", "(", "cell_3", ")", "return", "ax1", ",", "ax2", ",", "ax3" ]
28.785714
13.714286
def _format_years(years): """Format a list of ints into a string including ranges Source: https://stackoverflow.com/a/9471386/1307974 """ def sub(x): return x[1] - x[0] ranges = [] for k, iterable in groupby(enumerate(sorted(years)), sub): rng = list(iterable) if len(rng) == 1: s = str(rng[0][1]) else: s = "{}-{}".format(rng[0][1], rng[-1][1]) ranges.append(s) return ", ".join(ranges)
[ "def", "_format_years", "(", "years", ")", ":", "def", "sub", "(", "x", ")", ":", "return", "x", "[", "1", "]", "-", "x", "[", "0", "]", "ranges", "=", "[", "]", "for", "k", ",", "iterable", "in", "groupby", "(", "enumerate", "(", "sorted", "(", "years", ")", ")", ",", "sub", ")", ":", "rng", "=", "list", "(", "iterable", ")", "if", "len", "(", "rng", ")", "==", "1", ":", "s", "=", "str", "(", "rng", "[", "0", "]", "[", "1", "]", ")", "else", ":", "s", "=", "\"{}-{}\"", ".", "format", "(", "rng", "[", "0", "]", "[", "1", "]", ",", "rng", "[", "-", "1", "]", "[", "1", "]", ")", "ranges", ".", "append", "(", "s", ")", "return", "\", \"", ".", "join", "(", "ranges", ")" ]
27.352941
17.705882
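A standalone sketch of the grouping trick used above: consecutive years share a constant (value - index), so groupby splits the sorted list into runs. The input years are made up.

from itertools import groupby

years = [2005, 1999, 2000, 2001, 2007, 2008]
groups = [list(g) for _, g in groupby(enumerate(sorted(years)), lambda x: x[1] - x[0])]
labels = [str(g[0][1]) if len(g) == 1 else "{}-{}".format(g[0][1], g[-1][1]) for g in groups]
print(", ".join(labels))  # -> 1999-2001, 2005, 2007-2008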
def __load_unique_identities(self, uidentities, matcher, match_new, reset, verbose): """Load unique identities""" self.new_uids.clear() n = 0 if reset: self.__reset_unique_identities() self.log("Loading unique identities...") for uidentity in uidentities: self.log("\n=====", verbose) self.log("+ Processing %s" % uidentity.uuid, verbose) try: stored_uuid = self.__load_unique_identity(uidentity, verbose) except LoadError as e: self.error("%s Skipping." % str(e)) self.log("=====", verbose) continue stored_uuid = self.__load_identities(uidentity.identities, stored_uuid, verbose) try: self.__load_profile(uidentity.profile, stored_uuid, verbose) except Exception as e: self.error("%s. Loading %s profile. Skipping profile." % (str(e), stored_uuid)) self.__load_enrollments(uidentity.enrollments, stored_uuid, verbose) if matcher and (not match_new or stored_uuid in self.new_uids): stored_uuid = self._merge_on_matching(stored_uuid, matcher, verbose) self.log("+ %s (old %s) loaded" % (stored_uuid, uidentity.uuid), verbose) self.log("=====", verbose) n += 1 self.log("%d/%d unique identities loaded" % (n, len(uidentities)))
[ "def", "__load_unique_identities", "(", "self", ",", "uidentities", ",", "matcher", ",", "match_new", ",", "reset", ",", "verbose", ")", ":", "self", ".", "new_uids", ".", "clear", "(", ")", "n", "=", "0", "if", "reset", ":", "self", ".", "__reset_unique_identities", "(", ")", "self", ".", "log", "(", "\"Loading unique identities...\"", ")", "for", "uidentity", "in", "uidentities", ":", "self", ".", "log", "(", "\"\\n=====\"", ",", "verbose", ")", "self", ".", "log", "(", "\"+ Processing %s\"", "%", "uidentity", ".", "uuid", ",", "verbose", ")", "try", ":", "stored_uuid", "=", "self", ".", "__load_unique_identity", "(", "uidentity", ",", "verbose", ")", "except", "LoadError", "as", "e", ":", "self", ".", "error", "(", "\"%s Skipping.\"", "%", "str", "(", "e", ")", ")", "self", ".", "log", "(", "\"=====\"", ",", "verbose", ")", "continue", "stored_uuid", "=", "self", ".", "__load_identities", "(", "uidentity", ".", "identities", ",", "stored_uuid", ",", "verbose", ")", "try", ":", "self", ".", "__load_profile", "(", "uidentity", ".", "profile", ",", "stored_uuid", ",", "verbose", ")", "except", "Exception", "as", "e", ":", "self", ".", "error", "(", "\"%s. Loading %s profile. Skipping profile.\"", "%", "(", "str", "(", "e", ")", ",", "stored_uuid", ")", ")", "self", ".", "__load_enrollments", "(", "uidentity", ".", "enrollments", ",", "stored_uuid", ",", "verbose", ")", "if", "matcher", "and", "(", "not", "match_new", "or", "stored_uuid", "in", "self", ".", "new_uids", ")", ":", "stored_uuid", "=", "self", ".", "_merge_on_matching", "(", "stored_uuid", ",", "matcher", ",", "verbose", ")", "self", ".", "log", "(", "\"+ %s (old %s) loaded\"", "%", "(", "stored_uuid", ",", "uidentity", ".", "uuid", ")", ",", "verbose", ")", "self", ".", "log", "(", "\"=====\"", ",", "verbose", ")", "n", "+=", "1", "self", ".", "log", "(", "\"%d/%d unique identities loaded\"", "%", "(", "n", ",", "len", "(", "uidentities", ")", ")", ")" ]
35.608696
24.173913
def get_sngl_bank_chisqs(self, instruments=None): """ Get the single-detector \chi^2 for each row in the table. """ if len(self) and instruments is None: instruments = map(str, \ instrument_set_from_ifos(self[0].ifos)) elif instruments is None: instruments = [] return dict((ifo, self.get_sngl_bank_chisq(ifo))\ for ifo in instruments)
[ "def", "get_sngl_bank_chisqs", "(", "self", ",", "instruments", "=", "None", ")", ":", "if", "len", "(", "self", ")", "and", "instruments", "is", "None", ":", "instruments", "=", "map", "(", "str", ",", "instrument_set_from_ifos", "(", "self", "[", "0", "]", ".", "ifos", ")", ")", "elif", "instruments", "is", "None", ":", "instruments", "=", "[", "]", "return", "dict", "(", "(", "ifo", ",", "self", ".", "get_sngl_bank_chisq", "(", "ifo", ")", ")", "for", "ifo", "in", "instruments", ")" ]
34.181818
9.818182
def annualization_factor(period, annualization): """ Return annualization factor from period entered or if a custom value is passed in. Parameters ---------- period : str, optional Defines the periodicity of the 'returns' data for purposes of annualizing. Value ignored if `annualization` parameter is specified. Defaults are:: 'monthly':12 'weekly': 52 'daily': 252 annualization : int, optional Used to suppress default values available in `period` to convert returns into annual returns. Value should be the annual frequency of `returns`. Returns ------- annualization_factor : float """ if annualization is None: try: factor = ANNUALIZATION_FACTORS[period] except KeyError: raise ValueError( "Period cannot be '{}'. " "Can be '{}'.".format( period, "', '".join(ANNUALIZATION_FACTORS.keys()) ) ) else: factor = annualization return factor
[ "def", "annualization_factor", "(", "period", ",", "annualization", ")", ":", "if", "annualization", "is", "None", ":", "try", ":", "factor", "=", "ANNUALIZATION_FACTORS", "[", "period", "]", "except", "KeyError", ":", "raise", "ValueError", "(", "\"Period cannot be '{}'. \"", "\"Can be '{}'.\"", ".", "format", "(", "period", ",", "\"', '\"", ".", "join", "(", "ANNUALIZATION_FACTORS", ".", "keys", "(", ")", ")", ")", ")", "else", ":", "factor", "=", "annualization", "return", "factor" ]
28.315789
20.894737
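A minimal sketch of the lookup described above, assuming the documented defaults (monthly=12, weekly=52, daily=252); the error handling of the original is omitted.

ANNUALIZATION_FACTORS = {'monthly': 12, 'weekly': 52, 'daily': 252}  # assumed defaults

def annualization_factor(period, annualization):
    # an explicit annualization overrides the period lookup
    return ANNUALIZATION_FACTORS[period] if annualization is None else annualization

assert annualization_factor('daily', None) == 252
assert annualization_factor('daily', 365) == 365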
def instance_path(cls, project, instance): """Return a fully-qualified instance string.""" return google.api_core.path_template.expand( "projects/{project}/instances/{instance}", project=project, instance=instance, )
[ "def", "instance_path", "(", "cls", ",", "project", ",", "instance", ")", ":", "return", "google", ".", "api_core", ".", "path_template", ".", "expand", "(", "\"projects/{project}/instances/{instance}\"", ",", "project", "=", "project", ",", "instance", "=", "instance", ",", ")" ]
38.571429
11.571429
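The same expansion can be reproduced directly with google.api_core; the project and instance names below are placeholders:

from google.api_core import path_template

path_template.expand(
    "projects/{project}/instances/{instance}",
    project="my-project",
    instance="my-instance",
)
# -> 'projects/my-project/instances/my-instance'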
def queue(self): """An ordered list of upcoming events. Events are named tuples with fields for: time, priority, action, arguments """ # Use heapq to sort the queue rather than using 'sorted(self._queue)'. # With heapq, two events scheduled at the same time will show in # the actual order they would be retrieved. events = self._queue[:] return map(heapq.heappop, [events]*len(events))
[ "def", "queue", "(", "self", ")", ":", "# Use heapq to sort the queue rather than using 'sorted(self._queue)'.", "# With heapq, two events scheduled at the same time will show in", "# the actual order they would be retrieved.", "events", "=", "self", ".", "_queue", "[", ":", "]", "return", "map", "(", "heapq", ".", "heappop", ",", "[", "events", "]", "*", "len", "(", "events", ")", ")" ]
45.3
14.2
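The copy-then-heappop idiom used by the queue property above, shown on a plain list of (time, priority) pairs rather than the scheduler's real event tuples:

import heapq

events = [(5, 1), (1, 2), (3, 0)]
heapq.heapify(events)
snapshot = events[:]                                    # copy so the live queue is untouched
ordered = list(map(heapq.heappop, [snapshot] * len(snapshot)))
# ordered == [(1, 2), (3, 0), (5, 1)]; events itself is unchanged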
def make_transaction(self): """Create the transaction for this RecurredCost May only be used to create the RecurredCost's initial transaction. Returns: Transaction: The created transaction, also assigned to self.transaction. None if the amount is zero. """ if self.pk: raise CannotRecreateTransactionOnRecurredCost( 'The transaction for this recurred cost has already been created. You cannot create it again.' ) amount = self.recurring_cost.get_amount(self.billing_cycle) # It is quite possible that there will be nothing to bill, in which # case we cannot create a transaction with no legs, nor can we create # legs with zero values. Therefore we don't create any transaction. if not amount: return None self.transaction = Transaction.objects.create( description='Created by recurring cost', date=self.billing_cycle.date_range.lower ) # Use the SplitManager's custom queryset's split() method to get the # amount to be billed for each split splits = self.recurring_cost.splits.all().split(amount) # Create the transaction leg for the outbound funds # (normally to an expense account) self.transaction.legs.add(Leg.objects.create( transaction=self.transaction, amount=Money(amount, self.recurring_cost.currency), account=self.recurring_cost.to_account, )) for split, split_amount in splits: # Create the transaction legs for the inbound funds # (from housemate accounts) if split_amount: self.transaction.legs.add(Leg.objects.create( transaction=self.transaction, amount=Money(split_amount * -1, self.recurring_cost.currency), account=split.from_account, )) return self.transaction
[ "def", "make_transaction", "(", "self", ")", ":", "if", "self", ".", "pk", ":", "raise", "CannotRecreateTransactionOnRecurredCost", "(", "'The transaction for this recurred cost has already been created. You cannot create it again.'", ")", "amount", "=", "self", ".", "recurring_cost", ".", "get_amount", "(", "self", ".", "billing_cycle", ")", "# It is quite possible that there will be nothing to bill, in which", "# case we cannot create a transaction with no legs, nor can we create", "# legs with zero values. Therefore we don't create any transaction.", "if", "not", "amount", ":", "return", "None", "self", ".", "transaction", "=", "Transaction", ".", "objects", ".", "create", "(", "description", "=", "'Created by recurring cost'", ",", "date", "=", "self", ".", "billing_cycle", ".", "date_range", ".", "lower", ")", "# Use the SplitManager's custom queryset's split() method to get the", "# amount to be billed for each split", "splits", "=", "self", ".", "recurring_cost", ".", "splits", ".", "all", "(", ")", ".", "split", "(", "amount", ")", "# Create the transaction leg for the outbound funds", "# (normally to an expense account)", "self", ".", "transaction", ".", "legs", ".", "add", "(", "Leg", ".", "objects", ".", "create", "(", "transaction", "=", "self", ".", "transaction", ",", "amount", "=", "Money", "(", "amount", ",", "self", ".", "recurring_cost", ".", "currency", ")", ",", "account", "=", "self", ".", "recurring_cost", ".", "to_account", ",", ")", ")", "for", "split", ",", "split_amount", "in", "splits", ":", "# Create the transaction legs for the inbound funds", "# (from housemate accounts)", "if", "split_amount", ":", "self", ".", "transaction", ".", "legs", ".", "add", "(", "Leg", ".", "objects", ".", "create", "(", "transaction", "=", "self", ".", "transaction", ",", "amount", "=", "Money", "(", "split_amount", "*", "-", "1", ",", "self", ".", "recurring_cost", ".", "currency", ")", ",", "account", "=", "split", ".", "from_account", ",", ")", ")", "return", "self", ".", "transaction" ]
40.061224
24.22449
def write(self, data): """Write data. Args: data: actual data yielded from handler. Type is writer-specific. """ ctx = context.get() if len(data) != 2: logging.error("Got bad tuple of length %d (2-tuple expected): %s", len(data), data) try: key = str(data[0]) value = str(data[1]) except TypeError: logging.error("Expecting a tuple, but got %s: %s", data.__class__.__name__, data) file_index = key.__hash__() % len(self._filehandles) # Work-around: Since we don't have access to the context in the to_json() # function, but we need to flush each pool before we serialize the # filehandle, we rely on a member variable instead of using context for # pool management. pool = self._pools[file_index] if pool is None: filehandle = self._filehandles[file_index] pool = output_writers.GCSRecordsPool(filehandle=filehandle, ctx=ctx) self._pools[file_index] = pool proto = kv_pb.KeyValue() proto.set_key(key) proto.set_value(value) pool.append(proto.Encode())
[ "def", "write", "(", "self", ",", "data", ")", ":", "ctx", "=", "context", ".", "get", "(", ")", "if", "len", "(", "data", ")", "!=", "2", ":", "logging", ".", "error", "(", "\"Got bad tuple of length %d (2-tuple expected): %s\"", ",", "len", "(", "data", ")", ",", "data", ")", "try", ":", "key", "=", "str", "(", "data", "[", "0", "]", ")", "value", "=", "str", "(", "data", "[", "1", "]", ")", "except", "TypeError", ":", "logging", ".", "error", "(", "\"Expecting a tuple, but got %s: %s\"", ",", "data", ".", "__class__", ".", "__name__", ",", "data", ")", "file_index", "=", "key", ".", "__hash__", "(", ")", "%", "len", "(", "self", ".", "_filehandles", ")", "# Work-around: Since we don't have access to the context in the to_json()", "# function, but we need to flush each pool before we serialize the", "# filehandle, we rely on a member variable instead of using context for", "# pool management.", "pool", "=", "self", ".", "_pools", "[", "file_index", "]", "if", "pool", "is", "None", ":", "filehandle", "=", "self", ".", "_filehandles", "[", "file_index", "]", "pool", "=", "output_writers", ".", "GCSRecordsPool", "(", "filehandle", "=", "filehandle", ",", "ctx", "=", "ctx", ")", "self", ".", "_pools", "[", "file_index", "]", "=", "pool", "proto", "=", "kv_pb", ".", "KeyValue", "(", ")", "proto", ".", "set_key", "(", "key", ")", "proto", ".", "set_value", "(", "value", ")", "pool", ".", "append", "(", "proto", ".", "Encode", "(", ")", ")" ]
31.852941
21.117647
def is_random_accessible(self): """ Check if self._is_random_accessible is set to true and if all the random access strategies are implemented. Returns ------- bool : Returns True if random accessible via strategies and False otherwise. """ return self._is_random_accessible and \ not isinstance(self.ra_itraj_cuboid, NotImplementedRandomAccessStrategy) and \ not isinstance(self.ra_linear, NotImplementedRandomAccessStrategy) and \ not isinstance(self.ra_itraj_jagged, NotImplementedRandomAccessStrategy) and \ not isinstance(self.ra_itraj_linear, NotImplementedRandomAccessStrategy)
[ "def", "is_random_accessible", "(", "self", ")", ":", "return", "self", ".", "_is_random_accessible", "and", "not", "isinstance", "(", "self", ".", "ra_itraj_cuboid", ",", "NotImplementedRandomAccessStrategy", ")", "and", "not", "isinstance", "(", "self", ".", "ra_linear", ",", "NotImplementedRandomAccessStrategy", ")", "and", "not", "isinstance", "(", "self", ".", "ra_itraj_jagged", ",", "NotImplementedRandomAccessStrategy", ")", "and", "not", "isinstance", "(", "self", ".", "ra_itraj_linear", ",", "NotImplementedRandomAccessStrategy", ")" ]
57.416667
32.083333
def list_all_refund_operations(cls, **kwargs): """List RefundOperations Return a list of RefundOperations This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async=True >>> thread = api.list_all_refund_operations(async=True) >>> result = thread.get() :param async bool :param int page: page number :param int size: page size :param str sort: page order :return: page[RefundOperation] If the method is called asynchronously, returns the request thread. """ kwargs['_return_http_data_only'] = True if kwargs.get('async'): return cls._list_all_refund_operations_with_http_info(**kwargs) else: (data) = cls._list_all_refund_operations_with_http_info(**kwargs) return data
[ "def", "list_all_refund_operations", "(", "cls", ",", "*", "*", "kwargs", ")", ":", "kwargs", "[", "'_return_http_data_only'", "]", "=", "True", "if", "kwargs", ".", "get", "(", "'async'", ")", ":", "return", "cls", ".", "_list_all_refund_operations_with_http_info", "(", "*", "*", "kwargs", ")", "else", ":", "(", "data", ")", "=", "cls", ".", "_list_all_refund_operations_with_http_info", "(", "*", "*", "kwargs", ")", "return", "data" ]
38.782609
15.347826
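A hedged usage sketch of the sync/async dispatch above; RefundOperation is an assumed class name, and because async is a reserved word on newer Pythons the flag is passed via a kwargs dict:

page = RefundOperation.list_all_refund_operations(page=1, size=20)       # synchronous call

thread = RefundOperation.list_all_refund_operations(**{'async': True})   # returns a thread
result = thread.get()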
def seed(cache_dir=CACHE_DIR, product=DEFAULT_PRODUCT, bounds=None, max_download_tiles=9, **kwargs): """Seed the DEM to given bounds. :param cache_dir: Root of the DEM cache folder. :param product: DEM product choice. :param bounds: Output bounds in 'left bottom right top' order. :param max_download_tiles: Maximum number of tiles to process. :param kwargs: Pass additional kwargs to ensure_tiles. """ datasource_root, spec = ensure_setup(cache_dir, product) ensure_tiles_names = list(spec['tile_names'](*bounds)) # FIXME: emergency hack to enforce the no-bulk-download policy if len(ensure_tiles_names) > max_download_tiles: raise RuntimeError("Too many tiles: %d. Please consult the providers' websites " "for how to bulk download tiles." % len(ensure_tiles_names)) with util.lock_tiles(datasource_root, ensure_tiles_names): ensure_tiles(datasource_root, ensure_tiles_names, **kwargs) with util.lock_vrt(datasource_root, product): util.check_call_make(datasource_root, targets=['all']) return datasource_root
[ "def", "seed", "(", "cache_dir", "=", "CACHE_DIR", ",", "product", "=", "DEFAULT_PRODUCT", ",", "bounds", "=", "None", ",", "max_download_tiles", "=", "9", ",", "*", "*", "kwargs", ")", ":", "datasource_root", ",", "spec", "=", "ensure_setup", "(", "cache_dir", ",", "product", ")", "ensure_tiles_names", "=", "list", "(", "spec", "[", "'tile_names'", "]", "(", "*", "bounds", ")", ")", "# FIXME: emergency hack to enforce the no-bulk-download policy", "if", "len", "(", "ensure_tiles_names", ")", ">", "max_download_tiles", ":", "raise", "RuntimeError", "(", "\"Too many tiles: %d. Please consult the providers' websites \"", "\"for how to bulk download tiles.\"", "%", "len", "(", "ensure_tiles_names", ")", ")", "with", "util", ".", "lock_tiles", "(", "datasource_root", ",", "ensure_tiles_names", ")", ":", "ensure_tiles", "(", "datasource_root", ",", "ensure_tiles_names", ",", "*", "*", "kwargs", ")", "with", "util", ".", "lock_vrt", "(", "datasource_root", ",", "product", ")", ":", "util", ".", "check_call_make", "(", "datasource_root", ",", "targets", "=", "[", "'all'", "]", ")", "return", "datasource_root" ]
50
23.954545
def image(self, name, x=None, y=None, w=0,h=0,type='',link=''): "Put an image on the page" if not name in self.images: #First use of image, get info if(type==''): pos=name.rfind('.') if(not pos): self.error('image file has no extension and no type was specified: '+name) type=substr(name,pos+1) type=type.lower() if(type=='jpg' or type=='jpeg'): info=self._parsejpg(name) elif(type=='png'): info=self._parsepng(name) else: #Allow for additional formats #maybe the image is not showing the correct extension, #but the header is OK, succeed_parsing = False #try all the parsing functions parsing_functions = [self._parsejpg,self._parsepng,self._parsegif] for pf in parsing_functions: try: info = pf(name) succeed_parsing = True break; except: pass #last resource if not succeed_parsing: mtd='_parse'+type if not hasattr(self,mtd): self.error('Unsupported image type: '+type) info=getattr(self, mtd)(name) mtd='_parse'+type if not hasattr(self,mtd): self.error('Unsupported image type: '+type) info=getattr(self, mtd)(name) info['i']=len(self.images)+1 self.images[name]=info else: info=self.images[name] #Automatic width and height calculation if needed if(w==0 and h==0): #Put image at 72 dpi w=info['w']/self.k h=info['h']/self.k elif(w==0): w=h*info['w']/info['h'] elif(h==0): h=w*info['h']/info['w'] # Flowing mode if y is None: if (self.y + h > self.page_break_trigger and not self.in_footer and self.accept_page_break()): #Automatic page break x = self.x self.add_page(self.cur_orientation) self.x = x y = self.y self.y += h if x is None: x = self.x self._out(sprintf('q %.2f 0 0 %.2f %.2f %.2f cm /I%d Do Q',w*self.k,h*self.k,x*self.k,(self.h-(y+h))*self.k,info['i'])) if(link): self.link(x,y,w,h,link)
[ "def", "image", "(", "self", ",", "name", ",", "x", "=", "None", ",", "y", "=", "None", ",", "w", "=", "0", ",", "h", "=", "0", ",", "type", "=", "''", ",", "link", "=", "''", ")", ":", "if", "not", "name", "in", "self", ".", "images", ":", "#First use of image, get info", "if", "(", "type", "==", "''", ")", ":", "pos", "=", "name", ".", "rfind", "(", "'.'", ")", "if", "(", "not", "pos", ")", ":", "self", ".", "error", "(", "'image file has no extension and no type was specified: '", "+", "name", ")", "type", "=", "substr", "(", "name", ",", "pos", "+", "1", ")", "type", "=", "type", ".", "lower", "(", ")", "if", "(", "type", "==", "'jpg'", "or", "type", "==", "'jpeg'", ")", ":", "info", "=", "self", ".", "_parsejpg", "(", "name", ")", "elif", "(", "type", "==", "'png'", ")", ":", "info", "=", "self", ".", "_parsepng", "(", "name", ")", "else", ":", "#Allow for additional formats", "#maybe the image is not showing the correct extension,", "#but the header is OK,", "succeed_parsing", "=", "False", "#try all the parsing functions", "parsing_functions", "=", "[", "self", ".", "_parsejpg", ",", "self", ".", "_parsepng", ",", "self", ".", "_parsegif", "]", "for", "pf", "in", "parsing_functions", ":", "try", ":", "info", "=", "pf", "(", "name", ")", "succeed_parsing", "=", "True", "break", "except", ":", "pass", "#last resource", "if", "not", "succeed_parsing", ":", "mtd", "=", "'_parse'", "+", "type", "if", "not", "hasattr", "(", "self", ",", "mtd", ")", ":", "self", ".", "error", "(", "'Unsupported image type: '", "+", "type", ")", "info", "=", "getattr", "(", "self", ",", "mtd", ")", "(", "name", ")", "mtd", "=", "'_parse'", "+", "type", "if", "not", "hasattr", "(", "self", ",", "mtd", ")", ":", "self", ".", "error", "(", "'Unsupported image type: '", "+", "type", ")", "info", "=", "getattr", "(", "self", ",", "mtd", ")", "(", "name", ")", "info", "[", "'i'", "]", "=", "len", "(", "self", ".", "images", ")", "+", "1", "self", ".", "images", "[", "name", "]", "=", "info", "else", ":", "info", "=", "self", ".", "images", "[", "name", "]", "#Automatic width and height calculation if needed", "if", "(", "w", "==", "0", "and", "h", "==", "0", ")", ":", "#Put image at 72 dpi", "w", "=", "info", "[", "'w'", "]", "/", "self", ".", "k", "h", "=", "info", "[", "'h'", "]", "/", "self", ".", "k", "elif", "(", "w", "==", "0", ")", ":", "w", "=", "h", "*", "info", "[", "'w'", "]", "/", "info", "[", "'h'", "]", "elif", "(", "h", "==", "0", ")", ":", "h", "=", "w", "*", "info", "[", "'h'", "]", "/", "info", "[", "'w'", "]", "# Flowing mode", "if", "y", "is", "None", ":", "if", "(", "self", ".", "y", "+", "h", ">", "self", ".", "page_break_trigger", "and", "not", "self", ".", "in_footer", "and", "self", ".", "accept_page_break", "(", ")", ")", ":", "#Automatic page break", "x", "=", "self", ".", "x", "self", ".", "add_page", "(", "self", ".", "cur_orientation", ")", "self", ".", "x", "=", "x", "y", "=", "self", ".", "y", "self", ".", "y", "+=", "h", "if", "x", "is", "None", ":", "x", "=", "self", ".", "x", "self", ".", "_out", "(", "sprintf", "(", "'q %.2f 0 0 %.2f %.2f %.2f cm /I%d Do Q'", ",", "w", "*", "self", ".", "k", ",", "h", "*", "self", ".", "k", ",", "x", "*", "self", ".", "k", ",", "(", "self", ".", "h", "-", "(", "y", "+", "h", ")", ")", "*", "self", ".", "k", ",", "info", "[", "'i'", "]", ")", ")", "if", "(", "link", ")", ":", "self", ".", "link", "(", "x", ",", "y", ",", "w", ",", "h", ",", "link", ")" ]
39.430769
13.738462
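Typical pyfpdf usage of the image method above; the file names are placeholders:

from fpdf import FPDF

pdf = FPDF()
pdf.add_page()
pdf.image('logo.png', x=10, y=8, w=33)   # height computed from the aspect ratio
pdf.image('chart.jpg', w=100)            # flowing mode: y taken from the current position
pdf.output('out.pdf', 'F')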
def export(self, swf, force_stroke=False): """ Exports the specified SWF to SVG. @param swf The SWF. @param force_stroke Whether to force strokes on non-stroked fills. """ self.svg = self._e.svg(version=SVG_VERSION) self.force_stroke = force_stroke self.defs = self._e.defs() self.root = self._e.g() self.svg.append(self.defs) self.svg.append(self.root) self.shape_exporter.defs = self.defs self._num_filters = 0 self.fonts = dict([(x.characterId,x) for x in swf.all_tags_of_type(TagDefineFont)]) self.fontInfos = dict([(x.characterId,x) for x in swf.all_tags_of_type(TagDefineFontInfo)]) # GO! super(SVGExporter, self).export(swf, force_stroke) # Setup svg @width, @height and @viewBox # and add the optional margin self.bounds = SVGBounds(self.svg) self.svg.set("width", "%dpx" % round(self.bounds.width)) self.svg.set("height", "%dpx" % round(self.bounds.height)) if self._margin > 0: self.bounds.grow(self._margin) vb = [self.bounds.minx, self.bounds.miny, self.bounds.width, self.bounds.height] self.svg.set("viewBox", "%s" % " ".join(map(str,vb))) # Return the SVG as StringIO return self._serialize()
[ "def", "export", "(", "self", ",", "swf", ",", "force_stroke", "=", "False", ")", ":", "self", ".", "svg", "=", "self", ".", "_e", ".", "svg", "(", "version", "=", "SVG_VERSION", ")", "self", ".", "force_stroke", "=", "force_stroke", "self", ".", "defs", "=", "self", ".", "_e", ".", "defs", "(", ")", "self", ".", "root", "=", "self", ".", "_e", ".", "g", "(", ")", "self", ".", "svg", ".", "append", "(", "self", ".", "defs", ")", "self", ".", "svg", ".", "append", "(", "self", ".", "root", ")", "self", ".", "shape_exporter", ".", "defs", "=", "self", ".", "defs", "self", ".", "_num_filters", "=", "0", "self", ".", "fonts", "=", "dict", "(", "[", "(", "x", ".", "characterId", ",", "x", ")", "for", "x", "in", "swf", ".", "all_tags_of_type", "(", "TagDefineFont", ")", "]", ")", "self", ".", "fontInfos", "=", "dict", "(", "[", "(", "x", ".", "characterId", ",", "x", ")", "for", "x", "in", "swf", ".", "all_tags_of_type", "(", "TagDefineFontInfo", ")", "]", ")", "# GO!", "super", "(", "SVGExporter", ",", "self", ")", ".", "export", "(", "swf", ",", "force_stroke", ")", "# Setup svg @width, @height and @viewBox", "# and add the optional margin", "self", ".", "bounds", "=", "SVGBounds", "(", "self", ".", "svg", ")", "self", ".", "svg", ".", "set", "(", "\"width\"", ",", "\"%dpx\"", "%", "round", "(", "self", ".", "bounds", ".", "width", ")", ")", "self", ".", "svg", ".", "set", "(", "\"height\"", ",", "\"%dpx\"", "%", "round", "(", "self", ".", "bounds", ".", "height", ")", ")", "if", "self", ".", "_margin", ">", "0", ":", "self", ".", "bounds", ".", "grow", "(", "self", ".", "_margin", ")", "vb", "=", "[", "self", ".", "bounds", ".", "minx", ",", "self", ".", "bounds", ".", "miny", ",", "self", ".", "bounds", ".", "width", ",", "self", ".", "bounds", ".", "height", "]", "self", ".", "svg", ".", "set", "(", "\"viewBox\"", ",", "\"%s\"", "%", "\" \"", ".", "join", "(", "map", "(", "str", ",", "vb", ")", ")", ")", "# Return the SVG as StringIO", "return", "self", ".", "_serialize", "(", ")" ]
39.818182
16.545455
def generate_set_partitions(set_): """Generate all of the partitions of a set. This is a helper function that utilizes the restricted growth strings from :py:func:`generate_set_partition_strings`. The partitions are returned in lexicographic order. Parameters ---------- set_ : :py:class:`Array` or other Array-like, (`m`,) The set to find the partitions of. Returns ------- partitions : list of lists of :py:class:`Array` The number of elements in the outer list is equal to the number of partitions, which is the len(`m`)^th Bell number. Each of the inner lists corresponds to a single possible partition. The length of an inner list is therefore equal to the number of blocks. Each of the arrays in an inner list is hence a block. """ set_ = scipy.asarray(set_) strings = generate_set_partition_strings(len(set_)) partitions = [] for string in strings: blocks = [] for block_num in scipy.unique(string): blocks.append(set_[string == block_num]) partitions.append(blocks) return partitions
[ "def", "generate_set_partitions", "(", "set_", ")", ":", "set_", "=", "scipy", ".", "asarray", "(", "set_", ")", "strings", "=", "generate_set_partition_strings", "(", "len", "(", "set_", ")", ")", "partitions", "=", "[", "]", "for", "string", "in", "strings", ":", "blocks", "=", "[", "]", "for", "block_num", "in", "scipy", ".", "unique", "(", "string", ")", ":", "blocks", ".", "append", "(", "set_", "[", "string", "==", "block_num", "]", ")", "partitions", ".", "append", "(", "blocks", ")", "return", "partitions" ]
36.419355
21.258065
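A quick usage sketch; generate_set_partition_strings comes from the same module, and the number of partitions equals the Bell number of the set size:

partitions = generate_set_partitions(['a', 'b', 'c'])
len(partitions)   # 5, the Bell number B(3)
# e.g. one entry holds two blocks, array(['a', 'b'], ...) and array(['c'], ...),
# i.e. the partition {a, b} | {c}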
def endpoint_get(auth=None, **kwargs): ''' Get a single endpoint CLI Example: .. code-block:: bash salt '*' keystoneng.endpoint_get id=02cffaa173b2460f98e40eda3748dae5 ''' cloud = get_operator_cloud(auth) kwargs = _clean_kwargs(**kwargs) return cloud.get_endpoint(**kwargs)
[ "def", "endpoint_get", "(", "auth", "=", "None", ",", "*", "*", "kwargs", ")", ":", "cloud", "=", "get_operator_cloud", "(", "auth", ")", "kwargs", "=", "_clean_kwargs", "(", "*", "*", "kwargs", ")", "return", "cloud", ".", "get_endpoint", "(", "*", "*", "kwargs", ")" ]
23.384615
22.153846
def p_expr_id_substr(p): """ string : ID substr """ entry = SYMBOL_TABLE.access_var(p[1], p.lineno(1), default_type=TYPE.string) p[0] = None if entry is None: return entry.accessed = True p[0] = make_strslice(p.lineno(1), entry, p[2][0], p[2][1])
[ "def", "p_expr_id_substr", "(", "p", ")", ":", "entry", "=", "SYMBOL_TABLE", ".", "access_var", "(", "p", "[", "1", "]", ",", "p", ".", "lineno", "(", "1", ")", ",", "default_type", "=", "TYPE", ".", "string", ")", "p", "[", "0", "]", "=", "None", "if", "entry", "is", "None", ":", "return", "entry", ".", "accessed", "=", "True", "p", "[", "0", "]", "=", "make_strslice", "(", "p", ".", "lineno", "(", "1", ")", ",", "entry", ",", "p", "[", "2", "]", "[", "0", "]", ",", "p", "[", "2", "]", "[", "1", "]", ")" ]
27.4
20.3
def get_plain_text_content(primary_text=None, secondary_text=None, tertiary_text=None): # type: (str, str, str) -> TextContent """Responsible for building plain text content object using ask-sdk-model in Alexa skills kit display interface. https://developer.amazon.com/docs/custom-skills/display-interface-reference.html#textcontent-object-specifications. :param primary_text: Text for primary_text field :type primary_text: (optional) str :param secondary_text: Text for secondary_text field :type secondary_text: (optional) str :param tertiary_text: Text for tertiary_text field :type tertiary_text: (optional) str :return: Text Content instance with primary, secondary and tertiary text set as Plain Text objects. :rtype: TextContent :raises: ValueError """ return get_text_content( primary_text=primary_text, primary_text_type=PLAIN_TEXT_TYPE, secondary_text=secondary_text, secondary_text_type=PLAIN_TEXT_TYPE, tertiary_text=tertiary_text, tertiary_text_type=PLAIN_TEXT_TYPE)
[ "def", "get_plain_text_content", "(", "primary_text", "=", "None", ",", "secondary_text", "=", "None", ",", "tertiary_text", "=", "None", ")", ":", "# type: (str, str, str) -> TextContent", "return", "get_text_content", "(", "primary_text", "=", "primary_text", ",", "primary_text_type", "=", "PLAIN_TEXT_TYPE", ",", "secondary_text", "=", "secondary_text", ",", "secondary_text_type", "=", "PLAIN_TEXT_TYPE", ",", "tertiary_text", "=", "tertiary_text", ",", "tertiary_text_type", "=", "PLAIN_TEXT_TYPE", ")" ]
50.142857
19.190476
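A hedged usage sketch with placeholder strings; the return value is an ask-sdk-model TextContent whose fields are PlainText objects:

text_content = get_plain_text_content(
    primary_text="Welcome",
    secondary_text="Pick an option",
)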
def _key(key=''): ''' Returns a Datastore key object, prefixed with the NAMESPACE. ''' if not isinstance(key, datastore.Key): # Switchboard uses ':' to denote one thing (parent-child) and datastore # uses it for another, so replace ':' in the datastore version of the # key. safe_key = key.replace(':', '|') key = datastore.Key(os.path.join(NAMESPACE, safe_key)) return key
[ "def", "_key", "(", "key", "=", "''", ")", ":", "if", "not", "isinstance", "(", "key", ",", "datastore", ".", "Key", ")", ":", "# Switchboard uses ':' to denote one thing (parent-child) and datastore", "# uses it for another, so replace ':' in the datastore version of the", "# key.", "safe_key", "=", "key", ".", "replace", "(", "':'", ",", "'|'", ")", "key", "=", "datastore", ".", "Key", "(", "os", ".", "path", ".", "join", "(", "NAMESPACE", ",", "safe_key", ")", ")", "return", "key" ]
38.454545
24.090909
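The key translation in isolation, assuming NAMESPACE is 'switchboard' (its real value is defined elsewhere in the module):

import os

safe_key = 'parent:child'.replace(':', '|')   # 'parent|child'
os.path.join('switchboard', safe_key)         # 'switchboard/parent|child'
# _key then wraps that path in datastore.Key(...)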
def const(const): '''Convenience wrapper to yield the value of a constant''' try: return getattr(_c, const) except AttributeError: raise FSQEnvError(errno.EINVAL, u'No such constant:'\ u' {0}'.format(const)) except TypeError: raise TypeError(errno.EINVAL, u'const name must be a string or'\ u' unicode object, not:'\ u' {0}'.format(const.__class__.__name__))
[ "def", "const", "(", "const", ")", ":", "try", ":", "return", "getattr", "(", "_c", ",", "const", ")", "except", "AttributeError", ":", "raise", "FSQEnvError", "(", "errno", ".", "EINVAL", ",", "u'No such constant:'", "u' {0}'", ".", "format", "(", "const", ")", ")", "except", "TypeError", ":", "raise", "TypeError", "(", "errno", ".", "EINVAL", ",", "u'const name must be a string or'", "u' unicode object, not:'", "u' {0}'", ".", "format", "(", "const", ".", "__class__", ".", "__name__", ")", ")" ]
42.454545
19.727273
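Usage sketch for const; FSQ_ROOT is a hypothetical constant name standing in for whatever the _c module actually defines:

const('FSQ_ROOT')        # returns _c.FSQ_ROOT
const('NO_SUCH_NAME')    # raises FSQEnvError(errno.EINVAL, 'No such constant: ...')
const(42)                # raises TypeError: const name must be a string or unicode object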
def network_lpf_contingency(network, snapshots=None, branch_outages=None): """ Computes linear power flow for a selection of branch outages. Parameters ---------- snapshots : list-like|single snapshot A subset or an elements of network.snapshots on which to run the power flow, defaults to network.snapshots NB: currently this only works for a single snapshot branch_outages : list-like A list of passive branches which are to be tested for outages. If None, it's take as all network.passive_branches_i() Returns ------- p0 : pandas.DataFrame num_passive_branch x num_branch_outages DataFrame of new power flows """ if snapshots is None: snapshots = network.snapshots if isinstance(snapshots, collections.Iterable): logger.warning("Apologies LPF contingency, this only works for single snapshots at the moment, taking the first snapshot.") snapshot = snapshots[0] else: snapshot = snapshots network.lpf(snapshot) # Store the flows from the base case passive_branches = network.passive_branches() if branch_outages is None: branch_outages = passive_branches.index p0_base = pd.Series(index=passive_branches.index) for c in network.passive_branch_components: pnl = network.pnl(c) p0_base[c] = pnl.p0.loc[snapshot] for sn in network.sub_networks.obj: sn._branches = sn.branches() sn.calculate_BODF() p0 = pd.DataFrame(index=passive_branches.index) p0["base"] = p0_base for branch in branch_outages: if type(branch) is not tuple: logger.warning("No type given for {}, assuming it is a line".format(branch)) branch = ("Line",branch) sn = network.sub_networks.obj[passive_branches.sub_network[branch]] branch_i = sn._branches.index.get_loc(branch) p0_new = p0_base + pd.Series(sn.BODF[:,branch_i]*p0_base[branch],sn._branches.index) p0[branch] = p0_new return p0
[ "def", "network_lpf_contingency", "(", "network", ",", "snapshots", "=", "None", ",", "branch_outages", "=", "None", ")", ":", "if", "snapshots", "is", "None", ":", "snapshots", "=", "network", ".", "snapshots", "if", "isinstance", "(", "snapshots", ",", "collections", ".", "Iterable", ")", ":", "logger", ".", "warning", "(", "\"Apologies LPF contingency, this only works for single snapshots at the moment, taking the first snapshot.\"", ")", "snapshot", "=", "snapshots", "[", "0", "]", "else", ":", "snapshot", "=", "snapshots", "network", ".", "lpf", "(", "snapshot", ")", "# Store the flows from the base case", "passive_branches", "=", "network", ".", "passive_branches", "(", ")", "if", "branch_outages", "is", "None", ":", "branch_outages", "=", "passive_branches", ".", "index", "p0_base", "=", "pd", ".", "Series", "(", "index", "=", "passive_branches", ".", "index", ")", "for", "c", "in", "network", ".", "passive_branch_components", ":", "pnl", "=", "network", ".", "pnl", "(", "c", ")", "p0_base", "[", "c", "]", "=", "pnl", ".", "p0", ".", "loc", "[", "snapshot", "]", "for", "sn", "in", "network", ".", "sub_networks", ".", "obj", ":", "sn", ".", "_branches", "=", "sn", ".", "branches", "(", ")", "sn", ".", "calculate_BODF", "(", ")", "p0", "=", "pd", ".", "DataFrame", "(", "index", "=", "passive_branches", ".", "index", ")", "p0", "[", "\"base\"", "]", "=", "p0_base", "for", "branch", "in", "branch_outages", ":", "if", "type", "(", "branch", ")", "is", "not", "tuple", ":", "logger", ".", "warning", "(", "\"No type given for {}, assuming it is a line\"", ".", "format", "(", "branch", ")", ")", "branch", "=", "(", "\"Line\"", ",", "branch", ")", "sn", "=", "network", ".", "sub_networks", ".", "obj", "[", "passive_branches", ".", "sub_network", "[", "branch", "]", "]", "branch_i", "=", "sn", ".", "_branches", ".", "index", ".", "get_loc", "(", "branch", ")", "p0_new", "=", "p0_base", "+", "pd", ".", "Series", "(", "sn", ".", "BODF", "[", ":", ",", "branch_i", "]", "*", "p0_base", "[", "branch", "]", ",", "sn", ".", "_branches", ".", "index", ")", "p0", "[", "branch", "]", "=", "p0_new", "return", "p0" ]
29.264706
24.647059
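A hedged PyPSA-style usage sketch; the network object and the line name are placeholders:

p0 = network_lpf_contingency(network,
                             snapshots=network.snapshots,
                             branch_outages=[("Line", "line-1")])
p0["base"]              # base-case flows on every passive branch
p0[("Line", "line-1")]  # flows after that single line outage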
def pattern_to_regex(cls, *args, **kw): """ Warn about deprecation. """ cls._deprecated() return super(GitIgnorePattern, cls).pattern_to_regex(*args, **kw)
[ "def", "pattern_to_regex", "(", "cls", ",", "*", "args", ",", "*", "*", "kw", ")", ":", "cls", ".", "_deprecated", "(", ")", "return", "super", "(", "GitIgnorePattern", ",", "cls", ")", ".", "pattern_to_regex", "(", "*", "args", ",", "*", "*", "kw", ")" ]
26.666667
10.666667
def evaluate_at(self, vals): """Evaluate the derivative at a specific point""" new_vals = self._vals.copy() new_vals.update(vals) return self.__class__(self.operand, derivs=self._derivs, vals=new_vals)
[ "def", "evaluate_at", "(", "self", ",", "vals", ")", ":", "new_vals", "=", "self", ".", "_vals", ".", "copy", "(", ")", "new_vals", ".", "update", "(", "vals", ")", "return", "self", ".", "__class__", "(", "self", ".", "operand", ",", "derivs", "=", "self", ".", "_derivs", ",", "vals", "=", "new_vals", ")" ]
45.8
13.2
def _finalize_ticks(self, axis, element, xticks, yticks, zticks): """ Apply ticks with appropriate offsets. """ yalignments = None if xticks is not None: ticks, labels, yalignments = zip(*sorted(xticks, key=lambda x: x[0])) xticks = (list(ticks), list(labels)) super(BarPlot, self)._finalize_ticks(axis, element, xticks, yticks, zticks) if yalignments: for t, y in zip(axis.get_xticklabels(), yalignments): t.set_y(y)
[ "def", "_finalize_ticks", "(", "self", ",", "axis", ",", "element", ",", "xticks", ",", "yticks", ",", "zticks", ")", ":", "yalignments", "=", "None", "if", "xticks", "is", "not", "None", ":", "ticks", ",", "labels", ",", "yalignments", "=", "zip", "(", "*", "sorted", "(", "xticks", ",", "key", "=", "lambda", "x", ":", "x", "[", "0", "]", ")", ")", "xticks", "=", "(", "list", "(", "ticks", ")", ",", "list", "(", "labels", ")", ")", "super", "(", "BarPlot", ",", "self", ")", ".", "_finalize_ticks", "(", "axis", ",", "element", ",", "xticks", ",", "yticks", ",", "zticks", ")", "if", "yalignments", ":", "for", "t", ",", "y", "in", "zip", "(", "axis", ".", "get_xticklabels", "(", ")", ",", "yalignments", ")", ":", "t", ".", "set_y", "(", "y", ")" ]
42.833333
16.833333
def add(self, tool): """ Adds a Tool to the list, logs the reference and TODO """ self.lstTools.append(tool) self.lg.record_process(self._get_tool_str(tool))
[ "def", "add", "(", "self", ",", "tool", ")", ":", "self", ".", "lstTools", ".", "append", "(", "tool", ")", "self", ".", "lg", ".", "record_process", "(", "self", ".", "_get_tool_str", "(", "tool", ")", ")" ]
32
10.333333
def drop(self, labels=None, axis=0, index=None, columns=None, level=None, inplace=False, errors='raise'): """ Drop specified labels from rows or columns. Remove rows or columns by specifying label names and corresponding axis, or by specifying directly index or column names. When using a multi-index, labels on different levels can be removed by specifying the level. Parameters ---------- labels : single label or list-like Index or column labels to drop. axis : {0 or 'index', 1 or 'columns'}, default 0 Whether to drop labels from the index (0 or 'index') or columns (1 or 'columns'). index : single label or list-like Alternative to specifying axis (``labels, axis=0`` is equivalent to ``index=labels``). .. versionadded:: 0.21.0 columns : single label or list-like Alternative to specifying axis (``labels, axis=1`` is equivalent to ``columns=labels``). .. versionadded:: 0.21.0 level : int or level name, optional For MultiIndex, level from which the labels will be removed. inplace : bool, default False If True, do operation inplace and return None. errors : {'ignore', 'raise'}, default 'raise' If 'ignore', suppress error and only existing labels are dropped. Returns ------- DataFrame DataFrame without the removed index or column labels. Raises ------ KeyError If any of the labels is not found in the selected axis. See Also -------- DataFrame.loc : Label-location based indexer for selection by label. DataFrame.dropna : Return DataFrame with labels on given axis omitted where (all or any) data are missing. DataFrame.drop_duplicates : Return DataFrame with duplicate rows removed, optionally only considering certain columns. Series.drop : Return Series with specified index labels removed. Examples -------- >>> df = pd.DataFrame(np.arange(12).reshape(3, 4), ... columns=['A', 'B', 'C', 'D']) >>> df A B C D 0 0 1 2 3 1 4 5 6 7 2 8 9 10 11 Drop columns >>> df.drop(['B', 'C'], axis=1) A D 0 0 3 1 4 7 2 8 11 >>> df.drop(columns=['B', 'C']) A D 0 0 3 1 4 7 2 8 11 Drop a row by index >>> df.drop([0, 1]) A B C D 2 8 9 10 11 Drop columns and/or rows of MultiIndex DataFrame >>> midx = pd.MultiIndex(levels=[['lama', 'cow', 'falcon'], ... ['speed', 'weight', 'length']], ... codes=[[0, 0, 0, 1, 1, 1, 2, 2, 2], ... [0, 1, 2, 0, 1, 2, 0, 1, 2]]) >>> df = pd.DataFrame(index=midx, columns=['big', 'small'], ... data=[[45, 30], [200, 100], [1.5, 1], [30, 20], ... [250, 150], [1.5, 0.8], [320, 250], ... [1, 0.8], [0.3, 0.2]]) >>> df big small lama speed 45.0 30.0 weight 200.0 100.0 length 1.5 1.0 cow speed 30.0 20.0 weight 250.0 150.0 length 1.5 0.8 falcon speed 320.0 250.0 weight 1.0 0.8 length 0.3 0.2 >>> df.drop(index='cow', columns='small') big lama speed 45.0 weight 200.0 length 1.5 falcon speed 320.0 weight 1.0 length 0.3 >>> df.drop(index='length', level=1) big small lama speed 45.0 30.0 weight 200.0 100.0 cow speed 30.0 20.0 weight 250.0 150.0 falcon speed 320.0 250.0 weight 1.0 0.8 """ return super().drop(labels=labels, axis=axis, index=index, columns=columns, level=level, inplace=inplace, errors=errors)
[ "def", "drop", "(", "self", ",", "labels", "=", "None", ",", "axis", "=", "0", ",", "index", "=", "None", ",", "columns", "=", "None", ",", "level", "=", "None", ",", "inplace", "=", "False", ",", "errors", "=", "'raise'", ")", ":", "return", "super", "(", ")", ".", "drop", "(", "labels", "=", "labels", ",", "axis", "=", "axis", ",", "index", "=", "index", ",", "columns", "=", "columns", ",", "level", "=", "level", ",", "inplace", "=", "inplace", ",", "errors", "=", "errors", ")" ]
34.566929
19.748031
def section_end_info(template, tag_key, state, index): """ Given the tag key of an opening section tag, find the corresponding closing tag (if it exists) and return information about that match. """ state.section.push(tag_key) match = None matchinfo = None search_index = index while state.section: match = state.tag_re.search(template, search_index) if not match: raise Exception("Open section %s never closed" % tag_key) matchinfo = get_match_info(template, match, state) # If we find a new section tag, add it to the stack and keep going if matchinfo['tag_type'] in ('#', '^'): state.section.push(matchinfo['tag_key']) # If we find a closing tag for the current section, 'close' it by # popping the stack elif matchinfo['tag_type'] == '/': if matchinfo['tag_key'] == state.section(): state.section.pop() else: raise Exception( 'Unexpected section end: received %s, expected {{/%s}}' % ( repr(match.group(0)), tag_key)) search_index = matchinfo['tag_end'] return matchinfo
[ "def", "section_end_info", "(", "template", ",", "tag_key", ",", "state", ",", "index", ")", ":", "state", ".", "section", ".", "push", "(", "tag_key", ")", "match", "=", "None", "matchinfo", "=", "None", "search_index", "=", "index", "while", "state", ".", "section", ":", "match", "=", "state", ".", "tag_re", ".", "search", "(", "template", ",", "search_index", ")", "if", "not", "match", ":", "raise", "Exception", "(", "\"Open section %s never closed\"", "%", "tag_key", ")", "matchinfo", "=", "get_match_info", "(", "template", ",", "match", ",", "state", ")", "# If we find a new section tag, add it to the stack and keep going", "if", "matchinfo", "[", "'tag_type'", "]", "in", "(", "'#'", ",", "'^'", ")", ":", "state", ".", "section", ".", "push", "(", "matchinfo", "[", "'tag_key'", "]", ")", "# If we find a closing tag for the current section, 'close' it by", "# popping the stack", "elif", "matchinfo", "[", "'tag_type'", "]", "==", "'/'", ":", "if", "matchinfo", "[", "'tag_key'", "]", "==", "state", ".", "section", "(", ")", ":", "state", ".", "section", ".", "pop", "(", ")", "else", ":", "raise", "Exception", "(", "'Unexpected section end: received %s, expected {{/%s}}'", "%", "(", "repr", "(", "match", ".", "group", "(", "0", ")", ")", ",", "tag_key", ")", ")", "search_index", "=", "matchinfo", "[", "'tag_end'", "]", "return", "matchinfo" ]
35.848485
20.454545
def _write_jsonl(filepath, data, kwargs): """See documentation of mpu.io.write.""" with io_stl.open(filepath, 'w', encoding='utf8') as outfile: kwargs['indent'] = None # JSON has to be on one line! if 'sort_keys' not in kwargs: kwargs['sort_keys'] = True if 'separators' not in kwargs: kwargs['separators'] = (',', ': ') if 'ensure_ascii' not in kwargs: kwargs['ensure_ascii'] = False for line in data: str_ = json.dumps(line, **kwargs) outfile.write(to_unicode(str_)) outfile.write(u'\n') return data
[ "def", "_write_jsonl", "(", "filepath", ",", "data", ",", "kwargs", ")", ":", "with", "io_stl", ".", "open", "(", "filepath", ",", "'w'", ",", "encoding", "=", "'utf8'", ")", "as", "outfile", ":", "kwargs", "[", "'indent'", "]", "=", "None", "# JSON has to be on one line!", "if", "'sort_keys'", "not", "in", "kwargs", ":", "kwargs", "[", "'sort_keys'", "]", "=", "True", "if", "'separators'", "not", "in", "kwargs", ":", "kwargs", "[", "'separators'", "]", "=", "(", "','", ",", "': '", ")", "if", "'ensure_ascii'", "not", "in", "kwargs", ":", "kwargs", "[", "'ensure_ascii'", "]", "=", "False", "for", "line", "in", "data", ":", "str_", "=", "json", ".", "dumps", "(", "line", ",", "*", "*", "kwargs", ")", "outfile", ".", "write", "(", "to_unicode", "(", "str_", ")", ")", "outfile", ".", "write", "(", "u'\\n'", ")", "return", "data" ]
40.8
7.866667
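A minimal Python 3 sketch of the same idea as _write_jsonl: one JSON document per line, written as UTF-8 text (the file name is a placeholder):

import io
import json

data = [{"id": 1, "name": "a"}, {"id": 2, "name": "b"}]
with io.open("out.jsonl", "w", encoding="utf8") as outfile:
    for line in data:
        outfile.write(json.dumps(line, sort_keys=True, separators=(',', ': '),
                                 ensure_ascii=False))
        outfile.write(u"\n")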
def regenerate_storage_keys(kwargs=None, conn=None, call=None): ''' .. versionadded:: 2015.8.0 Regenerate storage account keys. Requires a key_type ("primary" or "secondary") to be specified. CLI Example: .. code-block:: bash salt-cloud -f regenerate_storage_keys my-azure name=my_storage key_type=primary ''' if call != 'function': raise SaltCloudSystemExit( 'The show_storage function must be called with -f or --function.' ) if not conn: conn = get_conn() if kwargs is None: kwargs = {} if 'name' not in kwargs: raise SaltCloudSystemExit('A name must be specified as "name"') if 'key_type' not in kwargs or kwargs['key_type'] not in ('primary', 'secondary'): raise SaltCloudSystemExit('A key_type must be specified ("primary" or "secondary")') try: data = conn.regenerate_storage_account_keys( service_name=kwargs['name'], key_type=kwargs['key_type'], ) return show_storage_keys(kwargs={'name': kwargs['name']}, call='function') except AzureConflictHttpError: raise SaltCloudSystemExit('There was a conflict. This usually means that the storage account already exists.')
[ "def", "regenerate_storage_keys", "(", "kwargs", "=", "None", ",", "conn", "=", "None", ",", "call", "=", "None", ")", ":", "if", "call", "!=", "'function'", ":", "raise", "SaltCloudSystemExit", "(", "'The show_storage function must be called with -f or --function.'", ")", "if", "not", "conn", ":", "conn", "=", "get_conn", "(", ")", "if", "kwargs", "is", "None", ":", "kwargs", "=", "{", "}", "if", "'name'", "not", "in", "kwargs", ":", "raise", "SaltCloudSystemExit", "(", "'A name must be specified as \"name\"'", ")", "if", "'key_type'", "not", "in", "kwargs", "or", "kwargs", "[", "'key_type'", "]", "not", "in", "(", "'primary'", ",", "'secondary'", ")", ":", "raise", "SaltCloudSystemExit", "(", "'A key_type must be specified (\"primary\" or \"secondary\")'", ")", "try", ":", "data", "=", "conn", ".", "regenerate_storage_account_keys", "(", "service_name", "=", "kwargs", "[", "'name'", "]", ",", "key_type", "=", "kwargs", "[", "'key_type'", "]", ",", ")", "return", "show_storage_keys", "(", "kwargs", "=", "{", "'name'", ":", "kwargs", "[", "'name'", "]", "}", ",", "call", "=", "'function'", ")", "except", "AzureConflictHttpError", ":", "raise", "SaltCloudSystemExit", "(", "'There was a conflict. This usually means that the storage account already exists.'", ")" ]
32.236842
28.710526
def _sitesettings_files(): """ Get a list of sitesettings files settings.py can be prefixed with a subdomain and underscore so with example.com site: sitesettings/settings.py would be the example.com settings file and sitesettings/admin_settings.py would be the admin.example.com settings file """ settings_files = [] sitesettings_path = os.path.join(env.project_package_name,'sitesettings') if os.path.exists(sitesettings_path): sitesettings = os.listdir(sitesettings_path) for file in sitesettings: if file == 'settings.py': settings_files.append(file) elif len(file)>12 and file[-12:]=='_settings.py': #prefixed settings settings_files.append(file) return settings_files
[ "def", "_sitesettings_files", "(", ")", ":", "settings_files", "=", "[", "]", "sitesettings_path", "=", "os", ".", "path", ".", "join", "(", "env", ".", "project_package_name", ",", "'sitesettings'", ")", "if", "os", ".", "path", ".", "exists", "(", "sitesettings_path", ")", ":", "sitesettings", "=", "os", ".", "listdir", "(", "sitesettings_path", ")", "for", "file", "in", "sitesettings", ":", "if", "file", "==", "'settings.py'", ":", "settings_files", ".", "append", "(", "file", ")", "elif", "len", "(", "file", ")", ">", "12", "and", "file", "[", "-", "12", ":", "]", "==", "'_settings.py'", ":", "#prefixed settings", "settings_files", ".", "append", "(", "file", ")", "return", "settings_files" ]
42.944444
17.277778
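The filename filter from _sitesettings_files in isolation, with made-up directory contents:

sitesettings = ['settings.py', 'admin_settings.py', '__init__.py', 'settings.pyc']
matched = [f for f in sitesettings
           if f == 'settings.py' or (len(f) > 12 and f[-12:] == '_settings.py')]
# matched == ['settings.py', 'admin_settings.py']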