Columns:
    text: string, lengths 89 to 104k
    code_tokens: list
    avg_line_len: float64, 7.91 to 980
    score: float64, 0 to 630
def _agate_to_schema(self, agate_table, column_override):
    """Convert agate.Table with column names to a list of bigquery schemas."""
    bq_schema = []
    for idx, col_name in enumerate(agate_table.column_names):
        inferred_type = self.convert_agate_type(agate_table, idx)
        type_ = column_override.get(col_name, inferred_type)
        bq_schema.append(
            google.cloud.bigquery.SchemaField(col_name, type_)
        )
    return bq_schema
[ "def", "_agate_to_schema", "(", "self", ",", "agate_table", ",", "column_override", ")", ":", "bq_schema", "=", "[", "]", "for", "idx", ",", "col_name", "in", "enumerate", "(", "agate_table", ".", "column_names", ")", ":", "inferred_type", "=", "self", ".", "convert_agate_type", "(", "agate_table", ",", "idx", ")", "type_", "=", "column_override", ".", "get", "(", "col_name", ",", "inferred_type", ")", "bq_schema", ".", "append", "(", "google", ".", "cloud", ".", "bigquery", ".", "SchemaField", "(", "col_name", ",", "type_", ")", ")", "return", "bq_schema" ]
45.363636
17.545455
def _find_model(self, constructor, table_name, constraints=None, *,
                columns=None, order_by=None):
    """Calls DataAccess.find and passes the results to the given constructor."""
    data = self.find(table_name, constraints,
                     columns=columns, order_by=order_by)
    return constructor(data) if data else None
[ "def", "_find_model", "(", "self", ",", "constructor", ",", "table_name", ",", "constraints", "=", "None", ",", "*", ",", "columns", "=", "None", ",", "order_by", "=", "None", ")", ":", "data", "=", "self", ".", "find", "(", "table_name", ",", "constraints", ",", "columns", "=", "columns", ",", "order_by", "=", "order_by", ")", "return", "constructor", "(", "data", ")", "if", "data", "else", "None" ]
76
26
def listDF(option='mostactive', token='', version=''):
    '''Returns an array of quotes for the top 10 symbols in a specified list.

    https://iexcloud.io/docs/api/#list
    Updated intraday

    Args:
        option (string): Option to query
        token (string): Access token
        version (string): API version

    Returns:
        DataFrame: result
    '''
    # NB: `list` here is the sibling API function in this module, not the builtin.
    df = pd.DataFrame(list(option, token, version))
    _toDatetime(df)
    _reindex(df, 'symbol')
    return df
[ "def", "listDF", "(", "option", "=", "'mostactive'", ",", "token", "=", "''", ",", "version", "=", "''", ")", ":", "df", "=", "pd", ".", "DataFrame", "(", "list", "(", "option", ",", "token", ",", "version", ")", ")", "_toDatetime", "(", "df", ")", "_reindex", "(", "df", ",", "'symbol'", ")", "return", "df" ]
24.421053
22.105263
def get_weighted_random_index(self, weights):
    """Return a random index of an array, where the probability of choosing
    each index is proportional to its weight.

    :param weights: a list of floats for how to weight each index [w1, w2, ... wN]
    :type weights: list
    :return: index
    :rtype: int
    """
    tot = float(sum([float(x) for x in weights]))
    # Build the cumulative sum of the weights.
    fracarray = [weights[0]]
    for w in weights[1:]:
        prev = fracarray[-1]
        fracarray.append(w + prev)
    # Draw in [0, tot) and return the first cumulative bucket that covers it.
    rnum = self._random.random() * tot
    for i in range(len(weights)):
        if rnum < fracarray[i]:
            return i
    sys.stderr.write("Warning: unexpectedly failed to select a random index\n")
[ "def", "get_weighted_random_index", "(", "self", ",", "weights", ")", ":", "tot", "=", "float", "(", "sum", "(", "[", "float", "(", "x", ")", "for", "x", "in", "weights", "]", ")", ")", "fracarray", "=", "[", "weights", "[", "0", "]", "]", "for", "w", "in", "weights", "[", "1", ":", "]", ":", "prev", "=", "fracarray", "[", "-", "1", "]", "fracarray", ".", "append", "(", "w", "+", "prev", ")", "#print fracarray", "rnum", "=", "self", ".", "_random", ".", "random", "(", ")", "*", "tot", "#print rnum", "#sys.exit()", "for", "i", "in", "range", "(", "len", "(", "weights", ")", ")", ":", "if", "rnum", "<", "fracarray", "[", "i", "]", ":", "return", "i", "sys", ".", "stderr", ".", "write", "(", "\"Warning unexpected no random\\n\"", ")" ]
33.333333
16.857143
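A minimal standalone sketch of the same cumulative-sum sampling technique; the names here are illustrative, not taken from the sample above:

import random

def weighted_index(weights, rng=random):
    # Build a running cumulative sum of the weights.
    total = float(sum(weights))
    cumulative, running = [], 0.0
    for w in weights:
        running += w
        cumulative.append(running)
    # Draw uniformly in [0, total) and find the first bucket that covers it.
    r = rng.random() * total
    for i, bound in enumerate(cumulative):
        if r < bound:
            return i
    return len(weights) - 1  # guard against floating-point edge cases

counts = [0, 0, 0]
for _ in range(10000):
    counts[weighted_index([0.5, 0.3, 0.2])] += 1
print(counts)  # roughly proportional to 5:3:2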
def _set_compact_flash(self, v, load=False):
    """
    Setter method for compact_flash, mapped from YANG variable
    /system_monitor/compact_flash (container)
    If this variable is read-only (config: false) in the
    source YANG file, then _set_compact_flash is considered as a private
    method. Backends looking to populate this variable should
    do so via calling thisObj._set_compact_flash() directly.
    """
    if hasattr(v, "_utype"):
        v = v._utype(v)
    try:
        t = YANGDynClass(
            v, base=compact_flash.compact_flash, is_container='container',
            presence=False, yang_name="compact-flash", rest_name="compact-flash",
            parent=self, path_helper=self._path_helper,
            extmethods=self._extmethods, register_paths=True,
            extensions={u'tailf-common': {
                u'info': u'Configure threshold for component:COMPACT-FLASH',
                u'cli-incomplete-no': None}},
            namespace='urn:brocade.com:mgmt:brocade-system-monitor',
            defining_module='brocade-system-monitor', yang_type='container',
            is_config=True)
    except (TypeError, ValueError):
        raise ValueError({
            'error-string': """compact_flash must be of a type compatible with container""",
            'defined-type': "container",
            'generated-type': """YANGDynClass(base=compact_flash.compact_flash, is_container='container', presence=False, yang_name="compact-flash", rest_name="compact-flash", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Configure threshold for component:COMPACT-FLASH', u'cli-incomplete-no': None}}, namespace='urn:brocade.com:mgmt:brocade-system-monitor', defining_module='brocade-system-monitor', yang_type='container', is_config=True)""",
        })
    self.__compact_flash = t
    if hasattr(self, '_set'):
        self._set()
[ "def", "_set_compact_flash", "(", "self", ",", "v", ",", "load", "=", "False", ")", ":", "if", "hasattr", "(", "v", ",", "\"_utype\"", ")", ":", "v", "=", "v", ".", "_utype", "(", "v", ")", "try", ":", "t", "=", "YANGDynClass", "(", "v", ",", "base", "=", "compact_flash", ".", "compact_flash", ",", "is_container", "=", "'container'", ",", "presence", "=", "False", ",", "yang_name", "=", "\"compact-flash\"", ",", "rest_name", "=", "\"compact-flash\"", ",", "parent", "=", "self", ",", "path_helper", "=", "self", ".", "_path_helper", ",", "extmethods", "=", "self", ".", "_extmethods", ",", "register_paths", "=", "True", ",", "extensions", "=", "{", "u'tailf-common'", ":", "{", "u'info'", ":", "u'Configure threshold for component:COMPACT-FLASH'", ",", "u'cli-incomplete-no'", ":", "None", "}", "}", ",", "namespace", "=", "'urn:brocade.com:mgmt:brocade-system-monitor'", ",", "defining_module", "=", "'brocade-system-monitor'", ",", "yang_type", "=", "'container'", ",", "is_config", "=", "True", ")", "except", "(", "TypeError", ",", "ValueError", ")", ":", "raise", "ValueError", "(", "{", "'error-string'", ":", "\"\"\"compact_flash must be of a type compatible with container\"\"\"", ",", "'defined-type'", ":", "\"container\"", ",", "'generated-type'", ":", "\"\"\"YANGDynClass(base=compact_flash.compact_flash, is_container='container', presence=False, yang_name=\"compact-flash\", rest_name=\"compact-flash\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Configure threshold for component:COMPACT-FLASH', u'cli-incomplete-no': None}}, namespace='urn:brocade.com:mgmt:brocade-system-monitor', defining_module='brocade-system-monitor', yang_type='container', is_config=True)\"\"\"", ",", "}", ")", "self", ".", "__compact_flash", "=", "t", "if", "hasattr", "(", "self", ",", "'_set'", ")", ":", "self", ".", "_set", "(", ")" ]
80.727273
37.545455
def __ensure_provisioning_alarm(table_name, key_name):
    """ Ensure that provisioning alarm threshold is not exceeded

    :type table_name: str
    :param table_name: Name of the DynamoDB table
    :type key_name: str
    :param key_name: Configuration option key name
    """
    lookback_window_start = get_table_option(
        key_name, 'lookback_window_start')
    lookback_period = get_table_option(key_name, 'lookback_period')

    consumed_read_units_percent = table_stats.get_consumed_read_units_percent(
        table_name, lookback_window_start, lookback_period)
    consumed_write_units_percent = table_stats.get_consumed_write_units_percent(
        table_name, lookback_window_start, lookback_period)

    reads_upper_alarm_threshold = \
        get_table_option(key_name, 'reads-upper-alarm-threshold')
    reads_lower_alarm_threshold = \
        get_table_option(key_name, 'reads-lower-alarm-threshold')
    writes_upper_alarm_threshold = \
        get_table_option(key_name, 'writes-upper-alarm-threshold')
    writes_lower_alarm_threshold = \
        get_table_option(key_name, 'writes-lower-alarm-threshold')

    # Check upper alarm thresholds
    upper_alert_triggered = False
    upper_alert_message = []
    if 0 < reads_upper_alarm_threshold <= consumed_read_units_percent:
        upper_alert_triggered = True
        upper_alert_message.append(
            '{0} - Consumed Read Capacity {1:f}% '
            'was greater than or equal to the upper '
            'alarm threshold {2:f}%\n'.format(
                table_name,
                consumed_read_units_percent,
                reads_upper_alarm_threshold))

    if 0 < writes_upper_alarm_threshold <= consumed_write_units_percent:
        upper_alert_triggered = True
        upper_alert_message.append(
            '{0} - Consumed Write Capacity {1:f}% '
            'was greater than or equal to the upper alarm '
            'threshold {2:f}%\n'.format(
                table_name,
                consumed_write_units_percent,
                writes_upper_alarm_threshold))

    # Check lower alarm thresholds
    lower_alert_triggered = False
    lower_alert_message = []
    if (reads_lower_alarm_threshold > 0 and
            consumed_read_units_percent < reads_lower_alarm_threshold):
        lower_alert_triggered = True
        lower_alert_message.append(
            '{0} - Consumed Read Capacity {1:f}% '
            'was below the lower alarm threshold {2:f}%\n'.format(
                table_name,
                consumed_read_units_percent,
                reads_lower_alarm_threshold))

    if (writes_lower_alarm_threshold > 0 and
            consumed_write_units_percent < writes_lower_alarm_threshold):
        lower_alert_triggered = True
        lower_alert_message.append(
            '{0} - Consumed Write Capacity {1:f}% '
            'was below the lower alarm threshold {2:f}%\n'.format(
                table_name,
                consumed_write_units_percent,
                writes_lower_alarm_threshold))

    # Send alert if needed
    if upper_alert_triggered:
        logger.info(
            '{0} - Will send high provisioning alert'.format(table_name))
        sns.publish_table_notification(
            key_name,
            ''.join(upper_alert_message),
            ['high-throughput-alarm'],
            subject='ALARM: High Throughput for Table {0}'.format(table_name))
    elif lower_alert_triggered:
        logger.info(
            '{0} - Will send low provisioning alert'.format(table_name))
        sns.publish_table_notification(
            key_name,
            ''.join(lower_alert_message),
            ['low-throughput-alarm'],
            subject='ALARM: Low Throughput for Table {0}'.format(table_name))
    else:
        logger.debug('{0} - Throughput alarm thresholds not crossed'.format(
            table_name))
[ "def", "__ensure_provisioning_alarm", "(", "table_name", ",", "key_name", ")", ":", "lookback_window_start", "=", "get_table_option", "(", "key_name", ",", "'lookback_window_start'", ")", "lookback_period", "=", "get_table_option", "(", "key_name", ",", "'lookback_period'", ")", "consumed_read_units_percent", "=", "table_stats", ".", "get_consumed_read_units_percent", "(", "table_name", ",", "lookback_window_start", ",", "lookback_period", ")", "consumed_write_units_percent", "=", "table_stats", ".", "get_consumed_write_units_percent", "(", "table_name", ",", "lookback_window_start", ",", "lookback_period", ")", "reads_upper_alarm_threshold", "=", "get_table_option", "(", "key_name", ",", "'reads-upper-alarm-threshold'", ")", "reads_lower_alarm_threshold", "=", "get_table_option", "(", "key_name", ",", "'reads-lower-alarm-threshold'", ")", "writes_upper_alarm_threshold", "=", "get_table_option", "(", "key_name", ",", "'writes-upper-alarm-threshold'", ")", "writes_lower_alarm_threshold", "=", "get_table_option", "(", "key_name", ",", "'writes-lower-alarm-threshold'", ")", "# Check upper alarm thresholds", "upper_alert_triggered", "=", "False", "upper_alert_message", "=", "[", "]", "if", "0", "<", "reads_upper_alarm_threshold", "<=", "consumed_read_units_percent", ":", "upper_alert_triggered", "=", "True", "upper_alert_message", ".", "append", "(", "'{0} - Consumed Read Capacity {1:f}% '", "'was greater than or equal to the upper '", "'alarm threshold {2:f}%\\n'", ".", "format", "(", "table_name", ",", "consumed_read_units_percent", ",", "reads_upper_alarm_threshold", ")", ")", "if", "0", "<", "writes_upper_alarm_threshold", "<=", "consumed_write_units_percent", ":", "upper_alert_triggered", "=", "True", "upper_alert_message", ".", "append", "(", "'{0} - Consumed Write Capacity {1:f}% '", "'was greater than or equal to the upper alarm '", "'threshold {2:f}%\\n'", ".", "format", "(", "table_name", ",", "consumed_write_units_percent", ",", "writes_upper_alarm_threshold", ")", ")", "# Check lower alarm thresholds", "lower_alert_triggered", "=", "False", "lower_alert_message", "=", "[", "]", "if", "(", "reads_lower_alarm_threshold", ">", "0", "and", "consumed_read_units_percent", "<", "reads_lower_alarm_threshold", ")", ":", "lower_alert_triggered", "=", "True", "lower_alert_message", ".", "append", "(", "'{0} - Consumed Read Capacity {1:f}% '", "'was below the lower alarm threshold {2:f}%\\n'", ".", "format", "(", "table_name", ",", "consumed_read_units_percent", ",", "reads_lower_alarm_threshold", ")", ")", "if", "(", "writes_lower_alarm_threshold", ">", "0", "and", "consumed_write_units_percent", "<", "writes_lower_alarm_threshold", ")", ":", "lower_alert_triggered", "=", "True", "lower_alert_message", ".", "append", "(", "'{0} - Consumed Write Capacity {1:f}% '", "'was below the lower alarm threshold {2:f}%\\n'", ".", "format", "(", "table_name", ",", "consumed_write_units_percent", ",", "writes_lower_alarm_threshold", ")", ")", "# Send alert if needed", "if", "upper_alert_triggered", ":", "logger", ".", "info", "(", "'{0} - Will send high provisioning alert'", ".", "format", "(", "table_name", ")", ")", "sns", ".", "publish_table_notification", "(", "key_name", ",", "''", ".", "join", "(", "upper_alert_message", ")", ",", "[", "'high-throughput-alarm'", "]", ",", "subject", "=", "'ALARM: High Throughput for Table {0}'", ".", "format", "(", "table_name", ")", ")", "elif", "lower_alert_triggered", ":", "logger", ".", "info", "(", "'{0} - Will send low provisioning alert'", ".", "format", 
"(", "table_name", ")", ")", "sns", ".", "publish_table_notification", "(", "key_name", ",", "''", ".", "join", "(", "lower_alert_message", ")", ",", "[", "'low-throughput-alarm'", "]", ",", "subject", "=", "'ALARM: Low Throughput for Table {0}'", ".", "format", "(", "table_name", ")", ")", "else", ":", "logger", ".", "debug", "(", "'{0} - Throughput alarm thresholds not crossed'", ".", "format", "(", "table_name", ")", ")" ]
40.923913
15.673913
def redo(self):
    """
    Redo the latest undone command.
    """
    self.undo_manager.redo()
    self.notify_observers()
    logging.debug('undo_manager redo stack={}'.format(
        self.undo_manager._redo_stack))
[ "def", "redo", "(", "self", ")", ":", "self", ".", "undo_manager", ".", "redo", "(", ")", "self", ".", "notify_observers", "(", ")", "logging", ".", "debug", "(", "'undo_manager redo stack={}'", ".", "format", "(", "self", ".", "undo_manager", ".", "_redo_stack", ")", ")" ]
32.571429
13.142857
def result(retn):
    '''
    Return a value or raise an exception from a retn tuple.
    '''
    ok, valu = retn
    if ok:
        return valu

    name, info = valu

    ctor = getattr(s_exc, name, None)
    if ctor is not None:
        raise ctor(**info)

    info['errx'] = name
    raise s_exc.SynErr(**info)
[ "def", "result", "(", "retn", ")", ":", "ok", ",", "valu", "=", "retn", "if", "ok", ":", "return", "valu", "name", ",", "info", "=", "valu", "ctor", "=", "getattr", "(", "s_exc", ",", "name", ",", "None", ")", "if", "ctor", "is", "not", "None", ":", "raise", "ctor", "(", "*", "*", "info", ")", "info", "[", "'errx'", "]", "=", "name", "raise", "s_exc", ".", "SynErr", "(", "*", "*", "info", ")" ]
17.588235
24.647059
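A hedged sketch of the (ok, value) "retn" convention this helper consumes; the constructor names below are illustrative, not the library's actual API:

def as_retn_ok(valu):
    # Success: (True, value)
    return (True, valu)

def as_retn_err(name, **info):
    # Failure: (False, (exception name, info dict))
    return (False, (name, info))

ok, valu = as_retn_ok(42)
assert ok and valu == 42

name, info = as_retn_err('NoSuchObj', mesg='missing')[1]
assert name == 'NoSuchObj' and info['mesg'] == 'missing'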
def random_choices(self, elements=('a', 'b', 'c'), length=None):
    """
    Returns a list of random, non-unique elements from a passed object.

    If `elements` is a dictionary, the value will be used as a weighting
    element. For example::

        random_element({"{{variable_1}}": 0.5,
                        "{{variable_2}}": 0.2,
                        "{{variable_3}}": 0.2,
                        "{{variable_4}}": 0.1})

    will have the following distribution:

        * `variable_1`: 50% probability
        * `variable_2`: 20% probability
        * `variable_3`: 20% probability
        * `variable_4`: 10% probability
    """
    return self.random_elements(elements, length, unique=False)
[ "def", "random_choices", "(", "self", ",", "elements", "=", "(", "'a'", ",", "'b'", ",", "'c'", ")", ",", "length", "=", "None", ")", ":", "return", "self", ".", "random_elements", "(", "elements", ",", "length", ",", "unique", "=", "False", ")" ]
39.470588
21.705882
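A hypothetical usage sketch, assuming this is the Faker provider method of the same name; weighted elements are passed as an OrderedDict, per Faker's convention:

from collections import OrderedDict
from faker import Faker

fake = Faker()
picks = fake.random_choices(
    elements=OrderedDict([("red", 0.5), ("green", 0.3), ("blue", 0.2)]),
    length=10,
)
print(picks)  # ten non-unique draws, roughly 50/30/20 distributed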
def start_coordsys(self):
    """
    Coordinate system at start of effect.

    All axes are parallel to the original vector evaluation location,
    with the origin moved to this effect's start point.

    :return: coordinate system at start of effect
    :rtype: :class:`CoordSys`
    """
    coordsys = copy(self.location)
    coordsys.origin = self.start_point
    return coordsys
[ "def", "start_coordsys", "(", "self", ")", ":", "coordsys", "=", "copy", "(", "self", ".", "location", ")", "coordsys", ".", "origin", "=", "self", ".", "start_point", "return", "coordsys" ]
31.769231
14.846154
def list_objects(self, path='', relative=False, first_level=False,
                 max_request_entries=None):
    """
    List objects.

    Args:
        path (str): Path or URL.
        relative (bool): Path is relative to current root.
        first_level (bool): If True, returns only first level objects.
            Else, returns full tree.
        max_request_entries (int): If specified, maximum entries returned
            by request.

    Returns:
        generator of tuple: object name str, object header dict
    """
    entries = 0
    next_values = []
    max_request_entries_arg = None

    if not relative:
        path = self.relpath(path)

    # From root
    if not path:
        objects = self._list_locators()

    # Sub directory
    else:
        objects = self._list_objects(
            self.get_client_kwargs(path), max_request_entries)

    # Yield file hierarchy
    for obj in objects:

        # Generate first level objects entries
        try:
            name, header, is_directory = obj
        except ValueError:
            # Locators
            name, header = obj
            is_directory = True

        # Start to generate subdirectories content
        if is_directory and not first_level:
            name = next_path = name.rstrip('/') + '/'
            if path:
                next_path = '/'.join((path.rstrip('/'), name))

            if max_request_entries is not None:
                max_request_entries_arg = max_request_entries - entries

            next_values.append((
                name, self._generate_async(self.list_objects(
                    next_path, relative=True,
                    max_request_entries=max_request_entries_arg))))

        entries += 1
        yield name, header
        if entries == max_request_entries:
            return

    for next_name, generator in next_values:
        # Generate other levels objects entries
        for name, header in generator:
            entries += 1
            yield '/'.join((next_name.rstrip('/'), name)), header
            if entries == max_request_entries:
                return
[ "def", "list_objects", "(", "self", ",", "path", "=", "''", ",", "relative", "=", "False", ",", "first_level", "=", "False", ",", "max_request_entries", "=", "None", ")", ":", "entries", "=", "0", "next_values", "=", "[", "]", "max_request_entries_arg", "=", "None", "if", "not", "relative", ":", "path", "=", "self", ".", "relpath", "(", "path", ")", "# From root", "if", "not", "path", ":", "objects", "=", "self", ".", "_list_locators", "(", ")", "# Sub directory", "else", ":", "objects", "=", "self", ".", "_list_objects", "(", "self", ".", "get_client_kwargs", "(", "path", ")", ",", "max_request_entries", ")", "# Yield file hierarchy", "for", "obj", "in", "objects", ":", "# Generate first level objects entries", "try", ":", "name", ",", "header", ",", "is_directory", "=", "obj", "except", "ValueError", ":", "# Locators", "name", ",", "header", "=", "obj", "is_directory", "=", "True", "# Start to generate subdirectories content", "if", "is_directory", "and", "not", "first_level", ":", "name", "=", "next_path", "=", "name", ".", "rstrip", "(", "'/'", ")", "+", "'/'", "if", "path", ":", "next_path", "=", "'/'", ".", "join", "(", "(", "path", ".", "rstrip", "(", "'/'", ")", ",", "name", ")", ")", "if", "max_request_entries", "is", "not", "None", ":", "max_request_entries_arg", "=", "max_request_entries", "-", "entries", "next_values", ".", "append", "(", "(", "name", ",", "self", ".", "_generate_async", "(", "self", ".", "list_objects", "(", "next_path", ",", "relative", "=", "True", ",", "max_request_entries", "=", "max_request_entries_arg", ")", ")", ")", ")", "entries", "+=", "1", "yield", "name", ",", "header", "if", "entries", "==", "max_request_entries", ":", "return", "for", "next_name", ",", "generator", "in", "next_values", ":", "# Generate other levels objects entries", "for", "name", ",", "header", "in", "generator", ":", "entries", "+=", "1", "yield", "'/'", ".", "join", "(", "(", "next_name", ".", "rstrip", "(", "'/'", ")", ",", "name", ")", ")", ",", "header", "if", "entries", "==", "max_request_entries", ":", "return" ]
32.328571
19.5
def upgrade_plan(self, subid, vpsplanid, params=None):
    ''' /v1/server/upgrade_plan
    POST - account
    Upgrade the plan of a virtual machine. The virtual machine will be
    rebooted upon a successful upgrade.

    Link: https://www.vultr.com/api/#server_upgrade_plan
    '''
    params = update_params(params, {
        'SUBID': subid,
        'VPSPLANID': vpsplanid
    })
    return self.request('/v1/server/upgrade_plan', params, 'POST')
[ "def", "upgrade_plan", "(", "self", ",", "subid", ",", "vpsplanid", ",", "params", "=", "None", ")", ":", "params", "=", "update_params", "(", "params", ",", "{", "'SUBID'", ":", "subid", ",", "'VPSPLANID'", ":", "vpsplanid", "}", ")", "return", "self", ".", "request", "(", "'/v1/server/upgrade_plan'", ",", "params", ",", "'POST'", ")" ]
36.923077
18.615385
def extract(self,
            text: str = None,
            extract_first_date_only: bool = False,
            additional_formats: List[str] = list(),
            use_default_formats: bool = False,
            ignore_dates_before: datetime.datetime = None,
            ignore_dates_after: datetime.datetime = None,
            detect_relative_dates: bool = False,
            relative_base: datetime.datetime = None,
            preferred_date_order: str = "MDY",
            prefer_language_date_order: bool = True,
            timezone: str = None,
            to_timezone: str = None,
            return_as_timezone_aware: bool = True,
            prefer_day_of_month: str = "first",
            prefer_dates_from: str = "current",
            date_value_resolution: DateResolution = DateResolution.DAY,
            ) -> List[Extraction]:
    """
    Args:
        text (str): extract dates from this 'text', default to None
        extract_first_date_only (bool): extract the first valid date only or extract all, default to False
        additional_formats (List[str]): user defined formats for extraction, default to empty list
        use_default_formats (bool): if use default formats together with additional_formats, default to False
        ignore_dates_before (datetime.datetime): ignore dates before 'ignore_dates_before', default to None
        ignore_dates_after (datetime.datetime): ignore dates after 'ignore_dates_after', default to None
        detect_relative_dates (bool): if detect relative dates like '9 days before', default to False
            (an ETK instance with spaCy enabled must be passed when initializing the
            DateExtractor for relative date extraction)
        relative_base (datetime.datetime): offset relative dates detected based on 'relative_base', default to None
        preferred_date_order (enum['MDY', 'DMY', 'YMD']): preferred date order when ambiguous, default to 'MDY'
        prefer_language_date_order (bool): if use the text language's preferred order, default to True
        timezone (str): add 'timezone' if there is no timezone information in the extracted date, default to None
        to_timezone (str): convert all dates extracted to this timezone, default to None
        return_as_timezone_aware (bool): returned datetime timezone awareness, default to True
        prefer_day_of_month (enum['first', 'current', 'last']): use which day of the month when there is no 'day', default to 'first'
        prefer_dates_from (enum['past', 'current', 'future']): use which date when there is few info
            (e.g. only month), default to 'current'
        date_value_resolution (enum[DateResolution.SECOND, DateResolution.MINUTE, DateResolution.HOUR,
            DateResolution.DAY, DateResolution.MONTH, DateResolution.YEAR]): specify resolution
            when convert to iso format string, default to DateResolution.DAY

    Returns:
        List[Extraction]: List of extractions, the information including::

            Extraction._value: iso format string,
            Extraction._provenance: provenance information including:
                {
                    'start_char': int - start_char,
                    'end_char': int - end_char
                },
            Extraction._addition_inf: additional information including:
                {
                    'date_object': datetime.datetime - the datetime object,
                    'original_text': str - the original str extracted from text,
                    'language': enum['en', 'es'] - language of the date
                }
    """
    if return_as_timezone_aware:
        self._default_tz = pytz.timezone(timezone) if timezone else get_localzone()
        if ignore_dates_before and not ignore_dates_before.tzinfo:
            ignore_dates_before = ignore_dates_before.astimezone(self._default_tz)
        if ignore_dates_after and not ignore_dates_after.tzinfo:
            ignore_dates_after = ignore_dates_after.astimezone(self._default_tz)
        if relative_base and not relative_base.tzinfo:
            relative_base = relative_base.astimezone(self._default_tz)
    else:
        if ignore_dates_before and ignore_dates_before.tzinfo:
            ignore_dates_before = ignore_dates_before.replace(tzinfo=None)
        if ignore_dates_after and ignore_dates_after.tzinfo:
            ignore_dates_after = ignore_dates_after.replace(tzinfo=None)
        if relative_base and relative_base.tzinfo:
            relative_base = relative_base.replace(tzinfo=None)

    if prefer_language_date_order:
        try:
            self._lan = detect(text)
        except Exception as e:
            warn('DateExtractor: Catch LangDetectException {}'.format(str(e)))

    self._settings = {
        EXTRACT_FIRST_DATE_ONLY: extract_first_date_only,
        ADDITIONAL_FORMATS: additional_formats,
        USE_DEFAULT_FORMATS: use_default_formats,
        IGNORE_DATES_BEFORE: ignore_dates_before,
        IGNORE_DATES_AFTER: ignore_dates_after,
        DETECT_RELATIVE_DATES: detect_relative_dates,
        RELATIVE_BASE: relative_base,
        PREFERRED_DATE_ORDER: preferred_date_order,
        PREFER_LANGUAGE_DATE_ORDER: prefer_language_date_order,
        TIMEZONE: timezone,
        TO_TIMEZONE: to_timezone,
        RETURN_AS_TIMEZONE_AWARE: return_as_timezone_aware,
        PREFER_DAY_OF_MONTH: prefer_day_of_month,
        PREFER_DATES_FROM: prefer_dates_from,
        DATE_VALUE_RESOLUTION: date_value_resolution
    }

    results = []
    additional_regex = []
    if additional_formats:
        # Translate each user-supplied strftime-style format into a regex,
        # recording the date-field order (M/D/Y) as each directive is seen.
        for date_format in additional_formats:
            order = ''
            reg = date_format
            for key in singleton_regex:
                if key[0] == '%':
                    reg2 = re.sub(key, singleton_regex[key], reg)
                    if reg != reg2:
                        if key in units['M']:
                            order += 'M'
                        elif key in units['Y']:
                            order += 'Y'
                        elif key in units['D']:
                            order += 'D'
                        reg = reg2
            additional_regex.append({
                'reg': reg,
                'pattern': date_format,
                'order': order,
            })
        for r in additional_regex:
            try:
                matches = [self._wrap_date_match(r['order'], match, pattern=r['pattern'])
                           for match in re.finditer(r['reg'], text, re.I) if match]
                if matches:
                    results.append(matches)
            except Exception:
                warn('DateExtractor: Failed to extract with additional format '
                     + str(r) + '.')
        if use_default_formats:
            for order in self._final_regex.keys():
                matches = [self._wrap_date_match(order, match) for match
                           in re.finditer(self._final_regex[order], text, re.I) if match]
                if matches:
                    results.append(matches)
    else:
        for order in self._final_regex.keys():
            matches = [self._wrap_date_match(order, match) for match
                       in re.finditer(self._final_regex[order], text, re.I) if match]
            results.append(matches)

    # for absolute dates:
    ans = self._remove_overlapped_date_str(results)
    # for relative dates:
    if detect_relative_dates:
        ans += self._extract_relative_dates(text)

    return ans
[ "def", "extract", "(", "self", ",", "text", ":", "str", "=", "None", ",", "extract_first_date_only", ":", "bool", "=", "False", ",", "additional_formats", ":", "List", "[", "str", "]", "=", "list", "(", ")", ",", "use_default_formats", ":", "bool", "=", "False", ",", "ignore_dates_before", ":", "datetime", ".", "datetime", "=", "None", ",", "ignore_dates_after", ":", "datetime", ".", "datetime", "=", "None", ",", "detect_relative_dates", ":", "bool", "=", "False", ",", "relative_base", ":", "datetime", ".", "datetime", "=", "None", ",", "preferred_date_order", ":", "str", "=", "\"MDY\"", ",", "prefer_language_date_order", ":", "bool", "=", "True", ",", "timezone", ":", "str", "=", "None", ",", "to_timezone", ":", "str", "=", "None", ",", "return_as_timezone_aware", ":", "bool", "=", "True", ",", "prefer_day_of_month", ":", "str", "=", "\"first\"", ",", "prefer_dates_from", ":", "str", "=", "\"current\"", ",", "date_value_resolution", ":", "DateResolution", "=", "DateResolution", ".", "DAY", ",", ")", "->", "List", "[", "Extraction", "]", ":", "if", "return_as_timezone_aware", ":", "self", ".", "_default_tz", "=", "pytz", ".", "timezone", "(", "timezone", ")", "if", "timezone", "else", "get_localzone", "(", ")", "if", "ignore_dates_before", "and", "not", "ignore_dates_before", ".", "tzinfo", ":", "ignore_dates_before", "=", "ignore_dates_before", ".", "astimezone", "(", "self", ".", "_default_tz", ")", "if", "ignore_dates_after", "and", "not", "ignore_dates_after", ".", "tzinfo", ":", "ignore_dates_after", "=", "ignore_dates_after", ".", "astimezone", "(", "self", ".", "_default_tz", ")", "if", "relative_base", "and", "not", "relative_base", ".", "tzinfo", ":", "relative_base", "=", "relative_base", ".", "astimezone", "(", "self", ".", "_default_tz", ")", "else", ":", "if", "ignore_dates_before", "and", "ignore_dates_before", ".", "tzinfo", ":", "ignore_dates_before", "=", "ignore_dates_before", ".", "replace", "(", "tzinfo", "=", "None", ")", "if", "ignore_dates_after", "and", "ignore_dates_after", ".", "tzinfo", ":", "ignore_dates_after", "=", "ignore_dates_after", ".", "replace", "(", "tzinfo", "=", "None", ")", "if", "relative_base", "and", "relative_base", ".", "tzinfo", ":", "relative_base", "=", "relative_base", ".", "replace", "(", "tzinfo", "=", "None", ")", "if", "prefer_language_date_order", ":", "try", ":", "self", ".", "_lan", "=", "detect", "(", "text", ")", "except", "Exception", "as", "e", ":", "warn", "(", "'DateExtractor: Catch LangDetectException '", "+", "str", "(", "e", ")", ")", "warn", "(", "message", "=", "'DateExtractor: Catch LangDetectException {}'", ".", "format", "(", "str", "(", "e", ")", ")", ")", "self", ".", "_settings", "=", "{", "EXTRACT_FIRST_DATE_ONLY", ":", "extract_first_date_only", ",", "ADDITIONAL_FORMATS", ":", "additional_formats", ",", "USE_DEFAULT_FORMATS", ":", "use_default_formats", ",", "IGNORE_DATES_BEFORE", ":", "ignore_dates_before", ",", "IGNORE_DATES_AFTER", ":", "ignore_dates_after", ",", "DETECT_RELATIVE_DATES", ":", "detect_relative_dates", ",", "RELATIVE_BASE", ":", "relative_base", ",", "PREFERRED_DATE_ORDER", ":", "preferred_date_order", ",", "PREFER_LANGUAGE_DATE_ORDER", ":", "prefer_language_date_order", ",", "TIMEZONE", ":", "timezone", ",", "TO_TIMEZONE", ":", "to_timezone", ",", "RETURN_AS_TIMEZONE_AWARE", ":", "return_as_timezone_aware", ",", "PREFER_DAY_OF_MONTH", ":", "prefer_day_of_month", ",", "PREFER_DATES_FROM", ":", "prefer_dates_from", ",", "DATE_VALUE_RESOLUTION", ":", "date_value_resolution", "}", "results", "=", 
"[", "]", "additional_regex", "=", "[", "]", "if", "additional_formats", ":", "for", "date_format", "in", "additional_formats", ":", "order", "=", "''", "reg", "=", "date_format", "for", "key", "in", "singleton_regex", ":", "if", "key", "[", "0", "]", "==", "'%'", ":", "reg2", "=", "re", ".", "sub", "(", "key", ",", "singleton_regex", "[", "key", "]", ",", "reg", ")", "if", "reg", "!=", "reg2", ":", "if", "key", "in", "units", "[", "'M'", "]", ":", "order", "+=", "'M'", "elif", "key", "in", "units", "[", "'Y'", "]", ":", "order", "+=", "'Y'", "elif", "key", "in", "units", "[", "'D'", "]", ":", "order", "+=", "'D'", "reg", "=", "reg2", "additional_regex", ".", "append", "(", "{", "'reg'", ":", "reg", ",", "'pattern'", ":", "date_format", ",", "'order'", ":", "order", ",", "}", ")", "for", "r", "in", "additional_regex", ":", "try", ":", "matches", "=", "[", "self", ".", "_wrap_date_match", "(", "r", "[", "'order'", "]", ",", "match", ",", "pattern", "=", "r", "[", "'pattern'", "]", ")", "for", "match", "in", "re", ".", "finditer", "(", "r", "[", "'reg'", "]", ",", "text", ",", "re", ".", "I", ")", "if", "match", "]", "if", "matches", ":", "results", ".", "append", "(", "matches", ")", "except", ":", "warn", "(", "'DateExtractor: Failed to extract with additional format '", "+", "str", "(", "r", ")", "+", "'.'", ")", "if", "use_default_formats", ":", "for", "order", "in", "self", ".", "_final_regex", ".", "keys", "(", ")", ":", "matches", "=", "[", "self", ".", "_wrap_date_match", "(", "order", ",", "match", ")", "for", "match", "in", "re", ".", "finditer", "(", "self", ".", "_final_regex", "[", "order", "]", ",", "text", ",", "re", ".", "I", ")", "if", "match", "]", "if", "matches", ":", "results", ".", "append", "(", "matches", ")", "else", ":", "for", "order", "in", "self", ".", "_final_regex", ".", "keys", "(", ")", ":", "matches", "=", "[", "self", ".", "_wrap_date_match", "(", "order", ",", "match", ")", "for", "match", "in", "re", ".", "finditer", "(", "self", ".", "_final_regex", "[", "order", "]", ",", "text", ",", "re", ".", "I", ")", "if", "match", "]", "results", ".", "append", "(", "matches", ")", "# for absolute dates:", "ans", "=", "self", ".", "_remove_overlapped_date_str", "(", "results", ")", "# for relative dates:", "if", "detect_relative_dates", ":", "ans", "+=", "self", ".", "_extract_relative_dates", "(", "text", ")", "return", "ans" ]
54.137931
26.082759
def begin_transaction(
    self,
    database,
    options_=None,
    retry=google.api_core.gapic_v1.method.DEFAULT,
    timeout=google.api_core.gapic_v1.method.DEFAULT,
    metadata=None,
):
    """
    Starts a new transaction.

    Example:
        >>> from google.cloud import firestore_v1beta1
        >>>
        >>> client = firestore_v1beta1.FirestoreClient()
        >>>
        >>> database = client.database_root_path('[PROJECT]', '[DATABASE]')
        >>>
        >>> response = client.begin_transaction(database)

    Args:
        database (str): The database name. In the format:
            ``projects/{project_id}/databases/{database_id}``.
        options_ (Union[dict, ~google.cloud.firestore_v1beta1.types.TransactionOptions]):
            The options for the transaction. Defaults to a read-write transaction.
            If a dict is provided, it must be of the same form as the protobuf
            message :class:`~google.cloud.firestore_v1beta1.types.TransactionOptions`
        retry (Optional[google.api_core.retry.Retry]): A retry object used
            to retry requests. If ``None`` is specified, requests will not
            be retried.
        timeout (Optional[float]): The amount of time, in seconds, to wait
            for the request to complete. Note that if ``retry`` is
            specified, the timeout applies to each individual attempt.
        metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata
            that is provided to the method.

    Returns:
        A :class:`~google.cloud.firestore_v1beta1.types.BeginTransactionResponse` instance.

    Raises:
        google.api_core.exceptions.GoogleAPICallError: If the request
            failed for any reason.
        google.api_core.exceptions.RetryError: If the request failed due
            to a retryable error and retry attempts failed.
        ValueError: If the parameters are invalid.
    """
    # Wrap the transport method to add retry and timeout logic.
    if "begin_transaction" not in self._inner_api_calls:
        self._inner_api_calls[
            "begin_transaction"
        ] = google.api_core.gapic_v1.method.wrap_method(
            self.transport.begin_transaction,
            default_retry=self._method_configs["BeginTransaction"].retry,
            default_timeout=self._method_configs["BeginTransaction"].timeout,
            client_info=self._client_info,
        )

    request = firestore_pb2.BeginTransactionRequest(
        database=database, options=options_
    )
    if metadata is None:
        metadata = []
    metadata = list(metadata)
    try:
        routing_header = [("database", database)]
    except AttributeError:
        pass
    else:
        routing_metadata = google.api_core.gapic_v1.routing_header.to_grpc_metadata(
            routing_header
        )
        metadata.append(routing_metadata)

    return self._inner_api_calls["begin_transaction"](
        request, retry=retry, timeout=timeout, metadata=metadata
    )
[ "def", "begin_transaction", "(", "self", ",", "database", ",", "options_", "=", "None", ",", "retry", "=", "google", ".", "api_core", ".", "gapic_v1", ".", "method", ".", "DEFAULT", ",", "timeout", "=", "google", ".", "api_core", ".", "gapic_v1", ".", "method", ".", "DEFAULT", ",", "metadata", "=", "None", ",", ")", ":", "# Wrap the transport method to add retry and timeout logic.", "if", "\"begin_transaction\"", "not", "in", "self", ".", "_inner_api_calls", ":", "self", ".", "_inner_api_calls", "[", "\"begin_transaction\"", "]", "=", "google", ".", "api_core", ".", "gapic_v1", ".", "method", ".", "wrap_method", "(", "self", ".", "transport", ".", "begin_transaction", ",", "default_retry", "=", "self", ".", "_method_configs", "[", "\"BeginTransaction\"", "]", ".", "retry", ",", "default_timeout", "=", "self", ".", "_method_configs", "[", "\"BeginTransaction\"", "]", ".", "timeout", ",", "client_info", "=", "self", ".", "_client_info", ",", ")", "request", "=", "firestore_pb2", ".", "BeginTransactionRequest", "(", "database", "=", "database", ",", "options", "=", "options_", ")", "if", "metadata", "is", "None", ":", "metadata", "=", "[", "]", "metadata", "=", "list", "(", "metadata", ")", "try", ":", "routing_header", "=", "[", "(", "\"database\"", ",", "database", ")", "]", "except", "AttributeError", ":", "pass", "else", ":", "routing_metadata", "=", "google", ".", "api_core", ".", "gapic_v1", ".", "routing_header", ".", "to_grpc_metadata", "(", "routing_header", ")", "metadata", ".", "append", "(", "routing_metadata", ")", "return", "self", ".", "_inner_api_calls", "[", "\"begin_transaction\"", "]", "(", "request", ",", "retry", "=", "retry", ",", "timeout", "=", "timeout", ",", "metadata", "=", "metadata", ")" ]
41.571429
25
def set_affinity_matrix(self, affinity_mat):
    """
    Parameters
    ----------
    affinity_mat : sparse matrix (N_obs, N_obs).
        The adjacency matrix to input.
    """
    affinity_mat = check_array(affinity_mat, accept_sparse=sparse_formats)
    if affinity_mat.shape[0] != affinity_mat.shape[1]:
        raise ValueError("affinity matrix is not square")
    self.affinity_matrix = affinity_mat
[ "def", "set_affinity_matrix", "(", "self", ",", "affinity_mat", ")", ":", "affinity_mat", "=", "check_array", "(", "affinity_mat", ",", "accept_sparse", "=", "sparse_formats", ")", "if", "affinity_mat", ".", "shape", "[", "0", "]", "!=", "affinity_mat", ".", "shape", "[", "1", "]", ":", "raise", "ValueError", "(", "\"affinity matrix is not square\"", ")", "self", ".", "affinity_matrix", "=", "affinity_mat" ]
39.636364
12.909091
def inverse_distance_to_grid(xp, yp, variable, grid_x, grid_y, r, gamma=None,
                             kappa=None, min_neighbors=3, kind='cressman'):
    r"""Generate an inverse distance interpolation of the given points to a regular grid.

    Values are assigned to the given grid using inverse distance weighting based on either
    [Cressman1959]_ or [Barnes1964]_. The Barnes implementation used here is based on
    [Koch1983]_.

    Parameters
    ----------
    xp: (N, ) ndarray
        x-coordinates of observations.
    yp: (N, ) ndarray
        y-coordinates of observations.
    variable: (N, ) ndarray
        observation values associated with (xp, yp) pairs.
        i.e., variable[i] is a unique observation at (xp[i], yp[i]).
    grid_x: (M, 2) ndarray
        Meshgrid associated with x dimension.
    grid_y: (M, 2) ndarray
        Meshgrid associated with y dimension.
    r: float
        Radius from grid center, within which observations
        are considered and weighted.
    gamma: float
        Adjustable smoothing parameter for the barnes interpolation. Default None.
    kappa: float
        Response parameter for barnes interpolation. Default None.
    min_neighbors: int
        Minimum number of neighbors needed to perform barnes or cressman
        interpolation for a point. Default is 3.
    kind: str
        Specify what inverse distance weighting interpolation to use.
        Options: 'cressman' or 'barnes'. Default 'cressman'

    Returns
    -------
    img: (M, N) ndarray
        Interpolated values on a 2-dimensional grid

    See Also
    --------
    inverse_distance_to_points
    """
    # Handle grid-to-points conversion, and use function from `interpolation`
    points_obs = list(zip(xp, yp))
    points_grid = generate_grid_coords(grid_x, grid_y)
    img = inverse_distance_to_points(points_obs, variable, points_grid, r,
                                     gamma=gamma, kappa=kappa,
                                     min_neighbors=min_neighbors, kind=kind)
    return img.reshape(grid_x.shape)
[ "def", "inverse_distance_to_grid", "(", "xp", ",", "yp", ",", "variable", ",", "grid_x", ",", "grid_y", ",", "r", ",", "gamma", "=", "None", ",", "kappa", "=", "None", ",", "min_neighbors", "=", "3", ",", "kind", "=", "'cressman'", ")", ":", "# Handle grid-to-points conversion, and use function from `interpolation`", "points_obs", "=", "list", "(", "zip", "(", "xp", ",", "yp", ")", ")", "points_grid", "=", "generate_grid_coords", "(", "grid_x", ",", "grid_y", ")", "img", "=", "inverse_distance_to_points", "(", "points_obs", ",", "variable", ",", "points_grid", ",", "r", ",", "gamma", "=", "gamma", ",", "kappa", "=", "kappa", ",", "min_neighbors", "=", "min_neighbors", ",", "kind", "=", "kind", ")", "return", "img", ".", "reshape", "(", "grid_x", ".", "shape", ")" ]
39.36
23.72
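A small usage sketch; the import path assumes this is MetPy's interpolation helper, so adjust it to wherever the function lives in your codebase:

import numpy as np
from metpy.interpolate import inverse_distance_to_grid

xp = np.array([0.0, 1.0, 2.0])
yp = np.array([0.0, 1.0, 0.5])
obs = np.array([10.0, 20.0, 15.0])
grid_x, grid_y = np.meshgrid(np.linspace(0, 2, 5), np.linspace(0, 1, 4))
img = inverse_distance_to_grid(xp, yp, obs, grid_x, grid_y,
                               r=2.0, min_neighbors=1, kind='cressman')
print(img.shape)  # (4, 5), matching the meshgrid shape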
def is_existing_object(did):
    """Return True if PID is for an object for which science bytes are stored
    locally.

    This excludes SIDs and PIDs for unprocessed replica requests, remote or
    non-existing revisions of local replicas and objects aggregated in
    Resource Maps.
    """
    return d1_gmn.app.models.ScienceObject.objects.filter(pid__did=did).exists()
[ "def", "is_existing_object", "(", "did", ")", ":", "return", "d1_gmn", ".", "app", ".", "models", ".", "ScienceObject", ".", "objects", ".", "filter", "(", "pid__did", "=", "did", ")", ".", "exists", "(", ")" ]
45.125
26.5
def isoformat(self):
    """Return the date formatted according to ISO.

    This is 'YYYY-MM-DD'.

    References:
    - http://www.w3.org/TR/NOTE-datetime
    - http://www.cl.cam.ac.uk/~mgk25/iso-time.html
    """
    # return "%04d-%02d-%02d" % (self._year, self._month, self._day)
    return "%s-%s-%s" % (str(self._year).zfill(4),
                         str(self._month).zfill(2),
                         str(self._day).zfill(2))
[ "def", "isoformat", "(", "self", ")", ":", "# return \"%04d-%02d-%02d\" % (self._year, self._month, self._day)", "return", "\"%s-%s-%s\"", "%", "(", "str", "(", "self", ".", "_year", ")", ".", "zfill", "(", "4", ")", ",", "str", "(", "self", ".", "_month", ")", ".", "zfill", "(", "2", ")", ",", "str", "(", "self", ".", "_day", ")", ".", "zfill", "(", "2", ")", ")" ]
37.181818
22.545455
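The zfill-based padding above behaves like this (e.g. for year 987):

print("%s-%s-%s" % (str(987).zfill(4), str(6).zfill(2), str(5).zfill(2)))
# -> 0987-06-05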
def execute_sql_statement(sql_statement, query, user_name, session, cursor):
    """Executes a single SQL statement"""
    database = query.database
    db_engine_spec = database.db_engine_spec
    parsed_query = ParsedQuery(sql_statement)
    sql = parsed_query.stripped()
    SQL_MAX_ROWS = app.config.get('SQL_MAX_ROW')

    if not parsed_query.is_readonly() and not database.allow_dml:
        raise SqlLabSecurityException(
            _('Only `SELECT` statements are allowed against this database'))
    if query.select_as_cta:
        if not parsed_query.is_select():
            raise SqlLabException(_(
                'Only `SELECT` statements can be used with the CREATE TABLE '
                'feature.'))
        if not query.tmp_table_name:
            start_dttm = datetime.fromtimestamp(query.start_time)
            query.tmp_table_name = 'tmp_{}_table_{}'.format(
                query.user_id, start_dttm.strftime('%Y_%m_%d_%H_%M_%S'))
        sql = parsed_query.as_create_table(query.tmp_table_name)
        query.select_as_cta_used = True
    if parsed_query.is_select():
        if SQL_MAX_ROWS and (not query.limit or query.limit > SQL_MAX_ROWS):
            query.limit = SQL_MAX_ROWS
        if query.limit:
            sql = database.apply_limit_to_sql(sql, query.limit)

    # Hook to allow environment-specific mutation (usually comments) to the SQL
    SQL_QUERY_MUTATOR = config.get('SQL_QUERY_MUTATOR')
    if SQL_QUERY_MUTATOR:
        sql = SQL_QUERY_MUTATOR(sql, user_name, security_manager, database)

    try:
        if log_query:
            log_query(
                query.database.sqlalchemy_uri,
                query.executed_sql,
                query.schema,
                user_name,
                __name__,
                security_manager,
            )
        query.executed_sql = sql
        with stats_timing('sqllab.query.time_executing_query', stats_logger):
            logging.info('Running query: \n{}'.format(sql))
            db_engine_spec.execute(cursor, sql, async_=True)
        logging.info('Handling cursor')
        db_engine_spec.handle_cursor(cursor, query, session)
        with stats_timing('sqllab.query.time_fetching_results', stats_logger):
            logging.debug('Fetching data for query object: {}'.format(
                query.to_dict()))
            data = db_engine_spec.fetch_data(cursor, query.limit)
    except SoftTimeLimitExceeded as e:
        logging.exception(e)
        raise SqlLabTimeoutException(
            "SQL Lab timeout. This environment's policy is to kill queries "
            'after {} seconds.'.format(SQLLAB_TIMEOUT))
    except Exception as e:
        logging.exception(e)
        raise SqlLabException(db_engine_spec.extract_error_message(e))

    logging.debug('Fetching cursor description')
    cursor_description = cursor.description
    return dataframe.SupersetDataFrame(data, cursor_description, db_engine_spec)
[ "def", "execute_sql_statement", "(", "sql_statement", ",", "query", ",", "user_name", ",", "session", ",", "cursor", ")", ":", "database", "=", "query", ".", "database", "db_engine_spec", "=", "database", ".", "db_engine_spec", "parsed_query", "=", "ParsedQuery", "(", "sql_statement", ")", "sql", "=", "parsed_query", ".", "stripped", "(", ")", "SQL_MAX_ROWS", "=", "app", ".", "config", ".", "get", "(", "'SQL_MAX_ROW'", ")", "if", "not", "parsed_query", ".", "is_readonly", "(", ")", "and", "not", "database", ".", "allow_dml", ":", "raise", "SqlLabSecurityException", "(", "_", "(", "'Only `SELECT` statements are allowed against this database'", ")", ")", "if", "query", ".", "select_as_cta", ":", "if", "not", "parsed_query", ".", "is_select", "(", ")", ":", "raise", "SqlLabException", "(", "_", "(", "'Only `SELECT` statements can be used with the CREATE TABLE '", "'feature.'", ")", ")", "if", "not", "query", ".", "tmp_table_name", ":", "start_dttm", "=", "datetime", ".", "fromtimestamp", "(", "query", ".", "start_time", ")", "query", ".", "tmp_table_name", "=", "'tmp_{}_table_{}'", ".", "format", "(", "query", ".", "user_id", ",", "start_dttm", ".", "strftime", "(", "'%Y_%m_%d_%H_%M_%S'", ")", ")", "sql", "=", "parsed_query", ".", "as_create_table", "(", "query", ".", "tmp_table_name", ")", "query", ".", "select_as_cta_used", "=", "True", "if", "parsed_query", ".", "is_select", "(", ")", ":", "if", "SQL_MAX_ROWS", "and", "(", "not", "query", ".", "limit", "or", "query", ".", "limit", ">", "SQL_MAX_ROWS", ")", ":", "query", ".", "limit", "=", "SQL_MAX_ROWS", "if", "query", ".", "limit", ":", "sql", "=", "database", ".", "apply_limit_to_sql", "(", "sql", ",", "query", ".", "limit", ")", "# Hook to allow environment-specific mutation (usually comments) to the SQL", "SQL_QUERY_MUTATOR", "=", "config", ".", "get", "(", "'SQL_QUERY_MUTATOR'", ")", "if", "SQL_QUERY_MUTATOR", ":", "sql", "=", "SQL_QUERY_MUTATOR", "(", "sql", ",", "user_name", ",", "security_manager", ",", "database", ")", "try", ":", "if", "log_query", ":", "log_query", "(", "query", ".", "database", ".", "sqlalchemy_uri", ",", "query", ".", "executed_sql", ",", "query", ".", "schema", ",", "user_name", ",", "__name__", ",", "security_manager", ",", ")", "query", ".", "executed_sql", "=", "sql", "with", "stats_timing", "(", "'sqllab.query.time_executing_query'", ",", "stats_logger", ")", ":", "logging", ".", "info", "(", "'Running query: \\n{}'", ".", "format", "(", "sql", ")", ")", "db_engine_spec", ".", "execute", "(", "cursor", ",", "sql", ",", "async_", "=", "True", ")", "logging", ".", "info", "(", "'Handling cursor'", ")", "db_engine_spec", ".", "handle_cursor", "(", "cursor", ",", "query", ",", "session", ")", "with", "stats_timing", "(", "'sqllab.query.time_fetching_results'", ",", "stats_logger", ")", ":", "logging", ".", "debug", "(", "'Fetching data for query object: {}'", ".", "format", "(", "query", ".", "to_dict", "(", ")", ")", ")", "data", "=", "db_engine_spec", ".", "fetch_data", "(", "cursor", ",", "query", ".", "limit", ")", "except", "SoftTimeLimitExceeded", "as", "e", ":", "logging", ".", "exception", "(", "e", ")", "raise", "SqlLabTimeoutException", "(", "\"SQL Lab timeout. 
This environment's policy is to kill queries \"", "'after {} seconds.'", ".", "format", "(", "SQLLAB_TIMEOUT", ")", ")", "except", "Exception", "as", "e", ":", "logging", ".", "exception", "(", "e", ")", "raise", "SqlLabException", "(", "db_engine_spec", ".", "extract_error_message", "(", "e", ")", ")", "logging", ".", "debug", "(", "'Fetching cursor description'", ")", "cursor_description", "=", "cursor", ".", "description", "return", "dataframe", ".", "SupersetDataFrame", "(", "data", ",", "cursor_description", ",", "db_engine_spec", ")" ]
43.287879
19.484848
def get_git_version(git_path=None):
    """
    Get the Git version.
    """
    if git_path is None:
        git_path = GIT_PATH
    git_version = check_output([git_path, "--version"]).split()[2]
    return git_version
[ "def", "get_git_version", "(", "git_path", "=", "None", ")", ":", "if", "git_path", "is", "None", ":", "git_path", "=", "GIT_PATH", "git_version", "=", "check_output", "(", "[", "git_path", ",", "\"--version\"", "]", ")", ".", "split", "(", ")", "[", "2", "]", "return", "git_version" ]
29.285714
9.857143
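A standalone equivalent using subprocess directly (assumes a git executable on PATH; note that check_output returns bytes):

from subprocess import check_output

version = check_output(["git", "--version"]).split()[2]
print(version)  # e.g. b'2.39.2'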
def new_stat(self):
    """Look at the key and value that the user has entered into the stat
    configurator, and set them on the currently selected entity.
    """
    key = self.ids.newstatkey.text
    value = self.ids.newstatval.text
    if not (key and value):
        # TODO implement some feedback to the effect that
        # you need to enter things
        return
    try:
        self.proxy[key] = self.engine.unpack(value)
    except (TypeError, ValueError):
        self.proxy[key] = value
    self.ids.newstatkey.text = ''
    self.ids.newstatval.text = ''
[ "def", "new_stat", "(", "self", ")", ":", "key", "=", "self", ".", "ids", ".", "newstatkey", ".", "text", "value", "=", "self", ".", "ids", ".", "newstatval", ".", "text", "if", "not", "(", "key", "and", "value", ")", ":", "# TODO implement some feedback to the effect that", "# you need to enter things", "return", "try", ":", "self", ".", "proxy", "[", "key", "]", "=", "self", ".", "engine", ".", "unpack", "(", "value", ")", "except", "(", "TypeError", ",", "ValueError", ")", ":", "self", ".", "proxy", "[", "key", "]", "=", "value", "self", ".", "ids", ".", "newstatkey", ".", "text", "=", "''", "self", ".", "ids", ".", "newstatval", ".", "text", "=", "''" ]
34.555556
12.055556
def find_mounts(self):
    """Finds all mountpoints that are mounted to a directory matching
    :attr:`re_pattern` or originate from a directory matching
    :attr:`orig_re_pattern`.
    """
    for mountpoint, (orig, fs, opts) in self.mountpoints.items():
        if 'bind' not in opts and (re.match(self.orig_re_pattern, orig) or
                                   (self.be_greedy and
                                    re.match(self.re_pattern, mountpoint))):
            yield mountpoint
[ "def", "find_mounts", "(", "self", ")", ":", "for", "mountpoint", ",", "(", "orig", ",", "fs", ",", "opts", ")", "in", "self", ".", "mountpoints", ".", "items", "(", ")", ":", "if", "'bind'", "not", "in", "opts", "and", "(", "re", ".", "match", "(", "self", ".", "orig_re_pattern", ",", "orig", ")", "or", "(", "self", ".", "be_greedy", "and", "re", ".", "match", "(", "self", ".", "re_pattern", ",", "mountpoint", ")", ")", ")", ":", "yield", "mountpoint" ]
52.666667
22.555556
def setup_logging(format="%(asctime)s - %(levelname)s - %(message)s",
                  level='INFO'):
    """Setup the logging framework with a basic configuration"""
    try:
        import coloredlogs
        coloredlogs.install(fmt=format, level=level)
    except ImportError:
        logging.basicConfig(format=format, level=level)
[ "def", "setup_logging", "(", "format", "=", "\"%(asctime)s - %(levelname)s - %(message)s\"", ",", "level", "=", "'INFO'", ")", ":", "try", ":", "import", "coloredlogs", "coloredlogs", ".", "install", "(", "fmt", "=", "format", ",", "level", "=", "level", ")", "except", "ImportError", ":", "logging", ".", "basicConfig", "(", "format", "=", "format", ",", "level", "=", "level", ")" ]
44.571429
19.142857
def load(self, filename):
    """
    Load AEAD from a file.

    @param filename: File to read AEAD from
    @type filename: string
    """
    aead_f = open(filename, "rb")
    buf = aead_f.read(1024)
    if buf.startswith(YHSM_AEAD_CRLF_File_Marker):
        buf = YHSM_AEAD_File_Marker + buf[len(YHSM_AEAD_CRLF_File_Marker):]
    if buf.startswith(YHSM_AEAD_File_Marker):
        if buf[len(YHSM_AEAD_File_Marker)] == chr(1):
            # version 1 format
            fmt = "< I %is" % (pyhsm.defines.YSM_AEAD_NONCE_SIZE)
            self.key_handle, self.nonce = struct.unpack_from(
                fmt, buf, len(YHSM_AEAD_File_Marker) + 1)
            self.data = buf[len(YHSM_AEAD_File_Marker) + 1 +
                            struct.calcsize(fmt):]
        else:
            raise pyhsm.exception.YHSM_Error('Unknown AEAD file format')
    else:
        # version 0 format, just AEAD data
        self.data = buf[:pyhsm.defines.YSM_MAX_KEY_SIZE +
                        pyhsm.defines.YSM_BLOCK_SIZE]
    aead_f.close()
[ "def", "load", "(", "self", ",", "filename", ")", ":", "aead_f", "=", "open", "(", "filename", ",", "\"rb\"", ")", "buf", "=", "aead_f", ".", "read", "(", "1024", ")", "if", "buf", ".", "startswith", "(", "YHSM_AEAD_CRLF_File_Marker", ")", ":", "buf", "=", "YHSM_AEAD_File_Marker", "+", "buf", "[", "len", "(", "YHSM_AEAD_CRLF_File_Marker", ")", ":", "]", "if", "buf", ".", "startswith", "(", "YHSM_AEAD_File_Marker", ")", ":", "if", "buf", "[", "len", "(", "YHSM_AEAD_File_Marker", ")", "]", "==", "chr", "(", "1", ")", ":", "# version 1 format", "fmt", "=", "\"< I %is\"", "%", "(", "pyhsm", ".", "defines", ".", "YSM_AEAD_NONCE_SIZE", ")", "self", ".", "key_handle", ",", "self", ".", "nonce", "=", "struct", ".", "unpack_from", "(", "fmt", ",", "buf", ",", "len", "(", "YHSM_AEAD_File_Marker", ")", "+", "1", ")", "self", ".", "data", "=", "buf", "[", "len", "(", "YHSM_AEAD_File_Marker", ")", "+", "1", "+", "struct", ".", "calcsize", "(", "fmt", ")", ":", "]", "else", ":", "raise", "pyhsm", ".", "exception", ".", "YHSM_Error", "(", "'Unknown AEAD file format'", ")", "else", ":", "# version 0 format, just AEAD data", "self", ".", "data", "=", "buf", "[", ":", "pyhsm", ".", "defines", ".", "YSM_MAX_KEY_SIZE", "+", "pyhsm", ".", "defines", ".", "YSM_BLOCK_SIZE", "]", "aead_f", ".", "close", "(", ")" ]
44.434783
20.956522
def auth_password(self, username, password, event=None, fallback=True):
    """
    Authenticate to the server using a password. The username and password
    are sent over an encrypted link.

    If an ``event`` is passed in, this method will return immediately, and
    the event will be triggered once authentication succeeds or fails. On
    success, `is_authenticated` will return ``True``. On failure, you may
    use `get_exception` to get more detailed error information.

    Since 1.1, if no event is passed, this method will block until the
    authentication succeeds or fails. On failure, an exception is raised.
    Otherwise, the method simply returns.

    Since 1.5, if no event is passed and ``fallback`` is ``True`` (the
    default), if the server doesn't support plain password authentication
    but does support so-called "keyboard-interactive" mode, an attempt
    will be made to authenticate using this interactive mode. If it
    fails, the normal exception will be thrown as if the attempt had
    never been made. This is useful for some recent Gentoo and Debian
    distributions, which turn off plain password authentication in a
    misguided belief that interactive authentication is "more secure".
    (It's not.)

    If the server requires multi-step authentication (which is very
    rare), this method will return a list of auth types permissible for
    the next step. Otherwise, in the normal case, an empty list is
    returned.

    :param str username: the username to authenticate as
    :param basestring password: the password to authenticate with
    :param .threading.Event event:
        an event to trigger when the authentication attempt is complete
        (whether it was successful or not)
    :param bool fallback:
        ``True`` if an attempt at an automated "interactive" password auth
        should be made if the server doesn't support normal password auth
    :return:
        list of auth types permissible for the next stage of
        authentication (normally empty)

    :raises:
        `.BadAuthenticationType` -- if password authentication isn't
        allowed by the server for this user (and no event was passed in)
    :raises:
        `.AuthenticationException` -- if the authentication failed (and no
        event was passed in)
    :raises: `.SSHException` -- if there was a network error
    """
    if (not self.active) or (not self.initial_kex_done):
        # we should never try to send the password unless we're on a secure
        # link
        raise SSHException("No existing session")
    if event is None:
        my_event = threading.Event()
    else:
        my_event = event
    self.auth_handler = AuthHandler(self)
    self.auth_handler.auth_password(username, password, my_event)
    if event is not None:
        # caller wants to wait for event themselves
        return []
    try:
        return self.auth_handler.wait_for_response(my_event)
    except BadAuthenticationType as e:
        # if password auth isn't allowed, but keyboard-interactive *is*,
        # try to fudge it
        if not fallback or ("keyboard-interactive" not in e.allowed_types):
            raise
        try:

            def handler(title, instructions, fields):
                if len(fields) > 1:
                    raise SSHException("Fallback authentication failed.")
                if len(fields) == 0:
                    # for some reason, at least on os x, a 2nd request will
                    # be made with zero fields requested. maybe it's just
                    # to try to fake out automated scripting of the exact
                    # type we're doing here. *shrug* :)
                    return []
                return [password]

            return self.auth_interactive(username, handler)
        except SSHException:
            # attempt failed; just raise the original exception
            raise e
[ "def", "auth_password", "(", "self", ",", "username", ",", "password", ",", "event", "=", "None", ",", "fallback", "=", "True", ")", ":", "if", "(", "not", "self", ".", "active", ")", "or", "(", "not", "self", ".", "initial_kex_done", ")", ":", "# we should never try to send the password unless we're on a secure", "# link", "raise", "SSHException", "(", "\"No existing session\"", ")", "if", "event", "is", "None", ":", "my_event", "=", "threading", ".", "Event", "(", ")", "else", ":", "my_event", "=", "event", "self", ".", "auth_handler", "=", "AuthHandler", "(", "self", ")", "self", ".", "auth_handler", ".", "auth_password", "(", "username", ",", "password", ",", "my_event", ")", "if", "event", "is", "not", "None", ":", "# caller wants to wait for event themselves", "return", "[", "]", "try", ":", "return", "self", ".", "auth_handler", ".", "wait_for_response", "(", "my_event", ")", "except", "BadAuthenticationType", "as", "e", ":", "# if password auth isn't allowed, but keyboard-interactive *is*,", "# try to fudge it", "if", "not", "fallback", "or", "(", "\"keyboard-interactive\"", "not", "in", "e", ".", "allowed_types", ")", ":", "raise", "try", ":", "def", "handler", "(", "title", ",", "instructions", ",", "fields", ")", ":", "if", "len", "(", "fields", ")", ">", "1", ":", "raise", "SSHException", "(", "\"Fallback authentication failed.\"", ")", "if", "len", "(", "fields", ")", "==", "0", ":", "# for some reason, at least on os x, a 2nd request will", "# be made with zero fields requested. maybe it's just", "# to try to fake out automated scripting of the exact", "# type we're doing here. *shrug* :)", "return", "[", "]", "return", "[", "password", "]", "return", "self", ".", "auth_interactive", "(", "username", ",", "handler", ")", "except", "SSHException", ":", "# attempt failed; just raise the original exception", "raise", "e" ]
49.095238
24.952381
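The fallback path above is easiest to see end to end from the caller's side. A minimal usage sketch, assuming a reachable SSH server and valid credentials; host, port, username, and password below are placeholders:

import paramiko

# Placeholder endpoint and credentials; substitute real values.
transport = paramiko.Transport(("ssh.example.com", 22))
transport.start_client()  # run key exchange before any auth attempt

# Password auth; if the server only offers keyboard-interactive,
# the fallback above answers the single password prompt for us.
remaining = transport.auth_password("alice", "s3cret", fallback=True)
print("fully authenticated:", remaining == [])
transport.close()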
def cleanup(self, sched, coro):
        """Remove this coro from the waiting-for-signal queue."""
        try:
            sched.sigwait[self.name].remove((self, coro))
        except ValueError:
            pass
        return True
[ "def", "cleanup", "(", "self", ",", "sched", ",", "coro", ")", ":", "try", ":", "sched", ".", "sigwait", "[", "self", ".", "name", "]", ".", "remove", "(", "(", "self", ",", "coro", ")", ")", "except", "ValueError", ":", "pass", "return", "True" ]
33.142857
15.714286
def get_washing_regex():
    """Return a washing regex list."""
    global _washing_regex
    if len(_washing_regex):
        return _washing_regex
    washing_regex = [
        # Replace non and anti with non- and anti-. This allows a better
        # detection of keywords such as nonabelian.
        (re.compile(r"(\snon)[- ](\w+)"), r"\1\2"),
        (re.compile(r"(\santi)[- ](\w+)"), r"\1\2"),
        # Remove all leading numbers (e.g. 2-pion -> pion).
        (re.compile(r"\s\d-"), " "),
        # Remove multiple spaces.
        (re.compile(r" +"), " "),
    ]
    # Remove spaces in particle names.
    # Particles with -/+/*
    washing_regex += [
        (re.compile(r"(\W%s) ([-+*])" % name), r"\1\2")
        for name in ("c", "muon", "s", "B", "D", "K", "Lambda", "Mu",
                     "Omega", "Pi", "Sigma", "Tau", "W", "Xi")
    ]
    # Particles followed by numbers
    washing_regex += [
        (re.compile(r"(\W%s) ([0-9]\W)" % name), r"\1\2")
        for name in ("a", "b", "c", "f", "h", "s", "B", "D", "H", "K",
                     "L", "Phi", "Pi", "Psi", "Rho", "Stor", "UA", "Xi",
                     "Z")
    ]
    washing_regex += [(re.compile(r"(\W%s) ?\( ?([0-9]+) ?\)[A-Z]?" % name),
                       r"\1(\2)")
                      for name in ("CP", "E", "G", "O", "S", "SL", "SO",
                                   "Spin", "SU", "U", "W", "Z")]
    # Particles with '
    washing_regex += [(re.compile(r"(\W%s) ('\W)" % name), r"\1\2")
                      for name in ("Eta", "W", "Z")]
    # Particles with (N)
    washing_regex += [(re.compile(r"(\W%s) ?\( ?N ?\)[A-Z]?" % name), r"\1(N)")
                      for name in ("CP", "GL", "O", "SL", "SO", "Sp", "Spin",
                                   "SU", "U", "W", "Z")]
    # All names followed by ([0-9]{3,4})
    washing_regex.append((re.compile(r"([A-Za-z]) (\([0-9]{3,4}\)\+?)\s"),
                          r"\1\2 "))
    # Some weird names followed by ([0-9]{3,4})
    washing_regex += [(re.compile(r"\(%s\) (\([0-9]{3,4}\))" % name),
                       r"\1\2 ")
                      for name in ("a0", "Ds1", "Ds2", "K\*")]
    washing_regex += [
        # Remove all lone operators (usually these are errors
        # introduced by pdftotext.)
        (re.compile(r" [+*] "), r" "),
        # Remove multiple spaces.
        (re.compile(r" +"), " "),
        # Remove multiple line breaks.
        (re.compile(r"\n+"), r"\n"),
    ]
    _washing_regex = washing_regex
    return _washing_regex
[ "def", "get_washing_regex", "(", ")", ":", "global", "_washing_regex", "if", "len", "(", "_washing_regex", ")", ":", "return", "_washing_regex", "washing_regex", "=", "[", "# Replace non and anti with non- and anti-. This allows a better", "# detection of keywords such as nonabelian.", "(", "re", ".", "compile", "(", "r\"(\\snon)[- ](\\w+)\"", ")", ",", "r\"\\1\\2\"", ")", ",", "(", "re", ".", "compile", "(", "r\"(\\santi)[- ](\\w+)\"", ")", ",", "r\"\\1\\2\"", ")", ",", "# Remove all leading numbers (e.g. 2-pion -> pion).", "(", "re", ".", "compile", "(", "r\"\\s\\d-\"", ")", ",", "\" \"", ")", ",", "# Remove multiple spaces.", "(", "re", ".", "compile", "(", "r\" +\"", ")", ",", "\" \"", ")", ",", "]", "# Remove spaces in particle names.", "# Particles with -/+/*", "washing_regex", "+=", "[", "(", "re", ".", "compile", "(", "r\"(\\W%s) ([-+*])\"", "%", "name", ")", ",", "r\"\\1\\2\"", ")", "for", "name", "in", "(", "\"c\"", ",", "\"muon\"", ",", "\"s\"", ",", "\"B\"", ",", "\"D\"", ",", "\"K\"", ",", "\"Lambda\"", ",", "\"Mu\"", ",", "\"Omega\"", ",", "\"Pi\"", ",", "\"Sigma\"", ",", "\"Tau\"", ",", "\"W\"", ",", "\"Xi\"", ")", "]", "# Particles followed by numbers", "washing_regex", "+=", "[", "(", "re", ".", "compile", "(", "r\"(\\W%s) ([0-9]\\W)\"", "%", "name", ")", ",", "r\"\\1\\2\"", ")", "for", "name", "in", "(", "\"a\"", ",", "\"b\"", ",", "\"c\"", ",", "\"f\"", ",", "\"h\"", ",", "\"s\"", ",", "\"B\"", ",", "\"D\"", ",", "\"H\"", ",", "\"K\"", ",", "\"L\"", ",", "\"Phi\"", ",", "\"Pi\"", ",", "\"Psi\"", ",", "\"Rho\"", ",", "\"Stor\"", ",", "\"UA\"", ",", "\"Xi\"", ",", "\"Z\"", ")", "]", "washing_regex", "+=", "[", "(", "re", ".", "compile", "(", "r\"(\\W%s) ?\\( ?([0-9]+) ?\\)[A-Z]?\"", "%", "name", ")", ",", "r\"\\1(\\2)\"", ")", "for", "name", "in", "(", "\"CP\"", ",", "\"E\"", ",", "\"G\"", ",", "\"O\"", ",", "\"S\"", ",", "\"SL\"", ",", "\"SO\"", ",", "\"Spin\"", ",", "\"SU\"", ",", "\"U\"", ",", "\"W\"", ",", "\"Z\"", ")", "]", "# Particles with '", "washing_regex", "+=", "[", "(", "re", ".", "compile", "(", "r\"(\\W%s) ('\\W)\"", "%", "name", ")", ",", "r\"\\1\\2\"", ")", "for", "name", "in", "(", "\"Eta\"", ",", "\"W\"", ",", "\"Z\"", ")", "]", "# Particles with (N)", "washing_regex", "+=", "[", "(", "re", ".", "compile", "(", "r\"(\\W%s) ?\\( ?N ?\\)[A-Z]?\"", "%", "name", ")", ",", "r\"\\1(N)\"", ")", "for", "name", "in", "(", "\"CP\"", ",", "\"GL\"", ",", "\"O\"", ",", "\"SL\"", ",", "\"SO\"", ",", "\"Sp\"", ",", "\"Spin\"", ",", "\"SU\"", ",", "\"U\"", ",", "\"W\"", ",", "\"Z\"", ")", "]", "# All names followed by ([0-9]{3,4})", "washing_regex", ".", "append", "(", "(", "re", ".", "compile", "(", "r\"([A-Za-z]) (\\([0-9]{3,4}\\)\\+?)\\s\"", ")", ",", "r\"\\1\\2 \"", ")", ")", "# Some weird names followed by ([0-9]{3,4})", "washing_regex", "+=", "[", "(", "re", ".", "compile", "(", "r\"\\(%s\\) (\\([0-9]{3,4}\\))\"", "%", "name", ")", ",", "r\"\\1\\2 \"", ")", "for", "name", "in", "(", "\"a0\"", ",", "\"Ds1\"", ",", "\"Ds2\"", ",", "\"K\\*\"", ")", "]", "washing_regex", "+=", "[", "# Remove all lonel operators (usually these are errors", "# introduced by pdftotext.)", "(", "re", ".", "compile", "(", "r\" [+*] \"", ")", ",", "r\" \"", ")", ",", "# Remove multiple spaces.", "(", "re", ".", "compile", "(", "r\" +\"", ")", ",", "\" \"", ")", ",", "# Remove multiple line breaks.", "(", "re", ".", "compile", "(", "r\"\\n+\"", ")", ",", "r\"\\n\"", ")", ",", "]", "_washing_regex", "=", "washing_regex", "return", "_washing_regex" ]
37.30303
19.666667
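The list returned above is meant to be applied as an ordered sequence of (compiled pattern, replacement) pairs. A reduced sketch with a made-up rule subset and input:

import re

def wash(text, rules):
    # Apply each (compiled_pattern, replacement) pair in order.
    for pattern, repl in rules:
        text = pattern.sub(repl, text)
    return text

rules = [
    (re.compile(r"(\snon)[- ](\w+)"), r"\1\2"),  # " non abelian" -> " nonabelian"
    (re.compile(r" +"), " "),                    # collapse runs of spaces
]
print(wash(" non abelian  gauge theory", rules))
# -> " nonabelian gauge theory"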
def _view(self, ddoc, view, use_devmode=False, params=None,
          unrecognized_ok=False, passthrough=False):
        """Internal method to execute a view (MapReduce) query
        :param string ddoc: Name of the design document
        :param string view: Name of the view function to execute
        :param params: Extra options to pass to the view engine
        :type params: string or dict
        :return: a :class:`~couchbase.result.HttpResult` object.
        """
        if params:
            if not isinstance(params, str):
                params = make_options_string(
                    params, unrecognized_ok=unrecognized_ok,
                    passthrough=passthrough)
        else:
            params = ""
        ddoc = self._mk_devmode(ddoc, use_devmode)
        url = make_dvpath(ddoc, view) + params
        ret = self._http_request(type=_LCB.LCB_HTTP_TYPE_VIEW,
                                 path=url,
                                 method=_LCB.LCB_HTTP_METHOD_GET,
                                 response_format=FMT_JSON)
        return ret
[ "def", "_view", "(", "self", ",", "ddoc", ",", "view", ",", "use_devmode", "=", "False", ",", "params", "=", "None", ",", "unrecognized_ok", "=", "False", ",", "passthrough", "=", "False", ")", ":", "if", "params", ":", "if", "not", "isinstance", "(", "params", ",", "str", ")", ":", "params", "=", "make_options_string", "(", "params", ",", "unrecognized_ok", "=", "unrecognized_ok", ",", "passthrough", "=", "passthrough", ")", "else", ":", "params", "=", "\"\"", "ddoc", "=", "self", ".", "_mk_devmode", "(", "ddoc", ",", "use_devmode", ")", "url", "=", "make_dvpath", "(", "ddoc", ",", "view", ")", "+", "params", "ret", "=", "self", ".", "_http_request", "(", "type", "=", "_LCB", ".", "LCB_HTTP_TYPE_VIEW", ",", "path", "=", "url", ",", "method", "=", "_LCB", ".", "LCB_HTTP_METHOD_GET", ",", "response_format", "=", "FMT_JSON", ")", "return", "ret" ]
35.967742
16.258065
def present(name, profile="github", **kwargs):
    '''
    Ensure a user is present
    .. code-block:: yaml
        ensure user test is present in github:
            github.present:
                - name: 'gitexample'
    The following parameters are required:
    name
        This is the github handle of the user in the organization
    '''
    ret = {
        'name': name,
        'changes': {},
        'result': None,
        'comment': ''
    }
    target = __salt__['github.get_user'](name, profile=profile, **kwargs)
    # If the user has a valid github handle and is not in the org already
    if not target:
        ret['result'] = False
        ret['comment'] = 'Couldn\'t find user {0}'.format(name)
    elif isinstance(target, bool) and target:
        ret['comment'] = 'User {0} is already in the org '.format(name)
        ret['result'] = True
    elif not target.get('in_org', False) and target.get('membership_state') != 'pending':
        if __opts__['test']:
            ret['comment'] = 'User {0} will be added to the org'.format(name)
            return ret
        # add the user
        result = __salt__['github.add_user'](
            name, profile=profile, **kwargs
        )
        if result:
            ret['changes'].setdefault('old', None)
            ret['changes'].setdefault('new', 'User {0} exists in the org now'.format(name))
            ret['result'] = True
        else:
            ret['result'] = False
            ret['comment'] = 'Failed to add user {0} to the org'.format(name)
    else:
        ret['comment'] = 'User {0} has already been invited.'.format(name)
        ret['result'] = True
    return ret
[ "def", "present", "(", "name", ",", "profile", "=", "\"github\"", ",", "*", "*", "kwargs", ")", ":", "ret", "=", "{", "'name'", ":", "name", ",", "'changes'", ":", "{", "}", ",", "'result'", ":", "None", ",", "'comment'", ":", "''", "}", "target", "=", "__salt__", "[", "'github.get_user'", "]", "(", "name", ",", "profile", "=", "profile", ",", "*", "*", "kwargs", ")", "# If the user has a valid github handle and is not in the org already", "if", "not", "target", ":", "ret", "[", "'result'", "]", "=", "False", "ret", "[", "'comment'", "]", "=", "'Couldnt find user {0}'", ".", "format", "(", "name", ")", "elif", "isinstance", "(", "target", ",", "bool", ")", "and", "target", ":", "ret", "[", "'comment'", "]", "=", "'User {0} is already in the org '", ".", "format", "(", "name", ")", "ret", "[", "'result'", "]", "=", "True", "elif", "not", "target", ".", "get", "(", "'in_org'", ",", "False", ")", "and", "target", ".", "get", "(", "'membership_state'", ")", "!=", "'pending'", ":", "if", "__opts__", "[", "'test'", "]", ":", "ret", "[", "'comment'", "]", "=", "'User {0} will be added to the org'", ".", "format", "(", "name", ")", "return", "ret", "# add the user", "result", "=", "__salt__", "[", "'github.add_user'", "]", "(", "name", ",", "profile", "=", "profile", ",", "*", "*", "kwargs", ")", "if", "result", ":", "ret", "[", "'changes'", "]", ".", "setdefault", "(", "'old'", ",", "None", ")", "ret", "[", "'changes'", "]", ".", "setdefault", "(", "'new'", ",", "'User {0} exists in the org now'", ".", "format", "(", "name", ")", ")", "ret", "[", "'result'", "]", "=", "True", "else", ":", "ret", "[", "'result'", "]", "=", "False", "ret", "[", "'comment'", "]", "=", "'Failed to add user {0} to the org'", ".", "format", "(", "name", ")", "else", ":", "ret", "[", "'comment'", "]", "=", "'User {0} has already been invited.'", ".", "format", "(", "name", ")", "ret", "[", "'result'", "]", "=", "True", "return", "ret" ]
29.833333
24.537037
def random_array(shape, mean=128., std=20.): """Creates a uniformly distributed random array with the given `mean` and `std`. Args: shape: The desired shape mean: The desired mean (Default value = 128) std: The desired std (Default value = 20) Returns: Random numpy array of given `shape` uniformly distributed with desired `mean` and `std`. """ x = np.random.random(shape) # normalize around mean=0, std=1 x = (x - np.mean(x)) / (np.std(x) + K.epsilon()) # and then around the desired mean/std x = (x * std) + mean return x
[ "def", "random_array", "(", "shape", ",", "mean", "=", "128.", ",", "std", "=", "20.", ")", ":", "x", "=", "np", ".", "random", ".", "random", "(", "shape", ")", "# normalize around mean=0, std=1", "x", "=", "(", "x", "-", "np", ".", "mean", "(", "x", ")", ")", "/", "(", "np", ".", "std", "(", "x", ")", "+", "K", ".", "epsilon", "(", ")", ")", "# and then around the desired mean/std", "x", "=", "(", "x", "*", "std", ")", "+", "mean", "return", "x" ]
35.9375
17.25
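`K.epsilon()` above is the Keras backend fuzz factor; a standalone sketch of the same recipe with a literal epsilon, so the snippet runs without Keras:

import numpy as np

def random_array_standalone(shape, mean=128., std=20., eps=1e-7):
    x = np.random.random(shape)
    # normalize around mean=0, std=1, then shift/scale to the target
    x = (x - np.mean(x)) / (np.std(x) + eps)
    return (x * std) + mean

x = random_array_standalone((64, 64, 3))
print(np.mean(x), np.std(x))  # approximately 128 and 20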
def longest_common_substring(s1, s2): """ References: # https://en.wikibooks.org/wiki/Algorithm_Implementation/Strings/Longest_common_substring#Python2 """ m = [[0] * (1 + len(s2)) for i in range(1 + len(s1))] longest, x_longest = 0, 0 for x in range(1, 1 + len(s1)): for y in range(1, 1 + len(s2)): if s1[x - 1] == s2[y - 1]: m[x][y] = m[x - 1][y - 1] + 1 if m[x][y] > longest: longest = m[x][y] x_longest = x else: m[x][y] = 0 return s1[x_longest - longest: x_longest]
[ "def", "longest_common_substring", "(", "s1", ",", "s2", ")", ":", "m", "=", "[", "[", "0", "]", "*", "(", "1", "+", "len", "(", "s2", ")", ")", "for", "i", "in", "range", "(", "1", "+", "len", "(", "s1", ")", ")", "]", "longest", ",", "x_longest", "=", "0", ",", "0", "for", "x", "in", "range", "(", "1", ",", "1", "+", "len", "(", "s1", ")", ")", ":", "for", "y", "in", "range", "(", "1", ",", "1", "+", "len", "(", "s2", ")", ")", ":", "if", "s1", "[", "x", "-", "1", "]", "==", "s2", "[", "y", "-", "1", "]", ":", "m", "[", "x", "]", "[", "y", "]", "=", "m", "[", "x", "-", "1", "]", "[", "y", "-", "1", "]", "+", "1", "if", "m", "[", "x", "]", "[", "y", "]", ">", "longest", ":", "longest", "=", "m", "[", "x", "]", "[", "y", "]", "x_longest", "=", "x", "else", ":", "m", "[", "x", "]", "[", "y", "]", "=", "0", "return", "s1", "[", "x_longest", "-", "longest", ":", "x_longest", "]" ]
35.882353
11.058824
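Two quick calls to the function above; `m[x][y]` holds the length of the longest common suffix of `s1[:x]` and `s2[:y]`, and the maximum over the table marks the answer:

print(longest_common_substring("academy", "abracadabra"))  # 'acad'
print(longest_common_substring("hello", "yellow"))         # 'ello'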
def rename(self, old_label_name, new_label_name): """ Take into account that a label has been renamed """ assert(old_label_name != new_label_name) self._bayes.pop(old_label_name) old_baye_dir = self._get_baye_dir(old_label_name) new_baye_dir = self._get_baye_dir(new_label_name) logger.info("Renaming label training {} -> {} : {} -> {}".format( old_label_name, new_label_name, old_baye_dir, new_baye_dir )) os.rename(old_baye_dir, new_baye_dir)
[ "def", "rename", "(", "self", ",", "old_label_name", ",", "new_label_name", ")", ":", "assert", "(", "old_label_name", "!=", "new_label_name", ")", "self", ".", "_bayes", ".", "pop", "(", "old_label_name", ")", "old_baye_dir", "=", "self", ".", "_get_baye_dir", "(", "old_label_name", ")", "new_baye_dir", "=", "self", ".", "_get_baye_dir", "(", "new_label_name", ")", "logger", ".", "info", "(", "\"Renaming label training {} -> {} : {} -> {}\"", ".", "format", "(", "old_label_name", ",", "new_label_name", ",", "old_baye_dir", ",", "new_baye_dir", ")", ")", "os", ".", "rename", "(", "old_baye_dir", ",", "new_baye_dir", ")" ]
43.75
13.75
def _get_core_transform(self, resolution): """The projection for the stereonet as a matplotlib transform. This is primarily called by LambertAxes._set_lim_and_transforms.""" return self._base_transform(self._center_longitude, self._center_latitude, resolution)
[ "def", "_get_core_transform", "(", "self", ",", "resolution", ")", ":", "return", "self", ".", "_base_transform", "(", "self", ".", "_center_longitude", ",", "self", ".", "_center_latitude", ",", "resolution", ")" ]
58.5
7.666667
def compute_dominance_frontier(graph, domtree):
    """
    Compute a dominance frontier based on the given post-dominator tree.
    This implementation is based on figure 2 of the paper "An Efficient
    Method of Computing Static Single Assignment Form" by Ron Cytron et al.
    :param graph:   The graph where we want to compute the dominance frontier.
    :param domtree: The dominator tree
    :returns:       A dict mapping each node to its dominance frontier
    """
    df = {}
    # Perform a post-order search on the dominator tree
    for x in networkx.dfs_postorder_nodes(domtree):
        if x not in graph:
            # Skip nodes that are not in the graph
            continue
        df[x] = set()
        # local set
        for y in graph.successors(x):
            if x not in domtree.predecessors(y):
                df[x].add(y)
        # up set
        if x is None:
            continue
        for z in domtree.successors(x):
            if z is x:
                continue
            if z not in df:
                continue
            for y in df[z]:
                if x not in list(domtree.predecessors(y)):
                    df[x].add(y)
    return df
[ "def", "compute_dominance_frontier", "(", "graph", ",", "domtree", ")", ":", "df", "=", "{", "}", "# Perform a post-order search on the dominator tree", "for", "x", "in", "networkx", ".", "dfs_postorder_nodes", "(", "domtree", ")", ":", "if", "x", "not", "in", "graph", ":", "# Skip nodes that are not in the graph", "continue", "df", "[", "x", "]", "=", "set", "(", ")", "# local set", "for", "y", "in", "graph", ".", "successors", "(", "x", ")", ":", "if", "x", "not", "in", "domtree", ".", "predecessors", "(", "y", ")", ":", "df", "[", "x", "]", ".", "add", "(", "y", ")", "# up set", "if", "x", "is", "None", ":", "continue", "for", "z", "in", "domtree", ".", "successors", "(", "x", ")", ":", "if", "z", "is", "x", ":", "continue", "if", "z", "not", "in", "df", ":", "continue", "for", "y", "in", "df", "[", "z", "]", ":", "if", "x", "not", "in", "list", "(", "domtree", ".", "predecessors", "(", "y", ")", ")", ":", "df", "[", "x", "]", ".", "add", "(", "y", ")", "return", "df" ]
26.785714
22.02381
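A sketch of driving the function above with networkx on a diamond-shaped control-flow graph. Building `domtree` from `networkx.immediate_dominators` is one plausible way to supply the expected dominator tree; the graph itself is made up:

import networkx

# Diamond: 1 -> {2, 3} -> 4. Neither branch dominates the join node,
# so the frontier of 2 and of 3 is {4}.
g = networkx.DiGraph([(1, 2), (1, 3), (2, 4), (3, 4)])

idom = networkx.immediate_dominators(g, 1)
domtree = networkx.DiGraph((dom, node) for node, dom in idom.items()
                           if node != dom)

print(compute_dominance_frontier(g, domtree))
# expected: 1 -> set(), 2 -> {4}, 3 -> {4}, 4 -> set()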
def add_seqs_to_alignment(seqs, aln, params=None): """Returns an Alignment object from seqs and existing Alignment. seqs: a cogent.core.alignment.SequenceCollection object, or data that can be used to build one. aln: a cogent.core.alignment.Alignment object, or data that can be used to build one params: dict of parameters to pass in to the Muscle app controller. """ if not params: params = {} #create SequenceCollection object from seqs seqs_collection = SequenceCollection(seqs) #Create mapping between abbreviated IDs and full IDs seqs_int_map, seqs_int_keys = seqs_collection.getIntMap(prefix='seq_') #Create SequenceCollection from int_map. seqs_int_map = SequenceCollection(seqs_int_map) #create SequenceCollection object from aln aln_collection = SequenceCollection(aln) #Create mapping between abbreviated IDs and full IDs aln_int_map, aln_int_keys = aln_collection.getIntMap(prefix='aln_') #Create SequenceCollection from int_map. aln_int_map = SequenceCollection(aln_int_map) #set output and profile options params.update({'-out':get_tmp_filename(), '-profile':True}) #save seqs to tmp file seqs_filename = get_tmp_filename() seqs_out = open(seqs_filename,'w') seqs_out.write(seqs_int_map.toFasta()) seqs_out.close() #save aln to tmp file aln_filename = get_tmp_filename() aln_out = open(aln_filename, 'w') aln_out.write(aln_int_map.toFasta()) aln_out.close() #Create Muscle app and get results app = Muscle(InputHandler='_input_as_multifile', params=params, WorkingDir=tempfile.gettempdir()) res = app((aln_filename, seqs_filename)) #Get alignment as dict out of results alignment = dict(parse_fasta(res['MuscleOut'])) #Make new dict mapping original IDs new_alignment = {} for k,v in alignment.items(): if k in seqs_int_keys: new_alignment[seqs_int_keys[k]] = v else: new_alignment[aln_int_keys[k]] = v #Create an Alignment object from alignment dict new_alignment = Alignment(new_alignment) #Clean up res.cleanUp() del(seqs_collection, seqs_int_map, seqs_int_keys) del(aln_collection, aln_int_map, aln_int_keys) del(app, res, alignment, params) remove(seqs_filename) remove(aln_filename) return new_alignment
[ "def", "add_seqs_to_alignment", "(", "seqs", ",", "aln", ",", "params", "=", "None", ")", ":", "if", "not", "params", ":", "params", "=", "{", "}", "#create SequenceCollection object from seqs", "seqs_collection", "=", "SequenceCollection", "(", "seqs", ")", "#Create mapping between abbreviated IDs and full IDs", "seqs_int_map", ",", "seqs_int_keys", "=", "seqs_collection", ".", "getIntMap", "(", "prefix", "=", "'seq_'", ")", "#Create SequenceCollection from int_map.", "seqs_int_map", "=", "SequenceCollection", "(", "seqs_int_map", ")", "#create SequenceCollection object from aln", "aln_collection", "=", "SequenceCollection", "(", "aln", ")", "#Create mapping between abbreviated IDs and full IDs", "aln_int_map", ",", "aln_int_keys", "=", "aln_collection", ".", "getIntMap", "(", "prefix", "=", "'aln_'", ")", "#Create SequenceCollection from int_map.", "aln_int_map", "=", "SequenceCollection", "(", "aln_int_map", ")", "#set output and profile options", "params", ".", "update", "(", "{", "'-out'", ":", "get_tmp_filename", "(", ")", ",", "'-profile'", ":", "True", "}", ")", "#save seqs to tmp file", "seqs_filename", "=", "get_tmp_filename", "(", ")", "seqs_out", "=", "open", "(", "seqs_filename", ",", "'w'", ")", "seqs_out", ".", "write", "(", "seqs_int_map", ".", "toFasta", "(", ")", ")", "seqs_out", ".", "close", "(", ")", "#save aln to tmp file", "aln_filename", "=", "get_tmp_filename", "(", ")", "aln_out", "=", "open", "(", "aln_filename", ",", "'w'", ")", "aln_out", ".", "write", "(", "aln_int_map", ".", "toFasta", "(", ")", ")", "aln_out", ".", "close", "(", ")", "#Create Muscle app and get results", "app", "=", "Muscle", "(", "InputHandler", "=", "'_input_as_multifile'", ",", "params", "=", "params", ",", "WorkingDir", "=", "tempfile", ".", "gettempdir", "(", ")", ")", "res", "=", "app", "(", "(", "aln_filename", ",", "seqs_filename", ")", ")", "#Get alignment as dict out of results", "alignment", "=", "dict", "(", "parse_fasta", "(", "res", "[", "'MuscleOut'", "]", ")", ")", "#Make new dict mapping original IDs", "new_alignment", "=", "{", "}", "for", "k", ",", "v", "in", "alignment", ".", "items", "(", ")", ":", "if", "k", "in", "seqs_int_keys", ":", "new_alignment", "[", "seqs_int_keys", "[", "k", "]", "]", "=", "v", "else", ":", "new_alignment", "[", "aln_int_keys", "[", "k", "]", "]", "=", "v", "#Create an Alignment object from alignment dict", "new_alignment", "=", "Alignment", "(", "new_alignment", ")", "#Clean up", "res", ".", "cleanUp", "(", ")", "del", "(", "seqs_collection", ",", "seqs_int_map", ",", "seqs_int_keys", ")", "del", "(", "aln_collection", ",", "aln_int_map", ",", "aln_int_keys", ")", "del", "(", "app", ",", "res", ",", "alignment", ",", "params", ")", "remove", "(", "seqs_filename", ")", "remove", "(", "aln_filename", ")", "return", "new_alignment" ]
33.314286
17.757143
def once(self): """ Returns a function that will be executed at most one time, no matter how often you call it. Useful for lazy initialization. """ ns = self.Namespace() ns.memo = None ns.run = False def work_once(*args, **kwargs): if ns.run is False: ns.memo = self.obj(*args, **kwargs) ns.run = True return ns.memo return self._wrap(work_once)
[ "def", "once", "(", "self", ")", ":", "ns", "=", "self", ".", "Namespace", "(", ")", "ns", ".", "memo", "=", "None", "ns", ".", "run", "=", "False", "def", "work_once", "(", "*", "args", ",", "*", "*", "kwargs", ")", ":", "if", "ns", ".", "run", "is", "False", ":", "ns", ".", "memo", "=", "self", ".", "obj", "(", "*", "args", ",", "*", "*", "kwargs", ")", "ns", ".", "run", "=", "True", "return", "ns", ".", "memo", "return", "self", ".", "_wrap", "(", "work_once", ")" ]
28.5
16.5
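`once` above is a method of a wrapper object (`self.obj`, `self._wrap`, and `self.Namespace` are internals of its class); a standalone decorator sketch of the same run-at-most-once pattern:

from functools import wraps

def once(func):
    state = {"run": False, "memo": None}

    @wraps(func)
    def wrapper(*args, **kwargs):
        # First call runs func; later calls replay the memoized result.
        if not state["run"]:
            state["memo"] = func(*args, **kwargs)
            state["run"] = True
        return state["memo"]
    return wrapper

@once
def init():
    print("initializing")
    return 42

print(init(), init())  # "initializing" is printed once; then "42 42"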
def delete_external_nodes(sender, **kwargs): """ sync by deleting nodes from external layers when needed """ node = kwargs['instance'] if node.layer.is_external is False or not hasattr(node.layer, 'external') or node.layer.external.synchronizer_path is None: return False if hasattr(node, 'external') and node.external.external_id: push_changes_to_external_layers.delay( node=node.external.external_id, external_layer=node.layer.external, operation='delete' )
[ "def", "delete_external_nodes", "(", "sender", ",", "*", "*", "kwargs", ")", ":", "node", "=", "kwargs", "[", "'instance'", "]", "if", "node", ".", "layer", ".", "is_external", "is", "False", "or", "not", "hasattr", "(", "node", ".", "layer", ",", "'external'", ")", "or", "node", ".", "layer", ".", "external", ".", "synchronizer_path", "is", "None", ":", "return", "False", "if", "hasattr", "(", "node", ",", "'external'", ")", "and", "node", ".", "external", ".", "external_id", ":", "push_changes_to_external_layers", ".", "delay", "(", "node", "=", "node", ".", "external", ".", "external_id", ",", "external_layer", "=", "node", ".", "layer", ".", "external", ",", "operation", "=", "'delete'", ")" ]
40.384615
21.692308
def ensure_self(func):
    """
    Decorator that can be used to ensure 'self' is the first argument on a
    task method.

    This only needs to be used with task methods that are used as a callback
    to a chord or in link_error and is really just a hack to get around
    https://github.com/celery/celery/issues/2137

    Usage:

    .. code-block:: python

        class Foo(models.Model):
            def __init__(self):
                self.bar = 1

            @task
            def first(self):
                pass

            @task
            @ensure_self
            def last(self, results=None):
                print self.bar

    Then the following is performed:

    .. code-block:: python

        f = Foo()
        (f.first.s() | f.last.s(this=f)).apply_async()  # prints 1

    The important part here is that 'this' is passed into the last.s
    subtask. Hopefully issue 2137 is recognized as an issue and fixed and
    this hack is no longer required.
    """
    @wraps(func)
    def inner(*args, **kwargs):
        try:
            self = kwargs.pop('this')
            if len(args) >= 1 and self == args[0]:
                # Make the assumption that the first argument hasn't been passed in twice...
                raise KeyError()
            return func(self, *args, **kwargs)
        except KeyError:
            # 'this' wasn't passed, all we can do is assume normal invocation
            return func(*args, **kwargs)
    return inner
[ "def", "ensure_self", "(", "func", ")", ":", "@", "wraps", "(", "func", ")", "def", "inner", "(", "*", "args", ",", "*", "*", "kwargs", ")", ":", "try", ":", "self", "=", "kwargs", ".", "pop", "(", "'this'", ")", "if", "len", "(", "args", ")", ">=", "1", "and", "self", "==", "args", "[", "0", "]", ":", "# Make the assumption that the first argument hasn't been passed in twice...", "raise", "KeyError", "(", ")", "return", "func", "(", "self", ",", "*", "args", ",", "*", "*", "kwargs", ")", "except", "KeyError", ":", "# 'this' wasn't passed, all we can do is assume normal innovation", "return", "func", "(", "*", "args", ",", "*", "*", "kwargs", ")", "return", "inner" ]
27.843137
24.705882
def _zfs_image_create(vm_name, pool, disk_name, hostname_property_name, sparse_volume, disk_size, disk_image_name): ''' Clones an existing image, or creates a new one. When cloning an image, disk_image_name refers to the source of the clone. If not specified, disk_size is used for creating a new zvol, and sparse_volume determines whether to create a thin provisioned volume. The cloned or new volume can have a ZFS property set containing the vm_name. Use hostname_property_name for specifying the key of this ZFS property. ''' if not disk_image_name and not disk_size: raise CommandExecutionError( 'Unable to create new disk {0}, please specify' ' the disk image name or disk size argument' .format(disk_name) ) if not pool: raise CommandExecutionError( 'Unable to create new disk {0}, please specify' ' the disk pool name'.format(disk_name)) destination_fs = os.path.join(pool, '{0}.{1}'.format(vm_name, disk_name)) log.debug('Image destination will be %s', destination_fs) existing_disk = __salt__['zfs.list'](name=pool) if 'error' in existing_disk: raise CommandExecutionError( 'Unable to create new disk {0}. {1}' .format(destination_fs, existing_disk['error']) ) elif destination_fs in existing_disk: log.info('ZFS filesystem %s already exists. Skipping creation', destination_fs) blockdevice_path = os.path.join('/dev/zvol', pool, vm_name) return blockdevice_path properties = {} if hostname_property_name: properties[hostname_property_name] = vm_name if disk_image_name: __salt__['zfs.clone']( name_a=disk_image_name, name_b=destination_fs, properties=properties) elif disk_size: __salt__['zfs.create']( name=destination_fs, properties=properties, volume_size=disk_size, sparse=sparse_volume) blockdevice_path = os.path.join('/dev/zvol', pool, '{0}.{1}' .format(vm_name, disk_name)) log.debug('Image path will be %s', blockdevice_path) return blockdevice_path
[ "def", "_zfs_image_create", "(", "vm_name", ",", "pool", ",", "disk_name", ",", "hostname_property_name", ",", "sparse_volume", ",", "disk_size", ",", "disk_image_name", ")", ":", "if", "not", "disk_image_name", "and", "not", "disk_size", ":", "raise", "CommandExecutionError", "(", "'Unable to create new disk {0}, please specify'", "' the disk image name or disk size argument'", ".", "format", "(", "disk_name", ")", ")", "if", "not", "pool", ":", "raise", "CommandExecutionError", "(", "'Unable to create new disk {0}, please specify'", "' the disk pool name'", ".", "format", "(", "disk_name", ")", ")", "destination_fs", "=", "os", ".", "path", ".", "join", "(", "pool", ",", "'{0}.{1}'", ".", "format", "(", "vm_name", ",", "disk_name", ")", ")", "log", ".", "debug", "(", "'Image destination will be %s'", ",", "destination_fs", ")", "existing_disk", "=", "__salt__", "[", "'zfs.list'", "]", "(", "name", "=", "pool", ")", "if", "'error'", "in", "existing_disk", ":", "raise", "CommandExecutionError", "(", "'Unable to create new disk {0}. {1}'", ".", "format", "(", "destination_fs", ",", "existing_disk", "[", "'error'", "]", ")", ")", "elif", "destination_fs", "in", "existing_disk", ":", "log", ".", "info", "(", "'ZFS filesystem %s already exists. Skipping creation'", ",", "destination_fs", ")", "blockdevice_path", "=", "os", ".", "path", ".", "join", "(", "'/dev/zvol'", ",", "pool", ",", "vm_name", ")", "return", "blockdevice_path", "properties", "=", "{", "}", "if", "hostname_property_name", ":", "properties", "[", "hostname_property_name", "]", "=", "vm_name", "if", "disk_image_name", ":", "__salt__", "[", "'zfs.clone'", "]", "(", "name_a", "=", "disk_image_name", ",", "name_b", "=", "destination_fs", ",", "properties", "=", "properties", ")", "elif", "disk_size", ":", "__salt__", "[", "'zfs.create'", "]", "(", "name", "=", "destination_fs", ",", "properties", "=", "properties", ",", "volume_size", "=", "disk_size", ",", "sparse", "=", "sparse_volume", ")", "blockdevice_path", "=", "os", ".", "path", ".", "join", "(", "'/dev/zvol'", ",", "pool", ",", "'{0}.{1}'", ".", "format", "(", "vm_name", ",", "disk_name", ")", ")", "log", ".", "debug", "(", "'Image path will be %s'", ",", "blockdevice_path", ")", "return", "blockdevice_path" ]
35.820896
17.701493
def canonical_name(sgf_name): """Keep filename and some date folders""" sgf_name = os.path.normpath(sgf_name) assert sgf_name.endswith('.sgf'), sgf_name # Strip off '.sgf' sgf_name = sgf_name[:-4] # Often eval is inside a folder with the run name. # include from folder before /eval/ if part of path. with_folder = re.search(r'/([^/]*/eval/.*)', sgf_name) if with_folder: return with_folder.group(1) # Return the filename return os.path.basename(sgf_name)
[ "def", "canonical_name", "(", "sgf_name", ")", ":", "sgf_name", "=", "os", ".", "path", ".", "normpath", "(", "sgf_name", ")", "assert", "sgf_name", ".", "endswith", "(", "'.sgf'", ")", ",", "sgf_name", "# Strip off '.sgf'", "sgf_name", "=", "sgf_name", "[", ":", "-", "4", "]", "# Often eval is inside a folder with the run name.", "# include from folder before /eval/ if part of path.", "with_folder", "=", "re", ".", "search", "(", "r'/([^/]*/eval/.*)'", ",", "sgf_name", ")", "if", "with_folder", ":", "return", "with_folder", ".", "group", "(", "1", ")", "# Return the filename", "return", "os", ".", "path", ".", "basename", "(", "sgf_name", ")" ]
33
14.666667
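Example calls with invented paths (POSIX separators assumed, since `os.path.normpath` is platform-dependent):

print(canonical_name("runs/v17/eval/model1-vs-model2.sgf"))
# -> 'v17/eval/model1-vs-model2'  (run folder kept because of /eval/)
print(canonical_name("runs/v17/selfplay/game-000.sgf"))
# -> 'game-000'                   (no /eval/, so only the filename)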
def get_history(self): """Returns the history from cache or DB or a newly created one.""" if hasattr(self, '_history'): return self._history try: self._history = APICallDayHistory.objects.get( user=self.user, creation_date=now().date()) except APICallDayHistory.DoesNotExist: self._history = APICallDayHistory(user=self.user) self._history.amount_api_calls = 0 return self._history
[ "def", "get_history", "(", "self", ")", ":", "if", "hasattr", "(", "self", ",", "'_history'", ")", ":", "return", "self", ".", "_history", "try", ":", "self", ".", "_history", "=", "APICallDayHistory", ".", "objects", ".", "get", "(", "user", "=", "self", ".", "user", ",", "creation_date", "=", "now", "(", ")", ".", "date", "(", ")", ")", "except", "APICallDayHistory", ".", "DoesNotExist", ":", "self", ".", "_history", "=", "APICallDayHistory", "(", "user", "=", "self", ".", "user", ")", "self", ".", "_history", ".", "amount_api_calls", "=", "0", "return", "self", ".", "_history" ]
43.181818
12.636364
def entries_view(self, request, form_id): """ Displays the form entries in a HTML table with option to export as CSV file. """ if request.POST.get("back"): change_url = admin_url(Form, "change", form_id) return HttpResponseRedirect(change_url) form = get_object_or_404(Form, id=form_id) entries_form = EntriesForm(form, request, request.POST or None) delete_entries_perm = "%s.delete_formentry" % FormEntry._meta.app_label can_delete_entries = request.user.has_perm(delete_entries_perm) submitted = entries_form.is_valid() if submitted: if request.POST.get("export"): response = HttpResponse(content_type="text/csv") timestamp = slugify(datetime.now().ctime()) fname = "%s-%s.csv" % (form.slug, timestamp) header = "attachment; filename=%s" % fname response["Content-Disposition"] = header queue = StringIO() delimiter = settings.FORMS_CSV_DELIMITER try: csv = writer(queue, delimiter=delimiter) writerow = csv.writerow except TypeError: queue = BytesIO() delimiter = bytes(delimiter, encoding="utf-8") csv = writer(queue, delimiter=delimiter) writerow = lambda row: csv.writerow([c.encode("utf-8") if hasattr(c, "encode") else c for c in row]) writerow(entries_form.columns()) for row in entries_form.rows(csv=True): writerow(row) data = queue.getvalue() response.write(data) return response elif request.POST.get("delete") and can_delete_entries: selected = request.POST.getlist("selected") if selected: entries = FormEntry.objects.filter(id__in=selected) count = entries.count() if count > 0: entries.delete() message = ungettext("1 entry deleted", "%(count)s entries deleted", count) info(request, message % {"count": count}) template = "admin/forms/entries.html" context = {"title": _("View Entries"), "entries_form": entries_form, "opts": self.model._meta, "original": form, "can_delete_entries": can_delete_entries, "submitted": submitted} return render(request, template, context)
[ "def", "entries_view", "(", "self", ",", "request", ",", "form_id", ")", ":", "if", "request", ".", "POST", ".", "get", "(", "\"back\"", ")", ":", "change_url", "=", "admin_url", "(", "Form", ",", "\"change\"", ",", "form_id", ")", "return", "HttpResponseRedirect", "(", "change_url", ")", "form", "=", "get_object_or_404", "(", "Form", ",", "id", "=", "form_id", ")", "entries_form", "=", "EntriesForm", "(", "form", ",", "request", ",", "request", ".", "POST", "or", "None", ")", "delete_entries_perm", "=", "\"%s.delete_formentry\"", "%", "FormEntry", ".", "_meta", ".", "app_label", "can_delete_entries", "=", "request", ".", "user", ".", "has_perm", "(", "delete_entries_perm", ")", "submitted", "=", "entries_form", ".", "is_valid", "(", ")", "if", "submitted", ":", "if", "request", ".", "POST", ".", "get", "(", "\"export\"", ")", ":", "response", "=", "HttpResponse", "(", "content_type", "=", "\"text/csv\"", ")", "timestamp", "=", "slugify", "(", "datetime", ".", "now", "(", ")", ".", "ctime", "(", ")", ")", "fname", "=", "\"%s-%s.csv\"", "%", "(", "form", ".", "slug", ",", "timestamp", ")", "header", "=", "\"attachment; filename=%s\"", "%", "fname", "response", "[", "\"Content-Disposition\"", "]", "=", "header", "queue", "=", "StringIO", "(", ")", "delimiter", "=", "settings", ".", "FORMS_CSV_DELIMITER", "try", ":", "csv", "=", "writer", "(", "queue", ",", "delimiter", "=", "delimiter", ")", "writerow", "=", "csv", ".", "writerow", "except", "TypeError", ":", "queue", "=", "BytesIO", "(", ")", "delimiter", "=", "bytes", "(", "delimiter", ",", "encoding", "=", "\"utf-8\"", ")", "csv", "=", "writer", "(", "queue", ",", "delimiter", "=", "delimiter", ")", "writerow", "=", "lambda", "row", ":", "csv", ".", "writerow", "(", "[", "c", ".", "encode", "(", "\"utf-8\"", ")", "if", "hasattr", "(", "c", ",", "\"encode\"", ")", "else", "c", "for", "c", "in", "row", "]", ")", "writerow", "(", "entries_form", ".", "columns", "(", ")", ")", "for", "row", "in", "entries_form", ".", "rows", "(", "csv", "=", "True", ")", ":", "writerow", "(", "row", ")", "data", "=", "queue", ".", "getvalue", "(", ")", "response", ".", "write", "(", "data", ")", "return", "response", "elif", "request", ".", "POST", ".", "get", "(", "\"delete\"", ")", "and", "can_delete_entries", ":", "selected", "=", "request", ".", "POST", ".", "getlist", "(", "\"selected\"", ")", "if", "selected", ":", "entries", "=", "FormEntry", ".", "objects", ".", "filter", "(", "id__in", "=", "selected", ")", "count", "=", "entries", ".", "count", "(", ")", "if", "count", ">", "0", ":", "entries", ".", "delete", "(", ")", "message", "=", "ungettext", "(", "\"1 entry deleted\"", ",", "\"%(count)s entries deleted\"", ",", "count", ")", "info", "(", "request", ",", "message", "%", "{", "\"count\"", ":", "count", "}", ")", "template", "=", "\"admin/forms/entries.html\"", "context", "=", "{", "\"title\"", ":", "_", "(", "\"View Entries\"", ")", ",", "\"entries_form\"", ":", "entries_form", ",", "\"opts\"", ":", "self", ".", "model", ".", "_meta", ",", "\"original\"", ":", "form", ",", "\"can_delete_entries\"", ":", "can_delete_entries", ",", "\"submitted\"", ":", "submitted", "}", "return", "render", "(", "request", ",", "template", ",", "context", ")" ]
49.981132
15.301887
def _create_regex_pattern_add_optional_spaces_to_word_characters(word): r"""Add the regex special characters (\s*) to allow optional spaces. :param word: (string) the word to be inserted into a regex pattern. :return: (string) the regex pattern for that word with optional spaces between all of its characters. """ new_word = u"" for ch in word: if ch.isspace(): new_word += ch else: new_word += ch + r'\s*' return new_word
[ "def", "_create_regex_pattern_add_optional_spaces_to_word_characters", "(", "word", ")", ":", "new_word", "=", "u\"\"", "for", "ch", "in", "word", ":", "if", "ch", ".", "isspace", "(", ")", ":", "new_word", "+=", "ch", "else", ":", "new_word", "+=", "ch", "+", "r'\\s*'", "return", "new_word" ]
35.785714
19.571429
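A quick demonstration of the pattern builder above against PDF-extraction-style text with stray spaces; the input word and sentence are invented:

import re

pattern = _create_regex_pattern_add_optional_spaces_to_word_characters("quark")
print(pattern)                                        # q\s*u\s*a\s*r\s*k\s*
print(bool(re.search(pattern, "the q u ark model")))  # True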
def genes_to_json(store, query): """Fetch matching genes and convert to JSON.""" gene_query = store.hgnc_genes(query, search=True) json_terms = [{'name': "{} | {} ({})".format(gene['hgnc_id'], gene['hgnc_symbol'], ', '.join(gene['aliases'])), 'id': gene['hgnc_id']} for gene in gene_query] return json_terms
[ "def", "genes_to_json", "(", "store", ",", "query", ")", ":", "gene_query", "=", "store", ".", "hgnc_genes", "(", "query", ",", "search", "=", "True", ")", "json_terms", "=", "[", "{", "'name'", ":", "\"{} | {} ({})\"", ".", "format", "(", "gene", "[", "'hgnc_id'", "]", ",", "gene", "[", "'hgnc_symbol'", "]", ",", "', '", ".", "join", "(", "gene", "[", "'aliases'", "]", ")", ")", ",", "'id'", ":", "gene", "[", "'hgnc_id'", "]", "}", "for", "gene", "in", "gene_query", "]", "return", "json_terms" ]
55
21.142857
def check(self, orb):
        """Method that checks whether or not the listener is triggered

        Args:
            orb (Orbit):
        Return:
            bool: True if there is a zero-crossing for the parameter
            watched by the listener
        """
        return self.prev is not None and np.sign(self(orb)) != np.sign(self(self.prev))
[ "def", "check", "(", "self", ",", "orb", ")", ":", "return", "self", ".", "prev", "is", "not", "None", "and", "np", ".", "sign", "(", "self", "(", "orb", ")", ")", "!=", "np", ".", "sign", "(", "self", "(", "self", ".", "prev", ")", ")" ]
30.181818
27.818182
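The zero-crossing test above only needs the previous and current values; a standalone sketch over a made-up decreasing series:

import numpy as np

samples = [3.0, 1.2, -0.4, -2.0]
prev = None
for value in samples:
    # Same test as above: a sign change between consecutive samples.
    if prev is not None and np.sign(value) != np.sign(prev):
        print("zero-crossing between", prev, "and", value)
    prev = value
# -> zero-crossing between 1.2 and -0.4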
def _init_hdrgo_sortby(self, hdrgo_sortby, sortby): """Initialize header sort function.""" if hdrgo_sortby is not None: return hdrgo_sortby if sortby is not None: return sortby return self.sortby
[ "def", "_init_hdrgo_sortby", "(", "self", ",", "hdrgo_sortby", ",", "sortby", ")", ":", "if", "hdrgo_sortby", "is", "not", "None", ":", "return", "hdrgo_sortby", "if", "sortby", "is", "not", "None", ":", "return", "sortby", "return", "self", ".", "sortby" ]
35
9
def _connect(cls): """ Connect signal to current model """ post_save.connect( notify_items, sender=cls, dispatch_uid='knocker_{0}'.format(cls.__name__) )
[ "def", "_connect", "(", "cls", ")", ":", "post_save", ".", "connect", "(", "notify_items", ",", "sender", "=", "cls", ",", "dispatch_uid", "=", "'knocker_{0}'", ".", "format", "(", "cls", ".", "__name__", ")", ")" ]
26.25
11.25
def _remove_bound_conditions(agent, keep_criterion):
    """Removes bound conditions of agent such that keep_criterion is False.

    Parameters
    ----------
    agent: Agent
        The agent whose bound conditions we evaluate
    keep_criterion: function
        Evaluates keep_criterion(a) for each agent a in a bound condition
        and if it evaluates to False, removes a from agent's
        bound_conditions
    """
    new_bc = []
    for ind in range(len(agent.bound_conditions)):
        if keep_criterion(agent.bound_conditions[ind].agent):
            new_bc.append(agent.bound_conditions[ind])
    agent.bound_conditions = new_bc
[ "def", "_remove_bound_conditions", "(", "agent", ",", "keep_criterion", ")", ":", "new_bc", "=", "[", "]", "for", "ind", "in", "range", "(", "len", "(", "agent", ".", "bound_conditions", ")", ")", ":", "if", "keep_criterion", "(", "agent", ".", "bound_conditions", "[", "ind", "]", ".", "agent", ")", ":", "new_bc", ".", "append", "(", "agent", ".", "bound_conditions", "[", "ind", "]", ")", "agent", ".", "bound_conditions", "=", "new_bc" ]
39.125
18.75
def get_by(self, field, value): """ Gets the list of firmware baseline resources managed by the appliance. Optional parameters can be used to filter the list of resources returned. The search is case-insensitive. Args: field: Field name to filter. value: Value to filter. Returns: list: List of firmware baseline resources. """ firmwares = self.get_all() matches = [] for item in firmwares: if item.get(field) == value: matches.append(item) return matches
[ "def", "get_by", "(", "self", ",", "field", ",", "value", ")", ":", "firmwares", "=", "self", ".", "get_all", "(", ")", "matches", "=", "[", "]", "for", "item", "in", "firmwares", ":", "if", "item", ".", "get", "(", "field", ")", "==", "value", ":", "matches", ".", "append", "(", "item", ")", "return", "matches" ]
29.55
16.85
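A standalone sketch of the same filter over made-up firmware dicts; note that the body above compares with `==`, so the search is exact rather than case-insensitive as the docstring claims:

firmwares = [
    {"name": "SPP-2021", "state": "Created"},
    {"name": "SPP-2022", "state": "Active"},
]

def get_by(items, field, value):
    # Keep every dict whose `field` equals `value`.
    return [item for item in items if item.get(field) == value]

print(get_by(firmwares, "state", "Active"))
# -> [{'name': 'SPP-2022', 'state': 'Active'}]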
def exists(self, index, doc_type, id, **query_params):
        """
        Return whether a document exists
        """
        path = make_path(index, doc_type, id)
        return self._send_request('HEAD', path, params=query_params)
[ "def", "exists", "(", "self", ",", "index", ",", "doc_type", ",", "id", ",", "*", "*", "query_params", ")", ":", "path", "=", "make_path", "(", "index", ",", "doc_type", ",", "id", ")", "return", "self", ".", "_send_request", "(", "'HEAD'", ",", "path", ",", "params", "=", "query_params", ")" ]
37.333333
8.666667
def from_molecule(cls, mol, theory, charge=None, spin_multiplicity=None, basis_set="6-31g", basis_set_option="cartesian", title=None, operation="optimize", theory_directives=None, alternate_directives=None): """ Very flexible arguments to support many types of potential setups. Users should use more friendly static methods unless they need the flexibility. Args: mol: Input molecule charge: Charge of the molecule. If None, charge on molecule is used. Defaults to None. This allows the input file to be set a charge independently from the molecule itself. spin_multiplicity: Spin multiplicity of molecule. Defaults to None, which means that the spin multiplicity is set to 1 if the molecule has no unpaired electrons and to 2 if there are unpaired electrons. basis_set: The basis set to be used as string or a dict. E.g., {"C": "6-311++G**", "H": "6-31++G**"} or "6-31G". If string, same basis set is used for all elements. basis_set_option: cartesian (default) | spherical, title: Title for the task. Defaults to None, which means a title based on the theory and operation of the task is autogenerated. theory: The theory used for the task. Defaults to "dft". operation: The operation for the task. Defaults to "optimize". theory_directives: A dict of theory directives. For example, if you are running dft calculations, you may specify the exchange correlation functional using {"xc": "b3lyp"}. alternate_directives: A dict of alternate directives. For example, to perform cosmo calculations with DFT, you'd supply {'cosmo': "cosmo"}. """ title = title if title is not None else "{} {} {}".format( re.sub(r"\s", "", mol.formula), theory, operation) charge = charge if charge is not None else mol.charge nelectrons = - charge + mol.charge + mol.nelectrons if spin_multiplicity is not None: spin_multiplicity = spin_multiplicity if (nelectrons + spin_multiplicity) % 2 != 1: raise ValueError( "Charge of {} and spin multiplicity of {} is" " not possible for this molecule".format( charge, spin_multiplicity)) elif charge == mol.charge: spin_multiplicity = mol.spin_multiplicity else: spin_multiplicity = 1 if nelectrons % 2 == 0 else 2 elements = set(mol.composition.get_el_amt_dict().keys()) if isinstance(basis_set, str): basis_set = {el: basis_set for el in elements} basis_set_option = basis_set_option return NwTask(charge, spin_multiplicity, basis_set, basis_set_option=basis_set_option, title=title, theory=theory, operation=operation, theory_directives=theory_directives, alternate_directives=alternate_directives)
[ "def", "from_molecule", "(", "cls", ",", "mol", ",", "theory", ",", "charge", "=", "None", ",", "spin_multiplicity", "=", "None", ",", "basis_set", "=", "\"6-31g\"", ",", "basis_set_option", "=", "\"cartesian\"", ",", "title", "=", "None", ",", "operation", "=", "\"optimize\"", ",", "theory_directives", "=", "None", ",", "alternate_directives", "=", "None", ")", ":", "title", "=", "title", "if", "title", "is", "not", "None", "else", "\"{} {} {}\"", ".", "format", "(", "re", ".", "sub", "(", "r\"\\s\"", ",", "\"\"", ",", "mol", ".", "formula", ")", ",", "theory", ",", "operation", ")", "charge", "=", "charge", "if", "charge", "is", "not", "None", "else", "mol", ".", "charge", "nelectrons", "=", "-", "charge", "+", "mol", ".", "charge", "+", "mol", ".", "nelectrons", "if", "spin_multiplicity", "is", "not", "None", ":", "spin_multiplicity", "=", "spin_multiplicity", "if", "(", "nelectrons", "+", "spin_multiplicity", ")", "%", "2", "!=", "1", ":", "raise", "ValueError", "(", "\"Charge of {} and spin multiplicity of {} is\"", "\" not possible for this molecule\"", ".", "format", "(", "charge", ",", "spin_multiplicity", ")", ")", "elif", "charge", "==", "mol", ".", "charge", ":", "spin_multiplicity", "=", "mol", ".", "spin_multiplicity", "else", ":", "spin_multiplicity", "=", "1", "if", "nelectrons", "%", "2", "==", "0", "else", "2", "elements", "=", "set", "(", "mol", ".", "composition", ".", "get_el_amt_dict", "(", ")", ".", "keys", "(", ")", ")", "if", "isinstance", "(", "basis_set", ",", "str", ")", ":", "basis_set", "=", "{", "el", ":", "basis_set", "for", "el", "in", "elements", "}", "basis_set_option", "=", "basis_set_option", "return", "NwTask", "(", "charge", ",", "spin_multiplicity", ",", "basis_set", ",", "basis_set_option", "=", "basis_set_option", ",", "title", "=", "title", ",", "theory", "=", "theory", ",", "operation", "=", "operation", ",", "theory_directives", "=", "theory_directives", ",", "alternate_directives", "=", "alternate_directives", ")" ]
52.096774
23.290323
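The consistency check above rests on a parity rule: a molecule with N electrons can only have spin multiplicity M when N + M is odd. A tiny standalone check with made-up numbers:

def spin_is_consistent(nelectrons, spin_multiplicity):
    return (nelectrons + spin_multiplicity) % 2 == 1

print(spin_is_consistent(10, 1))  # True: even electron count, singlet
print(spin_is_consistent(10, 2))  # False: even count cannot be a doublet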
def get_registry(self, registry): '''**Description** Find the registry and return its json description **Arguments** - registry: Full hostname/port of registry. Eg. myrepo.example.com:5000 **Success Return Value** A JSON object representing the registry. ''' if self._registry_string_is_valid(registry): return [False, "input registry name cannot contain '/' characters - valid registry names are of the form <host>:<port> where :<port> is optional"] url = self.url + "/api/scanning/v1/anchore/registries/" + registry res = requests.get(url, headers=self.hdrs, verify=self.ssl_verify) if not self._checkResponse(res): return [False, self.lasterr] return [True, res.json()]
[ "def", "get_registry", "(", "self", ",", "registry", ")", ":", "if", "self", ".", "_registry_string_is_valid", "(", "registry", ")", ":", "return", "[", "False", ",", "\"input registry name cannot contain '/' characters - valid registry names are of the form <host>:<port> where :<port> is optional\"", "]", "url", "=", "self", ".", "url", "+", "\"/api/scanning/v1/anchore/registries/\"", "+", "registry", "res", "=", "requests", ".", "get", "(", "url", ",", "headers", "=", "self", ".", "hdrs", ",", "verify", "=", "self", ".", "ssl_verify", ")", "if", "not", "self", ".", "_checkResponse", "(", "res", ")", ":", "return", "[", "False", ",", "self", ".", "lasterr", "]", "return", "[", "True", ",", "res", ".", "json", "(", ")", "]" ]
41.578947
27.263158
def get_graph_by_name_version(self, name: str, version: str) -> Optional[BELGraph]:
        """Load the BEL graph with the given name and version, if it exists."""
        network = self.get_network_by_name_version(name, version)
        if network is None:
            return
        return network.as_bel()
[ "def", "get_graph_by_name_version", "(", "self", ",", "name", ":", "str", ",", "version", ":", "str", ")", "->", "Optional", "[", "BELGraph", "]", ":", "network", "=", "self", ".", "get_network_by_name_version", "(", "name", ",", "version", ")", "if", "network", "is", "None", ":", "return", "return", "network", ".", "as_bel", "(", ")" ]
39.625
24
def store(self, name, data, version, size=0, compressed=False, digest=None, logical_size=None):
        """Adds a new file to the storage.

        If the file with the same name existed before, it's not
        guaranteed that the link for the old version will exist until
        the operation completes, but it's guaranteed that the link
        will never point to an invalid blob.

        Args:
            name: name of the file being stored.
                May contain slashes that are treated as path separators.
            data: binary file-like object with file contents.
                Files with unknown length are supported for compatibility
                with WSGI interface: ``size`` parameter should be passed
                in these cases.
            version: new file "version"
                Link modification time will be set to this timestamp. If
                the link exists, and its modification time is higher, the
                file is not overwritten.
            size: length of ``data`` in bytes
                If not 0, this takes priority over internal ``data`` size.
            compressed: whether ``data`` is gzip-compressed
                If True, the compression is skipped, and file is written
                as-is. Note that the current server implementation sends
                'Content-Encoding' header anyway, mandating client to
                decompress the file.
            digest: SHA256 digest of the file before compression
                If specified, the digest will not be computed again, saving
                resources.
            logical_size: if ``data`` is gzip-compressed, this parameter
                has to be set to decompressed file size.
        """
        with _exclusive_lock(self._lock_path('links', name)):
            logger.debug('Acquired lock to link for %s.', name)
            link_path = self._link_path(name)
            if _path_exists(link_path) and _file_version(link_path) > version:
                logger.info(
                    'Tried to store older version of %s (%d < %d), ignoring.',
                    name, version, _file_version(link_path))
                return _file_version(link_path)
            # data is managed by contents now, and shouldn't be used directly
            with _InputStreamWrapper(data, size) as contents:
                if digest is None or logical_size is None:
                    contents.save()
                    if compressed:
                        # This shouldn't occur if the request came from a proper
                        # filetracker client, so we don't care if it's slow.
                        logger.warning(
                            'Storing compressed stream without hints.')
                        with gzip.open(
                                contents.current_path, 'rb') as decompressed:
                            digest = file_digest(decompressed)
                        with gzip.open(
                                contents.current_path, 'rb') as decompressed:
                            logical_size = _read_stream_for_size(decompressed)
                    else:
                        digest = file_digest(contents.current_path)
                        logical_size = os.stat(contents.current_path).st_size
                blob_path = self._blob_path(digest)
                with _exclusive_lock(self._lock_path('blobs', digest)):
                    logger.debug('Acquired lock for blob %s.', digest)
                    digest_bytes = digest.encode()
                    with self._db_transaction() as txn:
                        logger.debug('Started DB transaction (adding link).')
                        link_count = int(self.db.get(digest_bytes, 0, txn=txn))
                        new_count = str(link_count + 1).encode()
                        self.db.put(digest_bytes, new_count, txn=txn)
                        if link_count == 0:
                            self.db.put(
                                '{}:logical_size'.format(digest).encode(),
                                str(logical_size).encode(),
                                txn=txn)
                        logger.debug('Committing DB transaction (adding link).')
                    logger.debug('Committed DB transaction (adding link).')
                    # Create a new blob if this isn't a duplicate.
                    if link_count == 0:
                        logger.debug('Creating new blob.')
                        _create_file_dirs(blob_path)
                        if compressed:
                            contents.save(blob_path)
                        else:
                            contents.save()
                            with open(contents.current_path, 'rb') as raw,\
                                    gzip.open(blob_path, 'wb') as blob:
                                shutil.copyfileobj(raw, blob)
                logger.debug('Released lock for blob %s.', digest)
                if _path_exists(link_path):
                    # Lend the link lock to delete().
                    # Note that DB lock has to be released in advance, otherwise
                    # deadlock is possible in concurrent scenarios.
                    logger.info('Overwriting existing link %s.', name)
                    self.delete(name, version, _lock=False)
                _create_file_dirs(link_path)
                rel_blob_path = os.path.relpath(blob_path,
                                                os.path.dirname(link_path))
                os.symlink(rel_blob_path, link_path)
                logger.debug('Created link %s.', name)
                lutime(link_path, version)
                return version
        logger.debug('Released lock for link %s.', name)
[ "def", "store", "(", "self", ",", "name", ",", "data", ",", "version", ",", "size", "=", "0", ",", "compressed", "=", "False", ",", "digest", "=", "None", ",", "logical_size", "=", "None", ")", ":", "with", "_exclusive_lock", "(", "self", ".", "_lock_path", "(", "'links'", ",", "name", ")", ")", ":", "logger", ".", "debug", "(", "'Acquired lock to link for %s.'", ",", "name", ")", "link_path", "=", "self", ".", "_link_path", "(", "name", ")", "if", "_path_exists", "(", "link_path", ")", "and", "_file_version", "(", "link_path", ")", ">", "version", ":", "logger", ".", "info", "(", "'Tried to store older version of %s (%d < %d), ignoring.'", ",", "name", ",", "version", ",", "_file_version", "(", "link_path", ")", ")", "return", "_file_version", "(", "link_path", ")", "# data is managed by contents now, and shouldn't be used directly", "with", "_InputStreamWrapper", "(", "data", ",", "size", ")", "as", "contents", ":", "if", "digest", "is", "None", "or", "logical_size", "is", "None", ":", "contents", ".", "save", "(", ")", "if", "compressed", ":", "# This shouldn't occur if the request came from a proper", "# filetracker client, so we don't care if it's slow.", "logger", ".", "warning", "(", "'Storing compressed stream without hints.'", ")", "with", "gzip", ".", "open", "(", "contents", ".", "current_path", ",", "'rb'", ")", "as", "decompressed", ":", "digest", "=", "file_digest", "(", "decompressed", ")", "with", "gzip", ".", "open", "(", "contents", ".", "current_path", ",", "'rb'", ")", "as", "decompressed", ":", "logical_size", "=", "_read_stream_for_size", "(", "decompressed", ")", "else", ":", "digest", "=", "file_digest", "(", "contents", ".", "current_path", ")", "logical_size", "=", "os", ".", "stat", "(", "contents", ".", "current_path", ")", ".", "st_size", "blob_path", "=", "self", ".", "_blob_path", "(", "digest", ")", "with", "_exclusive_lock", "(", "self", ".", "_lock_path", "(", "'blobs'", ",", "digest", ")", ")", ":", "logger", ".", "debug", "(", "'Acquired lock for blob %s.'", ",", "digest", ")", "digest_bytes", "=", "digest", ".", "encode", "(", ")", "with", "self", ".", "_db_transaction", "(", ")", "as", "txn", ":", "logger", ".", "debug", "(", "'Started DB transaction (adding link).'", ")", "link_count", "=", "int", "(", "self", ".", "db", ".", "get", "(", "digest_bytes", ",", "0", ",", "txn", "=", "txn", ")", ")", "new_count", "=", "str", "(", "link_count", "+", "1", ")", ".", "encode", "(", ")", "self", ".", "db", ".", "put", "(", "digest_bytes", ",", "new_count", ",", "txn", "=", "txn", ")", "if", "link_count", "==", "0", ":", "self", ".", "db", ".", "put", "(", "'{}:logical_size'", ".", "format", "(", "digest", ")", ".", "encode", "(", ")", ",", "str", "(", "logical_size", ")", ".", "encode", "(", ")", ",", "txn", "=", "txn", ")", "logger", ".", "debug", "(", "'Commiting DB transaction (adding link).'", ")", "logger", ".", "debug", "(", "'Committed DB transaction (adding link).'", ")", "# Create a new blob if this isn't a duplicate.", "if", "link_count", "==", "0", ":", "logger", ".", "debug", "(", "'Creating new blob.'", ")", "_create_file_dirs", "(", "blob_path", ")", "if", "compressed", ":", "contents", ".", "save", "(", "blob_path", ")", "else", ":", "contents", ".", "save", "(", ")", "with", "open", "(", "contents", ".", "current_path", ",", "'rb'", ")", "as", "raw", ",", "gzip", ".", "open", "(", "blob_path", ",", "'wb'", ")", "as", "blob", ":", "shutil", ".", "copyfileobj", "(", "raw", ",", "blob", ")", "logger", ".", "debug", "(", "'Released lock for blob 
%s.'", ",", "digest", ")", "if", "_path_exists", "(", "link_path", ")", ":", "# Lend the link lock to delete().", "# Note that DB lock has to be released in advance, otherwise", "# deadlock is possible in concurrent scenarios.", "logger", ".", "info", "(", "'Overwriting existing link %s.'", ",", "name", ")", "self", ".", "delete", "(", "name", ",", "version", ",", "_lock", "=", "False", ")", "_create_file_dirs", "(", "link_path", ")", "rel_blob_path", "=", "os", ".", "path", ".", "relpath", "(", "blob_path", ",", "os", ".", "path", ".", "dirname", "(", "link_path", ")", ")", "os", ".", "symlink", "(", "rel_blob_path", ",", "link_path", ")", "logger", ".", "debug", "(", "'Created link %s.'", ",", "name", ")", "lutime", "(", "link_path", ",", "version", ")", "return", "version", "logger", ".", "debug", "(", "'Released lock for link %s.'", ",", "name", ")" ]
49.243478
22.556522
def secretly(reactor, action, system=None, username=None, prompt="Password:"): """ Call the given C{action} with a secret value. @return: a L{Deferred} that fires with C{action}'s result, or L{NoSecretError} if no secret can be retrieved. """ if system is None: system = action.__module__ if system == '__main__': system = os.path.abspath(sys.argv[0]) if username is None: username = getpass.getuser() while True: secret = keyring.get_password(system, username) if secret is not None: break pinentry = yield choosePinentry() keyring.set_password( system, username, (yield pinentry.askForPassword( reactor, prompt, "Enter Password", "Password Prompt for {username}@{system}" .format(system=system, username=username))) ) yield maybeDeferred(action, secret)
[ "def", "secretly", "(", "reactor", ",", "action", ",", "system", "=", "None", ",", "username", "=", "None", ",", "prompt", "=", "\"Password:\"", ")", ":", "if", "system", "is", "None", ":", "system", "=", "action", ".", "__module__", "if", "system", "==", "'__main__'", ":", "system", "=", "os", ".", "path", ".", "abspath", "(", "sys", ".", "argv", "[", "0", "]", ")", "if", "username", "is", "None", ":", "username", "=", "getpass", ".", "getuser", "(", ")", "while", "True", ":", "secret", "=", "keyring", ".", "get_password", "(", "system", ",", "username", ")", "if", "secret", "is", "not", "None", ":", "break", "pinentry", "=", "yield", "choosePinentry", "(", ")", "keyring", ".", "set_password", "(", "system", ",", "username", ",", "(", "yield", "pinentry", ".", "askForPassword", "(", "reactor", ",", "prompt", ",", "\"Enter Password\"", ",", "\"Password Prompt for {username}@{system}\"", ".", "format", "(", "system", "=", "system", ",", "username", "=", "username", ")", ")", ")", ")", "yield", "maybeDeferred", "(", "action", ",", "secret", ")" ]
34.925926
13
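A minimal usage sketch for secretly, assuming it is wrapped with @inlineCallbacks in its original module (the bare generator above suggests the decorator lives elsewhere); 'example-app' is a hypothetical system name:

from twisted.internet.task import react

def action(secret):
    # Called once a secret is retrieved from the keyring or pinentry.
    print('secret has', len(secret), 'characters')

react(lambda reactor: secretly(reactor, action, system='example-app'))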
def OrEvent(*events): ''' Parameters ---------- events : list(threading.Event) List of events. Returns ------- threading.Event Event that is set when **at least one** of the events in :data:`events` is set. ''' or_event = threading.Event() def changed(): ''' Set ``or_event`` if any of the specified events have been set. ''' bools = [event_i.is_set() for event_i in events] if any(bools): or_event.set() else: or_event.clear() for event_i in events: # Override ``set`` and ``clear`` methods on event to update state of # `or_event` after performing default behaviour. orify(event_i, changed) # Set initial state of `or_event`. changed() return or_event
[ "def", "OrEvent", "(", "*", "events", ")", ":", "or_event", "=", "threading", ".", "Event", "(", ")", "def", "changed", "(", ")", ":", "'''\n Set ``or_event`` if any of the specified events have been set.\n '''", "bools", "=", "[", "event_i", ".", "is_set", "(", ")", "for", "event_i", "in", "events", "]", "if", "any", "(", "bools", ")", ":", "or_event", ".", "set", "(", ")", "else", ":", "or_event", ".", "clear", "(", ")", "for", "event_i", "in", "events", ":", "# Override ``set`` and ``clear`` methods on event to update state of", "# `or_event` after performing default behaviour.", "orify", "(", "event_i", ",", "changed", ")", "# Set initial state of `or_event`.", "changed", "(", ")", "return", "or_event" ]
25.03125
23.53125
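A quick usage sketch for OrEvent, assuming the orify helper from the same module is in scope so that set/clear on the source events re-evaluate the combined event:

import threading

a, b = threading.Event(), threading.Event()
either = OrEvent(a, b)   # set whenever a or b is set
b.set()
assert either.is_set()
b.clear()
assert not either.is_set()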
def get_workspaces(self):
        """
        Get a list of workspaces. Returns JSON-like data, not a Con instance.
        You might want to try :meth:`Con.workspaces` instead if the
        information contained here is insufficient.

        :rtype: List of :class:`WorkspaceReply`.
        """
        data = self.message(MessageType.GET_WORKSPACES, '')
        return json.loads(data, object_hook=WorkspaceReply)
[ "def", "get_workspaces", "(", "self", ")", ":", "data", "=", "self", ".", "message", "(", "MessageType", ".", "GET_WORKSPACES", ",", "''", ")", "return", "json", ".", "loads", "(", "data", ",", "object_hook", "=", "WorkspaceReply", ")" ]
33.583333
21.416667
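A hypothetical i3ipc session; the Connection class and the reply attributes (name, focused) are assumptions based on the i3ipc API:

from i3ipc import Connection

i3 = Connection()
for ws in i3.get_workspaces():
    # Each item is a WorkspaceReply built via the object_hook above.
    print(ws.name, ws.focused)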
def _delete_forever_values(self, forever_key): """ Delete all of the keys that have been stored forever. :type forever_key: str """ forever = self._store.connection().lrange(forever_key, 0, -1) if len(forever) > 0: self._store.connection().delete(*forever)
[ "def", "_delete_forever_values", "(", "self", ",", "forever_key", ")", ":", "forever", "=", "self", ".", "_store", ".", "connection", "(", ")", ".", "lrange", "(", "forever_key", ",", "0", ",", "-", "1", ")", "if", "len", "(", "forever", ")", ">", "0", ":", "self", ".", "_store", ".", "connection", "(", ")", ".", "delete", "(", "*", "forever", ")" ]
30.9
17.1
def _maybe_restore_empty_groups(self, combined): """Our index contained empty groups (e.g., from a resampling). If we reduced on that dimension, we want to restore the full index. """ if (self._full_index is not None and self._group.name in combined.dims): indexers = {self._group.name: self._full_index} combined = combined.reindex(**indexers) return combined
[ "def", "_maybe_restore_empty_groups", "(", "self", ",", "combined", ")", ":", "if", "(", "self", ".", "_full_index", "is", "not", "None", "and", "self", ".", "_group", ".", "name", "in", "combined", ".", "dims", ")", ":", "indexers", "=", "{", "self", ".", "_group", ".", "name", ":", "self", ".", "_full_index", "}", "combined", "=", "combined", ".", "reindex", "(", "*", "*", "indexers", ")", "return", "combined" ]
48
11
def _reset_on_error(self, server, func, *args, **kwargs): """Execute an operation. Reset the server on network error. Returns fn()'s return value on success. On error, clears the server's pool and marks the server Unknown. Re-raises any exception thrown by fn(). """ try: return func(*args, **kwargs) except NetworkTimeout: # The socket has been closed. Don't reset the server. raise except ConnectionFailure: self.__reset_server(server.description.address) raise
[ "def", "_reset_on_error", "(", "self", ",", "server", ",", "func", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "try", ":", "return", "func", "(", "*", "args", ",", "*", "*", "kwargs", ")", "except", "NetworkTimeout", ":", "# The socket has been closed. Don't reset the server.", "raise", "except", "ConnectionFailure", ":", "self", ".", "__reset_server", "(", "server", ".", "description", ".", "address", ")", "raise" ]
35.875
17.375
def display_config(self): """Set up status display if option selected. NB: this method assumes that the first entry is the iteration count and the last is the rho value. """ if self.opt['Verbose']: hdrtxt = type(self).hdrtxt() # Call utility function to construct status display formatting self.hdrstr, self.fmtstr, self.nsep = common.solve_status_str( hdrtxt, fwdth0=type(self).fwiter, fprec=type(self).fpothr) else: self.hdrstr, self.fmtstr, self.nsep = '', '', 0
[ "def", "display_config", "(", "self", ")", ":", "if", "self", ".", "opt", "[", "'Verbose'", "]", ":", "hdrtxt", "=", "type", "(", "self", ")", ".", "hdrtxt", "(", ")", "# Call utility function to construct status display formatting", "self", ".", "hdrstr", ",", "self", ".", "fmtstr", ",", "self", ".", "nsep", "=", "common", ".", "solve_status_str", "(", "hdrtxt", ",", "fwdth0", "=", "type", "(", "self", ")", ".", "fwiter", ",", "fprec", "=", "type", "(", "self", ")", ".", "fpothr", ")", "else", ":", "self", ".", "hdrstr", ",", "self", ".", "fmtstr", ",", "self", ".", "nsep", "=", "''", ",", "''", ",", "0" ]
43.538462
19.153846
def visit_list(self, node, *args, **kwargs):
        """As transformers may return lists in some places, this method
        can be used to enforce a list as the return value.
        """
        rv = self.visit(node, *args, **kwargs)
        if not isinstance(rv, list):
            rv = [rv]
        return rv
[ "def", "visit_list", "(", "self", ",", "node", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "rv", "=", "self", ".", "visit", "(", "node", ",", "*", "args", ",", "*", "*", "kwargs", ")", "if", "not", "isinstance", "(", "rv", ",", "list", ")", ":", "rv", "=", "[", "rv", "]", "return", "rv" ]
37.375
8.75
def boolean(value): ''' Convert the content of a string (or a number) to a boolean. Do nothing when input value is already a boolean. This filter accepts usual values for ``True`` and ``False``: "0", "f", "false", "n", etc. ''' if value is None or isinstance(value, bool): return value try: return bool(int(value)) except ValueError: lower_value = value.strip().lower() if not lower_value: return None if lower_value in ('f', 'false', 'n', 'no', 'off'): return False if lower_value in ('on', 't', 'true', 'y', 'yes'): return True raise Invalid('Unable to parse boolean {0}'.format(value))
[ "def", "boolean", "(", "value", ")", ":", "if", "value", "is", "None", "or", "isinstance", "(", "value", ",", "bool", ")", ":", "return", "value", "try", ":", "return", "bool", "(", "int", "(", "value", ")", ")", "except", "ValueError", ":", "lower_value", "=", "value", ".", "strip", "(", ")", ".", "lower", "(", ")", "if", "not", "lower_value", ":", "return", "None", "if", "lower_value", "in", "(", "'f'", ",", "'false'", ",", "'n'", ",", "'no'", ",", "'off'", ")", ":", "return", "False", "if", "lower_value", "in", "(", "'on'", ",", "'t'", ",", "'true'", ",", "'y'", ",", "'yes'", ")", ":", "return", "True", "raise", "Invalid", "(", "'Unable to parse boolean {0}'", ".", "format", "(", "value", ")", ")" ]
31.681818
20.5
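A few concrete cases, sketched as assertions that follow directly from the branches above:

assert boolean('1') is True
assert boolean('no') is False
assert boolean('Off') is False      # input is stripped and lowercased
assert boolean(None) is None
assert boolean('  ') is None        # empty after strip
# boolean('maybe') raises Invalid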
def exponentiate_commuting_pauli_sum(pauli_sum):
    """
    Returns a function that exponentiates each substituent PauliTerm and
    concatenates the results into a single program. NOTE: Use this function
    with care. Substituent PauliTerms should commute.

    :param PauliSum pauli_sum: PauliSum to exponentiate.
    :returns: A function that parametrizes the exponential.
    :rtype: function
    """
    if not isinstance(pauli_sum, PauliSum):
        raise TypeError("Argument 'pauli_sum' must be a PauliSum.")

    fns = [exponential_map(term) for term in pauli_sum]

    def combined_exp_wrap(param):
        return Program([f(param) for f in fns])

    return combined_exp_wrap
[ "def", "exponentiate_commuting_pauli_sum", "(", "pauli_sum", ")", ":", "if", "not", "isinstance", "(", "pauli_sum", ",", "PauliSum", ")", ":", "raise", "TypeError", "(", "\"Argument 'pauli_sum' must be a PauliSum.\"", ")", "fns", "=", "[", "exponential_map", "(", "term", ")", "for", "term", "in", "pauli_sum", "]", "def", "combined_exp_wrap", "(", "param", ")", ":", "return", "Program", "(", "[", "f", "(", "param", ")", "for", "f", "in", "fns", "]", ")", "return", "combined_exp_wrap" ]
35.333333
21.111111
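A usage sketch assuming pyquil's sZ constructor is available; the two Z terms commute, as the NOTE requires:

from pyquil.paulis import sZ

hamiltonian = 0.5 * sZ(0) + 0.3 * sZ(1)
param_prog = exponentiate_commuting_pauli_sum(hamiltonian)
program = param_prog(0.5)   # circuit for exp(-i * 0.5 * hamiltonian)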
def _read_subtitles(self, lines): """ Read text fragments from a subtitles format text file. :param list lines: the lines of the subtitles text file :raises: ValueError: if the id regex is not valid """ self.log(u"Parsing fragments from subtitles text format") id_format = self._get_id_format() lines = [line.strip() for line in lines] pairs = [] i = 1 current = 0 while current < len(lines): line_text = lines[current] if len(line_text) > 0: fragment_lines = [line_text] following = current + 1 while (following < len(lines)) and (len(lines[following]) > 0): fragment_lines.append(lines[following]) following += 1 identifier = id_format % i pairs.append((identifier, fragment_lines)) current = following i += 1 current += 1 self._create_text_fragments(pairs)
[ "def", "_read_subtitles", "(", "self", ",", "lines", ")", ":", "self", ".", "log", "(", "u\"Parsing fragments from subtitles text format\"", ")", "id_format", "=", "self", ".", "_get_id_format", "(", ")", "lines", "=", "[", "line", ".", "strip", "(", ")", "for", "line", "in", "lines", "]", "pairs", "=", "[", "]", "i", "=", "1", "current", "=", "0", "while", "current", "<", "len", "(", "lines", ")", ":", "line_text", "=", "lines", "[", "current", "]", "if", "len", "(", "line_text", ")", ">", "0", ":", "fragment_lines", "=", "[", "line_text", "]", "following", "=", "current", "+", "1", "while", "(", "following", "<", "len", "(", "lines", ")", ")", "and", "(", "len", "(", "lines", "[", "following", "]", ")", ">", "0", ")", ":", "fragment_lines", ".", "append", "(", "lines", "[", "following", "]", ")", "following", "+=", "1", "identifier", "=", "id_format", "%", "i", "pairs", ".", "append", "(", "(", "identifier", ",", "fragment_lines", ")", ")", "current", "=", "following", "i", "+=", "1", "current", "+=", "1", "self", ".", "_create_text_fragments", "(", "pairs", ")" ]
38
13.185185
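The expected input shape, sketched as data: fragments are blank-line-separated blocks of lines, and the exact identifier pattern depends on _get_id_format (the f000001 form below is an assumption):

lines = [
    'Hello world',
    '',
    'Second fragment,',
    'continued on a second line',
]
# produces pairs like ('f000001', ['Hello world']) and
# ('f000002', ['Second fragment,', 'continued on a second line'])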
def output_reduce(input_file, path=True, pdb_name=None, force=False): """Runs Reduce on a pdb or mmol file and creates a new file with the output. Parameters ---------- input_file : str or pathlib.Path Path to file to run Reduce on. path : bool True if input_file is a path. pdb_name : str PDB ID of protein. Required if providing string not path. force : bool True if existing reduce outputs should be overwritten. Returns ------- output_path : pathlib.Path Location of output file. """ if path: output_path = reduce_output_path(path=input_file) else: output_path = reduce_output_path(pdb_name=pdb_name) if output_path.exists() and not force: return output_path reduce_mmol, reduce_message = run_reduce(input_file, path=path) if not reduce_mmol: return None output_path.parent.mkdir(exist_ok=True) output_path.write_text(reduce_mmol) return output_path
[ "def", "output_reduce", "(", "input_file", ",", "path", "=", "True", ",", "pdb_name", "=", "None", ",", "force", "=", "False", ")", ":", "if", "path", ":", "output_path", "=", "reduce_output_path", "(", "path", "=", "input_file", ")", "else", ":", "output_path", "=", "reduce_output_path", "(", "pdb_name", "=", "pdb_name", ")", "if", "output_path", ".", "exists", "(", ")", "and", "not", "force", ":", "return", "output_path", "reduce_mmol", ",", "reduce_message", "=", "run_reduce", "(", "input_file", ",", "path", "=", "path", ")", "if", "not", "reduce_mmol", ":", "return", "None", "output_path", ".", "parent", ".", "mkdir", "(", "exist_ok", "=", "True", ")", "output_path", ".", "write_text", "(", "reduce_mmol", ")", "return", "output_path" ]
31.354839
18.16129
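A usage sketch; '1abc.pdb' is a hypothetical input file, and reduce_output_path/run_reduce are assumed to be in scope as in the original module:

from pathlib import Path

out_path = output_reduce(Path('1abc.pdb'))
if out_path is None:
    print('Reduce produced no output')
else:
    print('Reduce output written to', out_path)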
def get_unresolved_properties_by_inheritance(self, timeperiod):
        """
        Fill in full properties from the templates if needed for the
        unresolved values (example: 'sunday', etc.)

        :param timeperiod: timeperiod whose unresolved values are extended
            from its templates
        :return: None
        """
        # Ok, I do not have prop, Maybe my templates do?
        # Same story for plus
        for i in timeperiod.templates:
            template = self.templates[i]
            timeperiod.unresolved.extend(template.unresolved)
[ "def", "get_unresolved_properties_by_inheritance", "(", "self", ",", "timeperiod", ")", ":", "# Ok, I do not have prop, Maybe my templates do?", "# Same story for plus", "for", "i", "in", "timeperiod", ".", "templates", ":", "template", "=", "self", ".", "templates", "[", "i", "]", "timeperiod", ".", "unresolved", ".", "extend", "(", "template", ".", "unresolved", ")" ]
40
11.090909
def _set_auto_fields(self, model_obj):
        """Set the values of the auto fields using a counter"""
        for field_name, field_obj in \
                self.entity_cls.meta_.auto_fields:
            counter_key = f'{self.schema_name}_{field_name}'
            if not (field_name in model_obj and model_obj[field_name] is not None):
                # Increment the counter and it should start from 1
                counter = next(self.conn['counters'][counter_key])
                if not counter:
                    counter = next(self.conn['counters'][counter_key])

                model_obj[field_name] = counter

        return model_obj
[ "def", "_set_auto_fields", "(", "self", ",", "model_obj", ")", ":", "for", "field_name", ",", "field_obj", "in", "self", ".", "entity_cls", ".", "meta_", ".", "auto_fields", ":", "counter_key", "=", "f'{self.schema_name}_{field_name}'", "if", "not", "(", "field_name", "in", "model_obj", "and", "model_obj", "[", "field_name", "]", "is", "not", "None", ")", ":", "# Increment the counter and it should start from 1", "counter", "=", "next", "(", "self", ".", "conn", "[", "'counters'", "]", "[", "counter_key", "]", ")", "if", "not", "counter", ":", "counter", "=", "next", "(", "self", ".", "conn", "[", "'counters'", "]", "[", "counter_key", "]", ")", "model_obj", "[", "field_name", "]", "=", "counter", "return", "model_obj" ]
48.692308
17.769231
def remove_existing_pidfile(pidfile_path): """ Remove the named PID file if it exists. Removing a PID file that doesn't already exist puts us in the desired state, so we ignore the condition if the file does not exist. """ try: os.remove(pidfile_path) except OSError as exc: if exc.errno == errno.ENOENT: pass else: raise
[ "def", "remove_existing_pidfile", "(", "pidfile_path", ")", ":", "try", ":", "os", ".", "remove", "(", "pidfile_path", ")", "except", "OSError", "as", "exc", ":", "if", "exc", ".", "errno", "==", "errno", ".", "ENOENT", ":", "pass", "else", ":", "raise" ]
26.733333
19.933333
def open_as_needed(filename): """Return a file-object given either a filename or an object. Handles opening with the right class based on the file extension. """ if hasattr(filename, 'read'): return filename if filename.endswith('.bz2'): return bz2.BZ2File(filename, 'rb') elif filename.endswith('.gz'): return gzip.GzipFile(filename, 'rb') else: return open(filename, 'rb')
[ "def", "open_as_needed", "(", "filename", ")", ":", "if", "hasattr", "(", "filename", ",", "'read'", ")", ":", "return", "filename", "if", "filename", ".", "endswith", "(", "'.bz2'", ")", ":", "return", "bz2", ".", "BZ2File", "(", "filename", ",", "'rb'", ")", "elif", "filename", ".", "endswith", "(", "'.gz'", ")", ":", "return", "gzip", ".", "GzipFile", "(", "filename", ",", "'rb'", ")", "else", ":", "return", "open", "(", "filename", ",", "'rb'", ")" ]
28.2
15.933333
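A quick sketch of two of the branches; the gzip case assumes a file named 'observations.gz' exists on disk:

import io

buf = io.BytesIO(b'raw bytes')
assert open_as_needed(buf) is buf             # file-like objects pass through

with open_as_needed('observations.gz') as f:  # dispatched to gzip.GzipFile
    data = f.read()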
def format_labels(labels): """ Convert a dictionary of labels into a comma separated string """ if labels: return ','.join(['{}={}'.format(k, v) for k, v in labels.items()]) else: return ''
[ "def", "format_labels", "(", "labels", ")", ":", "if", "labels", ":", "return", "','", ".", "join", "(", "[", "'{}={}'", ".", "format", "(", "k", ",", "v", ")", "for", "k", ",", "v", "in", "labels", ".", "items", "(", ")", "]", ")", "else", ":", "return", "''" ]
35.333333
21.333333
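Concrete cases following directly from the implementation (dict insertion order is preserved on Python 3.7+):

assert format_labels({'app': 'web', 'env': 'prod'}) == 'app=web,env=prod'
assert format_labels({}) == ''
assert format_labels(None) == ''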
def keep_alive(self, val: bool) -> None: """Set keep-alive connection mode. :param bool val: new state. """ self._keepalive = val if self._keepalive_handle: self._keepalive_handle.cancel() self._keepalive_handle = None
[ "def", "keep_alive", "(", "self", ",", "val", ":", "bool", ")", "->", "None", ":", "self", ".", "_keepalive", "=", "val", "if", "self", ".", "_keepalive_handle", ":", "self", ".", "_keepalive_handle", ".", "cancel", "(", ")", "self", ".", "_keepalive_handle", "=", "None" ]
30.555556
7.333333
def pypirc_temp(index_url): """ Create a temporary pypirc file for interaction with twine """ pypirc_file = tempfile.NamedTemporaryFile(suffix='.pypirc', delete=False) print(pypirc_file.name) with open(pypirc_file.name, 'w') as fh: fh.write(PYPIRC_TEMPLATE.format(index_name=PYPIRC_TEMP_INDEX_NAME, index_url=index_url)) return pypirc_file.name
[ "def", "pypirc_temp", "(", "index_url", ")", ":", "pypirc_file", "=", "tempfile", ".", "NamedTemporaryFile", "(", "suffix", "=", "'.pypirc'", ",", "delete", "=", "False", ")", "print", "(", "pypirc_file", ".", "name", ")", "with", "open", "(", "pypirc_file", ".", "name", ",", "'w'", ")", "as", "fh", ":", "fh", ".", "write", "(", "PYPIRC_TEMPLATE", ".", "format", "(", "index_name", "=", "PYPIRC_TEMP_INDEX_NAME", ",", "index_url", "=", "index_url", ")", ")", "return", "pypirc_file", ".", "name" ]
52.285714
19.285714
def _log_graphql_error(self, query, data): '''Log a ``{"errors": [...]}`` GraphQL return and return itself. :param query: the GraphQL query that triggered the result. :type query: str :param data: the decoded JSON object. :type data: dict :return: the input ``data`` :rtype: dict ''' if isinstance(query, bytes): # pragma: no cover query = query.decode('utf-8') elif not isinstance(query, str): # pragma: no cover # allows sgqlc.operation.Operation to be passed # and generate compact representation of the queries query = bytes(query).decode('utf-8') data = self._fixup_graphql_error(data) errors = data['errors'] self.logger.error('GraphQL query failed with %s errors', len(errors)) for i, error in enumerate(errors): paths = error.get('path') if paths: paths = ' ' + '/'.join(str(path) for path in paths) else: paths = '' self.logger.info('Error #{}{}:'.format(i, paths)) for ln in error.get('message', '').split('\n'): self.logger.info(' | {}'.format(ln)) s = self.snippet(query, error.get('locations')) if s: self.logger.info(' -') self.logger.info(' | Locations:') for ln in s: self.logger.info(' | {}'.format(ln)) return data
[ "def", "_log_graphql_error", "(", "self", ",", "query", ",", "data", ")", ":", "if", "isinstance", "(", "query", ",", "bytes", ")", ":", "# pragma: no cover", "query", "=", "query", ".", "decode", "(", "'utf-8'", ")", "elif", "not", "isinstance", "(", "query", ",", "str", ")", ":", "# pragma: no cover", "# allows sgqlc.operation.Operation to be passed", "# and generate compact representation of the queries", "query", "=", "bytes", "(", "query", ")", ".", "decode", "(", "'utf-8'", ")", "data", "=", "self", ".", "_fixup_graphql_error", "(", "data", ")", "errors", "=", "data", "[", "'errors'", "]", "self", ".", "logger", ".", "error", "(", "'GraphQL query failed with %s errors'", ",", "len", "(", "errors", ")", ")", "for", "i", ",", "error", "in", "enumerate", "(", "errors", ")", ":", "paths", "=", "error", ".", "get", "(", "'path'", ")", "if", "paths", ":", "paths", "=", "' '", "+", "'/'", ".", "join", "(", "str", "(", "path", ")", "for", "path", "in", "paths", ")", "else", ":", "paths", "=", "''", "self", ".", "logger", ".", "info", "(", "'Error #{}{}:'", ".", "format", "(", "i", ",", "paths", ")", ")", "for", "ln", "in", "error", ".", "get", "(", "'message'", ",", "''", ")", ".", "split", "(", "'\\n'", ")", ":", "self", ".", "logger", ".", "info", "(", "' | {}'", ".", "format", "(", "ln", ")", ")", "s", "=", "self", ".", "snippet", "(", "query", ",", "error", ".", "get", "(", "'locations'", ")", ")", "if", "s", ":", "self", ".", "logger", ".", "info", "(", "' -'", ")", "self", ".", "logger", ".", "info", "(", "' | Locations:'", ")", "for", "ln", "in", "s", ":", "self", ".", "logger", ".", "info", "(", "' | {}'", ".", "format", "(", "ln", ")", ")", "return", "data" ]
37.871795
18.897436
def plot(self, df_data, center=False, save=False, save_name=None, save_path='saved', dated=True, notebook=True): "df_data format is a dataframe with columns x, y, z (required), and style, filter (optional)" data = df_data.to_json(orient='records') options = self.to_dict() return plot(data, options, center=center, save=save, save_name=save_name, save_path=save_path, dated=dated, notebook=notebook)
[ "def", "plot", "(", "self", ",", "df_data", ",", "center", "=", "False", ",", "save", "=", "False", ",", "save_name", "=", "None", ",", "save_path", "=", "'saved'", ",", "dated", "=", "True", ",", "notebook", "=", "True", ")", ":", "data", "=", "df_data", ".", "to_json", "(", "orient", "=", "'records'", ")", "options", "=", "self", ".", "to_dict", "(", ")", "return", "plot", "(", "data", ",", "options", ",", "center", "=", "center", ",", "save", "=", "save", ",", "save_name", "=", "save_name", ",", "save_path", "=", "save_path", ",", "dated", "=", "dated", ",", "notebook", "=", "notebook", ")" ]
65.428571
27.714286
def get_activity_query_session(self): """Gets the ``OsidSession`` associated with the activity query service. return: (osid.learning.ActivityQuerySession) - a ``ActivityQuerySession`` raise: OperationFailed - unable to complete request raise: Unimplemented - ``supports_activity_query()`` is ``false`` *compliance: optional -- This method must be implemented if ``supports_activity_query()`` is ``true``.* """ if not self.supports_activity_query(): raise errors.Unimplemented() # pylint: disable=no-member return sessions.ActivityQuerySession(runtime=self._runtime)
[ "def", "get_activity_query_session", "(", "self", ")", ":", "if", "not", "self", ".", "supports_activity_query", "(", ")", ":", "raise", "errors", ".", "Unimplemented", "(", ")", "# pylint: disable=no-member", "return", "sessions", ".", "ActivityQuerySession", "(", "runtime", "=", "self", ".", "_runtime", ")" ]
42.375
14.625
def format_units(self, value, unit="B", optimal=5, auto=True, si=False):
        """
        Takes a value and formats it for user output; we can choose the unit
        to use, e.g. B, MiB, kbits/second. This is mainly for use with
        bytes/bits: it converts the value into a human readable form. It has
        various additional options, but they are really only for special
        cases.

        The function returns a tuple containing the new value (this is a
        number so that the user can still format it if required) and the unit
        that we have converted to.

        By supplying unit to the function we can force those units to be
        used, e.g. ``unit=KiB`` would force the output to be in kibibytes. By
        default we use non-SI units, but if the unit is SI, e.g. kB, then we
        will switch to SI units. Units can also be things like ``Mbit/sec``.

        If the auto parameter is False then we use the unit provided. This
        only makes sense when the unit is singular, e.g. 'Bytes', and we want
        the result in bytes and not, say, converted to MBytes.

        optimal is used to control the size of the output value. We try to
        provide an output value of that number of characters (including the
        decimal point); it may also be less due to rounding. If a fixed unit
        is used the output may be more than this number of characters.
        """
        UNITS = "KMGTPEZY"
        DECIMAL_SIZE = 1000
        BINARY_SIZE = 1024
        CUTOFF = 1000

        can_round = False

        if unit:
            # try to guess the unit. Do we have a known prefix to it?
            if unit[0].upper() in UNITS:
                index = UNITS.index(unit[0].upper()) + 1
                post = unit[1:]
                si = len(unit) > 1 and unit[1] != "i"
                if si:
                    post = post[1:]
                if unit[1] == "b":
                    value *= 8
                auto = False
            else:
                index = 0
                post = unit
        if si:
            size = DECIMAL_SIZE
        else:
            size = BINARY_SIZE

        if auto:
            # we will try to use an appropriate prefix
            if value < CUTOFF:
                unit_out = post
            else:
                value /= size
                for prefix in UNITS:
                    if abs(value) < CUTOFF:
                        break
                    value /= size
                if si:
                    # si kilo is lowercase
                    if prefix == "K":
                        prefix = "k"
                else:
                    post = "i" + post

                unit_out = prefix + post
                can_round = True
        else:
            # we are using a fixed unit
            unit_out = unit
            size = pow(size, index)
            if size:
                value /= size
                can_round = True

        if can_round and optimal and value:
            # we will try to make the output value the desired size
            # we need to keep our value as a numeric type
            places = int(log10(abs(value)))
            if places >= optimal - 2:
                value = int(value)
            else:
                value = round(value, max(optimal - places - 2, 0))
        return value, unit_out
[ "def", "format_units", "(", "self", ",", "value", ",", "unit", "=", "\"B\"", ",", "optimal", "=", "5", ",", "auto", "=", "True", ",", "si", "=", "False", ")", ":", "UNITS", "=", "\"KMGTPEZY\"", "DECIMAL_SIZE", "=", "1000", "BINARY_SIZE", "=", "1024", "CUTOFF", "=", "1000", "can_round", "=", "False", "if", "unit", ":", "# try to guess the unit. Do we have a known prefix too it?", "if", "unit", "[", "0", "]", ".", "upper", "(", ")", "in", "UNITS", ":", "index", "=", "UNITS", ".", "index", "(", "unit", "[", "0", "]", ".", "upper", "(", ")", ")", "+", "1", "post", "=", "unit", "[", "1", ":", "]", "si", "=", "len", "(", "unit", ")", ">", "1", "and", "unit", "[", "1", "]", "!=", "\"i\"", "if", "si", ":", "post", "=", "post", "[", "1", ":", "]", "if", "unit", "[", "1", "]", "==", "\"b\"", ":", "value", "*=", "8", "auto", "=", "False", "else", ":", "index", "=", "0", "post", "=", "unit", "if", "si", ":", "size", "=", "DECIMAL_SIZE", "else", ":", "size", "=", "BINARY_SIZE", "if", "auto", ":", "# we will try to use an appropriate prefix", "if", "value", "<", "CUTOFF", ":", "unit_out", "=", "post", "else", ":", "value", "/=", "size", "for", "prefix", "in", "UNITS", ":", "if", "abs", "(", "value", ")", "<", "CUTOFF", ":", "break", "value", "/=", "size", "if", "si", ":", "# si kilo is lowercase", "if", "prefix", "==", "\"K\"", ":", "prefix", "=", "\"k\"", "else", ":", "post", "=", "\"i\"", "+", "post", "unit_out", "=", "prefix", "+", "post", "can_round", "=", "True", "else", ":", "# we are using a fixed unit", "unit_out", "=", "unit", "size", "=", "pow", "(", "size", ",", "index", ")", "if", "size", ":", "value", "/=", "size", "can_round", "=", "True", "if", "can_round", "and", "optimal", "and", "value", ":", "# we will try to make the output value the desired size", "# we need to keep out value as a numeric type", "places", "=", "int", "(", "log10", "(", "abs", "(", "value", ")", ")", ")", "if", "places", ">=", "optimal", "-", "2", ":", "value", "=", "int", "(", "value", ")", "else", ":", "value", "=", "round", "(", "value", ",", "max", "(", "optimal", "-", "places", "-", "2", ",", "0", ")", ")", "return", "value", ",", "unit_out" ]
36.764045
19.775281
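A worked example of the auto path, assuming `py3` is the object exposing this method: 1234567 bytes is divided by 1024 once up front and once more in the prefix loop (1234567 / 1024 / 1024 ≈ 1.17737), stopping at prefix 'M' with post 'iB', then rounded toward the optimal width of 5 characters:

value, unit = py3.format_units(1234567)
assert (value, unit) == (1.177, 'MiB')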
def _transform_col(self, x, i):
        """Encode one categorical column into average target values.

        Args:
            x (pandas.Series): a categorical column to encode
            i (int): column index

        Returns:
            x (pandas.Series): a column encoded into average target values;
                unseen categories are imputed with the overall target mean.
        """

        return x.fillna(NAN_INT).map(self.target_encoders[i]).fillna(self.target_mean)
[ "def", "_transform_col", "(", "self", ",", "x", ",", "i", ")", ":", "return", "x", ".", "fillna", "(", "NAN_INT", ")", ".", "map", "(", "self", ".", "target_encoders", "[", "i", "]", ")", ".", "fillna", "(", "self", ".", "target_mean", ")" ]
41.222222
16.222222
def reset(self): ''' Reset the state of the market (attributes in sow_vars, etc) to some user-defined initial state, and erase the histories of tracked variables. Parameters ---------- none Returns ------- none ''' for var_name in self.track_vars: # Reset the history of tracked variables setattr(self,var_name + '_hist',[]) for var_name in self.sow_vars: # Set the sow variables to their initial levels initial_val = getattr(self,var_name + '_init') setattr(self,var_name,initial_val) for this_type in self.agents: # Reset each AgentType in the market this_type.reset()
[ "def", "reset", "(", "self", ")", ":", "for", "var_name", "in", "self", ".", "track_vars", ":", "# Reset the history of tracked variables", "setattr", "(", "self", ",", "var_name", "+", "'_hist'", ",", "[", "]", ")", "for", "var_name", "in", "self", ".", "sow_vars", ":", "# Set the sow variables to their initial levels", "initial_val", "=", "getattr", "(", "self", ",", "var_name", "+", "'_init'", ")", "setattr", "(", "self", ",", "var_name", ",", "initial_val", ")", "for", "this_type", "in", "self", ".", "agents", ":", "# Reset each AgentType in the market", "this_type", ".", "reset", "(", ")" ]
35.25
27.55
def ParseStatusRow(self, parser_mediator, query, row, **unused_kwargs):
    """Parses a status row from the database.

    Args:
      parser_mediator (ParserMediator): mediates interactions between parsers
          and other components, such as storage and dfvfs.
      query (str): query that created the row.
      row (sqlite3.Row): row resulting from query.
    """
    query_hash = hash(query)

    event_data = TwitterIOSStatusEventData()
    event_data.favorite_count = self._GetRowValue(
        query_hash, row, 'favoriteCount')
    event_data.favorited = self._GetRowValue(query_hash, row, 'favorited')
    event_data.name = self._GetRowValue(query_hash, row, 'name')
    event_data.query = query
    event_data.retweet_count = self._GetRowValue(
        query_hash, row, 'retweetCount')
    event_data.text = self._GetRowValue(query_hash, row, 'text')
    event_data.user_id = self._GetRowValue(query_hash, row, 'user_id')

    timestamp = self._GetRowValue(query_hash, row, 'date')
    if timestamp:
      # Convert the floating point value to an integer.
      timestamp = int(timestamp)
      date_time = dfdatetime_posix_time.PosixTime(timestamp=timestamp)
      event = time_events.DateTimeValuesEvent(
          date_time, definitions.TIME_DESCRIPTION_CREATION)
      parser_mediator.ProduceEventWithEventData(event, event_data)

    timestamp = self._GetRowValue(query_hash, row, 'updatedAt')
    if timestamp:
      # Convert the floating point value to an integer.
      timestamp = int(timestamp)
      date_time = dfdatetime_posix_time.PosixTime(timestamp=timestamp)
      event = time_events.DateTimeValuesEvent(
          date_time, definitions.TIME_DESCRIPTION_UPDATE)
      parser_mediator.ProduceEventWithEventData(event, event_data)
[ "def", "ParseStatusRow", "(", "self", ",", "parser_mediator", ",", "query", ",", "row", ",", "*", "*", "unused_kwargs", ")", ":", "query_hash", "=", "hash", "(", "query", ")", "event_data", "=", "TwitterIOSStatusEventData", "(", ")", "event_data", ".", "favorite_count", "=", "self", ".", "_GetRowValue", "(", "query_hash", ",", "row", ",", "'favoriteCount'", ")", "event_data", ".", "favorited", "=", "self", ".", "_GetRowValue", "(", "query_hash", ",", "row", ",", "'favorited'", ")", "event_data", ".", "name", "=", "self", ".", "_GetRowValue", "(", "query_hash", ",", "row", ",", "'name'", ")", "event_data", ".", "query", "=", "query", "event_data", ".", "retweet_count", "=", "self", ".", "_GetRowValue", "(", "query_hash", ",", "row", ",", "'retweetCount'", ")", "event_data", ".", "text", "=", "self", ".", "_GetRowValue", "(", "query_hash", ",", "row", ",", "'text'", ")", "event_data", ".", "user_id", "=", "self", ".", "_GetRowValue", "(", "query_hash", ",", "row", ",", "'user_id'", ")", "timestamp", "=", "self", ".", "_GetRowValue", "(", "query_hash", ",", "row", ",", "'date'", ")", "if", "timestamp", ":", "# Convert the floating point value to an integer.", "timestamp", "=", "int", "(", "timestamp", ")", "date_time", "=", "dfdatetime_posix_time", ".", "PosixTime", "(", "timestamp", "=", "timestamp", ")", "event", "=", "time_events", ".", "DateTimeValuesEvent", "(", "date_time", ",", "definitions", ".", "TIME_DESCRIPTION_CREATION", ")", "parser_mediator", ".", "ProduceEventWithEventData", "(", "event", ",", "event_data", ")", "timestamp", "=", "self", ".", "_GetRowValue", "(", "query_hash", ",", "row", ",", "'updatedAt'", ")", "if", "timestamp", ":", "# Convert the floating point value to an integer.", "timestamp", "=", "int", "(", "timestamp", ")", "date_time", "=", "dfdatetime_posix_time", ".", "PosixTime", "(", "timestamp", "=", "timestamp", ")", "event", "=", "time_events", ".", "DateTimeValuesEvent", "(", "date_time", ",", "definitions", ".", "TIME_DESCRIPTION_UPDATE", ")", "parser_mediator", ".", "ProduceEventWithEventData", "(", "event", ",", "event_data", ")" ]
44.230769
19.128205
def register_plugin(self, name): """Load and register a plugin given its package name.""" logger.info("Registering plugin: " + name) module = importlib.import_module(name) module.register_plugin(self)
[ "def", "register_plugin", "(", "self", ",", "name", ")", ":", "logger", ".", "info", "(", "\"Registering plugin: \"", "+", "name", ")", "module", "=", "importlib", ".", "import_module", "(", "name", ")", "module", ".", "register_plugin", "(", "self", ")" ]
45.6
5.6
def extract(data, items, out_dir=None): """Extract germline calls for the given sample, if tumor only. """ if vcfutils.get_paired_phenotype(data): if len(items) == 1: germline_vcf = _remove_prioritization(data["vrn_file"], data, out_dir) germline_vcf = vcfutils.bgzip_and_index(germline_vcf, data["config"]) data["vrn_file_plus"] = {"germline": germline_vcf} return data
[ "def", "extract", "(", "data", ",", "items", ",", "out_dir", "=", "None", ")", ":", "if", "vcfutils", ".", "get_paired_phenotype", "(", "data", ")", ":", "if", "len", "(", "items", ")", "==", "1", ":", "germline_vcf", "=", "_remove_prioritization", "(", "data", "[", "\"vrn_file\"", "]", ",", "data", ",", "out_dir", ")", "germline_vcf", "=", "vcfutils", ".", "bgzip_and_index", "(", "germline_vcf", ",", "data", "[", "\"config\"", "]", ")", "data", "[", "\"vrn_file_plus\"", "]", "=", "{", "\"germline\"", ":", "germline_vcf", "}", "return", "data" ]
46.888889
16.333333
def list_configured_members(lbn, profile='default'): ''' Return a list of member workers from the configuration files CLI Examples: .. code-block:: bash salt '*' modjk.list_configured_members loadbalancer1 salt '*' modjk.list_configured_members loadbalancer1 other-profile ''' config = dump_config(profile) try: ret = config['worker.{0}.balance_workers'.format(lbn)] except KeyError: return [] return [_f for _f in ret.strip().split(',') if _f]
[ "def", "list_configured_members", "(", "lbn", ",", "profile", "=", "'default'", ")", ":", "config", "=", "dump_config", "(", "profile", ")", "try", ":", "ret", "=", "config", "[", "'worker.{0}.balance_workers'", ".", "format", "(", "lbn", ")", "]", "except", "KeyError", ":", "return", "[", "]", "return", "[", "_f", "for", "_f", "in", "ret", ".", "strip", "(", ")", ".", "split", "(", "','", ")", "if", "_f", "]" ]
24.95
27.65
def deny_assignments(self): """Instance depends on the API version: * 2018-07-01-preview: :class:`DenyAssignmentsOperations<azure.mgmt.authorization.v2018_07_01_preview.operations.DenyAssignmentsOperations>` """ api_version = self._get_api_version('deny_assignments') if api_version == '2018-07-01-preview': from .v2018_07_01_preview.operations import DenyAssignmentsOperations as OperationClass else: raise NotImplementedError("APIVersion {} is not available".format(api_version)) return OperationClass(self._client, self.config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version)))
[ "def", "deny_assignments", "(", "self", ")", ":", "api_version", "=", "self", ".", "_get_api_version", "(", "'deny_assignments'", ")", "if", "api_version", "==", "'2018-07-01-preview'", ":", "from", ".", "v2018_07_01_preview", ".", "operations", "import", "DenyAssignmentsOperations", "as", "OperationClass", "else", ":", "raise", "NotImplementedError", "(", "\"APIVersion {} is not available\"", ".", "format", "(", "api_version", ")", ")", "return", "OperationClass", "(", "self", ".", "_client", ",", "self", ".", "config", ",", "Serializer", "(", "self", ".", "_models_dict", "(", "api_version", ")", ")", ",", "Deserializer", "(", "self", ".", "_models_dict", "(", "api_version", ")", ")", ")" ]
63.090909
39.636364
def refresh_content(self, order=None, name=None): """ Re-download all submissions and reset the page index """ order = order or self.content.order # Preserve the query if staying on the current page if name is None: query = self.content.query else: query = None name = name or self.content.name # Hack to allow an order specified in the name by prompt_subreddit() to # override the current default if order == 'ignore': order = None with self.term.loader('Refreshing page'): self.content = SubredditContent.from_name( self.reddit, name, self.term.loader, order=order, query=query) if not self.term.loader.exception: self.nav = Navigator(self.content.get)
[ "def", "refresh_content", "(", "self", ",", "order", "=", "None", ",", "name", "=", "None", ")", ":", "order", "=", "order", "or", "self", ".", "content", ".", "order", "# Preserve the query if staying on the current page", "if", "name", "is", "None", ":", "query", "=", "self", ".", "content", ".", "query", "else", ":", "query", "=", "None", "name", "=", "name", "or", "self", ".", "content", ".", "name", "# Hack to allow an order specified in the name by prompt_subreddit() to", "# override the current default", "if", "order", "==", "'ignore'", ":", "order", "=", "None", "with", "self", ".", "term", ".", "loader", "(", "'Refreshing page'", ")", ":", "self", ".", "content", "=", "SubredditContent", ".", "from_name", "(", "self", ".", "reddit", ",", "name", ",", "self", ".", "term", ".", "loader", ",", "order", "=", "order", ",", "query", "=", "query", ")", "if", "not", "self", ".", "term", ".", "loader", ".", "exception", ":", "self", ".", "nav", "=", "Navigator", "(", "self", ".", "content", ".", "get", ")" ]
33.958333
17.208333
def _get_template(self, root=None, **metadata_defaults):
        """ Iterate over metadata_defaults items {prop: val, ...} to populate the template """

        if root is None:
            if self._data_map is None:
                self._init_data_map()

            root = self._xml_root = self._data_map['_root']

        template_tree = self._xml_tree = create_element_tree(root)

        for prop, val in iteritems(metadata_defaults):
            path = self._data_map.get(prop)
            if path and val:
                setattr(self, prop, val)
                update_property(template_tree, None, path, prop, val)

        return template_tree
[ "def", "_get_template", "(", "self", ",", "root", "=", "None", ",", "*", "*", "metadata_defaults", ")", ":", "if", "root", "is", "None", ":", "if", "self", ".", "_data_map", "is", "None", ":", "self", ".", "_init_data_map", "(", ")", "root", "=", "self", ".", "_xml_root", "=", "self", ".", "_data_map", "[", "'_root'", "]", "template_tree", "=", "self", ".", "_xml_tree", "=", "create_element_tree", "(", "root", ")", "for", "prop", ",", "val", "in", "iteritems", "(", "metadata_defaults", ")", ":", "path", "=", "self", ".", "_data_map", ".", "get", "(", "prop", ")", "if", "path", "and", "val", ":", "setattr", "(", "self", ",", "prop", ",", "val", ")", "update_property", "(", "template_tree", ",", "None", ",", "path", ",", "prop", ",", "val", ")", "return", "template_tree" ]
35.111111
19.555556
def grammatical_join(l, initial_joins=", ", final_join=" and "): """ Display a list of items nicely, with a different string before the final item. Useful for using lists in sentences. >>> grammatical_join(['apples', 'pears', 'bananas']) 'apples, pears and bananas' >>> grammatical_join(['apples', 'pears', 'bananas'], initial_joins=";", final_join="; or ") 'apples; pears; or bananas' :param l: List of strings to join :param initial_joins: the string to join the non-ultimate items with :param final_join: the string to join the final item with :return: items joined with commas except " and " before the final one. """ # http://stackoverflow.com/questions/19838976/grammatical-list-join-in-python return initial_joins.join(l[:-2] + [final_join.join(l[-2:])])
[ "def", "grammatical_join", "(", "l", ",", "initial_joins", "=", "\", \"", ",", "final_join", "=", "\" and \"", ")", ":", "# http://stackoverflow.com/questions/19838976/grammatical-list-join-in-python", "return", "initial_joins", ".", "join", "(", "l", "[", ":", "-", "2", "]", "+", "[", "final_join", ".", "join", "(", "l", "[", "-", "2", ":", "]", ")", "]", ")" ]
44.611111
23.944444
def call(self, scope, args=[]):
        """Call mixin. Parses a copy of the mixin's body
        in the current scope and returns it.
        args:
            scope (Scope): current scope
            args (list): arguments
        raises:
            SyntaxError
        returns:
            list or False
        """
        ret = False
        if args:
            args = [[
                a.parse(scope) if isinstance(a, Expression) else a
                for a in arg
            ] if arg else arg for arg in args]
        try:
            self.parse_args(args, scope)
        except SyntaxError:
            pass
        else:
            if self.parse_guards(scope):
                body = self.body.copy()
                ret = body.tokens[1]
                if ret:
                    utility.rename(ret, scope, Block)
        return ret
[ "def", "call", "(", "self", ",", "scope", ",", "args", "=", "[", "]", ")", ":", "ret", "=", "False", "if", "args", ":", "args", "=", "[", "[", "a", ".", "parse", "(", "scope", ")", "if", "isinstance", "(", "a", ",", "Expression", ")", "else", "a", "for", "a", "in", "arg", "]", "if", "arg", "else", "arg", "for", "arg", "in", "args", "]", "try", ":", "self", ".", "parse_args", "(", "args", ",", "scope", ")", "except", "SyntaxError", ":", "pass", "else", ":", "if", "self", ".", "parse_guards", "(", "scope", ")", ":", "body", "=", "self", ".", "body", ".", "copy", "(", ")", "ret", "=", "body", ".", "tokens", "[", "1", "]", "if", "ret", ":", "utility", ".", "rename", "(", "ret", ",", "scope", ",", "Block", ")", "return", "ret" ]
29.814815
14.259259
def get_order_line_item_by_id(cls, order_line_item_id, **kwargs): """Find OrderLineItem Return single instance of OrderLineItem by its ID. This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async=True >>> thread = api.get_order_line_item_by_id(order_line_item_id, async=True) >>> result = thread.get() :param async bool :param str order_line_item_id: ID of orderLineItem to return (required) :return: OrderLineItem If the method is called asynchronously, returns the request thread. """ kwargs['_return_http_data_only'] = True if kwargs.get('async'): return cls._get_order_line_item_by_id_with_http_info(order_line_item_id, **kwargs) else: (data) = cls._get_order_line_item_by_id_with_http_info(order_line_item_id, **kwargs) return data
[ "def", "get_order_line_item_by_id", "(", "cls", ",", "order_line_item_id", ",", "*", "*", "kwargs", ")", ":", "kwargs", "[", "'_return_http_data_only'", "]", "=", "True", "if", "kwargs", ".", "get", "(", "'async'", ")", ":", "return", "cls", ".", "_get_order_line_item_by_id_with_http_info", "(", "order_line_item_id", ",", "*", "*", "kwargs", ")", "else", ":", "(", "data", ")", "=", "cls", ".", "_get_order_line_item_by_id_with_http_info", "(", "order_line_item_id", ",", "*", "*", "kwargs", ")", "return", "data" ]
45.142857
22.761905
def conversations(self, getsrcdst=None, **kargs):
        """Graphs the conversations between sources and destinations and displays them
        (using graphviz and imagemagick)

        getsrcdst: a function that takes an element of the list and
                   returns the source, the destination and optionally
                   a label. By default, returns the IP source and
                   destination from IP, IPv6 and ARP layers
        type: output type (svg, ps, gif, jpg, etc.), passed to dot's "-T" option  # noqa: E501
        target: filename or redirect. Defaults pipe to Imagemagick's display program  # noqa: E501
        prog: which graphviz program to use"""
        if getsrcdst is None:
            def getsrcdst(pkt):
                """Extract src and dst addresses"""
                if 'IP' in pkt:
                    return (pkt['IP'].src, pkt['IP'].dst)
                if 'IPv6' in pkt:
                    return (pkt['IPv6'].src, pkt['IPv6'].dst)
                if 'ARP' in pkt:
                    return (pkt['ARP'].psrc, pkt['ARP'].pdst)
                raise TypeError()
        conv = {}
        for p in self.res:
            p = self._elt2pkt(p)
            try:
                c = getsrcdst(p)
            except Exception:
                # No warning here: it's OK that getsrcdst() raises an
                # exception, since it might be, for example, a
                # function that expects a specific layer in each
                # packet. The try/except approach is faster and
                # considered more Pythonic than adding tests.
                continue
            if len(c) == 3:
                conv.setdefault(c[:2], set()).add(c[2])
            else:
                conv[c] = conv.get(c, 0) + 1
        gr = 'digraph "conv" {\n'
        for (s, d), l in six.iteritems(conv):
            gr += '\t "%s" -> "%s" [label="%s"]\n' % (
                s, d, ', '.join(str(x) for x in l) if isinstance(l, set) else l
            )
        gr += "}\n"
        return do_graph(gr, **kargs)
[ "def", "conversations", "(", "self", ",", "getsrcdst", "=", "None", ",", "*", "*", "kargs", ")", ":", "if", "getsrcdst", "is", "None", ":", "def", "getsrcdst", "(", "pkt", ")", ":", "\"\"\"Extract src and dst addresses\"\"\"", "if", "'IP'", "in", "pkt", ":", "return", "(", "pkt", "[", "'IP'", "]", ".", "src", ",", "pkt", "[", "'IP'", "]", ".", "dst", ")", "if", "'IPv6'", "in", "pkt", ":", "return", "(", "pkt", "[", "'IPv6'", "]", ".", "src", ",", "pkt", "[", "'IPv6'", "]", ".", "dst", ")", "if", "'ARP'", "in", "pkt", ":", "return", "(", "pkt", "[", "'ARP'", "]", ".", "psrc", ",", "pkt", "[", "'ARP'", "]", ".", "pdst", ")", "raise", "TypeError", "(", ")", "conv", "=", "{", "}", "for", "p", "in", "self", ".", "res", ":", "p", "=", "self", ".", "_elt2pkt", "(", "p", ")", "try", ":", "c", "=", "getsrcdst", "(", "p", ")", "except", "Exception", ":", "# No warning here: it's OK that getsrcdst() raises an", "# exception, since it might be, for example, a", "# function that expects a specific layer in each", "# packet. The try/except approach is faster and", "# considered more Pythonic than adding tests.", "continue", "if", "len", "(", "c", ")", "==", "3", ":", "conv", ".", "setdefault", "(", "c", "[", ":", "2", "]", ",", "set", "(", ")", ")", ".", "add", "(", "c", "[", "2", "]", ")", "else", ":", "conv", "[", "c", "]", "=", "conv", ".", "get", "(", "c", ",", "0", ")", "+", "1", "gr", "=", "'digraph \"conv\" {\\n'", "for", "(", "s", ",", "d", ")", ",", "l", "in", "six", ".", "iteritems", "(", "conv", ")", ":", "gr", "+=", "'\\t \"%s\" -> \"%s\" [label=\"%s\"]\\n'", "%", "(", "s", ",", "d", ",", "', '", ".", "join", "(", "str", "(", "x", ")", "for", "x", "in", "l", ")", "if", "isinstance", "(", "l", ",", "set", ")", "else", "l", ")", "gr", "+=", "\"}\\n\"", "return", "do_graph", "(", "gr", ",", "*", "*", "kargs", ")" ]
46.488372
16.744186
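A hypothetical scapy session; 'capture.pcap' is a placeholder file, and the type/target keywords are forwarded through kargs to do_graph:

from scapy.all import rdpcap

packets = rdpcap('capture.pcap')
packets.conversations(type='png', target='> conversations.png')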
def uavionix_adsb_out_dynamic_send(self, utcTime, gpsLat, gpsLon, gpsAlt, gpsFix, numSats, baroAltMSL, accuracyHor, accuracyVert, accuracyVel, velVert, velNS, VelEW, emergencyStatus, state, squawk, force_mavlink1=False): ''' Dynamic data used to generate ADS-B out transponder data (send at 5Hz) utcTime : UTC time in seconds since GPS epoch (Jan 6, 1980). If unknown set to UINT32_MAX (uint32_t) gpsLat : Latitude WGS84 (deg * 1E7). If unknown set to INT32_MAX (int32_t) gpsLon : Longitude WGS84 (deg * 1E7). If unknown set to INT32_MAX (int32_t) gpsAlt : Altitude in mm (m * 1E-3) UP +ve. WGS84 altitude. If unknown set to INT32_MAX (int32_t) gpsFix : 0-1: no fix, 2: 2D fix, 3: 3D fix, 4: DGPS, 5: RTK (uint8_t) numSats : Number of satellites visible. If unknown set to UINT8_MAX (uint8_t) baroAltMSL : Barometric pressure altitude relative to a standard atmosphere of 1013.2 mBar and NOT bar corrected altitude (m * 1E-3). (up +ve). If unknown set to INT32_MAX (int32_t) accuracyHor : Horizontal accuracy in mm (m * 1E-3). If unknown set to UINT32_MAX (uint32_t) accuracyVert : Vertical accuracy in cm. If unknown set to UINT16_MAX (uint16_t) accuracyVel : Velocity accuracy in mm/s (m * 1E-3). If unknown set to UINT16_MAX (uint16_t) velVert : GPS vertical speed in cm/s. If unknown set to INT16_MAX (int16_t) velNS : North-South velocity over ground in cm/s North +ve. If unknown set to INT16_MAX (int16_t) VelEW : East-West velocity over ground in cm/s East +ve. If unknown set to INT16_MAX (int16_t) emergencyStatus : Emergency status (uint8_t) state : ADS-B transponder dynamic input state flags (uint16_t) squawk : Mode A code (typically 1200 [0x04B0] for VFR) (uint16_t) ''' return self.send(self.uavionix_adsb_out_dynamic_encode(utcTime, gpsLat, gpsLon, gpsAlt, gpsFix, numSats, baroAltMSL, accuracyHor, accuracyVert, accuracyVel, velVert, velNS, VelEW, emergencyStatus, state, squawk), force_mavlink1=force_mavlink1)
[ "def", "uavionix_adsb_out_dynamic_send", "(", "self", ",", "utcTime", ",", "gpsLat", ",", "gpsLon", ",", "gpsAlt", ",", "gpsFix", ",", "numSats", ",", "baroAltMSL", ",", "accuracyHor", ",", "accuracyVert", ",", "accuracyVel", ",", "velVert", ",", "velNS", ",", "VelEW", ",", "emergencyStatus", ",", "state", ",", "squawk", ",", "force_mavlink1", "=", "False", ")", ":", "return", "self", ".", "send", "(", "self", ".", "uavionix_adsb_out_dynamic_encode", "(", "utcTime", ",", "gpsLat", ",", "gpsLon", ",", "gpsAlt", ",", "gpsFix", ",", "numSats", ",", "baroAltMSL", ",", "accuracyHor", ",", "accuracyVert", ",", "accuracyVel", ",", "velVert", ",", "velNS", ",", "VelEW", ",", "emergencyStatus", ",", "state", ",", "squawk", ")", ",", "force_mavlink1", "=", "force_mavlink1", ")" ]
108.869565
79.478261
def _output_digraph(self, target): """Graphviz format depmap output handler.""" color_by_type = {} def maybe_add_type(dep, dep_id): """Add a class type to a dependency id if --show-types is passed.""" return dep_id if not self.show_types else '\\n'.join((dep_id, dep.__class__.__name__)) def make_node(dep, dep_id, internal): line_fmt = ' "{id}" [style=filled, fillcolor={color}{internal}];' int_shape = ', shape=ellipse' if not internal else '' dep_class = dep.__class__.__name__ if dep_class not in color_by_type: color_by_type[dep_class] = len(color_by_type.keys()) + 1 return line_fmt.format(id=dep_id, internal=int_shape, color=color_by_type[dep_class]) def make_edge(from_dep_id, to_dep_id, internal): style = ' [style=dashed]' if not internal else '' return ' "{}" -> "{}"{};'.format(from_dep_id, to_dep_id, style) def output_deps(dep, parent, parent_id, outputted): dep_id, internal = self._dep_id(dep) if dep_id not in outputted: yield make_node(dep, maybe_add_type(dep, dep_id), internal) outputted.add(dep_id) for sub_dep in self._enumerate_visible_deps(dep, self.output_candidate): for item in output_deps(sub_dep, dep, dep_id, outputted): yield item if parent: edge_id = (parent_id, dep_id) if edge_id not in outputted: yield make_edge(maybe_add_type(parent, parent_id), maybe_add_type(dep, dep_id), internal) outputted.add(edge_id) yield 'digraph "{}" {{'.format(target.id) yield ' node [shape=rectangle, colorscheme=set312;];' yield ' rankdir=LR;' for line in output_deps(target, parent=None, parent_id=None, outputted=set()): yield line yield '}'
[ "def", "_output_digraph", "(", "self", ",", "target", ")", ":", "color_by_type", "=", "{", "}", "def", "maybe_add_type", "(", "dep", ",", "dep_id", ")", ":", "\"\"\"Add a class type to a dependency id if --show-types is passed.\"\"\"", "return", "dep_id", "if", "not", "self", ".", "show_types", "else", "'\\\\n'", ".", "join", "(", "(", "dep_id", ",", "dep", ".", "__class__", ".", "__name__", ")", ")", "def", "make_node", "(", "dep", ",", "dep_id", ",", "internal", ")", ":", "line_fmt", "=", "' \"{id}\" [style=filled, fillcolor={color}{internal}];'", "int_shape", "=", "', shape=ellipse'", "if", "not", "internal", "else", "''", "dep_class", "=", "dep", ".", "__class__", ".", "__name__", "if", "dep_class", "not", "in", "color_by_type", ":", "color_by_type", "[", "dep_class", "]", "=", "len", "(", "color_by_type", ".", "keys", "(", ")", ")", "+", "1", "return", "line_fmt", ".", "format", "(", "id", "=", "dep_id", ",", "internal", "=", "int_shape", ",", "color", "=", "color_by_type", "[", "dep_class", "]", ")", "def", "make_edge", "(", "from_dep_id", ",", "to_dep_id", ",", "internal", ")", ":", "style", "=", "' [style=dashed]'", "if", "not", "internal", "else", "''", "return", "' \"{}\" -> \"{}\"{};'", ".", "format", "(", "from_dep_id", ",", "to_dep_id", ",", "style", ")", "def", "output_deps", "(", "dep", ",", "parent", ",", "parent_id", ",", "outputted", ")", ":", "dep_id", ",", "internal", "=", "self", ".", "_dep_id", "(", "dep", ")", "if", "dep_id", "not", "in", "outputted", ":", "yield", "make_node", "(", "dep", ",", "maybe_add_type", "(", "dep", ",", "dep_id", ")", ",", "internal", ")", "outputted", ".", "add", "(", "dep_id", ")", "for", "sub_dep", "in", "self", ".", "_enumerate_visible_deps", "(", "dep", ",", "self", ".", "output_candidate", ")", ":", "for", "item", "in", "output_deps", "(", "sub_dep", ",", "dep", ",", "dep_id", ",", "outputted", ")", ":", "yield", "item", "if", "parent", ":", "edge_id", "=", "(", "parent_id", ",", "dep_id", ")", "if", "edge_id", "not", "in", "outputted", ":", "yield", "make_edge", "(", "maybe_add_type", "(", "parent", ",", "parent_id", ")", ",", "maybe_add_type", "(", "dep", ",", "dep_id", ")", ",", "internal", ")", "outputted", ".", "add", "(", "edge_id", ")", "yield", "'digraph \"{}\" {{'", ".", "format", "(", "target", ".", "id", ")", "yield", "' node [shape=rectangle, colorscheme=set312;];'", "yield", "' rankdir=LR;'", "for", "line", "in", "output_deps", "(", "target", ",", "parent", "=", "None", ",", "parent_id", "=", "None", ",", "outputted", "=", "set", "(", ")", ")", ":", "yield", "line", "yield", "'}'" ]
38.755556
23.111111
def main(cls): """Main entry point of Laniakea. """ args = cls.parse_args() if args.focus: Focus.init() else: Focus.disable() logging.basicConfig(format='[Laniakea] %(asctime)s %(levelname)s: %(message)s', level=args.verbosity * 10, datefmt='%Y-%m-%d %H:%M:%S') # Laniakea base configuration logger.info('Loading Laniakea configuration from %s', Focus.data(args.settings.name)) try: settings = json.loads(args.settings.read()) except ValueError as msg: logger.error('Unable to parse %s: %s', args.settings.name, msg) return 1 # UserData userdata = '' if args.userdata: logger.info('Reading user data script content from %s', Focus.info(args.userdata.name)) try: userdata = UserData.handle_import_tags(args.userdata.read(), os.path.dirname(args.userdata.name)) except UserDataException as msg: logging.error(msg) return 1 if args.list_userdata_macros: UserData.list_tags(userdata) return 0 if args.userdata_macros: args.userdata_macros = UserData.convert_pair_to_dict(args.userdata_macros or '') userdata = UserData.handle_tags(userdata, args.userdata_macros) if args.print_userdata: logger.info('Combined UserData script:\n%s', userdata) return 0 if args.provider: provider = getattr(globals()[args.provider], args.provider.title() + 'CommandLine') provider().main(args, settings, userdata) return 0
[ "def", "main", "(", "cls", ")", ":", "args", "=", "cls", ".", "parse_args", "(", ")", "if", "args", ".", "focus", ":", "Focus", ".", "init", "(", ")", "else", ":", "Focus", ".", "disable", "(", ")", "logging", ".", "basicConfig", "(", "format", "=", "'[Laniakea] %(asctime)s %(levelname)s: %(message)s'", ",", "level", "=", "args", ".", "verbosity", "*", "10", ",", "datefmt", "=", "'%Y-%m-%d %H:%M:%S'", ")", "# Laniakea base configuration", "logger", ".", "info", "(", "'Loading Laniakea configuration from %s'", ",", "Focus", ".", "data", "(", "args", ".", "settings", ".", "name", ")", ")", "try", ":", "settings", "=", "json", ".", "loads", "(", "args", ".", "settings", ".", "read", "(", ")", ")", "except", "ValueError", "as", "msg", ":", "logger", ".", "error", "(", "'Unable to parse %s: %s'", ",", "args", ".", "settings", ".", "name", ",", "msg", ")", "return", "1", "# UserData", "userdata", "=", "''", "if", "args", ".", "userdata", ":", "logger", ".", "info", "(", "'Reading user data script content from %s'", ",", "Focus", ".", "info", "(", "args", ".", "userdata", ".", "name", ")", ")", "try", ":", "userdata", "=", "UserData", ".", "handle_import_tags", "(", "args", ".", "userdata", ".", "read", "(", ")", ",", "os", ".", "path", ".", "dirname", "(", "args", ".", "userdata", ".", "name", ")", ")", "except", "UserDataException", "as", "msg", ":", "logging", ".", "error", "(", "msg", ")", "return", "1", "if", "args", ".", "list_userdata_macros", ":", "UserData", ".", "list_tags", "(", "userdata", ")", "return", "0", "if", "args", ".", "userdata_macros", ":", "args", ".", "userdata_macros", "=", "UserData", ".", "convert_pair_to_dict", "(", "args", ".", "userdata_macros", "or", "''", ")", "userdata", "=", "UserData", ".", "handle_tags", "(", "userdata", ",", "args", ".", "userdata_macros", ")", "if", "args", ".", "print_userdata", ":", "logger", ".", "info", "(", "'Combined UserData script:\\n%s'", ",", "userdata", ")", "return", "0", "if", "args", ".", "provider", ":", "provider", "=", "getattr", "(", "globals", "(", ")", "[", "args", ".", "provider", "]", ",", "args", ".", "provider", ".", "title", "(", ")", "+", "'CommandLine'", ")", "provider", "(", ")", ".", "main", "(", "args", ",", "settings", ",", "userdata", ")", "return", "0" ]
34.392157
25.078431
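The provider dispatch in the last lines is easy to miss: the provider name is looked up in the module's globals() (where each provider module is expected to already be imported) and the matching *CommandLine class is resolved purely by naming convention. A minimal self-contained sketch of that pattern — the 'ec2' provider name, the stand-in module, and the settings dict below are all hypothetical:

import types

class Ec2CommandLine:
    def main(self, args, settings, userdata):
        print('would launch with settings:', settings)

# stands in for an imported provider module named 'ec2'
ec2 = types.SimpleNamespace(Ec2CommandLine=Ec2CommandLine)

provider_name = 'ec2'  # hypothetical --provider value
provider = getattr(globals()[provider_name], provider_name.title() + 'CommandLine')
provider().main(None, {'ami': 'ami-12345'}, '')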
def get_style(self, name, workspace=None): ''' Returns a single style object. Will return None if no style is found. Will raise an error if more than one style with the same name is found. ''' styles = self.get_styles(names=name, workspaces=workspace) return self._return_first_item(styles)
[ "def", "get_style", "(", "self", ",", "name", ",", "workspace", "=", "None", ")", ":", "styles", "=", "self", ".", "get_styles", "(", "names", "=", "name", ",", "workspaces", "=", "workspace", ")", "return", "self", ".", "_return_first_item", "(", "styles", ")" ]
38.333333
20.111111
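_return_first_item is defined elsewhere on the class; from the docstring's contract (None for no match, an error on ambiguity) it could look like the following sketch. The helper name comes from the snippet, but this body and the exception type are assumptions:

class AmbiguousRequestError(Exception):
    pass

def _return_first_item(items):
    # None for an empty result, the single item otherwise,
    # and an error when more than one object matches the name.
    if not items:
        return None
    if len(items) > 1:
        raise AmbiguousRequestError('%d styles share that name' % len(items))
    return items[0]

print(_return_first_item([]))        # None
print(_return_first_item(['line']))  # 'line'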
def _computeAsymptoticCovarianceMatrix(self, W, N_k, method=None): """Compute estimate of the asymptotic covariance matrix. Parameters ---------- W : np.ndarray, shape=(N, K), dtype='float' The normalized weight matrix for snapshots and states. W[n, k] is the weight of snapshot n in state k. N_k : np.ndarray, shape=(K), dtype='int' N_k[k] is the number of samples from state k. method : string, optional, default=None Method used to compute the asymptotic covariance matrix. Must be either "approximate", "svd", or "svd-ew". If None, defaults to "svd-ew". Returns ------- Theta: np.ndarray, shape=(K, K), dtype='float' Asymptotic covariance matrix Notes ----- The computational costs of the various 'method' arguments vary: 'svd' computes the generalized inverse using the singular value decomposition -- this should be efficient yet accurate (faster) 'svd-ew' is the same as 'svd', but uses the eigenvalue decomposition of W'W to bypass the need to perform an SVD (fastest) 'approximate' only requires multiplication of KxN and NxK matrices, but is an approximate underestimate of the uncertainty. svd and svd-ew are described in appendix D of Shirts, 2007 JCP, while "approximate" is described in Section 4 of Kong, 2003. J. R. Statist. Soc. B. We currently recommend 'svd-ew'. """ # Set 'svd-ew' as default if uncertainty method specified as None. if method is None: method = 'svd-ew' # Get dimensions of weight matrix. [N, K] = W.shape # Check dimensions if(K != N_k.size): raise ParameterError( 'W must be NxK, where N_k is a K-dimensional array.') if(np.sum(N_k) != N): raise ParameterError('W must be NxK, where N = sum_k N_k.') check_w_normalized(W, N_k) # Compute estimate of asymptotic covariance matrix using specified method. if method == 'approximate': # Use fast approximate expression from Kong et al. -- this underestimates the true covariance, but may be a good approximation in some cases and requires no matrix inversions # Theta = P'P # Construct matrices W = np.matrix(W, dtype=np.float64) # Compute covariance Theta = W.T * W elif method == 'svd': # Use singular value decomposition based approach given in supplementary material to efficiently compute uncertainty # See Appendix D.1, Eq. D4 in [1]. # Construct matrices Ndiag = np.matrix(np.diag(N_k), dtype=np.float64) W = np.matrix(W, dtype=np.float64) I = np.identity(K, dtype=np.float64) # Compute SVD of W [U, S, Vt] = linalg.svd(W, full_matrices=False) # full_matrices=False avoids O(N^2) memory allocation by only calculating the active subspace of U. Sigma = np.matrix(np.diag(S)) V = np.matrix(Vt).T # Compute covariance Theta = V * Sigma * self._pseudoinverse( I - Sigma * V.T * Ndiag * V * Sigma) * Sigma * V.T elif method == 'svd-ew': # Use singular value decomposition based approach given in supplementary material to efficiently compute uncertainty # The eigenvalue decomposition of W'W is used to forgo computing the SVD. # See Appendix D.1, Eqs. D4 and D5 of [1]. # Construct matrices Ndiag = np.matrix(np.diag(N_k), dtype=np.float64) W = np.matrix(W, dtype=np.float64) I = np.identity(K, dtype=np.float64) # Compute singular values and right singular vectors of W without using SVD # Instead, we compute eigenvalues and eigenvectors of W'W. # Note W'W = (U S V')'(U S V') = V S' U' U S V' = V (S'S) V' [S2, V] = linalg.eigh(W.T * W) # Set any slightly negative eigenvalues to zero. S2[np.where(S2 < 0.0)] = 0.0 # Form matrix of singular values Sigma, and V. Sigma = np.matrix(np.diag(np.sqrt(S2))) V = np.matrix(V) # Compute covariance Theta = V * Sigma * self._pseudoinverse( I - Sigma * V.T * Ndiag * V * Sigma) * Sigma * V.T else: # Raise an exception. raise ParameterError('Method ' + method + ' unrecognized.') return Theta
[ "def", "_computeAsymptoticCovarianceMatrix", "(", "self", ",", "W", ",", "N_k", ",", "method", "=", "None", ")", ":", "# Set 'svd-ew' as default if uncertainty method specified as None.", "if", "method", "==", "None", ":", "method", "=", "'svd-ew'", "# Get dimensions of weight matrix.", "[", "N", ",", "K", "]", "=", "W", ".", "shape", "# Check dimensions", "if", "(", "K", "!=", "N_k", ".", "size", ")", ":", "raise", "ParameterError", "(", "'W must be NxK, where N_k is a K-dimensional array.'", ")", "if", "(", "np", ".", "sum", "(", "N_k", ")", "!=", "N", ")", ":", "raise", "ParameterError", "(", "'W must be NxK, where N = sum_k N_k.'", ")", "check_w_normalized", "(", "W", ",", "N_k", ")", "# Compute estimate of asymptotic covariance matrix using specified method.", "if", "method", "==", "'approximate'", ":", "# Use fast approximate expression from Kong et al. -- this underestimates the true covariance, but may be a good approximation in some cases and requires no matrix inversions", "# Theta = P'P", "# Construct matrices", "W", "=", "np", ".", "matrix", "(", "W", ",", "dtype", "=", "np", ".", "float64", ")", "# Compute covariance", "Theta", "=", "W", ".", "T", "*", "W", "elif", "method", "==", "'svd'", ":", "# Use singular value decomposition based approach given in supplementary material to efficiently compute uncertainty", "# See Appendix D.1, Eq. D4 in [1].", "# Construct matrices", "Ndiag", "=", "np", ".", "matrix", "(", "np", ".", "diag", "(", "N_k", ")", ",", "dtype", "=", "np", ".", "float64", ")", "W", "=", "np", ".", "matrix", "(", "W", ",", "dtype", "=", "np", ".", "float64", ")", "I", "=", "np", ".", "identity", "(", "K", ",", "dtype", "=", "np", ".", "float64", ")", "# Compute SVD of W", "[", "U", ",", "S", ",", "Vt", "]", "=", "linalg", ".", "svd", "(", "W", ",", "full_matrices", "=", "False", ")", "# False Avoids O(N^2) memory allocation by only calculting the active subspace of U.", "Sigma", "=", "np", ".", "matrix", "(", "np", ".", "diag", "(", "S", ")", ")", "V", "=", "np", ".", "matrix", "(", "Vt", ")", ".", "T", "# Compute covariance", "Theta", "=", "V", "*", "Sigma", "*", "self", ".", "_pseudoinverse", "(", "I", "-", "Sigma", "*", "V", ".", "T", "*", "Ndiag", "*", "V", "*", "Sigma", ")", "*", "Sigma", "*", "V", ".", "T", "elif", "method", "==", "'svd-ew'", ":", "# Use singular value decomposition based approach given in supplementary material to efficiently compute uncertainty", "# The eigenvalue decomposition of W'W is used to forego computing the SVD.", "# See Appendix D.1, Eqs. 
D4 and D5 of [1].", "# Construct matrices", "Ndiag", "=", "np", ".", "matrix", "(", "np", ".", "diag", "(", "N_k", ")", ",", "dtype", "=", "np", ".", "float64", ")", "W", "=", "np", ".", "matrix", "(", "W", ",", "dtype", "=", "np", ".", "float64", ")", "I", "=", "np", ".", "identity", "(", "K", ",", "dtype", "=", "np", ".", "float64", ")", "# Compute singular values and right singular vectors of W without using SVD", "# Instead, we compute eigenvalues and eigenvectors of W'W.", "# Note W'W = (U S V')'(U S V') = V S' U' U S V' = V (S'S) V'", "[", "S2", ",", "V", "]", "=", "linalg", ".", "eigh", "(", "W", ".", "T", "*", "W", ")", "# Set any slightly negative eigenvalues to zero.", "S2", "[", "np", ".", "where", "(", "S2", "<", "0.0", ")", "]", "=", "0.0", "# Form matrix of singular values Sigma, and V.", "Sigma", "=", "np", ".", "matrix", "(", "np", ".", "diag", "(", "np", ".", "sqrt", "(", "S2", ")", ")", ")", "V", "=", "np", ".", "matrix", "(", "V", ")", "# Compute covariance", "Theta", "=", "V", "*", "Sigma", "*", "self", ".", "_pseudoinverse", "(", "I", "-", "Sigma", "*", "V", ".", "T", "*", "Ndiag", "*", "V", "*", "Sigma", ")", "*", "Sigma", "*", "V", ".", "T", "else", ":", "# Raise an exception.", "raise", "ParameterError", "(", "'Method '", "+", "method", "+", "' unrecognized.'", ")", "return", "Theta" ]
42.130841
26.719626
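Since np.matrix is deprecated in current NumPy, here is a sketch of the 'svd-ew' branch with plain ndarrays, substituting np.linalg.pinv for the class's _pseudoinverse helper (an assumption about that helper's behavior):

import numpy as np

def covariance_svd_ew(W, N_k):
    # Theta = V Sigma (I - Sigma V' N V Sigma)^+ Sigma V', via eigh(W'W)
    K = W.shape[1]
    S2, V = np.linalg.eigh(W.T @ W)   # W'W = V (S'S) V'
    S2[S2 < 0.0] = 0.0                # clamp slightly negative eigenvalues
    Sigma = np.diag(np.sqrt(S2))      # singular values of W
    inner = np.eye(K) - Sigma @ V.T @ np.diag(N_k) @ V @ Sigma
    return V @ Sigma @ np.linalg.pinv(inner) @ Sigma @ V.T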
def _encrypt_password(self, password): """Encrypt the password for the given mode. """ if self.encryption_mode.lower() == 'crypt': return self._crypt_password(password) elif self.encryption_mode.lower() == 'md5': return self._md5_password(password) elif self.encryption_mode.lower() == 'md5-base': return self._md5_base_password(password) else: raise UnknownEncryptionMode(self.encryption_mode)
[ "def", "_encrypt_password", "(", "self", ",", "password", ")", ":", "if", "self", ".", "encryption_mode", ".", "lower", "(", ")", "==", "'crypt'", ":", "return", "self", ".", "_crypt_password", "(", "password", ")", "elif", "self", ".", "encryption_mode", ".", "lower", "(", ")", "==", "'md5'", ":", "return", "self", ".", "_md5_password", "(", "password", ")", "elif", "self", ".", "encryption_mode", ".", "lower", "(", ")", "==", "'md5-base'", ":", "return", "self", ".", "_md5_base_password", "(", "password", ")", "else", ":", "raise", "UnknownEncryptionMode", "(", "self", ".", "encryption_mode", ")" ]
46.8
11.6
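The if/elif chain calls .lower() once per branch; a dictionary dispatch evaluates it once and keeps the mapping declarative. A sketch, assuming the same three helper methods and exception class exist on the class:

def _encrypt_password(self, password):
    """Encrypt the password for the given mode."""
    handlers = {
        'crypt': self._crypt_password,
        'md5': self._md5_password,
        'md5-base': self._md5_base_password,
    }
    mode = self.encryption_mode.lower()
    if mode not in handlers:
        raise UnknownEncryptionMode(self.encryption_mode)
    return handlers[mode](password)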
def _manage_cmd(cmd, settings=None): # type: (List[str], Optional[str]) -> None """ Run django ./manage.py command manually. This function eliminates the need for having ``manage.py`` (reduces file clutter). """ import sys from os import environ from peltak.core import conf from peltak.core import context from peltak.core import log sys.path.insert(0, conf.get('src_dir')) settings = settings or conf.get('django.settings', None) environ.setdefault("DJANGO_SETTINGS_MODULE", settings) args = sys.argv[0:-1] + cmd if context.get('pretend', False): log.info("Would run the following manage command:\n<90>{}", args) else: from django.core.management import execute_from_command_line execute_from_command_line(args)
[ "def", "_manage_cmd", "(", "cmd", ",", "settings", "=", "None", ")", ":", "# type: () -> None", "import", "sys", "from", "os", "import", "environ", "from", "peltak", ".", "core", "import", "conf", "from", "peltak", ".", "core", "import", "context", "from", "peltak", ".", "core", "import", "log", "sys", ".", "path", ".", "insert", "(", "0", ",", "conf", ".", "get", "(", "'src_dir'", ")", ")", "settings", "=", "settings", "or", "conf", ".", "get", "(", "'django.settings'", ",", "None", ")", "environ", ".", "setdefault", "(", "\"DJANGO_SETTINGS_MODULE\"", ",", "settings", ")", "args", "=", "sys", ".", "argv", "[", "0", ":", "-", "1", "]", "+", "cmd", "if", "context", ".", "get", "(", "'pretend'", ",", "False", ")", ":", "log", ".", "info", "(", "\"Would run the following manage command:\\n<90>{}\"", ",", "args", ")", "else", ":", "from", "django", ".", "core", ".", "management", "import", "execute_from_command_line", "execute_from_command_line", "(", "args", ")" ]
30.32
19.72
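Hypothetical usage — the command list and settings module below are made-up values; the function splices them onto sys.argv minus its last element before handing off to Django:

_manage_cmd(['migrate', '--noinput'], settings='myproject.settings')
_manage_cmd(['collectstatic'])  # falls back to conf.get('django.settings')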
def _parseline(self, line): """ All lines come to this method. :param line: a line to parse :returns: the number of rows to jump to parse the next data line, or the error code -1 """ sline = line.split(SEPARATOR) segment = sline[0] handlers = { SEGMENT_HEADER: self._handle_header, SEGMENT_EOF: self._handle_eof, SEGMENT_RESULT: self._handle_result_line, SEGMENT_OBSERVATION_ORDER: self._handle_new_record } handler = handlers.get(segment) if handler: return handler(sline) return 0
[ "def", "_parseline", "(", "self", ",", "line", ")", ":", "sline", "=", "line", ".", "split", "(", "SEPARATOR", ")", "segment", "=", "sline", "[", "0", "]", "handlers", "=", "{", "SEGMENT_HEADER", ":", "self", ".", "_handle_header", ",", "SEGMENT_EOF", ":", "self", ".", "_handle_eof", ",", "SEGMENT_RESULT", ":", "self", ".", "_handle_result_line", ",", "SEGMENT_OBSERVATION_ORDER", ":", "self", ".", "_handle_new_record", "}", "handler", "=", "handlers", ".", "get", "(", "segment", ")", "if", "handler", ":", "return", "handler", "(", "sline", ")", "return", "0" ]
33.157895
12.315789
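As an illustration of the routing, assume SEPARATOR is '|' and SEGMENT_RESULT is 'R' (both constants live elsewhere in the module, so these values are guesses typical of ASTM-style instrument messages):

SEPARATOR = '|'
SEGMENT_RESULT = 'R'

line = 'R|1|^^^Glucose|105|mg/dL'
sline = line.split(SEPARATOR)      # ['R', '1', '^^^Glucose', '105', 'mg/dL']
assert sline[0] == SEGMENT_RESULT  # would be routed to self._handle_result_line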
def cyk(grammar, parse_sequence): # type: (Grammar, Iterable[Any]) -> Nonterminal """ Perform CYK algorithm. :param grammar: Grammar to use in Chomsky Normal Form. :param parse_sequence: Input sequence to parse. :return: Instance of root Nonterminal in parsed tree. """ # check start symbol if grammar.start is None: raise StartSymbolNotSetException() # create variables parse_sequence = list(parse_sequence) input_length = len(parse_sequence) index = input_length - 1 f = Field(input_length) # create mapping to speed up rule searching (termmap, rulemap) = _create_mapping(grammar) # fill first line with rules directly rewritable to terminal f.fill(termmap, parse_sequence) # fill rest of fields for y in range(1, input_length): for x in range(input_length - y): positions = f.positions(x, y) pairs_of_rules = [(f.rules(pos[0].x, pos[0].y), f.rules(pos[1].x, pos[1].y)) for pos in positions] rules = set() for pair_of_rule in pairs_of_rules: for (first_rule, second_rule) in _all_combinations(pair_of_rule): h = hash((first_rule.fromSymbol, second_rule.fromSymbol)) if h in rulemap: for r in rulemap[h]: # list of rules rules.add(PlaceItem(r, first_rule, second_rule)) f.put(x, y, list(rules)) # Check if the start symbol is on the bottom of the field if grammar.start not in [r.fromSymbol for r in f.rules(0, input_length - 1)]: raise NotParsedException() # Find init symbol and rule start = grammar.start() # type: Nonterminal start_rule = [r for r in f.rules(0, input_length - 1) if grammar.start == r.fromSymbol][0] # Prepare buffer for processing to_process = list() to_process.append({'n': start, 'r': start_rule}) # Prepare tree while len(to_process) > 0: working = to_process.pop() rule_class = working['r'] working_nonterm = working['n'] # type: Nonterminal # it's a middle rule - not directly rewritable to a terminal if isinstance(rule_class, PlaceItem): created_rule = rule_class.rule() # type: Rule working_nonterm._set_to_rule(created_rule) created_rule._from_symbols.append(working_nonterm) for side in rule_class.to_rule: symbol = side.fromSymbol() # type: Nonterminal symbol._set_from_rule(created_rule) created_rule._to_symbols.append(symbol) to_process.append({'n': symbol, 'r': side}) # it is a rule rewritable to a terminal else: created_rule = rule_class() # type: Rule working_nonterm._set_to_rule(created_rule) created_rule._from_symbols.append(working_nonterm) t = Terminal(parse_sequence[index]) index -= 1 created_rule._to_symbols.append(t) t._set_from_rule(created_rule) return start
[ "def", "cyk", "(", "grammar", ",", "parse_sequence", ")", ":", "# type: (Grammar, Iterable[Any]) -> Nonterminal", "# check start symbol", "if", "grammar", ".", "start", "is", "None", ":", "raise", "StartSymbolNotSetException", "(", ")", "# create variables", "parse_sequence", "=", "list", "(", "parse_sequence", ")", "input_length", "=", "len", "(", "parse_sequence", ")", "index", "=", "input_length", "-", "1", "f", "=", "Field", "(", "input_length", ")", "# creating mapping for speedup rules searching", "(", "termmap", ",", "rulemap", ")", "=", "_create_mapping", "(", "grammar", ")", "# fill first line with rules directly rewritable to terminal", "f", ".", "fill", "(", "termmap", ",", "parse_sequence", ")", "# fill rest of fields", "for", "y", "in", "range", "(", "1", ",", "input_length", ")", ":", "for", "x", "in", "range", "(", "input_length", "-", "y", ")", ":", "positions", "=", "f", ".", "positions", "(", "x", ",", "y", ")", "pairs_of_rules", "=", "[", "(", "f", ".", "rules", "(", "pos", "[", "0", "]", ".", "x", ",", "pos", "[", "0", "]", ".", "y", ")", ",", "f", ".", "rules", "(", "pos", "[", "1", "]", ".", "x", ",", "pos", "[", "1", "]", ".", "y", ")", ")", "for", "pos", "in", "positions", "]", "rules", "=", "set", "(", ")", "for", "pair_of_rule", "in", "pairs_of_rules", ":", "for", "(", "first_rule", ",", "second_rule", ")", "in", "_all_combinations", "(", "pair_of_rule", ")", ":", "h", "=", "hash", "(", "(", "first_rule", ".", "fromSymbol", ",", "second_rule", ".", "fromSymbol", ")", ")", "if", "h", "in", "rulemap", ":", "for", "r", "in", "rulemap", "[", "h", "]", ":", "# list of rules", "rules", ".", "add", "(", "PlaceItem", "(", "r", ",", "first_rule", ",", "second_rule", ")", ")", "f", ".", "put", "(", "x", ",", "y", ",", "list", "(", "rules", ")", ")", "# Check if is start symol on the bottom of field", "if", "grammar", ".", "start", "not", "in", "[", "r", ".", "fromSymbol", "for", "r", "in", "f", ".", "rules", "(", "0", ",", "input_length", "-", "1", ")", "]", ":", "raise", "NotParsedException", "(", ")", "# Find init symbol and rule", "start", "=", "grammar", ".", "start", "(", ")", "# type: Nonterminal", "start_rule", "=", "[", "r", "for", "r", "in", "f", ".", "rules", "(", "0", ",", "input_length", "-", "1", ")", "if", "grammar", ".", "start", "==", "r", ".", "fromSymbol", "]", "[", "0", "]", "# Prepare buffer for proccess", "to_process", "=", "list", "(", ")", "to_process", ".", "append", "(", "{", "'n'", ":", "start", ",", "'r'", ":", "start_rule", "}", ")", "# Prepare tree", "while", "len", "(", "to_process", ")", ">", "0", ":", "working", "=", "to_process", ".", "pop", "(", ")", "rule_class", "=", "working", "[", "'r'", "]", "working_nonterm", "=", "working", "[", "'n'", "]", "# type: Nonterminal", "# its middle rule - not rewritable to nonterminal", "if", "isinstance", "(", "rule_class", ",", "PlaceItem", ")", ":", "created_rule", "=", "rule_class", ".", "rule", "(", ")", "# type: Rule", "working_nonterm", ".", "_set_to_rule", "(", "created_rule", ")", "created_rule", ".", "_from_symbols", ".", "append", "(", "working_nonterm", ")", "for", "side", "in", "rule_class", ".", "to_rule", ":", "symbol", "=", "side", ".", "fromSymbol", "(", ")", "# type: Nonterminal", "symbol", ".", "_set_from_rule", "(", "created_rule", ")", "created_rule", ".", "_to_symbols", ".", "append", "(", "symbol", ")", "to_process", ".", "append", "(", "{", "'n'", ":", "symbol", ",", "'r'", ":", "side", "}", ")", "# it is rule rewritable to nonterminal", "else", ":", "created_rule", "=", 
"rule_class", "(", ")", "# type: Rule", "working_nonterm", ".", "_set_to_rule", "(", "created_rule", ")", "created_rule", ".", "_from_symbols", ".", "append", "(", "working_nonterm", ")", "t", "=", "Terminal", "(", "parse_sequence", "[", "index", "]", ")", "index", "-=", "1", "created_rule", ".", "_to_symbols", ".", "append", "(", "t", ")", "t", ".", "_set_from_rule", "(", "created_rule", ")", "return", "start" ]
44.072464
13.521739
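The Grammar/Nonterminal machinery above belongs to the host library, but the table-filling core is the textbook CYK recurrence. A library-independent sketch on a toy CNF grammar (S -> AB | BA, A -> 'a', B -> 'b'; all names here are illustrative): cell (x, y) of the triangular table holds every nonterminal that derives the substring of length y+1 starting at position x.

unit = {'a': {'A'}, 'b': {'B'}}                  # terminal rules
binary = {('A', 'B'): {'S'}, ('B', 'A'): {'S'}}  # S -> AB | BA

def cyk_recognize(word):
    n = len(word)
    table = [[set() for _ in range(n)] for _ in range(n)]
    for x, ch in enumerate(word):                # row 0: single symbols
        table[0][x] = set(unit.get(ch, ()))
    for y in range(1, n):                        # longer spans, bottom up
        for x in range(n - y):
            for split in range(y):               # left part has length split+1
                for left in table[split][x]:
                    for right in table[y - split - 1][x + split + 1]:
                        table[y][x] |= binary.get((left, right), set())
    return 'S' in table[n - 1][0]

print(cyk_recognize('ab'))  # True
print(cyk_recognize('aa'))  # False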
def p_file_depends(self, f_term, predicate): """Sets file dependencies.""" for _, _, other_file in self.graph.triples((f_term, predicate, None)): name = self.get_file_name(other_file) if name is not None: self.builder.add_file_dep(six.text_type(name)) else: self.error = True msg = 'File depends on file with no name' self.logger.log(msg)
[ "def", "p_file_depends", "(", "self", ",", "f_term", ",", "predicate", ")", ":", "for", "_", ",", "_", ",", "other_file", "in", "self", ".", "graph", ".", "triples", "(", "(", "f_term", ",", "predicate", ",", "None", ")", ")", ":", "name", "=", "self", ".", "get_file_name", "(", "other_file", ")", "if", "name", "is", "not", "None", ":", "self", ".", "builder", ".", "add_file_dep", "(", "six", ".", "text_type", "(", "name", ")", ")", "else", ":", "self", ".", "error", "=", "True", "msg", "=", "'File depends on file with no name'", "self", ".", "logger", ".", "log", "(", "msg", ")" ]
44.5
13.2
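A sketch of the same triple iteration with rdflib; the graph contents, file URIs, and the SPDX predicate URI below are illustrative stand-ins for what the parser would hold at this point:

from rdflib import Graph, Namespace, URIRef

g = Graph()
SPDX = Namespace('http://spdx.org/rdf/terms#')
f_term = URIRef('http://example.org/files#a')
g.add((f_term, SPDX.fileDependency, URIRef('http://example.org/files#b')))

# every object of (f_term, predicate, ?) is a file node whose name
# would then be resolved via get_file_name
for _, _, other_file in g.triples((f_term, SPDX.fileDependency, None)):
    print(other_file)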