code
stringlengths
52
7.75k
docs
stringlengths
1
5.85k
def count(self):
    """Return the number of rows matched by this query.

    Raises CQLEngineException in batch mode, where only inserts,
    updates, and deletes are allowed.
    """
    if self._batch:
        raise CQLEngineException("Only inserts, updates, and deletes are available in batch mode")
    if self._result_cache is None:
        # no cached results yet: issue a COUNT(*) style select
        query = self._select_query()
        query.count = True
        result = self._execute(query)
        return result[0]['count']
    else:
        # results already fetched; count them locally
        return len(self._result_cache)
Returns the number of rows matched by this query
def limit(self, v):
    """Set the limit on the number of results returned.

    CQL has a default limit of 10,000. Returns a clone of this
    queryset with the new limit (self if the limit is unchanged).
    """
    if not (v is None or isinstance(v, six.integer_types)):
        raise TypeError
    if v == self._limit:
        return self
    # NOTE(review): if v is None this comparison raises TypeError on
    # Python 3 — preserved from the original; confirm intended.
    if v < 0:
        raise QueryException("Negative limit is not allowed")
    clone = copy.deepcopy(self)
    clone._limit = v
    return clone
Sets the limit on the number of results returned CQL has a default limit of 10,000
def delete(self):
    """Delete the contents of a query.

    The partition key must be constrained in the where clause.
    """
    # validate where clause: first primary key is the partition key
    partition_key = [x for x in self.model._primary_keys.values()][0]
    if not any([c.field == partition_key.column_name for c in self._where]):
        raise QueryException("The partition key must be defined on delete queries")
    dq = DeleteStatement(
        self.column_family_name,
        where=self._where,
        timestamp=self._timestamp
    )
    self._execute(dq)
Deletes the contents of a query
def _validate_select_where(self):
    """Check that a filterset will not create an invalid select statement."""
    # check that there's either a = or IN relationship with a primary key or indexed field
    equal_ops = [self.model._columns.get(w.field) for w in self._where
                 if isinstance(w.operator, EqualsOperator)]
    token_comparison = any([w for w in self._where if isinstance(w.value, Token)])
    if not any([w.primary_key or w.index for w in equal_ops]) and not token_comparison and not self._allow_filtering:
        raise QueryException('Where clauses require either a "=" or "IN" comparison with either a primary key or indexed field')
    if not self._allow_filtering:
        # if the query is not on an indexed field
        if not any([w.index for w in equal_ops]):
            if not any([w.partition_key for w in equal_ops]) and not token_comparison:
                raise QueryException('Filtering on a clustering key without a partition key is not allowed unless allow_filtering() is called on the querset')
Checks that a filterset will not create invalid select statement
def _get_result_constructor(self):
    """Return a function used to instantiate query results."""
    if not self._values_list:
        # we want model instances
        return lambda rows: self.model._construct_instance(rows)
    elif self._flat_values_list:
        # the user has requested a flattened list (1 value per row)
        return lambda row: row.popitem()[1]
    else:
        return lambda row: self._get_row_value_list(self._only_fields, row)
Returns a function that will be used to instantiate query results
def update(self, **values):
    """Update the rows in this queryset.

    Values set to None are removed via a DELETE of those fields.
    """
    if not values:
        return
    nulled_columns = set()
    us = UpdateStatement(self.column_family_name, where=self._where, ttl=self._ttl,
                         timestamp=self._timestamp, transactions=self._transaction)
    for name, val in values.items():
        col_name, col_op = self._parse_filter_arg(name)
        col = self.model._columns.get(col_name)
        # check for nonexistant columns
        if col is None:
            raise ValidationError("{}.{} has no column named: {}".format(
                self.__module__, self.model.__name__, col_name))
        # check for primary key update attempts
        if col.is_primary_key:
            raise ValidationError("Cannot apply update to primary key '{}' for {}.{}".format(
                col_name, self.__module__, self.model.__name__))
        # we should not provide default values in this use case.
        val = col.validate(val)
        if val is None:
            # null assignments become field deletes below
            nulled_columns.add(col_name)
            continue
        # add the update statements
        if isinstance(col, Counter):
            # TODO: implement counter updates
            raise NotImplementedError
        elif isinstance(col, (List, Set, Map)):
            if isinstance(col, List):
                klass = ListUpdateClause
            elif isinstance(col, Set):
                klass = SetUpdateClause
            elif isinstance(col, Map):
                klass = MapUpdateClause
            else:
                raise RuntimeError
            us.add_assignment_clause(klass(col_name, col.to_database(val), operation=col_op))
        else:
            us.add_assignment_clause(AssignmentClause(col_name, col.to_database(val)))
    if us.assignments:
        self._execute(us)
    if nulled_columns:
        ds = DeleteStatement(self.column_family_name, fields=nulled_columns, where=self._where)
        self._execute(ds)
Updates the rows in this queryset
def _delete_null_columns(self):
    """Execute a delete query to remove columns that have changed to null."""
    ds = DeleteStatement(self.column_family_name)
    deleted_fields = False
    for _, v in self.instance._values.items():
        col = v.column
        if v.deleted:
            ds.add_field(col.db_field_name)
            deleted_fields = True
        elif isinstance(col, Map):
            # maps may have individual keys deleted
            uc = MapDeleteClause(col.db_field_name, v.value, v.previous_value)
            if uc.get_context_size() > 0:
                ds.add_field(uc)
                deleted_fields = True
    if deleted_fields:
        for name, col in self.model._primary_keys.items():
            ds.add_where_clause(WhereClause(
                col.db_field_name,
                EqualsOperator(),
                col.to_database(getattr(self.instance, name))
            ))
        self._execute(ds)
executes a delete query to remove columns that have changed to null
def delete(self):
    """Delete one instance."""
    if self.instance is None:
        raise CQLEngineException("DML Query instance attribute is None")
    ds = DeleteStatement(self.column_family_name, timestamp=self._timestamp)
    for name, col in self.model._primary_keys.items():
        # skip unset clustering keys; partition keys are always required
        if (not col.partition_key) and (getattr(self.instance, name) is None):
            continue
        ds.add_where_clause(WhereClause(
            col.db_field_name,
            EqualsOperator(),
            col.to_database(getattr(self.instance, name))
        ))
    self._execute(ds)
Deletes one instance
def handle(client, request):
    """Handle a format request.

    request struct::

        {
            'data': 'data_need_format',
            'formaters': [
                {'name': 'formater_name', 'config': {}},  # None or dict
                ...
            ]
        }

    If no formaters are given, autopep8 with its default config is used.
    """
    formaters = request.get('formaters', None)
    if not formaters:
        formaters = [{'name': 'autopep8'}]
    logging.debug('formaters: ' + json.dumps(formaters, indent=4))
    data = request.get('data', None)
    if not isinstance(data, str):
        return send(client, 'invalid data', None)
    # first max_line_length found wins; shared across formaters
    max_line_length = None
    for formater in formaters:
        max_line_length = formater.get('config', {}).get('max_line_length')
        if max_line_length:
            break
    for formater in formaters:
        name = formater.get('name', None)
        config = formater.get('config', {})
        if name not in FORMATERS:
            return send(client, 'formater {} not support'.format(name), None)
        formater = FORMATERS[name]
        if formater is None:
            return send(client, 'formater {} not installed'.format(name), None)
        if name == 'isort' and max_line_length:
            config.setdefault('line_length', max_line_length)
        data = formater(data, **config)
    return send(client, None, data)
Handle format request request struct: { 'data': 'data_need_format', 'formaters': [ { 'name': 'formater_name', 'config': {} # None or dict }, ... # formaters ] } if no formaters, use autopep8 formater and it's default config
def column_family_name(self, include_keyspace=True):
    """Return the column family name, optionally qualified by keyspace."""
    if include_keyspace:
        return '{}.{}'.format(self.keyspace, self.name)
    else:
        return self.name
Returns the column family name if it's been defined; otherwise, it creates it from the module and class name
def easeInOutQuad(n):
    """A quadratic tween that accelerates, reaches the midpoint, and decelerates.

    Args:
      n (float): time progress, 0.0 to 1.0.
    Returns:
      (float) line progress, 0.0 to 1.0.
    """
    _checkRange(n)
    if n < 0.5:
        return 2 * n**2
    else:
        n = n * 2 - 1
        return -0.5 * (n*(n-2) - 1)
A quadratic tween function that accelerates, reaches the midpoint, and then decelerates. Args: n (float): The time progress, starting at 0.0 and ending at 1.0. Returns: (float) The line progress, starting at 0.0 and ending at 1.0. Suitable for passing to getPointOnLine().
def easeInOutCubic(n):
    """A cubic tween that accelerates, reaches the midpoint, and decelerates.

    Args:
      n (float): time progress, 0.0 to 1.0.
    Returns:
      (float) line progress, 0.0 to 1.0.
    """
    _checkRange(n)
    n = 2 * n
    if n < 1:
        return 0.5 * n**3
    else:
        n = n - 2
        return 0.5 * (n**3 + 2)
A cubic tween function that accelerates, reaches the midpoint, and then decelerates. Args: n (float): The time progress, starting at 0.0 and ending at 1.0. Returns: (float) The line progress, starting at 0.0 and ending at 1.0. Suitable for passing to getPointOnLine().
def easeInOutQuart(n):
    """A quartic tween that accelerates, reaches the midpoint, and decelerates.

    Args:
      n (float): time progress, 0.0 to 1.0.
    Returns:
      (float) line progress, 0.0 to 1.0.
    """
    _checkRange(n)
    n = 2 * n
    if n < 1:
        return 0.5 * n**4
    else:
        n = n - 2
        return -0.5 * (n**4 - 2)
A quartic tween function that accelerates, reaches the midpoint, and then decelerates. Args: n (float): The time progress, starting at 0.0 and ending at 1.0. Returns: (float) The line progress, starting at 0.0 and ending at 1.0. Suitable for passing to getPointOnLine().
def easeInOutQuint(n):
    """A quintic tween that accelerates, reaches the midpoint, and decelerates.

    Args:
      n (float): time progress, 0.0 to 1.0.
    Returns:
      (float) line progress, 0.0 to 1.0.
    """
    _checkRange(n)
    n = 2 * n
    if n < 1:
        return 0.5 * n**5
    else:
        n = n - 2
        return 0.5 * (n**5 + 2)
A quintic tween function that accelerates, reaches the midpoint, and then decelerates. Args: n (float): The time progress, starting at 0.0 and ending at 1.0. Returns: (float) The line progress, starting at 0.0 and ending at 1.0. Suitable for passing to getPointOnLine().
def easeInOutExpo(n):
    """An exponential tween that accelerates, reaches the midpoint, and decelerates.

    Args:
      n (float): time progress, 0.0 to 1.0.
    Returns:
      (float) line progress, 0.0 to 1.0.
    """
    _checkRange(n)
    # endpoints are pinned exactly; 2**x never reaches 0 or 1 otherwise
    if n == 0:
        return 0
    elif n == 1:
        return 1
    else:
        n = n * 2
        if n < 1:
            return 0.5 * 2**(10 * (n - 1))
        else:
            n -= 1
            return 0.5 * (-1 * (2 ** (-10 * n)) + 2)
An exponential tween function that accelerates, reaches the midpoint, and then decelerates. Args: n (float): The time progress, starting at 0.0 and ending at 1.0. Returns: (float) The line progress, starting at 0.0 and ending at 1.0. Suitable for passing to getPointOnLine().
def easeInOutCirc(n):
    """A circular tween that accelerates, reaches the midpoint, and decelerates.

    Args:
      n (float): time progress, 0.0 to 1.0.
    Returns:
      (float) line progress, 0.0 to 1.0.
    """
    _checkRange(n)
    n = n * 2
    if n < 1:
        return -0.5 * (math.sqrt(1 - n**2) - 1)
    else:
        n = n - 2
        return 0.5 * (math.sqrt(1 - n**2) + 1)
A circular tween function that accelerates, reaches the midpoint, and then decelerates. Args: n (float): The time progress, starting at 0.0 and ending at 1.0. Returns: (float) The line progress, starting at 0.0 and ending at 1.0. Suitable for passing to getPointOnLine().
def easeInElastic(n, amplitude=1, period=0.3):
    """An elastic tween with an increasing wobble that snaps into the destination.

    Args:
      n (float): time progress, 0.0 to 1.0.
    Returns:
      (float) line progress, 0.0 to 1.0.
    """
    _checkRange(n)
    # mirror of easeOutElastic
    return 1 - easeOutElastic(1-n, amplitude=amplitude, period=period)
An elastic tween function that begins with an increasing wobble and then snaps into the destination. Args: n (float): The time progress, starting at 0.0 and ending at 1.0. Returns: (float) The line progress, starting at 0.0 and ending at 1.0. Suitable for passing to getPointOnLine().
def easeOutElastic(n, amplitude=1, period=0.3):
    """An elastic tween that overshoots and "rubber bands" into the destination.

    Args:
      n (float): time progress, 0.0 to 1.0.
    Returns:
      (float) line progress, 0.0 to 1.0.
    """
    _checkRange(n)
    if amplitude < 1:
        # an amplitude below 1 is not meaningful; clamp and use default phase
        amplitude = 1
        s = period / 4
    else:
        s = period / (2 * math.pi) * math.asin(1 / amplitude)
    return amplitude * 2**(-10*n) * math.sin((n-s)*(2*math.pi / period)) + 1
An elastic tween function that overshoots the destination and then "rubber bands" into the destination. Args: n (float): The time progress, starting at 0.0 and ending at 1.0. Returns: (float) The line progress, starting at 0.0 and ending at 1.0. Suitable for passing to getPointOnLine().
def easeInOutElastic(n, amplitude=1, period=0.5):
    """An elastic tween that wobbles towards the midpoint.

    Args:
      n (float): time progress, 0.0 to 1.0.
    Returns:
      (float) line progress, 0.0 to 1.0.
    """
    _checkRange(n)
    n *= 2
    if n < 1:
        return easeInElastic(n, amplitude=amplitude, period=period) / 2
    else:
        return easeOutElastic(n-1, amplitude=amplitude, period=period) / 2 + 0.5
An elastic tween function wobbles towards the midpoint. Args: n (float): The time progress, starting at 0.0 and ending at 1.0. Returns: (float) The line progress, starting at 0.0 and ending at 1.0. Suitable for passing to getPointOnLine().
def easeInBack(n, s=1.70158):
    """A tween that backs up first and then goes to the destination.

    Args:
      n (float): time progress, 0.0 to 1.0.
    Returns:
      (float) line progress, 0.0 to 1.0.
    """
    _checkRange(n)
    return n * n * ((s + 1) * n - s)
A tween function that backs up first at the start and then goes to the destination. Args: n (float): The time progress, starting at 0.0 and ending at 1.0. Returns: (float) The line progress, starting at 0.0 and ending at 1.0. Suitable for passing to getPointOnLine().
def easeOutBack(n, s=1.70158):
    """A tween that overshoots the destination a little and then backs in.

    Args:
      n (float): time progress, 0.0 to 1.0.
    Returns:
      (float) line progress, 0.0 to 1.0.
    """
    _checkRange(n)
    n = n - 1
    return n * n * ((s + 1) * n + s) + 1
A tween function that overshoots the destination a little and then backs into the destination. Args: n (float): The time progress, starting at 0.0 and ending at 1.0. Returns: (float) The line progress, starting at 0.0 and ending at 1.0. Suitable for passing to getPointOnLine().
def easeInOutBack(n, s=1.70158):
    """A "back-in" tween that overshoots both the start and the destination.

    Args:
      n (float): time progress, 0.0 to 1.0.
    Returns:
      (float) line progress, 0.0 to 1.0.
    """
    _checkRange(n)
    n = n * 2
    if n < 1:
        s *= 1.525
        return 0.5 * (n * n * ((s + 1) * n - s))
    else:
        n -= 2
        s *= 1.525
        return 0.5 * (n * n * ((s + 1) * n + s) + 2)
A "back-in" tween function that overshoots both the start and destination. Args: n (float): The time progress, starting at 0.0 and ending at 1.0. Returns: (float) The line progress, starting at 0.0 and ending at 1.0. Suitable for passing to getPointOnLine().
def easeOutBounce(n):
    """A bouncing tween that hits the destination and then bounces to rest.

    Args:
      n (float): time progress, 0.0 to 1.0.
    Returns:
      (float) line progress, 0.0 to 1.0.
    """
    _checkRange(n)
    if n < (1/2.75):
        return 7.5625 * n * n
    elif n < (2/2.75):
        n -= (1.5/2.75)
        return 7.5625 * n * n + 0.75
    elif n < (2.5/2.75):
        n -= (2.25/2.75)
        return 7.5625 * n * n + 0.9375
    else:
        # NOTE(review): classic Penner easing uses 2.625/2.75 here;
        # 2.65 preserved from the original — confirm intended.
        n -= (2.65/2.75)
        return 7.5625 * n * n + 0.984375
A bouncing tween function that hits the destination and then bounces to rest. Args: n (float): The time progress, starting at 0.0 and ending at 1.0. Returns: (float) The line progress, starting at 0.0 and ending at 1.0. Suitable for passing to getPointOnLine().
def formfield_for_manytomany(self, db_field, request, **kwargs):
    '''
    Not all Admin subclasses use get_field_queryset here,
    so we will use it explicitly
    '''
    db = kwargs.get('using')
    kwargs['queryset'] = kwargs.get('queryset', self.get_field_queryset(db, db_field, request))
    return super(AccessControlMixin, self).formfield_for_manytomany(db_field, request, **kwargs)
Not all Admin subclasses use get_field_queryset here, so we will use it explicitly
def delete_view(self, request, object_id, extra_context=None):
    "The 'delete' admin view for this model."
    queryset = self.model._default_manager.filter(pk=object_id)
    response = self.delete_selected(request, queryset)
    if response:
        # confirmation page or error response
        return response
    url = reverse('admin:%s_%s_changelist' % (self.model._meta.app_label,
                                              self.model._meta.model_name))
    return HttpResponseRedirect(url)
The 'delete' admin view for this model.
def register_plugins(cls, plugins):
    '''
    Register plugins.

    The plugins parameter should be a dict mapping model to plugin.
    Just calls register_plugin for every such pair.
    '''
    for model in plugins:
        cls.register_plugin(model, plugins[model])
Register plugins. The plugins parameter should be a dict mapping model to plugin. Just calls register_plugin for every such pair.
def register_plugin(cls, model, plugin):
    '''
    Register a plugin for the model.

    Only one plugin can be registered per model. If you want to
    combine plugins, use CompoundPlugin.
    '''
    logger.info("Plugin registered for %s: %s", model, plugin)
    cls.plugins[model] = plugin
Register a plugin for the model. Only one plugin can be registered per model. If you want to combine plugins, use CompoundPlugin.
def get_default_plugin(cls):
    '''
    Return a default plugin.

    The plugin class is taken from settings.ACCESS_DEFAULT_PLUGIN and
    instantiated lazily, then cached in cls.default_plugins.
    '''
    from importlib import import_module
    from django.conf import settings
    default_plugin = getattr(settings, 'ACCESS_DEFAULT_PLUGIN', "access.plugins.DjangoAccessPlugin")
    if default_plugin not in cls.default_plugins:
        logger.info("Creating a default plugin: %s", default_plugin)
        path = default_plugin.split('.')
        plugin_path = '.'.join(path[:-1])
        plugin_name = path[-1]
        DefaultPlugin = getattr(import_module(plugin_path), plugin_name)
        cls.default_plugins[default_plugin] = DefaultPlugin()
    return cls.default_plugins[default_plugin]
Return a default plugin.
def plugin_for(cls, model):
    '''
    Find and return a plugin for this model.

    Uses inheritance to find a model where the plugin is registered;
    returns None when no plugin applies.
    '''
    logger.debug("Getting a plugin for: %s", model)
    if not issubclass(model, Model):
        return
    if model in cls.plugins:
        return cls.plugins[model]
    # walk base classes recursively until a registered model is found
    for b in model.__bases__:
        p = cls.plugin_for(b)
        if p:
            return p
Find and return a plugin for this model. Uses inheritance to find a model where the plugin is registered.
def visible(self, request):
    '''
    Checks both check_visible and apply_visible against the owned
    model and its instance set.
    '''
    if self.check_visible(self.model, request) is not False:
        return self.apply_visible(self.get_queryset(), request)
    return self.get_queryset().none()
Checks the both, check_visible and apply_visible, against the owned model and it's instance set
def changeable(self, request):
    '''
    Checks both check_changeable and apply_changeable against the
    owned model and its instance set.
    '''
    if self.check_changeable(self.model, request) is not False:
        return self.apply_changeable(self.get_queryset(), request)
    return self.get_queryset().none()
Checks the both, check_changeable and apply_changeable, against the owned model and it's instance set
def deleteable(self, request):
    '''
    Checks both check_deleteable and apply_deleteable against the
    owned model and its instance set.
    '''
    if self.check_deleteable(self.model, request) is not False:
        return self.apply_deleteable(self.get_queryset(), request)
    return self.get_queryset().none()
Checks the both, check_deleteable and apply_deleteable, against the owned model and it's instance set
def get_plugin_from_string(plugin_name):
    """Return the plugin or plugin point class named by ``plugin_name``.

    Example of ``plugin_name``: ``'my_app.MyPlugin'``.
    """
    modulename, classname = plugin_name.rsplit('.', 1)
    module = import_module(modulename)
    return getattr(module, classname)
Returns plugin or plugin point class from given ``plugin_name`` string. Example of ``plugin_name``:: 'my_app.MyPlugin'
def _parse_int(value, default=None): if value is None: return default try: return int(value) except ValueError: print "Couldn't cast value to `int`." return default
Attempt to cast *value* into an integer, returning *default* if it fails.
def _parse_float(value, default=None): if value is None: return default try: return float(value) except ValueError: print "Couldn't cast value to `float`." return default
Attempt to cast *value* into a float, returning *default* if it fails.
def _parse_type(value, type_func): default = type_func(0) if value is None: return default try: return type_func(value) except ValueError: return default
Attempt to cast *value* into *type_func*, returning *default* if it fails.
def make_pdf(dist, params, size=10000):
    """Generate a distribution's Probability Density Function as a pandas Series.

    :param dist: a scipy.stats frozen-style distribution object
    :param params: fitted parameters, shape args followed by (loc, scale)
    :param size: number of sample points
    """
    # Separate parts of parameters
    arg = params[:-2]
    loc = params[-2]
    scale = params[-1]
    # Get sane start and end points of distribution (1st/99th percentiles)
    start = dist.ppf(0.01, *arg, loc=loc, scale=scale) if arg else dist.ppf(0.01, loc=loc, scale=scale)
    end = dist.ppf(0.99, *arg, loc=loc, scale=scale) if arg else dist.ppf(0.99, loc=loc, scale=scale)
    # Build PDF and turn into pandas Series (index = x, values = density)
    x = np.linspace(start, end, size)
    y = dist.pdf(x, loc=loc, scale=scale, *arg)
    pdf = pd.Series(y, x)
    return pdf
Generate a distribution's Probability Density Function
def urlencode(query, params):
    """Convert *query* and *params* into a full query+query string.

    Preserves the order of *params* (an iterable of (key, value) pairs).
    """
    return query + '?' + "&".join(key + '=' + quote_plus(str(value))
                                  for key, value in params)
Correctly convert the given query and parameters into a full query+query string, ensuring the order of the params.
def _parse_boolean(value, default=False): if value is None: return default try: return bool(value) except ValueError: return default
Attempt to cast *value* into a bool, returning *default* if it fails.
def _get(url):
    """Fetch *url* and return its response body as a str."""
    if PYTHON_3:
        req = request.Request(url, headers=HEADER)
        response = request.urlopen(req)
        return response.read().decode('utf-8')
    else:
        req = urllib2.Request(url, headers=HEADER)
        response = urllib2.urlopen(req)
        return response.read()
Convert a URL into it's response (a *str*).
def _recursively_convert_unicode_to_str(input):
    """Force *input* to use only `str` instead of `unicode`, recursing
    into dicts and lists. On Python 3 values pass through unchanged.
    """
    if isinstance(input, dict):
        return {_recursively_convert_unicode_to_str(key): _recursively_convert_unicode_to_str(value)
                for key, value in input.items()}
    elif isinstance(input, list):
        return [_recursively_convert_unicode_to_str(element) for element in input]
    elif not PYTHON_3 and isinstance(input, unicode):
        return input.encode('utf-8')
    else:
        return input
Force the given input to only use `str` instead of `bytes` or `unicode`. This works even if the input is a dict, list,
def _load_from_string(data):
    '''Loads the cache from the string'''
    global _CACHE
    if PYTHON_3:
        data = json.loads(data.decode("utf-8"))
    else:
        data = json.loads(data)
    _CACHE = _recursively_convert_unicode_to_str(data)['data']
Loads the cache from the string
def disconnect(filename=None):
    """Connect to the local cache, so no internet connection is required.

    :returns: void
    """
    global _CONNECTED
    if filename is not None:
        try:
            with open(filename, 'r') as f:
                _load_from_string(f.read())
        except FileNotFoundError:
            raise USGSException("""The cache file '{0}' was not found, and I cannot disconnect without one. If you have not been given a cache.json file, then you can create a new one:
    >>> from earthquakes import earthquakes
    >>> earthquakes.connect()
    >>> earthquakes._start_editing()
    ...
    >>> earthquakes.get_report()
    ...
    >>> earthquakes._save_cache('{0}')""".format(filename))
    # reset per-key counters so cached responses replay from the start
    for key in _CACHE.keys():
        _CACHE_COUNTER[key] = 0
    _CONNECTED = False
Connect to the local cache, so no internet connection is required. :returns: void
def _lookup(key):
    """Look up *key* in the local cache.

    :param str key: cache key
    :returns: the cached value, "" when exhausted, or None if unknown
    """
    if key not in _CACHE:
        return None
    # _CACHE[key][0] is a pattern ("empty"/"repeat"/other); the rest are values
    if _CACHE_COUNTER[key] >= len(_CACHE[key][1:]):
        if _CACHE[key][0] == "empty":
            return ""
        elif _CACHE[key][0] == "repeat" and _CACHE[key][1:]:
            # keep replaying the last value
            return _CACHE[key][-1]
        elif _CACHE[key][0] == "repeat":
            return ""
        else:
            # wrap around to the first value
            _CACHE_COUNTER[key] = 1
    else:
        _CACHE_COUNTER[key] += 1
    if _CACHE[key]:
        return _CACHE[key][_CACHE_COUNTER[key]]
    else:
        return ""
Internal method that looks up a key in the local cache. :param key: Get the value based on the key from the cache. :type key: string :returns: void
def _add_to_cache(key, value):
    """Add a new key-value pair to the local cache.

    :param str key: the new url to add to the cache
    :param str value: the HTTP response for this key
    :returns: void
    """
    if key in _CACHE:
        _CACHE[key].append(value)
    else:
        # first entry carries the replay pattern as element 0
        _CACHE[key] = [_PATTERN, value]
        _CACHE_COUNTER[key] = 0
Internal method to add a new key-value to the local cache. :param str key: The new url to add to the cache :param str value: The HTTP response for this key. :returns: void
def _get_report_string(time='hour', threshold='significant', online=False):
    """Like :func:`get_report` except returns the raw data instead.

    :param str time: "hour", "day", "week", or "month"
    :param str threshold: "significant", "all", "4.5", "2.5", or "1.0"
    :returns: str
    """
    key = _get_report_request(time, threshold)
    # live fetch when connected, otherwise replay from the local cache
    result = _get(key) if _CONNECTED else _lookup(key)
    if (_CONNECTED or online) and _EDITABLE:
        _add_to_cache(key, result)
    return result
Like :func:`get_report` except returns the raw data instead. :param str time: A string indicating the time range of earthquakes to report. Must be either "hour" (only earthquakes in the past hour), "day" (only earthquakes that happened today), "week" (only earthquakes that happened in the past 7 days), or "month" (only earthquakes that happened in the past 30 days). :param str threshold: A string indicating what kind of earthquakes to report. Must be either "significant" (only significant earthquakes), "all" (all earthquakes, regardless of significance), "4.5", "2.5", or "1.0". Note that for the last three, all earthquakes at and above that level will be reported. :returns: str
def _to_dict(self): ''' Returns a dictionary representation of this object ''' return dict(latitude=self.latitude, longitude=self.longitude, depth=self.depthf _to_dict(self): ''' Returns a dictionary representation of this object ''' return dict(latitude=self.latitude, longitude=self.longitude, depth=self.depth)
Returns a dictionary representation of this object
def _from_json(json_data):
    """Create a Coordinate from json data.

    :param json_data: the raw json data to parse (a [lon, lat, depth] list)
    :type json_data: list
    :returns: Coordinate
    """
    if len(json_data) >= 3:
        return Coordinate(_parse_float(json_data[0]),
                          _parse_float(json_data[1]),
                          _parse_float(json_data[2]))
    else:
        raise USGSException("The given coordinate information was incomplete.")
Creates a Coordinate from json data. :param json_data: The raw json data to parse :type json_data: dict :returns: Coordinate
def _to_dict(self): ''' Returns a dictionary representation of this object ''' return dict(minimum=self.minimum._to_dict(), maximum=self.maximum._to_dict()f _to_dict(self): ''' Returns a dictionary representation of this object ''' return dict(minimum=self.minimum._to_dict(), maximum=self.maximum._to_dict())
Returns a dictionary representation of this object
def _from_json(json_data):
    """Create a BoundingBox from json data.

    :param json_data: the raw json data to parse (a 6-element list:
        min lon/lat/depth then max lon/lat/depth)
    :type json_data: list
    :returns: BoundingBox
    """
    if len(json_data) >= 6:
        return BoundingBox(
            Coordinate(_parse_float(json_data[0]),
                       _parse_float(json_data[1]),
                       _parse_float(json_data[2])),
            Coordinate(_parse_float(json_data[3]),
                       _parse_float(json_data[4]),
                       _parse_float(json_data[5])))
    else:
        raise USGSException("The bounding box information was incomplete.")
Creates a BoundingBox from json data. :param json_data: The raw json data to parse :type json_data: dict :returns: BoundingBox
def _from_json(json_data):
    """Create an Earthquake from json data.

    :param json_data: the raw json data to parse
    :type json_data: dict
    :returns: Earthquake
    """
    try:
        coordinates = json_data['geometry']['coordinates']
    except KeyError:
        raise USGSException("The geometry information was not returned from the USGS website.")
    try:
        properties = json_data['properties']
    except KeyError:
        raise USGSException("One of the earthquakes did not have any property information")
    # missing properties fall back to zero/empty defaults
    return Earthquake(Coordinate._from_json(coordinates),
                      _parse_float(properties.get('mag', '0'), 0.0),
                      properties.get('place', ''),
                      _parse_int(properties.get('time', '0'), 0),
                      properties.get('url', ''),
                      _parse_int(properties.get('felt', '0'), 0),
                      _parse_float(properties.get('cdi', '0'), 0.0),
                      _parse_float(properties.get('mmi', '0'), 0.0),
                      properties['alert'] if 'alert' in properties and properties['alert'] else '',
                      properties.get('status', ''),
                      _parse_int(properties.get('sig', '0'), 0),
                      json_data.get('id', ''),
                      _parse_float(properties.get('dmin', '0'), 0.0),
                      _parse_float(properties.get('rms', '0'), 0.0),
                      _parse_float(properties.get('gap', '0'), 0.0))
Creates a Earthquake from json data. :param json_data: The raw json data to parse :type json_data: dict :returns: Earthquake
def _to_dict(self): ''' Returns a dictionary representation of this object ''' return dict(area= self.area._to_dict(), earthquakes = [q._to_dict() for q in self.earthquakes], title = self.titlef _to_dict(self): ''' Returns a dictionary representation of this object ''' return dict(area= self.area._to_dict(), earthquakes = [q._to_dict() for q in self.earthquakes], title = self.title)
Returns a dictionary representation of this object
def _from_json(json_data):
    """Create a Report from json data.

    :param json_data: the raw json data to parse
    :type json_data: dict
    :returns: Report
    """
    if 'bbox' in json_data:
        box = BoundingBox._from_json(json_data['bbox'])
    else:
        # no bounding box supplied; use a degenerate zero box
        box = BoundingBox(Coordinate(0., 0., 0.), Coordinate(0., 0., 0.))
    if 'features' in json_data and json_data['features']:
        quakes = list(map(Earthquake._from_json, json_data['features']))
    else:
        quakes = []
    try:
        title = json_data['metadata']['title']
    except KeyError:
        raise USGSException("No report title information returned by server")
    return Report(box, quakes, title)
Creates a Report from json data. :param json_data: The raw json data to parse :type json_data: dict :returns: Report
def _byteify(input):
    """Force *input* to only use `str` instead of `bytes` or `unicode`,
    recursing into dicts and lists; non-ASCII characters are replaced.
    """
    if isinstance(input, dict):
        return {_byteify(key): _byteify(value) for key, value in input.items()}
    elif isinstance(input, list):
        return [_byteify(element) for element in input]
    elif _PYTHON_3 and isinstance(input, str):
        return str(input.encode('ascii', 'replace').decode('ascii'))
    elif not _PYTHON_3 and isinstance(input, unicode):
        return str(input.encode('ascii', 'replace').decode('ascii'))
    else:
        return input
Force the given input to only use `str` instead of `bytes` or `unicode`. This works even if the input is a dict, list,
def extract_table(html):
    """Find and return the ``basic_table`` table element from *html*,
    or None when no such table exists.
    """
    soup = BeautifulSoup(html, 'lxml')
    table = soup.find("table", attrs={"class": "basic_table"})
    # original contained unreachable commented-out parsing code after
    # this return; removed as dead code
    return table
# The first tr contains the field names. datasets = [] for row in table.find_all("tr"): dataset = list((td.get_text().strip(), td.attrs.get('colspan', 1), td.attrs.get('rowspan', 1)) for td in row.find_all("td")) datasets.append(dataset)
def get_model(cls, name=None, status=ENABLED):
    """Return the model instance of a plugin point or plugin, depending
    on which class this method is called from.

    Example::

        plugin_model_instance = MyPlugin.get_model()
        plugin_model_instance = MyPluginPoint.get_model('plugin-name')
        plugin_point_model_instance = MyPluginPoint.get_model()
    """
    ppath = cls.get_pythonpath()
    if is_plugin_point(cls):
        if name is not None:
            kwargs = {}
            if status is not None:
                kwargs['status'] = status
            return Plugin.objects.get(point__pythonpath=ppath, name=name, **kwargs)
        else:
            return PluginPointModel.objects.get(pythonpath=ppath)
    else:
        return Plugin.objects.get(pythonpath=ppath)
Returns model instance of plugin point or plugin, depending from which class this methos is called. Example:: plugin_model_instance = MyPlugin.get_model() plugin_model_instance = MyPluginPoint.get_model('plugin-name') plugin_point_model_instance = MyPluginPoint.get_model()
def get_point_model(cls):
    """Return the plugin point model instance.

    Only usable from plugin classes, not plugin points.
    """
    if is_plugin_point(cls):
        raise Exception(_('This method is only available to plugin '
                          'classes.'))
    else:
        return PluginPointModel.objects.\
            get(plugin__pythonpath=cls.get_pythonpath())
Returns plugin point model instance. Only used from plugin classes.
def get_plugins(cls):
    """Yield all plugin instances of this plugin point."""
    # Django >= 1.9 changed something with the migration logic causing
    # plugins to be executed before the corresponding database tables
    # exist. This method will only return something if the database
    # tables have already been created.
    # XXX: I don't fully understand the issue and there should be
    # another way but this appears to work fine.
    if django_version >= (1, 9) and \
            not db_table_exists(Plugin._meta.db_table):
        # PEP 479: `raise StopIteration` inside a generator becomes a
        # RuntimeError on Python 3.7+; a bare return ends the generator.
        return
    if is_plugin_point(cls):
        for plugin_model in cls.get_plugins_qs():
            yield plugin_model.get_plugin()
    else:
        raise Exception(_('This method is only available to plugin point '
                          'classes.'))
Returns all plugin instances of plugin point, passing all args and kwargs to plugin constructor.
def get_plugins_qs(cls):
    """Return the query set of all enabled plugins belonging to this
    plugin point, ordered by index.

    Example::

        for plugin_instance in MyPluginPoint.get_plugins_qs():
            print(plugin_instance.get_plugin().name)
    """
    if is_plugin_point(cls):
        point_pythonpath = cls.get_pythonpath()
        return Plugin.objects.filter(point__pythonpath=point_pythonpath,
                                     status=ENABLED).\
            order_by('index')
    else:
        raise Exception(_('This method is only available to plugin point '
                          'classes.'))
Returns query set of all plugins belonging to plugin point. Example:: for plugin_instance in MyPluginPoint.get_plugins_qs(): print(plugin_instance.get_plugin().name)
def readmetadata():
    """Read or create the cached metadata dump of the Gutenberg catalog.

    On the first call the full RDF catalog is parsed and pickled to
    PICKLEFILE; subsequent calls just load that pickle.

    Returns:
        dict: maps Gutenberg etext id (int) to a metadata dict with
        fields such as id, author, title, subjects, LCC, language,
        type, formats and download count (missing fields are None).
    """
    if not os.path.exists(PICKLEFILE):
        metadata = {}
        for xml in tqdm(getrdfdata()):
            ebook = xml.find(r'{%(pg)s}ebook' % NS)
            if ebook is None:
                continue
            result = parsemetadata(ebook)
            if result is not None:
                metadata[result['id']] = result
        pickle.dump(metadata, gzip.open(PICKLEFILE, 'wb'), protocol=-1)
        return metadata
    return pickle.load(gzip.open(PICKLEFILE, 'rb'))
Read/create cached metadata dump of Gutenberg catalog. Returns: A dictionary with the following fields: id (int): Gutenberg identifier of text author (str): Last name, First name title (str): title of work subjects (list of str): list of descriptive subjects; a subject may be hierarchical, e.g: 'England -- Social life and customs -- 19th century -- Fiction' LCC (list of str): a list of two letter Library of Congress Classifications, e.g., 'PS' language (list of str): list of two letter language codes. type (str): 'Text', 'Sound', ... formats (dict of str, str pairs): keys are MIME types, values are URLs. download count (int): the number of times this ebook has been downloaded from the Gutenberg site in the last 30 days. Fields that are not part of the metadata are set to None. http://www.gutenberg.org/wiki/Gutenberg:Help_on_Bibliographic_Record_Page
def getrdfdata():
    """Download the Project Gutenberg RDF catalog if absent, then parse it.

    Yields:
        xml.etree.ElementTree.ElementTree: one parsed etext meta-data
        document per archive member.
    """
    if not os.path.exists(RDFFILES):
        urllib.urlretrieve(RDFURL, RDFFILES)
    with tarfile.open(RDFFILES) as archive:
        for member in archive:
            yield ElementTree.parse(archive.extractfile(member))
Downloads Project Gutenberg RDF catalog. Yields: xml.etree.ElementTree.Element: An etext meta-data definition.
def etextno(lines):
    """Retrieve the id for an etext.

    Args:
        lines (iter): The lines of the etext to search.

    Returns:
        int: The id of the etext.

    Raises:
        ValueError: If no etext id was found.
    """
    for line in lines:
        match = ETEXTRE.search(line)
        if match is None:
            continue
        front_match = match.group('etextid_front')
        back_match = match.group('etextid_back')
        if front_match is not None:
            return int(front_match)
        if back_match is not None:
            return int(back_match)
        # The regex guarantees one of the two groups matched; fixed the
        # unbalanced parenthesis in the original message.
        raise ValueError('no regex match (this should never happen)')
    raise ValueError('no etext-id found')
Retrieves the id for an etext. Args: lines (iter): The lines of the etext to search. Returns: int: The id of the etext. Raises: ValueError: If no etext id was found. Examples: >>> etextno(['Release Date: March 17, 2004 [EBook #11609]']) 11609 >>> etextno(['Release Date: July, 2003 [Etext# 4263]']) 4263 >>> etextno(['Release Date: November 29, 2003 [Eook #10335]']) 10335 >>> etextno(['December, 1998 [Etext 1576#]']) 1576 >>> etextno(['Some lines', 'without', 'Any [Etext] Number']) Traceback (most recent call last): ... ValueError: no etext-id found
def safeunicode(arg, *args, **kwargs):
    """Coerce *arg* to unicode, if it's not already."""
    if isinstance(arg, unicode):
        return arg
    return unicode(arg, *args, **kwargs)
Coerce argument to unicode, if it's not already.
def get_reports():
    """Return energy data from 1960 to 2014 across various factors.

    Reads every row from the ``energy`` table and deserializes the
    JSON payload of each row.
    """
    # Removed the dead `if False:` placeholder branch; only the live
    # query path ever executed.
    rows = _Constants._DATABASE.execute("SELECT data FROM energy".format(
        hardware=_Constants._HARDWARE))
    data = [r[0] for r in rows]
    data = [_Auxiliary._byteify(_json.loads(r)) for r in data]
    return _Auxiliary._byteify(data)
Returns energy data from 1960 to 2014 across various factors.
def available(self, src, dst, model):
    """Iterate over registered plugins or plugin points and prepare
    them for the database.

    Yields ``(point, instance)`` pairs, creating a model instance for
    each newly registered name and re-enabling previously removed ones.
    """
    for name, point in six.iteritems(src):
        inst = dst.pop(name, None)
        if inst is None:
            self.print_(1, "Registering %s for %s" % (model.__name__, name))
            inst = model(pythonpath=name)
        if inst.status == REMOVED:
            self.print_(1, "Updating %s for %s" % (model.__name__, name))
            # re-enable a previously removed plugin point and its plugins
            inst.status = ENABLED
        yield point, inst
Iterate over all registered plugins or plugin points and prepare to add them to database.
def missing(self, dst):
    """Mark plugins that exist in the database but are no longer
    registered as REMOVED."""
    for inst in six.itervalues(dst):
        if inst.status == REMOVED:
            continue
        inst.status = REMOVED
        inst.save()
Mark all missing plugins, that exists in database, but are not registered.
def all(self):
    """Synchronize all registered plugins and plugin points to database."""
    # Django >= 1.9 changed something with the migration logic causing
    # plugins to be executed before the corresponding database tables
    # exist. This method will only return something if the database
    # tables have already been created.
    # XXX: I don't fully understand the issue and there should be
    # another way but this appears to work fine.
    if django_version >= (1, 9) and (
            not db_table_exists(Plugin._meta.db_table) or
            not db_table_exists(PluginPoint._meta.db_table)):
        # Tables not created yet: skip synchronization entirely.
        return
    # NOTE(review): this looks truncated — upstream likely also calls
    # self.plugins() after self.points(); confirm against the original.
    self.points()
Synchronize all registered plugins and plugin points to database.
def get_weather(test=False):
    """Return weather reports from the dataset.

    :param bool test: when True (or when ``_Constants._TEST`` is set),
        only a hardware-limited number of rows is fetched.
    """
    # The two branches differed only in the SQL query; the shared
    # decode/byteify pipeline is now written once.
    if _Constants._TEST or test:
        query = "SELECT data FROM weather LIMIT {hardware}".format(
            hardware=_Constants._HARDWARE)
    else:
        # The original applied a no-op .format() here; dropped.
        query = "SELECT data FROM weather"
    rows = _Constants._DATABASE.execute(query)
    data = [r[0] for r in rows]
    data = [_Auxiliary._byteify(_json.loads(r)) for r in data]
    return _Auxiliary._byteify(data)
Returns weather reports from the dataset.
def _get(self, url, **kw): ''' Makes a GET request, setting Authorization header by default ''' headers = kw.pop('headers', {}) headers.setdefault('Content-Type', 'application/json') headers.setdefault('Accept', 'application/json') headers.setdefault('Authorization', self.AUTHORIZATION_HEADER) kw['headers'] = headers resp = self.session.get(url, **kw) self._raise_for_status(resp) return resf _get(self, url, **kw): ''' Makes a GET request, setting Authorization header by default ''' headers = kw.pop('headers', {}) headers.setdefault('Content-Type', 'application/json') headers.setdefault('Accept', 'application/json') headers.setdefault('Authorization', self.AUTHORIZATION_HEADER) kw['headers'] = headers resp = self.session.get(url, **kw) self._raise_for_status(resp) return resp
Makes a GET request, setting Authorization header by default
def _post(self, url, **kw): ''' Makes a POST request, setting Authorization header by default ''' headers = kw.pop('headers', {}) headers.setdefault('Authorization', self.AUTHORIZATION_HEADER) kw['headers'] = headers resp = self.session.post(url, **kw) self._raise_for_status(resp) return resf _post(self, url, **kw): ''' Makes a POST request, setting Authorization header by default ''' headers = kw.pop('headers', {}) headers.setdefault('Authorization', self.AUTHORIZATION_HEADER) kw['headers'] = headers resp = self.session.post(url, **kw) self._raise_for_status(resp) return resp
Makes a POST request, setting Authorization header by default
def _post_json(self, url, data, **kw): ''' Makes a POST request, setting Authorization and Content-Type headers by default ''' data = json.dumps(data) headers = kw.pop('headers', {}) headers.setdefault('Content-Type', 'application/json') headers.setdefault('Accept', 'application/json') kw['headers'] = headers kw['data'] = data return self._post(url, **kwf _post_json(self, url, data, **kw): ''' Makes a POST request, setting Authorization and Content-Type headers by default ''' data = json.dumps(data) headers = kw.pop('headers', {}) headers.setdefault('Content-Type', 'application/json') headers.setdefault('Accept', 'application/json') kw['headers'] = headers kw['data'] = data return self._post(url, **kw)
Makes a POST request, setting Authorization and Content-Type headers by default
def from_xuid(cls, xuid):
    '''
    Instantiates an instance of ``GamerProfile`` from an xuid

    :param xuid: Xuid to look up

    :raises: :class:`~xbox.exceptions.GamertagNotFound`

    :returns: :class:`~xbox.GamerProfile` instance
    '''
    # Restored from text corruption: the original block contained a
    # second, truncated copy of itself spliced into the last line.
    url = 'https://profile.xboxlive.com/users/xuid(%s)/profile/settings' % xuid
    try:
        return cls._fetch(url)
    except (GamertagNotFound, InvalidRequest):
        # this endpoint seems to return 400 when the resource
        # does not exist
        raise GamertagNotFound('No such user: %s' % xuid)
Instantiates an instance of ``GamerProfile`` from an xuid :param xuid: Xuid to look up :raises: :class:`~xbox.exceptions.GamertagNotFound` :returns: :class:`~xbox.GamerProfile` instance
def from_gamertag(cls, gamertag):
    '''
    Instantiates an instance of ``GamerProfile`` from a gamertag

    :param gamertag: Gamertag to look up

    :raises: :class:`~xbox.exceptions.GamertagNotFound`

    :returns: :class:`~xbox.GamerProfile` instance
    '''
    # Restored from text corruption: the original block contained a
    # second, truncated copy of itself spliced into the last line.
    url = 'https://profile.xboxlive.com/users/gt(%s)/profile/settings' % gamertag
    try:
        return cls._fetch(url)
    except GamertagNotFound:
        raise GamertagNotFound('No such user: %s' % gamertag)
Instantiates an instance of ``GamerProfile`` from a gamertag :param gamertag: Gamertag to look up :raises: :class:`~xbox.exceptions.GamertagNotFound` :returns: :class:`~xbox.GamerProfile` instance
def saved_from_user(cls, user, include_pending=False):
    '''
    Gets all clips 'saved' by a user.

    :param user: :class:`~xbox.GamerProfile` instance
    :param bool include_pending: whether to include clips that are
        not yet uploaded. Such clips will have thumbnails and
        media_url set to ``None``

    :returns: Iterator of :class:`~xbox.Clip` instances
    '''
    # Restored from text corruption: the original block contained a
    # second, truncated copy of itself spliced into the yield line.
    url = 'https://gameclipsmetadata.xboxlive.com/users/xuid(%s)/clips/saved'
    resp = xbox.client._get(url % user.xuid)
    data = resp.json()
    for clip in data['gameClips']:
        if clip['state'] != 'PendingUpload' or include_pending:
            yield cls(user, clip)
Gets all clips 'saved' by a user. :param user: :class:`~xbox.GamerProfile` instance :param bool include_pending: whether to include clips that are not yet uploaded. These clips will have thumbnails and media_url set to ``None`` :returns: Iterator of :class:`~xbox.Clip` instances
def increment(self, method=None, url=None, response=None, error=None,
              _pool=None, _stacktrace=None):
    """Return a new Retry object with incremented retry counters.

    :param response: A response object, or None, if the server did not
        return a response.
    :type response: :class:`~urllib3.response.HTTPResponse`
    :param Exception error: An error encountered during the request, or
        None if the response was received successfully.

    :return: A new ``Retry`` object.
    """
    if self.total is False and error:
        # Disabled, indicate to re-raise the error.
        # (six.reraise raises internally, so the surrounding `raise`
        # statement is never actually reached.)
        raise six.reraise(type(error), error, _stacktrace)

    total = self.total
    if total is not None:
        total -= 1

    _observed_errors = self._observed_errors
    connect = self.connect
    read = self.read
    redirect = self.redirect

    if error and self._is_connection_error(error):
        # Connect retry?
        if connect is False:
            raise six.reraise(type(error), error, _stacktrace)
        elif connect is not None:
            connect -= 1
        _observed_errors += 1

    elif error and self._is_read_error(error):
        # Read retry?
        if read is False:
            raise six.reraise(type(error), error, _stacktrace)
        elif read is not None:
            read -= 1
        _observed_errors += 1

    elif response and response.get_redirect_location():
        # Redirect retry?
        if redirect is not None:
            redirect -= 1

    else:
        # FIXME: Nothing changed, scenario doesn't make sense.
        _observed_errors += 1

    # Build the successor Retry; counters that were None stay None
    # (i.e. that category is unlimited).
    new_retry = self.new(
        total=total,
        connect=connect, read=read, redirect=redirect,
        _observed_errors=_observed_errors)

    if new_retry.is_exhausted():
        raise MaxRetryError(_pool, url, error)

    log.debug("Incremented Retry for (url='%s'): %r" % (url, new_retry))

    return new_retry
Return a new Retry object with incremented retry counters. :param response: A response object, or None, if the server did not return a response. :type response: :class:`~urllib3.response.HTTPResponse` :param Exception error: An error encountered during the request, or None if the response was received successfully. :return: A new ``Retry`` object.
def from_httplib(ResponseCls, r, **response_kw):
    """Build an :class:`urllib3.response.HTTPResponse` from a given
    :class:`httplib.HTTPResponse` instance ``r``.

    Remaining keyword arguments are forwarded to the HTTPResponse
    constructor, along with ``original_response=r``.
    """
    headers = HTTPHeaderDict()
    for name, value in r.getheaders():
        headers.add(name, value)

    # HTTPResponse objects in Python 3 don't have a .strict attribute
    strict = getattr(r, 'strict', 0)
    return ResponseCls(
        body=r,
        headers=headers,
        status=r.status,
        version=r.version,
        reason=r.reason,
        strict=strict,
        original_response=r,
        **response_kw)
Given an :class:`httplib.HTTPResponse` instance ``r``, return a corresponding :class:`urllib3.response.HTTPResponse` object. Remaining parameters are passed to the HTTPResponse constructor, along with ``original_response=r``.
def add(self, key, value):
    """Add a (name, value) pair without overwriting an existing value.

    >>> headers = HTTPHeaderDict(foo='bar')
    >>> headers.add('Foo', 'baz')
    >>> headers['foo']
    'bar, baz'
    """
    bucket = self._data.setdefault(key.lower(), [])
    bucket.append((key, value))
Adds a (name, value) pair, doesn't overwrite the value if it already exists. >>> headers = HTTPHeaderDict(foo='bar') >>> headers.add('Foo', 'baz') >>> headers['foo'] 'bar, baz'
def handle_401(self, r, **kwargs):
    """Take the given 401 response and retry it with digest auth,
    if the server offers it; otherwise return the response unchanged."""
    if self.pos is not None:
        # Rewind the file position indicator of the body to where
        # it was to resend the request.
        r.request.body.seek(self.pos)
    num_401_calls = getattr(self, 'num_401_calls', 1)
    s_auth = r.headers.get('www-authenticate', '')

    # Only retry once per request cycle (num_401_calls guard prevents
    # an infinite 401 loop when credentials are wrong).
    if 'digest' in s_auth.lower() and num_401_calls < 2:
        setattr(self, 'num_401_calls', num_401_calls + 1)
        pat = re.compile(r'digest ', flags=re.IGNORECASE)
        self.chal = parse_dict_header(pat.sub('', s_auth, count=1))

        # Consume content and release the original connection
        # to allow our new request to reuse the same one.
        r.content
        r.raw.release_conn()
        prep = r.request.copy()
        extract_cookies_to_jar(prep._cookies, r.request, r.raw)
        prep.prepare_cookies(prep._cookies)

        prep.headers['Authorization'] = self.build_digest_header(
            prep.method, prep.url)
        _r = r.connection.send(prep, **kwargs)
        _r.history.append(r)
        _r.request = prep

        return _r

    setattr(self, 'num_401_calls', 1)
    return r
Takes the given response and tries digest-auth, if needed.
def prepare(self):
    """Construct and return a :class:`PreparedRequest <PreparedRequest>`
    for transmission."""
    prepared = PreparedRequest()
    prepared.prepare(
        method=self.method,
        url=self.url,
        headers=self.headers,
        files=self.files,
        data=self.data,
        params=self.params,
        auth=self.auth,
        cookies=self.cookies,
        hooks=self.hooks,
    )
    return prepared
Constructs a :class:`PreparedRequest <PreparedRequest>` for transmission and returns it.
def prepare_url(self, url, params): url = to_native_string(url) # Don't do any URL preparation for non-HTTP schemes like `mailto`, # `data` etc to work around exceptions from `url_parse`, which # handles RFC 3986 only. if ':' in url and not url.lower().startswith('http'): self.url = url return # Support for unicode domain names and paths. scheme, auth, host, port, path, query, fragment = parse_url(url) if not scheme: raise MissingSchema("Invalid URL {0!r}: No schema supplied. " "Perhaps you meant http://{0}?".format(url)) if not host: raise InvalidURL("Invalid URL %r: No host supplied" % url) # Only want to apply IDNA to the hostname try: host = host.encode('idna').decode('utf-8') except UnicodeError: raise InvalidURL('URL has an invalid label.') # Carefully reconstruct the network location netloc = auth or '' if netloc: netloc += '@' netloc += host if port: netloc += ':' + str(port) # Bare domains aren't valid URLs. if not path: path = '/' if is_py2: if isinstance(scheme, str): scheme = scheme.encode('utf-8') if isinstance(netloc, str): netloc = netloc.encode('utf-8') if isinstance(path, str): path = path.encode('utf-8') if isinstance(query, str): query = query.encode('utf-8') if isinstance(fragment, str): fragment = fragment.encode('utf-8') enc_params = self._encode_params(params) if enc_params: if query: query = '%s&%s' % (query, enc_params) else: query = enc_params url = requote_uri(urlunparse([scheme, netloc, path, None, query, fragment])) self.url = url
Prepares the given HTTP URL.
def prepare_body(self, data, files):
    """Prepare the given HTTP body data.

    Streams (iterables that are not basic containers) are sent as-is
    with Content-Length or chunked transfer encoding; otherwise data is
    form-encoded or multipart-encoded (when ``files`` is given). Sets
    ``self.body`` and the relevant headers."""
    # Check if file, fo, generator, iterator.
    # If not, run through normal process.

    # Nottin' on you.
    body = None
    content_type = None
    length = None

    # A "stream" is any iterable that is not a plain string/container.
    is_stream = all([
        hasattr(data, '__iter__'),
        not isinstance(data, (basestring, list, tuple, dict))
    ])

    try:
        length = super_len(data)
    except (TypeError, AttributeError, UnsupportedOperation):
        length = None

    if is_stream:
        body = data

        if files:
            raise NotImplementedError('Streamed bodies and files are mutually exclusive.')

        if length is not None:
            self.headers['Content-Length'] = builtin_str(length)
        else:
            # Unknown length: fall back to chunked transfer encoding.
            self.headers['Transfer-Encoding'] = 'chunked'
    else:
        # Multi-part file uploads.
        if files:
            (body, content_type) = self._encode_files(files, data)
        else:
            if data:
                body = self._encode_params(data)
                if isinstance(data, basestring) or hasattr(data, 'read'):
                    content_type = None
                else:
                    content_type = 'application/x-www-form-urlencoded'

        self.prepare_content_length(body)

        # Add content-type if it wasn't explicitly provided.
        if (content_type) and (not 'content-type' in self.headers):
            self.headers['Content-Type'] = content_type

    self.body = body
Prepares the given HTTP body data.
def request_url(self, request, proxies):
    """Return the URL to use when making the final request.

    A request sent through an HTTP proxy must use the full URL;
    otherwise only the path portion is used. Exposed for subclassing
    :class:`HTTPAdapter <requests.adapters.HTTPAdapter>`; not meant to
    be called from user code.

    :param request: The :class:`PreparedRequest <PreparedRequest>` being sent.
    :param proxies: A dictionary of schemes to proxy URLs.
    """
    scheme = urlparse(request.url).scheme
    proxy = (proxies or {}).get(scheme)

    if proxy and scheme != 'https':
        # Proxied plain-HTTP requests need the absolute URL (without
        # the fragment).
        url, _ = urldefrag(request.url)
    else:
        url = request.path_url
    return url
Obtain the url to use when making the final request. If the message is being sent through a HTTP proxy, the full URL has to be used. Otherwise, we should only use the path portion of the URL. This should not be called from user code, and is only exposed for use when subclassing the :class:`HTTPAdapter <requests.adapters.HTTPAdapter>`. :param request: The :class:`PreparedRequest <PreparedRequest>` being sent. :param proxies: A dictionary of schemes to proxy URLs.
def get_encodings_from_content(content):
    """Return the encodings declared inside *content*.

    Checks ``<meta charset=...>`` declarations, ``http-equiv`` pragma
    declarations, and the XML prolog, in that order.

    :param content: bytestring to extract encodings from.
    """
    patterns = (
        re.compile(r'<meta.*?charset=["\']*(.+?)["\'>]', flags=re.I),
        re.compile(r'<meta.*?content=["\']*;?charset=(.+?)["\'>]', flags=re.I),
        re.compile(r'^<\?xml.*?encoding=["\']*(.+?)["\'>]'),
    )
    found = []
    for pattern in patterns:
        found.extend(pattern.findall(content))
    return found
Returns encodings from given content string. :param content: bytestring to extract encodings from.
def _prepare_conn(self, conn):
    """Prepare the ``connection`` for :meth:`urllib3.util.ssl_wrap_socket`
    and establish the tunnel if proxy is used."""

    if isinstance(conn, VerifiedHTTPSConnection):
        # Propagate this pool's TLS configuration onto the connection.
        conn.set_cert(key_file=self.key_file,
                      cert_file=self.cert_file,
                      cert_reqs=self.cert_reqs,
                      ca_certs=self.ca_certs,
                      assert_hostname=self.assert_hostname,
                      assert_fingerprint=self.assert_fingerprint)
        conn.ssl_version = self.ssl_version

    if self.proxy is not None:
        # Python 2.7+
        try:
            set_tunnel = conn.set_tunnel
        except AttributeError:  # Platform-specific: Python 2.6
            set_tunnel = conn._set_tunnel

        if sys.version_info <= (2, 6, 4) and not self.proxy_headers:   # Python 2.6.4 and older
            set_tunnel(self.host, self.port)
        else:
            set_tunnel(self.host, self.port, self.proxy_headers)

        # Establish tunnel connection early, because otherwise httplib
        # would improperly set Host: header to proxy's IP:port.
        conn.connect()

    return conn
Prepare the ``connection`` for :meth:`urllib3.util.ssl_wrap_socket` and establish the tunnel if proxy is used.
def description_of(file, name='stdin'):
    """Return a string describing the probable encoding of a file."""
    detector = UniversalDetector()
    for line in file:
        detector.feed(line)
    detector.close()
    result = detector.result
    if not result['encoding']:
        return '%s: no result' % name
    return '%s: %s with confidence %s' % (name, result['encoding'],
                                          result['confidence'])
Return a string describing the probable encoding of a file.
def dumps(obj, **kwargs):
    ''' Serialize `obj` to a JSON formatted `str`. Accepts the same arguments
    as `json` module in stdlib.

    :param obj: a JSON serializable Python object.
    :param kwargs: all the arguments that `json.dumps <http://docs.python.org/
                   2/library/json.html#json.dumps>`_ accepts.
    :raises: commentjson.JSONLibraryException
    :returns str: serialized string.
    '''
    # Restored from text corruption: the original block contained a
    # second, truncated copy of itself spliced into the raise line.
    try:
        return json.dumps(obj, **kwargs)
    except Exception as e:
        raise JSONLibraryException(e)
Serialize `obj` to a JSON formatted `str`. Accepts the same arguments as `json` module in stdlib. :param obj: a JSON serializable Python object. :param kwargs: all the arguments that `json.dumps <http://docs.python.org/ 2/library/json.html#json.dumps>`_ accepts. :raises: commentjson.JSONLibraryException :returns str: serialized string.
def load(fp, **kwargs):
    ''' Deserialize `fp` (a `.read()`-supporting file-like object containing
    a JSON document with Python or JavaScript like comments) to a Python object.

    :param fp: a `.read()`-supporting file-like object containing a JSON
               document with or without comments.
    :param kwargs: all the arguments that `json.load <http://docs.python.org/
                   2/library/json.html#json.load>`_ accepts.
    :raises: commentjson.JSONLibraryException
    :returns: dict or list.
    '''
    # Restored from text corruption: the original block contained a
    # second, truncated copy of itself spliced into the raise line.
    try:
        return loads(fp.read(), **kwargs)
    except Exception as e:
        raise JSONLibraryException(e)
Deserialize `fp` (a `.read()`-supporting file-like object containing a JSON document with Python or JavaScript like comments) to a Python object. :param fp: a `.read()`-supporting file-like object containing a JSON document with or without comments. :param kwargs: all the arguments that `json.load <http://docs.python.org/ 2/library/json.html#json.load>`_ accepts. :raises: commentjson.JSONLibraryException :returns: dict or list.
def dump(obj, fp, **kwargs):
    ''' Serialize `obj` as a JSON formatted stream to `fp` (a
    `.write()`-supporting file-like object). Accepts the same arguments as
    `json` module in stdlib.

    :param obj: a JSON serializable Python object.
    :param fp: a `.write()`-supporting file-like object to serialize into.
    :param kwargs: all the arguments that `json.dump <http://docs.python.org/
                   2/library/json.html#json.dump>`_ accepts.
    :raises: commentjson.JSONLibraryException
    '''
    # Restored from text corruption: the original block contained a
    # second, truncated copy of itself spliced into the raise line.
    try:
        json.dump(obj, fp, **kwargs)
    except Exception as e:
        raise JSONLibraryException(e)
Serialize `obj` as a JSON formatted stream to `fp` (a `.write()`-supporting file-like object). Accepts the same arguments as `json` module in stdlib. :param obj: a JSON serializable Python object. :param fp: a `.read()`-supporting file-like object containing a JSON document with or without comments. :param kwargs: all the arguments that `json.dump <http://docs.python.org/ 2/library/json.html#json.dump>`_ accepts. :raises: commentjson.JSONLibraryException
def prepend_name_prefix(func):
    """Decorator that prepends the instance's filename prefix to the
    ``name`` argument of the wrapped instance method.

    Must only be used on instance methods whose first parameter after
    `self` is `name` or a comparable parameter of a different name.
    """
    @wraps(func)
    def wrapper(self, name, *args, **kwargs):
        return func(self, self.name_prefix + name, *args, **kwargs)
    return wrapper
Decorator that wraps instance methods to prepend the instance's filename prefix to the beginning of the referenced filename. Must only be used on instance methods where the first parameter after `self` is `name` or a comparable parameter of a different name.
def get_chalk(level):
    """Get the appropriate piece of chalk for the logging level."""
    if level >= logging.ERROR:
        return chalk.red
    if level >= logging.WARNING:
        return chalk.yellow
    if level >= logging.INFO:
        return chalk.blue
    if level >= logging.DEBUG:
        return chalk.green
    return chalk.white
Gets the appropriate piece of chalk for the logging level
def to_str(obj):
    """Attempt to convert the given object to a string object."""
    if PY3 and isinstance(obj, bytes) and not isinstance(obj, str):
        obj = obj.decode('utf-8')
    if isinstance(obj, string_types):
        return obj
    return str(obj)
Attempts to convert given object to a string object
def get_color(self, value):
    """Validate *value* and map it to the escape fragment used when
    instantiating the Color object.

    Accepts either a known color name (member of COLOR_SET) or an
    integer below 8; returns the value joined to ``self.PREFIX``.

    :raises ValueError: if the value is neither a known color name nor
        a valid integer.
    """
    if value in COLOR_SET:
        value = COLOR_MAP[value]
    else:
        try:
            value = int(value)
            if value >= 8:
                raise ValueError()
        except ValueError as exc:
            # NOTE(review): negative integers slip through — only the
            # upper bound is enforced here; confirm the intended range.
            raise ValueError(
                'Colors should either a member of: {} or a positive '
                'integer below 8'.format(', '.join(COLOR_NAMES))
            )
    return '{}{}'.format(self.PREFIX, value)
Helper method to validate and map values used in the instantiation of the Color object to the correct unicode value.
def ctr_counter(nonce, f, start = 0):
    """Return an infinite counter iterator.

    Yields ``f(nonce, n)`` for n = `start` .. 2**64 - 1 once, then
    cycles n = 0 .. 2**64 - 1 forever.

    `nonce` should be a random 64-bit integer that makes the counter
    unique. `f` should combine two 64-bit integers losslessly (xor,
    addition, etc.) and return a 64-bit integer. `start` should be a
    number less than 2**64.
    """
    n = start
    while n < 2**64:
        yield f(nonce, n)
        n += 1
    while True:
        n = 0
        while n < 2**64:
            yield f(nonce, n)
            n += 1
Return an infinite iterator that starts at `start` and iterates by 1 over integers between 0 and 2^64 - 1 cyclically, returning on each iteration the result of combining each number with `nonce` using function `f`. `nonce` should be a random 64-bit integer that is used to make the counter unique. `f` should be a function that takes two 64-bit integers, the first being the `nonce`, and combines the two in a lossless manner (i.e. xor, addition, etc.) The returned value should be a 64-bit integer. `start` should be a number less than 2^64.
def encrypt_block(self, block):
    """Return a :obj:`bytes` object containing the encrypted bytes of
    `block`.

    `block` should be a :obj:`bytes`-like object with exactly 8 bytes.
    If it is not, a :exc:`ValueError` exception is raised.
    """
    S0, S1, S2, S3 = self.S
    P = self.P

    u4_1_pack = self._u4_1_pack
    u1_4_unpack = self._u1_4_unpack

    try:
        L, R = self._u4_2_unpack(block)
    except struct_error:
        raise ValueError("block is not 8 bytes in length")

    for p1, p2 in P[:-1]:
        L ^= p1
        a, b, c, d = u1_4_unpack(u4_1_pack(L))
        # Feistel F-function; precedence note: evaluates as
        # (((S0[a] + S1[b]) ^ S2[c]) + S3[d]) & 0xffffffff
        # since `^` binds looser than `+`, and `&` looser still.
        R ^= (S0[a] + S1[b] ^ S2[c]) + S3[d] & 0xffffffff

        R ^= p2
        a, b, c, d = u1_4_unpack(u4_1_pack(R))
        L ^= (S0[a] + S1[b] ^ S2[c]) + S3[d] & 0xffffffff

    p_penultimate, p_last = P[-1]
    # Final swap + whitening with the last two P-array entries.
    return self._u4_2_pack(R ^ p_last, L ^ p_penultimate)
Return a :obj:`bytes` object containing the encrypted bytes of a `block`. `block` should be a :obj:`bytes`-like object with exactly 8 bytes. If it is not, a :exc:`ValueError` exception is raised.
def encrypt_ecb(self, data):
    """Encrypt `data` with the Electronic Codebook (ECB) mode of
    operation, yielding one 8-byte :obj:`bytes` cipher block per
    iteration.

    `data` should be a :obj:`bytes`-like object whose length is a
    multiple of the 8-byte block size; otherwise a :exc:`ValueError`
    is raised.
    """
    S1, S2, S3, S4 = self.S
    P = self.P

    pack_word = self._u4_1_pack
    unpack_bytes = self._u1_4_unpack
    pack_block = self._u4_2_pack
    encrypt = self._encrypt

    try:
        halves = self._u4_2_iter_unpack(data)
    except struct_error:
        raise ValueError("data is not a multiple of the block-size in length")

    for left, right in halves:
        yield pack_block(
            *encrypt(left, right, P, S1, S2, S3, S4, pack_word, unpack_bytes)
        )
Return an iterator that encrypts `data` using the Electronic Codebook (ECB) mode of operation. ECB mode can only operate on `data` that is a multiple of the block-size in length. Each iteration returns a block-sized :obj:`bytes` object (i.e. 8 bytes) containing the encrypted bytes of the corresponding block in `data`. `data` should be a :obj:`bytes`-like object that is a multiple of the block-size in length (i.e. 8, 16, 32, etc.). If it is not, a :exc:`ValueError` exception is raised.
def decrypt_ecb(self, data):
    """Decrypt `data` with the Electronic Codebook (ECB) mode of
    operation, yielding one 8-byte :obj:`bytes` plain block per
    iteration.

    `data` should be a :obj:`bytes`-like object whose length is a
    multiple of the 8-byte block size; otherwise a :exc:`ValueError`
    is raised.
    """
    S1, S2, S3, S4 = self.S
    P = self.P

    pack_word = self._u4_1_pack
    unpack_bytes = self._u1_4_unpack
    pack_block = self._u4_2_pack
    decrypt = self._decrypt

    try:
        halves = self._u4_2_iter_unpack(data)
    except struct_error:
        raise ValueError("data is not a multiple of the block-size in length")

    for left, right in halves:
        yield pack_block(
            *decrypt(left, right, P, S1, S2, S3, S4, pack_word, unpack_bytes)
        )
Return an iterator that decrypts `data` using the Electronic Codebook (ECB) mode of operation. ECB mode can only operate on `data` that is a multiple of the block-size in length. Each iteration returns a block-sized :obj:`bytes` object (i.e. 8 bytes) containing the decrypted bytes of the corresponding block in `data`. `data` should be a :obj:`bytes`-like object that is a multiple of the block-size in length (i.e. 8, 16, 32, etc.). If it is not, a :exc:`ValueError` exception is raised.
def encrypt_cbc(self, data, init_vector):
    """Encrypt `data` with the Cipher-Block Chaining (CBC) mode of
    operation, yielding one 8-byte :obj:`bytes` cipher block per
    iteration.

    `init_vector` must be exactly 8 bytes and `data` a multiple of the
    8-byte block size; otherwise :exc:`ValueError` is raised.
    """
    S1, S2, S3, S4 = self.S
    P = self.P

    u4_1_pack = self._u4_1_pack
    u1_4_unpack = self._u1_4_unpack
    encrypt = self._encrypt

    u4_2_pack = self._u4_2_pack

    try:
        prev_cipher_L, prev_cipher_R = self._u4_2_unpack(init_vector)
    except struct_error:
        raise ValueError("initialization vector is not 8 bytes in length")

    try:
        LR_iter = self._u4_2_iter_unpack(data)
    except struct_error:
        raise ValueError("data is not a multiple of the block-size in length")

    for plain_L, plain_R in LR_iter:
        # CBC chaining: XOR the plaintext block with the previous
        # ciphertext block (the IV for the first block) before encrypting.
        prev_cipher_L, prev_cipher_R = encrypt(
            prev_cipher_L ^ plain_L,
            prev_cipher_R ^ plain_R,
            P, S1, S2, S3, S4,
            u4_1_pack, u1_4_unpack
        )
        yield u4_2_pack(prev_cipher_L, prev_cipher_R)
Return an iterator that encrypts `data` using the Cipher-Block Chaining (CBC) mode of operation. CBC mode can only operate on `data` that is a multiple of the block-size in length. Each iteration returns a block-sized :obj:`bytes` object (i.e. 8 bytes) containing the encrypted bytes of the corresponding block in `data`. `init_vector` is the initialization vector and should be a :obj:`bytes`-like object with exactly 8 bytes. If it is not, a :exc:`ValueError` exception is raised. `data` should be a :obj:`bytes`-like object that is a multiple of the block-size in length (i.e. 8, 16, 32, etc.). If it is not, a :exc:`ValueError` exception is raised.
def decrypt_cbc(self, data, init_vector):
    """Decrypt `data` with the Cipher-Block Chaining (CBC) mode of
    operation, yielding one 8-byte :obj:`bytes` plain block per
    iteration.

    `init_vector` must be exactly 8 bytes and `data` a multiple of the
    8-byte block size; otherwise :exc:`ValueError` is raised.
    """
    S1, S2, S3, S4 = self.S
    P = self.P

    u4_1_pack = self._u4_1_pack
    u1_4_unpack = self._u1_4_unpack
    decrypt = self._decrypt

    u4_2_pack = self._u4_2_pack

    try:
        prev_cipher_L, prev_cipher_R = self._u4_2_unpack(init_vector)
    except struct_error:
        raise ValueError("initialization vector is not 8 bytes in length")

    try:
        LR_iter = self._u4_2_iter_unpack(data)
    except struct_error:
        raise ValueError("data is not a multiple of the block-size in length")

    for cipher_L, cipher_R in LR_iter:
        L, R = decrypt(
            cipher_L, cipher_R,
            P, S1, S2, S3, S4,
            u4_1_pack, u1_4_unpack
        )
        # CBC chaining: XOR the decrypted block with the previous
        # ciphertext block (the IV for the first block).
        yield u4_2_pack(prev_cipher_L ^ L, prev_cipher_R ^ R)
        # The current ciphertext becomes the chain value for the next block.
        prev_cipher_L = cipher_L
        prev_cipher_R = cipher_R
Return an iterator that decrypts `data` using the Cipher-Block Chaining (CBC) mode of operation. CBC mode can only operate on `data` that is a multiple of the block-size in length. Each iteration returns a block-sized :obj:`bytes` object (i.e. 8 bytes) containing the decrypted bytes of the corresponding block in `data`. `init_vector` is the initialization vector and should be a :obj:`bytes`-like object with exactly 8 bytes. If it is not, a :exc:`ValueError` exception is raised. `data` should be a :obj:`bytes`-like object that is a multiple of the block-size in length (i.e. 8, 16, 32, etc.). If it is not, a :exc:`ValueError` exception is raised.
def encrypt_pcbc(self, data, init_vector):
    """Encrypt `data` with the Propagating Cipher-Block Chaining (PCBC)
    mode of operation, yielding one 8-byte :obj:`bytes` cipher block
    per iteration.

    `init_vector` must be exactly 8 bytes and `data` a multiple of the
    8-byte block size; otherwise :exc:`ValueError` is raised.
    """
    S1, S2, S3, S4 = self.S
    P = self.P

    u4_1_pack = self._u4_1_pack
    u1_4_unpack = self._u1_4_unpack
    encrypt = self._encrypt

    u4_2_pack = self._u4_2_pack

    try:
        init_L, init_R = self._u4_2_unpack(init_vector)
    except struct_error:
        raise ValueError("initialization vector is not 8 bytes in length")

    try:
        LR_iter = self._u4_2_iter_unpack(data)
    except struct_error:
        raise ValueError("data is not a multiple of the block-size in length")

    for plain_L, plain_R in LR_iter:
        cipher_L, cipher_R = encrypt(
            init_L ^ plain_L,
            init_R ^ plain_R,
            P, S1, S2, S3, S4,
            u4_1_pack, u1_4_unpack
        )
        yield u4_2_pack(cipher_L, cipher_R)
        # PCBC chaining: the next chain value is plaintext XOR ciphertext
        # of the current block (this is what propagates changes).
        init_L = plain_L ^ cipher_L
        init_R = plain_R ^ cipher_R
Return an iterator that encrypts `data` using the Propagating Cipher-Block Chaining (PCBC) mode of operation. PCBC mode can only operate on `data` that is a multiple of the block-size in length. Each iteration returns a block-sized :obj:`bytes` object (i.e. 8 bytes) containing the encrypted bytes of the corresponding block in `data`. `init_vector` is the initialization vector and should be a :obj:`bytes`-like object with exactly 8 bytes. If it is not, a :exc:`ValueError` exception is raised. `data` should be a :obj:`bytes`-like object that is a multiple of the block-size in length (i.e. 8, 16, 32, etc.). If it is not, a :exc:`ValueError` exception is raised.
def convert(self, im):
    """Return *im* converted by ``self.image_property`` when one is
    set; otherwise return the image unchanged.

    Override this method to resize/grayscale the image.
    """
    if self.image_property:
        return self.image_property.convert(im)
    return im
Please override this method if you want to resize/grascale the image.