Set up a :class:`TCPServerSocketChannel` and create a :class:`Flow` instance for it. Args: host(str): The hostname or IP address to bind to. port(int): The port number to listen on. echo(bool): Whether to echo read/written data to stdout by default. Returns: :class:`Flow`: A Flow instance initialised with the TCP socket channel.
def listen_tcp(cls, host='', port=0, echo=False):
    return cls(TCPServerSocketChannel(host, port), echo=echo)
960,484
Resolve a version specification. It can be any dict (which is returned verbatim), an index into :data:`PY_INTERNALS`, or ``None``. Arguments: version: The python version to return the internals of. default: The python version that will be looked up if ``version`` is None. Returns: dict: The python internals for the requested version.
def get_py_internals(version=None, default=None):
    if version is None:
        version = default
    if isinstance(version, dict):
        return version
    elif version in PY_INTERNALS:
        return PY_INTERNALS[version]
    else:
        # raise (not return) the error for unsupported versions
        raise ValueError('Unsupported python version %r requested.' % version)
960,531
Thin wrapper around ``bitcoin.mktx(inputs, outputs)`` Args: inputs (dict): inputs in the form of ``{'output': 'txid:vout', 'value': amount in satoshi}`` outputs (dict): outputs in the form of ``{'address': to_address, 'value': amount in satoshi}`` Returns: transaction
def build_transaction(self, inputs, outputs):
    # prepare inputs and outputs for bitcoin
    inputs = [{'output': '{}:{}'.format(input['txid'], input['vout']),
               'value': input['amount']} for input in inputs]
    tx = bitcoin.mktx(inputs, outputs)
    return tx
961,795
Decodes the given transaction. Args: tx: hex of transaction Returns: decoded transaction .. note:: Only supported for blockr.io at the moment.
def decode(self, tx):
    if not isinstance(self._service, BitcoinBlockrService):
        raise NotImplementedError('Currently only supported for "blockr.io"')
    return self._service.decode(tx)
961,800
Checks the config.json file for default settings and auth values. Args: :msg: (Message class) an instance of a message class.
def check_config_file(msg):
    with jsonconfig.Config("messages", indent=4) as cfg:
        verify_profile_name(msg, cfg)
        retrieve_data_from_config(msg, cfg)
        if msg._auth is None:
            retrieve_pwd_from_config(msg, cfg)
        if msg.save:
            update_config_data(msg, cfg)
            update_config_pwd(msg, cfg)
962,357
Verifies the profile name exists in the config.json file. Args: :msg: (Message class) an instance of a message class. :cfg: (jsonconfig.Config) config instance.
def verify_profile_name(msg, cfg):
    if msg.profile not in cfg.data:
        raise UnknownProfileError(msg.profile)
962,358
Update msg attributes with values from the profile configuration for each attribute that is None; attributes already set are left alone. Args: :msg: (Message class) an instance of a message class. :cfg: (jsonconfig.Config) config instance.
def retrieve_data_from_config(msg, cfg):
    msg_type = msg.__class__.__name__.lower()
    for attr in msg:
        if getattr(msg, attr) is None and attr in cfg.data[msg.profile][msg_type]:
            setattr(msg, attr, cfg.data[msg.profile][msg_type][attr])
962,359
Retrieve auth from profile configuration and set in msg.auth attr. Args: :msg: (Message class) an instance of a message class. :cfg: (jsonconfig.Config) config instance.
def retrieve_pwd_from_config(msg, cfg):
    msg_type = msg.__class__.__name__.lower()
    key_fmt = msg.profile + "_" + msg_type
    pwd = cfg.pwd[key_fmt].split(" :: ")
    if len(pwd) == 1:
        msg.auth = pwd[0]
    else:
        msg.auth = tuple(pwd)
962,360
Updates the profile's config entry with values set in each attr by the user. This will overwrite existing values. Args: :msg: (Message class) an instance of a message class. :cfg: (jsonconfig.Config) config instance.
def update_config_data(msg, cfg):
    for attr in msg:
        # compare by value, not identity ('is not' on a string literal is a bug)
        if attr in cfg.data[msg.profile] and attr != "auth":
            cfg.data[msg.profile][attr] = getattr(msg, attr)
962,361
Updates the profile's auth entry with values set by the user. This will overwrite existing values. Args: :msg: (Message class) an instance of a message class. :cfg: (jsonconfig.Config) config instance.
def update_config_pwd(msg, cfg):
    msg_type = msg.__class__.__name__.lower()
    key_fmt = msg.profile + "_" + msg_type
    if isinstance(msg._auth, (MutableSequence, tuple)):
        cfg.pwd[key_fmt] = " :: ".join(msg._auth)
    else:
        cfg.pwd[key_fmt] = msg._auth
962,362
Create a profile for the given message type. Args: :msg_type: (str) message type to create config entry.
def create_config_profile(msg_type):
    msg_type = msg_type.lower()
    if msg_type not in CONFIG.keys():
        raise UnsupportedMessageTypeError(msg_type)
    display_required_items(msg_type)
    if get_user_ack():
        profile_name = input("Profile Name: ")
        data = get_data_from_user(msg_type)
        auth = get_auth_from_user(msg_type)
        configure_profile(msg_type, profile_name, data, auth)
962,363
Display the required items needed to configure a profile for the given message type. Args: :msg_type: (str) message type to create config entry.
def display_required_items(msg_type):
    print("Configure a profile for: " + msg_type)
    print("You will need the following information:")
    for k, v in CONFIG[msg_type]["settings"].items():
        print(" * " + v)
    print("Authorization/credentials required:")
    for k, v in CONFIG[msg_type]["auth"].items():
        print(" * " + v)
962,364
Create the profile entry. Args: :msg_type: (str) message type to create config entry. :profile_name: (str) name of the profile entry :data: (dict) dict values for the 'settings' :auth: (dict) auth parameters
def configure_profile(msg_type, profile_name, data, auth):
    with jsonconfig.Config("messages", indent=4) as cfg:
        write_data(msg_type, profile_name, data, cfg)
        write_auth(msg_type, profile_name, auth, cfg)
        print("[+] Configuration entry for <" + profile_name + "> created.")
        print("[+] Configuration file location: " + cfg.filename)
962,367
Write the settings into the data portion of the cfg. Args: :msg_type: (str) message type to create config entry. :profile_name: (str) name of the profile entry :data: (dict) dict values for the 'settings' :cfg: (jsonconfig.Config) config instance.
def write_data(msg_type, profile_name, data, cfg):
    if profile_name not in cfg.data:
        cfg.data[profile_name] = {}
    cfg.data[profile_name][msg_type] = data
962,368
Write the settings into the auth portion of the cfg. Args: :msg_type: (str) message type to create config entry. :profile_name: (str) name of the profile entry :auth: (dict) auth parameters :cfg: (jsonconfig.Config) config instance.
def write_auth(msg_type, profile_name, auth, cfg):
    key_fmt = profile_name + "_" + msg_type
    pwd = []
    for k, v in CONFIG[msg_type]["auth"].items():
        pwd.append(auth[k])
    if len(pwd) > 1:
        cfg.pwd[key_fmt] = " :: ".join(pwd)
    else:
        cfg.pwd[key_fmt] = pwd[0]
962,369
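The " :: " separator used by write_auth is the contract with retrieve_pwd_from_config above: multi-part credentials are joined on write and split back into a tuple on read. A minimal sketch of the round trip, with hypothetical credential values:

    auth = ("user@example.com", "app-password")   # hypothetical credentials
    stored = " :: ".join(auth)                    # what write_auth saves
    restored = tuple(stored.split(" :: "))        # what retrieve_pwd_from_config loads
    assert restored == auth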
Factory function to return the specified message instance. Args: :msg_type: (str) the type of message to send, i.e. 'Email' :msg_types: (str, list, or set) the supported message types :kwargs: (dict) keyword arguments that are required for the various message types. See docstrings for each type, i.e. help(messages.Email), help(messages.Twilio), etc.
def message_factory(msg_type, msg_types=MESSAGE_TYPES, *args, **kwargs):
    try:
        return msg_types[msg_type.lower()](*args, **kwargs)
    except (UnknownProfileError, InvalidMessageInputError) as e:
        err_exit("Unable to send message: ", e)
    except KeyError:
        raise UnsupportedMessageTypeError(msg_type, msg_types)
962,442
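Because the factory lowercases the lookup key, the type name is case-insensitive. A hypothetical call, assuming MESSAGE_TYPES contains an 'email' entry and that Email accepts these keyword arguments:

    msg = message_factory('Email', to='a@example.com', subject='hi')  # hypothetical kwargs
    # An unknown type falls through to KeyError and is re-raised as
    # UnsupportedMessageTypeError(msg_type, msg_types).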
Converts a time string into a valid :py:class:`~datetime.datetime` object. Args: s (str): string to be formatted. ``**kwargs`` is passed directly to :func:`.dateutil_parser`. Returns: :py:class:`~datetime.datetime`
def parse_datetime(s, **kwargs):
    if not s:
        return None
    try:
        ret = dateutil_parser(s, **kwargs)
    except (OverflowError, TypeError, ValueError) as e:
        logger.exception(e, exc_info=True)
        reraise('datetime parsing error from %s' % s, e)
    return ret
962,525
Returns a list of directories matching the path given. Args: path (str): glob pattern. Returns: List[str]
def browse(self, path=None):
    params = None
    if path:
        assert isinstance(path, string_types)
        params = {'current': path}
    return self.get('browse', params=params)
962,529
Pause the given device. Args: device (str): Device ID. Returns: dict: with keys ``success`` and ``error``.
def pause(self, device):
    resp = self.post('pause', params={'device': device}, return_response=True)
    error = resp.text
    if not error:
        error = None
    return {'success': resp.status_code == requests.codes.ok, 'error': error}
962,534
Erase the database index from a given folder and restart Syncthing. Args: folder (str): Folder ID. Returns: None
def reset_folder(self, folder):
    warnings.warn('This is a destructive action that cannot be undone.')
    self.post('reset', data={}, params={'folder': folder})
962,535
Returns the completion percentage (0 to 100) for a given device and folder. Args: device (str): The Syncthing device the folder is syncing to. folder (str): The folder that is being synced. Returns: int
def completion(self, device, folder):
    return self.get(
        'completion', params={'folder': folder, 'device': device}
    ).get('completion', None)
962,537
Applies ``patterns`` to ``folder``'s ``.stignore`` file. Args: folder (str): patterns (str): Returns: dict
def set_ignores(self, folder, *patterns):
    if not patterns:
        return {}
    data = {'ignore': list(patterns)}
    return self.post('ignores', params={'folder': folder}, data=data)
962,538
Returns lists of files which are needed by this device in order for it to become in sync. Args: folder (str): page (int): If defined, applies pagination across the collection of results. perpage (int): If defined, applies pagination across the collection of results. Returns: dict
def need(self, folder, page=None, perpage=None):
    assert isinstance(page, int) or page is None
    assert isinstance(perpage, int) or perpage is None
    return self.get('need',
                    params={'folder': folder, 'page': page, 'perpage': perpage})
962,539
A long-polling method that queries Syncthing for events. Args: using_url (str): REST HTTP endpoint filters (List[str]): Creates an "event group" in Syncthing to only receive events that have been subscribed to. limit (int): The number of events to query in the history to catch up to the current state. Returns: generator[dict]
def _events(self, using_url, filters=None, limit=None):
    # coerce
    if not isinstance(limit, (int, NoneType)):
        limit = None
    # coerce
    if filters is None:
        filters = []
    # format our list into the correct expectation of string with commas
    if isinstance(filters, string_types):
        filters = filters.split(',')
    # reset the state if the loop was broken with `stop`
    if not self.blocking:
        self.blocking = True
    # block/long-poll for updates to the events api
    while self.blocking:
        params = {
            'since': self._last_seen_id,
            'limit': limit,
        }
        if filters:
            params['events'] = ','.join(map(str, filters))
        try:
            data = self.get(using_url, params=params, raw_exceptions=True)
        except (ConnectTimeout, ConnectionError) as e:
            # swallow timeout errors for long polling
            data = None
        except Exception as e:
            reraise('', e)
        if data:
            # update our last_seen_id to move our event counter forward
            self._last_seen_id = data[-1]['id']
            for event in data:
                # handle potentially multiple events returned in a list
                self._count += 1
                yield event
962,542
Call a method on the server Arguments: method - the remote server method params - an array of commands to send to the method Keyword Arguments: callback - a callback function containing the return data
def call(self, method, params, callback=None):
    cur_id = self._next_id()
    if callback:
        self._callbacks[cur_id] = callback
    self.send({'msg': 'method', 'id': cur_id, 'method': method, 'params': params})
962,802
Subscribe to add/change/remove events for a collection. Arguments: name - the name of the publication to subscribe params - params to subscribe (parsed as ejson) Keyword Arguments: callback - a callback function that gets executed when the subscription has completed
def subscribe(self, name, params, callback=None):
    cur_id = self._next_id()
    if callback:
        self._callbacks[cur_id] = callback
    self.send({'msg': 'sub', 'id': cur_id, 'name': name, 'params': params})
    return cur_id
962,803
Write this :class:`SampleSheet` to a file-like object. Args: handle: Object to wrap by csv.writer. blank_lines: Number of blank lines to write between sections.
def write(self, handle: TextIO, blank_lines: int = 1) -> None:
    if not isinstance(blank_lines, int) or blank_lines <= 0:
        raise ValueError('Number of blank lines must be a positive int.')

    writer = csv.writer(handle)
    csv_width: int = max([len(self.all_sample_keys), 2])
    section_order = ['Header', 'Reads'] + self._sections + ['Settings']

    def pad_iterable(
        iterable: Iterable, size: int = csv_width, padding: str = ''
    ) -> List[str]:
        return list(islice(chain(iterable, repeat(padding)), size))

    def write_blank_lines(
        writer: Any, n: int = blank_lines, width: int = csv_width
    ) -> None:
        for i in range(n):
            writer.writerow(pad_iterable([], width))

    for title in section_order:
        writer.writerow(pad_iterable([f'[{title}]'], csv_width))
        section = getattr(self, title)
        if title == 'Reads':
            for read in self.Reads:
                writer.writerow(pad_iterable([read], csv_width))
        else:
            for key, value in section.items():
                writer.writerow(pad_iterable([key, value], csv_width))
        write_blank_lines(writer)

    writer.writerow(pad_iterable(['[Data]'], csv_width))
    writer.writerow(pad_iterable(self.all_sample_keys, csv_width))

    for sample in self.samples:
        line = [getattr(sample, key) for key in self.all_sample_keys]
        writer.writerow(pad_iterable(line, csv_width))
963,102
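A usage sketch: the method writes to any text handle, so an in-memory buffer works for inspection. The constructor path here is hypothetical.

    import io

    sheet = SampleSheet('SampleSheet.csv')  # hypothetical input path
    handle = io.StringIO()
    sheet.write(handle, blank_lines=1)      # one blank line between sections
    print(handle.getvalue())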
Attribute constructor. Args: name (str): Attribute name. display_name (str): Attribute display name. description (str): Attribute description. default (bool): Whether the attribute is a default attribute of the corresponding datasets.
def __init__(self, name, display_name='', description='', default=False):
    self._name = name
    self._display_name = display_name
    self._description = description
    self._default = default
963,191
Filter constructor. Args: name (str): Filter name. type (str): Type of the filter (boolean, int, etc.). description (str): Filter description.
def __init__(self, name, type, description=''):
    self._name = name
    self._type = type
    self._description = description
963,192
ServerBase constructor. Args: host (str): URL of the host to connect to. path (str): Path on the host to access the biomart service. port (int): Port to use for the connection. use_cache (bool): Whether to cache requests.
def __init__(self, host=None, path=None, port=None, use_cache=True):
    # Use defaults if arg is None.
    host = host or DEFAULT_HOST
    path = path or DEFAULT_PATH
    port = port or DEFAULT_PORT

    # Add http prefix and remove trailing slash.
    host = self._add_http_prefix(host)
    host = self._remove_trailing_slash(host)

    # Ensure path starts with slash.
    if not path.startswith('/'):
        path = '/' + path

    self._host = host
    self._path = path
    self._port = port
    self._use_cache = use_cache
963,301
Performs get request to the biomart service. Args: **params (dict of str: any): Arbitrary keyword arguments, which are added as parameters to the get request to biomart. Returns: requests.models.Response: Response from biomart for the request.
def get(self, **params):
    if self._use_cache:
        r = requests.get(self.url, params=params)
    else:
        with requests_cache.disabled():
            r = requests.get(self.url, params=params)
    r.raise_for_status()
    return r
963,303
Checks if the user browser from the given user agent is mobile. Args: user_agent: A given user agent. Returns: True if the browser from the user agent is mobile.
def is_mobile(user_agent):
    if user_agent:
        b = reg_b.search(user_agent)
        v = reg_v.search(user_agent[0:4])
        return b or v
    return False
964,812
Returns the content of a cached resource. Args: url: The url of the resource Returns: The content of the cached resource or None if not in the cache
def get_content(self, url):
    cache_path = self._url_to_path(url)
    try:
        with open(cache_path, 'rb') as f:
            return f.read()
    except IOError:
        return None
965,643
Returns the path of a cached resource. Args: url: The url of the resource Returns: The path to the cached resource or None if not in the cache
def get_path(self, url):
    cache_path = self._url_to_path(url)
    if os.path.exists(cache_path):
        return cache_path
    return None
965,644
Stores the content of a resource into the disk cache. Args: url: The url of the resource content: The content of the resource Raises: CacheError: If the content cannot be put in cache
def put_content(self, url, content):
    cache_path = self._url_to_path(url)

    # Ensure that cache directories exist
    try:
        dir = os.path.dirname(cache_path)
        os.makedirs(dir)
    except OSError as e:
        if e.errno != errno.EEXIST:
            # note: the original format string was missing its %s placeholder
            raise Error('Failed to create cache directories for %s' % cache_path)

    try:
        with open(cache_path, 'wb') as f:
            f.write(content)
    except IOError:
        raise Error('Failed to cache content as %s for %s' % (cache_path, url))
965,645
Puts a resource already on disk into the disk cache. Args: url: The original url of the resource path: The resource already available on disk Raises: CacheError: If the file cannot be put in cache
def put_path(self, url, path):
    cache_path = self._url_to_path(url)

    # Ensure that cache directories exist
    try:
        dir = os.path.dirname(cache_path)
        os.makedirs(dir)
    except OSError as e:
        if e.errno != errno.EEXIST:
            # note: the original format string was missing its %s placeholder
            raise Error('Failed to create cache directories for %s' % cache_path)

    # Remove the resource if it already exists
    try:
        os.unlink(cache_path)
    except OSError:
        pass

    try:
        # First try hard link to avoid wasting disk space & overhead
        os.link(path, cache_path)
    except OSError:
        try:
            # Use file copy as fallback
            shutil.copyfile(path, cache_path)
        except IOError:
            raise Error('Failed to cache %s as %s for %s' %
                        (path, cache_path, url))
965,646
Generates branches with an alternative system. Args: scales (tuple/array): Indicates how the branch lengths develop from age to age. angles (tuple/array): Holds the branch and shift angle in radians. shift_angle (float): Holds the rotation angle for all branches. Returns: branches (2d-array): An array of arrays, each holding the scale and angle for one branch.
def generate_branches(scales=None, angles=None, shift_angle=0):
    branches = []
    for pos, scale in enumerate(scales):
        angle = -sum(angles)/2 + sum(angles[:pos]) + shift_angle
        branches.append([scale, angle])
    return branches
966,862
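A quick worked example of the angle centering (angle_i = -sum(angles)/2 + sum(angles[:i]) + shift_angle): two branches that both shrink to 80% per age, separated by 30 degrees:

    from math import pi

    generate_branches(scales=(0.8, 0.8), angles=(pi/6, pi/6))
    # -> [[0.8, -0.5235987755982988], [0.8, 0.0]]
    #    i.e. branch 0 at -pi/6, branch 1 at 0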
The constructor. Args: pos (tuple): A tuple holding the start and end point of the tree: (x1, y1, x2, y2). branches (tuple/array): Holds array(s) with scale and angle for every branch. sigma (tuple): Holds the branch and angle sigma, e.g. (0.1, 0.2).
def __init__(self, pos=(0, 0, 0, -100), branches=None, sigma=(0, 0)):
    self.pos = pos
    self.length = sqrt((pos[2]-pos[0])**2 + (pos[3]-pos[1])**2)
    self.branches = branches
    self.sigma = sigma
    self.comp = len(self.branches)
    self.age = 0
    self.nodes = [
        [Node(pos[2:])]
    ]
966,863
Get the length of a branch. This method calculates the length of a branch at a specific age, using the formula: length * scale^age. Args: age (int): The age for which you want to know the branch length. Returns: float: The length of the branch
def get_branch_length(self, age=None, pos=0):
    if age is None:
        age = self.age
    return self.length * pow(self.branches[pos][0], age)
966,866
Move the tree. Args: delta (tuple): The adjustment of the position.
def move(self, delta):
    pos = self.pos
    self.pos = (pos[0]+delta[0], pos[1]+delta[1],
                pos[2]+delta[0], pos[3]+delta[1])

    # Move all nodes
    for age in self.nodes:
        for node in age:
            node.move(delta)
966,872
Let the tree grow. Args: times (integer): Indicate how many times the tree will grow.
def grow(self, times=1):
    self.nodes.append([])

    for n, node in enumerate(self.nodes[self.age]):
        if self.age == 0:
            p_node = Node(self.pos[:2])
        else:
            p_node = self._get_node_parent(self.age-1, n)
        angle = node.get_node_angle(p_node)
        for i in range(self.comp):
            tot_angle = self.__get_total_angle(angle, i)
            length = self.__get_total_length(self.age+1, i)
            self.nodes[self.age+1].append(node.make_new_node(length, tot_angle))

    self.age += 1

    if times > 1:
        self.grow(times-1)
966,873
Draw the tree on a canvas. Args: canvas (object): The canvas you want to draw the tree on. Supported canvases: svgwrite.Drawing and PIL.Image (you can also add your custom libraries.) stem_color (tuple): Color or gradient for the stem of the tree. leaf_color (tuple): Color for the leaf (= the color for the last iteration). thickness (int): The start thickness of the tree.
def draw_on(self, canvas, stem_color, leaf_color, thickness, ages=None):
    if canvas.__module__ in SUPPORTED_CANVAS:
        drawer = SUPPORTED_CANVAS[canvas.__module__]
        drawer(self, canvas, stem_color, leaf_color, thickness, ages).draw()
966,874
Get the fill color depending on age. Args: age (int): The age of the branch/es Returns: tuple: (r, g, b)
def _get_color(self, age):
    if age == self.tree.age:
        return self.leaf_color
    color = self.stem_color
    tree = self.tree
    if len(color) == 3:
        return color
    diff = [color[i+3]-color[i] for i in range(3)]
    per_age = [diff[i]/(tree.age-1) for i in range(3)]
    return tuple([int(color[i]+per_age[i]*age) for i in range(3)])
966,935
Draws the tree. Args: ages (array): Contains the ages you want to draw.
def draw(self):
    for age, level in enumerate(self.tree.get_branches()):
        if age in self.ages:
            thickness = self._get_thickness(age)
            color = self._get_color(age)
            for branch in level:
                self._draw_branch(branch, color, thickness, age)
966,936
Fetch a Bucket for the given key. rate and capacity might be overridden from the Throttler defaults. Args: rate (float): Units regenerated by second, or None to keep Throttler defaults capacity (int): Maximum units available, or None to keep Throttler defaults
def get_bucket(self, key, rate=None, capacity=None, **kwargs):
    return buckets.Bucket(
        key=key,
        rate=rate or self.rate,
        capacity=capacity or self.capacity,
        storate=self.storate,
        **kwargs)
967,316
Set the value of several keys at once. Args: values (dict): maps a key to its value.
def mset(self, values):
    for key, value in values.items():
        self.set(key, value)
967,397
Returns the Django HTML to load the tag library and render the tag. Args: tag_id (str): The tag id to return the HTML for.
def get_tag_html(tag_id):
    tag_data = get_lazy_tag_data(tag_id)
    tag = tag_data['tag']
    args = tag_data['args']
    kwargs = tag_data['kwargs']
    lib, tag_name = get_lib_and_tag_name(tag)

    args_str = ''
    if args:
        for arg in args:
            if isinstance(arg, six.string_types):
                args_str += "'{0}' ".format(arg)
            else:
                args_str += "{0} ".format(arg)

    kwargs_str = ''
    if kwargs:
        for name, value in kwargs.items():
            if isinstance(value, six.string_types):
                kwargs_str += "{0}='{1}' ".format(name, value)
            else:
                kwargs_str += "{0}={1} ".format(name, value)

    html = '{{% load {lib} %}}{{% {tag_name} {args}{kwargs}%}}'.format(
        lib=lib, tag_name=tag_name, args=args_str, kwargs=kwargs_str)

    return html
967,464
Get the angle between two nodes relative to the horizontal. Args: node (object): The other node. Returns: rad: The angle
def get_node_angle(self, node):
    return atan2(self.pos[0]-node.pos[0], self.pos[1]-node.pos[1]) - pi/2
967,498
Get the distance between two nodes. Args: node (object): The other node.
def get_distance(self, node):
    delta = (node.pos[0]-self.pos[0], node.pos[1]-self.pos[1])
    return sqrt(delta[0]**2 + delta[1]**2)
967,499
Move the node. Args: delta (tuple): A tuple holding the adjustment of the position.
def move(self, delta):
    self.pos = (self.pos[0]+delta[0], self.pos[1]+delta[1])
967,500
Return a :class:`Collection` with the specified properties. Args: iterable (Iterable): collection to instantiate new collection from. mutable (bool): Whether or not the new collection is mutable. ordered (bool): Whether or not the new collection is ordered. unique (bool): Whether or not the new collection contains only unique values.
def collection(iterable=None, mutable=True, ordered=False, unique=False):
    if iterable is None:
        iterable = tuple()
    if unique:
        if ordered:
            if mutable:
                return setlist(iterable)
            else:
                return frozensetlist(iterable)
        else:
            if mutable:
                return set(iterable)
            else:
                return frozenset(iterable)
    else:
        if ordered:
            if mutable:
                return list(iterable)
            else:
                return tuple(iterable)
        else:
            if mutable:
                return bag(iterable)
            else:
                return frozenbag(iterable)
967,755
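A sketch of the dispatch, assuming the surrounding module exposes bag, frozenbag, setlist, and frozensetlist (as the function body implies): the three flags select among eight concrete types.

    assert type(collection()) is bag                          # mutable, unordered, non-unique
    assert type(collection(ordered=True)) is list
    assert type(collection(unique=True)) is set
    assert type(collection(ordered=True, unique=True)) is setlist
    assert type(collection(mutable=False)) is frozenbag
    assert type(collection(mutable=False, ordered=True)) is tuple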
Computes the next closure for rules based on the symbol we got. Args: rules - an iterable of DottedRules symbol - a string denoting the symbol we've just seen Returns: frozenset of DottedRules
def goto(self, rules, symbol):
    return self.closure(
        {rule.move_dot() for rule in rules
         if not rule.at_end and rule.rhs[rule.pos] == symbol},
    )
967,765
Fills out the entire closure based on some initial dotted rules. Args: rules - an iterable of DottedRules Returns: frozenset of DottedRules
def closure(self, rules):
    closure = set()
    todo = set(rules)
    while todo:
        rule = todo.pop()
        closure.add(rule)
        # If the dot is at the end, there's no need to process it.
        if rule.at_end:
            continue
        symbol = rule.rhs[rule.pos]
        for production in self.nonterminals[symbol]:
            for first in self.first(rule.rest):
                if EPSILON in production.rhs:
                    # Move immediately to the end if the production
                    # goes to epsilon
                    new_rule = DottedRule(production, 1, first)
                else:
                    new_rule = DottedRule(production, 0, first)
                if new_rule not in closure:
                    todo.add(new_rule)
    return frozenset(closure)
967,766
Start publishing MySQL row-based binlog events to blinker signals Args: mysql_settings (dict): information to connect to mysql via pymysql **kwargs: The additional kwargs will be passed to :py:class:`pymysqlreplication.BinLogStreamReader`.
def start_publishing(mysql_settings, **kwargs):
    _logger.info('Start publishing from %s with:\n%s'
                 % (mysql_settings, kwargs))

    kwargs.setdefault('server_id', random.randint(1000000000, 4294967295))
    kwargs.setdefault('freeze_schema', True)

    # connect to binlog stream
    stream = pymysqlreplication.BinLogStreamReader(
        mysql_settings,
        only_events=[row_event.DeleteRowsEvent,
                     row_event.UpdateRowsEvent,
                     row_event.WriteRowsEvent],
        **kwargs
    )

    for event in stream:
        # ignore non row events
        if not isinstance(event, row_event.RowsEvent):
            continue

        _logger.debug('Send binlog signal "%s@%s.%s"' % (
            event.__class__.__name__,
            event.schema,
            event.table
        ))
        signals.binlog_signal.send(event, stream=stream)
        signals.binlog_position_signal.send((stream.log_file, stream.log_pos))
967,775
List the n most common elements and their counts, from the most common to the least. If n is None, list all element counts. Run time should be O(m log m) where m is len(self). Args: n (int): The number of elements to return
def nlargest(self, n=None):
    if n is None:
        return sorted(self.counts(), key=itemgetter(1), reverse=True)
    else:
        return heapq.nlargest(n, self.counts(), key=itemgetter(1))
967,826
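For example, on a bag built from a short string (a sketch assuming the bag type from the same module):

    b = bag('aaabbc')
    b.nlargest()    # [('a', 3), ('b', 2), ('c', 1)]
    b.nlargest(2)   # [('a', 3), ('b', 2)]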
Check that every element in self has a count less than or equal to its count in other. Args: other (Set)
def is_subset(self, other):
    if isinstance(other, _basebag):
        for elem, count in self.counts():
            if not count <= other.count(elem):
                return False
    else:
        for elem in self:
            if self.count(elem) > 1 or elem not in other:
                return False
    return True
967,829
Create an instance of FileBasedBinlogPosMemory. Args: pos_filename (str|None): position storage file. None creates *mysqlbinlog2blinker.binlog.pos* in the current working dir. interval (float): the save interval in seconds
def __init__(self, pos_filename, interval=2):
    if not pos_filename:
        pos_filename = os.path.join(os.getcwd(),
                                    'mysqlbinlog2blinker.binlog.pos')
    self.pos_storage_filename = pos_filename
    assert self.pos_storage_filename

    self.interval = interval

    self._log_file = None
    self._log_pos = None
    self._pos_changed = False

    self.save_log_pos_thread_stop_flag = threading.Event()
    self.save_log_pos_thread = \
        threading.Thread(target=self._save_log_pos_thread_runner)
    self.save_log_pos_thread.daemon = True
967,924
Create a setlist, initializing from iterable if present. Args: iterable (Iterable): Values to initialize the setlist with. raise_on_duplicate: Raise a ValueError if any duplicate values are present.
def __init__(self, iterable=None, raise_on_duplicate=False):
    self._list = list()
    self._dict = dict()
    if iterable:
        if raise_on_duplicate:
            self._extend(iterable)
        else:
            self._update(iterable)
967,938
Return the index of value between start and end. By default, the entire setlist is searched. This runs in O(1) Args: value: The value to find the index of start (int): The index to start searching at (defaults to 0) end (int): The index to stop searching at (defaults to the end of the list) Returns: int: The index of the value Raises: ValueError: If the value is not in the list or outside of start - end IndexError: If start or end are out of range
def index(self, value, start=0, end=None):
    try:
        index = self._dict[value]
    except KeyError:
        raise ValueError
    else:
        start = self._fix_neg_index(start)
        end = self._fix_end_index(end)
        if start <= index and index < end:
            return index
        else:
            raise ValueError
967,946
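A usage sketch assuming the setlist constructor accepts an iterable: the lookup itself is a dict hit, and the start/end bounds are checked afterwards.

    sl = setlist('abcd')
    sl.index('c')        # 2, found via the internal dict in O(1)
    sl.index('c', 0, 2)  # raises ValueError: index 2 falls outside [0, 2)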
Return the index of a subsequence. This runs in O(len(sub)) Args: sub (Sequence): An Iterable to search for Returns: int: The index of the first element of sub Raises: ValueError: If sub isn't a subsequence TypeError: If sub isn't iterable IndexError: If start or end are out of range
def sub_index(self, sub, start=0, end=None):
    start_index = self.index(sub[0], start, end)
    end = self._fix_end_index(end)
    if start_index + len(sub) > end:
        raise ValueError
    for i in range(1, len(sub)):
        if sub[i] != self[start_index + i]:
            raise ValueError
    return start_index
967,953
Insert value at index. Args: index (int): Index to insert value at value: Value to insert Raises: ValueError: If value already in self IndexError: If start or end are out of range
def insert(self, index, value):
    if value in self:
        raise ValueError
    index = self._fix_neg_index(index)
    self._dict[value] = index
    for elem in self._list[index:]:
        self._dict[elem] += 1
    self._list.insert(index, value)
967,959
Add all values to the end of self. Args: values (Iterable): Values to append Raises: ValueError: If any values are already present
def __iadd__(self, values):
    self._check_type(values, '+=')
    self.extend(values)
    return self
967,960
Remove value from self. Args: value: Element to remove from self Raises: ValueError: if the value is not present
def remove(self, value):
    try:
        index = self._dict[value]
    except KeyError:
        # the original format string was missing its % argument
        raise ValueError('Value "%s" is not present.' % value)
    else:
        del self[index]
967,961
Get updated values from 2 dicts of values Args: before_values (dict): values before update after_values (dict): values after update Returns: dict: a diff dict with key is field key, value is tuple of (before_value, after_value)
def _get_updated_values(before_values, after_values):
    assert before_values.keys() == after_values.keys()
    return dict([(k, [before_values[k], after_values[k]])
                 for k in before_values.keys()
                 if before_values[k] != after_values[k]])
967,972
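For example, only the keys whose values actually changed survive into the diff:

    before = {'id': 1, 'name': 'old', 'qty': 3}
    after = {'id': 1, 'name': 'new', 'qty': 3}
    _get_updated_values(before, after)
    # -> {'name': ['old', 'new']}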
Convert a row for update event Args: row (dict): event row data
def _convert_update_row(row):
    after_values = row['after_values']  # type: dict
    before_values = row['before_values']  # type: dict
    values = after_values
    return {
        'values': values,
        'updated_values': _get_updated_values(before_values, after_values)
    }
967,973
Convert RowsEvent to a dict Args: e (pymysqlreplication.row_event.RowsEvent): the event stream (pymysqlreplication.BinLogStreamReader): the stream that yields event Returns: dict: event's data as a dict
def _rows_event_to_dict(e, stream):
    pk_cols = e.primary_key if isinstance(e.primary_key, (list, tuple)) \
        else (e.primary_key, )
    if isinstance(e, row_event.UpdateRowsEvent):
        sig = signals.rows_updated
        action = 'update'
        row_converter = _convert_update_row
    elif isinstance(e, row_event.WriteRowsEvent):
        sig = signals.rows_inserted
        action = 'insert'
        row_converter = _convert_write_row
    elif isinstance(e, row_event.DeleteRowsEvent):
        sig = signals.rows_deleted
        action = 'delete'
        row_converter = _convert_write_row
    else:
        assert False, 'Invalid binlog event'

    meta = {
        'time': e.timestamp,
        'log_pos': stream.log_pos,
        'log_file': stream.log_file,
        'schema': e.schema,
        'table': e.table,
        'action': action,
    }
    rows = list(map(row_converter, e.rows))
    for row in rows:
        row['keys'] = {k: row['values'][k] for k in pk_cols}
    return rows, meta
967,974
Process on a binlog event 1. Convert event instance into a dict 2. Send corresponding schema/table/signals Args: event (pymysqlreplication.row_event.RowsEvent): the event
def on_binlog(event, stream):
    rows, meta = _rows_event_to_dict(event, stream)

    table_name = '%s.%s' % (meta['schema'], meta['table'])

    if meta['action'] == 'insert':
        sig = signals.rows_inserted
    elif meta['action'] == 'update':
        sig = signals.rows_updated
    elif meta['action'] == 'delete':
        sig = signals.rows_deleted
    else:
        raise RuntimeError('Invalid action "%s"' % meta['action'])

    sig.send(table_name, rows=rows, meta=meta)
967,975
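A hypothetical consumer on the other side of these blinker signals (the import path follows the package name used above; the receiver signature mirrors sig.send(table_name, rows=rows, meta=meta)):

    from mysqlbinlog2blinker import signals  # assumed package layout

    @signals.rows_updated.connect
    def on_rows_updated(table_name, rows, meta):
        # rows: [{'values': {...}, 'updated_values': {...}, 'keys': {...}}, ...]
        # meta: {'time', 'log_pos', 'log_file', 'schema', 'table', 'action'}
        print(table_name, meta['action'], len(rows))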
Create a mapped range. Args: start: The start of the range, inclusive. stop: The end of the range, exclusive. value: The mapped value.
def __init__(self, start, stop, value):
    self.start = start
    self.stop = stop
    self.value = value
967,983
Get JSON from response if success, raise requests.HTTPError otherwise. Args: resp: requests.Response or flask.Response Returns: JSON value
def resp_json(resp):
    if isinstance(resp, flask.Response):
        if 400 <= resp.status_code < 600:
            msg = resp.status
            try:
                result = loads(resp.data.decode("utf-8"))
                if isinstance(result, str):
                    msg = "%s, %s" % (resp.status, result)
                else:
                    msg = "%s %s, %s" % (
                        resp.status_code, result["error"], result["message"])
            except Exception:
                pass
            raise requests.HTTPError(msg, response=resp)
        else:
            return loads(resp.data.decode("utf-8"))
    else:
        try:
            resp.raise_for_status()
        except requests.HTTPError as ex:
            # the response may contain {"error": "", "message": ""};
            # append error and message to the exception if possible
            try:
                result = resp.json()
                ex.args += (result["error"], result["message"])
            except (ValueError, KeyError):
                pass
            raise
        return resp.json()
968,216
Send request Args: resource: resource action: action data: string or object which can be json.dumps headers: http headers
def _request(self, resource, action, data=None, headers=None):
    url, httpmethod = res_to_url(resource, action)
    return self.ajax(url, httpmethod, data, headers)
968,221
Returns a list of valid host globs for an SSL certificate. Args: cert: A dictionary representing an SSL certificate. Returns: list: A list of valid host globs.
def GetValidHostsForCert(cert):
    if 'subjectAltName' in cert:
        return [x[1] for x in cert['subjectAltName'] if x[0].lower() == 'dns']
    else:
        return [x[0][1] for x in cert['subject']
                if x[0][0].lower() == 'commonname']
968,285
Validates that a given hostname is valid for an SSL certificate. Args: cert: A dictionary representing an SSL certificate. hostname: The hostname to test. Returns: bool: Whether or not the hostname is valid for this certificate.
def ValidateCertificateHostname(cert, hostname):
    hosts = GetValidHostsForCert(cert)
    boto.log.debug("validating server certificate: hostname=%s, "
                   "certificate hosts=%s", hostname, hosts)
    for host in hosts:
        host_re = host.replace('.', r'\.').replace('*', '[^.]*')
        if re.search('^%s$' % (host_re,), hostname, re.I):
            return True
    return False
968,286
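A sketch with a hypothetical certificate dict in the shape ssl.getpeercert() returns; note that * becomes [^.]*, so a wildcard matches exactly one label:

    cert = {'subjectAltName': (('DNS', '*.example.com'), ('DNS', 'example.com'))}
    ValidateCertificateHostname(cert, 'www.example.com')    # True
    ValidateCertificateHostname(cert, 'a.b.example.com')    # False (wildcard spans one label)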
Constructor. Args: host: The hostname the connection was made to. cert: The SSL certificate (as a dictionary) the host returned. reason: The reason the exception was raised.
def __init__(self, host, cert, reason):
    httplib.HTTPException.__init__(self)
    self.host = host
    self.cert = cert
    self.reason = reason
968,287
Abort with suitable error response Args: code (int): status code error (str): error symbol or flask.Response message (str): error message
def abort(code, error=None, message=None):
    if error is None:
        flask_abort(code)
    elif isinstance(error, Response):
        error.status_code = code
        flask_abort(code, response=error)
    else:
        body = {
            "status": code,
            "error": error,
            "message": message
        }
        flask_abort(code, response=export(body, code))
968,385
Convert rv to tuple(data, code, headers) Args: rv: data or tuple that contain code and headers Returns: tuple (rv, status, headers)
def unpack(rv):
    status = headers = None
    if isinstance(rv, tuple):
        rv, status, headers = rv + (None,) * (3 - len(rv))
    if isinstance(status, (dict, list)):
        headers, status = status, headers
    return (rv, status, headers)
968,386
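The helper accepts a bare value, a (value, status) pair, a (value, headers) pair (the status slot holding a dict or list), or the full triple:

    unpack({'ok': True})                         # ({'ok': True}, None, None)
    unpack(({'ok': True}, 201))                  # ({'ok': True}, 201, None)
    unpack(({'ok': True}, {'X-Id': '1'}))        # ({'ok': True}, None, {'X-Id': '1'})
    unpack(({'ok': True}, 201, {'X-Id': '1'}))   # ({'ok': True}, 201, {'X-Id': '1'})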
Create a suitable response Args: rv: return value of action code: status code headers: response headers Returns: flask.Response
def export(rv, code=None, headers=None):
    if isinstance(rv, ResponseBase):
        return make_response(rv, code, headers)
    else:
        if code is None:
            code = 200
        mediatype = request.accept_mimetypes.best_match(
            exporters.keys(), default='application/json')
        return exporters[mediatype](rv, code, headers)
968,387
Parse YAML syntax content from docs. If docs is None, return {}. If docs has no YAML content, return {"$desc": docs}. Else, parse the YAML content and return {"$desc": docs, YAML}. Args: docs (str): docs to be parsed marks (list): marks that indicate where YAML content starts Returns: A dict containing information from the docs
def parse_docs(docs, marks):
    if docs is None:
        return {}
    indexs = []
    for mark in marks:
        i = docs.find(mark)
        if i >= 0:
            indexs.append(i)
    if not indexs:
        return {"$desc": textwrap.dedent(docs).strip()}
    start = min(indexs)
    start = docs.rfind("\n", 0, start)
    yamltext = textwrap.dedent(docs[start + 1:])
    meta = yaml.load(yamltext)
    meta["$desc"] = textwrap.dedent(docs[:start]).strip()
    return meta
968,388
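A small worked example with a hypothetical action docstring: everything before the first mark becomes $desc and the remainder is parsed as YAML.

    docs = """Create an item.

    $input:
        name?str: item name
    """
    parse_docs(docs, ["$input", "$output", "$error"])
    # -> {'$input': {'name?str': 'item name'}, '$desc': 'Create an item.'}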
Add a resource. Parse the resource and its actions, and route the actions by naming rule. Args: resource: resource class class_args: class_args class_kwargs: class_kwargs
def add_resource(self, resource, *class_args, **class_kwargs):
    name = resource.__name__.lower()
    meta_resource = parse_docs(resource.__doc__, ["$shared"])
    self.meta[name] = meta_resource
    shared = self.meta["$shared"].copy()
    shared.update(meta_resource.get("$shared", {}))
    with MarkKey("%s.$shared" % resource.__name__):
        sp = SchemaParser(validators=self.validators, shared=shared)
    with MarkKey(resource.__name__):
        resource = resource(*class_args, **class_kwargs)
        # group actions by their name, and
        # make each action group a view function
        actions = defaultdict(lambda: {})
        for action in dir(resource):
            find = PATTERN_ACTION.findall(action)
            if not find:
                continue
            httpmethod, action_name = find[0]
            action_group = actions[action_name]
            fn = getattr(resource, action)
            meta_action = parse_docs(
                fn.__doc__, ["$input", "$output", "$error"])
            meta_resource[action] = meta_action
            with MarkKey(fn.__name__):
                action_group[httpmethod] = \
                    self.make_action(fn, sp, meta_action)

    for action_name in actions:
        if action_name == "":
            url = "/" + name
            endpoint = name
        else:
            url = "/{0}/{1}".format(name, action_name)
            endpoint = "{0}@{1}".format(name, action_name)
        action_group = actions[action_name]
        self.app.add_url_rule(
            url, endpoint=endpoint,
            view_func=self.make_view(action_group),
            methods=set(action_group)
        )
968,394
Make resource's method an action Validate input, output by schema in meta. If no input schema, call fn without params. If no output schema, will not validate return value. Args: fn: resource's method schema_parser: for parsing schema in meta meta: meta data of the action
def make_action(self, fn, schema_parser, meta):
    validate_input = validate_output = None
    if "$input" in meta:
        with MarkKey("$input"):
            validate_input = schema_parser.parse(meta["$input"])
    if "$output" in meta:
        with MarkKey("$output"):
            validate_output = schema_parser.parse(meta["$output"])

    def action(data):
        if validate_input:
            try:
                data = validate_input(data)
            except Invalid as ex:
                return abort(400, "InvalidData", str(ex))
            if isinstance(data, dict):
                rv = fn(**data)
            else:
                rv = fn(data)
        else:
            rv = fn()
        rv, status, headers = unpack(rv)
        if validate_output:
            try:
                rv = validate_output(rv)
            except Invalid as ex:
                return abort(500, "ServerError", str(ex))
        return rv, status, headers

    return action
968,395
Parse metadata of API Args: meta: metadata of API Returns: tuple(url_prefix, auth_header, resources)
def parse_meta(meta):
    resources = {}
    for name in meta:
        if name.startswith("$"):
            continue
        resources[name] = resource = {}
        for action in meta[name]:
            if action.startswith("$"):
                continue
            url, httpmethod = res_to_url(name, action)
            resource[action] = {
                "url": url,
                "method": httpmethod
            }
    url_prefix = meta.get("$url_prefix", "").rstrip("/")
    return url_prefix, meta["$auth"]["header"].lower(), resources
969,002
Generate res.js Args: meta: tuple(url_prefix, auth_header, resources) or metadata of API Returns: res.js source code
def generate_code(meta, prefix=None, node=False, min=False):
    if isinstance(meta, dict):
        url_prefix, auth_header, resources = parse_meta(meta)
    else:
        url_prefix, auth_header, resources = meta
    if prefix is not None:
        url_prefix = prefix
    core = render_core(url_prefix, auth_header, resources)
    if min:
        filename = 'res.web.min.js'
    else:
        filename = 'res.web.js'
    if node:
        filename = 'res.node.js'
    base = read_file(filename)
    return base.replace('"#res.core.js#"', core)
969,006
Creates a JSON response JSON content is encoded by utf-8, not unicode escape. Args: data: any type object that can dump to json status (int): http status code headers (dict): http headers
def export_json(data, status, headers):
    dumped = json.dumps(data, ensure_ascii=False)
    resp = current_app.response_class(
        dumped, status=status, headers=headers,
        content_type='application/json; charset=utf-8')
    return resp
969,658
Calculate token expiration. Return the expiration if the token needs its expiration set or refreshed, otherwise return None. Args: token (dict): a decoded token
def calculate_expiration(self, token):
    if not token:
        return None
    now = datetime.utcnow()
    time_to_live = self.config["expiration"]
    if "exp" not in token:
        return now + timedelta(seconds=time_to_live)
    elif self.config["refresh"]:
        exp = datetime.utcfromtimestamp(token["exp"])
        # 0.5: reduce refresh frequency
        if exp - now < timedelta(seconds=0.5 * time_to_live):
            return now + timedelta(seconds=time_to_live)
    return None
970,917
Create a chunker performing content-defined chunking (CDC) using Rabin Karp's rolling hash scheme with a specific, expected chunk size. Args: chunk_size (int): (Expected) target chunk size. Returns: BaseChunker: A chunker object.
def create_chunker(self, chunk_size):
    rolling_hash = _rabinkarprh.RabinKarpHash(self.window_size, self._seed)
    rolling_hash.set_threshold(1.0 / chunk_size)
    return RabinKarpCDC._Chunker(rolling_hash)
971,649
Constructor. Args: channel: A grpc.Channel.
def __init__(self, channel):
    self.Capabilities = channel.unary_unary(
        '/gnmi.gNMI/Capabilities',
        request_serializer=gnmi__pb2.CapabilityRequest.SerializeToString,
        response_deserializer=gnmi__pb2.CapabilityResponse.FromString,
    )
    self.Get = channel.unary_unary(
        '/gnmi.gNMI/Get',
        request_serializer=gnmi__pb2.GetRequest.SerializeToString,
        response_deserializer=gnmi__pb2.GetResponse.FromString,
    )
    self.Set = channel.unary_unary(
        '/gnmi.gNMI/Set',
        request_serializer=gnmi__pb2.SetRequest.SerializeToString,
        response_deserializer=gnmi__pb2.SetResponse.FromString,
    )
    self.Subscribe = channel.stream_stream(
        '/gnmi.gNMI/Subscribe',
        request_serializer=gnmi__pb2.SubscribeRequest.SerializeToString,
        response_deserializer=gnmi__pb2.SubscribeResponse.FromString,
    )
971,714
Transform a ``frequency_map`` into a map of probability using the sum of all frequencies as the total.

Example:
    >>> frequency_to_probability({'a': 2, 'b': 2})
    {'a': 0.5, 'b': 0.5}

Args:
    frequency_map (dict): The dictionary to transform
    decorator (function): A function to manipulate the probability

Returns:
    Dictionary of ngrams to probability
def frequency_to_probability(frequency_map, decorator=lambda f: f):
    total = sum(frequency_map.values())
    return {k: decorator(v / total) for k, v in frequency_map.items()}
971,723
Calculate the Chi Squared statistic by comparing ``source_frequency`` with ``target_frequency``.

Example:
    >>> chi_squared({'a': 2, 'b': 3}, {'a': 1, 'b': 2})
    0.1

Args:
    source_frequency (dict): Frequency map of the text you are analyzing
    target_frequency (dict): Frequency map of the target language to compare with

Returns:
    Decimal value of the chi-squared statistic
def chi_squared(source_frequency, target_frequency):
    # Ignore any symbols from source that are not in target.
    # TODO: raise Error if source_len is 0?
    target_prob = frequency_to_probability(target_frequency)
    source_len = sum(v for k, v in source_frequency.items()
                     if k in target_frequency)

    result = 0
    for symbol, prob in target_prob.items():
        # Frequency is 0 if the symbol doesn't appear in source
        symbol_frequency = source_frequency.get(symbol, 0)
        result += _calculate_chi_squared(symbol_frequency, prob, source_len)

    return result
971,726
Score ``text`` using ``score_functions``.

Examples:
    >>> score("abc", function_a)
    >>> score("abc", function_a, function_b)

Args:
    text (str): The text to score
    *score_functions (variable length argument list): functions to score with

Returns:
    Arithmetic mean of scores

Raises:
    ValueError: If score_functions is empty
def score(text, *score_functions):
    if not score_functions:
        raise ValueError("score_functions must not be empty")
    return statistics.mean(func(text) for func in score_functions)
971,729
Compute the score of a text by determining if a pattern matches.

Example:
    >>> fitness = PatternMatch("flag{.*}")
    >>> fitness("flag{example}")
    0
    >>> fitness("junk")
    -1

Args:
    regex (str): regular expression string to use as a pattern
def PatternMatch(regex):
    pattern = re.compile(regex)
    return lambda text: -1 if pattern.search(text) is None else 0
971,733
Write or return XML for a set of resources in sitemap format. Arguments: - resources - either an iterable or iterator of Resource objects; if there is an md attribute this will go to <rs:md>, if there is an ln attribute this will go to <rs:ln> - sitemapindex - set True to write sitemapindex instead of sitemap - fh - write to filehandle fh instead of returning string
def resources_as_xml(self, resources, sitemapindex=False, fh=None):
    # element names depending on sitemapindex or not
    root_element = ('sitemapindex' if (sitemapindex) else 'urlset')
    item_element = ('sitemap' if (sitemapindex) else 'url')
    # namespaces and other settings
    namespaces = {'xmlns': SITEMAP_NS, 'xmlns:rs': RS_NS}
    root = Element(root_element, namespaces)
    if (self.pretty_xml):
        root.text = "\n"
    # <rs:ln>
    if (hasattr(resources, 'ln')):
        for ln in resources.ln:
            self.add_element_with_atts_to_etree(root, 'rs:ln', ln)
    # <rs:md>
    if (hasattr(resources, 'md')):
        self.add_element_with_atts_to_etree(root, 'rs:md', resources.md)
    # <url> entries from either an iterable or an iterator
    for r in resources:
        e = self.resource_etree_element(r, element_name=item_element)
        root.append(e)
    # have tree, now serialize
    tree = ElementTree(root)
    xml_buf = None
    if (fh is None):
        xml_buf = io.StringIO()
        fh = xml_buf
    if (sys.version_info >= (3, 0)):
        tree.write(fh, encoding='unicode', xml_declaration=True, method='xml')
    elif (sys.version_info >= (2, 7)):
        tree.write(fh, encoding='UTF-8', xml_declaration=True, method='xml')
    else:  # python2.6
        tree.write(fh, encoding='UTF-8')
    if (xml_buf is not None):
        if (sys.version_info >= (3, 0)):
            return(xml_buf.getvalue())
        else:
            return(xml_buf.getvalue().decode('utf-8'))
971,849
Parse rs:md attributes returning a dict of the data. Parameters: md_element - etree element <rs:md> context - context for error reporting
def md_from_etree(self, md_element, context=''):
    md = {}
    # grab all understood attributes into md dict
    for att in ('capability', 'change', 'hash', 'length', 'path',
                'mime_type', 'md_at', 'md_completed', 'md_from', 'md_until'):
        xml_att = self._xml_att_name(att)
        val = md_element.attrib.get(xml_att, None)
        if (val is not None):
            md[att] = val
    # capability. Allow this to be missing but do a very simple syntax
    # check on plausible values if present
    if ('capability' in md):
        if (re.match(r"^[\w\-]+$", md['capability']) is None):
            # refer to the parsed value (bare `capability` was undefined)
            raise SitemapParseError(
                "Bad capability name '%s' in %s" % (md['capability'], context))
    # change should be one of defined values
    if ('change' in md):
        if (md['change'] not in ['created', 'updated', 'deleted']):
            self.logger.warning(
                "Bad change attribute in <rs:md> for %s" % (context))
    # length should be an integer
    if ('length' in md):
        try:
            md['length'] = int(md['length'])
        except ValueError as e:
            raise SitemapParseError(
                "Invalid length element in <rs:md> for %s" % (context))
    return(md)
971,854
Parse rs:ln element from an etree, returning a dict of the data. Parameters: ln_element - etree element <rs:ln> context - context string for error reporting
def ln_from_etree(self, ln_element, context=''):
    ln = {}
    # grab all understood attributes into ln dict
    for att in ('hash', 'href', 'length', 'modified',
                'path', 'rel', 'pri', 'mime_type'):
        xml_att = self._xml_att_name(att)
        val = ln_element.attrib.get(xml_att, None)
        if (val is not None):
            ln[att] = val
    # now do some checks and conversions...
    # href (MANDATORY)
    if ('href' not in ln):
        raise SitemapParseError(
            "Missing href in <rs:ln> in %s" % (context))
    # rel (MANDATORY)
    if ('rel' not in ln):
        raise SitemapParseError("Missing rel in <rs:ln> in %s" % (context))
    # length in bytes
    if ('length' in ln):
        try:
            ln['length'] = int(ln['length'])
        except ValueError as e:
            raise SitemapParseError(
                "Invalid length attribute value in <rs:ln> for %s" % (context))
    # pri - priority, must be a number between 1 and 999999
    if ('pri' in ln):
        try:
            ln['pri'] = int(ln['pri'])
        except ValueError as e:
            raise SitemapParseError(
                "Invalid pri attribute in <rs:ln> for %s" % (context))
        if (ln['pri'] < 1 or ln['pri'] > 999999):
            raise SitemapParseError(
                "Bad pri attribute value in <rs:ln> for %s" % (context))
    return(ln)
971,855
Add element with name and atts to etree iff there are any atts. Parameters: etree - an etree object name - XML element name atts - dict of attribute values. Attribute names are transformed
def add_element_with_atts_to_etree(self, etree, name, atts):
    xml_atts = {}
    for att in atts.keys():
        val = atts[att]
        if (val is not None):
            xml_atts[self._xml_att_name(att)] = str(val)
    if (len(xml_atts) > 0):
        e = Element(name, xml_atts)
        if (self.pretty_xml):
            e.tail = "\n"
        etree.append(e)
971,856
Remove ``exclude`` symbols from ``text``.

Example:
    >>> remove("example text", string.whitespace)
    'exampletext'

Args:
    text (str): The text to modify
    exclude (iterable): The symbols to exclude

Returns:
    ``text`` with ``exclude`` symbols removed
def remove(text, exclude):
    exclude = ''.join(str(symbol) for symbol in exclude)
    return text.translate(str.maketrans('', '', exclude))
971,868
Split ``text`` into ``n_columns`` many columns.

Example:
    >>> split_columns("example", 2)
    ['eape', 'xml']

Args:
    text (str): The text to split
    n_columns (int): The number of columns to create

Returns:
    List of columns

Raises:
    ValueError: If n_columns is <= 0 or > len(text)
def split_columns(text, n_columns):
    if n_columns <= 0 or n_columns > len(text):
        raise ValueError("n_columns must be within the bounds of 1 and text length")
    return [text[i::n_columns] for i in range(n_columns)]
971,869
Combine ``columns`` into a single string.

Example:
    >>> combine_columns(['eape', 'xml'])
    'example'

Args:
    columns (iterable): ordered columns to combine

Returns:
    String of combined columns
def combine_columns(columns):
    columns_zipped = itertools.zip_longest(*columns)
    return ''.join(x for zipped in columns_zipped for x in zipped if x)
971,870
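The two helpers are inverses, which makes a round trip a handy sanity check:

    text = "example"
    cols = split_columns(text, 2)   # ['eape', 'xml']
    assert combine_columns(cols) == text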
Yield the ngrams in ``text``.

Example:
    >>> for ngram in iterate_ngrams("example", 4):
    ...     print(ngram)
    exam
    xamp
    ampl
    mple

Args:
    text (str): text to iterate over
    n (int): size of window for iteration

Returns:
    List of the ngrams in the text

Raises:
    ValueError: If n is non-positive
def iterate_ngrams(text, n):
    if n <= 0:
        raise ValueError("n must be a positive integer")
    return [text[i: i + n] for i in range(len(text) - n + 1)]
971,871
Group ``text`` into blocks of ``size``.

Example:
    >>> group("test", 2)
    ['te', 'st']

Args:
    text (str): text to separate
    size (int): size of groups to split the text into

Returns:
    List of n-sized groups of text

Raises:
    ValueError: If size is non-positive
def group(text, size):
    if size <= 0:
        raise ValueError("size must be a positive integer")
    return [text[i:i + size] for i in range(0, len(text), size)]
971,872
Decrypt Simple Substitution enciphered ``ciphertext`` using ``key``.

Example:
    >>> decrypt("PQSTUVWXYZCODEBRAKINGFHJLM", "XUOOB")
    HELLO

Args:
    key (iterable): The key to use
    ciphertext (str): The text to decrypt

Returns:
    Decrypted ciphertext
def decrypt(key, ciphertext):
    # TODO: Is it worth keeping this here, or should I only accept strings?
    key = ''.join(key)
    alphabet = string.ascii_letters
    cipher_alphabet = key.lower() + key.upper()
    return ciphertext.translate(str.maketrans(cipher_alphabet, alphabet))
971,976
Compute the score of a text by using the frequencies of ngrams.

Example:
    >>> fitness = NgramScorer(english.unigrams)
    >>> fitness("ABC")
    -4.3622319742618245

Args:
    frequency_map (dict): ngram to frequency mapping
def NgramScorer(frequency_map):
    # Calculate the log probability
    length = len(next(iter(frequency_map)))
    # TODO: 0.01 is a magic number. Needs to be better than that.
    floor = math.log10(0.01 / sum(frequency_map.values()))
    ngrams = frequency.frequency_to_probability(frequency_map,
                                                decorator=math.log10)

    def inner(text):
        # I don't like this; it is only for the .upper() to work,
        # but I feel as though this can be removed in later refactoring
        text = ''.join(text)
        text = remove(text.upper(), string.whitespace + string.punctuation)
        return sum(ngrams.get(ngram, floor)
                   for ngram in iterate_ngrams(text, length))

    return inner
972,229