def has_table(self, name):
    return len(self.sql("SELECT name FROM sqlite_master WHERE type='table' AND name=?",
                        parameters=(name,), asrecarray=False, cache=False)) > 0
Return ``True`` if the table *name* exists in the database.
def close(self):
    self.__decrement_connection_counter()
    if self.connection_count == 0:
        if self.dbfile == ":memory:":
            SQL = """DROP TABLE IF EXISTS __self__"""
            self.sql(SQL, asrecarray=False, cache=False)
        else:
            self.connection.commit()
            self.connection.close()
Clean up (if no more connections to the db exist). For in-memory databases, delete the underlying SQL table; for on-disk databases, commit and close the connection.
def append(self, k, v):
    self.__ringbuffer.append(k)
    super(KRingbuffer, self).__setitem__(k, v)
    self._prune()
x.append(k, v) -- add key *k* with value *v*, evicting the oldest key once capacity is exceeded.
def _prune(self):
    delkeys = [k for k in self.keys() if k not in self.__ringbuffer]
    for k in delkeys:  # necessary because dict is changed during iterations
        super(KRingbuffer, self).__delitem__(k)
Primitive way to keep the dict in sync with the ring buffer: drop any keys that have fallen out of the buffer.
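For illustration, a self-contained sketch of the same pattern -- a dict bounded by a deque-backed ring buffer; the class and attribute names here are illustrative, not the original library's:

from collections import deque

class BoundedDict(dict):
    """Keep only the k most recently appended keys (illustrative sketch)."""
    def __init__(self, k):
        super(BoundedDict, self).__init__()
        self._ring = deque(maxlen=k)

    def append(self, key, value):
        self._ring.append(key)
        self[key] = value
        # prune keys that fell out of the ring, exactly as _prune() above does
        for stale in [k for k in list(self.keys()) if k not in self._ring]:
            del self[stale]

d = BoundedDict(2)
d.append('a', 1); d.append('b', 2); d.append('c', 3)
assert set(d) == {'b', 'c'}  # 'a' was evicted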
def chunk_on(pipeline, new_chunk_signal, output_type=tuple):
    ''' split the stream into separate chunks based on a new chunk signal '''
    assert iterable(pipeline), 'chunks needs pipeline to be iterable'
    assert callable(new_chunk_signal), 'chunks needs new_chunk_signal to be callable'
    assert callable(output_type), 'chunks needs output_type to be callable'
    out = deque()
    for i in pipeline:
        if new_chunk_signal(i) and len(out):  # if new chunk start detected
            yield output_type(out)
            out.clear()
        out.append(i)
    # after looping, if there is anything in out, yield that too
    if len(out):
        yield output_type(out)
split the stream into separate chunks based on a new chunk signal
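A quick usage sketch of chunk_on as reconstructed above (the `iterable` helper is assumed to exist in the original module; a stand-in is provided here so the snippet runs on its own):

from collections import deque

def iterable(x):  # stand-in for the module's own helper
    try:
        iter(x)
        return True
    except TypeError:
        return False

# chunk a stream of ints, starting a new chunk at each 0
stream = [0, 1, 2, 0, 3, 0]
print(list(chunk_on(stream, lambda i: i == 0)))
# [(0, 1, 2), (0, 3), (0,)]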
def repeat(f, dt=1/60):
    stop(f)
    pyglet.clock.schedule_interval(f, dt)
Repeatedly execute the function f with time interval dt.
def center_image(self, img):
    img.anchor_x = img.width // 2  # int
    img.anchor_y = img.height // 2
Sets an image's anchor point to its center
def get_persons(self):
    cs = self.data["to"]["data"]
    res = []
    for c in cs:
        res.append(c["name"])
    return res
Returns a list of strings representing the persons being chatted with.
def get_messages(self):
    cs = self.data["comments"]["data"]
    res = []
    for c in cs:
        res.append(Message(c, self))
    return res
Returns a list of Message objects representing the messages in the conversation.
def next(self):
    c = Conversation(self.data,
                     requests.get(self.data["comments"]["paging"]["next"]).json())
    if "error" in c.data["comments"] and c.data["comments"]["error"]["code"] == 613:
        raise LimitExceededException()
    return c
Returns the next page of the conversation; raises LimitExceededException if the API rate limit (error 613) is hit.
def main():
    opts = dict(
        name="clamavmirror",
        version='0.0.4',
        description="ClamAV Signature Mirroring Tool",
        long_description=get_readme(),
        keywords="clamav mirror mirroring mirror-tool signatures",
        author="Andrew Colin Kissa",
        author_email="[email protected]",
        url="https://github.com/akissa/clamavmirror",
        license="MPL 2.0",
        packages=[],
        entry_points={
            'console_scripts': [
                'clamavmirror=clamavmirror:main'
            ],
        },
        include_package_data=True,
        zip_safe=False,
        install_requires=['urllib3', 'dnspython', 'certifi'],
        classifiers=[
            'Development Status :: 4 - Beta',
            'Programming Language :: Python',
            'Programming Language :: Python :: 2.6',
            'Programming Language :: Python :: 2.7',
            'Topic :: Software Development :: Libraries :: Python Modules',
            'Intended Audience :: System Administrators',
            'Environment :: Console',
            'License :: OSI Approved :: Mozilla Public License 2.0 (MPL 2.0)',
            'Natural Language :: English',
            'Operating System :: OS Independent',
        ],
    )
    setup(**opts)
Main
def _subset_table(full_table, subset):
    if not subset:
        return full_table

    # TODO: Figure out syntax for logical or
    conditions = subset.replace(' ', '').split(';')

    valid = np.ones(len(full_table), dtype=bool)
    for condition in conditions:
        col = re.split("[<>=!]", condition)[0]
        comp = condition.replace(col, "")
        try:
            this_valid = eval("full_table['{0}']{1}".format(col, comp))
        except KeyError as e:  # catch error and redisplay for twiggy
            raise KeyError("Column '%s' not found" % e.message)
        valid = np.logical_and(valid, this_valid)

    return full_table[valid]
Return subtable matching all conditions in subset

Parameters
----------
full_table : dataframe
    Entire data table
subset : str
    String describing subset of data to use for analysis

Returns
-------
dataframe
    Subtable with records from table meeting requirements in subset
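A minimal usage sketch, assuming pandas and numpy are imported as in the function, and a subset string in the semicolon-joined comparison syntax the function parses:

import pandas as pd

table = pd.DataFrame({'year': [1999, 2001, 2005], 'count': [3, 7, 2]})
print(_subset_table(table, 'year>2000; count<5'))
# keeps only the row with year 2005, count 2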
def _subset_meta(full_meta, subset, incremented=False):
    if not subset:
        return full_meta, False

    meta = {}  # Make deepcopy of entire meta (all section dicts in meta dict)
    for key, val in full_meta.iteritems():
        meta[key] = copy.deepcopy(dict(val))

    conditions = subset.replace(' ', '').split(';')

    inc = False
    for condition in conditions:
        condition_list = re.split('[<>=]', condition)
        col = condition_list[0]
        val = condition_list[-1]

        try:
            col_step = meta[col]['step']
        except:  # If there's no metadata for this col, do nothing
            continue

        operator = re.sub('[^<>=]', '', condition)

        if operator == '==':
            meta[col]['min'] = val
            meta[col]['max'] = val
        elif operator == '>=':
            meta[col]['min'] = val
        elif operator == '>':
            if incremented:
                meta[col]['min'] = val
            else:
                meta[col]['min'] = str(eval(val) + eval(col_step))
            inc = True
        elif operator == '<=':
            meta[col]['max'] = val
        elif operator == '<':
            if incremented:
                meta[col]['max'] = val
            else:
                meta[col]['max'] = str(eval(val) - eval(col_step))
            inc = True
        else:
            raise ValueError, "Subset %s not valid" % condition

    return meta, inc
Return metadata reflecting all conditions in subset

Parameters
----------
full_meta : ConfigParser obj
    Metadata object
subset : str
    String describing subset of data to use for analysis
incremented : bool
    If True, the metadata has already been incremented

Returns
-------
Configparser object or dict
    Updated version of full_meta accounting for subset string
def _sar_ear_inner(patch, cols, splits, divs, y_func):
    (spp_col, count_col, x_col, y_col), patch = \
        _get_cols(['spp_col', 'count_col', 'x_col', 'y_col'], cols, patch)

    # Loop through each split
    result_list = []
    for substring, subpatch in _yield_subpatches(patch, splits):

        # Get A0
        A0 = _patch_area(subpatch, x_col, y_col)

        # Loop through all divisions within this split
        all_spp = np.unique(subpatch.table[spp_col])
        subresultx = []
        subresulty = []
        subresultnspp = []
        subresultnindivids = []
        subdivlist = _split_divs(divs)
        for subdiv in subdivlist:
            spatial_table = _yield_spatial_table(subpatch, subdiv, spp_col,
                                                 count_col, x_col, y_col)
            subresulty.append(y_func(spatial_table, all_spp))
            subresultx.append(A0 / eval(subdiv.replace(',', '*')))
            subresultnspp.append(np.mean(spatial_table['n_spp']))
            subresultnindivids.append(np.mean(spatial_table['n_individs']))

        # Append subset result
        subresult = pd.DataFrame({'div': subdivlist, 'x': subresultx,
                                  'y': subresulty, 'n_spp': subresultnspp,
                                  'n_individs': subresultnindivids})
        result_list.append((substring, subresult))

    return result_list
y_func is the function that calculates the mean number of species or endemics, respectively, for the SAR or EAR.
def _get_cols(special_col_names, cols, patch):
    # If cols not given, try to fall back on cols from metadata
    if not cols:
        if 'cols' in patch.meta['Description'].keys():
            cols = patch.meta['Description']['cols']
        else:
            raise NameError, ("cols argument not given, spp_col at a minimum "
                              "must be specified")

    # Parse cols string into dict
    cols = cols.replace(' ', '')
    col_list = cols.split(';')
    col_dict = {x.split(':')[0]: x.split(':')[1] for x in col_list}

    # Get special_col_names from dict
    result = []
    for special_col_name in special_col_names:
        col_name = col_dict.get(special_col_name, None)

        # Create a count col if it's requested and doesn't exist
        # (compare with ==, not 'is'; string identity is not guaranteed)
        if special_col_name == 'count_col' and col_name is None:
            col_name = 'count'
            patch.table['count'] = np.ones(len(patch.table))

        # All special cols must be specified (count must exist by now)
        if col_name is None:
            raise ValueError, ("Required column %s not specified" %
                               special_col_name)

        result.append(col_name)

    return tuple(result), patch
Retrieve values of special_cols from cols string or patch metadata
def _yield_subpatches(patch, splits, name='split'):
    if splits:
        subset_list = _parse_splits(patch, splits)
        for subset in subset_list:
            logging.info('Analyzing subset %s: %s' % (name, subset))
            subpatch = copy.copy(patch)
            subpatch.table = _subset_table(patch.table, subset)
            subpatch.meta, subpatch.incremented = _subset_meta(patch.meta,
                                                               subset,
                                                               incremented=True)
            yield subset, subpatch
    else:
        yield '', patch
Iterator for subtables defined by a splits string

Parameters
----------
patch : obj
    Patch object containing data to subset
splits : str
    Specifies how a column of a dataset should be split. See Notes.

Yields
------
tuple
    First element is subset string, second is subtable dataframe

Notes
-----
{0}
def _parse_splits(patch, splits):
    split_list = splits.replace(' ', '').split(';')
    subset_list = []  # List of all subset strings

    for split in split_list:
        col, val = split.split(':')

        if val == 'split':
            uniques = []
            for level in patch.table[col]:
                if level not in uniques:
                    uniques.append(level)
            level_list = [col + '==' + str(x) + '; ' for x in uniques]
        else:
            starts, ends = _col_starts_ends(patch, col, val)
            level_list = [col + '>=' + str(x) + '; ' + col + '<' + str(y) + '; '
                          for x, y in zip(starts, ends)]

        subset_list.append(level_list)

    # Get product of all string levels as list, conv to string, drop final ;
    return [''.join(x)[:-2] for x in _product(*subset_list)]
Parse splits string to get list of all associated subset strings.

Parameters
----------
patch : obj
    Patch object containing data to subset
splits : str
    Specifies how a column of a dataset should be split. See Notes.

Returns
-------
list
    List of subset strings derived from splits string

Notes
-----
{0}
def _product(*args, **kwds):
    pools = map(tuple, args) * kwds.get('repeat', 1)
    result = [[]]
    for pool in pools:
        result = [x + [y] for x in result for y in pool]
    return result
Generates cartesian product of lists given as arguments. From the itertools.product documentation.
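Usage sketch -- unlike itertools.product, this variant returns a list of lists rather than an iterator of tuples (and note the `* repeat` trick relies on Python 2's map() returning a list):

print(_product([1, 2], ['a', 'b']))
# [[1, 'a'], [1, 'b'], [2, 'a'], [2, 'b']]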
def _decdeg_distance(pt1, pt2):
    lat1, lon1 = pt1
    lat2, lon2 = pt2

    # Convert decimal degrees to radians
    lon1, lat1, lon2, lat2 = map(np.radians, [lon1, lat1, lon2, lat2])

    # haversine formula
    dlon = lon2 - lon1
    dlat = lat2 - lat1
    a = np.sin(dlat/2)**2 + np.cos(lat1) * np.cos(lat2) * np.sin(dlon/2)**2
    c = 2 * np.arcsin(np.sqrt(a))
    km = 6367 * c

    return km
Earth surface distance (in km) between decimal latlong points using Haversine approximation.

http://stackoverflow.com/questions/15736995/how-can-i-quickly-estimate-the-distance-between-two-latitude-longitude-points
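A quick numeric check (using the Earth radius of 6367 km hard-coded above):

# London (51.5074 N, 0.1278 W) to Paris (48.8566 N, 2.3522 E)
print(_decdeg_distance((51.5074, -0.1278), (48.8566, 2.3522)))
# roughly 343 km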
def empirical_cdf(data):
    vals = pd.Series(data).value_counts()
    ecdf = pd.DataFrame(data).set_index(keys=0)
    probs = pd.DataFrame(vals.sort_index().cumsum() / np.float(len(data)))
    ecdf = ecdf.join(probs)
    ecdf = ecdf.reset_index()
    ecdf.columns = ['data', 'ecdf']
    return ecdf
Generates an empirical cdf from data

Parameters
----------
data : iterable
    Empirical data

Returns
-------
DataFrame
    Columns 'data' and 'ecdf'. 'data' contains ordered data and 'ecdf'
    contains the corresponding ecdf values for the data.
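Usage sketch -- repeated values share one ECDF step:

print(empirical_cdf([1, 1, 2, 3]))
#    data  ecdf
# 0     1  0.50
# 1     1  0.50
# 2     2  0.75
# 3     3  1.00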
def _load_table(self, metadata_path, data_path):
    metadata_dir = os.path.dirname(os.path.expanduser(metadata_path))
    data_path = os.path.normpath(os.path.join(metadata_dir, data_path))
    extension = data_path.split('.')[-1]

    if extension == 'csv':
        full_table = pd.read_csv(data_path, index_col=False)
        table = _subset_table(full_table, self.subset)
        self.meta, _ = _subset_meta(self.meta, self.subset)
    elif extension in ['db', 'sql']:
        # TODO: deal with incrementing in DB table
        table = self._get_db_table(data_path, extension)
    else:
        raise TypeError('Cannot process file of type %s' % extension)

    return table
Load data table, taking subset if needed

Parameters
----------
metadata_path : str
    Path to metadata file
data_path : str
    Path to data file, absolute or relative to metadata file

Returns
-------
dataframe
    Table for analysis
def _get_db_table(self, data_path, extension):
    # TODO: This is probably broken
    raise NotImplementedError, "SQL and db file formats not yet supported"

    # Load table
    if extension == 'sql':
        con = lite.connect(':memory:')
        con.row_factory = lite.Row
        cur = con.cursor()
        with open(data_path, 'r') as f:
            sql = f.read()
        cur.executescript(sql)
    else:
        con = lite.connect(data_path)
        con.row_factory = lite.Row
        cur = con.cursor()

    cur.execute(self.subset)

    # Check that table is not empty
    db_info = cur.fetchall()
    try:
        col_names = db_info[0].keys()
    except IndexError:
        raise lite.OperationalError("Query %s to database %s is empty" %
                                    (self.subset, data_path))

    # Convert objects to tuples
    converted_info = [tuple(x) for x in db_info]

    # NOTE: Using default value for Unicode: Seems better than checking
    # lengths. Should we keep the type as unicode?
    dtypes = [type(x) if type(x) != unicode else 'S150' for x in db_info[0]]
    table = np.array(converted_info, dtype=zip(col_names, dtypes))
    con.commit()
    con.close()

    # Return a recarray for consistency
    # TODO: This should now be a pd.dataframe
    return table.view(np.recarray)
Query a database and return query result as a recarray

Parameters
----------
data_path : str
    Path to the database file
extension : str
    Type of database, either sql or db

Returns
-------
table : recarray
    The database query as a recarray
def _thread_excepthook():
    init_old = thread.Thread.__init__

    def init(self, *args, **kwargs):
        init_old(self, *args, **kwargs)
        run_old = self.run

        def run_with_except_hook(*args, **kw):
            try:
                run_old(*args, **kw)
            except (KeyboardInterrupt, SystemExit):
                raise
            except:
                sys.excepthook(*sys.exc_info())

        self.run = run_with_except_hook

    thread.Thread.__init__ = init
Make threads use sys.excepthook from parent process.

http://bugs.python.org/issue1230540
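Usage sketch, assuming the module imports `threading` under the name `thread`; once patched, exceptions escaping a thread's run() are routed through sys.excepthook:

import sys
import threading as thread  # matching the name used above

def log_hook(exc_type, exc, tb):
    print("unhandled in thread:", exc)

sys.excepthook = log_hook
_thread_excepthook()

t = thread.Thread(target=lambda: 1 / 0)
t.start()
t.join()
# -> "unhandled in thread: division by zero" via sys.excepthook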
def inherit_docstring_from(cls):
    def _doc(func):
        cls_docstring = getattr(cls, func.__name__).__doc__
        func_docstring = func.__doc__
        if func_docstring is None:
            func.__doc__ = cls_docstring
        else:
            new_docstring = func_docstring % dict(super=cls_docstring)
            func.__doc__ = new_docstring
        return func
    return _doc
This decorator modifies the decorated function's docstring by replacing occurrences of '%(super)s' with the docstring of the method of the same name from the class `cls`. If the decorated method has no docstring, it is simply given the docstring of the `cls` method. Extracted from scipy.misc.doccer.
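Usage sketch:

class Base(object):
    def area(self):
        """Return the area of the shape."""

class Square(Base):
    @inherit_docstring_from(Base)
    def area(self):
        """%(super)s  Overridden for squares."""
        return self.side ** 2

print(Square.area.__doc__)
# Return the area of the shape.  Overridden for squares.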
def doc_sub(*sub):
    def dec(obj):
        obj.__doc__ = obj.__doc__.format(*sub)
        return obj
    return dec
Decorator for performing substitutions in docstrings. Using @doc_sub(some_note, other_note) on a function with {0} and {1} in the docstring will substitute the contents of some_note and other_note for {0} and {1}, respectively. Decorator appears to work properly both with IPython help (tab completion and ?) and with Sphinx.
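Usage sketch -- `splits_note` is an illustrative placeholder, not a name from the original module:

splits_note = "A splits string has the form 'col:split' or 'col:width'."

@doc_sub(splits_note)
def analyze(patch, splits):
    """Run an analysis over splits.

    Notes
    -----
    {0}
    """

print(analyze.__doc__)  # {0} is replaced by the contents of splits_note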
def log_start_end(f):
    def inner(f, *args, **kwargs):
        logging.info('Starting %s' % f.__name__)
        res = f(*args, **kwargs)
        logging.info('Finished %s' % f.__name__)
        return res
    return decorator.decorator(inner, f)
Decorator to log start and end of function. Use of the decorator module here ensures that argspec will inspect the wrapped function, not the decorator itself. http://micheles.googlecode.com/hg/decorator/documentation.html
def check_parameter_file(filename):
    # Load file
    with open(filename, "r") as fin:
        content = fin.read()

    # Check cols and splits strings
    bad_names = []
    line_numbers = []
    strs = ["cols", "splits", "divs"]

    for tstr in strs:
        start = content.find(tstr)
        while start != -1:
            cols_str = "".join(content[start:].split("\n")[0]
                               .split("=")[-1].split(" "))
            semis = cols_str.count(";")

            # Get line number
            line_end = content.find("\n", start)
            line_number = content[:line_end].count("\n") + 1

            if tstr == "divs":
                colons = cols_str.count(",")
            else:
                colons = cols_str.count(":")

            if colons != (semis + 1):
                bad_names.append(tstr)
                line_numbers.append(line_number)

            start = content.find(tstr, start + 1)

    return bad_names, line_numbers
Does a rudimentary check of whether the cols, splits and divs strings in the parameter file are formatted properly. This is only a preliminary check and will only catch basic mistakes.

Parameters
----------
filename : str
    Path to parameters file

Returns
-------
tuple of lists
    Names of the possibly malformed strings, and the line numbers where
    they occur.
def handle_starttag(self, tag, attrs):
    if tag.lower() in self.allowed_tag_whitelist:
        if tag.lower() == 'ol':
            # we need a list to store the last
            # number used in the previous ordered lists
            self.previous_nbs.append(self.nb)
            self.nb = 0
            # we need to know which is the tag list
            self.previous_type_lists.append(tag.lower())
            # we must remove any non-relevant spacing and end of
            # line before
            self.result = self.result.rstrip()
        elif tag.lower() == 'ul':
            self.previous_type_lists.append(tag.lower())
            # we must remove any non-relevant spacing and end of
            # line before
            self.result = self.result.rstrip()
        elif tag.lower() == 'li':
            # we must remove any non-relevant spacing and end of
            # line before
            self.result = self.result.rstrip()
            if self.previous_type_lists[-1] == 'ol':
                self.nb += 1
                self.result += '\n' + self.line_quotation + \
                    ' ' * len(self.previous_type_lists) + \
                    str(self.nb) + '. '
            else:
                self.result += '\n' + self.line_quotation + \
                    ' ' * len(self.previous_type_lists) + '* '
        elif tag.lower() == 'a':
            # self.previous_type_lists.append(tag.lower())
            for (attr, value) in attrs:
                if attr.lower() == 'href':
                    self.url = value
                    self.result += '<' + value + '>'
Function called for new opening tags
def handle_data(self, data):
    if not self.silent:
        if self.url:
            if self.url == data:
                data = ''
            else:
                data = '(' + data + ')'
            self.url = ''
        self.result += cgi.escape(data, True)
        lines = data.splitlines()
        if len(lines) > 1:
            match_obj = RE_HTML_FIRST_NON_QUOTATION_CHAR_ON_LINE.search(
                lines[-1])
            if match_obj:
                self.line_quotation = '&gt;' * match_obj.start()
            else:
                self.line_quotation = ''
Function called for text nodes
def handle_endtag(self, tag):
    if tag.lower() in self.allowed_tag_whitelist:
        if tag.lower() in ['ul', 'ol']:
            self.previous_type_lists = self.previous_type_lists[:-1]
            if tag.lower() == 'ol':
                self.nb = self.previous_nbs[-1]
                self.previous_nbs = self.previous_nbs[:-1]
            # we must remove any non-relevant spacing and end of
            # line before
            self.result = self.result.rstrip()
        self.result += '\n' + self.line_quotation
Function called for ending of tags
def handle_entityref(self, name):
    char_code = html_entities.name2codepoint.get(name, None)
    if char_code is not None:
        try:
            self.result += unichr(char_code).encode("utf-8")
        except:
            return
Process a general entity reference of the form "&name;". Transform to text whenever possible.
def get_topic_sha3(event_block):
    '''
    takes an event block and returns a signature for sha3 hashing
    :param event_block:
    :return:
    '''
    sig = ""
    sig += event_block["name"]
    if not event_block["inputs"]:
        sig += "()"
        return sig
    sig += "("
    for input in event_block["inputs"]:
        sig += input["type"]
        sig += ","
    sig = sig[:-1]
    sig += ")"
    return sig
Takes an event block (ABI event description) and returns its signature string for sha3 hashing, e.g. ``Transfer(address,address,uint256)``.
def create_variant(cls, variant, **kwargs):
    kwargs['_return_http_data_only'] = True
    if kwargs.get('async'):
        return cls._create_variant_with_http_info(variant, **kwargs)
    else:
        (data) = cls._create_variant_with_http_info(variant, **kwargs)
        return data
Create Variant

Create a new Variant
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async=True

>>> thread = api.create_variant(variant, async=True)
>>> result = thread.get()

:param async bool
:param Variant variant: Attributes of variant to create (required)
:return: Variant
         If the method is called asynchronously,
         returns the request thread.
def delete_variant_by_id(cls, variant_id, **kwargs):
    kwargs['_return_http_data_only'] = True
    if kwargs.get('async'):
        return cls._delete_variant_by_id_with_http_info(variant_id, **kwargs)
    else:
        (data) = cls._delete_variant_by_id_with_http_info(variant_id, **kwargs)
        return data
Delete Variant

Delete an instance of Variant by its ID.
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async=True

>>> thread = api.delete_variant_by_id(variant_id, async=True)
>>> result = thread.get()

:param async bool
:param str variant_id: ID of variant to delete. (required)
:return: None
         If the method is called asynchronously,
         returns the request thread.
def get_variant_by_id(cls, variant_id, **kwargs):
    kwargs['_return_http_data_only'] = True
    if kwargs.get('async'):
        return cls._get_variant_by_id_with_http_info(variant_id, **kwargs)
    else:
        (data) = cls._get_variant_by_id_with_http_info(variant_id, **kwargs)
        return data
Find Variant

Return single instance of Variant by its ID.
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async=True

>>> thread = api.get_variant_by_id(variant_id, async=True)
>>> result = thread.get()

:param async bool
:param str variant_id: ID of variant to return (required)
:return: Variant
         If the method is called asynchronously,
         returns the request thread.
def list_all_variants(cls, **kwargs):
    kwargs['_return_http_data_only'] = True
    if kwargs.get('async'):
        return cls._list_all_variants_with_http_info(**kwargs)
    else:
        (data) = cls._list_all_variants_with_http_info(**kwargs)
        return data
List Variants

Return a list of Variants
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async=True

>>> thread = api.list_all_variants(async=True)
>>> result = thread.get()

:param async bool
:param int page: page number
:param int size: page size
:param str sort: page order
:return: page[Variant]
         If the method is called asynchronously,
         returns the request thread.
def replace_variant_by_id(cls, variant_id, variant, **kwargs):
    kwargs['_return_http_data_only'] = True
    if kwargs.get('async'):
        return cls._replace_variant_by_id_with_http_info(variant_id, variant, **kwargs)
    else:
        (data) = cls._replace_variant_by_id_with_http_info(variant_id, variant, **kwargs)
        return data
Replace Variant

Replace all attributes of Variant
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async=True

>>> thread = api.replace_variant_by_id(variant_id, variant, async=True)
>>> result = thread.get()

:param async bool
:param str variant_id: ID of variant to replace (required)
:param Variant variant: Attributes of variant to replace (required)
:return: Variant
         If the method is called asynchronously,
         returns the request thread.
def update_variant_by_id(cls, variant_id, variant, **kwargs):
    kwargs['_return_http_data_only'] = True
    if kwargs.get('async'):
        return cls._update_variant_by_id_with_http_info(variant_id, variant, **kwargs)
    else:
        (data) = cls._update_variant_by_id_with_http_info(variant_id, variant, **kwargs)
        return data
Update Variant

Update attributes of Variant
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async=True

>>> thread = api.update_variant_by_id(variant_id, variant, async=True)
>>> result = thread.get()

:param async bool
:param str variant_id: ID of variant to update. (required)
:param Variant variant: Attributes of variant to update. (required)
:return: Variant
         If the method is called asynchronously,
         returns the request thread.
def bethe_lattice(energy, hopping):
    energy = np.asarray(energy).clip(-2*hopping, 2*hopping)
    return np.sqrt(4*hopping**2 - energy**2) / (2*np.pi*hopping**2)
Density of states of the Bethe lattice in the infinite-dimensional limit (semicircular law).
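A quick sanity check: the semicircular DOS should integrate to one over its band [-2t, 2t]:

import numpy as np

t = 0.5
energies = np.linspace(-2*t, 2*t, 2001)
print(np.trapz(bethe_lattice(energies, t), energies))  # ~1.0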
def bethe_fermi(energy, quasipart, shift, hopping, beta):
    return fermi_dist(quasipart * energy - shift, beta) \
        * bethe_lattice(energy, hopping)
Product of the Bethe lattice DOS and the Fermi distribution.
def bethe_fermi_ene(energy, quasipart, shift, hopping, beta):
    return energy * bethe_fermi(energy, quasipart, shift, hopping, beta)
Product of the Bethe lattice DOS and the Fermi distribution, weighted by energy.
def bethe_filling_zeroT(fermi_energy, hopping):
    fermi_energy = np.asarray(fermi_energy).clip(-2*hopping, 2*hopping)
    return 1/2. + fermi_energy/2 * bethe_lattice(fermi_energy, hopping) \
        + np.arcsin(fermi_energy/2/hopping)/np.pi
Returns the average particle count for a given Fermi energy, for the semicircular density of states of the Bethe lattice.
def bethe_findfill_zeroT(particles, orbital_e, hopping):
    assert 0. <= particles <= len(orbital_e)
    zero = lambda e: np.sum([bethe_filling_zeroT(e - e_m, t)
                             for t, e_m in zip(hopping, orbital_e)]) - particles
    return fsolve(zero, 0)
Return the Fermi energy corresponding to the given particle count in a semicircular density of states of a Bethe lattice, in a multi-orbital case that can be non-degenerate.
def bethe_find_crystalfield(populations, hopping):
    zero = lambda orb: [bethe_filling_zeroT(-em, tz) - pop
                        for em, tz, pop in zip(orb, hopping, populations)]
    return fsolve(zero, np.zeros(len(populations)))
Return the orbital energies required to populate the system as desired by the given individual populations.
def _split_mod_var_names(resource_name):
    try:
        dot_index = resource_name.rindex('.')
    except ValueError:
        # no dot found
        return '', resource_name
    return resource_name[:dot_index], resource_name[dot_index + 1:]
Return (module_name, class_name) pair from given string.
def _get_var_from_string(item):
    modname, varname = _split_mod_var_names(item)
    if modname:
        mod = __import__(modname, globals(), locals(), [varname], -1)
        return getattr(mod, varname)
    else:
        return globals()[varname]
Resolve the named resource variable, importing its module first if the name is dotted.
def _handle_list(reclist):
    ret = []
    for item in reclist:
        recs = _handle_resource_setting(item)
        ret += [resource for resource in recs if resource.access_controller]
    return ret
Return list of resources that have access_controller defined.
def _ensure_content_type():
    from django.contrib.contenttypes.models import ContentType
    try:
        row = ContentType.objects.get(app_label=PERM_APP_NAME)
    except ContentType.DoesNotExist:
        row = ContentType(name=PERM_APP_NAME, app_label=PERM_APP_NAME,
                          model=PERM_APP_NAME)
        row.save()
    return row.id
Add the bulldog content type to the database if it's missing.
def _get_permission_description(permission_name):
    parts = permission_name.split('_')
    parts.pop(0)
    method = parts.pop()
    resource = ('_'.join(parts)).lower()
    return 'Can %s %s' % (method.upper(), resource)
Generate a descriptive string based on the permission name. For example: 'resource_Order_get' -> 'Can GET order'. TODO: add support for the resource name to have underscores.
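Worked example matching the docstring:

print(_get_permission_description('resource_Order_get'))
# Can GET order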
def _populate_permissions(resources, content_type_id):
    from django.contrib.auth.models import Permission

    # read the whole auth_permission table into memory
    db_perms = [perm.codename for perm in Permission.objects.all()]

    for resource in resources:
        # get all resource's permissions that are not already in db
        perms = [perm for perm
                 in resource.access_controller.get_perm_names(resource)
                 if perm not in db_perms]
        for perm in perms:
            _save_new_permission(perm, content_type_id)
Add all missing permissions to the database.
def parse_int(int_str):
    int_str = int_str.replace(',', '')
    factor = __get_factor(int_str)
    if factor != 1:
        int_str = int_str[:-1]
    try:
        return int(int_str.replace(',', '')) * factor
    except ValueError:
        return None
Parse a string of the form 1,234,567b into a Python integer. The terminal letter, if present, indicates e.g. billions.
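Usage sketch -- `__get_factor` is not shown here, so the suffix mapping (e.g. 'b' -> 10**9) is an assumption about its behavior:

print(parse_int('1,234b'))  # 1234000000, assuming 'b' maps to 10**9
print(parse_int('567'))     # 567
print(parse_int('oops'))    # None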
def parse_float(float_str):
    factor = __get_factor(float_str)
    if factor != 1:
        float_str = float_str[:-1]
    try:
        return float(float_str.replace(',', '')) * factor
    except ValueError:
        return None
Parse a string of the form 305.48b into a Python float. The terminal letter, if present, indicates e.g. billions.
def find_nonterminals_rewritable_to_epsilon(grammar):
    # type: (Grammar) -> Dict[Type[Nonterminal], Type[Rule]]
    # seed the dictionary with epsilon itself
    rewritable = dict()  # type: Dict[Type[Nonterminal], Optional[Type[Rule]]]
    rewritable[EPSILON] = None

    # iterate until the dictionary stops changing
    while True:
        working = rewritable.copy()
        for rule in grammar.rules:
            # no need to process rules we already know rewrite to epsilon
            if rule.fromSymbol in working:
                continue
            # check if the whole right side rewrites to epsilon
            right_side_rewrite = True
            for symbol in rule.right:
                if symbol not in rewritable:
                    right_side_rewrite = False
            # the whole right side can be reduced to epsilon, add left side
            if right_side_rewrite:
                working[rule.fromSymbol] = rule
        # Working set didn't change, we are done
        if working == rewritable:
            break
        # Otherwise swap the sets and iterate
        rewritable = working

    # delete epsilon from the dictionary
    del rewritable[EPSILON]
    return rewritable
Get nonterminals rewritable to epsilon.

:param grammar: Grammar where to search.
:return: Dictionary, where the key is a nonterminal rewritable to epsilon
    and the value is the rule responsible for it. The rule doesn't need to
    rewrite to epsilon directly; the whole right side can be rewritable to
    epsilon using different rules.
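The same fixed-point computation in a self-contained sketch, with the grammar reduced to plain (lhs, rhs) pairs -- this representation is an illustration, not the library's Grammar API:

def nullable(rules):
    """rules: list of (lhs, rhs) pairs; rhs is a tuple of symbols, () is epsilon."""
    result = set()
    changed = True
    while changed:
        changed = False
        for lhs, rhs in rules:
            if lhs not in result and all(s in result for s in rhs):
                result.add(lhs)
                changed = True
    return result

# S -> A B, A -> 'a' | eps, B -> eps
print(nullable([('S', ('A', 'B')), ('A', ('a',)), ('A', ()), ('B', ())]))
# {'A', 'B', 'S'}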
def init_default(self):
    import f311
    if self.default_filename is None:
        raise RuntimeError("Class '{}' has no default filename".
                           format(self.__class__.__name__))
    fullpath = f311.get_default_data_path(self.default_filename,
                                          class_=self.__class__)
    self.load(fullpath)
    name, ext = os.path.splitext(self.default_filename)
    new = a99.new_filename(os.path.join("./", name), ext)
    self.save_as(new)
Overridden to take the default database and save it locally.

The issue was that init_default() sets self.filename to None; however, there can be no SQLite database without a corresponding file (not using *memory* here).

Should not keep the default file open either (as it is in the API directory and shouldn't be messed with by the user).
def _do_save_as(self, filename):
    if filename != self.filename:
        self._ensure_filename()
        self._close_if_open()
        shutil.copyfile(self.filename, filename)
        self.__get_conn(filename=filename)
Closes connection, copies DB file, and opens again pointing to new file **Note** if filename equals current filename, does nothing!
def ensure_schema(self):
    self._ensure_filename()
    if not os.path.isfile(self.filename):
        self.create_schema()
Create file and schema if it does not exist yet.
def delete(self):
    self._ensure_filename()
    self._close_if_open()
    os.remove(self.filename)
Removes the .sqlite file. **CAREFUL**, needless to say.
def get_table_info(self, tablename):
    conn = self.__get_conn()
    ret = a99.get_table_info(conn, tablename)
    if len(ret) == 0:
        raise RuntimeError("Cannot get info for table '{}'".format(tablename))
    more = self.gui_info.get(tablename)
    for row in ret.values():
        caption, tooltip = None, None
        if more:
            info = more.get(row["name"])
            if info:
                caption, tooltip = info
        row["caption"] = caption
        row["tooltip"] = tooltip
    return ret
Returns information about fields of a specific table

Returns:
    OrderedDict(("fieldname", MyDBRow), ...)

**Note** Fields "caption" and "tooltip" are added to rows using information in moldb.gui_info
def __get_conn(self, flag_force_new=False, filename=None):
    flag_open_new = flag_force_new or not self._conn_is_open()
    if flag_open_new:
        if filename is None:
            filename = self.filename
        # funny that __get_conn() calls _get_conn() but that's it
        conn = self._get_conn(filename)
        self._conn = conn
    else:
        conn = self._conn
    return conn
Returns connection to database. Tries to return existing connection, unless flag_force_new.

Args:
    flag_force_new:
    filename:

Returns:
    sqlite3.Connection object

**Note** this is a private method because you can get a connection to any file, so it has to be used in the right moment
def flatten_multidict(multidict):
    return dict([(key, value if len(value) > 1 else value[0])
                 for (key, value) in multidict.iterlists()])
Return flattened dictionary from ``MultiDict``.
def __setitem(self, chunk, key, keys, value, extend=False):
    def setitem(chunk):
        if keys:
            return self.__setitem(chunk, keys[0], keys[1:], value, extend)
        else:
            return value

    if key in ['.', ']']:
        chunk[key] = value
    elif ']' in key:  # list
        key = int(key[:-1].replace('n', '-1'))
        if extend:
            if chunk is None:
                chunk = [None, ]
            else:
                if not isinstance(chunk, list):
                    chunk = [chunk, ]
                if key != -1:
                    chunk.insert(key, None)
                else:
                    chunk.append(None)
        else:
            if chunk is None:
                chunk = [None, ]
        chunk[key] = setitem(chunk[key])
    else:  # dict
        if extend:
            if chunk is None:
                chunk = {}
                chunk[key] = None
                chunk[key] = setitem(chunk[key])
            elif key not in chunk:
                chunk[key] = None
                chunk[key] = setitem(chunk[key])
            else:
                if keys:
                    chunk[key] = setitem(chunk[key])
                else:
                    if not isinstance(chunk[key], list):
                        chunk[key] = [chunk[key], ]
                    chunk[key].append(None)
                    chunk[key][-1] = setitem(chunk[key][-1])
        else:
            if chunk is None:
                chunk = {}
            if key not in chunk:
                chunk[key] = None
            chunk[key] = setitem(chunk[key])
    return chunk
Helper function to fill up the dictionary.
def set(self, key, value, extend=False, **kwargs):
    self.__setitem__(key, value, extend, **kwargs)
Extended standard set function.
def update_points(self):
    x, y, w, h = self.x, self.y, self.w, self.h
    self.points = (x, y,
                   x + w, y,
                   x + w, y + h,
                   x, y + h)
Normalize the rectangle into a polygon made up of multiple points, used for collision handling.
def create_default_database(reset: bool = False) -> GraphDatabaseInterface:
    import sqlalchemy
    from sqlalchemy.ext.declarative import declarative_base
    from sqlalchemy.orm import sessionmaker
    from sqlalchemy.pool import StaticPool

    Base = declarative_base()
    engine = sqlalchemy.create_engine("sqlite:///SpotifyArtistGraph.db",
                                      poolclass=StaticPool)
    Session = sessionmaker(bind=engine)

    dbi: GraphDatabaseInterface = create_graph_database_interface(
        sqlalchemy, Session(), Base, sqlalchemy.orm.relationship
    )

    if reset:
        Base.metadata.drop_all(engine)
        Base.metadata.create_all(engine)

    return dbi
Creates and returns a default SQLAlchemy database interface to use.

Arguments:
    reset (bool): Whether to reset the database if it happens to exist
        already.
def get_authentic_node_name(self, node_name: str) -> Optional[str]:
    items: List[NameExternalIDPair] = self._client.search_artists_by_name(node_name)
    return items[0].name if len(items) > 0 else None
Returns the exact, authentic node name for the given node name if a node corresponding to the given name exists in the graph (maybe not locally yet) or `None` otherwise.

By default, this method checks whether a node with the given name exists locally in the graph and returns `node_name` if it does, or `None` otherwise.

In `Graph` extensions that are used by applications where the user can enter potentially incorrect node names, this method should be overridden to improve usability.

Arguments:
    node_name (str): The node name to return the authentic node name for.

Returns:
    The authentic name of the node corresponding to the given node name
    or `None` if no such node exists.
def _load_neighbors_from_external_source(self) -> None:
    graph: SpotifyArtistGraph = self._graph
    items: List[NameExternalIDPair] = graph.client.similar_artists(self.external_id)

    limit: int = graph.neighbor_count if graph.neighbor_count > 0 else self._NEIGHBORS_TO_LOAD
    if len(items) > limit:
        del items[limit:]

    for item in items:
        neighbor: SpotifyArtistNode = graph.nodes.get_node_by_name(
            item.name, can_validate_and_load=True, external_id=item.external_id)
        # Strangely we need this guard because the Spotify API's search method
        # doesn't recognise certain artist names.
        # It could also be a bug in SpotifyClient.search_artists_by_name():
        # the artist name sent as a request parameter may not be encoded
        # 100% correctly... Anyway, this is a working hotfix.
        if neighbor is not None:
            graph.add_edge(self, neighbor)
Loads the neighbors of the node from the external data source: the similar artists the Spotify client returns for this artist.
def _create_node(self, index: int, name: str,
                 external_id: Optional[str] = None) -> SpotifyArtistNode:
    if external_id is None:
        graph: SpotifyArtistGraph = self._graph
        items: List[NameExternalIDPair] = graph.client.search_artists_by_name(name)
        for item in items:
            if item.name == name:
                external_id = item.external_id
                break
    return SpotifyArtistNode(graph=self._graph, index=index,
                             name=name, external_id=external_id)
Returns a new `SpotifyArtistNode` instance with the given index and name.

Arguments:
    index (int): The index of the node to create.
    name (str): The name of the node to create.
    external_id (Optional[str]): The external ID of the node.
def access_token(self) -> str:
    if self._token_expires_at < time.time() + self._REFRESH_THRESHOLD:
        self.request_token()
    return self._token["access_token"]
The access token stored within the requested token.
def request_token(self) -> None:
    response: requests.Response = requests.post(
        self._TOKEN_URL,
        auth=HTTPBasicAuth(self._client_id, self._client_key),
        data={"grant_type": self._GRANT_TYPE},
        verify=True
    )
    response.raise_for_status()
    self._token = response.json()
    self._token_expires_at = time.time() + self._token["expires_in"]
Requests a new Client Credentials Flow authentication token from the Spotify API and stores it in the `token` property of the object.

Raises:
    requests.HTTPError: If an HTTP error occurred during the request.
def search_artists_by_name(self, artist_name: str, limit: int = 5) -> List[NameExternalIDPair]:
    response: requests.Response = requests.get(
        self._API_URL_TEMPLATE.format("search"),
        params={"q": artist_name, "type": "artist", "limit": limit},
        headers={"Authorization": "Bearer {}".format(self._token.access_token)}
    )

    # TODO: handle API rate limiting
    response.raise_for_status()
    if not response.text:
        return []

    result: List[NameExternalIDPair] = []
    data: List[Dict] = response.json()["artists"]["items"]
    for artist in data:
        artist = NameExternalIDPair(artist["name"].strip(), artist["id"].strip())
        if not artist.name or not artist.external_id:
            raise SpotifyClientError("Name or ID is missing")
        result.append(artist)

    return result
Returns zero or more artist name - external ID pairs that match the specified artist name.

Arguments:
    artist_name (str): The artist name to search in the Spotify API.
    limit (int): The maximum number of results to return.

Returns:
    Zero or more artist name - external ID pairs.

Raises:
    requests.HTTPError: If an HTTP error occurred during the request.
    SpotifyClientError: If an invalid item is found.
def similar_artists(self, artist_id: str) -> List[NameExternalIDPair]:
    response: requests.Response = requests.get(
        self._API_URL_TEMPLATE.format("artists/{}/related-artists".format(artist_id)),
        headers={"Authorization": "Bearer {}".format(self._token.access_token)}
    )

    # TODO: handle API rate limiting
    response.raise_for_status()
    if not response.text:
        return []

    result: List[NameExternalIDPair] = []
    data: List[Dict] = response.json()["artists"]
    for artist in data:
        artist = NameExternalIDPair(artist["name"], artist["id"])
        if artist.name is None or artist.external_id is None:
            raise SpotifyClientError("Name or ID is missing")
        result.append(artist)

    return result
Returns zero or more similar artists (in the form of artist name - external ID pairs) to the one corresponding to the given artist ID.

Arguments:
    artist_id (str): The Spotify ID of the artist for whom similar
        artists are requested.

Returns:
    Zero or more artist name - external ID pairs.

Raises:
    requests.HTTPError: If an HTTP error occurred during the request.
    SpotifyClientError: If an invalid item is found.
def colors(lang="en"):
    cache_name = "colors.%s.json" % lang
    data = get_cached("colors.json", cache_name, params=dict(lang=lang))
    return data["colors"]
This resource returns all dyes in the game, including localized names and their color component information.

:param lang: The language to query the names for.

The response is a dictionary where color ids are mapped to a dictionary containing the following properties:

name (string)
    The name of the dye.
base_rgb (list)
    The base RGB values.
cloth (object)
    Detailed information on its appearance when applied on cloth armor.
leather (object)
    Detailed information on its appearance when applied on leather armor.
metal (object)
    Detailed information on its appearance when applied on metal armor.

The detailed information object contains the following properties:

brightness (number)
    The brightness.
contrast (number)
    The contrast.
hue (number)
    The hue in the HSL colorspace.
saturation (number)
    The saturation in the HSL colorspace.
lightness (number)
    The lightness in the HSL colorspace.
rgb (list)
    A list containing precalculated RGB values.
def event_names(lang="en"):
    cache_name = "event_names.%s.json" % lang
    data = get_cached("event_names.json", cache_name, params=dict(lang=lang))
    return dict([(event["id"], event["name"]) for event in data])
This resource returns an unordered list of the localized event names for the specified language.

:param lang: The language to query the names for.
:return: A dictionary where the key is the event id and the value is
    the name of the event in the specified language.
def event_details(event_id=None, lang="en"):
    if event_id:
        cache_name = "event_details.%s.%s.json" % (event_id, lang)
        params = {"event_id": event_id, "lang": lang}
    else:
        cache_name = "event_details.%s.json" % lang
        params = {"lang": lang}
    data = get_cached("event_details.json", cache_name, params=params)
    events = data["events"]
    return events.get(event_id) if event_id else events
This resource returns static details about available events.

:param event_id: Only list this event.
:param lang: Show localized texts in the specified language.

The response is a dictionary where the key is the event id, and the value is a dictionary containing the following properties:

name (string)
    The name of the event.
level (int)
    The event level.
map_id (int)
    The map where the event takes place.
flags (list)
    A list of additional flags. Possible flags are:
    ``group_event`` for group events, ``map_wide`` for map-wide events.
location (object)
    The location of the event.
    type (string)
        The type of the event location, can be ``sphere``, ``cylinder``
        or ``poly``.
    center (list)
        X, Y, Z coordinates of the event location.
    radius (number)
        (type ``sphere`` and ``cylinder``) Radius of the event location.
    z_range (list)
        (type ``poly``) List of minimum and maximum Z coordinate.
    points (list)
        (type ``poly``) List of points (X, Y) denoting the event
        location perimeter.

If an event_id is given, only the values for that event are returned.
def PhenomModel(self, r):
    if r <= 0:
        raise ValueError
    field = self.B0 + self.B1 * G4.m / r + self.B2 * math.exp(-1 * self.H * r / G4.m)
    return field
Fit to field map

A phenomenological fit by Ryan Bayes (Glasgow) to a field map generated by Bob Wands (FNAL). It assumes a 1 cm plate. This is dated January 30th, 2012.

Not defined for r <= 0.
def create_customer(cls, customer, **kwargs):
    kwargs['_return_http_data_only'] = True
    if kwargs.get('async'):
        return cls._create_customer_with_http_info(customer, **kwargs)
    else:
        (data) = cls._create_customer_with_http_info(customer, **kwargs)
        return data
Create Customer

Create a new Customer
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async=True

>>> thread = api.create_customer(customer, async=True)
>>> result = thread.get()

:param async bool
:param Customer customer: Attributes of customer to create (required)
:return: Customer
         If the method is called asynchronously,
         returns the request thread.
def delete_customer_by_id(cls, customer_id, **kwargs):
    kwargs['_return_http_data_only'] = True
    if kwargs.get('async'):
        return cls._delete_customer_by_id_with_http_info(customer_id, **kwargs)
    else:
        (data) = cls._delete_customer_by_id_with_http_info(customer_id, **kwargs)
        return data
Delete Customer

Delete an instance of Customer by its ID.
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async=True

>>> thread = api.delete_customer_by_id(customer_id, async=True)
>>> result = thread.get()

:param async bool
:param str customer_id: ID of customer to delete. (required)
:return: None
         If the method is called asynchronously,
         returns the request thread.
def get_customer_by_id(cls, customer_id, **kwargs):
    kwargs['_return_http_data_only'] = True
    if kwargs.get('async'):
        return cls._get_customer_by_id_with_http_info(customer_id, **kwargs)
    else:
        (data) = cls._get_customer_by_id_with_http_info(customer_id, **kwargs)
        return data
Find Customer

Return single instance of Customer by its ID.
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async=True

>>> thread = api.get_customer_by_id(customer_id, async=True)
>>> result = thread.get()

:param async bool
:param str customer_id: ID of customer to return (required)
:return: Customer
         If the method is called asynchronously,
         returns the request thread.
def list_all_customers(cls, **kwargs):
    kwargs['_return_http_data_only'] = True
    if kwargs.get('async'):
        return cls._list_all_customers_with_http_info(**kwargs)
    else:
        (data) = cls._list_all_customers_with_http_info(**kwargs)
        return data
List Customers

Return a list of Customers
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async=True

>>> thread = api.list_all_customers(async=True)
>>> result = thread.get()

:param async bool
:param int page: page number
:param int size: page size
:param str sort: page order
:return: page[Customer]
         If the method is called asynchronously,
         returns the request thread.
def replace_customer_by_id(cls, customer_id, customer, **kwargs):
    kwargs['_return_http_data_only'] = True
    if kwargs.get('async'):
        return cls._replace_customer_by_id_with_http_info(customer_id, customer, **kwargs)
    else:
        (data) = cls._replace_customer_by_id_with_http_info(customer_id, customer, **kwargs)
        return data
Replace Customer

Replace all attributes of Customer
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async=True

>>> thread = api.replace_customer_by_id(customer_id, customer, async=True)
>>> result = thread.get()

:param async bool
:param str customer_id: ID of customer to replace (required)
:param Customer customer: Attributes of customer to replace (required)
:return: Customer
         If the method is called asynchronously,
         returns the request thread.
def update_customer_by_id(cls, customer_id, customer, **kwargs):
    kwargs['_return_http_data_only'] = True
    if kwargs.get('async'):
        return cls._update_customer_by_id_with_http_info(customer_id, customer, **kwargs)
    else:
        (data) = cls._update_customer_by_id_with_http_info(customer_id, customer, **kwargs)
        return data
Update Customer

Update attributes of Customer
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async=True

>>> thread = api.update_customer_by_id(customer_id, customer, async=True)
>>> result = thread.get()

:param async bool
:param str customer_id: ID of customer to update. (required)
:param Customer customer: Attributes of customer to update. (required)
:return: Customer
         If the method is called asynchronously,
         returns the request thread.
def _create_class(rule, index):
    # type: (Type[Rule], int) -> Type[SplitRule]
    name = 'SplitRule[' + rule.__name__ + ';' + str(index) + ']'
    created = type(name, (SplitRule,), SplitRule.__dict__.copy())  # type: Type[SplitRule]
    created.rule = rule.rules[index]
    created.rule_index = index
    created.from_rule = rule
    return created
Create subtype of SplitRule based on rule.

:param rule: Rule from which the SplitRule derives.
:param index: Index of the rule (in the original Rule class) to use for the SplitRule.
:return: Class inherited from SplitRule representing the rule at index.
def _set_text_from_file_dialog(self, text):
    text = os.path.relpath(text, ".")
    self.edit.setText(text)
    self.dialog_path = text
    self._act_on_change()
Sets text making it a **relative path**
def main(arguments=None):
    # setup the command-line util settings
    su = tools(
        arguments=arguments,
        docString=__doc__,
        logLevel="WARNING",
        options_first=False,
        projectName=False
    )
    arguments, settings, log, dbConn = su.setup()

    # unpack remaining cl arguments using `exec` to setup the variable names
    # automatically
    for arg, val in arguments.iteritems():
        if arg[0] == "-":
            varname = arg.replace("-", "") + "Flag"
        else:
            varname = arg.replace("<", "").replace(">", "")
        if isinstance(val, str) or isinstance(val, unicode):
            exec(varname + " = '%s'" % (val,))
        else:
            exec(varname + " = %s" % (val,))
        if arg == "--dbConn":
            dbConn = val
        log.debug('%s = %s' % (varname, val,))

    from fundamentals.mysql import sqlite2mysql
    converter = sqlite2mysql(
        log=log,
        settings=settings,
        pathToSqlite=pathToSqliteDB,
        tablePrefix=tablePrefix,
        dbConn=dbConn
    )
    converter.convert_sqlite_to_mysql()

    return
The main function used when ``yaml_to_database.py`` is installed as a command-line tool
def download_data(self, configuration, output_file):
    params = configuration
    response = self.__app.native_api_call('metaql', 'download-data', params,
                                          self.__options, False, None, True,
                                          http_path="/api/v1/meta/")
    with open(output_file, 'wb') as out_file:
        shutil.copyfileobj(response.raw, out_file)
    del response
Executes the query specified in the configuration and serves the file for download.

:param configuration: Query configuration
:param output_file: Path where the file should be downloaded to
:return:
def get_schema(self, db_alias, entity):
    response = self.__app.native_api_call('metaql', 'schema/' + db_alias + '/' + entity,
                                          {}, self.__options, False, None, False,
                                          http_path="/api/v1/meta/",
                                          http_method="GET")
    return json.loads(response.text)
Returns the schema of an entity:
- Fields

:param db_alias: Database alias
:param entity: Entity alias
:return: dict
def set_dir(self, dir_):
    self.__lock_set_dir(dir_)
    self.__lock_auto_load()
    self.__lock_update_table()
    self.__update_info()
    self.__update_window_title()
Sets directory, auto-loads, updates all GUI contents.
def validate(self, value):
    if value in self.empty_values and self.required:
        raise ValidationError(self.error_messages['required'])
This was overridden to have our own ``empty_values``.
def clean(self, value):
    obj = self.factory.create(value)

    # todo: what if the field defines properties that have any of
    # these names:
    if obj:
        del obj.fields
        del obj.alias
        del obj.validators
        del obj.required
        del obj.factory

    # do own cleaning first...
    self._validate_existence(obj)
    self._run_validators(obj)

    # ret = {}
    # for name in self.fields.keys():
    #     ret[name] = getattr(obj, name)
    # return ret
    return obj
Clean the data and validate the nested spec. Implementation is the same as for other fields but in addition, this will propagate the validation to the nested spec.
def serialize(self, value, entity, request):
    self._validate_existence(value)
    self._run_validators(value)
    if not value:
        return value
    return self.factory.serialize(value, request)
Propagate to nested fields. :returns: data dictionary or ``None`` if no fields are present.
def _run_validators(self, value):
    errors = []
    for v in self.validators:
        try:
            v(value)
        except ValidationError, e:
            errors.extend(e.messages)
    if errors:
        raise ValidationError(errors)
Execute all associated validators.
def clean(self, value):
    value = super(ListField, self).clean(value)
    if value is not None:
        return map(self.itemspec.clean, value)
Propagate to list elements.
def serialize(self, value, entity, request):
    value = super(ListField, self).serialize(value, entity, request)
    if value is None:
        return
    ret = []
    for v in value:
        ret.append(self.itemspec.serialize(v, entity, request))
    return ret
Propagate to list elements.
def try_log_part(self, context=None, with_start_message=True):
    if context is None:
        context = {}
    self.__counter += 1
    if time.time() - self.__begin_time > self.__part_log_time_seconds:
        self.__begin_time = time.time()
        context['count'] = self.__counter
        if self.__total:
            self.__percent_done = int(self.__counter * 100 / self.__total)
            context['percentDone'] = self.__percent_done
            context['total'] = self.__total
        self.__log.info(msg=self.__log_message, context=context)
        return True
    elif self.__counter == 1:
        if with_start_message:
            # log message reads "Started loop: ..."
            self.__log.info(u"Начали цикл: " + self.__log_message)
        return True
    return False
Write a log entry if the time given by part_log_time_minutes has elapsed.

:return: boolean -- True if the log entry was written
def splitted_rules(root):
    # type: (Nonterminal) -> Nonterminal
    items = Traversing.post_order(root)
    items = filter(lambda x: isinstance(x, SplitRule), items)
    for i in items:
        # create the original rule
        newRule = i.from_rule()
        # replace it with the node in the tree
        Manipulations.replace(i, newRule)
    return root
Replace SplitRules in the parsed tree with the original rules.

This method is mandatory if you insert a Rule class with multiple rules into the grammar.

:param root: Root of the parsed tree.
:return: Modified tree.
def find_nonterminals_reachable_by_unit_rules(grammar):
    # type: (Grammar) -> UnitSymbolReachability
    # get nonterminals
    nonterminals = list(grammar.nonterminals)  # type: List[Type[Nonterminal]]
    count_of_nonterms = len(nonterminals)

    # create indexes for nonterminals
    nonterm_to_index = dict()  # type: Dict[Type[Nonterminal], int]
    for i in range(count_of_nonterms):
        nonterm_to_index[nonterminals[i]] = i

    # prepare matrix
    field = [[None for _ in nonterminals] for _ in nonterminals]  # type: MATRIX_OF_UNIT_RULES

    # fill existing unit rules
    for rule in grammar.rules:
        if _is_unit(rule):
            field[nonterm_to_index[rule.fromSymbol]][nonterm_to_index[rule.toSymbol]] = [rule]

    # run Floyd-Warshall
    f = field
    for k in range(count_of_nonterms):
        for i in range(count_of_nonterms):
            for j in range(count_of_nonterms):
                if f[i][k] is not None and f[k][j] is not None:
                    if f[i][j] is None or len(f[i][j]) > len(f[i][k]) + len(f[k][j]):
                        f[i][j] = f[i][k] + f[k][j]

    # return results
    return UnitSymbolReachability(f, nonterm_to_index)
Get the nonterminals for which unit rules exist.

:param grammar: Grammar where to search.
:return: Instance of UnitSymbolReachability.
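Stripped of the grammar types, the core of the routine is Floyd-Warshall over shortest unit-rule chains; an illustrative standalone version:

def shortest_chains(n, direct):
    """direct[i][j] is [rule] if a unit rule i -> j exists, else None."""
    f = [row[:] for row in direct]
    for k in range(n):
        for i in range(n):
            for j in range(n):
                if f[i][k] is not None and f[k][j] is not None:
                    if f[i][j] is None or len(f[i][j]) > len(f[i][k]) + len(f[k][j]):
                        f[i][j] = f[i][k] + f[k][j]
    return f

# unit rules 0 -> 1 and 1 -> 2 compose into a chain from 0 to 2
direct = [[None, ['r01'], None],
          [None, None, ['r12']],
          [None, None, None]]
print(shortest_chains(3, direct)[0][2])  # ['r01', 'r12']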
def reachables(self, from_symbol):
    # type: (Type[Nonterminal]) -> List[Type[Nonterminal]]
    if from_symbol not in self.t:
        return []
    reachable = []
    index = self.t[from_symbol]
    for n, i in self.t.items():
        if len(self.f[index][i] or []) > 0:
            reachable.append(n)
    return reachable
Get list of nonterminals that are rewritable from the nonterminal passed as parameter using only unit rules.

:param from_symbol: Symbol for which to search.
:return: List of nonterminals.
def path_rules(self, from_symbol, to_symbol):
    # type: (Type[Nonterminal], Type[Nonterminal]) -> List[Type[Rule]]
    if from_symbol not in self.t or to_symbol not in self.t:
        return []
    return self.f[self.t[from_symbol]][self.t[to_symbol]] or []
Get the sequence of unit rules between the first and second parameter.

:param from_symbol: From which symbol.
:param to_symbol: To which symbol.
:return: Sequence of unit rules. An empty sequence means there is no way between them.
def get_all_active(self):
    now = timezone.now()
    return self.select_related().filter(active_datetime__lte=now,
                                        inactive_datetime__gte=now) \
               .order_by('active_datetime')
Get all of the active messages ordered by the active_datetime.
def render_word(self, word, size, color):
    '''Creates a surface that contains a word.'''
    pygame.font.init()
    font = pygame.font.Font(None, size)
    self.rendered_word = font.render(word, 0, color)
    self.word_size = font.size(word)
Creates a surface that contains a word.