code: string (lengths 52 to 7.75k)
docs: string (lengths 1 to 5.85k)
def tagAttributes(fdef_master_list, node, depth=0):
    '''recursively tag objects with sizes, depths and path names
    '''
    if type(node) == list:
        for i in node:
            depth += 1
            tagAttributes(fdef_master_list, i, depth)
    if type(node) == dict:
        for x in fdef_master_list:
            if jsName(x.path, x.name) == node['name']:
                node['path'] = x.path
                node['depth'] = depth
                if "children" not in node:
                    node["size"] = x.weight
        for i in node.values():
            depth += 1
            tagAttributes(fdef_master_list, i, depth)
    return node
recursively tag objects with sizes, depths and path names
def tagAttributes_while(fdef_master_list, root):
    '''Tag each node under root with the appropriate depth.
    '''
    depth = 0
    current = root
    untagged_nodes = [root]
    while untagged_nodes:
        current = untagged_nodes.pop()
        # Attach the path from the matching master-list entry.
        for x in fdef_master_list:
            if jsName(x.path, x.name) == current['name']:
                current['path'] = x.path
        # Queue children one level deeper.
        if "children" in current:
            for child in current["children"]:
                child["depth"] = depth
                untagged_nodes.append(child)
        if "depth" not in current:
            current["depth"] = depth
        depth += 1
    return root
Tag each node under root with the appropriate depth.
def noEmptyNests(node):
    '''recursively make sure that no dictionaries inside node contain
    empty children lists
    '''
    if type(node) == list:
        for i in node:
            noEmptyNests(i)
    if type(node) == dict:
        for i in node.values():
            noEmptyNests(i)
        if node["children"] == []:
            node.pop("children")
    return node
recursively make sure that no dictionaries inside node contain empty children lists
def remove_old_tmp_files(profiles=None, max_lifetime=(7 * 24)):
    assert isinstance(profiles, (list, tuple)) or profiles is None
    if profiles is None:
        profiles = dju_settings.DJU_IMG_UPLOAD_PROFILES.keys()
    profiles = set(('default',) + tuple(profiles))
    total = removed = 0
    old_dt = datetime.datetime.utcnow() - datetime.timedelta(hours=max_lifetime)
    for profile in profiles:
        conf = get_profile_configs(profile=profile)
        root_path = os.path.join(settings.MEDIA_ROOT, dju_settings.DJU_IMG_UPLOAD_SUBDIR, conf['PATH'])
        for file_path in get_files_recursive(root_path):
            m = re_tmp.match(os.path.basename(file_path))
            if m is None:
                continue
            total += 1
            fdt = dtstr_to_datetime(m.group('dtstr'))
            if fdt and old_dt > fdt:
                os.remove(file_path)
                removed += 1
    return removed, total
Removes old temp files that are older than max_lifetime hours. If profiles is None, all profiles are used.
def next_task(self, item, **kwargs): filename = os.path.basename(item) try: self.tx_importer.import_batch(filename=filename) except TransactionImporterError as e: raise TransactionsFileQueueError(e) from e else: self.archive(filename)
Calls import_batch for the next filename in the queue and "archives" the file. The archive folder is typically the folder for the deserializer queue.
def get_public_comments_for_model(model):
    if not IS_INSTALLED:
        # No local comments, return empty queryset.
        # The project might be using DISQUS or Facebook comments instead.
        return CommentModelStub.objects.none()
    else:
        return CommentModel.objects.for_model(model).filter(is_public=True, is_removed=False)
Get visible comments for the model.
def get_comments_are_open(instance):
    if not IS_INSTALLED:
        return False
    try:
        # Get the moderator which is installed for this model.
        mod = moderator._registry[instance.__class__]
    except KeyError:
        # No moderator = no restrictions
        return True
    # Check the 'enable_field', 'auto_close_field' and 'close_after',
    # by reusing the basic Django policies.
    return CommentModerator.allow(mod, None, instance, None)
Check if comments are open for the instance
def get_comments_are_moderated(instance):
    if not IS_INSTALLED:
        return False
    try:
        # Get the moderator which is installed for this model.
        mod = moderator._registry[instance.__class__]
    except KeyError:
        # No moderator = no moderation
        return False
    # Check the 'auto_moderate_field', 'moderate_after',
    # by reusing the basic Django policies.
    return CommentModerator.moderate(mod, None, instance, None)
Check if comments are moderated for the instance
def load_file(self, filename): with open(filename, 'r') as sourcefile: self.set_string(sourcefile.read())
Read in file contents and set the current string.
def set_string(self, string): self.string = string self.length = len(string) self.reset_position()
Set the working string and its length then reset positions.
def add_string(self, string): self.string += string self.length += len(string) self.eos = 0
Add to the working string and its length and reset eos.
def reset_position(self): self.pos = 0 self.col = 0 self.row = 1 self.eos = 0
Reset all current positions.
def has_space(self, length=1, offset=0): return self.pos + (length + offset) - 1 < self.length
Return a boolean indicating whether self.pos + length (plus offset) is within the working string length.
def eol_distance_next(self, offset=0): distance = 0 for char in self.string[self.pos + offset:]: if char == '\n': break else: distance += 1 return distance
Return the number of characters until the next newline.
def eol_distance_last(self, offset=0): distance = 0 for char in reversed(self.string[:self.pos + offset]): if char == '\n': break else: distance += 1 return distance
Return the number of characters until the last newline.
def spew_length(self, length):
    pos = self.pos
    if not pos or length > pos:
        return None
    row = self.row
    for char in reversed(self.string[pos - length:pos]):
        pos -= 1
        if char == '\n':  # handle a newline char
            row -= 1
    self.pos = pos
    self.col = self.eol_distance_last()
    self.row = row
    if self.has_space():  # Clear eos now that there is space again.
        self.eos = 0
Move current position backwards by length.
def eat_length(self, length):
    pos = self.pos
    if self.eos or pos + length > self.length:
        return None
    col = self.col
    row = self.row
    for char in self.string[pos:pos + length]:
        col += 1
        pos += 1
        if char == '\n':  # handle a newline char
            col = 0
            row += 1
    self.pos = pos
    self.col = col
    self.row = row
    if not self.has_space():  # Set eos if there is no more space left.
        self.eos = 1
Move current position forward by length and set eos if needed.
def eat_string(self, string):
    pos = self.pos
    if self.eos or pos + len(string) > self.length:
        return None
    col = self.col
    row = self.row
    for char in string:
        col += 1
        pos += 1
        if char == '\n':  # handle a newline char
            col = 0
            row += 1
    self.pos = pos
    self.col = col
    self.row = row
    if not self.has_space():  # Set eos if there is no more space left.
        self.eos = 1
Move current position by length of string and count lines by \n.
def eat_line(self): if self.eos: return None eat_length = self.eat_length get_char = self.get_char has_space = self.has_space while has_space() and get_char() != '\n': eat_length(1) eat_length(1)
Move current position forward until the next line.
def get_char(self, offset=0): if not self.has_space(offset=offset): return '' return self.string[self.pos + offset]
Return the current character in the working string.
def get_length(self, length, trim=0, offset=0): if trim and not self.has_space(offset + length): return self.string[self.pos + offset:] elif self.has_space(offset + length): return self.string[self.pos + offset:self.pos + offset + length] else: return ''
Return string at current position + length. If trim is True, return as much as possible before eos.
def get_string(self, offset=0):
    if not self.has_space(offset=offset):
        return ''
    # Get a char for each char in the current string from pos onward
    # so long as the char is not whitespace.
    string = self.string
    pos = self.pos + offset
    for i, char in enumerate(string[pos:]):
        if char.isspace():
            return string[pos:pos + i]
    else:
        return string[pos:]
Return non space chars from current position until a whitespace.
def rest_of_string(self, offset=0): if self.has_space(offset=offset): return self.string[self.pos + offset:] else: return ''
Return a copy of the string from the current position to the end of the source string.
def get_current_line(self): if not self.has_space(): return None pos = self.pos - self.col string = self.string end = self.length output = [] while pos < len(string) and string[pos] != '\n': output.append(string[pos]) pos += 1 if pos == end: break else: output.append(string[pos]) if not output: return None return SourceLine(''.join(output), self.row)
Return a SourceLine of the current line.
def get_lines(self, first, last): line = 1 linestring = [] linestrings = [] for char in self.string: if line >= first and line <= last: linestring.append(char) if char == '\n': linestrings.append((''.join(linestring), line)) linestring = [] elif line > last: break if char == '\n': line += 1 if linestring: linestrings.append((''.join(linestring), line)) elif not linestrings: return None return [SourceLine(string, lineno) for string, lineno in linestrings]
Return SourceLines for lines between and including first & last.
def get_surrounding_lines(self, past=1, future=1): string = self.string pos = self.pos - self.col end = self.length row = self.row linesback = 0 while linesback > -past: if pos <= 0: break elif string[pos - 2] == '\n': linesback -= 1 pos -= 1 output = [] linestring = [] lines = future + 1 while linesback < lines: if pos >= end: linestring.append(string[pos - 1]) output.append( SourceLine(''.join(linestring[:-1]), row + linesback)) break elif string[pos] == '\n': linestring.append(string[pos]) pos += 1 output.append( SourceLine(''.join(linestring), row + linesback)) linesback += 1 linestring = [] linestring.append(string[pos]) pos += 1 return output
Return the current line plus the given number of previous (past) and following (future) lines, as a list of SourceLine objects.
def get_all_lines(self): output = [] line = [] lineno = 1 for char in self.string: line.append(char) if char == '\n': output.append(SourceLine(''.join(line), lineno)) line = [] lineno += 1 if line: output.append(SourceLine(''.join(line), lineno)) return output
Return all lines of the SourceString as a list of SourceLine objects.
def match_string(self, string, word=0, offset=0): if word: return self.get_string(offset) == string return self.get_length(len(string), offset) == string
Returns 1 if string can be matched against the SourceString's current position. If word is >= 1 then it will only match the string when it is followed by whitespace.
def match_any_string(self, strings, word=0, offset=0): if word: current = self.get_string(offset) return current if current in strings else '' current = '' currentlength = 0 length = 0 for string in strings: length = len(string) if length != currentlength: current = self.get_length(length, offset) if string == current: return string return ''
Attempts to match each string in strings in order. Returns the string that matches, or an empty string if there is no match. If the word arg is >= 1, only match a string that is followed by whitespace, which is much faster. If word is 0, you should sort the strings argument by length yourself.
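A minimal usage sketch for the two matchers (hypothetical: it assumes a SourceString instance exposing set_string together with the methods above; the input text is made up):

source = SourceString()
source.set_string("def main(): pass")

source.match_string("def", word=1)                  # True: "def" is followed by whitespace
source.match_any_string(("class", "def"), word=1)   # "def"
source.match_any_string(("de", "d"))                # "de"; with word=0, order candidates by length yourself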
def match_any_char(self, chars, offset=0): if not self.has_space(offset=offset): return '' current = self.string[self.pos + offset] return current if current in chars else ''
Match and return the current SourceString char if it is in chars.
def match_function_pattern(self, first, rest=None, least=1, offset=0): if not self.has_space(offset=offset): return '' firstchar = self.string[self.pos + offset] if not first(firstchar): return '' output = [firstchar] pattern = first if rest is None else rest for char in self.string[self.pos + offset + 1:]: if pattern(char): output.append(char) else: break if len(output) < least: return '' return ''.join(output)
Match each char sequentially from the current SourceString position until the pattern doesn't match, and return all matches. The integer argument least defines the minimum number of chars that must be matched. This version takes functions instead of string patterns. Each function must take one argument, a string, and return a value that can be evaluated as True or False. If rest is defined then first is used only to match the first char and the remaining chars are matched against rest.
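For example, built-in character predicates such as str.isalpha and str.isalnum can serve as first and rest. A hedged sketch, assuming `source` is a SourceString positioned at the start of "foo42 = 1":

source.match_function_pattern(str.isalpha, str.isalnum)   # "foo42": a letter first, then alphanumerics
source.match_function_pattern(str.isdigit)                # "": the first char 'f' is not a digit
source.match_function_pattern(str.isalpha, least=6)       # "": only "foo" matches, fewer than least chars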
def count_indents(self, spacecount, tabs=0, offset=0): if not self.has_space(offset=offset): return 0 spaces = 0 indents = 0 for char in self.string[self.pos + offset - self.col:]: if char == ' ': spaces += 1 elif tabs and char == '\t': indents += 1 spaces = 0 else: break if spaces == spacecount: indents += 1 spaces = 0 return indents
Counts the number of indents from the start of the current line, where an indent is a tab or a run of spacecount spaces.
def count_indents_length(self, spacecount, tabs=0, offset=0): if not self.has_space(offset=offset): return 0 spaces = 0 indents = 0 charlen = 0 for char in self.string[self.pos + offset - self.col:]: if char == ' ': spaces += 1 elif tabs and char == '\t': indents += 1 spaces = 0 else: break charlen += 1 if spaces == spacecount: indents += 1 spaces = 0 return (indents, charlen)
Counts the number of indents from the start of the current line, where an indent is a tab or a run of spacecount spaces. Also returns the character length of the indents.
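A worked example may help (the input lines are assumed; the cursor sits at the start of the shown line):

# With spacecount=4 and the current line "        value = 1" (8 leading spaces):
#   count_indents(4)        -> 2
#   count_indents_length(4) -> (2, 8)
# With tabs=1 and the current line "\t\tvalue = 1":
#   count_indents(4, tabs=1)        -> 2
#   count_indents_length(4, tabs=1) -> (2, 2)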
def count_indents_last_line(self, spacecount, tabs=0, back=5): if not self.has_space(): return 0 lines = self.get_surrounding_lines(back, 0) for line in reversed(lines): if not line.string.isspace(): return line.count_indents(spacecount, tabs) return 0
Finds the last meaningful line and returns its indent level. back specifies the number of lines to look back for a non-whitespace line.
def count_indents_length_last_line(self, spacecount, tabs=0, back=5): if not self.has_space(): return 0 lines = self.get_surrounding_lines(back, 0) for line in reversed(lines): if not line.string.isspace(): return line.count_indents_length(spacecount, tabs) return (0, 0)
Finds the last meaningful line and returns its indent level and character length. back specifies the number of lines to look back for a non-whitespace line.
def skip_whitespace(self, newlines=0): if newlines: while not self.eos: if self.get_char().isspace(): self.eat_length(1) else: break else: char = '' while not self.eos: char = self.get_char() if char.isspace() and char != '\n': self.eat_length(1) else: break
Moves the position forward to the next non-whitespace character, skipping spaces but not newlines. If newlines >= 1, newlines are skipped as well.
def pretty_print(self, carrot=False):
    lineno = self.lineno
    padding = 0
    if lineno < 1000:
        padding = 1
    if lineno < 100:
        padding = 2
    if lineno < 10:
        padding = 3
    string = str(lineno) + (' ' * padding) + '|' + self.string
    if carrot:
        # Add a caret line under the current character position.
        string += '\n' + (' ' * (self.col + 5)) + '^'
    return string
Return a string of this line including the line number. If carrot is True, a line is added under the string with a caret under the current character position.
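Illustrative output for a line numbered 12 with the caret option; the caret character itself is an assumption based on the docstring:

# pretty_print(carrot=True) for SourceLine("x = foo(bar)", 12) with col 4 renders roughly as:
# 12  |x = foo(bar)
#          ^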
def safe_exit(output): try: sys.stdout.write(output) sys.stdout.flush() except IOError: pass
exit without breaking pipes.
def frag2text(endpoint, stype, selector, clean=False, raw=False, verbose=False): try: return main(endpoint, stype, selector, clean, raw, verbose) except StandardError as err: return err
returns Markdown text of selected fragment.

Args:
    endpoint: URL, file, or HTML string
    stype: { 'css' | 'xpath' }
    selector: CSS selector or XPath expression

Returns:
    Markdown text

Options:
    clean: cleans fragment (lxml.html.clean defaults)
    raw: returns raw HTML fragment
    verbose: show http status, encoding, headers
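A hedged usage sketch; the URL, file name, and selectors below are placeholders, not from the source:

md = frag2text("http://example.com/article.html", "css", "div#content", clean=True)
print(md)

# the same call with an XPath expression against a local file
md = frag2text("article.html", "xpath", "//article[1]", raw=False, verbose=True)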
def read(self, _file): with open(_file) as fh: data = fh.read() if self.verbose: sys.stdout.write("read %d bytes from %s\n" % (fh.tell(), _file)) return data
return local file contents as endpoint.
def GET(self, url): r = requests.get(url) if self.verbose: sys.stdout.write("%s %s\n" % (r.status_code, r.encoding)) sys.stdout.write(str(r.headers) + "\n") self.encoding = r.encoding return r.text
returns text content of HTTP GET response.
def select(self, html, stype, expression): etree = html5lib.parse(html, treebuilder='lxml', namespaceHTMLElements=False) if stype == 'css': selector = lxml.cssselect.CSSSelector(expression) frag = list(selector(etree)) else: frag = etree.xpath(expression) if not frag: raise RuntimeError("Nothing found for: %s" % expression) return "".join([lxml.etree.tostring(x) for x in frag])
returns WHATWG spec HTML fragment from selector expression.
def clean(self, html): return lxml.html.clean.clean_html(unicode(html, self.encoding))
removes evil HTML per lxml.html.clean defaults.
def filesystem_repository(_context, name=None, make_default=False, aggregate_class=None, repository_class=None, directory=None, content_type=None): cnf = {} if not directory is None: cnf['directory'] = directory if not content_type is None: cnf['content_type'] = content_type _repository(_context, name, make_default, aggregate_class, repository_class, REPOSITORY_TYPES.FILE_SYSTEM, 'add_filesystem_repository', cnf)
Directive for registering a file-system based repository.
def rdb_repository(_context, name=None, make_default=False, aggregate_class=None, repository_class=None, db_string=None, metadata_factory=None): cnf = {} if not db_string is None: cnf['db_string'] = db_string if not metadata_factory is None: cnf['metadata_factory'] = metadata_factory _repository(_context, name, make_default, aggregate_class, repository_class, REPOSITORY_TYPES.RDB, 'add_rdb_repository', cnf)
Directive for registering a RDBM based repository.
def messaging(_context, repository, reset_on_start=False):
    discriminator = ('messaging', repository)
    reg = get_current_registry()
    config = Configurator(reg, package=_context.package)
    _context.action(discriminator=discriminator, # pylint: disable=E1101
                    callable=config.setup_system_repository,
                    args=(repository,),
                    kw=dict(reset_on_start=reset_on_start))
Directive for setting up the user message resource in the appropriate repository. :param str repository: The repository to create the user messages resource in.
def _filter(self, dict, keep): if not keep: return dict result = {} for key, value in dict.iteritems(): if key in keep: result[key] = value return result
Remove any keys not in 'keep'
def main( upload='usbasp', core='arduino', replace_existing=True, ): def install(mcu, f_cpu, kbyte): board = AutoBunch() board.name = TEMPL_NAME.format(mcu=mcu, f_cpu=format_freq(f_cpu), upload=upload) board_id = TEMPL_ID.format(mcu=mcu, f_cpu=(f_cpu), upload=upload) board.upload.using = upload board.upload.maximum_size = kbyte * 1024 board.build.mcu = mcu board.build.f_cpu = str(f_cpu) + 'L' board.build.core = core # for 1.0 board.build.variant = 'standard' install_board(board_id, board, replace_existing=replace_existing) install('atmega8', 1000000, 8) install('atmega8', 8000000, 8) install('atmega8', 12000000, 8) install('atmega88', 1000000, 8) install('atmega88', 8000000, 8) install('atmega88', 12000000, 8) install('atmega88', 20000000, 8) install('atmega328p', 20000000, 32) install('atmega328p', 16000000, 32) install('atmega328p', 8000000, 32) install('atmega328p', 1000000, 32)
install custom boards.
def write_county_estimate(self, table, variable, code, datum): try: division = Division.objects.get( code="{}{}".format(datum["state"], datum["county"]), level=self.COUNTY_LEVEL, ) CensusEstimate.objects.update_or_create( division=division, variable=variable, defaults={"estimate": datum[code] or 0}, ) except ObjectDoesNotExist: print("ERROR: {}, {}".format(datum["NAME"], datum["state"]))
Creates a new estimate from a census series.

Data has the following signature from the API:
{
    'B00001_001E': '5373',
    'NAME': 'Anderson County, Texas',
    'county': '001',
    'state': '48'
}
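An illustrative call using the datum shape shown above; `importer`, `table`, and `variable` are hypothetical objects supplied by the surrounding loader:

datum = {
    "B00001_001E": "5373",
    "NAME": "Anderson County, Texas",
    "county": "001",
    "state": "48",
}
importer.write_county_estimate(table, variable, "B00001_001E", datum)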
def get_district_estimates_by_state( self, api, table, variable, estimate, state ): state = Division.objects.get(level=self.STATE_LEVEL, code=state) district_data = api.get( ("NAME", estimate), { "for": "congressional district:*", "in": "state:{}".format(state.code), }, year=int(table.year), ) for datum in district_data: self.write_district_estimate(table, variable, estimate, datum)
Calls API for all districts in a state and a given estimate.
def get_county_estimates_by_state( self, api, table, variable, estimate, state ): state = Division.objects.get(level=self.STATE_LEVEL, code=state) county_data = api.get( ("NAME", estimate), {"for": "county:*", "in": "state:{}".format(state.code)}, year=int(table.year), ) for datum in county_data: self.write_county_estimate(table, variable, estimate, datum)
Calls API for all counties in a state and a given estimate.
def get_state_estimates_by_state( self, api, table, variable, estimate, state ): state = Division.objects.get(level=self.STATE_LEVEL, code=state) state_data = api.get( ("NAME", estimate), {"for": "state:{}".format(state.code)}, year=int(table.year), ) for datum in state_data: self.write_state_estimate(table, variable, estimate, datum)
Calls API for a state and a given estimate.
def aggregate_variable(estimate, id): estimates = [ variable.estimates.get(division__id=id).estimate for variable in estimate.variable.label.variables.all() ] method = estimate.variable.label.aggregation if method == "s": aggregate = sum(estimates) elif method == "a": aggregate = statistics.mean(estimates) elif method == "m": aggregate = statistics.median(estimates) else: aggregate = None return aggregate
Aggregate census table variables by a custom label.
def aggregate_national_estimates_by_district(self):
    data = {}
    fips = "00"
    aggregated_labels = []
    states = Division.objects.filter(level=self.DISTRICT_LEVEL)
    estimates = CensusEstimate.objects.filter(
        division__level=self.DISTRICT_LEVEL
    )
    for estimate in estimates:
        series = estimate.variable.table.series
        year = estimate.variable.table.year
        table = estimate.variable.table.code
        label = estimate.variable.label.label
        table_label = "{}{}".format(table, label)
        code = estimate.variable.code
        if series not in data:
            data[series] = {}
        if year not in data[series]:
            data[series][year] = {}
        if table not in data[series][year]:
            data[series][year][table] = {}
        if fips not in data[series][year][table]:
            data[series][year][table][fips] = {}
        if label is not None:
            if table_label not in aggregated_labels:
                # c= {**a, **b}
                aggregated_labels.append(table_label)
                data[series][year][table][fips][label] = [
                    self.aggregate_variable(estimate, division.id)
                    for division in states
                    if len(
                        CensusEstimate.objects.filter(
                            variable=estimate.variable,
                            division=division.id,
                        )
                    )
                    > 0
                ]
        else:
            if code in data[series][year][table][fips]:
                data[series][year][table][fips][code].append(
                    estimate.estimate
                )
            else:
                data[series][year][table][fips][code] = [estimate.estimate]
    # print(data)
    return data
Aggregates district-level estimates for each table within the country. Creates data structure designed for an export in this format: ...{series}/{year}/{table}/districts.json
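A sketch of the nested structure this produces; every key and value below is illustrative only:

{
    "acs5": {                       # series
        "2016": {                   # year
            "B01003": {             # table code
                "00": {             # national-level FIPS used by this method
                    "total population": [745000, 702000],     # one aggregate per district for a labeled variable
                    "B01003_001E": [745000.0, 702000.0],      # unlabeled codes collect per-district estimates
                }
            }
        }
    }
}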
def aggregate_state_estimates_by_county(self, parent): data = {} for division in tqdm( Division.objects.filter(level=self.COUNTY_LEVEL, parent=parent) ): fips = division.code id = division.id aggregated_labels = [] # Keep track of already agg'ed variables for estimate in division.census_estimates.all(): series = estimate.variable.table.series year = estimate.variable.table.year table = estimate.variable.table.code label = estimate.variable.label.label table_label = "{}{}".format(table, label) code = estimate.variable.code if series not in data: data[series] = {} if year not in data[series]: data[series][year] = {} if table not in data[series][year]: data[series][year][table] = {} if fips not in data[series][year][table]: data[series][year][table][fips] = {} if label is not None: if table_label not in aggregated_labels: aggregated_labels.append(table_label) data[series][year][table][fips][ label ] = self.aggregate_variable(estimate, id) else: data[series][year][table][division.code][ code ] = estimate.estimate # print(data) return data
Aggregates county-level estimates for each table within a given state. Creates data structure designed for an export in this format: ...{series}/{year}/{table}/{state_fips}/counties.json
def xml(self, fn=None, src='word/document.xml', XMLClass=XML, **params):
    "return the src with the given transformation applied, if any."
    if src in self.xml_cache:
        return self.xml_cache[src]
    if src not in self.zipfile.namelist():
        return
    x = XMLClass(
        fn=fn or (self.fn and self.fn.replace('.docx', '.xml')) or None,
        root=self.zipfile.read(src))
    self.xml_cache[src] = x
    return x
return the src with the given transformation applied, if any.
def metadata(self): md = self.xml(src="docProps/core.xml") if md is None: md = XML(root=etree.Element("{%(cp)s}metadata" % self.NS)) return md.root
return a cp:metadata element with the metadata in the document
def endnotemap(self, cache=True): if self.__endnotemap is not None and cache==True: return self.__endnotemap else: x = self.xml(src='word/endnotes.xml') d = Dict() if x is None: return d for endnote in x.root.xpath("w:endnote", namespaces=self.NS): id = endnote.get("{%(w)s}id" % self.NS) typ = endnote.get("{%(w)s}type" % self.NS) d[id] = Dict(id=id, type=typ, elem=endnote) if cache==True: self.__endnotemap = d return d
return the endnotes from the docx, keyed to string id.
def footnotemap(self, cache=True): if self.__footnotemap is not None and cache==True: return self.__footnotemap else: x = self.xml(src='word/footnotes.xml') d = Dict() if x is None: return d for footnote in x.root.xpath("w:footnote", namespaces=self.NS): id = footnote.get("{%(w)s}id" % self.NS) typ = footnote.get("{%(w)s}type" % self.NS) d[id] = Dict(id=id, type=typ, elem=footnote) if cache==True: self.__footnotemap = d return d
return the footnotes from the docx, keyed to string id.
def commentmap(self, cache=True): if self.__commentmap is not None and cache==True: return self.__commentmap else: x = self.xml(src='word/comments.xml') d = Dict() if x is None: return d for comment in x.root.xpath("w:comment", namespaces=self.NS): id = comment.get("{%(w)s}id" % self.NS) typ = comment.get("{%(w)s}type" % self.NS) d[id] = Dict(id=id, type=typ, elem=comment) if cache==True: self.__commentmap = d return d
return the comments from the docx, keyed to string id.
def val_to_css(C, val, factor, unit=CSS.rem, pt_per_em=12., decimals=2): return (round(float(val) * factor / pt_per_em, decimals) * CSS.rem).asUnit(unit)
convert the Word val to a CSS unit

val       : The raw Word val
factor    : The conversion factor. If font sizes, typically factor=1/2., others factor=1/20.
unit      : The CSS unit to which we are converting, default CSS.rem
pt_per_em : The number of CSS.pt per em. 12. is the default, but 'tain't necessarily so.
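A worked example of the arithmetic, assuming the defaults noted above (font sizes arrive as half-points with factor=1/2., spacing values as twentieths of a point with factor=1/20.):

round(float(24) * (1 / 2.) / 12., 2)    # sz val 24     -> 12pt -> 1.0 (rem)
round(float(240) * (1 / 20.) / 12., 2)  # spacing val 240 -> 12pt -> 1.0 (rem)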
def selector(C, style):
    clas = C.classname(style.name)
    if style.type == 'paragraph':
        # heading outline levels are 0..7 internally, indicating h1..h8
        outlineLvl = int((style.properties.get('outlineLvl') or {}).get('val') or 8) + 1
        if outlineLvl < 9:
            tag = 'h%d' % outlineLvl
        else:
            tag = 'p'
    elif style.type == 'character':
        tag = 'span'
    elif style.type == 'table':
        tag = 'table'
    elif style.type == 'numbering':
        tag = 'ol'
    return "%s.%s" % (tag, clas)
return the selector for the given stylemap style
def stylesheet(self, fn=None, unit=CSS.rem, pt_per_em=None, decimals=2, font_factor=1/2., space_factor=1/20.): styles = self.stylemap(definitions=True, all=True, cache=False) used_styles = self.stylemap(definitions=False, all=False, cache=False) css = CSS(fn=fn or (self.fn and self.fn.replace('.docx', '.css')) or None) if pt_per_em is None: # use the size of the "Normal" font as 1.0em by definition normal = [styles[k] for k in styles if styles[k].name=='Normal'][0] if normal.properties.get('sz') is not None: pt_per_em = float(normal.properties['sz'].val) * font_factor else: pt_per_em = 12. for styleName in used_styles: style = styles[styleName] sel = self.selector(style) css.styles[sel] = self.style_properties(styles, styleName, unit=unit, pt_per_em=pt_per_em, decimals=decimals, font_factor=font_factor, space_factor=space_factor) LOG.debug("%s %r" % (sel, css.styles[sel])) return css
create a CSS stylesheet in a Text document, using DOCX.stylemap(), above.
def load_into_collection_from_stream(collection, stream, content_type): rpr = as_representer(collection, content_type) with stream: data_el = rpr.data_from_stream(stream) rpr.resource_from_data(data_el, resource=collection)
Loads resources from the given resource data stream (of the specified MIME content type) into the given collection resource.
def load_collection_from_stream(resource, stream, content_type): coll = create_staging_collection(resource) load_into_collection_from_stream(coll, stream, content_type) return coll
Creates a new collection for the registered resource and calls `load_into_collection_from_stream` with it.
def load_into_collection_from_file(collection, filename, content_type=None): if content_type is None: ext = os.path.splitext(filename)[1] try: content_type = MimeTypeRegistry.get_type_for_extension(ext) except KeyError: raise ValueError('Could not infer MIME type for file extension ' '"%s".' % ext) load_into_collection_from_stream(collection, open(filename, 'rU'), content_type)
Loads resources from the specified file into the given collection resource. If no content type is provided, an attempt is made to look up the extension of the given filename in the MIME content type registry.
def load_collection_from_file(resource, filename, content_type=None): coll = create_staging_collection(resource) load_into_collection_from_file(coll, filename, content_type=content_type) return coll
Creates a new collection for the registered resource and calls `load_into_collection_from_file` with it.
def load_into_collection_from_url(collection, url, content_type=None):
    parsed = urlparse.urlparse(url)
    scheme = parsed.scheme # pylint: disable=E1101
    if scheme == 'file': # Assume a local path.
        load_into_collection_from_file(collection,
                                       parsed.path, # pylint: disable=E1101
                                       content_type=content_type)
    else:
        raise ValueError('Unsupported URL scheme "%s".' % scheme)
Loads resources from the representation contained in the given URL into the given collection resource. :returns: collection resource
def load_collection_from_url(resource, url, content_type=None): coll = create_staging_collection(resource) load_into_collection_from_url(coll, url, content_type=content_type) return coll
Creates a new collection for the registered resource and calls `load_into_collection_from_url` with it.
def load_into_collections_from_zipfile(collections, zipfile):
    with ZipFile(zipfile) as zipf:
        names = zipf.namelist()
        name_map = dict([(os.path.splitext(name)[0], index)
                         for (index, name) in enumerate(names)])
        for coll in collections:
            coll_name = get_collection_name(coll)
            index = name_map.get(coll_name)
            if index is None:
                continue
            coll_fn = names[index]
            ext = os.path.splitext(coll_fn)[1]
            try:
                content_type = \
                    MimeTypeRegistry.get_type_for_extension(ext)
            except KeyError:
                raise ValueError('Could not infer MIME type for file '
                                 'extension "%s".' % ext)
            # Strings are always written as UTF-8 encoded byte strings when
            # the zip file is created, so we have to wrap the iterator into
            # a decoding step.
            coll_data = DecodingStream(zipf.open(coll_fn, 'r'))
            load_into_collection_from_stream(coll, coll_data, content_type)
Loads resources contained in the given ZIP archive into each of the given collections. The ZIP file is expected to contain a list of file names obtained with the :func:`get_collection_filename` function, each pointing to a file of zipped collection resource data. :param collections: sequence of collection resources :param str zipfile: ZIP file name
def dump_resource(resource, stream, content_type=None): if content_type is None: content_type = CsvMime rpr = as_representer(resource, content_type) rpr.to_stream(resource, stream)
Dumps the given resource to the given stream using the specified MIME content type (defaults to CSV).
def build_resource_dependency_graph(resource_classes, include_backrefs=False): def visit(mb_cls, grph, path, incl_backrefs): for attr_name in get_resource_class_attribute_names(mb_cls): if is_resource_class_terminal_attribute(mb_cls, attr_name): continue child_descr = getattr(mb_cls, attr_name) child_mb_cls = get_member_class(child_descr.attr_type) # We do not follow cyclic references back to a resource class # that is last in the path. if len(path) > 0 and child_mb_cls is path[-1] \ and not incl_backrefs: continue if not grph.has_node(child_mb_cls): grph.add_node(child_mb_cls) path.append(mb_cls) visit(child_mb_cls, grph, path, incl_backrefs) path.pop() if not grph.has_edge((mb_cls, child_mb_cls)): grph.add_edge((mb_cls, child_mb_cls)) dep_grph = digraph() for resource_class in resource_classes: mb_cls = get_member_class(resource_class) if not dep_grph.has_node(mb_cls): dep_grph.add_node(mb_cls) visit(mb_cls, dep_grph, [], include_backrefs) return dep_grph
Builds a graph of dependencies among the given resource classes. The dependency graph is a directed graph with member resource classes as nodes. An edge between two nodes represents a member or collection attribute. :param resource_classes: resource classes to determine interdependencies of. :type resource_classes: sequence of registered resources. :param bool include_backrefs: flag indicating if dependencies introduced by back-references (e.g., a child resource referencing its parent) should be included in the dependency graph.
def find_connected_resources(resource, dependency_graph=None):
    # Build a resource_graph.
    resource_graph = \
        build_resource_graph(resource, dependency_graph=dependency_graph)
    entity_map = OrderedDict()
    for mb in topological_sorting(resource_graph):
        mb_cls = get_member_class(mb)
        ents = entity_map.get(mb_cls)
        if ents is None:
            ents = []
            entity_map[mb_cls] = ents
        ents.append(mb.get_entity())
    return entity_map
Collects all resources connected to the given resource and returns a dictionary mapping member resource classes to new collections containing the members found.
def dump_resource_to_files(resource, content_type=None, directory=None):
    if directory is None:
        directory = os.getcwd() # pragma: no cover
    if content_type is None:
        content_type = CsvMime
    srl = ConnectedResourcesSerializer(content_type)
    srl.to_files(resource, directory=directory)
Convenience function. See :meth:`everest.resources.io.ConnectedResourcesSerializer.to_files` for details. If no directory is given, the current working directory is used. The given context type defaults to CSV.
def dump_resource_to_zipfile(resource, zipfile, content_type=None): if content_type is None: content_type = CsvMime srl = ConnectedResourcesSerializer(content_type) srl.to_zipfile(resource, zipfile)
Convenience function. See :meth:`everest.resources.io.ConnectedResourcesSerializer.to_zipfile` for details. The given context type defaults to CSV.
def to_strings(self, resource):
    collections = self.__collect(resource)
    # Build a map of representations.
    rpr_map = OrderedDict()
    for (mb_cls, coll) in iteritems_(collections):
        strm = NativeIO('w')
        dump_resource(coll, strm, content_type=self.__content_type)
        rpr_map[mb_cls] = strm.getvalue()
    return rpr_map
Dumps the all resources reachable from the given resource to a map of string representations using the specified content_type (defaults to CSV). :returns: dictionary mapping resource member classes to string representations
def to_files(self, resource, directory): collections = self.__collect(resource) for (mb_cls, coll) in iteritems_(collections): fn = get_write_collection_path(mb_cls, self.__content_type, directory=directory) with open_text(os.path.join(directory, fn)) as strm: dump_resource(coll, strm, content_type=self.__content_type)
Dumps the given resource and all resources linked to it into a set of representation files in the given directory.
def to_zipfile(self, resource, zipfile): rpr_map = self.to_strings(resource) with ZipFile(zipfile, 'w') as zipf: for (mb_cls, rpr_string) in iteritems_(rpr_map): fn = get_collection_filename(mb_cls, self.__content_type) zipf.writestr(fn, rpr_string, compress_type=ZIP_DEFLATED)
Dumps the given resource and all resources linked to it into the given ZIP file.
def read(self): p = os.path.join(self.path, self.name) try: with open(p) as f: json_text = f.read() except FileNotFoundError as e: raise JSONFileError(e) from e try: json.loads(json_text) except (json.JSONDecodeError, TypeError) as e: raise JSONFileError(f"{e} Got {p}") from e return json_text
Returns the file contents as validated JSON text.
def deserialized_objects(self): if not self._deserialized_objects: json_text = self.read() self._deserialized_objects = self.deserialize(json_text=json_text) return self._deserialized_objects
Returns a generator of deserialized objects.
def exists(self, batch_id=None): try: self.model.objects.get(batch_id=batch_id) except self.model.DoesNotExist: return False return True
Returns True if batch_id exists in the history.
def update(
    self,
    filename=None,
    batch_id=None,
    prev_batch_id=None,
    producer=None,
    count=None,
):
    # TODO: refactor model enforce unique batch_id
    # TODO: refactor model to not allow NULLs
    if not filename:
        raise BatchHistoryError("Invalid filename. Got None")
    if not batch_id:
        raise BatchHistoryError("Invalid batch_id. Got None")
    if not prev_batch_id:
        raise BatchHistoryError("Invalid prev_batch_id. Got None")
    if not producer:
        raise BatchHistoryError("Invalid producer. Got None")
    if self.exists(batch_id=batch_id):
        raise IntegrityError("Duplicate batch_id")
    try:
        obj = self.model.objects.get(batch_id=batch_id)
    except self.model.DoesNotExist:
        obj = self.model(
            filename=filename,
            batch_id=batch_id,
            prev_batch_id=prev_batch_id,
            producer=producer,
            total=count,
        )
    obj.transaction_file.name = filename
    obj.save()
    return obj
Creates an history model instance.
def populate(self, deserialized_txs=None, filename=None, retry=None): if not deserialized_txs: raise BatchError("Failed to populate batch. There are no objects to add.") self.filename = filename if not self.filename: raise BatchError("Invalid filename. Got None") try: for deserialized_tx in deserialized_txs: self.peek(deserialized_tx) self.objects.append(deserialized_tx.object) break for deserialized_tx in deserialized_txs: self.objects.append(deserialized_tx.object) except DeserializationError as e: raise BatchDeserializationError(e) from e except JSONFileError as e: raise BatchDeserializationError(e) from e
Populates the batch with unsaved model instances from a generator of deserialized objects.
def peek(self, deserialized_tx): self.batch_id = deserialized_tx.object.batch_id self.prev_batch_id = deserialized_tx.object.prev_batch_id self.producer = deserialized_tx.object.producer if self.batch_history.exists(batch_id=self.batch_id): raise BatchAlreadyProcessed( f"Batch {self.batch_id} has already been processed" ) if self.prev_batch_id != self.batch_id: if not self.batch_history.exists(batch_id=self.prev_batch_id): raise InvalidBatchSequence( f"Invalid import sequence. History does not exist for prev_batch_id. " f"Got file='{self.filename}', prev_batch_id=" f"{self.prev_batch_id}, batch_id={self.batch_id}." )
Peeks into first tx and sets self attrs or raise.
def save(self): saved = 0 if not self.objects: raise BatchError("Save failed. Batch is empty") for deserialized_tx in self.objects: try: self.model.objects.get(pk=deserialized_tx.pk) except self.model.DoesNotExist: data = {} for field in self.model._meta.get_fields(): try: data.update({field.name: getattr(deserialized_tx, field.name)}) except AttributeError: pass self.model.objects.create(**data) saved += 1 return saved
Saves all model instances in the batch as model.
def import_batch(self, filename): batch = self.batch_cls() json_file = self.json_file_cls(name=filename, path=self.path) try: deserialized_txs = json_file.deserialized_objects except JSONFileError as e: raise TransactionImporterError(e) from e try: batch.populate(deserialized_txs=deserialized_txs, filename=json_file.name) except ( BatchDeserializationError, InvalidBatchSequence, BatchAlreadyProcessed, ) as e: raise TransactionImporterError(e) from e batch.save() batch.update_history() return batch
Imports the batch of outgoing transactions into model IncomingTransaction.
def timelimit(timeout): def _1(function): def _2(*args, **kw): class Dispatch(threading.Thread): def __init__(self): threading.Thread.__init__(self) self.result = None self.error = None self.setDaemon(True) self.start() def run(self): try: self.result = function(*args, **kw) except: self.error = sys.exc_info() c = Dispatch() c.join(timeout) if c.isAlive(): raise TimeoutError, 'took too long' if c.error: raise c.error[0], c.error[1] return c.result return _2 return _1
borrowed from web.py
def root(self): self.xml_root = ElementTree.Element('interface') self.xml_root.set('type', self.type) if self.mac is not None: mac = ElementTree.SubElement(self.xml_root, 'mac') mac.set('address', self.mac) _source = ElementTree.SubElement(self.xml_root, 'source') _source.set(self.type, self.source) _model = ElementTree.SubElement(self.xml_root, 'model') _model.set('type', self.model) return self.xml_root
TODO(rdelinger) rename this as to_xml or something similar
def _populateBuffer(self, stream, n): try: for x in xrange(n): output = stream.next() self._buffer.write(output) except StopIteration, e: self._deferred.callback(None) except Exception, e: self._deferred.errback(e) else: self.delayedCall = reactor.callLater(CALL_DELAY, self._populateBuffer, stream, n)
Iterator that returns N steps of the genshi stream. Found that performance really sucks for n = 1 (0.5 requests/second for the root resources versus 80 requests/second for a blocking algorithm). Hopefully increasing the number of steps per timeslice will significantly improve performance.
def create_message( json_meta, data, data_type=0, version=b'\x00\x01@\x00'): __check_data(data) meta = __prepare_meta(json_meta) data = __compress(json_meta, data) header = __create_machine_header( json_meta, data, data_type, version) return header + meta + data
Create message, containing meta and data in df-envelope format.
@json_meta - metadata
@data - binary data
@data_type - data type code for binary data
@version - version of machine header
@return - message as bytearray
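A hedged round-trip sketch; the metadata keys are made up, and the exact payload handling depends on what the module's __compress/__decompress helpers do with them:

meta = {"device": "adc-1", "channel": 0}          # illustrative metadata
message = create_message(meta, b"\x00\x01\x02\x03")
header, parsed_meta, payload = parse_message(message)
# payload should decompress back to the original b"\x00\x01\x02\x03"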
def parse_from_file(filename, nodata=False): header = None with open(filename, "rb") as file: header = read_machine_header(file) meta_raw = file.read(header['meta_len']) meta = __parse_meta(meta_raw, header) data = b'' if not nodata: data = __decompress(meta, file.read(header['data_len'])) return header, meta, data
Parse df message from file.
@filename - path to file
@nodata - do not load data
@return - [binary header, metadata, binary data]
def parse_message(message, nodata=False): header = read_machine_header(message) h_len = __get_machine_header_length(header) meta_raw = message[h_len:h_len + header['meta_len']] meta = __parse_meta(meta_raw, header) data_start = h_len + header['meta_len'] data = b'' if not nodata: data = __decompress( meta, message[data_start:data_start + header['data_len']] ) return header, meta, data
Parse df message from bytearray.
@message - message data
@nodata - do not load data
@return - [binary header, metadata, binary data]
def read_machine_header(data): if isinstance(data, (bytes, bytearray)): stream = io.BytesIO(data) elif isinstance(data, io.BufferedReader): stream = data else: raise ValueError("data should be either bytearray or file 'rb' mode.") header = dict() header_type = stream.read(6) if header_type == b"#!\x00\x01@\x00": header['type'] = header_type[2:6] header['time'] = struct.unpack('>I', stream.read(4))[0] header['meta_type'] = struct.unpack('>I', stream.read(4))[0] header['meta_len'] = struct.unpack('>I', stream.read(4))[0] header['data_type'] = struct.unpack('>I', stream.read(4))[0] header['data_len'] = struct.unpack('>I', stream.read(4))[0] stream.read(4) elif header_type == b"#~DF02": header['type'] = header_type[2:6] header['meta_type'] = stream.read(2) header['meta_len'] = struct.unpack('>I', stream.read(4))[0] header['data_len'] = struct.unpack('>I', stream.read(4))[0] stream.read(4) else: raise NotImplementedError( "Parser for machine header %s not implemented" % (header_type.decode())) return header
Parse binary header.
@data - bytearray, contains binary header of file opened in 'rb' mode
@return - parsed binary header
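A small sketch that hand-builds a "#~DF02" header following the field layout the parser above reads (2-byte meta type, big-endian lengths, 4 reserved bytes) and parses it back; the field values are arbitrary:

import struct

raw = (b"#~DF02"
       + b"\x00\x01"                 # meta_type
       + struct.pack(">I", 42)       # meta_len
       + struct.pack(">I", 1024)     # data_len
       + b"\x00" * 4)                # reserved bytes skipped by the parser
header = read_machine_header(raw)
# header -> {'type': b'DF02', 'meta_type': b'\x00\x01', 'meta_len': 42, 'data_len': 1024}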
def get_messages_from_stream(data): messages = [] iterator = HEADER_RE.finditer(data) last_pos = 0 for match in iterator: pos = match.span()[0] header = read_machine_header(data[pos:]) h_len = __get_machine_header_length(header) cur_last_pos = pos + h_len + header['meta_len'] + header['data_len'] if cur_last_pos > len(data): break header, meta, bin_data = parse_message(data[pos:]) messages.append({'header': header, 'meta': meta, 'data': bin_data}) last_pos = cur_last_pos data = data[last_pos:] return messages, data
Extract complete messages from stream and cut them out of the stream.
@data - stream binary data
@return - [list of messages, chopped stream data]
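A hedged consumption sketch: keep a rolling buffer, append each chunk read from a socket or file, and let get_messages_from_stream() hand back complete messages plus the unconsumed remainder (read_chunk and handle are hypothetical helpers):

buffer = b""
while True:
    chunk = read_chunk()              # hypothetical: e.g. sock.recv(4096)
    if not chunk:
        break
    buffer += chunk
    messages, buffer = get_messages_from_stream(buffer)
    for msg in messages:
        handle(msg["header"], msg["meta"], msg["data"])   # hypothetical consumer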
def clone(self, options=None, attribute_options=None): copied_cfg = self.__configurations[-1].copy() upd_cfg = type(copied_cfg)(options=options, attribute_options=attribute_options) copied_cfg.update(upd_cfg) return self.__class__(self.__mp_reg, self.__mapped_cls, self.__de_cls, copied_cfg)
Returns a clone of this mapping that is configured with the given option and attribute option dictionaries. :param dict options: Maps representer options to their values. :param dict attribute_options: Maps attribute names to dictionaries mapping attribute options to their values.
def update(self, options=None, attribute_options=None): attr_map = self.__get_attribute_map(self.__mapped_cls, None, 0) for attributes in attribute_options: for attr_name in attributes: if not attr_name in attr_map: raise AttributeError('Trying to configure non-existing ' 'resource attribute "%s"' % (attr_name)) cfg = RepresenterConfiguration(options=options, attribute_options=attribute_options) self.configuration.update(cfg)
Updates this mapping with the given option and attribute option maps. :param dict options: Maps representer options to their values. :param dict attribute_options: Maps attribute names to dictionaries mapping attribute options to their values.
def get_attribute_map(self, mapped_class=None, key=None): if mapped_class is None: mapped_class = self.__mapped_cls if key is None: key = MappedAttributeKey(()) return OrderedDict([(attr.resource_attr, attr) for attr in self._attribute_iterator(mapped_class, key)])
Returns an ordered map of the mapped attributes for the given mapped class and attribute key. :param key: Tuple of attribute names specifying a path to a nested attribute in a resource tree. If this is not given, all attributes in this mapping will be returned.
def get_attribute(self, attribute_name, mapped_class=None, key=None): attr_map = self.__get_attribute_map(mapped_class, key, 0) try: return attr_map[attribute_name] except KeyError: raise AttributeError(attribute_name)
Returns the specified attribute from the map of all mapped attributes for the given mapped class and attribute key. See :method:`get_attribute_map` for details.
def get_attribute_by_repr(self, attribute_repr_name, mapped_class=None, key=None): attr_map = self.__get_attribute_map(mapped_class, key, 1) try: return attr_map[attribute_repr_name] except KeyError: raise AttributeError(attribute_repr_name)
Returns the attribute (specified by its representation name) from the map of all mapped attributes for the given mapped class and attribute key. See :method:`get_attribute_map` for details.
def attribute_iterator(self, mapped_class=None, key=None): for attr in self._attribute_iterator(mapped_class, key): yield attr
Returns an iterator over all mapped attributes for the given mapped class and attribute key. See :method:`get_attribute_map` for details.