def setup(options, **kwargs): params = dict(kwargs) params.update(level=options.log_level) logging.basicConfig(**params)
Set up logging with options or arguments from an OptionParser or ArgumentParser. Any additional keyword arguments are passed on to the basicConfig call.
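A minimal usage sketch of the helper above, assuming an argparse option named log_level (hypothetical option name; the helper only requires that options.log_level exists); the extra keyword argument is forwarded to basicConfig:

import argparse
import logging

parser = argparse.ArgumentParser()
parser.add_argument('--log-level', dest='log_level', type=int, default=logging.INFO)
options = parser.parse_args(['--log-level', str(logging.DEBUG)])

# equivalent to setup(options, format='%(levelname)s %(message)s')
logging.basicConfig(level=options.log_level, format='%(levelname)s %(message)s')
logging.debug('debug messages are now visible')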
def setup_requests_logging(level): requests_log = logging.getLogger("requests.packages.urllib3") requests_log.setLevel(level) requests_log.propagate = True # enable debugging at httplib level http_client.HTTPConnection.debuglevel = level <= logging.DEBUG
Set up logging for 'requests' so that it logs details about the connection, headers, etc.
def _set_period(self, period): self._period = period if period: self._period_seconds = tempora.get_period_seconds(self._period) self._date_format = tempora.get_date_format_string( self._period_seconds) else: self._period_seconds = 0 self._date_format = ''
Set the period for the timestamp. If period is 0 or None, no period will be used.
def get_filename(self, t): root, ext = os.path.splitext(self.base_filename) # remove seconds not significant to the period if self._period_seconds: t -= t % self._period_seconds # convert it to a datetime object for formatting dt = datetime.datetime.utcfromtimestamp(t) # append the datestring to the filename # workaround for datetime.strftime not handling '' properly appended_date = ( dt.strftime(self._date_format) if self._date_format != '' else '' ) if appended_date: # in the future, it would be nice for this format # to be supplied as a parameter. result = root + ' ' + appended_date + ext else: result = self.base_filename return result
Return the appropriate filename for the given time based on the defined period.
def emit(self, record): now = time.time() current_name = self.get_filename(now) try: if not self.stream.name == current_name: self._use_file(current_name) except AttributeError: # a stream has not been created, so create one. self._use_file(current_name) logging.StreamHandler.emit(self, record)
Emit a record. Output the record to the file, ensuring that the currently-opened file has the correct date.
def add_vertex(self, v, partition): self.graph.add_vertex(v) self.partitions[partition].add(v)
Add a vertex to the graph :param v: vertex name :param partition: partition to add to
def add_edge(self, fr, to): if fr not in set(self.graph.vs): # ToDo: find out why item can be in set but not dict raise ValueError('can not connect unknown vertices in n-partite graphs, {!r} missing'.format(fr)) elif to not in set(self.graph.vs): raise ValueError('can not connect unknown vertices in n-partite graphs, {!r} missing'.format(to)) self.graph.add_edge(fr, to)
Add an outward edge to a vertex :param fr: The source vertex. :param to: The destination vertex.
def fix_imports(script): with open(script, 'r') as f_script: lines = f_script.read().splitlines() new_lines = [] for l in lines: if l.startswith("import "): l = "from . " + l if "from PyQt5 import" in l: l = l.replace("from PyQt5 import", "from pyqode.qt import") new_lines.append(l) with open(script, 'w') as f_script: f_script.write("\n".join(new_lines))
Replace "from PyQt5 import" with "from pyqode.qt import". :param script: script path
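A small sketch of the substitution the function performs on each line (file handling and the relative-import rewrite omitted):

line = "from PyQt5 import QtWidgets"
if "from PyQt5 import" in line:
    line = line.replace("from PyQt5 import", "from pyqode.qt import")
print(line)  # from pyqode.qt import QtWidgets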
def eval_py(self, _globals, _locals): try: params = eval(self.script, _globals, _locals) except NameError as e: raise Exception( 'Failed to evaluate parameters: {}' .format(str(e)) ) except ResolutionError as e: raise Exception('GetOutput: {}'.format(str(e))) return params
Evaluates a file containing a Python params dictionary.
def new(cls, arg): content = None if arg.kind == 'file': if os.path.exists(arg.value): with open(arg.value, 'r') as f: content = f.read() else: raise Exception('File does not exist: {}'.format(arg.value)) elif arg.kind == 'cli': content = arg.value for source_cls in cls.sources: if source_cls.supports_source(arg): return source_cls(content) msg = 'Unsupported Parameter Source "{}"' raise Exception(msg.format(arg.value))
Creates a new Parameter object from the given ParameterArgument.
def minimum_pitch(self): pitch = self.pitch minimal_pitch = [] for p in pitch: minimal_pitch.append(min(p)) return min(minimal_pitch)
Returns the minimal pitch between two neighboring nodes of the mesh in each direction. :return: Minimal pitch in each direction.
def surrounding_nodes(self, position): n_node_index, n_node_position, n_node_error = self.nearest_node(position) if n_node_error == 0.0: index_mod = [] for i in range(len(n_node_index)): new_point = np.asarray(n_node_position) new_point[i] += 1.e-5*np.abs(new_point[i]) try: self.nearest_node(tuple(new_point)) index_mod.append(-1) except ValueError: index_mod.append(1) else: # Check if node_position is larger or smaller in resp. axes than position index_mod = [] for i in range(len(n_node_index)): if n_node_position[i] > position[i]: index_mod.append(-1) else: index_mod.append(1) return tuple(n_node_index), tuple(index_mod)
Returns nearest node indices and direction of opposite node. :param position: Position inside the mesh to search nearest node for as (x,y,z) :return: Nearest node indices and direction of opposite node.
def tokenize(self, string): it = colorise.compat.ifilter(None, self._pattern.finditer(string)) try: t = colorise.compat.next(it) except StopIteration: yield string, False return pos, buf, lm, escapeflag = -1, '', -1, False # Check if we need to yield any starting text if t.start() > 0: yield string[:t.start()], False pos = t.start() it = itertools.chain([t], it) for m in it: start = m.start() e, s = m.group(2) or '', m.group(3) escaped = e.count(self._ESCAPE) % 2 != 0 if escaped: buf += string[pos:m.end(2)-1] + s escapeflag = True else: buf += string[pos:m.start(3)] if buf: yield buf, escapeflag buf = '' escapeflag = False if lm == start: yield '', False yield s, False lm = m.end() pos = m.end() if buf: yield buf, escapeflag escapeflag = False if pos < len(string): yield string[pos:], False
Tokenize a string and return an iterator over its tokens.
def parse(self, format_string): txt, state = '', 0 colorstack = [(None, None)] itokens = self.tokenize(format_string) for token, escaped in itokens: if token == self._START_TOKEN and not escaped: if txt: yield txt, colorstack[-1] txt = '' state += 1 colors = self.extract_syntax(colorise.compat.next(itokens)[0]) colorstack.append(tuple(b or a for a, b in zip(colorstack[-1], colors))) elif token == self._FMT_TOKEN and not escaped: # if state == 0: # raise ColorSyntaxError("Missing '{0}'" # .format(self._START_TOKEN)) if state % 2 != 0: state += 1 else: txt += token elif token == self._STOP_TOKEN and not escaped: if state < 2: raise ColorSyntaxError("Missing '{0}' or '{1}'" .format(self._STOP_TOKEN, self._FMT_TOKEN)) if txt: yield txt, colorstack[-1] txt = '' state -= 2 colorstack.pop() else: txt += token if state != 0: raise ColorSyntaxError("Invalid color format") if txt: yield txt, colorstack[-1]
Parse color syntax from a formatted string.
def extract_syntax(self, syntax): tokens = syntax.split(self._COLOR_DELIM) r = [None, None] for token in tokens: for i, e in enumerate(('fg=', 'bg=')): if token.startswith(e): r[i] = token[3:] if r == [None, None]: raise ColorSyntaxError("Unexpected color syntax '{0}'" .format(token)) return tuple(r)
Parse and extract color/markup syntax from a format string.
def from_mapping(cls, evidence_mapping): return cls(metadata_map=MetadataMap.from_mapping(evidence_mapping['metadataMap']), copyright=evidence_mapping['copyright'], id=evidence_mapping['id'], terms_of_use=evidence_mapping['termsOfUse'], document=evidence_mapping['document'], title=evidence_mapping['title'], text=evidence_mapping['text'], value=evidence_mapping['value'])
Create an Evidence instance from the given mapping :param evidence_mapping: a mapping (e.g. dict) of values provided by Watson :return: a new Evidence
def to_obj(cls, obj_data=None, *fields, **field_map): ''' prioritize obj_dict when there are conflicts ''' obj_dict = obj_data.__dict__ if hasattr(obj_data, '__dict__') else obj_data if not fields: fields = obj_dict.keys() obj = cls() update_obj(obj_dict, obj, *fields, **field_map) return obj
prioritize obj_dict when there are conflicts
def with_ctx(func=None): ''' Auto create a new context if not available ''' if not func: return functools.partial(with_ctx) @functools.wraps(func) def func_with_context(_obj, *args, **kwargs): if 'ctx' not in kwargs or kwargs['ctx'] is None: # if context is empty, ensure context with _obj.ctx() as new_ctx: kwargs['ctx'] = new_ctx return func(_obj, *args, **kwargs) else: # if context is available, just call the function return func(_obj, *args, **kwargs) return func_with_context
Auto create a new context if not available
def open(self, auto_commit=None, schema=None): ''' Create a context to execute queries ''' if schema is None: schema = self.schema ac = auto_commit if auto_commit is not None else schema.auto_commit exe = ExecutionContext(self.path, schema=schema, auto_commit=ac) # setup DB if required if not os.path.isfile(self.path) or os.path.getsize(self.path) == 0: getLogger().warning("DB does not exist at {}. Setup is required.".format(self.path)) # run setup files if schema is not None and schema.setup_files: for file_path in schema.setup_files: getLogger().debug("Executing script file: {}".format(file_path)) exe.cur.executescript(self.read_file(file_path)) # run setup scripts if schema.setup_scripts: for script in schema.setup_scripts: exe.cur.executescript(script) return exe
Create a context to execute queries
def build_insert(self, table, values, columns=None): ''' Insert an active record into DB and return lastrowid if available ''' if not columns: columns = table.columns if len(values) < len(columns): column_names = ','.join(columns[-len(values):]) else: column_names = ','.join(columns) query = "INSERT INTO %s (%s) VALUES (%s) " % (table.name, column_names, ','.join(['?'] * len(values))) return query
Insert an active record into DB and return lastrowid if available
def select_record(self, table, where=None, values=None, orderby=None, limit=None, columns=None): ''' Support these keywords where, values, orderby, limit and columns ''' query = self.schema.query_builder.build_select(table, where, orderby, limit, columns) return table.to_table(self.execute(query, values), columns=columns)
Support these keywords where, values, orderby, limit and columns
def should_be_excluded(name, exclude_patterns): for pattern in exclude_patterns: if fnmatch.fnmatch(name, pattern): return True return False
Check if a name should be excluded. Returns True if name matches at least one of the exclude patterns in the exclude_patterns list.
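A usage sketch of the same check; fnmatch gives shell-style wildcard matching, and any() short-circuits on the first matching pattern:

import fnmatch

exclude_patterns = ['*.tmp', '.git*']
for name in ('notes.txt', 'scratch.tmp', '.gitignore'):
    excluded = any(fnmatch.fnmatch(name, p) for p in exclude_patterns)
    print(name, excluded)  # notes.txt False / scratch.tmp True / .gitignore True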
def filter_visited(curr_dir, subdirs, already_visited, follow_dirlinks, on_error): filtered = [] to_visit = set() _already_visited = already_visited.copy() try: # mark the current directory as visited, so we catch symlinks to it # immediately instead of after one iteration of the directory loop file_info = os.stat(curr_dir) if follow_dirlinks else os.lstat(curr_dir) _already_visited.add((file_info.st_dev, file_info.st_ino)) except OSError as e: on_error(e) for subdir in subdirs: full_path = os.path.join(curr_dir, subdir) try: file_info = os.stat(full_path) if follow_dirlinks else os.lstat(full_path) except OSError as e: on_error(e) continue if not follow_dirlinks and stat.S_ISLNK(file_info.st_mode): # following links to dirs is disabled, ignore this one continue dev_inode = (file_info.st_dev, file_info.st_ino) if dev_inode not in _already_visited: filtered.append(subdir) to_visit.add(dev_inode) else: on_error(OSError(errno.ELOOP, "directory loop detected", full_path)) return filtered, _already_visited.union(to_visit)
Filter subdirs that have already been visited. This is used to avoid loops in the search performed by os.walk() in index_files_by_size. curr_dir is the path of the current directory, as returned by os.walk(). subdirs is the list of subdirectories for the current directory, as returned by os.walk(). already_visited is a set of tuples (st_dev, st_ino) of already visited directories. This set will not be modified. on_error is a function f(OSError) -> None, to be called in case of error. Returns a tuple: the new (possibly filtered) subdirs list, and a new set of already visited directories, now including the subdirs.
def calculate_md5(filename, length): assert length >= 0 # shortcut: MD5 of an empty string is 'd41d8cd98f00b204e9800998ecf8427e', # represented here in binary if length == 0: return '\xd4\x1d\x8c\xd9\x8f\x00\xb2\x04\xe9\x80\t\x98\xec\xf8\x42\x7e' md5_summer = hashlib.md5() f = open(filename, 'rb') try: bytes_read = 0 while bytes_read < length: chunk_size = min(MD5_CHUNK_SIZE, length - bytes_read) chunk = f.read(chunk_size) if not chunk: # found EOF: means length was larger than the file size, or # file was truncated while reading -- print warning? break md5_summer.update(chunk) bytes_read += len(chunk) finally: f.close() md5 = md5_summer.digest() return md5
Calculate the MD5 hash of a file, up to length bytes. Returns the MD5 in its binary form, as a 16-byte string. Raises IOError or OSError in case of error.
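A quick check of the empty-input shortcut mentioned above; note that an MD5 digest is 16 bytes long:

import hashlib

print(hashlib.md5(b'').hexdigest())    # d41d8cd98f00b204e9800998ecf8427e
print(len(hashlib.md5(b'').digest()))  # 16 -- length of the binary digest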
def semimajor(P,M): if type(P) != Quantity: P = P*u.day if type(M) != Quantity: M = M*u.M_sun a = ((P/2/np.pi)**2*const.G*M)**(1./3) return a.to(u.AU)
P, M can be ``Quantity`` objects; otherwise default to day, M_sun
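A worked example, assuming astropy is installed: Kepler's third law for an Earth-like orbit (P = 365.25 d, M = 1 M_sun) gives a semimajor axis of about 1 AU.

import numpy as np
from astropy import units as u
from astropy import constants as const

P = 365.25 * u.day
M = 1.0 * u.M_sun
a = ((P / (2 * np.pi))**2 * const.G * M)**(1. / 3)
print(a.to(u.AU))  # ~1.0 AU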
def random_spherepos(n): signs = np.sign(rand.uniform(-1,1,size=n)) thetas = Angle(np.arccos(rand.uniform(size=n)*signs),unit=u.rad) #random b/w 0 and 180 phis = Angle(rand.uniform(0,2*np.pi,size=n),unit=u.rad) c = SkyCoord(phis,thetas,1,representation='physicsspherical') return c
returns SkyCoord object with n positions randomly oriented on the unit sphere Parameters ---------- n : int number of positions desired Returns ------- c : ``SkyCoord`` object with random positions
def orbitproject(x,y,inc,phi=0,psi=0): x2 = x*np.cos(phi) + y*np.sin(phi) y2 = -x*np.sin(phi) + y*np.cos(phi) z2 = y2*np.sin(inc) y2 = y2*np.cos(inc) xf = x2*np.cos(psi) - y2*np.sin(psi) yf = x2*np.sin(psi) + y2*np.cos(psi) return (xf,yf,z2)
Transform x,y planar coordinates into observer's coordinate frame. x,y are coordinates in z=0 plane (plane of the orbit) observer is at (inc, phi) on celestial sphere (angles in radians); psi is orientation of final x-y axes about the (inc,phi) vector. Returns x,y,z values in observer's coordinate frame, where x,y are now plane-of-sky coordinates and z is along the line of sight. Parameters ---------- x,y : float or array-like Coordinates to transform inc : float or array-like Polar angle(s) of observer (where inc=0 corresponds to north pole of original x-y plane). This angle is the same as standard "inclination." phi : float or array-like, optional Azimuthal angle of observer around z-axis psi : float or array-like, optional Orientation of final observer coordinate frame (azimuthal around (inc,phi) vector). Returns ------- x,y,z : ``ndarray`` Coordinates in observers' frames. x,y in "plane of sky" and z along line of sight.
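A small check of the projection (the function repeated here as defined above): with inc = 90 degrees (edge-on) and phi = psi = 0, the orbit's y axis maps entirely onto the line of sight (z), while x stays in the plane of the sky.

import numpy as np

def orbitproject(x, y, inc, phi=0, psi=0):
    # same transformation as in the snippet above
    x2 = x * np.cos(phi) + y * np.sin(phi)
    y2 = -x * np.sin(phi) + y * np.cos(phi)
    z2 = y2 * np.sin(inc)
    y2 = y2 * np.cos(inc)
    xf = x2 * np.cos(psi) - y2 * np.sin(psi)
    yf = x2 * np.sin(psi) + y2 * np.cos(psi)
    return (xf, yf, z2)

xf, yf, z = orbitproject(np.array([1.0, 0.0]), np.array([0.0, 1.0]), np.pi / 2)
print(xf)  # [1. 0.]
print(yf)  # ~[0. 0.]  (cos(pi/2) is ~6e-17, not exactly zero)
print(z)   # [0. 1.]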
def to_dict(self): return { inflection.camelize(k, False): v for k, v in self.__dict__.items() if v }
Return a dict of all instance variables with truthy values, with key names camelized
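Roughly what the comprehension produces, assuming the third-party inflection package: falsy values are dropped and keys are lower-camelized.

import inflection

data = {'first_name': 'Ada', 'last_name': 'Lovelace', 'middle_name': ''}
print({inflection.camelize(k, False): v for k, v in data.items() if v})
# {'firstName': 'Ada', 'lastName': 'Lovelace'}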
def fully_expanded_path(self): return os.path.abspath( os.path.normpath( os.path.normcase( os.path.expandvars( os.path.expanduser(self.path)))))
Returns the fully expanded absolute path. Calls os.path.abspath, normpath, normcase, expandvars and expanduser.
def depth(self): return len(self.path.rstrip(os.sep).split(os.sep))
Returns the number of ancestors of this directory.
def ancestors(self, stop=None): folder = self while folder.parent != stop: if folder.parent == folder: return yield folder.parent folder = folder.parent
Generates the parents until stop or the absolute root directory is reached.
def is_descendant_of(self, ancestor): stop = Folder(ancestor) for folder in self.ancestors(): if folder == stop: return True if stop.depth > folder.depth: return False return False
Checks if this folder is inside the given ancestor.
def get_relative_path(self, root): if self.path == root: return '' ancestors = self.ancestors(stop=root) return functools.reduce(lambda f, p: Folder(p.name).child(f), ancestors, self.name)
Gets the fragment of the current path starting at root.
def get_mirror(self, target_root, source_root=None): fragment = self.get_relative_path( source_root if source_root else self.parent) return Folder(target_root).child(fragment)
Returns a File or Folder object that represents what this fragment of the directory tree, starting at `source_root`, would be if it were copied to `target_root`. >>> Folder('/usr/local/hyde/stuff').get_mirror('/usr/tmp', source_root='/usr/local/hyde') Folder('/usr/tmp/stuff')
def file_or_folder(path): target = unicode(path) return Folder(target) if os.path.isdir(target) else File(target)
Returns a File or Folder object that would represent the given path.
def is_binary(self): with open(self.path, 'rb') as fin: CHUNKSIZE = 1024 while 1: chunk = fin.read(CHUNKSIZE) if b'\0' in chunk: return True if len(chunk) < CHUNKSIZE: break return False
Return true if this is a binary file.
def make_temp(text): import tempfile (handle, path) = tempfile.mkstemp(text=True) os.close(handle) afile = File(path) afile.write(text) return afile
Creates a temporary file and writes the `text` into it
def read_all(self, encoding='utf-8'): logger.info("Reading everything from %s" % self) with codecs.open(self.path, 'r', encoding) as fin: read_text = fin.read() return read_text
Reads from the file and returns the content as a string.
def write(self, text, encoding="utf-8"): logger.info("Writing to %s" % self) with codecs.open(self.path, 'w', encoding) as fout: fout.write(text)
Writes the given text to the file using the given encoding.
def copy_to(self, destination): target = self.__get_destination__(destination) logger.info("Copying %s to %s" % (self, target)) shutil.copy(self.path, unicode(destination)) return target
Copies the file to the given destination. Returns a File object that represents the target file. `destination` must be a File or Folder object.
def etag(self): CHUNKSIZE = 1024 * 64 from hashlib import md5 hash = md5() with open(self.path) as fin: chunk = fin.read(CHUNKSIZE) while chunk: hash.update(chunk) chunk = fin.read(CHUNKSIZE) return hash.hexdigest()
Generates etag from file contents.
def walk(self, walk_folders=False, walk_files=False): if not walk_files and not walk_folders: return for root, _, a_files in os.walk(self.folder.path, followlinks=True): folder = Folder(root) if walk_folders: yield folder if walk_files: for a_file in a_files: if (not self.pattern or fnmatch.fnmatch(a_file, self.pattern)): yield File(folder.child(a_file))
A simple generator that yields a File or Folder object based on the arguments.
def list(self, list_folders=False, list_files=False): a_files = os.listdir(self.folder.path) for a_file in a_files: path = self.folder.child(a_file) if os.path.isdir(path): if list_folders: yield Folder(path) elif list_files: if not self.pattern or fnmatch.fnmatch(a_file, self.pattern): yield File(path)
A simple generator that yields a File or Folder object based on the arguments.
def child_folder(self, fragment): return Folder(os.path.join(self.path, Folder(fragment).path))
Returns a folder object by combining the fragment with this folder's path
def child(self, fragment): return os.path.join(self.path, FS(fragment).path)
Returns a path of a child item represented by `fragment`.
def make(self): try: if not self.exists: logger.info("Creating %s" % self.path) os.makedirs(self.path) except os.error: pass return self
Creates this directory and any of the missing directories in the path. Any errors that may occur are eaten.
def zip(self, target=None, basepath=None): target = self.parent.child(target or self.name + '.zip') basepath = basepath or self.path from zipfile import ZipFile with ZipFile(target, 'w') as zip: with self.walker as walker: @walker.file_visitor def add_file(f): zip.write(f.path, f.get_relative_path(basepath))
Zips the contents of this folder. If `target` is not provided, <name>.zip is used instead. `basepath` is used to specify the base path for files in the archive. The path stored along with the files in the archive will be relative to the `basepath`.
def delete(self): if self.exists: logger.info("Deleting %s" % self.path) shutil.rmtree(self.path)
Deletes the directory if it exists.
def move_to(self, destination): target = self.__get_destination__(destination) logger.info("Move %s to %s" % (self, target)) shutil.move(self.path, unicode(target)) return target
Moves this directory to the given destination. Returns a Folder object that represents the moved directory.
def rename_to(self, destination_name): target = self.parent.child_folder(destination_name) logger.info("Rename %s to %s" % (self, target)) shutil.move(self.path, unicode(target)) return target
Renames this directory to the given name. Returns a Folder object that represents the renamed directory.
def _create_target_tree(self, target): source = self with source.walker as walker: @walker.folder_visitor def visit_folder(folder): """ Create the mirror directory """ if folder != source: Folder(folder.get_mirror(target, source)).make()
There is a bug in dir_util that makes `copy_tree` crash if a folder in the tree has been deleted before and readded now. To workaround the bug, we first walk the tree and create directories that are needed.
def copy_contents_to(self, destination): logger.info("Copying contents of %s to %s" % (self, destination)) target = Folder(destination) target.make() self._create_target_tree(target) dir_util.copy_tree(self.path, unicode(target)) return target
Copies the contents of this directory to the given destination. Returns a Folder object that represents the destination directory.
def __start(self): thread = Thread(target=self.__loop, args=()) thread.daemon = True # daemonize thread thread.start() self.__enabled = True
Start a new thread to process Cron
def __dict_to_BetterDict(self, attr): if type(self[attr]) == dict: self[attr] = BetterDict(self[attr]) return self[attr]
Convert the passed attr to a BetterDict if the value is a dict Returns: The new value of the passed attribute.
def _bd_(self): if not getattr(self, '__bd__', False): self.__bd = BetterDictLookUp(self) return self.__bd
Property that allows dot lookups of otherwise hidden attributes.
def highlight_differences(s1, s2, color): ls1, ls2 = len(s1), len(s2) diff_indices = [i for i, (a, b) in enumerate(zip(s1, s2)) if a != b] print(s1) if ls2 > ls1: colorise.cprint('_' * (ls2-ls1), fg=color) else: print() colorise.highlight(s2, indices=diff_indices, fg=color, end='') if ls1 > ls2: colorise.cprint('_' * (ls1-ls2), fg=color) else: print()
Highlight the characters in s2 that differ from those in s1.
def parse_xml(self, xml): ''' :param key_xml: lxml.etree.Element representing a single VocabularyCodeSet ''' xmlutils = XmlUtils(xml) self.name = xmlutils.get_string_by_xpath('name') self.family = xmlutils.get_string_by_xpath('family') self.version = xmlutils.get_string_by_xpath('version') for item in xml.xpath('code-item'): self.code_item.append(VocabularyCodeItem(item)) self.is_vocab_truncated = xmlutils.get_bool_by_xpath('is-vocab-truncated') self.language = xmlutils.get_lang()
:param key_xml: lxml.etree.Element representing a single VocabularyCodeSet
def normalize(text, mode='NFKC', ignore=''): text = text.replace(u'〜', u'ー').replace(u'~', u'ー') text = text.replace(u'’', "'").replace(u'”', '"').replace(u'“', '``') text = text.replace(u'―', '-').replace(u'‐', u'-') return unicodedata.normalize(mode, text)
Convert Half-width (Hankaku) Katakana to Full-width (Zenkaku) Katakana, Full-width (Zenkaku) ASCII and DIGIT to Half-width (Hankaku) ASCII and DIGIT. Additionally, Full-width wave dash (〜) etc. are normalized. Params: <unicode> text <unicode> ignore Return: <unicode> converted_text
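A brief illustration of what NFKC normalization does to the width classes mentioned above (hypothetical input string):

import unicodedata

text = u'ＡＢＣ１２３ ｶﾀｶﾅ'  # full-width ASCII/digits, half-width katakana
print(unicodedata.normalize('NFKC', text))  # 'ABC123 カタカナ'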
def create_jinja_env(): template_dir = os.path.join(os.path.dirname(__file__), 'templates') env = jinja2.Environment( loader=jinja2.FileSystemLoader(template_dir), autoescape=jinja2.select_autoescape(['html']) ) env.filters['simple_date'] = filter_simple_date env.filters['paragraphify'] = filter_paragraphify return env
Create a Jinja2 `~jinja2.Environment`. Returns ------- env : `jinja2.Environment` Jinja2 template rendering environment, configured to use templates in ``templates/``.
def render_homepage(config, env): template = env.get_template('homepage.jinja') rendered_page = template.render( config=config) return rendered_page
Render the homepage.jinja template.
def filter_paragraphify(value): value = re.sub(r'\r\n|\r|\n', '\n', value) # Normalize newlines paras = re.split('\n{2,}', value) paras = ['<p>{0}</p>'.format(p) for p in paras if len(p) > 0] return jinja2.Markup('\n\n'.join(paras))
Convert text into one or more paragraphs, including <p> tags. Based on https://gist.github.com/cemk/1324543
def _init_browser(self): self.browser = splinter.Browser('phantomjs') self.browser.visit(self.server_url + "/youraccount/login") try: self.browser.fill('nickname', self.user) self.browser.fill('password', self.password) except: self.browser.fill('p_un', self.user) self.browser.fill('p_pw', self.password) self.browser.fill('login_method', self.login_method) self.browser.find_by_css('input[type=submit]').click()
Override in an appropriate way to prepare a logged in browser.
def search_with_retry(self, sleeptime=3.0, retrycount=3, **params): results = [] count = 0 while count < retrycount: try: results = self.search(**params) break except requests.exceptions.Timeout: sys.stderr.write("Timeout while searching...Retrying\n") time.sleep(sleeptime) count += 1 else: sys.stderr.write( "Aborting search after %d attempts.\n" % (retrycount,)) return results
Perform a search given a dictionary of ``search(...)`` parameters. It accounts for server timeouts as necessary and will retry some number of times. :param sleeptime: number of seconds to sleep between retries :param retrycount: number of times to retry given search :param params: search parameters :return: records in given format
def get_records_from_basket(self, bskid, group_basket=False, read_cache=True): if bskid not in self.cached_baskets or not read_cache: if self.user: if group_basket: group_basket = '&category=G' else: group_basket = '' results = requests.get( self.server_url + "/yourbaskets/display?of=xm&bskid=" + str(bskid) + group_basket, cookies=self.cookies, stream=True) else: results = requests.get( self.server_url + "/yourbaskets/display_public?of=xm&bskid=" + str(bskid), stream=True) else: return self.cached_baskets[bskid] parsed_records = self._parse_results(results.raw, self.cached_records) self.cached_baskets[bskid] = parsed_records return parsed_records
Returns the records from the (public) basket with given bskid
def upload_marcxml(self, marcxml, mode): if mode not in ["-i", "-r", "-c", "-a", "-ir"]: raise NameError("Incorrect mode " + str(mode)) return requests.post(self.server_url + "/batchuploader/robotupload", data={'file': marcxml, 'mode': mode}, headers={'User-Agent': CFG_USER_AGENT})
Upload a record to the server. :param marcxml: the XML to upload. :param mode: the mode to use for the upload. - "-i" insert new records - "-r" replace existing records - "-c" correct fields of records - "-a" append fields to records - "-ir" insert record or replace if it exists
def url(self): if self.server_url is not None and \ self.recid is not None: return '/'.join( [self.server_url, CFG_SITE_RECORD, str(self.recid)]) else: return None
Returns the URL to this record. Returns None if not known
def clean_list_of_twitter_list(list_of_twitter_lists, sent_tokenize, _treebank_word_tokenize, tagger, lemmatizer, lemmatize, stopset, first_cap_re, all_cap_re, digits_punctuation_whitespace_re, pos_set): list_of_keyword_sets = list() append_keyword_set = list_of_keyword_sets.append list_of_lemma_to_keywordbags = list() append_lemma_to_keywordbag = list_of_lemma_to_keywordbags.append if list_of_twitter_lists is not None: for twitter_list in list_of_twitter_lists: if twitter_list is not None: keyword_set, lemma_to_keywordbag = clean_twitter_list(twitter_list, sent_tokenize, _treebank_word_tokenize, tagger, lemmatizer, lemmatize, stopset, first_cap_re, all_cap_re, digits_punctuation_whitespace_re, pos_set) append_keyword_set(keyword_set) append_lemma_to_keywordbag(lemma_to_keywordbag) return list_of_keyword_sets, list_of_lemma_to_keywordbags
Extracts the sets of keywords for each Twitter list. Inputs: - list_of_twitter_lists: A python list of Twitter lists in json format. - lemmatizing: A string containing one of the following: "porter", "snowball" or "wordnet". Output: - list_of_keyword_sets: A list of sets of keywords (i.e. not a bag-of-words) in python set format. - list_of_lemma_to_keywordbags: List of python dicts that map stems/lemmas to original topic keywords.
def user_twitter_list_bag_of_words(twitter_list_corpus, sent_tokenize, _treebank_word_tokenize, tagger, lemmatizer, lemmatize, stopset, first_cap_re, all_cap_re, digits_punctuation_whitespace_re, pos_set): # Extract a bag-of-words from a list of Twitter lists. # May result in empty sets list_of_keyword_sets, list_of_lemma_to_keywordbags = clean_list_of_twitter_list(twitter_list_corpus, sent_tokenize, _treebank_word_tokenize, tagger, lemmatizer, lemmatize, stopset, first_cap_re, all_cap_re, digits_punctuation_whitespace_re, pos_set) # Reduce keyword sets. bag_of_words = reduce_list_of_bags_of_words(list_of_keyword_sets) # Reduce lemma to keywordbag maps. lemma_to_keywordbag_total = defaultdict(lambda: defaultdict(int)) for lemma_to_keywordbag in list_of_lemma_to_keywordbags: for lemma, keywordbag in lemma_to_keywordbag.items(): for keyword, multiplicity in keywordbag.items(): lemma_to_keywordbag_total[lemma][keyword] += multiplicity return bag_of_words, lemma_to_keywordbag_total
Extract a bag-of-words for a corpus of Twitter lists pertaining to a Twitter user. Inputs: - twitter_list_corpus: A python list of Twitter lists in json format. - lemmatizing: A string containing one of the following: "porter", "snowball" or "wordnet". Output: - bag_of_words: A bag-of-words in python dictionary format. - lemma_to_keywordbag_total: Aggregated python dictionary that maps stems/lemmas to original topic keywords.
def response(self, request, exception): handler = self.lookup(exception) response = None try: if handler: response = handler(request=request, exception=exception) if response is None: response = self.default(request=request, exception=exception) except Exception: self.log(format_exc()) if self.debug: url = getattr(request, 'url', 'unknown') response_message = ( 'Exception raised in exception handler "{}" ' 'for uri: "{}"\n{}').format( handler.__name__, url, format_exc()) self._log.error(response_message) return text(response_message, 500) else: return text('An error occurred while handling an error', 500) return response
Fetches and executes an exception handler and returns a response object :param request: Request :param exception: Exception to handle :return: Response object
def grouper(iterable, n, pad_value=None): chunk_gen = (chunk for chunk in zip_longest(*[iter(iterable)]*n, fillvalue=pad_value)) return chunk_gen
Returns a generator of n-length chunks of an input iterable, with appropriate padding at the end. Example: grouper('abcdefg', 3, 'x') --> ('a','b','c'), ('d','e','f'), ('g','x','x') Inputs: - iterable: The source iterable that needs to be chunkified. - n: The size of the chunks. - pad_value: The value with which the last chunk will be padded. Output: - chunk_gen: A generator of n-length chunks of an input iterable.
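A minimal sketch of the chunking idiom, assuming Python 3's itertools.zip_longest (the original may import it differently):

from itertools import zip_longest

def grouper(iterable, n, pad_value=None):
    # one shared iterator repeated n times yields consecutive n-tuples
    return (chunk for chunk in zip_longest(*[iter(iterable)] * n, fillvalue=pad_value))

print(list(grouper('abcdefg', 3, 'x')))
# [('a', 'b', 'c'), ('d', 'e', 'f'), ('g', 'x', 'x')]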
def chunks(iterable, n): for i in np.arange(0, len(iterable), n): yield iterable[i:i+n]
A python generator that yields n-length sub-list chunks. Input: - iterable: The input list that is to be separated into chunks. - n: The chunk size; should be set to 100 for the Twitter API use case, unless the Twitter API changes. Yields: - sub_list: List chunks of length at most n.
def split_every(iterable, n): # TODO: Remove this, or make it return a generator. i = iter(iterable) piece = list(islice(i, n)) while piece: yield piece piece = list(islice(i, n))
A generator of n-length chunks of an input iterable
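Usage of the islice-based splitter above; unlike grouper, the last piece is not padded:

from itertools import islice

def split_every(iterable, n):
    # same islice-based loop as above
    i = iter(iterable)
    piece = list(islice(i, n))
    while piece:
        yield piece
        piece = list(islice(i, n))

print(list(split_every(range(7), 3)))  # [[0, 1, 2], [3, 4, 5], [6]]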
def merge_properties(item_properties, prop_name, merge_value): existing_value = item_properties.get(prop_name, None) if not existing_value: # A node without existing values for the property item_properties[prop_name] = merge_value else: if type(merge_value) is int or type(merge_value) is str: item_properties[prop_name] = existing_value + merge_value elif type(merge_value) is list: item_properties[prop_name] = merge_list(existing_value, merge_value) else: return False return item_properties
Tries to figure out which type of property value that should be merged and invoke the right function. Returns new properties if the merge was successful otherwise False.
def __rmath(self, f, x): d = {} if isinstance(x, (int, long, float, complex)): for i in self.__d: d[i] = f(x , self.__d[i]) else: raise ValueError('Cannot execute reverse operator, only (int, float, complex) as first operand possible') return d
Reverse operator function :param f: operator.add/sub/mul... the operator to apply :param x: the other operand the field should be added/subtracted... with :return: dictionary (same shape as field.d) with the result of the operation
def generate_id(self, element): if not element.has_attribute('id'): element.set_attribute('id', self.prefix_id + str(self.count)) self.count = self.count + 1
Generate an id for an element. :param element: The element. :type element: hatemile.util.html.HTMLDOMElement
def fetch_state_data(self, states): print("Fetching census data") for table in CensusTable.objects.all(): api = self.get_series(table.series) for variable in table.variables.all(): estimate = "{}_{}".format(table.code, variable.code) print( ">> Fetching {} {} {}".format( table.year, table.series, estimate ) ) for state in tqdm(states): self.get_state_estimates_by_state( api=api, table=table, variable=variable, estimate=estimate, state=state, ) self.get_county_estimates_by_state( api=api, table=table, variable=variable, estimate=estimate, state=state, ) self.get_district_estimates_by_state( api=api, table=table, variable=variable, estimate=estimate, state=state, )
Fetch census estimates from table.
def has(self, name): for a in self.all_annotations: if a.name == name: return True return False
Returns True if there is at least one annotation by a given name, otherwise False.
def get_first(self, name): for a in self.all_annotations: if a.name == name: return a return None
Get the first annotation by a given name.
def get_all(self, name): return [annot for annot in self.all_annotations if annot.name == name]
Get all the annotations by a given name.
def first_value_of(self, name, default_value = None): vals = self.values_of(name) if vals is not None: return vals if type(vals) is not list else vals[0] return default_value
Return the first value of a particular param by name if it exists, otherwise the default value.
def get_long_description(): with open( os.path.join(BASE_DIRECTORY, 'README.md'), 'r', encoding='utf-8' ) as readme_file: return readme_file.read()
Returns the long description of HaTeMiLe for Python. :return: The long description of HaTeMiLe for Python. :rtype: str
def get_packages(): packages = find_packages(exclude=['tests']) packages.append('') packages.append('js') packages.append(LOCALES_DIRECTORY) for directory in os.listdir(LOCALES_DIRECTORY): packages.append(LOCALES_DIRECTORY + '.' + directory) return packages
Returns the packages used for HaTeMiLe for Python. :return: The packages used for HaTeMiLe for Python. :rtype: list(str)
def get_package_data(): package_data = { '': ['*.xml'], 'js': ['*.js'], LOCALES_DIRECTORY: ['*'] } for directory in os.listdir(LOCALES_DIRECTORY): package_data[LOCALES_DIRECTORY + '.' + directory] = ['*.json'] return package_data
Returns the packages with static files of HaTeMiLe for Python. :return: The packages with static files of HaTeMiLe for Python. :rtype: dict(str, list(str))
def get_requirements(): requirements = [] with open( os.path.join(BASE_DIRECTORY, 'requirements.txt'), 'r', encoding='utf-8' ) as requirements_file: lines = requirements_file.readlines() for line in lines: requirements.append(line.strip()) return requirements
Returns the content of 'requirements.txt' in a list. :return: The content of 'requirements.txt'. :rtype: list(str)
def where_session_id(cls, session_id): try: session = cls.query.filter_by(session_id=session_id).one() return session except (NoResultFound, MultipleResultsFound): return None
Easy way to query by session id
def where_earliest(cls, user_id): return cls.query.filter_by(user_id=user_id)\ .order_by(cls.created_at.asc()).first()
Get earliest session by created_at timestamp
def count(cls, user_id): return cls.query.with_entities( cls.user_id).filter_by(user_id=user_id).count()
Count sessions with user_id
def hydrate_callable_with_edge_node_map( self, edge_node_map, callable_function, parameter_lambda ): def extract_kwargs_dict(*args, **kwargs): return kwargs def extract_args_list(*args, **kwargs): return list(args) args = parameter_lambda(extract_args_list) kwargs = parameter_lambda(extract_kwargs_dict) arg_list = [edge_node_map[node_id] for node_id in list(args)] kwarg_map = {} for kwarg in kwargs: kwarg_map[kwarg] = edge_node_map[kwargs[kwarg]] return callable_function(*arg_list, **kwarg_map)
args and kwargs intentionally not *args and **kwargs
def get_branch(): if os.getenv('GIT_BRANCH'): # Travis branch = os.getenv('GIT_BRANCH') elif os.getenv('BRANCH_NAME'): # Jenkins 2 branch = os.getenv('BRANCH_NAME') else: branch = check_output( "git rev-parse --abbrev-ref HEAD".split(" ") ).decode('utf-8').strip() return branch.replace("/", "_")
Returns the current code branch
def get_version(): try: return check_output( "git describe --tags".split(" ") ).decode('utf-8').strip() except CalledProcessError: return check_output( "git rev-parse --short HEAD".split(" ") ).decode('utf-8').strip()
Returns the current code version
def jenkins_last_build_sha(): job_url = os.getenv('JOB_URL') job_json_url = "{0}/api/json".format(job_url) response = urllib.urlopen(job_json_url) job_data = json.loads(response.read()) last_completed_build_url = job_data['lastCompletedBuild']['url'] last_complete_build_json_url = "{0}/api/json".format(last_completed_build_url) response = urllib.urlopen(last_complete_build_json_url) last_completed_build = json.loads(response.read()) return last_completed_build[1]['lastBuiltRevision']['SHA1']
Returns the sha of the last completed jenkins build for this project. Expects JOB_URL in environment
def get_changed_files_from(old_commit_sha, new_commit_sha): return check_output( "git diff-tree --no-commit-id --name-only -r {0}..{1}".format( old_commit_sha, new_commit_sha ).split(" ") ).decode('utf-8').strip()
Returns a list of the files changed between two commits
def extract_snow_tweets_from_file_generator(json_file_path): with open(json_file_path, "r", encoding="utf-8") as fp: for file_line in fp: tweet = json.loads(file_line) yield tweet
A generator that opens a file containing many json tweets and yields all the tweets contained inside. Input: - json_file_path: The path of a json file containing a tweet in each line. Yields: - tweet: A tweet in python dictionary (json) format.
def extract_all_snow_tweets_from_disk_generator(json_folder_path): # Get a generator with all file paths in the folder json_file_path_generator = (json_folder_path + "/" + name for name in os.listdir(json_folder_path)) for path in json_file_path_generator: for tweet in extract_snow_tweets_from_file_generator(path): yield tweet
A generator that returns all SNOW tweets stored in disk. Input: - json_file_path: The path of the folder containing the raw data. Yields: - tweet: A tweet in python dictionary (json) format.
def store_snow_tweets_from_disk_to_mongodb(snow_tweets_folder): client = pymongo.MongoClient("localhost", 27017) db = client["snow_tweet_storage"] collection = db["tweets"] for tweet in extract_all_snow_tweets_from_disk_generator(snow_tweets_folder): collection.insert(tweet)
Store all SNOW tweets in a mongodb collection.
def save_file(f, full_path): make_dirs_for_file_path(full_path, mode=dju_settings.DJU_IMG_CHMOD_DIR) with open(full_path, 'wb') as t: f.seek(0) while True: buf = f.read(dju_settings.DJU_IMG_RW_FILE_BUFFER_SIZE) if not buf: break t.write(buf) os.chmod(full_path, dju_settings.DJU_IMG_CHMOD_FILE)
Saves file f to full_path and sets file permissions.
def get_profile_configs(profile=None, use_cache=True): if use_cache and profile in _profile_configs_cache: return _profile_configs_cache[profile] profile_conf = None if profile is not None: try: profile_conf = dju_settings.DJU_IMG_UPLOAD_PROFILES[profile] except KeyError: if profile != 'default': raise ValueError(unicode(ERROR_MESSAGES['unknown_profile']) % {'profile': profile}) conf = copy.deepcopy(dju_settings.DJU_IMG_UPLOAD_PROFILE_DEFAULT) if profile_conf: conf.update(copy.deepcopy(profile_conf)) for v_i in xrange(len(conf['VARIANTS'])): v = conf['VARIANTS'][v_i] conf['VARIANTS'][v_i] = copy.deepcopy(dju_settings.DJU_IMG_UPLOAD_PROFILE_VARIANT_DEFAULT) conf['VARIANTS'][v_i].update(v) if use_cache: _profile_configs_cache[profile] = conf return conf
Returns upload configs for profile.
def generate_img_id(profile, ext=None, label=None, tmp=False): if ext and not ext.startswith('.'): ext = '.' + ext if label: label = re.sub(r'[^a-z0-9_\-]', '', label, flags=re.I) label = re.sub(r'_+', '_', label) label = label[:60] return '{profile}:{tmp}{dtstr}_{rand}{label}{ext}'.format( profile=profile, tmp=(dju_settings.DJU_IMG_UPLOAD_TMP_PREFIX if tmp else ''), dtstr=datetime_to_dtstr(), rand=get_random_string(4, 'abcdefghijklmnopqrstuvwxyz0123456789'), label=(('_' + label) if label else ''), ext=(ext or ''), )
Generates img_id.
def get_relative_path_from_img_id(img_id, variant_label=None, ext=None, create_dirs=False): profile, base_name = img_id.split(':', 1) conf = get_profile_configs(profile) if not variant_label: status_suffix = dju_settings.DJU_IMG_UPLOAD_MAIN_SUFFIX else: status_suffix = dju_settings.DJU_IMG_UPLOAD_VARIANT_SUFFIX name, file_ext = os.path.splitext(base_name) prefix = '' if name.startswith(dju_settings.DJU_IMG_UPLOAD_TMP_PREFIX): name = name[len(dju_settings.DJU_IMG_UPLOAD_TMP_PREFIX):] prefix = dju_settings.DJU_IMG_UPLOAD_TMP_PREFIX name_parts = name.split('_', 2) name = '{name}{status_suffix}{hash}'.format( name=name, status_suffix=status_suffix, hash=get_hash('_'.join(name_parts[:2]), variant_label=variant_label) ) if variant_label: name += '_' + variant_label if ext: file_ext = ext elif variant_label: for var_conf in conf['VARIANTS']: var_conf_label = var_conf['LABEL'] or get_variant_label(var_conf) if var_conf_label == variant_label: if var_conf['FORMAT']: file_ext = var_conf['FORMAT'].lower() break if file_ext and not file_ext.startswith('.'): file_ext = '.' + file_ext relative_path = os.path.join( dju_settings.DJU_IMG_UPLOAD_SUBDIR, conf['PATH'], name_parts[0][-2:], (prefix + name + file_ext) ).replace('\\', '/') if create_dirs: path = media_path(relative_path) make_dirs_for_file_path(path, mode=dju_settings.DJU_IMG_CHMOD_DIR) return relative_path
Returns path to file relative MEDIA_URL.
def is_img_id_exists(img_id): main_rel_path = get_relative_path_from_img_id(img_id) main_path = media_path(main_rel_path) return os.path.isfile(main_path)
Checks if img_id has real file on filesystem.