code | docs
---|---
def add_attr2fields(self, attr_name, attr_val, fields=[], exclude=[], include_all_if_empty=True):
for f in self.filter_fields(fields, exclude, include_all_if_empty):
f = self.fields[f.name]
org_val = f.widget.attrs.get(attr_name, '')
f.widget.attrs[attr_name] = '%s %s' % (org_val, attr_val) if org_val else attr_val | add attr to fields |
def add_class2fields(self, html_class, fields=[], exclude=[], include_all_if_empty=True):
self.add_attr2fields('class', html_class, fields, exclude, include_all_if_empty) | add class to html widgets. |
def filter_fields(self, fields=[], exclude=[], include_all_if_empty=True):
if not include_all_if_empty and not fields:
return []
ret = []
for f in self.visible_fields():
if fields and f.name not in fields:
continue
if exclude and f.name in exclude:
continue
ret.append(f)
return ret | filter fields
fields: field names to include
exclude: field names to exclude
include_all_if_empty: if fields is empty, return all fields
return: fields |
def as_required_fields(self, fields=[]):
fields = self.filter_fields(fields)
for f in fields:
f = self.fields[f.name]
f.required = True | set required to True |
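A hedged usage sketch of the helper methods above, assuming they live on a mixin (here called FormHelpersMixin) combined with a standard Django ModelForm; the Article model and field names are illustrative only:

from django import forms

class ArticleForm(FormHelpersMixin, forms.ModelForm):  # FormHelpersMixin = the mixin above (assumed name)
    class Meta:
        model = Article  # illustrative model
        fields = ['title', 'body', 'tags']

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # Add a CSS class to every visible widget except 'tags'
        self.add_class2fields('form-control', exclude=['tags'])
        # Force 'title' to be required regardless of the model definition
        self.as_required_fields(fields=['title'])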
def check_uniqe(self, obj_class, error_msg=_('Must be unique'), **kwargs):
if obj_class.objects.filter(**kwargs).exclude(pk=self.instance.pk):
raise forms.ValidationError(error_msg) | check if this object is unique |
def get_info(pyfile):
    info = {}
    info_re = re.compile(r"^__(\w+)__ = ['\"](.*)['\"]")
    with open(pyfile, 'r') as f:
        for line in f.readlines():
            match = info_re.search(line)
            if match:
                info[match.group(1)] = match.group(2)
    return info | Retrieve dunder values from a pyfile |
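A quick self-contained check of the dunder-matching regex used above:

import re

info_re = re.compile(r"^__(\w+)__ = ['\"](.*)['\"]")
sample = [
    "__version__ = '1.2.3'",
    '__author__ = "Jane Doe"',
    "VERSION = '9.9'",   # not a dunder assignment, ignored
]
info = {}
for line in sample:
    match = info_re.search(line)
    if match:
        info[match.group(1)] = match.group(2)
print(info)   # {'version': '1.2.3', 'author': 'Jane Doe'}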
def main():
args = parse_args()
config_logger(args)
logger = structlog.get_logger(__name__)
if args.show_version:
# only print the version
print_version()
sys.exit(0)
version = pkg_resources.get_distribution('lander').version
logger.info('Lander version {0}'.format(version))
config = Configuration(args=args)
# disable any build confirmed to be a PR with Travis
if config['is_travis_pull_request']:
logger.info('Skipping build from PR.')
sys.exit(0)
lander = Lander(config)
lander.build_site()
logger.info('Build complete')
if config['upload']:
lander.upload_site()
logger.info('Upload complete')
logger.info('Lander complete') | Entrypoint for ``lander`` executable. |
def insert_node(self, node):
if self._is_node_reserved(node):
return False
# Put node in map
self._node_map[node.get_id()] = node
return True | Adds the node if its name is available or it is the same pre-existing node;
returns True if added
returns False if not added |
def merge(self, subordinate_graph):
if not isinstance(subordinate_graph, Graph):
raise Exception("Graph is expected to only merge with a Graph.")
subordinate_nodes = subordinate_graph.get_nodes()
merge_results = []
for node_id in subordinate_nodes:
node = subordinate_nodes[node_id]
merge_results.append((
node.get_id(),
self.insert_node(node)
))
# TODO perhaps throw exception if merge was unsuccessful
return merge_results | merge rules:
00 + 00 == 00 00 + 0B == 0B
0A + 00 == 0A 0A + 0B == 0A
A0 + 00 == A0 A0 + 0B == AB
AA + 00 == AA AA + 0B == AB
00 + B0 == B0 00 + BB == BB
0A + B0 == BA 0A + BB == BA
A0 + B0 == A0 A0 + BB == AB
AA + B0 == AA AA + BB == AA |
def join(self, distbase, location):
sep = ''
if distbase and distbase[-1] not in (':', '/'):
sep = '/'
return distbase + sep + location | Join 'distbase' and 'location' in such way that the
result is a valid scp destination. |
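The joining rule above can be exercised in isolation; a small standalone sketch:

def join(distbase, location):
    sep = ''
    if distbase and distbase[-1] not in (':', '/'):
        sep = '/'
    return distbase + sep + location

print(join('user@host:', 'eggs'))             # user@host:eggs
print(join('user@host:/var/dist', 'eggs'))    # user@host:/var/dist/eggs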
def get_location(self, location, depth=0):
if not location:
return []
if location in self.aliases:
res = []
if depth > MAXALIASDEPTH:
err_exit('Maximum alias depth exceeded: %(location)s' % locals())
for loc in self.aliases[location]:
res.extend(self.get_location(loc, depth+1))
return res
if self.is_server(location):
return [location]
if location == 'pypi':
err_exit('No configuration found for server: pypi\n'
'Please create a ~/.pypirc file')
if self.urlparser.is_url(location):
return [location]
if not self.has_host(location) and self.distbase:
return [self.join(self.distbase, location)]
return [location] | Resolve aliases and apply distbase. |
def get_default_location(self):
res = []
for location in self.distdefault:
res.extend(self.get_location(location))
return res | Return the default location. |
def check_empty_locations(self, locations=None):
if locations is None:
locations = self.locations
if not locations:
err_exit('mkrelease: option -d is required\n%s' % USAGE) | Fail if 'locations' is empty. |
def check_valid_locations(self, locations=None):
if locations is None:
locations = self.locations
for location in locations:
if (not self.is_server(location) and
not self.is_ssh_url(location) and
not self.has_host(location)):
err_exit('Unknown location: %(location)s' % locals()) | Fail if 'locations' contains bad destinations. |
def set_defaults(self, config_file):
self.defaults = Defaults(config_file)
self.locations = Locations(self.defaults)
self.python = Python()
self.setuptools = Setuptools()
self.scp = SCP()
self.scms = SCMFactory()
self.urlparser = URLParser()
self.skipcommit = not self.defaults.commit
self.skiptag = not self.defaults.tag
self.skipregister = False # per server
self.skipupload = False # special
self.push = self.defaults.push
self.develop = False # special
self.quiet = self.defaults.quiet
self.sign = False # per server
self.list = False
self.manifest = self.defaults.manifest
self.identity = '' # per server
self.branch = ''
self.scmtype = ''
self.infoflags = []
self.formats = []
self.distributions = []
self.directory = os.curdir
self.scm = None | Set defaults. |
def list_locations(self):
known = self.defaults.get_known_locations()
for default in self.defaults.distdefault:
if default not in known:
known.add(default)
if not known:
err_exit('No locations', 0)
for location in sorted(known):
if location in self.defaults.distdefault:
print(location, '(default)')
else:
print(location)
sys.exit(0) | Print known dist-locations and exit. |
def get_skipregister(self, location=None):
if location is None:
return self.skipregister or not self.defaults.register
else:
server = self.defaults.servers[location]
if self.skipregister:
return True
elif server.register is not None:
if not self.defaults.register and self.get_skipupload():
return True # prevent override
return not server.register
elif not self.defaults.register:
return True
return False | Return true if the register command is disabled (for the given server.) |
def get_uploadflags(self, location):
uploadflags = []
server = self.defaults.servers[location]
if self.sign:
uploadflags.append('--sign')
elif server.sign is not None:
if server.sign:
uploadflags.append('--sign')
elif self.defaults.sign:
uploadflags.append('--sign')
if self.identity:
if '--sign' not in uploadflags:
uploadflags.append('--sign')
uploadflags.append('--identity="%s"' % self.identity)
elif '--sign' in uploadflags:
if server.identity is not None:
if server.identity:
uploadflags.append('--identity="%s"' % server.identity)
elif self.defaults.identity:
uploadflags.append('--identity="%s"' % self.defaults.identity)
return uploadflags | Return uploadflags for the given server. |
def get_options(self):
args = self.parse_options(self.args)
if args:
self.directory = args[0]
if self.develop:
self.skiptag = True
if not self.develop:
self.develop = self.defaults.develop
if not self.develop:
self.infoflags = self.setuptools.infoflags
if not self.formats:
self.formats = self.defaults.formats
for format in self.formats:
if format == 'zip':
self.distributions.append(('sdist', ['--formats="zip"']))
elif format == 'gztar':
self.distributions.append(('sdist', ['--formats="gztar"']))
elif format == 'egg':
self.distributions.append(('bdist', ['--formats="egg"']))
elif format == 'wheel':
self.distributions.append(('bdist_wheel', []))
if not self.distributions:
self.distributions.append(('sdist', ['--formats="zip"']))
if self.list:
self.list_locations()
if not self.locations:
self.locations.extend(self.locations.get_default_location())
if not (self.skipregister and self.skipupload):
if not (self.get_skipregister() and self.get_skipupload()):
self.locations.check_empty_locations()
self.locations.check_valid_locations()
if len(args) > 1:
if self.urlparser.is_url(self.directory):
self.branch = args[1]
elif self.urlparser.is_ssh_url(self.directory):
self.branch = args[1]
else:
err_exit('mkrelease: invalid arguments\n%s' % USAGE)
if len(args) > 2:
err_exit('mkrelease: too many arguments\n%s' % USAGE) | Process the command line. |
def get_package(self):
directory = self.directory
develop = self.develop
scmtype = self.scmtype
self.scm = self.scms.get_scm(scmtype, directory)
if self.scm.is_valid_url(directory):
directory = self.urlparser.abspath(directory)
self.remoteurl = directory
self.isremote = self.push = True
else:
directory = abspath(expanduser(directory))
self.isremote = False
self.scm.check_valid_sandbox(directory)
self.setuptools.check_valid_package(directory)
name, version = self.setuptools.get_package_info(directory, develop)
print('Releasing', name, version)
if not self.skipcommit:
if self.scm.is_dirty_sandbox(directory):
self.scm.commit_sandbox(directory, name, version, self.push) | Get the URL or sandbox to release. |
def configure_gateway(
cls, launch_jvm: bool = True,
gateway: Union[GatewayParameters, Dict[str, Any]] = None,
callback_server: Union[CallbackServerParameters, Dict[str, Any]] = False,
javaopts: Iterable[str] = (), classpath: Iterable[str] = ''):
assert check_argument_types()
classpath = classpath if isinstance(classpath, str) else os.pathsep.join(classpath)
javaopts = list(javaopts)
# Substitute package names with their absolute directory paths
for match in package_re.finditer(classpath):
pkgname = match.group(1)
module = import_module(pkgname)
module_dir = os.path.dirname(module.__file__)
classpath = classpath.replace(match.group(0), module_dir)
if gateway is None:
gateway = {}
if isinstance(gateway, dict):
gateway.setdefault('eager_load', True)
gateway.setdefault('auto_convert', True)
gateway = GatewayParameters(**gateway)
if isinstance(callback_server, dict):
callback_server = CallbackServerParameters(**callback_server)
elif callback_server is True:
callback_server = CallbackServerParameters()
return launch_jvm, gateway, callback_server, classpath, javaopts | Configure a Py4J gateway.
:param launch_jvm: ``True`` to spawn a Java Virtual Machine in a subprocess and connect to
it, ``False`` to connect to an existing Py4J enabled JVM
:param gateway: either a :class:`~py4j.java_gateway.GatewayParameters` object or a
dictionary of keyword arguments for it
:param callback_server: callback server parameters or a boolean indicating if a
callback server is wanted
:param javaopts: options passed to Java itself
:param classpath: path or iterable of paths to pass to the JVM launcher as the class path |
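A hedged sketch of the normalisation the method performs, using the real py4j parameter classes; the example classpath entries are assumptions:

import os
from importlib import import_module
from py4j.java_gateway import GatewayParameters, CallbackServerParameters

# Classpath substitution in isolation: a bare package name such as 'py4j'
# is replaced by the directory the package is installed in.
module_dir = os.path.dirname(import_module('py4j').__file__)
classpath = os.pathsep.join(['/opt/jars/*', module_dir])

# A dict gateway argument becomes GatewayParameters with these defaults,
# and callback_server=True becomes CallbackServerParameters().
gateway = GatewayParameters(eager_load=True, auto_convert=True)
callback_server = CallbackServerParameters()
print(classpath)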
def load(self, filename, offset):
self.offset = offset
self.filename = filename
self.bootsector = BootSector(
filename=filename,
length=NTFS_BOOTSECTOR_SIZE,
offset=self.offset)
self.mft_table = MftTable(
mft_entry_size=self.bootsector.mft_record_size,
filename=self.filename,
offset=self.mft_table_offset
)
self.mft_table.preload_entries(NUM_SYSTEM_ENTRIES)
self._load_volume_information() | Loads NTFS volume information
Args:
filename (str): Path to file/device to read the volume \
information from.
offset (uint): Valid NTFS partition offset from the beginning \
of the file/device.
Raises:
IOError: If source file/device does not exist or is not readable |
def _get_mft_zone_size(self, num_clusters, mft_zone_multiplier=1):
sizes = {
4: num_clusters >> 1, # 50%
3: (num_clusters * 3) >> 3, # 37.5%
2: num_clusters >> 2, # 25%
}
return sizes.get(mft_zone_multiplier, num_clusters >> 3) | Returns mft zone size in clusters.
From ntfs_progs.1.22. |
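The shift arithmetic above, spelled out for a concrete volume size:

num_clusters = 1_000_000
print(num_clusters >> 1)         # 500000 clusters (50%, multiplier 4)
print((num_clusters * 3) >> 3)   # 375000 clusters (37.5%, multiplier 3)
print(num_clusters >> 3)         # 125000 clusters (12.5%, the default fallback)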
def artist(self):
if not self._artist:
self._artist = Artist(self._artist_id, self._artist_name, self._connection)
return self._artist | :class:`Artist` object of album's artist |
def cover(self):
if not self._cover:
self._cover = Picture(self._cover_url, self._connection)
return self._cover | album cover as :class:`Picture` object |
def export(self):
return {'id' : self.id, 'name' : self.name, 'artist' : self._artist_name, 'artist_id' : self._artist_id, 'cover' : self._cover_url} | Returns a dictionary with all album information.
Use the :meth:`from_export` method to recreate the
:class:`Album` object. |
def close(self):
if hasattr(self, 'iterators'):
for it in self.iterators:
if hasattr(it, 'close'):
it.close() | Closes all the iterators.
This is particularly important if the iterators are files. |
def _update_sorting(self):
key = self.key
sorted_tops = self.sorted_tops
tops = self.tops
iterators = self.iterators
for idx in self.idxs:
try:
tops[idx] = next(iterators[idx])
top_key = key(tops[idx])
if top_key not in sorted_tops:
sorted_tops[top_key] = []
sorted_tops[top_key].append(idx)
except StopIteration:
pass
if len(sorted_tops) == 0:
raise StopIteration
key, self.idxs = sorted_tops.popitem(last=False)
self.c_idx = 0 | Insert new entries into the merged iterator.
:param sorted_tops: A SortedDict.
:param tops: The most recent entry from each iterator.
:param idxs: The indices to update. |
def parse(raw_email):
# type: (six.string_types) -> Tuple[six.string_types, six.string_types]
if not isinstance(raw_email, six.string_types):
raise InvalidEmail("Invalid email: %s" % raw_email)
if not raw_email or pd.isnull(raw_email):
raise InvalidEmail("None or NaN is not a valid email address")
email = raw_email.split("<", 1)[-1].split(">", 1)[0]
chunks = email.split("@", 3)
# git-svn generates emails with several @, e.g.:
# <[email protected]@ce2b1a6d-e550-0410-aec6-3dcde31c8c00>
if len(chunks) < 2:
raise InvalidEmail("Invalid email")
uname = chunks[0].rsplit(" ", 1)[-1]
addr_domain = chunks[1].split(" ", 1)[0]
return uname.split("+", 1)[0], addr_domain | Extract email from a full address. Example:
'John Doe <[email protected]>' -> [email protected]
>>> parse("John Doe <[email protected]")
('me', 'someorg.com')
>>> parse(42) # doctest: +IGNORE_EXCEPTION_DETAIL
Traceback (most recent call last):
...
InvalidEmail: 'Invalid email: 42'
>>> parse(None) # doctest: +IGNORE_EXCEPTION_DETAIL
Traceback (most recent call last):
...
InvalidEmail: 'None or NaN is not a valid email address' |
def university_domains():
# type: () -> set
fpath = os.path.join(
os.path.dirname(__file__), "email_university_domains.csv")
with open(fpath) as fh:
return set(addr_domain.strip() for addr_domain in fh) | Return list of university domains outside of .edu TLD
NOTE: only the 2nd level domain is returned, i.e. for aaa.bbb.uk only bbb.uk
will be returned. This is necessary since many universities use
departmental domains, like cs.cmu.edu or andrew.cmu.edu
NOTE2: .edu domains are not included into this list as they're considered
belonging to universities by default.
How to get the original CSV:
```python
x = requests.get(
"https://raw.githubusercontent.com/Hipo/university-domains-list/"
"master/world_universities_and_domains.json").json()
domains = set(ds for u in x
for ds in u['domains'] if not "edu" in ds.rsplit(".", 2)[-2:])
domains = list(domains)
pd.Series(domains, index=domains, name="domain"
).drop(
["chat.ru"]
).to_csv("email_university_domains.csv", index=False)
``` |
def domain_user_stats():
# type: () -> pd.Series
fname = os.path.join(os.path.dirname(__file__), "email_domain_users.csv")
stats = pd.read_csv(fname, header=0, squeeze=True, index_col=0)
return stats[pd.notnull(stats.index)] | Get number of distinct email addresses in observed domains
TODO: get up to date with new projects layout
How to build email_domain_users.csv:
from collections import defaultdict
import logging
from common import utils as common
import stscraper as scraper
log = logging.getLogger("domain_user_stats")
stats = defaultdict(set)
for ecosystem in common.ECOSYSTEMS:
urls = common.package_urls(ecosystem)
for package_name, url in urls.items():
log.info(package_name)
try:
cs = scraper.commits(url)
except scraper.RepoDoesNotExist:
continue
for email_addr in cs["author_email"].dropna().unique():
if not email_addr or pd.isnull(email_addr):
continue
try:
user, email_domain = clean(email_addr).split("@")
except InvalidEmail:
continue
stats[email_domain].add(user)
s = pd.Series({dm: len(users) for dm, users in stats.items()})
s = s.rename("users").sort_values(ascending=False)
s.to_csv("common/email_domain_users.csv", encoding="utf8", header=True)
return s |
def commercial_domains():
# type: () -> set
dus = domain_user_stats()
es = "test@" + pd.Series(dus.index, index=dus.index)
return set(
dus[~is_public_bulk(es) & ~is_university_bulk(es) & (dus > 1)].index) | Return list of commercial email domains, which means:
- domain is not public
- domain is not university
- it is not personal (more than 1 person using this domain)
>>> "google.com" in commercial_domains()
True
>>> "microsoft.com" in commercial_domains()
True
>>> "isri.cs.cmu.edu" in commercial_domains() # university department
False
>>> "jaraco.com" in commercial_domains() # personal
False |
def is_university(addr):
# type: (Union[str, unicode]) -> bool
addr_domain = domain(addr)
if not addr_domain: # invalid email
return False
chunks = addr_domain.split(".")
if len(chunks) < 2: # local or invalid address
return False
domains = university_domains()
# many universities have departmental emails, such as cs.cmu.edu. However,
# the original dataset only has top level domain (cmu.edu). So, what we need
# to do is to strip leading subdomains until match or nothing to strip:
# isri.cs.cmu.edu (no match) -> cs.cmu.edu (no match) -> cmu.edu (match)
return (chunks[-1] == "edu" and chunks[-2] not in ("england", "australia"))\
or chunks[-2] == "edu" \
or any(".".join(chunks[i:]) in domains for i in range(len(chunks)-1)) | Check if provided email has a university domain
- either in .edu domain
(except public services like england.edu or australia.edu)
- or in .edu.TLD (non-US based institutions, like edu.au)
- or listed in a public list of universities
since universities often have department addresses as well, only the end
is matched. E.g. cs.cmu.edu will match cmu.edu
:param addr: email address
:return: bool
>>> is_university("[email protected]")
True
>>> is_university("[email protected]")
False |
def is_public(addr):
# type: (Union[str, unicode]) -> bool
addr_domain = domain(addr)
if not addr_domain:
# anybody can use invalid email
return True
chunks = addr_domain.rsplit(".", 1)
return len(chunks) < 2 \
or addr_domain.endswith("local") \
or addr_domain in public_domains() | Check if the passed email is registered at a free public mail server
:param addr: email address to check
:return: bool
>>> is_public("[email protected]")
False
>>> is_public("[email protected]")
True |
def pos(self, x=None, y=None):
    if x is not None:
        System.Console.CursorLeft = x
    else:
        x = System.Console.CursorLeft
    if y is not None:
        System.Console.CursorTop = y
    else:
        y = System.Console.CursorTop
    return x, y | Move or query the window cursor. |
def write_plain(self, text, attr=None):
    log(u'write("%s", %s)' % (text, attr))
    if attr is None:
        attr = self.attr
    n = c_int(0)
    self.SetConsoleTextAttribute(self.hout, attr)
    self.WriteConsoleA(self.hout, text, len(text), byref(n), None)
    return len(text) | Write text at current cursor position. |
def text(self, x, y, text, attr=None):
    self.pos(x, y)
    self.write_color(text, attr) | Write text at the given position. |
def rectangle(self, rect, attr=None, fill=u' '):
    oldtop = self.WindowTop
    oldpos = self.pos()
    x0, y0, x1, y1 = rect
    if attr is None:
        attr = self.attr
    if fill:
        rowfill = fill[:1] * abs(x1 - x0)
    else:
        rowfill = u' ' * abs(x1 - x0)
    for y in range(y0, y1):
        System.Console.SetCursorPosition(x0, y)
        self.write_color(rowfill, attr)
    self.pos(*oldpos) | Fill Rectangle. |
def scroll_window(self, lines):
    top = self.WindowTop + lines
    if top < 0:
        top = 0
    if top + System.Console.WindowHeight > System.Console.BufferHeight:
        top = System.Console.BufferHeight
    self.WindowTop = top | Scroll the window by the indicated number of lines. |
def getkeypress(self):
    ck = System.ConsoleKey
    while 1:
        e = System.Console.ReadKey(True)
        if e.Key == System.ConsoleKey.PageDown:  # PageDown
            self.scroll_window(12)
        elif e.Key == System.ConsoleKey.PageUp:  # PageUp
            self.scroll_window(-12)
        elif str(e.KeyChar) == u"\000":  # drop dead keys
            log(u"Deadkey: %s" % e)
            return event(self, e)
        else:
            return event(self, e) | Return next key press event from the queue, ignoring others. |
def title(self, txt=None):
    if txt:
        System.Console.Title = txt
    else:
        return System.Console.Title | Set/get title. |
def size(self, width=None, height=None):
    sc = System.Console
    if width is not None and height is not None:
        sc.BufferWidth, sc.BufferHeight = width, height
    else:
        return sc.BufferWidth, sc.BufferHeight
    if width is not None and height is not None:
        sc.WindowWidth, sc.WindowHeight = width, height
    else:
        return sc.WindowWidth - 1, sc.WindowHeight - 1 | Set/get window size. |
def files(self):
if self._files is None:
self._files = SeriesZipTifHolo._index_files(self.path)
return self._files | List of hologram data file names in the input zip file |
def get_time(self, idx):
# first try to get the time from the TIFF file
# (possible meta data keywords)
ds = self._get_dataset(idx)
thetime = ds.get_time()
if np.isnan(thetime):
# use zipfile date_time
zf = zipfile.ZipFile(self.path)
info = zf.getinfo(self.files[idx])
timetuple = tuple(list(info.date_time) + [0, 0, 0])
thetime = time.mktime(timetuple)
return thetime | Time for each TIFF file
If there are no metadata keyword arguments defined for the
TIFF file format, then the zip file `date_time` value is
used. |
def set_user_variable(input_dict, environment_dict):
# Iterates through the dictionary to retrieve the variable name
command_key = input_dict.keys()[0]
while input_dict[command_key]['name'] != 'variable_name':
input_dict = input_dict[command_key]['children']
variable_name = command_key = input_dict.keys()[0]
# User may accidentally omit this value, we must catch the exception and
# return something readable...
try:
# Iterates through the dictionary to retrieve the variable value
while input_dict[command_key]['name'] != 'variable_value':
input_dict = input_dict[command_key]['children']
variable_value = command_key = input_dict.keys()[0]
except IndexError, e:
raise seash_exceptions.UserError("Error, expected a value to assign to variable")
uservariables[variable_name] = variable_value.strip() | <Purpose>
Seash callback to allow user to define a custom variable and assign a value
to it.
<Arguments>
input_dict: Input dictionary generated by seash_dictionary.parse_command().
environment_dict: Dictionary describing the current seash environment.
For more information, see command_callbacks.py's module docstring.
<Side Effects>
A new variable will be added to the uservariables dictionary.
<Exceptions>
UserError: The user did not provide a value to assign to the variable
<Return>
None |
def show_user_variables(input_dict, environment_dict):
for variable, value in uservariables.iteritems():
print variable+": '"+value+"'" | <Purpose>
Seash callback to allow user to check all variables that they defined.
<Arguments>
input_dict: Input dictionary generated by seash_dictionary.parse_command().
environment_dict: Dictionary describing the current seash environment.
For more information, see command_callbacks.py's module docstring.
<Side Effects>
All the variables currently defined will be printed alongside their values.
<Exceptions>
None
<Return>
None |
def autocomplete(input_list):
# We are only interested if the last token starts with a single '$'
# Double $$'s indicate that it is meant to be a '$', so we don't do anything.
if input_list[-1].startswith('$') and not input_list[-1].startswith('$$'):
# Omit the '$'
partial_variable = input_list[-1][1:]
commands = []
for variable in uservariables:
# No need to include variables that don't match...
if variable.startswith(partial_variable):
# Reconstruct the string
tokens = input_list[:-1] + ['$'+variable]
commands.append(' '.join(tokens))
return commands
return [] | <Purpose>
Returns all valid input completions for the specified command line input.
<Arguments>
input_list: A list of tokens.
<Side Effects>
None
<Exceptions>
None
<Returns>
A list of strings representing valid completions. |
def need_data(self, i):
# If we are not caching, we always grab data from the raw source
if self.caching is False:
return False
logger.debug("Checking cache for data availability at %s." % self.part.location.logstring())
try:
# Tell the DataController that we are going to be reading from the file
with self.read_lock:
self.read_count.value += 1
self.has_read_lock.append(os.getpid())
self.dataset.opennc()
# Test if the cache has the data we need
# If the point we request contains fill values,
# we need data
cached_lookup = self.dataset.get_values('domain', timeinds=[np.asarray([i])], point=self.part.location)
logger.debug("Type of result: %s" % type(cached_lookup))
logger.debug("Double mean of result: %s" % np.mean(np.mean(cached_lookup)))
logger.debug("Type of Double mean of result: %s" % type(np.mean(np.mean(cached_lookup))))
if type(np.mean(np.mean(cached_lookup))) == np.ma.core.MaskedConstant:
need = True
logger.debug("I NEED data. Got back: %s" % cached_lookup)
else:
need = False
logger.debug("I DO NOT NEED data")
except StandardError:
# If the time index doesn't even exist, we need data
need = True
logger.debug("I NEED data (no time index exists in cache)")
finally:
self.dataset.closenc()
with self.read_lock:
self.read_count.value -= 1
self.has_read_lock.remove(os.getpid())
return need | Method to test if cache contains the data that
the particle needs |
def linterp(self, setx, sety, x):
if math.isnan(sety[0]) or math.isnan(setx[0]):
return np.nan
#if math.isnan(sety[0]):
# sety[0] = 0.
#if math.isnan(sety[1]):
# sety[1] = 0.
return sety[0] + (x - setx[0]) * ( (sety[1]-sety[0]) / (setx[1]-setx[0]) ) | Linear interp of model data values between time steps |
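The interpolation formula above, exercised standalone with plain numbers:

import math
import numpy as np

def linterp(setx, sety, x):
    if math.isnan(sety[0]) or math.isnan(setx[0]):
        return np.nan
    return sety[0] + (x - setx[0]) * ((sety[1] - sety[0]) / (setx[1] - setx[0]))

print(linterp([0.0, 10.0], [5.0, 15.0], 2.5))   # 7.5, a quarter of the way between the two values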
def get_buildfile_path(settings):
base = os.path.basename(settings.build_url)
return os.path.join(BUILDS_ROOT, base) | Path to which a build tarball should be downloaded. |
def prior_dates(*args, **kwargs):
try:
chron = args[0]
except IndexError:
chron = kwargs['coredates']
d_r = np.array(kwargs['d_r'])
d_std = np.array(kwargs['d_std'])
t_a = np.array(kwargs['t_a'])
t_b = np.array(kwargs['t_b'])
try:
normal_distr = kwargs['normal_distr']
except KeyError:
normal_distr = None
cc_int = kwargs['cc']
ccdict = {0: 'ConstCal', 1: 'IntCal3', 2: 'Marine13',
3: 'SHCal13', 4: 'ConstCal'}
# There is a better way to do this.
if 'cc1' in kwargs:
ccdict[1] = str(kwargs['cc1'])
if 'cc2' in kwargs:
ccdict[2] = str(kwargs['cc2'])
if 'cc3' in kwargs:
ccdict[3] = str(kwargs['cc3'])
if 'cc4' in kwargs:
ccdict[4] = str(kwargs['cc4'])
cc = []
for i in cc_int:
i = int(i)
cc.append(fetch_calibcurve(ccdict[i]))
d, p = calibrate_dates(chron, calib_curve=cc, d_r=d_r, d_std=d_std,
t_a=t_a, t_b=t_b, normal_distr=normal_distr)
return d, p | Get the prior distribution of calibrated radiocarbon dates |
def prior_sediment_rate(*args, **kwargs):
# PlotAccPrior @ Bacon.R ln 113 -> ln 1097-1115
# alpha = acc_shape, beta = acc_shape / acc_mean
# TODO(brews): Check that these stats are correctly translated to scipy.stats distribs.
acc_mean = kwargs['acc_mean']
acc_shape = kwargs['acc_shape']
x = np.linspace(0, 6 * np.max(acc_mean), 100)
y = stats.gamma.pdf(x, a=acc_shape,
scale=1 / (acc_shape/acc_mean))
return y, x | Get the prior density of sediment rates
Returns
-------
y : ndarray
Array giving the density.
x : ndarray
Array of sediment accumulation values (yr/cm) over which the density was evaluated. |
def prior_sediment_memory(*args, **kwargs):
# "plot the prior for the memory (= accumulation rate varibility between neighbouring depths)"
# PlotMemPrior @ Bacon.R ln 114 -> ln 1119 - 1141
# w_a = mem_strength * mem_mean, w_b = mem_strength * (1 - mem_mean)
# TODO(brews): Check that these stats are correctly translated to scipy.stats distribs.
mem_shape = kwargs['mem_strength'] # aka. `mem_shape`
mem_mean = kwargs['mem_mean']
x = np.linspace(0, 1, 100)
y = stats.beta.pdf(x, a=mem_shape * mem_mean,
b=mem_shape * (1 - mem_mean))
return y, x | Get the prior density of sediment memory
Returns
-------
y : ndarray
Array giving the density.
x : ndarray
Array of Memory (ratio) values over which the density was evaluated. |
def _init_browser(self):
self.browser = splinter.Browser('phantomjs')
self.browser.visit(self.server_url)
self.browser.find_link_by_partial_text("Sign in").click()
self.browser.fill(
'ctl00$ctl00$NICEMasterPageBodyContent$SiteContentPlaceholder$'
'txtFormsLogin', self.user)
self.browser.fill(
'ctl00$ctl00$NICEMasterPageBodyContent$SiteContentPlaceholder$'
'txtFormsPassword', self.password)
self.browser.find_by_css('input[type=submit]').click()
self.browser.find_by_css('input[type=submit]').click() | Update this every time the CERN SSO login form is refactored. |
def album(self):
if not self._album:
self._album = Album(self._album_id, self._album_name,
self._artist_id, self._artist_name,
self._cover_url, self._connection)
return self._album | album as :class:`Album` object |
def stream(self):
# Add song to queue
self._connection.request(
'addSongsToQueue',
{'songIDsArtistIDs': [{'artistID': self.artist.id,
'source': 'user',
'songID': self.id,
'songQueueSongID': 1}],
'songQueueID': self._connection.session.queue},
self._connection.header('addSongsToQueue', 'jsqueue'))
stream_info = self._connection.request(
'getStreamKeyFromSongIDEx',
{'songID': self.id, 'country': self._connection.session.country,
'prefetch': False, 'mobile': False},
self._connection.header('getStreamKeyFromSongIDEx', 'jsqueue'))[1]
return Stream(stream_info['ip'], stream_info['streamKey'],
self._connection) | :class:`Stream` object for playing |
def export(self):
return {'id': self.id, 'name': self.name, 'artist': self._artist_name,
'artist_id': self._artist_id, 'album': self._album_name,
'album_id': self._album_id, 'track': self.track,
'duration': self.duration, 'popularity': self.popularity,
'cover': self._cover_url} | Returns a dictionary with all song information.
Use the :meth:`from_export` method to recreate the
:class:`Song` object. |
def format(self, pattern):
pattern = pattern.replace('%a', self.artist.name)
pattern = pattern.replace('%s', self.name)
pattern = pattern.replace('%A', self.album.name)
return pattern.replace('/', '') \
.replace('\\', '') \
.replace(":", "") \
.replace("*", "") \
.replace("?", "") \
.replace('"', "") \
.replace("|", "") \
.replace("<", "") \
.replace(">", "") | Format the song according to certain patterns:
%a: artist title
%s: song title
%A: album title |
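The pattern substitution and filename sanitisation above, as a standalone sketch:

def format_name(pattern, artist, song, album):
    pattern = pattern.replace('%a', artist).replace('%s', song).replace('%A', album)
    for ch in '/\\:*?"|<>':
        pattern = pattern.replace(ch, '')
    return pattern

print(format_name('%a - %s - %A', 'AC/DC', 'T.N.T.', 'High Voltage'))
# ACDC - T.N.T. - High Voltage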
def download(self, directory='~/Music', song_name='%a - %s - %A'):
formatted = self.format(song_name)
path = os.path.expanduser(directory) + os.path.sep + formatted + '.mp3'
try:
raw = self.safe_download()
with open(path, 'wb') as f:
f.write(raw)
except:
raise
return formatted | Download a song to a directory.
:param directory: A system file path.
:param song_name: A name that will be formatted with :meth:`format`.
:return: The formatted song name. |
def safe_download(self):
def _markStreamKeyOver30Seconds(stream):
self._connection.request(
'markStreamKeyOver30Seconds',
{'streamServerID': stream.ip,
'artistID': self.artist.id,
'songQueueID': self._connection.session.queue,
'songID': self.id,
'songQueueSongID': 1,
'streamKey': stream.key},
self._connection.header('markStreamKeyOver30Seconds', 'jsqueue'))
stream = self.stream
timer = threading.Timer(30, _markStreamKeyOver30Seconds, [stream])
timer.start()
raw = stream.data.read()
if len(raw) == stream.size:
timer.cancel()
self._connection.request(
'markSongDownloadedEx',
{'streamServerID': stream.ip,
'songID': self.id,
'streamKey': stream.key},
self._connection.header('markSongDownloadedEx', 'jsqueue'))
self._connection.request(
'removeSongsFromQueue',
{'userRemoved': True,
'songQueueID': self._connection.session.queue,
'songQueueSongIDs': [1]},
self._connection.header('removeSongsFromQueue', 'jsqueue'))
return raw
else:
raise ValueError("Content-Length {}, but read {}"
.format(stream.size, len(raw))) | Download a song respecting Grooveshark's API.
:return: The raw song data. |
def copy(self):
return self.__class__(options=self.__options,
attribute_options=self.__attribute_options) | Return a copy of this configuration. |
def get_option(self, name):
self.__validate_option_name(name)
return self.__options.get(name, None) | Returns the value for the specified generic configuration option.
:returns: configuration option value or `None`, if the option was not
set. |
def set_option(self, name, value):
self.__validate_option_name(name)
self.__options[name] = value | Sets the specified generic configuration option to the given value. |
def set_attribute_option(self, attribute, option_name, option_value):
self.__validate_attribute_option_name(option_name)
attribute_key = self.__make_key(attribute)
mp_options = self.__attribute_options.setdefault(attribute_key, {})
mp_options[option_name] = option_value | Sets the given attribute option to the given value for the specified
attribute. |
def get_attribute_option(self, attribute, option_name):
self.__validate_attribute_option_name(option_name)
attribute_key = self.__make_key(attribute)
return self.__attribute_options[attribute_key].get(option_name) | Returns the value of the given attribute option for the specified
attribute. |
def get_attribute_options(self, attribute=None):
attribute_key = self.__make_key(attribute)
if attribute_key is None:
opts = defaultdict(self._default_attributes_options.copy)
for attr, mp_options in iteritems_(self.__attribute_options):
opts[attr].update(mp_options)
else:
opts = self._default_attributes_options.copy()
attr_opts = self.__attribute_options[attribute_key]
opts.update(attr_opts)
return opts | Returns a copy of the mapping options for the given attribute name
or a copy of all mapping options, if no attribute name is provided.
All options that were not explicitly configured are given a default
value of `None`.
:param tuple attribute_key: attribute name or tuple specifying an
attribute path.
:returns: mapping options dictionary (including default `None` values) |
def run(self, visitor):
attr_option_map = self.__config.get_attribute_options()
# Sorting the keys results in a depth-first traversal, which is just
# what we want.
for (key, key_attr_option_map) in sorted(iteritems_(attr_option_map)):
if not self.__max_depth is None and len(key) > self.__max_depth:
continue
visitor.visit(key, key_attr_option_map) | Traverses this representer configuration traverser with the given
visitor.
:param visitor: :class:`RepresenterConfigVisitorBase` instance. |
def with_retry(cls, methods):
retry_with_backoff = retry(
retry_on_exception=lambda e: isinstance(e, BotoServerError),
wait_exponential_multiplier=1000,
wait_exponential_max=10000
)
for method in methods:
m = getattr(cls, method, None)
if isinstance(m, collections.Callable):
setattr(cls, method, retry_with_backoff(m))
return cls | Wraps the given list of methods in a class with an exponential-back
retry mechanism. |
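The backoff decorator comes from the retrying package; a minimal standalone sketch of the same idea (the SQS call below is a hypothetical example, not part of the original class):

from boto.exception import BotoServerError
from retrying import retry

retry_with_backoff = retry(
    retry_on_exception=lambda e: isinstance(e, BotoServerError),
    wait_exponential_multiplier=1000,   # first wait ~1 s, doubling each retry
    wait_exponential_max=10000,         # never wait more than 10 s
)

@retry_with_backoff
def send_message(queue, body):
    """Hypothetical boto call that may fail transiently with BotoServerError."""
    return queue.write(queue.new_message(body))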
def string(value) -> str:
return system_json.dumps(Json(value).safe_object(), ensure_ascii=False) | string dict/object/value to JSON |
def parse(string, is_file=False, obj=False):
try:
if obj is False:
if is_file:
return system_json.load(string)
return system_json.loads(string, encoding='utf8')
else:
if is_file:
return system_json.load(
string,
object_hook=lambda d: namedtuple('j', d.keys())
(*d.values()), encoding='utf8')
return system_json.loads(
string,
object_hook=lambda d: namedtuple('j', d.keys())
(*d.values()), encoding='utf8')
except (Exception, BaseException) as error:
try:
if current_app.config['DEBUG']:
raise error
except RuntimeError as flask_error:
raise error
return None | Convert a JSON string to dict/object |
def from_file(file_path) -> dict:
with io.open(file_path, 'r', encoding='utf-8') as json_stream:
return Json.parse(json_stream, True) | Load JSON file |
def safe_values(self, value):
# TODO: override-able?
string_val = ""
if isinstance(value, datetime.date):
try:
string_val = value.strftime('{0}{1}{2}'.format(
current_app.config['DATETIME']['DATE_FORMAT'],
current_app.config['DATETIME']['SEPARATOR'],
current_app.config['DATETIME']['TIME_FORMAT']))
except RuntimeError as error:
string_val = value.strftime('%Y-%m-%d %H:%M:%S')
elif isinstance(value, bytes):
string_val = value.decode('utf-8')
elif isinstance(value, decimal.Decimal):
string_val = float(value)
else:
string_val = value
return string_val | Parse non-string values that will not serialize |
def camel_case(self, snake_case):
components = snake_case.split('_')
return components[0] + "".join(x.title() for x in components[1:]) | Convert snake case to camel case |
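The conversion above in isolation:

def camel_case(snake_case):
    components = snake_case.split('_')
    return components[0] + "".join(x.title() for x in components[1:])

print(camel_case('album_cover_url'))   # albumCoverUrl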
def __find_object_children(self, obj) -> dict:
if hasattr(obj, 'items') and \
isinstance(obj.items, types.BuiltinFunctionType):
return self.__construct_object(obj)
elif isinstance(obj, (list, tuple, set)):
return self.__construct_list(obj)
else:
exclude_list = []
if hasattr(obj, '_sa_instance_state'):
# load only deferred objects
if len(orm.attributes.instance_state(obj).unloaded) > 0:
mapper = inspect(obj)
for column in mapper.attrs:
column.key
column.value
if hasattr(obj, 'json_exclude_list'):
# do not serialize any values in this list
exclude_list = obj.json_exclude_list
return self.__construct_object(vars(obj), exclude_list)
return None | Convert object to flattened object |
def __construct_list(self, list_value):
array = []
for value in list_value:
array.append(self.__iterate_value(value))
return array | Loop list/set/tuple and parse values |
def __construct_object(self, obj, exclude_list=[]):
new_obj = {}
for key, value in obj.items():
if str(key).startswith('_') or \
key == 'json_exclude_list' or key in exclude_list:
continue
new_obj[self.camel_case(key)] = self.__iterate_value(value)
return new_obj | Loop dict/class object and parse values |
def __iterate_value(self, value):
if hasattr(value, '__dict__') or isinstance(value, dict):
return self.__find_object_children(value) # go through dict/class
elif isinstance(value, (list, tuple, set)):
return self.__construct_list(value) # go through list
return self.safe_values(value) | Return value for JSON serialization |
def parse_xml(self, key_xml):
    xmlutils = XmlUtils(key_xml)
    self.name = xmlutils.get_string_by_xpath('name')
    self.family = xmlutils.get_string_by_xpath('family')
    self.version = xmlutils.get_string_by_xpath('version')
    self.description = xmlutils.get_string_by_xpath('description')
    self.language = xmlutils.get_lang() | Parse a VocabularyKey from an Xml as per Healthvault
schema.
:param key_xml: lxml.etree.Element representing a single VocabularyKey |
def print_and_exit(results):
for success, value in results:
if success:
print value.encode(locale.getpreferredencoding())
else:
value.printTraceback() | Print each result and stop the reactor. |
def main(reactor):
parser = argparse.ArgumentParser(
description='Fetch a URI or series of URIs and print a title '
'or summary for each.',
epilog='If no URIs are passed on the command line, they are '
'read from standard input, one per line.')
parser.add_argument(
'uris', metavar='URI', nargs='*', help='URI to fetch')
parser.add_argument(
'-H', '--hostname-tag', action='store_true',
help='prefix titles with a hostname tag')
args = parser.parse_args()
uris = args.uris or imap(lambda x: x.strip(), sys.stdin)
finished = DeferredList([
fetch_title(uri.decode(locale.getpreferredencoding()),
hostname_tag=args.hostname_tag, friendly_errors=True)
for uri in uris])
finished.addCallback(print_and_exit)
return finished | Main command line entry point. |
def _topological_sort(self):
sorted_graph = []
node_map = self._graph.get_nodes()
nodes = [NodeVisitor(node_map[node]) for node in node_map]
def get_pointers_for_edge_nodes(visitor_decorated_node):
edges = []
edge_ids = visitor_decorated_node.get_node().get_edges()
for node in nodes:
if node.get_id() in edge_ids:
edges.append(node)
return edges
# node is initially weighted with the number of immediate dependencies
for node in nodes:
for edge in get_pointers_for_edge_nodes(node):
edge.increment()
# Start with a list of nodes who have no dependents
resolved = [node for node in nodes if node.get_weight() == 0]
while resolved:
node = resolved.pop()
sorted_graph.append(node)
for edge in get_pointers_for_edge_nodes(node):
edge.decrement()
if edge.get_weight() == 0:
resolved.append(edge)
self._circular_dependencies = [
node.get_node() for node in nodes if node.get_weight() > 0]
self._sorted_nodes = list(reversed(
[node.get_node() for node in sorted_graph])) | Kahn's algorithm for Topological Sorting
- Finds cycles in graph
- Computes dependency weight |
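Kahn's algorithm in isolation, on a plain adjacency dict rather than the Graph/NodeVisitor classes used above:

from collections import deque

def kahn_sort(graph):
    """graph maps each node to the nodes it points to; returns (topological order, nodes stuck in cycles)."""
    indegree = {n: 0 for n in graph}
    for targets in graph.values():
        for t in targets:
            indegree[t] = indegree.get(t, 0) + 1
    queue = deque(n for n, d in indegree.items() if d == 0)
    order = []
    while queue:
        n = queue.popleft()
        order.append(n)
        for t in graph.get(n, ()):
            indegree[t] -= 1
            if indegree[t] == 0:
                queue.append(t)
    cycles = [n for n, d in indegree.items() if d > 0]
    return order, cycles

print(kahn_sort({'a': ['b'], 'b': ['c'], 'c': []}))   # (['a', 'b', 'c'], [])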
def __math(self, f, x):
d = {}
#operation with single number
if isinstance(x, (int, long, float, complex)):
for i in self.d: d[i] = f(self.d[i], x)
return d
#operation with other view (must have same size and partitions as self) or Field from same mesh and same bounds like View
elif isinstance(x, View) or isinstance(x, Field):
try:
for i in self.d:
d[i] = f(self.d[i], x.d[i])
return d
except: raise ValueError('Views have to be partitioned and shaped in the same way to be add/sub/mul/div/pow/mod\nField has to have same bounds and origin mesh as View.')
#operation with numpy array
elif isinstance(x, ndarray):
#array has to be of the same Size as View
try:
for i in self.d:
ind = self.__indices(self.__partition.meta_data[i], self.__mask)
d[i] = f(self.d[i], x[ind])
return d
except: raise ValueError('Array has to have same shape as View for operation')
else: raise ValueError('Operators only available for View and (View, Field, numpy.ndarray with same shape View, integer, float, complex).') | operator function
:param f: operator.add/sub/mul... used operator
:param x: other object view should be add/sub... with (other View, number, numpy.ndarray, Field)
:return: dictionary (same shape as field.d) with result of operation |
def add(self, x, axis):
return self.__array_op(operator.add, x, axis) | Function to add 3D View with vector or 2D array (type = numpy.ndarray or 2D Field or 2D View) or 2D View with vector (type = numpy.ndarray)
:param x: array(1D, 2D) or field (2D) or View(2D)
:param axis: specifies axis, eg. axis = (1,2) plane lies in yz-plane, axis=0 vector along x axis
:return: dict with result of operation (same form as view.d) |
def sub(self, x, axis):
return self.__array_op(operator.sub, x, axis) | Function to sub 3D View with vector or 2D array (type = numpy.ndarray or 2D Field or 2D View) or 2D View with vector (type = numpy.ndarray)
:param x: array(1D, 2D) or field (2D) or View(2D)
:param axis: specifies axis, eg. axis = (1,2) plane lies in yz-plane, axis=0 vector along x axis
:return: dict with result of operation (same form as view.d) |
def mul(self, x, axis):
return self.__array_op(operator.mul, x, axis) | Function to multiply 3D View with vector or 2D array (type = numpy.ndarray or 2D Field or 2D View) or 2D View with vector (type = numpy.ndarray)
:param x: array(1D, 2D) or field (2D) or View(2D)
:param axis: specifies axis, eg. axis = (1,2) plane lies in yz-plane, axis=0 vector along x axis
:return: dict with result of operation (same form as view.d) |
def div(self, x, axis):
return self.__array_op(operator.div, x, axis) | Function to divide 3D View by vector or 2D array (type = numpy.ndarray or 2D Field or 2D View) or 2D View with vector (type = numpy.ndarray)
:param x: array(1D, 2D) or field (2D) or View(2D)
:param axis: specifies axis, eg. axis = (1,2) plane lies in yz-plane, axis=0 vector along x axis
:return: dict with result of operation (same form as view.d) |
def mod(self, x, axis):
return self.__array_op(operator.mod, x, axis) | Function to modulo 3D View with vector or 2D array (type = numpy.ndarray or 2D Field or 2D View) or 2D View with vector (type = numpy.ndarray)
:param x: array(1D, 2D) or field (2D) or View(2D)
:param axis: specifies axis, eg. axis = (1,2) plane lies in yz-plane, axis=0 vector along x axis
:return: dict with result of operation (same form as view.d) |
def pow(self, x, axis):
return self.__array_op(operator.pow, x, axis) | Function to power 3D View with vector or 2D array (type = numpy.ndarray or 2D Field or 2D View) or 2D View with vector (type = numpy.ndarray)
:param x: array(1D, 2D) or field (2D) or View(2D)
:param axis: specifies axis, eg. axis = (1,2) plane lies in yz-plane, axis=0 vector along x axis
:return: dict with result of operation (same form as view.d) |
def load_environment_vars(self):
for k, v in os.environ.items():
if k.startswith(MACH9_PREFIX):
_, config_key = k.split(MACH9_PREFIX, 1)
self[config_key] = v | Looks for any MACH9_ prefixed environment variables and applies
them to the configuration if present. |
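The prefix handling above can be exercised standalone; note that values arrive as strings:

import os

MACH9_PREFIX = 'MACH9_'
os.environ['MACH9_REQUEST_TIMEOUT'] = '60'

config = {}
for k, v in os.environ.items():
    if k.startswith(MACH9_PREFIX):
        _, config_key = k.split(MACH9_PREFIX, 1)
        config[config_key] = v

print(config['REQUEST_TIMEOUT'])   # '60'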
def copy(self, parent=None):
new = Structure(None, parent=parent)
new.key = self.key
new.type_ = self.type_
new.val_guaranteed = self.val_guaranteed
new.key_guaranteed = self.key_guaranteed
for child in self.children:
new.children.append(child.copy(new))
return new | Copies an existing structure and all of its children |
def generation(self):
if not self.parent:
return 0
elif self.parent.is_dict:
return 1 + self.parent.generation
else:
return self.parent.generation | Returns the number of ancestors that are dictionaries |
def type_string(self):
if self.is_tuple:
subtypes = [item.type_string for item in self.children]
return '{}({})'.format(
'' if self.val_guaranteed else '*',
', '.join(subtypes))
elif self.is_list:
return '{}[{}]'.format(
'' if self.val_guaranteed else '*',
self.children[0].type_string)
else:
return '{}{}'.format(
'' if self.val_guaranteed else '*',
self.type_.__name__) | Returns a string representing the type of the structure |
def get_n_excluded_patches(self):
base = self.get_patches_base()
if not base:
return 0
p = base.rfind('+')
if p == -1:
return 0
try:
n = int(base[p+1:])
return n
except ValueError:
return 0 | Gets number of excluded patches from patches_base:
#patches_base=1.0.0+THIS_NUMBER |
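The suffix parsing above, as a standalone sketch:

def n_excluded(patches_base):
    p = patches_base.rfind('+')
    if p == -1:
        return 0
    try:
        return int(patches_base[p + 1:])
    except ValueError:
        return 0

print(n_excluded('1.0.0+2'))   # 2
print(n_excluded('1.0.0'))     # 0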
def set_field(obj, field_name, value):
old = getattr(obj, field_name)
field = obj._meta.get_field(field_name)
# is_relation is Django 1.8 only
if field.is_relation:
# If field_name is the `_id` field, then there is no 'pk' attr and
# old/value *is* the pk
old_repr = None if old is None else getattr(old, 'pk', old)
new_repr = None if value is None else getattr(value, 'pk', value)
elif field.__class__.__name__ == 'DateTimeField':
old_repr = None if old is None else datetime_repr(old)
new_repr = None if value is None else datetime_repr(value)
else:
old_repr = None if old is None else str(old)
new_repr = None if value is None else str(value)
if old_repr != new_repr:
setattr(obj, field_name, value)
if not hasattr(obj, DIRTY):
setattr(obj, DIRTY, [])
getattr(obj, DIRTY).append(dict(
field_name=field_name,
old_value=old_repr,
new_value=new_repr,
)) | Fancy setattr with debugging. |
def obj_update(obj, data: dict, *, update_fields=UNSET, save: bool=True) -> bool:
for field_name, value in data.items():
set_field(obj, field_name, value)
dirty_data = getattr(obj, DIRTY, None)
if not dirty_data:
return False
logger.debug(
human_log_formatter(dirty_data),
extra={
'model': obj._meta.object_name,
'pk': obj.pk,
'changes': json_log_formatter(dirty_data),
}
)
if update_fields == UNSET:
update_fields = list(map(itemgetter('field_name'), dirty_data))
if not save:
update_fields = ()
obj.save(update_fields=update_fields)
delattr(obj, DIRTY)
return True | Fancy way to update `obj` with `data` dict.
Parameters
----------
obj : Django model instance
data
The data to update ``obj`` with
update_fields
Use your ``update_fields`` instead of our generated one. If you need
an auto_now or auto_now_add field to get updated, set this to ``None``
to get the default Django behavior.
save
If save=False, then don't actually save. This can be useful if you
just want to utilize the verbose logging.
DEPRECATED in favor of the more standard ``update_fields=[]``
Returns
-------
bool
True if data changed |
def obj_update_or_create(model, defaults=None, update_fields=UNSET, **kwargs):
obj, created = model.objects.get_or_create(defaults=defaults, **kwargs)
if created:
logger.debug('CREATED %s %s',
model._meta.object_name,
obj.pk,
extra={'pk': obj.pk})
else:
obj_update(obj, defaults, update_fields=update_fields)
return obj, created | Mimic queryset.update_or_create but using obj_update. |
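A hedged usage sketch; the Author model and its fields are illustrative assumptions:

# Creates the row if missing, otherwise applies only the fields that changed,
# logging each change via the DIRTY bookkeeping shown above.
author, created = obj_update_or_create(
    Author,
    slug='ursula-k-le-guin',
    defaults={'name': 'Ursula K. Le Guin', 'birth_year': 1929},
)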
def detect_scheme(filename):
logger = logging.getLogger(__name__)
logger.info('Detecting partitioning scheme')
with open(filename, 'rb') as f:
# Look for MBR signature first
f.seek(mbr.MBR_SIG_OFFSET)
data = f.read(mbr.MBR_SIG_SIZE)
signature = struct.unpack("<H", data)[0]
if signature != mbr.MBR_SIGNATURE:
# Something else
logger.debug('Unknown partitioning scheme')
return PartitionScheme.SCHEME_UNKNOWN
else:
# Could be MBR or GPT, look for GPT header
f.seek(gpt.GPT_HEADER_OFFSET)
data = f.read(gpt.GPT_SIG_SIZE)
signature = struct.unpack("<8s", data)[0]
if signature != gpt.GPT_SIGNATURE:
logger.debug('MBR scheme detected')
return PartitionScheme.SCHEME_MBR
else:
logger.debug('GPT scheme detected')
return PartitionScheme.SCHEME_GPT | Detects partitioning scheme of the source
Args:
filename (str): path to file or device for detection of \
partitioning scheme.
Returns:
SCHEME_MBR, SCHEME_GPT or SCHEME_UNKNOWN
Raises:
IOError: The file doesn't exist or cannot be opened for reading
>>> from rawdisk.scheme.common import *
>>> scheme = detect_scheme('/dev/disk1')
>>> if scheme == PartitionScheme.SCHEME_MBR:
>>> <...> |
def main():
    if len(sys.argv) == 1:
        print("Usage: tyler [filename]")
        sys.exit(0)
    filename = sys.argv[1]
    if not os.path.isfile(filename):
        print("Specified file does not exist")
        sys.exit(8)
    my_tyler = Tyler(filename=filename)
    while True:
        try:
            for line in my_tyler:
                print(line)
            time.sleep(1)
        except KeyboardInterrupt:
            print("Quit signal received")
            sys.exit(0) | Entry point |
def _has_file_rolled(self):
# if the size is smaller than before, the file has
# probably been rolled
if self._fh:
size = self._getsize_of_current_file()
if size < self.oldsize:
return True
self.oldsize = size
return False | Check if the file has been rolled |
def _open_file(self, filename):
if not self._os_is_windows:
self._fh = open(filename, "rb")
self.filename = filename
self._fh.seek(0, os.SEEK_SET)
self.oldsize = 0
return
# if we're in Windows, we need to use the WIN32 API to open the
# file without locking it
import win32file
import msvcrt
handle = win32file.CreateFile(filename,
win32file.GENERIC_READ,
win32file.FILE_SHARE_DELETE |
win32file.FILE_SHARE_READ |
win32file.FILE_SHARE_WRITE,
None,
win32file.OPEN_EXISTING,
0,
None)
detached_handle = handle.Detach()
file_descriptor = msvcrt.open_osfhandle(
detached_handle, os.O_RDONLY)
self._fh = open(file_descriptor, "rb")
self.filename = filename
self._fh.seek(0, os.SEEK_SET)
self.oldsize = 0 | Open a file to be tailed |