<SYSTEM_TASK:>
Convert string s to the 'bytes' type, in all Pythons, even
<END_TASK>
<USER_TASK:>
Description:
def tobytes(s, encoding='ascii'):
""" Convert string s to the 'bytes' type, in all Pythons, even
back before Python 2.6. What 'str' means varies by PY3K or not.
In Pythons before 3.0, this is technically the same as the str type
in terms of the character data in memory. """ |
# NOTE: after we abandon 2.5, we might simply instead use "bytes(s)"
# NOTE: after we abandon all 2.*, del this and prepend byte strings with 'b'
if PY3K:
if isinstance(s, bytes):
return s
else:
return s.encode(encoding)
else:
# for py2.6 on (before 3.0), bytes is same as str; 2.5 has no bytes
# but handle if unicode is passed
if isinstance(s, unicode):
return s.encode(encoding)
else:
return s |
<SYSTEM_TASK:>
Convert string-like-thing s to the 'str' type, in all Pythons, even
<END_TASK>
<USER_TASK:>
Description:
def tostr(s, encoding='ascii'):
""" Convert string-like-thing s to the 'str' type, in all Pythons, even
back before Python 2.6. What 'str' means varies by PY3K or not.
In Pythons before 3.0, str and bytes are the same type.
In Python 3+, this may require a decoding step. """ |
if PY3K:
if isinstance(s, str): # str == unicode in PY3K
return s
else: # s is type bytes
return s.decode(encoding)
else:
# for py2.6 on (before 3.0), bytes is same as str; 2.5 has no bytes
# but handle if unicode is passed
if isinstance(s, unicode):
return s.encode(encoding)
else:
return s |
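For reference, a minimal Python-3-only sketch of the same round-trip; the helper names to_bytes/to_str below are illustrative, not the module's own:
def to_bytes(s, encoding='ascii'):
    # On Python 3, str must be encoded; bytes pass through unchanged.
    return s if isinstance(s, bytes) else s.encode(encoding)

def to_str(s, encoding='ascii'):
    # On Python 3, bytes must be decoded; str passes through unchanged.
    return s if isinstance(s, str) else s.decode(encoding)

assert to_bytes('abc') == b'abc' and to_str(b'abc') == 'abc'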
<SYSTEM_TASK:>
Decorator that retries the call ``retries`` times if ``func`` raises ``exceptions``. Can use a ``backoff`` function
<END_TASK>
<USER_TASK:>
Description:
def retry(func=None, retries=5, backoff=None, exceptions=(IOError, OSError, EOFError), cleanup=None, sleep=time.sleep):
"""
Decorator that retries the call ``retries`` times if ``func`` raises ``exceptions``. Can use a ``backoff`` function
to sleep till next retry.
Example::
>>> should_fail = lambda foo=[1,2,3]: foo and foo.pop()
>>> @retry
... def flaky_func():
... if should_fail():
... raise OSError('Tough luck!')
... print("Success!")
...
>>> flaky_func()
Success!
If it reaches the retry limit::
>>> @retry
... def bad_func():
... raise OSError('Tough luck!')
...
>>> bad_func()
Traceback (most recent call last):
...
OSError: Tough luck!
""" |
@Aspect(bind=True)
def retry_aspect(cutpoint, *args, **kwargs):
for count in range(retries + 1):
try:
if count and cleanup:
cleanup(*args, **kwargs)
yield
break
except exceptions as exc:
if count == retries:
raise
if not backoff:
timeout = 0
elif isinstance(backoff, (int, float)):
timeout = backoff
else:
timeout = backoff(count)
logger.exception("%s(%s, %s) raised exception %s. %s retries left. Sleeping %s secs.",
cutpoint.__name__, args, kwargs, exc, retries - count, timeout)
sleep(timeout)
return retry_aspect if func is None else retry_aspect(func) |
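A plain-Python sketch of the same retry-with-backoff idea, without the Aspect machinery; the name simple_retry and its defaults are illustrative only:
import time

def simple_retry(call, retries=5, backoff=lambda n: 2 ** n,
                 exceptions=(IOError, OSError, EOFError), sleep=time.sleep):
    # Call `call()` and retry up to `retries` times when `exceptions` are raised.
    for count in range(retries + 1):
        try:
            return call()
        except exceptions:
            if count == retries:
                raise              # out of retries: re-raise the last error
            sleep(backoff(count))  # wait before the next attempt

# e.g. simple_retry(lambda: open('maybe-missing.txt'), retries=2, backoff=lambda n: 0)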
<SYSTEM_TASK:>
Popup right-click menu of special parameter operations
<END_TASK>
<USER_TASK:>
Description:
def popupChoices(self, event=None):
"""Popup right-click menu of special parameter operations
Relies on browserEnabled, clearEnabled, unlearnEnabled, helpEnabled
instance attributes to determine which items are available.
""" |
# don't bother if all items are disabled
if NORMAL not in (self.browserEnabled, self.clearEnabled,
self.unlearnEnabled, self.helpEnabled):
return
self.menu = Menu(self.entry, tearoff = 0)
if self.browserEnabled != DISABLED:
# Handle file and directory in different functions (tkFileDialog)
if capable.OF_TKFD_IN_EPAR:
self.menu.add_command(label = "File Browser",
state = self.browserEnabled,
command = self.fileBrowser)
self.menu.add_command(label = "Directory Browser",
state = self.browserEnabled,
command = self.dirBrowser)
# Handle file and directory in the same function (filedlg)
else:
self.menu.add_command(label = "File/Directory Browser",
state = self.browserEnabled,
command = self.fileBrowser)
self.menu.add_separator()
self.menu.add_command(label = "Clear",
state = self.clearEnabled,
command = self.clearEntry)
self.menu.add_command(label = self.defaultsVerb,
state = self.unlearnEnabled,
command = self.unlearnValue)
self.menu.add_command(label = 'Help',
state = self.helpEnabled,
command = self.helpOnParam)
# Get the current y-coordinate of the Entry
ycoord = self.entry.winfo_rooty()
# Get the current x-coordinate of the cursor
xcoord = self.entry.winfo_pointerx() - XSHIFT
# Display the Menu as a popup as it is not associated with a Button
self.menu.tk_popup(xcoord, ycoord) |
<SYSTEM_TASK:>
Invoke a tkinter directory dialog
<END_TASK>
<USER_TASK:>
Description:
def dirBrowser(self):
"""Invoke a tkinter directory dialog""" |
if capable.OF_TKFD_IN_EPAR:
fname = askdirectory(parent=self.entry, title="Select Directory")
else:
raise NotImplementedError('Fix popupChoices() logic.')
if not fname:
return # canceled
self.choice.set(fname)
# don't select when we go back to widget to reduce risk of
# accidentally typing over the filename
self.lastSelection = None |
<SYSTEM_TASK:>
Force-set a parameter entry to the given value
<END_TASK>
<USER_TASK:>
Description:
def forceValue(self, newVal, noteEdited=False):
"""Force-set a parameter entry to the given value""" |
if newVal is None:
newVal = ""
self.choice.set(newVal)
if noteEdited:
self.widgetEdited(val=newVal, skipDups=False) |
<SYSTEM_TASK:>
Unlearn a parameter value by setting it back to its default
<END_TASK>
<USER_TASK:>
Description:
def unlearnValue(self):
"""Unlearn a parameter value by setting it back to its default""" |
defaultValue = self.defaultParamInfo.get(field = "p_filename",
native = 0, prompt = 0)
self.choice.set(defaultValue) |
<SYSTEM_TASK:>
Allow keys typed in widget to select items
<END_TASK>
<USER_TASK:>
Description:
def keypress(self, event):
"""Allow keys typed in widget to select items""" |
try:
self.choice.set(self.shortcuts[event.keysym])
except KeyError:
# key not found (probably a bug, since we intend to catch
# only events from shortcut keys, but ignore it anyway)
pass |
<SYSTEM_TASK:>
Make sure proper entry is activated when menu is posted
<END_TASK>
<USER_TASK:>
Description:
def postcmd(self):
"""Make sure proper entry is activated when menu is posted""" |
value = self.choice.get()
try:
index = self.paramInfo.choice.index(value)
self.entry.menu.activate(index)
except ValueError:
# initial null value may not be in list
pass |
<SYSTEM_TASK:>
Convert to native bool; interpret certain strings.
<END_TASK>
<USER_TASK:>
Description:
def convertToNative(self, aVal):
""" Convert to native bool; interpret certain strings. """ |
if aVal is None:
return None
if isinstance(aVal, bool): return aVal
# otherwise interpret strings
return str(aVal).lower() in ('1','on','yes','true') |
<SYSTEM_TASK:>
Toggle value between Yes and No
<END_TASK>
<USER_TASK:>
Description:
def toggle(self, event=None):
"""Toggle value between Yes and No""" |
if self.choice.get() == "yes":
self.rbno.select()
else:
self.rbyes.select()
self.widgetEdited() |
<SYSTEM_TASK:>
Ensure any INDEF entry is uppercase, before base class behavior
<END_TASK>
<USER_TASK:>
Description:
def entryCheck(self, event = None, repair = True):
""" Ensure any INDEF entry is uppercase, before base class behavior """ |
valupr = self.choice.get().upper()
if valupr.strip() == 'INDEF':
self.choice.set(valupr)
return EparOption.entryCheck(self, event, repair = repair) |
<SYSTEM_TASK:>
updates the current record of the packet size per sample and the relationship between this and the fifo reads.
<END_TASK>
<USER_TASK:>
Description:
def _setSampleSizeBytes(self):
"""
updates the current record of the packet size per sample and the relationship between this and the fifo reads.
""" |
self.sampleSizeBytes = self.getPacketSize()
if self.sampleSizeBytes > 0:
self.maxBytesPerFifoRead = (32 // self.sampleSizeBytes) |
<SYSTEM_TASK:>
Input ASCII trailer file "input" will be read.
<END_TASK>
<USER_TASK:>
Description:
def convert(input, width=132, output=None, keep=False):
"""Input ASCII trailer file "input" will be read.
The contents will then be written out to a FITS file in the same format
as used by 'stwfits' from IRAF.
Parameters
===========
input : str
Filename of input ASCII trailer file
width : int
Number of characters wide to use for defining output FITS column
[Default: 132]
output : str
Filename to use for writing out converted FITS trailer file
If None, input filename will be converted from *.tra -> *_trl.fits
[Default: None]
keep : bool
Specifies whether or not to keep any previously written FITS files
[Default: False]
""" |
# open input trailer file
trl = open(input)
# process all lines
lines = np.array([i for text in trl.readlines() for i in textwrap.wrap(text,width=width)])
# close ASCII trailer file now that we have processed all the lines
trl.close()
if output is None:
# create fits file
rootname,suffix = os.path.splitext(input)
s = suffix[1:].replace('ra','rl')
fitsname = "{}_{}{}fits".format(rootname,s,os.path.extsep)
else:
fitsname = output
full_name = os.path.abspath(os.path.join(os.path.curdir,fitsname))
old_file = os.path.exists(full_name)
if old_file:
if keep:
print("ERROR: Trailer file already written out as: {}".format(full_name))
raise IOError
else:
os.remove(full_name)
# Build FITS table and write it out
line_fmt = "{}A".format(width)
tbhdu = fits.BinTableHDU.from_columns([fits.Column(name='TEXT_FILE',format=line_fmt,array=lines)])
tbhdu.writeto(fitsname)
print("Created output FITS filename for trailer:{} {}".format(os.linesep,full_name))
os.remove(input) |
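As a concrete illustration of the default naming when output is None (the input filename here is hypothetical):
# 'j8cw03ioj.tra' -> rootname 'j8cw03ioj', suffix '.tra', 'ra' replaced by 'rl',
# so the output trailer FITS file becomes 'j8cw03ioj_trl.fits'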
<SYSTEM_TASK:>
Find all the values and sections not in the configspec from a validated
<END_TASK>
<USER_TASK:>
Description:
def get_extra_values(conf, _prepend=()):
"""
Find all the values and sections not in the configspec from a validated
ConfigObj.
``get_extra_values`` returns a list of tuples where each tuple represents
either an extra section, or an extra value.
The tuples contain two values, a tuple representing the section the value
is in and the name of the extra values. For extra values in the top level
section the first member will be an empty tuple. For values in the 'foo'
section the first member will be ``('foo',)``. For members in the 'bar'
subsection of the 'foo' section the first member will be ``('foo', 'bar')``.
NOTE: If you call ``get_extra_values`` on a ConfigObj instance that hasn't
been validated it will return an empty list.
""" |
out = []
out.extend([(_prepend, name) for name in conf.extra_values])
for name in conf.sections:
if name not in conf.extra_values:
out.extend(get_extra_values(conf[name], _prepend + (name,)))
return out |
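A usage sketch with the standard configobj/validate pair; the spec and config values below are made up:
from configobj import ConfigObj, get_extra_values
from validate import Validator

spec = ['[foo]', 'known = integer(default=1)']
cfg = ConfigObj(['[foo]', 'known = 2', 'surprise = 3', '[bar]', 'x = 1'],
                configspec=spec)
cfg.validate(Validator())      # must validate first, otherwise the result is []
print(get_extra_values(cfg))   # e.g. [((), 'bar'), (('foo',), 'surprise')]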
<SYSTEM_TASK:>
Helper function to fetch values from owning section.
<END_TASK>
<USER_TASK:>
Description:
def _fetch(self, key):
"""Helper function to fetch values from owning section.
Returns a 2-tuple: the value, and the section where it was found.
""" |
# switch off interpolation before we try and fetch anything !
save_interp = self.section.main.interpolation
self.section.main.interpolation = False
# Start at section that "owns" this InterpolationEngine
current_section = self.section
while True:
# try the current section first
val = current_section.get(key)
if val is not None and not isinstance(val, Section):
break
# try "DEFAULT" next
val = current_section.get('DEFAULT', {}).get(key)
if val is not None and not isinstance(val, Section):
break
# move up to parent and try again
# top-level's parent is itself
if current_section.parent is current_section:
# reached top level, time to give up
break
current_section = current_section.parent
# restore interpolation to previous value before returning
self.section.main.interpolation = save_interp
if val is None:
raise MissingInterpolationOption(key)
return val, current_section |
<SYSTEM_TASK:>
Return a deepcopy of self as a dictionary.
<END_TASK>
<USER_TASK:>
Description:
def dict(self):
"""
Return a deepcopy of self as a dictionary.
All members that are ``Section`` instances are recursively turned to
ordinary dictionaries - by calling their ``dict`` method.
>>> n = a.dict() # doctest: +SKIP
>>> n == a # doctest: +SKIP
1
>>> n is a # doctest: +SKIP
0
""" |
newdict = {}
for entry in self:
this_entry = self[entry]
if isinstance(this_entry, Section):
this_entry = this_entry.dict()
elif isinstance(this_entry, list):
# create a copy rather than a reference
this_entry = list(this_entry)
elif isinstance(this_entry, tuple):
# create a copy rather than a reference
this_entry = tuple(this_entry)
newdict[entry] = this_entry
return newdict |
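A short usage sketch of the deep-copy behaviour described above; the config lines are made up:
from configobj import ConfigObj

cfg = ConfigObj(['a = 1', '[sec]', 'b = 2'])
plain = cfg.dict()            # plain nested dicts, detached from the ConfigObj
plain['sec']['b'] = 'changed'
print(cfg['sec']['b'])        # still '2': the copy is independent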
<SYSTEM_TASK:>
Change a keyname to another, without changing position in sequence.
<END_TASK>
<USER_TASK:>
Description:
def rename(self, oldkey, newkey):
"""
Change a keyname to another, without changing position in sequence.
Implemented so that transformations can be made on keys,
as well as on values. (used by encode and decode)
Also renames comments.
""" |
if oldkey in self.scalars:
the_list = self.scalars
elif oldkey in self.sections:
the_list = self.sections
else:
raise KeyError('Key "%s" not found.' % oldkey)
pos = the_list.index(oldkey)
#
val = self[oldkey]
dict.__delitem__(self, oldkey)
dict.__setitem__(self, newkey, val)
the_list.remove(oldkey)
the_list.insert(pos, newkey)
comm = self.comments[oldkey]
inline_comment = self.inline_comments[oldkey]
del self.comments[oldkey]
del self.inline_comments[oldkey]
self.comments[newkey] = comm
self.inline_comments[newkey] = inline_comment |
<SYSTEM_TASK:>
Walk every member and call a function on the keyword and value.
<END_TASK>
<USER_TASK:>
Description:
def walk(self, function, raise_errors=True,
call_on_sections=False, **keywargs):
"""
Walk every member and call a function on the keyword and value.
Return a dictionary of the return values
If the function raises an exception, raise the error
unless ``raise_errors=False``, in which case set the return value to
``False``.
Any unrecognised keyword arguments you pass to walk will be passed on
to the function you pass in.
Note: if ``call_on_sections`` is ``True`` then - on encountering a
subsection, *first* the function is called for the *whole* subsection,
and then recurses into its members. This means your function must be
able to handle strings, dictionaries and lists. This allows you
to change the key of subsections as well as for ordinary members. The
return value when called on the whole subsection has to be discarded.
See the encode and decode methods for examples, including functions.
.. admonition:: caution
You can use ``walk`` to transform the names of members of a section
but you mustn't add or delete members.
>>> config = '''[XXXXsection]
... XXXXkey = XXXXvalue'''.splitlines()
>>> cfg = ConfigObj(config)
>>> cfg
ConfigObj({'XXXXsection': {'XXXXkey': 'XXXXvalue'}})
>>> def transform(section, key):
... val = section[key]
... newkey = key.replace('XXXX', 'CLIENT1')
... section.rename(key, newkey)
... if isinstance(val, (tuple, list, dict)):
... pass
... else:
... val = val.replace('XXXX', 'CLIENT1')
... section[newkey] = val
>>> cfg.walk(transform, call_on_sections=True)
{'CLIENT1section': {'CLIENT1key': None}}
>>> cfg
ConfigObj({'CLIENT1section': {'CLIENT1key': 'CLIENT1value'}})
""" |
out = {}
# scalars first
for i in range(len(self.scalars)):
entry = self.scalars[i]
try:
val = function(self, entry, **keywargs)
# bound again in case name has changed
entry = self.scalars[i]
out[entry] = val
except Exception:
if raise_errors:
raise
else:
entry = self.scalars[i]
out[entry] = False
# then sections
for i in range(len(self.sections)):
entry = self.sections[i]
if call_on_sections:
try:
function(self, entry, **keywargs)
except Exception:
if raise_errors:
raise
else:
entry = self.sections[i]
out[entry] = False
# bound again in case name has changed
entry = self.sections[i]
# previous result is discarded
out[entry] = self[entry].walk(
function,
raise_errors=raise_errors,
call_on_sections=call_on_sections,
**keywargs)
return out |
<SYSTEM_TASK:>
A convenience method which fetches the specified value, guaranteeing
<END_TASK>
<USER_TASK:>
Description:
def as_list(self, key):
"""
A convenience method which fetches the specified value, guaranteeing
that it is a list.
>>> a = ConfigObj()
>>> a['a'] = 1
>>> a.as_list('a')
[1]
>>> a['a'] = (1,)
>>> a.as_list('a')
[1]
>>> a['a'] = [1]
>>> a.as_list('a')
[1]
""" |
result = self[key]
if isinstance(result, (tuple, list)):
return list(result)
return [result] |
<SYSTEM_TASK:>
Recursively restore default values to all members
<END_TASK>
<USER_TASK:>
Description:
def restore_defaults(self):
"""
Recursively restore default values to all members
that have them.
This method will only work for a ConfigObj that was created
with a configspec and has been validated.
It doesn't delete or modify entries without default values.
""" |
for key in self.default_values:
self.restore_default(key)
for section in self.sections:
self[section].restore_defaults() |
<SYSTEM_TASK:>
Handle any BOM, and decode if necessary.
<END_TASK>
<USER_TASK:>
Description:
def _handle_bom(self, infile):
"""
Handle any BOM, and decode if necessary.
If an encoding is specified, that *must* be used - but the BOM should
still be removed (and the BOM attribute set).
(If the encoding is wrongly specified, then a BOM for an alternative
encoding won't be discovered or removed.)
If an encoding is not specified, UTF8 or UTF16 BOM will be detected and
removed. The BOM attribute will be set. UTF16 will be decoded to
unicode.
NOTE: This method must not be called with an empty ``infile``.
Specifying the *wrong* encoding is likely to cause a
``UnicodeDecodeError``.
``infile`` must always be returned as a list of lines, but may be
passed in as a single string.
""" |
if ((self.encoding is not None) and
(self.encoding.lower() not in BOM_LIST)):
# No need to check for a BOM
# the encoding specified doesn't have one
# just decode
return self._decode(infile, self.encoding)
if isinstance(infile, (list, tuple)):
line = infile[0]
else:
line = infile
if self.encoding is not None:
# encoding explicitly supplied
# And it could have an associated BOM
# TODO: if encoding is just UTF16 - we ought to check for both
# TODO: big endian and little endian versions.
enc = BOM_LIST[self.encoding.lower()]
if enc == 'utf_16':
# For UTF16 we try big endian and little endian
for BOM, (encoding, final_encoding) in list(BOMS.items()):
if not final_encoding:
# skip UTF8
continue
if infile.startswith(BOM):
### BOM discovered
##self.BOM = True
# Don't need to remove BOM
return self._decode(infile, encoding)
# If we get this far, will *probably* raise a DecodeError
# As it doesn't appear to start with a BOM
return self._decode(infile, self.encoding)
# Must be UTF8
BOM = BOM_SET[enc]
if not line.startswith(BOM):
return self._decode(infile, self.encoding)
newline = line[len(BOM):]
# BOM removed
if isinstance(infile, (list, tuple)):
infile[0] = newline
else:
infile = newline
self.BOM = True
return self._decode(infile, self.encoding)
# No encoding specified - so we need to check for UTF8/UTF16
for BOM, (encoding, final_encoding) in list(BOMS.items()):
if not isinstance(BOM, str) or not line.startswith(BOM):
continue
else:
# BOM discovered
self.encoding = final_encoding
if not final_encoding:
self.BOM = True
# UTF8
# remove BOM
newline = line[len(BOM):]
if isinstance(infile, (list, tuple)):
infile[0] = newline
else:
infile = newline
# UTF8 - don't decode
if isinstance(infile, string_types):
return infile.splitlines(True)
else:
return infile
# UTF16 - have to decode
return self._decode(infile, encoding)
# No BOM discovered and no encoding specified, just return
if isinstance(infile, string_types):
# infile read from a file will be a single string
return infile.splitlines(True)
return infile |
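A standalone sketch of the simplest case handled above, stripping a UTF-8 BOM from raw bytes; the helper name is illustrative:
import codecs

def strip_utf8_bom(raw):
    # Return (had_bom, payload) with any leading UTF-8 BOM removed.
    if raw.startswith(codecs.BOM_UTF8):
        return True, raw[len(codecs.BOM_UTF8):]
    return False, raw

assert strip_utf8_bom(b'\xef\xbb\xbfkey = value') == (True, b'key = value')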
<SYSTEM_TASK:>
Decode infile to unicode, using the specified encoding.
<END_TASK>
<USER_TASK:>
Description:
def _decode(self, infile, encoding):
"""
Decode infile to unicode, using the specified encoding.
If it is a string, it also needs converting to a list.
""" |
if isinstance(infile, string_types):
# can't be unicode
# NOTE: Could raise a ``UnicodeDecodeError``
return infile.decode(encoding).splitlines(True)
for i, line in enumerate(infile):
# NOTE: The isinstance test here handles mixed lists of unicode/string
# NOTE: But the decode will break on any non-string values
# NOTE: Or could raise a ``UnicodeDecodeError``
if PY3K:
if not isinstance(line, str):
infile[i] = line.decode(encoding)
else:
if not isinstance(line, unicode):
infile[i] = line.decode(encoding)
return infile |
<SYSTEM_TASK:>
Given a section and a depth level, walk back through the sections
<END_TASK>
<USER_TASK:>
Description:
def _match_depth(self, sect, depth):
"""
Given a section and a depth level, walk back through the sections
parents to see if the depth level matches a previous section.
Return a reference to the right section,
or raise a SyntaxError.
""" |
while depth < sect.depth:
if sect is sect.parent:
# we've reached the top level already
raise SyntaxError()
sect = sect.parent
if sect.depth == depth:
return sect
# shouldn't get here
raise SyntaxError() |
<SYSTEM_TASK:>
Handle an error according to the error settings.
<END_TASK>
<USER_TASK:>
Description:
def _handle_error(self, text, ErrorClass, infile, cur_index):
"""
Handle an error according to the error settings.
Either raise the error or store it.
The error will have occurred at ``cur_index``
""" |
line = infile[cur_index]
cur_index += 1
message = text % cur_index
error = ErrorClass(message, cur_index, line)
if self.raise_errors:
# raise the error - parsing stops here
raise error
# store the error
# reraise when parsing has finished
self._errors.append(error) |
<SYSTEM_TASK:>
Return a safely quoted version of a value.
<END_TASK>
<USER_TASK:>
Description:
def _quote(self, value, multiline=True):
"""
Return a safely quoted version of a value.
Raise a ConfigObjError if the value cannot be safely quoted.
If multiline is ``True`` (default) then use triple quotes
if necessary.
* Don't quote values that don't need it.
* Recursively quote members of a list and return a comma joined list.
* Multiline is ``False`` for lists.
* Obey list syntax for empty and single member lists.
If ``list_values=False`` then the value is only quoted if it contains
a ``\\n`` (is multiline) or '#'.
If ``write_empty_values`` is set, and the value is an empty string, it
won't be quoted.
""" |
if multiline and self.write_empty_values and value == '':
# Only if multiline is set, so that it is used for values not
# keys, and not values that are part of a list
return ''
if multiline and isinstance(value, (list, tuple)):
if not value:
return ','
elif len(value) == 1:
return self._quote(value[0], multiline=False) + ','
return ', '.join([self._quote(val, multiline=False)
for val in value])
if not isinstance(value, string_types):
if self.stringify:
value = str(value)
else:
raise TypeError('Value "%s" is not a string.' % value)
if not value:
return '""'
no_lists_no_quotes = not self.list_values and '\n' not in value and '#' not in value
need_triple = multiline and ((("'" in value) and ('"' in value)) or ('\n' in value ))
hash_triple_quote = multiline and not need_triple and ("'" in value) and ('"' in value) and ('#' in value)
check_for_single = (no_lists_no_quotes or not need_triple) and not hash_triple_quote
if check_for_single:
if not self.list_values:
# we don't quote if ``list_values=False``
quot = noquot
# for normal values either single or double quotes will do
elif '\n' in value:
# will only happen if multiline is off - e.g. '\n' in key
raise ConfigObjError('Value "%s" cannot be safely quoted.' % value)
elif ((value[0] not in wspace_plus) and
(value[-1] not in wspace_plus) and
(',' not in value)):
quot = noquot
else:
quot = self._get_single_quote(value)
else:
# if value has '\n' or "'" *and* '"', it will need triple quotes
quot = self._get_triple_quote(value)
if quot == noquot and '#' in value and self.list_values:
quot = self._get_single_quote(value)
return quot % value |
<SYSTEM_TASK:>
Extract the value, where we are in a multiline situation.
<END_TASK>
<USER_TASK:>
Description:
def _multiline(self, value, infile, cur_index, maxline):
"""Extract the value, where we are in a multiline situation.""" |
quot = value[:3]
newvalue = value[3:]
single_line = self._triple_quote[quot][0]
multi_line = self._triple_quote[quot][1]
mat = single_line.match(value)
if mat is not None:
retval = list(mat.groups())
retval.append(cur_index)
return retval
elif newvalue.find(quot) != -1:
# somehow the triple quote is missing
raise SyntaxError()
#
while cur_index < maxline:
cur_index += 1
newvalue += '\n'
line = infile[cur_index]
if line.find(quot) == -1:
newvalue += line
else:
# end of multiline, process it
break
else:
# we've got to the end of the config, oops...
raise SyntaxError()
mat = multi_line.match(line)
if mat is None:
# a badly formed line
raise SyntaxError()
(value, comment) = mat.groups()
return (newvalue + value, comment, cur_index) |
<SYSTEM_TASK:>
Write an individual line, for the write method
<END_TASK>
<USER_TASK:>
Description:
def _write_line(self, indent_string, entry, this_entry, comment):
"""Write an individual line, for the write method""" |
# NOTE: the calls to self._quote here handle non-StringType values.
if not self.unrepr:
val = self._decode_element(self._quote(this_entry))
else:
val = repr(this_entry)
return '%s%s%s%s%s' % (indent_string,
self._decode_element(self._quote(entry, multiline=False)),
self._a_to_u(' = '),
val,
self._decode_element(comment)) |
<SYSTEM_TASK:>
Write a section marker line
<END_TASK>
<USER_TASK:>
Description:
def _write_marker(self, indent_string, depth, entry, comment):
"""Write a section marker line""" |
return '%s%s%s%s%s' % (indent_string,
self._a_to_u('[' * depth),
self._quote(self._decode_element(entry), multiline=False),
self._a_to_u(']' * depth),
self._decode_element(comment)) |
<SYSTEM_TASK:>
Deal with a comment.
<END_TASK>
<USER_TASK:>
Description:
def _handle_comment(self, comment):
"""Deal with a comment.""" |
if not comment:
return ''
start = self.indent_type
if not comment.startswith('#'):
start += self._a_to_u(' # ')
return (start + comment) |
<SYSTEM_TASK:>
Clear ConfigObj instance and restore to 'freshly created' state.
<END_TASK>
<USER_TASK:>
Description:
def reset(self):
"""Clear ConfigObj instance and restore to 'freshly created' state.""" |
self.clear()
self._initialise()
# FIXME: Should be done by '_initialise', but ConfigObj constructor (and reload)
# requires an empty dictionary
self.configspec = None
# Just to be sure ;-)
self._original_configspec = None |
<SYSTEM_TASK:>
Reload a ConfigObj from file.
<END_TASK>
<USER_TASK:>
Description:
def reload(self):
"""
Reload a ConfigObj from file.
This method raises a ``ReloadError`` if the ConfigObj doesn't have
a filename attribute pointing to a file.
""" |
if not isinstance(self.filename, string_types):
raise ReloadError()
filename = self.filename
current_options = {}
for entry in OPTION_DEFAULTS:
if entry == 'configspec':
continue
current_options[entry] = getattr(self, entry)
configspec = self._original_configspec
current_options['configspec'] = configspec
self.clear()
self._initialise(current_options)
self._load(filename, configspec) |
<SYSTEM_TASK:>
A dummy check method, always returns the value unchanged.
<END_TASK>
<USER_TASK:>
Description:
def check(self, check, member, missing=False):
"""A dummy check method, always returns the value unchanged.""" |
if missing:
raise self.baseErrorClass()
return member |
<SYSTEM_TASK:>
Verify that the input HDUList is for a waivered FITS file.
<END_TASK>
<USER_TASK:>
Description:
def _verify(waiveredHdul):
"""
Verify that the input HDUList is for a waivered FITS file.
Parameters:
waiveredHdul HDUList object to be verified
Returns: None
Exceptions:
ValueError Input HDUList is not for a waivered FITS file
""" |
if len(waiveredHdul) == 2:
#
# There must be exactly 2 HDU's
#
if waiveredHdul[0].header['NAXIS'] > 0:
#
# The Primary HDU must have some data
#
if isinstance(waiveredHdul[1], fits.TableHDU):
#
# The Alternate HDU must be a TableHDU
#
if waiveredHdul[0].data.shape[0] == \
waiveredHdul[1].data.shape[0] or \
waiveredHdul[1].data.shape[0] == 1:
#
# The number of arrays in the Primary HDU must match
# the number of rows in the TableHDU. This includes
# the case where there is only a single array and row.
#
return
#
# Not a valid waivered Fits file
#
raise ValueError("Input object does not represent a valid waivered" + \
" FITS file") |
<SYSTEM_TASK:>
Convert the input waivered FITS object to various formats. The
<END_TASK>
<USER_TASK:>
Description:
def convertwaiveredfits(waiveredObject,
outputFileName=None,
forceFileOutput=False,
convertTo='multiExtension',
verbose=False):
"""
Convert the input waivered FITS object to various formats. The
default conversion format is multi-extension FITS. Generate an output
file in the desired format if requested.
Parameters:
waiveredObject input object representing a waivered FITS file;
either a astropy.io.fits.HDUList object, a file object, or a
file specification
outputFileName file specification for the output file
Default: None - do not generate an output file
forceFileOutput force the generation of an output file when the
outputFileName parameter is None; the output file
specification will be the same as the input file
specification with the last character of the base
name replaced with the character `h` in
multi-extension FITS format.
Default: False
convertTo target conversion type
Default: 'multiExtension'
verbose provide verbose output
Default: False
Returns:
hdul an HDUList object in the requested format.
Exceptions:
ValueError Conversion type is unknown
""" |
if convertTo == 'multiExtension':
func = toMultiExtensionFits
else:
raise ValueError('Conversion type ' + convertTo + ' unknown')
return func(*(waiveredObject,outputFileName,forceFileOutput,verbose)) |
<SYSTEM_TASK:>
Do basic configuration for the logging system. Similar to
<END_TASK>
<USER_TASK:>
Description:
def create_logger(name, format='%(levelname)s: %(message)s', datefmt=None,
stream=None, level=logging.INFO, filename=None, filemode='w',
filelevel=None, propagate=True):
"""
Do basic configuration for the logging system. Similar to
logging.basicConfig but the logger ``name`` is configurable and both a file
output and a stream output can be created. Returns a logger object.
The default behaviour is to create a logger called ``name`` with a null
handled, and to use the "%(levelname)s: %(message)s" format string, and add
the handler to the ``name`` logger.
A number of optional keyword arguments may be specified, which can alter
the default behaviour.
:param name: Logger name
:param format: handler format string
:param datefmt: handler date/time format specifier
:param stream: add a StreamHandler using ``stream``
(None disables the stream, default=None)
:param level: logger level (default=INFO).
:param filename: add a FileHandler using ``filename`` (default=None)
:param filemode: open ``filename`` with specified filemode ('w' or 'a')
:param filelevel: logger level for file logger (default=``level``)
:param propagate: propagate message to parent (default=True)
:returns: logging.Logger object
""" |
# Get a logger for the specified name
logger = logging.getLogger(name)
logger.setLevel(level)
fmt = logging.Formatter(format, datefmt)
logger.propagate = propagate
# Remove existing handlers, otherwise multiple handlers can accrue
for hdlr in logger.handlers:
logger.removeHandler(hdlr)
# Add handlers. Add NullHandler if no file or stream output so that
# modules don't emit a warning about no handler.
if not (filename or stream):
logger.addHandler(logging.NullHandler())
if filename:
hdlr = logging.FileHandler(filename, filemode)
if filelevel is None:
filelevel = level
hdlr.setLevel(filelevel)
hdlr.setFormatter(fmt)
logger.addHandler(hdlr)
if stream:
hdlr = logging.StreamHandler(stream)
hdlr.setLevel(level)
hdlr.setFormatter(fmt)
logger.addHandler(hdlr)
return logger |
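A usage sketch of the function above; the logger name and filenames are made up:
import logging
import sys

log = create_logger('my_tool', stream=sys.stderr, level=logging.DEBUG,
                    filename='my_tool.log', filelevel=logging.INFO)
log.debug('stream handler only (the file handler is set to INFO)')
log.info('goes to both stderr and my_tool.log')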
<SYSTEM_TASK:>
Get contract number when we have only one contract.
<END_TASK>
<USER_TASK:>
Description:
def _get_lonely_contract(self):
"""Get contract number when we have only one contract.""" |
contracts = {}
try:
raw_res = yield from self._session.get(MAIN_URL,
timeout=self._timeout)
except OSError:
raise PyHydroQuebecError("Can not get main page")
# Parse html
content = yield from raw_res.text()
soup = BeautifulSoup(content, 'html.parser')
info_node = soup.find("div", {"class": "span3 contrat"})
if info_node is None:
raise PyHydroQuebecError("Can not found contract")
research = re.search("Contrat ([0-9]{4} [0-9]{5})", info_node.text)
if research is not None:
contracts[research.group(1).replace(" ", "")] = None
if contracts == {}:
raise PyHydroQuebecError("Can not found contract")
return contracts |
<SYSTEM_TASK:>
Get all balances.
<END_TASK>
<USER_TASK:>
Description:
def _get_balances(self):
"""Get all balances.
.. todo::
IT SEEMS balances are shown (MAIN_URL) in the same order
as the contracts in the profile page (PROFILE_URL).
Maybe we should ensure that.
""" |
balances = []
try:
raw_res = yield from self._session.get(MAIN_URL,
timeout=self._timeout)
except OSError:
raise PyHydroQuebecError("Can not get main page")
# Parse html
content = yield from raw_res.text()
soup = BeautifulSoup(content, 'html.parser')
solde_nodes = soup.find_all("div", {"class": "solde-compte"})
if solde_nodes == []:
raise PyHydroQuebecError("Can not found balance")
for solde_node in solde_nodes:
try:
balance = solde_node.find("p").text
except AttributeError:
raise PyHydroQuebecError("Can not found balance")
balances.append(float(balance[:-2]
.replace(",", ".")
.replace("\xa0", "")))
return balances |
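As an illustration of the string cleanup at the end of _get_balances(); the raw text below (non-breaking space, comma decimal, trailing ' $') is an assumed page format:
raw = "1\xa0234,56 $"
value = float(raw[:-2].replace(",", ".").replace("\xa0", ""))
assert value == 1234.56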
<SYSTEM_TASK:>
Load the profile page of a specific contract when we have multiple contracts.
<END_TASK>
<USER_TASK:>
Description:
def _load_contract_page(self, contract_url):
"""Load the profile page of a specific contract when we have multiple contracts.""" |
try:
yield from self._session.get(contract_url,
timeout=self._timeout)
except OSError:
raise PyHydroQuebecError("Can not get profile page for a "
"specific contract") |
<SYSTEM_TASK:>
Get detailed energy use from a specific contract.
<END_TASK>
<USER_TASK:>
Description:
def fetch_data_detailled_energy_use(self, start_date=None, end_date=None):
"""Get detailled energy use from a specific contract.""" |
if start_date is None:
start_date = datetime.datetime.now(HQ_TIMEZONE) - datetime.timedelta(days=1)
if end_date is None:
end_date = datetime.datetime.now(HQ_TIMEZONE)
# Get http session
yield from self._get_httpsession()
# Get login page
login_url = yield from self._get_login_page()
# Post login page
yield from self._post_login_page(login_url)
# Get p_p_id and contracts
p_p_id, contracts = yield from self._get_p_p_id_and_contract()
# If we don't have any contracts, that means we have only
# one contract. Let's get it
if contracts == {}:
contracts = yield from self._get_lonely_contract()
# For all contracts
for contract, contract_url in contracts.items():
if contract_url:
yield from self._load_contract_page(contract_url)
data = {}
dates = [(start_date + datetime.timedelta(n))
for n in range(int((end_date - start_date).days))]
for date in dates:
# Get Hourly data
day_date = date.strftime("%Y-%m-%d")
hourly_data = yield from self._get_hourly_data(day_date, p_p_id)
data[day_date] = hourly_data['raw_hourly_data']
# Add contract
self._data[contract] = data |
<SYSTEM_TASK:>
Get the latest data from HydroQuebec.
<END_TASK>
<USER_TASK:>
Description:
def fetch_data(self):
"""Get the latest data from HydroQuebec.""" |
# Get http session
yield from self._get_httpsession()
# Get login page
login_url = yield from self._get_login_page()
# Post login page
yield from self._post_login_page(login_url)
# Get p_p_id and contracts
p_p_id, contracts = yield from self._get_p_p_id_and_contract()
# If we don't have any contracts, that means we have only
# one contract. Let's get it
if contracts == {}:
contracts = yield from self._get_lonely_contract()
# Get balance
balances = yield from self._get_balances()
balances_len = len(balances)
balance_id = 0
# For all contracts
for contract, contract_url in contracts.items():
if contract_url:
yield from self._load_contract_page(contract_url)
# Get Hourly data
try:
yesterday = datetime.datetime.now(HQ_TIMEZONE) - datetime.timedelta(days=1)
day_date = yesterday.strftime("%Y-%m-%d")
hourly_data = yield from self._get_hourly_data(day_date, p_p_id)
hourly_data = hourly_data['processed_hourly_data']
except Exception: # pylint: disable=W0703
# We don't have hourly data for some reason
hourly_data = {}
# Get Annual data
try:
annual_data = yield from self._get_annual_data(p_p_id)
except PyHydroQuebecAnnualError:
# We don't have annual data, which is possible if your
# contract is younger than 1 year
annual_data = {}
# Get Monthly data
monthly_data = yield from self._get_monthly_data(p_p_id)
monthly_data = monthly_data[0]
# Get daily data
start_date = monthly_data.get('dateDebutPeriode')
end_date = monthly_data.get('dateFinPeriode')
try:
daily_data = yield from self._get_daily_data(p_p_id, start_date, end_date)
except Exception: # pylint: disable=W0703
daily_data = []
# We have to test daily_data because it's empty
# at the start/end of a period
if daily_data:
daily_data = daily_data[0]['courant']
# format data
contract_data = {"balance": balances[balance_id]}
for key1, key2 in MONTHLY_MAP:
contract_data[key1] = monthly_data[key2]
for key1, key2 in ANNUAL_MAP:
contract_data[key1] = annual_data.get(key2, "")
# We have to test daily_data because it's empty
# at the start/end of a period
if daily_data:
for key1, key2 in DAILY_MAP:
contract_data[key1] = daily_data[key2]
# Hourly
if hourly_data:
contract_data['yesterday_hourly_consumption'] = hourly_data
# Add contract
self._data[contract] = contract_data
balance_count = balance_id + 1
if balance_count < balances_len:
balance_id += 1 |
<SYSTEM_TASK:>
Validate a type or matcher argument to the constructor.
<END_TASK>
<USER_TASK:>
Description:
def _validate_argument(self, arg):
"""Validate a type or matcher argument to the constructor.""" |
if arg is None:
return arg
if isinstance(arg, type):
return InstanceOf(arg)
if not isinstance(arg, BaseMatcher):
raise TypeError(
"argument of %s can be a type or a matcher (got %r)" % (
self.__class__.__name__, type(arg)))
return arg |
<SYSTEM_TASK:>
Initialize the mapping matcher with constructor arguments.
<END_TASK>
<USER_TASK:>
Description:
def _initialize(self, *args, **kwargs):
"""Initiaize the mapping matcher with constructor arguments.""" |
self.items = None
self.keys = None
self.values = None
if args:
if len(args) != 2:
raise TypeError("expected exactly two positional arguments, "
"got %s" % len(args))
if kwargs:
raise TypeError(
"expected positional or keyword arguments, not both")
# got positional arguments only
self.keys, self.values = map(self._validate_argument, args)
elif kwargs:
has_kv = 'keys' in kwargs and 'values' in kwargs
has_of = 'of' in kwargs
if not (has_kv or has_of):
raise TypeError("expected keys/values or items matchers, "
"but got: %s" % list(kwargs.keys()))
if has_kv and has_of:
raise TypeError(
"expected keys & values, or items matchers, not both")
if has_kv:
# got keys= and values= matchers
self.keys = self._validate_argument(kwargs['keys'])
self.values = self._validate_argument(kwargs['values'])
else:
# got of= matcher, which can be a tuple of matchers,
# or a single matcher for dictionary items
of = kwargs['of']
if isinstance(of, tuple):
try:
# got of= as tuple of matchers
self.keys, self.values = \
map(self._validate_argument, of)
except ValueError:
raise TypeError(
"of= tuple has to be a pair of matchers/types" % (
self.__class__.__name__,))
else:
# got of= as a single matcher
self.items = self._validate_argument(of) |
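The accepted call forms, summarised; the matcher class name DictOf is hypothetical since the class itself is not shown in this snippet:
# DictOf(str, int)              # positional: keys matcher/type, values matcher/type
# DictOf(keys=str, values=int)  # keyword form of the same
# DictOf(of=(str, int))         # of= as a (keys, values) pair
# DictOf(of=SomeItemMatcher())  # of= as a single matcher applied to items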
<SYSTEM_TASK:>
Log an error message and exit.
<END_TASK>
<USER_TASK:>
Description:
def fatal(*args, **kwargs):
"""Log an error message and exit.
Following arguments are keyword-only.
:param exitcode: Optional exit code to use
:param cause: Optional Invoke's Result object, i.e.
result of a subprocess invocation
""" |
# determine the exitcode to return to the operating system
exitcode = None
if 'exitcode' in kwargs:
exitcode = kwargs.pop('exitcode')
if 'cause' in kwargs:
cause = kwargs.pop('cause')
if not isinstance(cause, Result):
raise TypeError(
"invalid cause of fatal error: expected %r, got %r" % (
Result, type(cause)))
exitcode = exitcode or cause.return_code
logging.error(*args, **kwargs)
raise Exit(exitcode or -1) |
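A hypothetical call site, assuming Invoke's Result object from a task context; the names and command are illustrative:
# result = ctx.run('make test', warn=True)
# if result.failed:
#     fatal("tests failed", cause=result)   # exits with the subprocess return code
# fatal("unexpected state", exitcode=2)     # explicit exit code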
<SYSTEM_TASK:>
Adds the ratelimit and request timeout parameters to a function.
<END_TASK>
<USER_TASK:>
Description:
def _add_request_parameters(func):
"""Adds the ratelimit and request timeout parameters to a function.""" |
# The function the decorator returns
async def decorated_func(*args, handle_ratelimit=None, max_tries=None, request_timeout=None, **kwargs):
return await func(*args, handle_ratelimit=handle_ratelimit, max_tries=max_tries,
request_timeout=request_timeout, **kwargs)
# We return the decorated func
return decorated_func |
<SYSTEM_TASK:>
Does a request to some endpoint. This is also where ratelimit logic is handled.
<END_TASK>
<USER_TASK:>
Description:
async def _base_request(self, battle_tag: str, endpoint_name: str, session: aiohttp.ClientSession, *, platform=None,
handle_ratelimit=None, max_tries=None, request_timeout=None):
"""Does a request to some endpoint. This is also where ratelimit logic is handled.""" |
# We check the different optional arguments, and if they're not passed (are none) we set them to the default for the client object
if platform is None:
platform = self.default_platform
if handle_ratelimit is None:
handle_ratelimit = self.default_handle_ratelimit
if max_tries is None:
max_tries = self.default_max_tries
if request_timeout is None:
request_timeout = self.default_request_timeout
# The battletag with #s removed
san_battle_tag = self.sanitize_battletag(battle_tag)
# The ratelimit logic
for _ in range(max_tries):
# We execute a request
try:
resp_json, status = await self._async_get(
session,
self.server_url + self._api_urlpath + "{battle_tag}/{endpoint}".format(
battle_tag=san_battle_tag,
endpoint=endpoint_name
),
params={"platform": platform},
# Passed to _async_get and indicates what platform we're searching on
headers={"User-Agent": "overwatch_python_api"},
# According to https://github.com/SunDwarf/OWAPI/blob/master/owapi/v3/v3_util.py#L18 we have to customise our User-Agent, so we do
_async_timeout_seconds=request_timeout
)
if status == 429 and resp_json["msg"] == "you are being ratelimited":
raise RatelimitError
except RatelimitError as e:
# This excepts both RatelimitErrors and TimeoutErrors: RatelimitError for the server returning a ratelimit, TimeoutError for the connection not completing within the timeout
# We are ratelimited, so we check if we handle ratelimiting logic
# If so, we wait and then execute the next iteration of the loop
if handle_ratelimit:
# We wait to remedy ratelimiting, and we wait a bit more than the response says we should
await asyncio.sleep(resp_json["retry"] + 1)
continue
else:
raise
else:
# We didn't get an error, so we exit the loop because it was a successful request
break
else:
# The loop finished without hitting break, which means we were ratelimited on every attempt until the maximum number of tries was reached
raise RatelimitError("Got ratelimited for each requests until the maximum number of retries were reached.")
# Validate the response
if status != 200:
if status == 404 and resp_json["msg"] == "profile not found":
raise ProfileNotFoundError(
"Got HTTP 404, profile not found. This is caused by the given battletag not existing on the specified platform.")
if status == 429 and resp_json["msg"] == "you are being ratelimited":
raise RatelimitError(
"Got HTTP 429, you are being ratelimited. This is caused by calls to the api too frequently.")
raise ConnectionError("Did not get HTTP status 200, got: {0}".format(status))
return resp_json |
<SYSTEM_TASK:>
Check if argument is a method.
<END_TASK>
<USER_TASK:>
Description:
def is_method(arg, min_arity=None, max_arity=None):
"""Check if argument is a method.
Optionally, we can also check if minimum or maximum arities
(number of accepted arguments) match given minimum and/or maximum.
""" |
if not callable(arg):
return False
if not any(is_(arg) for is_ in (inspect.ismethod,
inspect.ismethoddescriptor,
inspect.isbuiltin)):
return False
try:
argnames, varargs, kwargs, defaults = getargspec(arg)
except TypeError:
# On CPython 2.x, built-in methods of file aren't inspectable,
# so if it's file.read() or file.write(), we can't tell it for sure.
# Given how this check is being used, assuming the best is probably
# all we can do here.
return True
else:
if argnames and argnames[0] == 'self':
argnames = argnames[1:]
if min_arity is not None:
actual_min_arity = len(argnames) - len(defaults or ())
assert actual_min_arity >= 0, (
"Minimum arity of %r found to be negative (got %s)!" % (
arg, actual_min_arity))
if int(min_arity) != actual_min_arity:
return False
if max_arity is not None:
actual_max_arity = sys.maxsize if varargs or kwargs else len(argnames)
if int(max_arity) != actual_max_arity:
return False
return True |
<SYSTEM_TASK:>
Check if the argument is a readable file-like object.
<END_TASK>
<USER_TASK:>
Description:
def _is_readable(self, obj):
"""Check if the argument is a readable file-like object.""" |
try:
read = getattr(obj, 'read')
except AttributeError:
return False
else:
return is_method(read, max_arity=1) |
<SYSTEM_TASK:>
Check if the argument is a writable file-like object.
<END_TASK>
<USER_TASK:>
Description:
def _is_writable(self, obj):
"""Check if the argument is a writable file-like object.""" |
try:
write = getattr(obj, 'write')
except AttributeError:
return False
else:
return is_method(write, min_arity=1, max_arity=1) |
<SYSTEM_TASK:>
loops the rungtd1d function below. Figure it's easier to troubleshoot in Python than Fortran.
<END_TASK>
<USER_TASK:>
Description:
def run(time: datetime, altkm: float,
glat: Union[float, np.ndarray], glon: Union[float, np.ndarray], *,
f107a: float = None, f107: float = None, Ap: int = None) -> xarray.Dataset:
"""
loops the rungtd1d function below. Figure it's easier to troubleshoot in Python than Fortran.
""" |
glat = np.atleast_2d(glat)
glon = np.atleast_2d(glon) # has to be here
# %% altitude 1-D
if glat.size == 1 and glon.size == 1 and isinstance(time, (str, date, datetime, np.datetime64)):
atmos = rungtd1d(time, altkm, glat.squeeze()[()], glon.squeeze()[()],
f107a=f107a, f107=f107, Ap=Ap)
# %% lat/lon grid at 1 altitude
else:
atmos = loopalt_gtd(time, glat, glon, altkm,
f107a=f107a, f107=f107, Ap=Ap)
return atmos |
<SYSTEM_TASK:>
loop over location and time
<END_TASK>
<USER_TASK:>
Description:
def loopalt_gtd(time: datetime,
glat: Union[float, np.ndarray], glon: Union[float, np.ndarray],
altkm: Union[float, List[float], np.ndarray], *,
f107a: float = None, f107: float = None, Ap: int = None) -> xarray.Dataset:
"""
loop over location and time
time: datetime or numpy.datetime64 or list of datetime or np.ndarray of datetime
glat: float or 2-D np.ndarray
glon: float or 2-D np.ndarray
altkm: float or list or 1-D np.ndarray
""" |
glat = np.atleast_2d(glat)
glon = np.atleast_2d(glon)
assert glat.ndim == glon.ndim == 2
times = np.atleast_1d(time)
assert times.ndim == 1
atmos = xarray.Dataset()
for k, t in enumerate(times):
print('computing', t)
for i in range(glat.shape[0]):
for j in range(glat.shape[1]):
# atmos = xarray.concat((atmos, rungtd1d(t, altkm, glat[i,j], glon[i,j])),
# data_vars='minimal',coords='minimal',dim='lon')
atm = rungtd1d(t, altkm, glat[i, j], glon[i, j],
f107a=f107a, f107=f107, Ap=Ap)
atmos = xarray.merge((atmos, atm))
atmos.attrs = atm.attrs
return atmos |
<SYSTEM_TASK:>
Raise ValidationError if the contact exists.
<END_TASK>
<USER_TASK:>
Description:
def clean_email(self):
""" Raise ValidationError if the contact exists. """ |
contacts = self.api.lists.contacts(id=self.list_id)['result']
for contact in contacts:
if contact['email'] == self.cleaned_data['email']:
raise forms.ValidationError(
_(u'This email is already subscribed'))
return self.cleaned_data['email'] |
<SYSTEM_TASK:>
Create a contact with using the email on the list.
<END_TASK>
<USER_TASK:>
Description:
def add_contact(self):
""" Create a contact with using the email on the list. """ |
self.api.lists.addcontact(
contact=self.cleaned_data['email'], id=self.list_id, method='POST') |
<SYSTEM_TASK:>
Get or create the list id.
<END_TASK>
<USER_TASK:>
Description:
def list_id(self):
""" Get or create the list id. """ |
list_id = getattr(self, '_list_id', None)
if list_id is None:
for l in self.api.lists.all()['lists']:
if l['name'] == self.list_name:
self._list_id = l['id']
if not getattr(self, '_list_id', None):
self._list_id = self.api.lists.create(
label=self.list_label, name=self.list_name,
method='POST')['list_id']
return self._list_id |
<SYSTEM_TASK:>
Reads values of "magic tags" defined in the given Python file.
<END_TASK>
<USER_TASK:>
Description:
def read_tags(filename):
"""Reads values of "magic tags" defined in the given Python file.
:param filename: Python filename to read the tags from
:return: Dictionary of tags
""" |
with open(filename) as f:
ast_tree = ast.parse(f.read(), filename)
res = {}
for node in ast.walk(ast_tree):
if type(node) is not ast.Assign:
continue
target = node.targets[0]
if type(target) is not ast.Name:
continue
if not (target.id.startswith('__') and target.id.endswith('__')):
continue
name = target.id[2:-2]
res[name] = ast.literal_eval(node.value)
return res |
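A runnable usage sketch of read_tags(); the module contents below are made up:
import tempfile
import textwrap

src = textwrap.dedent("""\
    __version__ = '1.2.3'
    __author__ = 'Jane Doe'
""")
with tempfile.NamedTemporaryFile('w', suffix='.py', delete=False) as f:
    f.write(src)
print(read_tags(f.name))   # -> {'version': '1.2.3', 'author': 'Jane Doe'}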
<SYSTEM_TASK:>
Parses the given text and yields tokens which represent words within
<END_TASK>
<USER_TASK:>
Description:
def word_tokenize(text, stopwords=_stopwords, ngrams=None, min_length=0, ignore_numeric=True):
"""
Parses the given text and yields tokens which represent words within
the given text. Tokens are assumed to be divided by any form of
whitespace character.
""" |
if ngrams is None:
ngrams = 1
text = re.sub(re.compile('\'s'), '', text) # Simple heuristic
text = re.sub(_re_punctuation, '', text)
matched_tokens = re.findall(_re_token, text.lower())
for tokens in get_ngrams(matched_tokens, ngrams):
for i in range(len(tokens)):
tokens[i] = tokens[i].strip(punctuation)
if len(tokens[i]) < min_length or tokens[i] in stopwords:
break
if ignore_numeric and isnumeric(tokens[i]):
break
else:
yield tuple(tokens) |
<SYSTEM_TASK:>
attempt to build using CMake >= 3
<END_TASK>
<USER_TASK:>
Description:
def cmake_setup():
"""
attempt to build using CMake >= 3
""" |
cmake_exe = shutil.which('cmake')
if not cmake_exe:
raise FileNotFoundError('CMake not available')
wopts = ['-G', 'MinGW Makefiles', '-DCMAKE_SH="CMAKE_SH-NOTFOUND'] if os.name == 'nt' else []
subprocess.check_call([cmake_exe] + wopts + [str(SRCDIR)],
cwd=BINDIR)
ret = subprocess.run([cmake_exe, '--build', str(BINDIR)],
stderr=subprocess.PIPE,
universal_newlines=True)
result(ret) |
<SYSTEM_TASK:>
attempt to build with Meson + Ninja
<END_TASK>
<USER_TASK:>
Description:
def meson_setup():
"""
attempt to build with Meson + Ninja
""" |
meson_exe = shutil.which('meson')
ninja_exe = shutil.which('ninja')
if not meson_exe or not ninja_exe:
raise FileNotFoundError('Meson or Ninja not available')
if not (BINDIR / 'build.ninja').is_file():
subprocess.check_call([meson_exe, str(SRCDIR)], cwd=BINDIR)
ret = subprocess.run(ninja_exe, cwd=BINDIR, stderr=subprocess.PIPE,
universal_newlines=True)
result(ret) |
<SYSTEM_TASK:>
Adds an occurrence of the term in the specified document.
<END_TASK>
<USER_TASK:>
Description:
def add_term_occurrence(self, term, document):
"""
Adds an occurrence of the term in the specified document.
""" |
if document not in self._documents:
self._documents[document] = 0
if term not in self._terms:
if self._freeze:
return
else:
self._terms[term] = collections.Counter()
if document not in self._terms[term]:
self._terms[term][document] = 0
self._documents[document] += 1
self._terms[term][document] += 1 |
<SYSTEM_TASK:>
Gets the frequency of the specified term in the entire corpus
<END_TASK>
<USER_TASK:>
Description:
def get_total_term_frequency(self, term):
"""
Gets the frequency of the specified term in the entire corpus
added to the HashedIndex.
""" |
if term not in self._terms:
raise IndexError(TERM_DOES_NOT_EXIST)
return sum(self._terms[term].values()) |
<SYSTEM_TASK:>
Returns the frequency of the term specified in the document.
<END_TASK>
<USER_TASK:>
Description:
def get_term_frequency(self, term, document, normalized=False):
"""
Returns the frequency of the term specified in the document.
""" |
if document not in self._documents:
raise IndexError(DOCUMENT_DOES_NOT_EXIST)
if term not in self._terms:
raise IndexError(TERM_DOES_NOT_EXIST)
result = self._terms[term].get(document, 0)
if normalized:
result /= self.get_document_length(document)
return float(result) |
<SYSTEM_TASK:>
Returns the number of documents the specified term appears in.
<END_TASK>
<USER_TASK:>
Description:
def get_document_frequency(self, term):
"""
Returns the number of documents the specified term appears in.
""" |
if term not in self._terms:
raise IndexError(TERM_DOES_NOT_EXIST)
else:
return len(self._terms[term]) |
<SYSTEM_TASK:>
Returns the number of terms found within the specified document.
<END_TASK>
<USER_TASK:>
Description:
def get_document_length(self, document):
"""
Returns the number of terms found within the specified document.
""" |
if document in self._documents:
return self._documents[document]
else:
raise IndexError(DOCUMENT_DOES_NOT_EXIST) |
<SYSTEM_TASK:>
Returns all documents related to the specified term in the
<END_TASK>
<USER_TASK:>
Description:
def get_documents(self, term):
"""
Returns all documents related to the specified term in the
form of a Counter object.
""" |
if term not in self._terms:
raise IndexError(TERM_DOES_NOT_EXIST)
else:
return self._terms[term] |
<SYSTEM_TASK:>
Returns the Term-Frequency Inverse-Document-Frequency value for the given
<END_TASK>
<USER_TASK:>
Description:
def get_tfidf(self, term, document, normalized=False):
"""
Returns the Term-Frequency Inverse-Document-Frequency value for the given
term in the specified document. If normalized is True, term frequency will
be divided by the document length.
""" |
tf = self.get_term_frequency(term, document)
# Speeds up performance by avoiding extra calculations
if tf != 0.0:
# Add 1 to document frequency to prevent divide by 0
# (Laplacian Correction)
df = 1 + self.get_document_frequency(term)
n = 2 + len(self._documents)
if normalized:
tf /= self.get_document_length(document)
return tf * math.log10(n / df)
else:
return 0.0 |
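A worked example of the weighting above, using the same +1/+2 Laplacian terms as the code; the counts are made up:
import math

tf = 3            # term occurrences in the document
doc_freq = 4      # documents containing the term
num_docs = 10     # documents in the index

df = 1 + doc_freq                          # 5, with the Laplacian correction
n = 2 + num_docs                           # 12
print(round(tf * math.log10(n / df), 3))   # -> 1.141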
<SYSTEM_TASK:>
Returns a feature matrix in the form of a list of lists which
<END_TASK>
<USER_TASK:>
Description:
def generate_feature_matrix(self, mode='tfidf'):
"""
Returns a feature matrix in the form of a list of lists which
represents the terms and documents in this Inverted Index using
the tf-idf weighting by default. The term counts in each
document can alternatively be used by specifying scheme='count'.
A custom weighting function can also be passed which receives a term
and document as parameters.
The size of the matrix is equal to m x n where m is
the number of documents and n is the number of terms.
The list-of-lists format returned by this function can be very easily
converted to a numpy matrix if required using the `np.as_matrix`
method.
""" |
result = []
for doc in self._documents:
result.append(self.generate_document_vector(doc, mode))
return result |
<SYSTEM_TASK:>
Returns the first occurrence of an instance of type `klass` in
<END_TASK>
<USER_TASK:>
Description:
def find_class_in_list(klass, lst):
"""
Returns the first occurrence of an instance of type `klass` in
the given list, or None if no such instance is present.
""" |
filtered = list(filter(lambda x: x.__class__ == klass, lst))
if filtered:
return filtered[0]
return None |
<SYSTEM_TASK:>
Converts a dictionary of name and value pairs into a
<END_TASK>
<USER_TASK:>
Description:
def _build_parmlist(self, parameters):
"""
Converts a dictionary of name and value pairs into a
PARMLIST string value acceptable to the Payflow Pro API.
""" |
args = []
for key, value in parameters.items():
if not value is None:
# We always use the explicit-length keyname format, to reduce the chance
# of requests failing due to unusual characters in parameter values.
try:
classinfo = unicode
except NameError:
classinfo = str
if isinstance(value, classinfo):
key = '%s[%d]' % (key.upper(), len(value.encode('utf-8')))
else:
key = '%s[%d]' % (key.upper(), len(str(value)))
args.append('%s=%s' % (key, value))
args.sort()
parmlist = '&'.join(args)
return parmlist |
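A standalone sketch of the same explicit-length PARMLIST encoding; the parameter names and values are made up:
def build_parmlist(parameters):
    # Mirror of the encoding above: KEY[byte-length]=value, sorted and '&'-joined.
    args = []
    for key, value in parameters.items():
        if value is not None:
            text = value if isinstance(value, str) else str(value)
            args.append('%s[%d]=%s' % (key.upper(), len(text.encode('utf-8')), text))
    return '&'.join(sorted(args))

assert build_parmlist({'user': 'merchant', 'amt': 19.95}) == 'AMT[5]=19.95&USER[8]=merchant'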
<SYSTEM_TASK:>
Define a grid using the specifications of a given model.
<END_TASK>
<USER_TASK:>
Description:
def from_model(cls, model_name, **kwargs):
"""
Define a grid using the specifications of a given model.
Parameters
----------
model_name : string
Name the model (see :func:`get_supported_models` for available
model names).
Supports multiple formats (e.g., 'GEOS5', 'GEOS-5' or 'GEOS_5').
**kwargs : string
Parameters that override the model or default grid
settings (See Other Parameters below).
Returns
-------
A :class:`CTMGrid` object.
Other Parameters
----------------
resolution : (float, float)
Horizontal grid resolution (lon, lat) or (DI, DJ) [degrees]
Psurf : float
        Average surface pressure [hPa] (default: 1013.25)
Notes
-----
Regridded vertical models may have several valid names (e.g.,
'GEOS5_47L' and 'GEOS5_REDUCED' refer to the same model).
""" |
settings = _get_model_info(model_name)
model = settings.pop('model_name')
for k, v in list(kwargs.items()):
if k in ('resolution', 'Psurf'):
settings[k] = v
return cls(model, **settings) |
<SYSTEM_TASK:>
Set-up a user-defined grid using specifications of a reference
<END_TASK>
<USER_TASK:>
Description:
def copy_from_model(cls, model_name, reference, **kwargs):
"""
Set-up a user-defined grid using specifications of a reference
grid model.
Parameters
----------
model_name : string
name of the user-defined grid model.
reference : string or :class:`CTMGrid` instance
Name of the reference model (see :func:`get_supported_models`),
or a :class:`CTMGrid` object from which grid set-up is copied.
**kwargs
Any set-up parameter which will override the settings of the
reference model (see :class:`CTMGrid` parameters).
Returns
-------
A :class:`CTMGrid` object.
""" |
if isinstance(reference, cls):
settings = reference.__dict__.copy()
settings.pop('model')
else:
settings = _get_model_info(reference)
settings.pop('model_name')
settings.update(kwargs)
settings['reference'] = reference
return cls(model_name, **settings) |
<SYSTEM_TASK:>
Compute scalars or coordinates associated to the vertical layers.
<END_TASK>
<USER_TASK:>
Description:
def get_layers(self, Psurf=1013.25, Ptop=0.01, **kwargs):
"""
Compute scalars or coordinates associated to the vertical layers.
Parameters
----------
    Psurf : float or array-like, optional
        Surface air pressure(s) [hPa] (default: 1013.25).
    Ptop : float, optional
        Air pressure at the top of the modeled atmosphere [hPa]
        (default: 0.01).
Returns
-------
dictionary of vertical grid components, including eta (unitless),
sigma (unitless), pressure (hPa), and altitude (km) on both layer centers
and edges, ordered from bottom-to-top.
Notes
-----
    For pure sigma grids, sigma coordinates are given by the esig (edges)
    and csig (centers) attributes.
For both pure sigma and hybrid grids, pressures at layers edges L are
calculated as follows:
.. math:: P_e(L) = A_p(L) + B_p(L) * (P_{surf} - C_p)
where
:math:`P_{surf}`, :math:`P_{top}`
Air pressures at the surface and the top of the modeled atmosphere
(:attr:`Psurf` and :attr:`Ptop` attributes of the :class:`CTMGrid`
instance).
    :math:`A_p(L)`, :math:`B_p(L)`
        Specified in the grid set-up (`Ap` and `Bp` attributes) for hybrid
        grids; for pure sigma grids they equal :math:`P_{top}` and the
        :attr:`esig` attribute, respectively.
    :math:`C_p(L)`
        Equals :math:`P_{top}` for pure sigma grids, or 0 for hybrid
        grids.
Pressures at grid centers are averages of pressures at grid edges:
.. math:: P_c(L) = (P_e(L) + P_e(L+1)) / 2
For hybrid grids, ETA coordinates of grid edges and grid centers are
    given by:
.. math:: ETA_{e}(L) = (P_e(L) - P_{top}) / (P_{surf} - P_{top})
.. math:: ETA_{c}(L) = (P_c(L) - P_{top}) / (P_{surf} - P_{top})
Altitude values are fit using a 5th-degree polynomial; see
`gridspec.prof_altitude` for more details.
""" |
Psurf = np.asarray(Psurf)
output_ndims = Psurf.ndim + 1
if output_ndims > 3:
raise ValueError("`Psurf` argument must be a float or an array"
" with <= 2 dimensions (or None)")
# Compute all variables: takes not much memory, fast
# and better for code reading
SIGe = None
SIGc = None
ETAe = None
ETAc = None
if self.hybrid:
try:
Ap = broadcast_1d_array(self.Ap, output_ndims)
Bp = broadcast_1d_array(self.Bp, output_ndims)
        except (KeyError, AttributeError):
raise ValueError("Impossible to compute vertical levels,"
" data is missing (Ap, Bp)")
Cp = 0.
else:
try:
Bp = SIGe = broadcast_1d_array(self.esig, output_ndims)
SIGc = broadcast_1d_array(self.csig, output_ndims)
        except (KeyError, AttributeError):
raise ValueError("Impossible to compute vertical levels,"
" data is missing (esig, csig)")
Ap = Cp = Ptop
Pe = Ap + Bp * (Psurf - Cp)
Pc = 0.5 * (Pe[0:-1] + Pe[1:])
if self.hybrid:
ETAe = (Pe - Ptop)/(Psurf - Ptop)
ETAc = (Pc - Ptop)/(Psurf - Ptop)
else:
SIGe = SIGe * np.ones_like(Psurf)
SIGc = SIGc * np.ones_like(Psurf)
Ze = prof_altitude(Pe, **kwargs)
Zc = prof_altitude(Pc, **kwargs)
all_vars = {'eta_edges': ETAe,
'eta_centers': ETAc,
'sigma_edges': SIGe,
'sigma_centers': SIGc,
'pressure_edges': Pe,
'pressure_centers': Pc,
'altitude_edges': Ze,
'altitude_centers': Zc}
return all_vars |
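A small standalone sketch of the edge/center formulas above (the three hybrid coefficients are invented for illustration, not any real model's Ap/Bp):

import numpy as np

Ap = np.array([0.0, 180.0, 0.01])      # hPa
Bp = np.array([1.0, 0.60, 0.0])        # unitless
Psurf, Ptop = 1013.25, 0.01

Pe = Ap + Bp * Psurf                   # hybrid grids: Cp = 0, so Pe = Ap + Bp * Psurf
Pc = 0.5 * (Pe[:-1] + Pe[1:])          # layer-center pressures
ETAe = (Pe - Ptop) / (Psurf - Ptop)    # 1.0 at the surface edge, 0.0 at the top edge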
<SYSTEM_TASK:>
existing directories where to search for jinja2 templates. The order
<END_TASK>
<USER_TASK:>
Description:
def _get_template_dirs():
"""existing directories where to search for jinja2 templates. The order
is important. The first found template from the first found dir wins!""" |
return filter(lambda x: os.path.exists(x), [
# user dir
os.path.join(os.path.expanduser('~'), '.py2pack', 'templates'),
# system wide dir
os.path.join('/', 'usr', 'share', 'py2pack', 'templates'),
# usually inside the site-packages dir
os.path.join(os.path.dirname(os.path.abspath(__file__)), 'templates'),
]) |
<SYSTEM_TASK:>
try to get a license from the classifiers
<END_TASK>
<USER_TASK:>
Description:
def _license_from_classifiers(data):
"""try to get a license from the classifiers""" |
classifiers = data.get('classifiers', [])
found_license = None
for c in classifiers:
if c.startswith("License :: OSI Approved :: "):
found_license = c.replace("License :: OSI Approved :: ", "")
return found_license |
<SYSTEM_TASK:>
try to get SPDX license
<END_TASK>
<USER_TASK:>
Description:
def _normalize_license(data):
"""try to get SDPX license""" |
license = data.get('license', None)
if not license:
# try to get license from classifiers
license = _license_from_classifiers(data)
if license:
if license in SDPX_LICENSES.keys():
data['license'] = SDPX_LICENSES[license]
else:
data['license'] = "%s (FIXME:No SPDX)" % (license)
else:
data['license'] = "" |
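A standalone sketch of the classifier scan that feeds the SPDX lookup above (the trove classifiers are illustrative):

prefix = "License :: OSI Approved :: "
classifiers = ['Programming Language :: Python :: 3',
               'License :: OSI Approved :: MIT License']
found_license = None
for c in classifiers:
    if c.startswith(prefix):
        found_license = c.replace(prefix, "")
print(found_license)   # 'MIT License', which _normalize_license then maps via SDPX_LICENSES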
<SYSTEM_TASK:>
Wrap an IPython's Prompt class
<END_TASK>
<USER_TASK:>
Description:
def wrap_prompts_class(Klass):
"""
Wrap an IPython's Prompt class
This is needed in order for Prompt to inject the correct escape sequences
at the right positions for shell integrations.
""" |
try:
from prompt_toolkit.token import ZeroWidthEscape
except ImportError:
return Klass
class ITerm2IPythonPrompt(Klass):
def in_prompt_tokens(self, cli=None):
return [
(ZeroWidthEscape, last_status(self.shell)+BEFORE_PROMPT),
]+\
super(ITerm2IPythonPrompt, self).in_prompt_tokens(cli)+\
[(ZeroWidthEscape, AFTER_PROMPT)]
return ITerm2IPythonPrompt |
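A usage sketch, assuming IPython 5+ where the Prompts class lives in IPython.terminal.prompts (configuration details may differ per setup):

from IPython.terminal.prompts import Prompts

def load_ipython_extension(ipython):
    # Replace the active prompts object with the wrapped class so the
    # shell-integration escape sequences bracket the prompt
    ipython.prompts = wrap_prompts_class(Prompts)(ipython)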
<SYSTEM_TASK:>
A generator which yields a list of all valid keys starting at the
<END_TASK>
<USER_TASK:>
Description:
def get_all_keys(self, start=None):
"""
A generator which yields a list of all valid keys starting at the
given `start` offset. If `start` is `None`, we will start from
the root of the tree.
""" |
s = self.stream
if not start:
start = HEADER_SIZE + self.block_size * self.root_block
s.seek(start)
block_type = s.read(2)
if block_type == LEAF:
reader = LeafReader(self)
num_keys = struct.unpack('>i', reader.read(4))[0]
for _ in range(num_keys):
cur_key = reader.read(self.key_size)
                # We do a tell/seek here so that the user can read from
# the file while this loop is still being run
cur_pos = s.tell()
yield cur_key
s.seek(cur_pos)
length = sbon.read_varint(reader)
reader.seek(length, 1)
elif block_type == INDEX:
(_, num_keys, first_child) = struct.unpack('>Bii', s.read(9))
children = [first_child]
for _ in range(num_keys):
# Skip the key field.
_ = s.read(self.key_size)
# Read pointer to the child block.
next_child = struct.unpack('>i', s.read(4))[0]
children.append(next_child)
for child_loc in children:
for key in self.get_all_keys(HEADER_SIZE + self.block_size * child_loc):
yield key
elif block_type == FREE:
pass
else:
raise Exception('Unhandled block type: {}'.format(block_type)) |
<SYSTEM_TASK:>
Return next unformatted "line". If format is given, unpack content,
<END_TASK>
<USER_TASK:>
Description:
def readline(self, fmt=None):
"""
Return next unformatted "line". If format is given, unpack content,
otherwise return byte string.
""" |
prefix_size = self._fix()
if fmt is None:
content = self.read(prefix_size)
else:
fmt = self.endian + fmt
fmt = _replace_star(fmt, prefix_size)
content = struct.unpack(fmt, self.read(prefix_size))
try:
suffix_size = self._fix()
except EOFError:
# when endian is invalid and prefix_size > total file size
suffix_size = -1
if prefix_size != suffix_size:
raise IOError(_FIX_ERROR)
return content |
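The record layout being parsed here can be sketched standalone: each unformatted "line" is a 4-byte length prefix, the payload, and a matching 4-byte suffix (the values below are arbitrary):

import struct

payload = struct.pack('>3f', 1.0, 2.0, 3.0)
record = (struct.pack('>i', len(payload))     # prefix read by _fix()
          + payload                           # content unpacked with fmt '*f' -> '>3f'
          + struct.pack('>i', len(payload)))  # suffix; must equal the prefix
print(len(record))   # 20 bytes: 4 + 12 + 4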
<SYSTEM_TASK:>
Skip the next line and returns position and size of line.
<END_TASK>
<USER_TASK:>
Description:
def skipline(self):
"""
    Skip the next line and return the position and size of the line.
Raises IOError if pre- and suffix of line do not match.
""" |
position = self.tell()
prefix = self._fix()
self.seek(prefix, 1) # skip content
suffix = self._fix()
if prefix != suffix:
raise IOError(_FIX_ERROR)
return position, prefix |
<SYSTEM_TASK:>
Write `lines` with given `format`.
<END_TASK>
<USER_TASK:>
Description:
def writelines(self, lines, fmt):
"""
Write `lines` with given `format`.
""" |
if isinstance(fmt, basestring):
fmt = [fmt] * len(lines)
for f, line in zip(fmt, lines):
self.writeline(f, line, self.endian) |
<SYSTEM_TASK:>
Read while the most significant bit is set, then put the 7 least
<END_TASK>
<USER_TASK:>
Description:
def read_varint(stream):
"""Read while the most significant bit is set, then put the 7 least
significant bits of all read bytes together to create a number.
""" |
value = 0
while True:
byte = ord(stream.read(1))
if not byte & 0b10000000:
return value << 7 | byte
value = value << 7 | (byte & 0b01111111) |
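A standalone check of the decoding (the byte strings are hand-constructed examples):

import io

def _varint(stream):                       # same logic as read_varint above
    value = 0
    while True:
        byte = ord(stream.read(1))
        if not byte & 0b10000000:
            return value << 7 | byte
        value = value << 7 | (byte & 0b01111111)

assert _varint(io.BytesIO(b'\x2a')) == 42          # single byte, MSB clear
assert _varint(io.BytesIO(b'\x96\x2c')) == 2860    # (0x16 << 7) | 0x2c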
<SYSTEM_TASK:>
Open a GEOS-Chem BPCH file output as an xarray Dataset.
<END_TASK>
<USER_TASK:>
Description:
def open_bpchdataset(filename, fields=[], categories=[],
tracerinfo_file='tracerinfo.dat',
diaginfo_file='diaginfo.dat',
endian=">", decode_cf=True,
memmap=True, dask=True, return_store=False):
""" Open a GEOS-Chem BPCH file output as an xarray Dataset.
Parameters
----------
filename : string
Path to the output file to read in.
{tracerinfo,diaginfo}_file : string, optional
Path to the metadata "info" .dat files which are used to decipher
the metadata corresponding to each variable in the output dataset.
If not provided, will look for them in the current directory or
fall back on a generic set.
fields : list, optional
List of a subset of variable names to return. This can substantially
improve read performance. Note that the field here is just the tracer
name - not the category, e.g. 'O3' instead of 'IJ-AVG-$_O3'.
categories : list, optional
List a subset of variable categories to look through. This can
substantially improve read performance.
endian : {'=', '>', '<'}, optional
Endianness of file on disk. By default, "big endian" (">") is assumed.
decode_cf : bool
Enforce CF conventions for variable names, units, and other metadata
default_dtype : numpy.dtype, optional
Default datatype for variables encoded in file on disk (single-precision
float by default).
memmap : bool
Flag indicating that data should be memory-mapped from disk instead of
eagerly loaded into memory
dask : bool
Flag indicating that data reading should be deferred (delayed) to
construct a task-graph for later execution
return_store : bool
Also return the underlying DataStore to the user
Returns
-------
ds : xarray.Dataset
Dataset containing the requested fields (or the entire file), with data
contained in proxy containers for access later.
store : xarray.AbstractDataStore
Underlying DataStore which handles the loading and processing of
bpch files on disk
""" |
store = BPCHDataStore(
filename, fields=fields, categories=categories,
tracerinfo_file=tracerinfo_file,
diaginfo_file=diaginfo_file, endian=endian,
use_mmap=memmap, dask_delayed=dask
)
ds = xr.Dataset.load_store(store)
# Record what the file object underlying the store which we culled this
# Dataset from is so that we can clean it up later
ds._file_obj = store._bpch
# Handle CF corrections
if decode_cf:
decoded_vars = OrderedDict()
rename_dict = {}
for v in ds.variables:
cf_name = cf.get_valid_varname(v)
rename_dict[v] = cf_name
new_var = cf.enforce_cf_variable(ds[v])
decoded_vars[cf_name] = new_var
ds = xr.Dataset(decoded_vars, attrs=ds.attrs.copy())
# ds.rename(rename_dict, inplace=True)
# TODO: There's a bug with xr.decode_cf which eagerly loads data.
# Re-enable this once that bug is fixed
# Note that we do not need to decode the times because we explicitly
# kept track of them as we parsed the data.
# ds = xr.decode_cf(ds, decode_times=False)
# Set attributes for CF conventions
ts = get_timestamp()
ds.attrs.update(dict(
Conventions='CF1.6',
source=filename,
tracerinfo=tracerinfo_file,
diaginfo=diaginfo_file,
filetype=store._bpch.filetype,
filetitle=store._bpch.filetitle,
history=(
"{}: Processed/loaded by xbpch-{} from {}"
.format(ts, ver, filename)
),
))
    # To immediately load the data from the BPCHDataProxy payloads, need
# to execute ds.data_vars for some reason...
if return_store:
return ds, store
else:
return ds |
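A usage sketch (the file names are placeholders; a real run needs a matching bpch file and its .dat metadata files):

import xbpch

ds = xbpch.open_bpchdataset(
    'ND49_ts.20060101.bpch',
    tracerinfo_file='tracerinfo.dat',
    diaginfo_file='diaginfo.dat',
    fields=['O3', 'NO2'],       # restrict to two tracers for faster reads
    memmap=True, dask=True,
)
print(ds)
ds.close()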
<SYSTEM_TASK:>
Open multiple bpch files as a single dataset.
<END_TASK>
<USER_TASK:>
Description:
def open_mfbpchdataset(paths, concat_dim='time', compat='no_conflicts',
preprocess=None, lock=None, **kwargs):
""" Open multiple bpch files as a single dataset.
You must have dask installed for this to work, as this greatly
simplifies issues relating to multi-file I/O.
Also, please note that this is not a very performant routine. I/O is still
limited by the fact that we need to manually scan/read through each bpch
file so that we can figure out what its contents are, since that metadata
isn't saved anywhere. So this routine will actually sequentially load
Datasets for each bpch file, then concatenate them along the "time" axis.
You may wish to simply process each file individually, coerce to NetCDF,
and then ingest through xarray as normal.
Parameters
----------
paths : list of strs
Filenames to load; order doesn't matter as they will be
lexicographically sorted before we read in the data
concat_dim : str, default='time'
Dimension to concatenate Datasets over. We default to "time" since this
is how GEOS-Chem splits output files
compat : str (optional)
String indicating how to compare variables of the same name for
potential conflicts when merging:
- 'broadcast_equals': all values must be equal when variables are
broadcast against each other to ensure common dimensions.
- 'equals': all values and dimensions must be the same.
- 'identical': all values, dimensions and attributes must be the
same.
- 'no_conflicts': only values which are not null in both datasets
must be equal. The returned dataset then contains the combination
of all non-null values.
preprocess : callable (optional)
A pre-processing function to apply to each Dataset prior to
concatenation
lock : False, True, or threading.Lock (optional)
Passed to :py:func:`dask.array.from_array`. By default, xarray
employs a per-variable lock when reading data from NetCDF files,
but this model has not yet been extended or implemented for bpch files
and so this is not actually used. However, it is likely necessary
before dask's multi-threaded backend can be used
**kwargs : optional
Additional arguments to pass to :py:func:`xbpch.open_bpchdataset`.
""" |
from xarray.backends.api import _MultiFileCloser
# TODO: Include file locks?
# Check for dask
dask = kwargs.pop('dask', False)
if not dask:
raise ValueError("Reading multiple files without dask is not supported")
kwargs['dask'] = True
    # If given a glob string, expand it into a sorted list of paths
if isinstance(paths, basestring):
paths = sorted(glob(paths))
if not paths:
raise IOError("No paths to files were passed into open_mfbpchdataset")
datasets = [open_bpchdataset(filename, **kwargs)
for filename in paths]
bpch_objs = [ds._file_obj for ds in datasets]
if preprocess is not None:
datasets = [preprocess(ds) for ds in datasets]
# Concatenate over time
combined = xr.auto_combine(datasets, compat=compat, concat_dim=concat_dim)
combined._file_obj = _MultiFileCloser(bpch_objs)
combined.attrs = datasets[0].attrs
ts = get_timestamp()
fns_str = " ".join(paths)
combined.attrs['history'] = (
"{}: Processed/loaded by xbpch-{} from {}"
.format(ts, ver, fns_str)
)
return combined |
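A usage sketch for the multi-file case (paths are placeholders; dask is required, as enforced above):

from glob import glob
import xbpch

ds = xbpch.open_mfbpchdataset(
    sorted(glob('ND49_ts.2006*.bpch')),
    concat_dim='time',
    tracerinfo_file='tracerinfo.dat',
    diaginfo_file='diaginfo.dat',
)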
<SYSTEM_TASK:>
Return a bytes string that displays image given by bytes b in the terminal
<END_TASK>
<USER_TASK:>
Description:
def image_bytes(b, filename=None, inline=1, width='auto', height='auto',
preserve_aspect_ratio=None):
"""
Return a bytes string that displays image given by bytes b in the terminal
If filename=None, the filename defaults to "Unnamed file"
width and height are strings, following the format
N: N character cells.
Npx: N pixels.
N%: N percent of the session's width or height.
'auto': The image's inherent size will be used to determine an appropriate
dimension.
preserve_aspect_ratio sets whether the aspect ratio of the image is
preserved. The default (None) is True unless both width and height are
set.
See https://www.iterm2.com/documentation-images.html
""" |
if preserve_aspect_ratio is None:
if width != 'auto' and height != 'auto':
preserve_aspect_ratio = False
else:
preserve_aspect_ratio = True
data = {
'name': base64.b64encode((filename or 'Unnamed file').encode('utf-8')).decode('ascii'),
'inline': inline,
'size': len(b),
'base64_img': base64.b64encode(b).decode('ascii'),
'width': width,
'height': height,
'preserve_aspect_ratio': int(preserve_aspect_ratio),
}
# IMAGE_CODE is a string because bytes doesn't support formatting
return IMAGE_CODE.format(**data).encode('ascii') |
<SYSTEM_TASK:>
Display the image given by the bytes b in the terminal.
<END_TASK>
<USER_TASK:>
Description:
def display_image_bytes(b, filename=None, inline=1, width='auto',
height='auto', preserve_aspect_ratio=None):
"""
Display the image given by the bytes b in the terminal.
If filename=None the filename defaults to "Unnamed file".
width and height are strings, following the format
N: N character cells.
Npx: N pixels.
N%: N percent of the session's width or height.
'auto': The image's inherent size will be used to determine an appropriate
dimension.
preserve_aspect_ratio sets whether the aspect ratio of the image is
preserved. The default (None) is True unless both width and height are
set.
See https://www.iterm2.com/documentation-images.html
""" |
sys.stdout.buffer.write(image_bytes(b, filename=filename, inline=inline,
width=width, height=height, preserve_aspect_ratio=preserve_aspect_ratio))
sys.stdout.write('\n') |
<SYSTEM_TASK:>
Display an image in the terminal.
<END_TASK>
<USER_TASK:>
Description:
def display_image_file(fn, width='auto', height='auto', preserve_aspect_ratio=None):
"""
Display an image in the terminal.
A newline is not printed.
width and height are strings, following the format
N: N character cells.
Npx: N pixels.
N%: N percent of the session's width or height.
'auto': The image's inherent size will be used to determine an appropriate
dimension.
preserve_aspect_ratio sets whether the aspect ratio of the image is
preserved. The default (None) is True unless both width and height are
set.
See https://www.iterm2.com/documentation-images.html
""" |
with open(os.path.realpath(os.path.expanduser(fn)), 'rb') as f:
sys.stdout.buffer.write(image_bytes(f.read(), filename=fn,
width=width, height=height,
preserve_aspect_ratio=preserve_aspect_ratio)) |
<SYSTEM_TASK:>
Returns the coordinates of the given entity UUID inside this world, or
<END_TASK>
<USER_TASK:>
Description:
def get_entity_uuid_coords(self, uuid):
"""
Returns the coordinates of the given entity UUID inside this world, or
`None` if the UUID is not found.
""" |
if uuid in self._entity_to_region_map:
coords = self._entity_to_region_map[uuid]
entities = self.get_entities(*coords)
for entity in entities:
if 'uniqueId' in entity.data and entity.data['uniqueId'] == uuid:
return tuple(entity.data['tilePosition'])
return None |
<SYSTEM_TASK:>
Convert a string into a fuzzy regular expression pattern.
<END_TASK>
<USER_TASK:>
Description:
def create_fuzzy_pattern(pattern):
"""
Convert a string into a fuzzy regular expression pattern.
:param pattern: The input pattern (a string).
:returns: A compiled regular expression object.
This function works by adding ``.*`` between each of the characters in the
input pattern and compiling the resulting expression into a case
insensitive regular expression.
""" |
return re.compile(".*".join(map(re.escape, pattern)), re.IGNORECASE) |
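A standalone illustration of the generated pattern (the entry names are invented):

import re

pattern = re.compile(".*".join(map(re.escape, "gml")), re.IGNORECASE)
print(pattern.pattern)                           # g.*m.*l
assert pattern.search("Personal/GMail")          # g, m, l appear in order
assert pattern.search("amazon") is None          # no match: 'g' is missing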
<SYSTEM_TASK:>
Perform a "fuzzy" search that matches the given characters in the given order.
<END_TASK>
<USER_TASK:>
Description:
def fuzzy_search(self, *filters):
"""
Perform a "fuzzy" search that matches the given characters in the given order.
:param filters: The pattern(s) to search for.
:returns: The matched password names (a list of strings).
""" |
matches = []
logger.verbose(
"Performing fuzzy search on %s (%s) ..", pluralize(len(filters), "pattern"), concatenate(map(repr, filters))
)
patterns = list(map(create_fuzzy_pattern, filters))
for entry in self.filtered_entries:
if all(p.search(entry.name) for p in patterns):
matches.append(entry)
logger.log(
logging.INFO if matches else logging.VERBOSE,
"Matched %s using fuzzy search.",
pluralize(len(matches), "password"),
)
return matches |
<SYSTEM_TASK:>
Select a password from the available choices.
<END_TASK>
<USER_TASK:>
Description:
def select_entry(self, *arguments):
"""
Select a password from the available choices.
:param arguments: Refer to :func:`smart_search()`.
:returns: The name of a password (a string) or :data:`None`
(when no password matched the given `arguments`).
""" |
matches = self.smart_search(*arguments)
if len(matches) > 1:
logger.info("More than one match, prompting for choice ..")
labels = [entry.name for entry in matches]
return matches[labels.index(prompt_for_choice(labels))]
else:
logger.info("Matched one entry: %s", matches[0].name)
return matches[0] |
<SYSTEM_TASK:>
Perform a simple search for case insensitive substring matches.
<END_TASK>
<USER_TASK:>
Description:
def simple_search(self, *keywords):
"""
Perform a simple search for case insensitive substring matches.
:param keywords: The string(s) to search for.
    :returns: The matched password names (a list of strings).
Only passwords whose names matches *all* of the given keywords are
returned.
""" |
matches = []
keywords = [kw.lower() for kw in keywords]
logger.verbose(
"Performing simple search on %s (%s) ..",
pluralize(len(keywords), "keyword"),
concatenate(map(repr, keywords)),
)
for entry in self.filtered_entries:
normalized = entry.name.lower()
if all(kw in normalized for kw in keywords):
matches.append(entry)
logger.log(
logging.INFO if matches else logging.VERBOSE,
"Matched %s using simple search.",
pluralize(len(matches), "password"),
)
return matches |
<SYSTEM_TASK:>
Perform a smart search on the given keywords or patterns.
<END_TASK>
<USER_TASK:>
Description:
def smart_search(self, *arguments):
"""
Perform a smart search on the given keywords or patterns.
:param arguments: The keywords or patterns to search for.
:returns: The matched password names (a list of strings).
:raises: The following exceptions can be raised:
- :exc:`.NoMatchingPasswordError` when no matching passwords are found.
- :exc:`.EmptyPasswordStoreError` when the password store is empty.
This method first tries :func:`simple_search()` and if that doesn't
produce any matches it will fall back to :func:`fuzzy_search()`. If no
matches are found an exception is raised (see above).
""" |
matches = self.simple_search(*arguments)
if not matches:
logger.verbose("Falling back from substring search to fuzzy search ..")
matches = self.fuzzy_search(*arguments)
if not matches:
if len(self.filtered_entries) > 0:
raise NoMatchingPasswordError(
format("No passwords matched the given arguments! (%s)", concatenate(map(repr, arguments)))
)
else:
msg = "You don't have any passwords yet! (no *.gpg files found)"
raise EmptyPasswordStoreError(msg)
return matches |
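The substring-then-fuzzy fallback can be sketched standalone (the entry names are invented):

names = ["Personal/GMail", "Work/GitHub", "Work/GitLab"]

def simple(keywords):
    kws = [kw.lower() for kw in keywords]
    return [n for n in names if all(kw in n.lower() for kw in kws)]

print(simple(["work", "hub"]))   # ['Work/GitHub'] -> fuzzy search never runs
print(simple(["wrkhub"]))        # []              -> smart_search falls back to fuzzy matching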
<SYSTEM_TASK:>
Read an output's diaginfo.dat file and parse into a DataFrame for
<END_TASK>
<USER_TASK:>
Description:
def get_diaginfo(diaginfo_file):
"""
Read an output's diaginfo.dat file and parse into a DataFrame for
use in selecting and parsing categories.
Parameters
----------
diaginfo_file : str
Path to diaginfo.dat
Returns
-------
    Tuple of a DataFrame containing the category information and a dict
    mapping column names to their descriptions.
""" |
widths = [rec.width for rec in diag_recs]
col_names = [rec.name for rec in diag_recs]
dtypes = [rec.type for rec in diag_recs]
usecols = [name for name in col_names if not name.startswith('-')]
diag_df = pd.read_fwf(diaginfo_file, widths=widths, names=col_names,
dtypes=dtypes, comment="#", header=None,
usecols=usecols)
diag_desc = {diag.name: diag.desc for diag in diag_recs
if not diag.name.startswith('-')}
return diag_df, diag_desc |
<SYSTEM_TASK:>
Read an output's tracerinfo.dat file and parse into a DataFrame for
<END_TASK>
<USER_TASK:>
Description:
def get_tracerinfo(tracerinfo_file):
"""
Read an output's tracerinfo.dat file and parse into a DataFrame for
use in selecting and parsing categories.
Parameters
----------
tracerinfo_file : str
Path to tracerinfo.dat
Returns
-------
    Tuple of a DataFrame containing the tracer information and a dict
    mapping column names to their descriptions.
""" |
widths = [rec.width for rec in tracer_recs]
col_names = [rec.name for rec in tracer_recs]
dtypes = [rec.type for rec in tracer_recs]
usecols = [name for name in col_names if not name.startswith('-')]
tracer_df = pd.read_fwf(tracerinfo_file, widths=widths, names=col_names,
dtypes=dtypes, comment="#", header=None,
usecols=usecols)
# Check an edge case related to a bug in GEOS-Chem v12.0.3 which
# erroneously dropped short/long tracer names in certain tracerinfo.dat outputs.
# What we do here is figure out which rows were erroneously processed (they'll
# have NaNs in them) and raise a warning if there are any
na_free = tracer_df.dropna(subset=['tracer', 'scale'])
only_na = tracer_df[~tracer_df.index.isin(na_free.index)]
if len(only_na) > 0:
warn("At least one row in {} wasn't decoded correctly; we strongly"
" recommend you manually check that file to see that all"
" tracers are properly recorded."
.format(tracerinfo_file))
tracer_desc = {tracer.name: tracer.desc for tracer in tracer_recs
if not tracer.name.startswith('-')}
# Process some of the information about which variables are hydrocarbons
# and chemical tracers versus other diagnostics.
def _assign_hydrocarbon(row):
if row['C'] != 1:
row['hydrocarbon'] = True
row['molwt'] = C_MOLECULAR_WEIGHT
else:
row['hydrocarbon'] = False
return row
tracer_df = (
tracer_df
.apply(_assign_hydrocarbon, axis=1)
.assign(chemical=lambda x: x['molwt'].astype(bool))
)
return tracer_df, tracer_desc |
<SYSTEM_TASK:>
Read a chunk of data from a bpch output file.
<END_TASK>
<USER_TASK:>
Description:
def read_from_bpch(filename, file_position, shape, dtype, endian,
use_mmap=False):
""" Read a chunk of data from a bpch output file.
Parameters
----------
filename : str
Path to file on disk containing the data
file_position : int
Position (bytes) where desired data chunk begins
shape : tuple of ints
Resultant (n-dimensional) shape of requested data; the chunk
will be read sequentially from disk and then re-shaped
dtype : dtype
Dtype of data; for best results, pass a dtype which includes
an endian indicator, e.g. `dtype = np.dtype('>f4')`
endian : str
Endianness of data; should be consistent with `dtype`
use_mmap : bool
Memory map the chunk of data to the file on disk, else read
immediately
Returns
-------
Array with shape `shape` and dtype `dtype` containing the requested
chunk of data from `filename`.
""" |
offset = file_position + 4
if use_mmap:
d = np.memmap(filename, dtype=dtype, mode='r', shape=shape,
offset=offset, order='F')
else:
with FortranFile(filename, 'rb', endian) as ff:
ff.seek(file_position)
d = np.array(ff.readline('*f'))
d = d.reshape(shape, order='F')
# As a sanity check, *be sure* that the resulting data block has the
# correct shape, and fail early if it doesn't.
if (d.shape != shape):
raise IOError("Data chunk read from {} does not have the right shape,"
" (expected {} but got {})"
.format(filename, shape, d.shape))
return d |
<SYSTEM_TASK:>
Helper function to load the data referenced by this bundle.
<END_TASK>
<USER_TASK:>
Description:
def _read(self):
""" Helper function to load the data referenced by this bundle. """ |
if self._dask:
d = da.from_delayed(
delayed(read_from_bpch, )(
self.filename, self.file_position, self.shape,
self.dtype, self.endian, use_mmap=self._mmap
),
self.shape, self.dtype
)
else:
d = read_from_bpch(
self.filename, self.file_position, self.shape,
self.dtype, self.endian, use_mmap=self._mmap
)
return d |
<SYSTEM_TASK:>
Close this bpch file.
<END_TASK>
<USER_TASK:>
Description:
def close(self):
""" Close this bpch file.
""" |
if not self.fp.closed:
for v in list(self.var_data):
del self.var_data[v]
self.fp.close() |
<SYSTEM_TASK:>
Read the main metadata packaged within a bpch file, indicating
<END_TASK>
<USER_TASK:>
Description:
def _read_metadata(self):
""" Read the main metadata packaged within a bpch file, indicating
the output filetype and its title.
""" |
filetype = self.fp.readline().strip()
filetitle = self.fp.readline().strip()
# Decode to UTF string, if possible
try:
filetype = str(filetype, 'utf-8')
filetitle = str(filetitle, 'utf-8')
except:
# TODO: Handle this edge-case of converting file metadata more elegantly.
pass
self.__setattr__('filetype', filetype)
self.__setattr__('filetitle', filetitle) |
<SYSTEM_TASK:>
Iterate over the block of this bpch file and return handlers
<END_TASK>
<USER_TASK:>
Description:
def _read_var_data(self):
""" Iterate over the block of this bpch file and return handlers
in the form of `BPCHDataBundle`s for access to the data contained
therein.
""" |
var_bundles = OrderedDict()
var_attrs = OrderedDict()
n_vars = 0
while self.fp.tell() < self.fsize:
var_attr = OrderedDict()
# read first and second header lines
line = self.fp.readline('20sffii')
modelname, res0, res1, halfpolar, center180 = line
line = self.fp.readline('40si40sdd40s7i')
category_name, number, unit, tau0, tau1, reserved = line[:6]
dim0, dim1, dim2, dim3, dim4, dim5, skip = line[6:]
var_attr['number'] = number
# Decode byte-strings to utf-8
category_name = str(category_name, 'utf-8')
var_attr['category'] = category_name.strip()
unit = str(unit, 'utf-8')
# get additional metadata from tracerinfo / diaginfo
try:
cat_df = self.diaginfo_df[
self.diaginfo_df.name == category_name.strip()
]
# TODO: Safer logic for handling case where more than one
# tracer metadata match was made
# if len(cat_df > 1):
# raise ValueError(
# "More than one category matching {} found in "
# "diaginfo.dat".format(
# category_name.strip()
# )
# )
# Safe now to select the only row in the DataFrame
cat = cat_df.T.squeeze()
tracer_num = int(cat.offset) + int(number)
diag_df = self.tracerinfo_df[
self.tracerinfo_df.tracer == tracer_num
]
# TODO: Safer logic for handling case where more than one
# tracer metadata match was made
# if len(diag_df > 1):
# raise ValueError(
# "More than one tracer matching {:d} found in "
# "tracerinfo.dat".format(tracer_num)
# )
# Safe now to select only row in the DataFrame
diag = diag_df.T.squeeze()
diag_attr = diag.to_dict()
if not unit.strip(): # unit may be empty in bpch
unit = diag_attr['unit'] # but not in tracerinfo
var_attr.update(diag_attr)
except:
diag = {'name': '', 'scale': 1}
var_attr.update(diag)
var_attr['unit'] = unit
vname = diag['name']
fullname = category_name.strip() + "_" + vname
# parse metadata, get data or set a data proxy
if dim2 == 1:
data_shape = (dim0, dim1) # 2D field
else:
data_shape = (dim0, dim1, dim2)
var_attr['original_shape'] = data_shape
# Add proxy time dimension to shape
data_shape = tuple([1, ] + list(data_shape))
origin = (dim3, dim4, dim5)
var_attr['origin'] = origin
timelo, timehi = cf.tau2time(tau0), cf.tau2time(tau1)
pos = self.fp.tell()
# Note that we don't pass a dtype, and assume everything is
# single-fp floats with the correct endian, as hard-coded
var_bundle = BPCHDataBundle(
data_shape, self.endian, self.filename, pos, [timelo, timehi],
metadata=var_attr,
use_mmap=self.use_mmap, dask_delayed=self.dask_delayed
)
self.fp.skipline()
# Save the data as a "bundle" for concatenating in the final step
if fullname in var_bundles:
var_bundles[fullname].append(var_bundle)
else:
var_bundles[fullname] = [var_bundle, ]
var_attrs[fullname] = var_attr
n_vars += 1
self.var_data = var_bundles
self.var_attrs = var_attrs |
<SYSTEM_TASK:>
Return the current timestamp in machine local time.
<END_TASK>
<USER_TASK:>
Description:
def get_timestamp(time=True, date=True, fmt=None):
""" Return the current timestamp in machine local time.
    Parameters
    ----------
time, date : Boolean
Flag to include the time or date components, respectively,
in the output.
fmt : str, optional
If passed, will override the time/date choice and use as
the format string passed to `strftime`.
""" |
time_format = "%H:%M:%S"
date_format = "%m-%d-%Y"
if fmt is None:
if time and date:
fmt = time_format + " " + date_format
elif time:
fmt = time_format
elif date:
fmt = date_format
else:
raise ValueError("One of `date` or `time` must be True!")
return datetime.now().strftime(fmt) |
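Example calls (output shown for an arbitrary moment, in machine local time):

print(get_timestamp())                   # e.g. '14:02:33 06-01-2024'
print(get_timestamp(time=False))         # e.g. '06-01-2024'
print(get_timestamp(fmt='%Y%m%dT%H%M'))  # e.g. '20240601T1402'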
<SYSTEM_TASK:>
This is a temporary hot-fix to handle the way metadata is encoded
<END_TASK>
<USER_TASK:>
Description:
def fix_attr_encoding(ds):
""" This is a temporary hot-fix to handle the way metadata is encoded
when we read data directly from bpch files. It removes the 'scale_factor'
and 'units' attributes we encode with the data we ingest, converts the
'hydrocarbon' and 'chemical' attribute to a binary integer instead of a
boolean, and removes the 'units' attribute from the "time" dimension since
that too is implicitly encoded.
In future versions of this library, when upstream issues in decoding
data wrapped in dask arrays is fixed, this won't be necessary and will be
removed.
""" |
def _maybe_del_attr(da, attr):
""" Possibly delete an attribute on a DataArray if it's present """
if attr in da.attrs:
del da.attrs[attr]
return da
def _maybe_decode_attr(da, attr):
# TODO: Fix this so that bools get written as attributes just fine
""" Possibly coerce an attribute on a DataArray to an easier type
to write to disk. """
# bool -> int
        if (attr in da.attrs) and isinstance(da.attrs[attr], bool):
da.attrs[attr] = int(da.attrs[attr])
return da
for v in ds.data_vars:
da = ds[v]
da = _maybe_del_attr(da, 'scale_factor')
da = _maybe_del_attr(da, 'units')
da = _maybe_decode_attr(da, 'hydrocarbon')
da = _maybe_decode_attr(da, 'chemical')
# Also delete attributes on time.
if hasattr(ds, 'time'):
times = ds.time
times = _maybe_del_attr(times, 'units')
return ds |