<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _comment(self, element):
"""Extracts the character to use for comments in the input file."""
|
for v in _get_xml_version(element):
self.versions[v].comment = element.text
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _line(self, element):
"""Parses the XML element as a single line entry in the input file."""
|
for v in _get_xml_version(element):
if "id" in element.attrib:
tline = TemplateLine(element, None, self.versions[v].comment)
self.versions[v].entries[tline.identifier] = tline
self.versions[v].order.append(tline.identifier)
else:
msg.warn("no id element in {}. Ignored. (_line)".format(element))
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def convert(self, path, version, target = None):
"""Converts the specified file using the relevant template. :arg path: the full path to the file to convert. :arg version: the new version of the file. :arg target: the optional path to save the file under. If not specified, the file is saved based on the template file name. """
|
#Get the template and values out of the XML input file and
#write them in the format of the keywordless file.
values, template = self.parse(path)
lines = template.write(values, version)
#Finally, write the lines to the correct path.
if target is None:
target = os.path.join(os.path.dirname(path), template.name)
with open(os.path.expanduser(target), 'w') as f:
f.write("\n".join(lines))
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def parse(self, path):
"""Extracts a dictionary of values from the XML file at the specified path."""
|
#Load the template that will be used for parsing the values.
expath, template, root = self._load_template(path)
if expath is not None:
values = template.parse(root)
return (values, template)
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def convert(self, path, version, target):
"""Converts the specified source file to a new version number."""
|
source = self.comparer.get_representation(path)
lines = [ '# <fortpy version="{}"></fortpy>\n'.format(version) ]
for line in self.comparer.template.contents[version].preamble:
lines.append(line.write(source.preamble, source.version, source.stored) + "\n")
for line in self.comparer.template.contents[version].body:
for valueset in source.body:
lines.append(line.write(valueset, source.version, source.stored) + "\n")
with open(os.path.expanduser(target), 'w') as f:
f.writelines(lines)
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def cachedstr(self):
"""Returns the full string of the file contents from the cache for the file that we are currently providing intellisense for."""
|
if self._cachedstr is None:
if self.module is not None:
refstring = self.module.refstring
self._cachedstr = refstring.splitlines()
else:
self._cachedstr = []
return self._cachedstr
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def exact_match(self):
"""Returns the symbol under the cursor looking both directions as part of a definition lookup for an exact match. """
|
#We don't have to worry about grouping or anything else fancy. Just
#loop through forward and back until we hit a character that can't be
#part of a variable or function name.
if self._exact_match is None:
i = self.pos[1] - 1
start = None
end = None
line = self.current_line
terminators = ['(', ')', '\n', ' ', '=', '%', ',']
while i >= 0 and start is None:
if line[i] in terminators:
start = i + 1
i -= 1
i = self.pos[1]
while i < len(line) and end is None:
if line[i] in terminators:
end = i
i += 1
self._exact_match = line[start:end].lower()
return self._exact_match
|
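To make the two-direction terminator scan above concrete, here is a small standalone trace with an assumed Fortran line and cursor column; the line and the name 'integrate' are invented for illustration.

line = "call integrate(density%values, tol)"
col = 8                                   # cursor somewhere inside 'integrate'
terminators = ['(', ')', '\n', ' ', '=', '%', ',']
# scan backwards for the nearest terminator before the cursor
i, start = col - 1, None
while i >= 0 and start is None:
    if line[i] in terminators:
        start = i + 1
    i -= 1
# scan forwards for the nearest terminator at or after the cursor
i, end = col, None
while i < len(line) and end is None:
    if line[i] in terminators:
        end = i
    i += 1
print(line[start:end].lower())            # -> integrate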
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def short_full_symbol(self):
"""Gets the full symbol excluding the character under the cursor."""
|
if self._short_full_symbol is None:
self._short_full_symbol = self._symbol_extract(cache.RE_FULL_CURSOR,
False, True)
return self._short_full_symbol
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def symbol(self):
"""Gets the symbol under the current cursor."""
|
if self._symbol is None:
self._symbol = self._symbol_extract(cache.RE_CURSOR)
return self._symbol
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def full_symbol(self):
"""Returns the symbol under the cursor AND additional contextual symbols in the case of %-separated lists of type members."""
|
if self._full_symbol is None:
self._full_symbol = self._symbol_extract(cache.RE_FULL_CURSOR, brackets=True)
return self._full_symbol
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _symbol_extract(self, regex, plus = True, brackets=False):
"""Extracts a symbol or full symbol from the current line, optionally including the character under the cursor. :arg regex: the compiled regular expression to use for extraction. :arg plus: when true, the character under the cursor *is* included. :arg brackets: when true, matching pairs of brackets are first removed before the regex is run. """
|
charplus = self.pos[1] + (1 if plus else -1)
consider = self.current_line[:charplus][::-1]
#We want to remove matching pairs of brackets so that derived types
#that have arrays still get intellisense.
if brackets==True:
#The string has already been reversed, just run through it.
rightb = []
lastchar = None
for i in range(len(consider)):
if consider[i] == ")":
rightb.append(i)
elif consider[i] == "(" and len(rightb) > 0:
lastchar = i
rightb.pop()
if lastchar is not None:
consider = '%' + consider[lastchar+1:]
rematch = regex.match(consider)
if rematch is not None:
return rematch.group("symbol")[::-1]
else:
return ""
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def call_arg_index(self):
"""Determines the index of the parameter in a call list using string manipulation and context information."""
|
#The function name we are calling should be in el_name by now
if self._call_index is None:
if (self.el_section == "body" and
self.el_call in [ "sub", "fun" ]):
#Get hold of the element instance of the function being called so
#we can examine its parameters.
fncall = self.el_name
if fncall in self.current_line[:self.pos[1]]:
args = self.current_line[:self.pos[1]].split(fncall)[1]
else:
args = ""
#This handles the case of the bracket-complete.
if args == "":
return 0
#The nester requires each start bracket to have an end bracket
if args[-1] != ")":
args += ")"
#Pyparsing handles calls where functions are being called as
#the values for parameters like function(a, fun(c,d), r).
try:
nl = cache.nester.parseString(args).asList()[0]
clean = [n for n in nl if not isinstance(n, list) and n != ","]
#We need to specially handle the case where they have started typing the
#name of the first parameter but haven't put a ',' in yet. In that case,
#we are still on the first argument.
if clean[-1][-1] != ',':
self._call_index = len(clean) - 1
else:
self._call_index = len(clean)
except:
msg.warn("ARG INDEX: lookup failed on bracket parsing.")
return self._call_index
|
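The `cache.nester` parser is not shown in this snippet; assuming it is a pyparsing `nestedExpr`, the following self-contained sketch reproduces the argument-index logic for simple, flat call lists (the function name and inputs are invented).

from pyparsing import nestedExpr

nester = nestedExpr("(", ")")

def arg_index(args):
    """Return the 0-based index of the argument currently being typed."""
    if not args.endswith(")"):
        args += ")"                       # the nester needs balanced brackets
    nl = nester.parseString(args).asList()[0]
    clean = [n for n in nl if not isinstance(n, list) and n != ","]
    # A trailing ',' on the last token means the previous argument is complete.
    return len(clean) if clean and clean[-1][-1] == "," else max(len(clean) - 1, 0)

print(arg_index("(x, y"))                 # -> 1 (still typing the second argument)
print(arg_index("(x, y, "))               # -> 2 (third argument started)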
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _setup_regex(self):
"""Sets up the constant regex strings etc. that can be used to parse the strings for determining context."""
|
self.RE_COMMENTS = cache.RE_COMMENTS
self.RE_MODULE = cache.RE_MODULE
self.RE_TYPE = cache.RE_TYPE
self.RE_EXEC = cache.RE_EXEC
self.RE_MEMBERS = cache.RE_MEMBERS
self.RE_DEPEND = cache.RE_DEPEND
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _contextualize(self):
"""Finds values for all the important attributes that determine the user's context."""
|
line, column = self.pos
#Get the module top-level information
self._get_module(line)
if self.module is None:
return
#Use the position of the cursor in the file to decide which
#element we are working on.
self.element = self.module.get_element(line, column)
#Now all that's left is to contextualize the line that the
#cursor is on.
self._deep_match(line, column)
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _get_module(self, line):
"""Finds the name of the module and retrieves it from the parser cache."""
|
#Finding the module name is trivial; start at the beginning of
#the module and iterate lines until we find the module.
for sline in self._source:
if len(sline) > 0 and sline[0] != "!":
rmatch = self.RE_MODULE.match(sline)
if rmatch is not None:
self.modulename = rmatch.group("name")
break
else:
#We don't even have the start of a module in this code file
return
#Before we carry on with the rest of the context, find the separating
#CONTAINS keyword so we know whether to look for types or subs/funcs.
#If the code parser hasn't ever parsed this module, parse it now.
self.parser.isense_parse(self._orig_path, self.modulename)
self.module = self.parser.modules[self.modulename]
if line > self.module.contains_index:
self.section = "contains"
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _deep_match(self, line, column):
"""Checks the contents of executables, types and modules for member definitions and updates the context."""
|
#Now we just try each of the possibilities for the current line
if self._match_member(line, column):
self.el_section = "vars"
self.el_type = ValueElement
self.el_name = self.col_match.group("names")
return
if isinstance(self.element, Executable) or isinstance(self.element, Module):
#We are inside of a subroutine or function definition
#It is either params, vars or body. We already tested for variable
#declarations in the match_member test. Check now to see if we are
#on the line that defines the function
self._match_exec(line)
if self.col_match:
self.el_section = "params"
self.el_call = "assign"
return
#This regex incorrectly grabs things like 'if' as functions because
#they do actually look like functions... We need to filter the list
#with special keywords before we claim victory. TODO
self.col_match = self.RE_DEPEND.match(self._source[line])
if self.col_match:
self.el_section = "body"
self.el_name = self.col_match.group("exec")
if self.col_match.group("sub") and "call" in self.col_match.group("sub"):
self.el_call = "sub"
self.el_type = Subroutine
else:
self.el_call = "fun"
self.el_type = Function
return
#If we are inside a type, we either got the member variable declaration
#already, or it is a pointer to a method inside of the module. Add the
#test for that later. TODO
# if isinstance(self.element, CustomType):
self.el_section = "body"
self.el_type = None
#We just need to figure out what kind of a call is being made
#at this column position, the only thing left is
if " = " in self._source[line]:
eqi = self._source[line].index("=")
if column < eqi:
self.el_call = "name"
else:
self.el_call = "assign"
self.el_name = self._source[line].split("=")[0].strip()
elif re.match(r"\s*call\s+", self._source[line]):
self.el_call = "sub"
self.el_name = self._source[line].split("call ")[1]
else:
#The only thing left (and the hardest to nail down) is
#the arithmetic catch all type.
self.el_call = "arith"
self.el_name = "[arith]"
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _match_exec(self, i):
"""Looks at line 'i' for a subroutine or function definition."""
|
self.col_match = self.RE_EXEC.match(self._source[i])
if self.col_match is not None:
if self.col_match.group("codetype") == "function":
self.el_type = Function
else:
self.el_type = Subroutine
self.el_name = self.col_match.group("name")
return True
else:
return False
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _match_member(self, i, column):
"""Looks at line 'i' to see if the line matches a module member def."""
|
self.col_match = self.RE_MEMBERS.match(self._source[i])
if self.col_match is not None:
if column < self._source[i].index(":"):
self.el_call = "name"
else:
self.el_call = "assign"
return True
else:
return False
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _match_type(self, i):
"""Looks at line 'i' to see if the line matches a module user type def."""
|
self.col_match = self.RE_TYPE.match(self._source[i])
if self.col_match is not None:
self.section = "types"
self.el_type = CustomType
self.el_name = self.col_match.group("name")
return True
else:
return False
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def upgradedb(options):
""" Add 'fake' data migrations for existing tables from legacy GeoNode versions """
|
version = options.get('version')
if version in ['1.1', '1.2']:
sh("python manage.py migrate maps 0001 --fake")
sh("python manage.py migrate avatar 0001 --fake")
elif version is None:
print "Please specify your GeoNode version"
else:
print "Upgrades from version %s are not yet supported." % version
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def package(options):
""" Creates a tarball to use for building the system elsewhere """
|
import pkg_resources
import tarfile
import geonode
version = geonode.get_version()
# Use GeoNode's version for the package name.
pkgname = 'GeoNode-%s-all' % version
# Create the output directory.
out_pkg = path(pkgname)
out_pkg_tar = path("%s.tar.gz" % pkgname)
# Create a distribution in zip format for the geonode python package.
dist_dir = path('dist')
dist_dir.rmtree()
sh('python setup.py sdist --formats=zip')
with pushd('package'):
#Delete old tar files in that directory
for f in glob.glob('GeoNode*.tar.gz'):
old_package = path(f)
if old_package != out_pkg_tar:
old_package.remove()
if out_pkg_tar.exists():
info('There is already a package for version %s' % version)
return
# Clean anything that is in the output package tree.
out_pkg.rmtree()
out_pkg.makedirs()
support_folder = path('support')
install_file = path('install.sh')
# And copy the default files from the package folder.
justcopy(support_folder, out_pkg / 'support')
justcopy(install_file, out_pkg)
geonode_dist = path('..') / 'dist' / 'GeoNode-%s.zip' % version
justcopy(geonode_dist, out_pkg)
rogue_dist = path('../..') / 'dist' / 'geoshape-0.1.zip'
justcopy(rogue_dist, out_pkg)
# Create a tar file with all files in the output package folder.
tar = tarfile.open(out_pkg_tar, "w:gz")
for file in out_pkg.walkfiles():
tar.add(file)
# Add the README with the license and important links to documentation.
tar.add('README', arcname=('%s/README.md' % out_pkg))
tar.close()
# Remove all the files in the temporary output package directory.
out_pkg.rmtree()
# Report the info about the new package.
info("%s created" % out_pkg_tar.abspath())
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def deb(options):
""" Creates debian packages. Example uses: paver deb paver deb -k 12345 paver deb -k 12345 -p geonode/testing """
|
key = options.get('key', None)
ppa = options.get('ppa', None)
version, simple_version = versions()
info('Creating package for GeoNode version %s' % version)
with pushd('package'):
# Get rid of any uncommitted changes to debian/changelog
info('Getting rid of any uncommitted changes in debian/changelog')
sh('git checkout debian/changelog')
# Workaround for git-dch bug
# http://bugs.debian.org/cgi-bin/bugreport.cgi?bug=594580
path('.git').makedirs()
# Install requirements
#sh('sudo apt-get -y install debhelper devscripts git-buildpackage')
sh(('git-dch --spawn-editor=snapshot --git-author --new-version=%s'
' --id-length=6 --ignore-branch --release' % (
simple_version)))
deb_changelog = path('debian') / 'changelog'
for line in fileinput.input([deb_changelog], inplace = True):
print(line.replace("urgency=low", "urgency=high"), end='')
## Revert workaround for git-dch bug
path('.git').rmtree()
if key is None and ppa is None:
# A local installable package
sh('debuild -uc -us -A')
elif key is None and ppa is not None:
# A sources package, signed by daemon
sh('debuild -S')
elif key is not None and ppa is None:
# A signed installable package
sh('debuild -k%s -A' % key)
elif key is not None and ppa is not None:
# A signed, source package
sh('debuild -k%s -S' % key)
if ppa is not None:
sh('dput ppa:%s geonode_%s_source.changes' % (ppa, simple_version))
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def parse_comet(self):
"""Parse `targetname` as if it were a comet. :return: (string or None, int or None, string or None); The designation, number and prefix, and name of the comet as derived from `self.targetname` are extracted into a tuple; each element that does not exist is set to `None`. Parenthesis in `self.targetname` will be ignored. :example: the following table shows the result of the parsing: |targetname |(desig, prefixnumber, name) | +================================+================================+ |1P/Halley |(None, '1P', 'Halley') | |3D/Biela |(None, '3D', 'Biela') | |9P/Tempel 1 |(None, '9P', 'Tempel 1') | |73P/Schwassmann Wachmann 3 C |(None, '73P', | | |'Schwassmann Wachmann 3 C') | |73P-C/Schwassmann Wachmann 3 C |(None, '73P-C', | | |'Schwassmann Wachmann 3 C') | |73P-BB |(None, '73P-BB', None) | |322P |(None, '322P', None) | |X/1106 C1 |('1166 C1', 'X', None) | |P/1994 N2 (McNaught-Hartley) |('1994 N2', 'P', | | |'McNaught-Hartley') | |P/2001 YX127 (LINEAR) |('2001 YX127', 'P', 'LINEAR') | |C/-146 P1 |('-146 P1', 'C', None) | |C/2001 A2-A (LINEAR) |('2001 A2-A', 'C', 'LINEAR') | |C/2013 US10 |('2013 US10', 'C', None) | |C/2015 V2 (Johnson) |('2015 V2', 'C', 'Johnson') | |C/2016 KA (Catalina) |('2016 KA', 'C', 'Catalina') | """
|
import re
pat = ('^(([1-9]+[PDCXAI](-[A-Z]{1,2})?)|[PDCXAI]/)' + # prefix [0,1,2]
'|([-]?[0-9]{3,4}[ _][A-Z]{1,2}([0-9]{1,3})?(-[1-9A-Z]{0,2})?)' +
# designation [3,4]
('|(([A-Z][a-z]?[A-Z]*[a-z]*[ -]?[A-Z]?[1-9]*[a-z]*)' +
'( [1-9A-Z]{1,2})*)') # name [5,6]
)
m = re.findall(pat, self.targetname.strip())
# print(m)
prefixnumber = None
desig = None
name = None
if len(m) > 0:
for el in m:
# prefix/number
if len(el[0]) > 0:
prefixnumber = el[0].replace('/', '')
# designation
if len(el[3]) > 0:
desig = el[3].replace('_', ' ')
# name
if len(el[5]) > 0:
if len(el[5]) > 1:
name = el[5]
return (desig, prefixnumber, name)
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def isorbit_record(self):
"""`True` if `targetname` appears to be a comet orbit record number. NAIF record numbers are 6 digits, begin with a '9' and can change at any time. """
|
import re
test = re.match('^9[0-9]{5}$', self.targetname.strip()) is not None
return test
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def iscomet(self):
"""`True` if `targetname` appears to be a comet. """
|
# treat this object as comet if there is a prefix/number
if self.comet is not None:
return self.comet
elif self.asteroid is not None:
return not self.asteroid
else:
return (self.parse_comet()[0] is not None or
self.parse_comet()[1] is not None)
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def isasteroid(self):
"""`True` if `targetname` appears to be an asteroid."""
|
if self.asteroid is not None:
return self.asteroid
elif self.comet is not None:
return not self.comet
else:
return any(x is not None for x in self.parse_asteroid())
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def set_epochrange(self, start_epoch, stop_epoch, step_size):
"""Set a range of epochs, all times are UT :param start_epoch: str; start epoch of the format 'YYYY-MM-DD [HH-MM-SS]' :param stop_epoch: str; final epoch of the format 'YYYY-MM-DD [HH-MM-SS]' :param step_size: str; :return: None Note that dates are mandatory; if no time is given, midnight is assumed. """
|
self.start_epoch = start_epoch
self.stop_epoch = stop_epoch
self.step_size = step_size
return None
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def set_discreteepochs(self, discreteepochs):
"""Set a list of discrete epochs, epochs have to be given as Julian Dates :param discreteepochs: array_like list or 1D array of floats or strings :return: None """
|
if not isinstance(discreteepochs, (list, np.ndarray)):
discreteepochs = [discreteepochs]
self.discreteepochs = list(discreteepochs)
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def prepend_urls(self):
""" Add the following array of urls to the resource base urls """
|
return [
url(r"^(?P<resource_name>%s)/view/(?P<name>[\w\d_.-]+)%s$" % (self._meta.resource_name, trailing_slash()),
self.wrap_view('view'), name="api_fileitem_view"),
url(r"^(?P<resource_name>%s)/download/(?P<name>[\w\d_.-]+)%s$" % (
self._meta.resource_name, trailing_slash()), self.wrap_view('download'), name="api_fileitem_download"),
url(r"^(?P<resource_name>%s)/(?P<pk>\w[\w/-]*)/download%s$" % (self._meta.resource_name, trailing_slash()),
self.wrap_view('download'), name="api_fileitem_download"),
url(r"^(?P<resource_name>%s)/(?P<name>[\w\d_.-]+)/download%s$"
% (self._meta.resource_name, trailing_slash()), self.wrap_view('download'),
name="api_fileitem_download"),
url(r"^(?P<resource_name>%s)/(?P<pk>\w[\w/-]*)/view%s$"
% (self._meta.resource_name, trailing_slash()), self.wrap_view('view'), name="api_fileitem_view"),
url(r"^(?P<resource_name>%s)/(?P<name>[\w\d_.-]+)/view%s$"
% (self._meta.resource_name, trailing_slash()), self.wrap_view('view'), name="api_fileitem_view"),
url(r"^(?P<resource_name>%s)/(?P<name>[\w\d_.-]+)/$"
% self._meta.resource_name, self.wrap_view('dispatch_detail'), name="api_dispatch_detail_name"),
url(r"^(?P<resource_name>%s)/(?P<id>[\d]+)/$"
% self._meta.resource_name, self.wrap_view('dispatch_detail'), name="api_dispatch_detail"),
]
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def set(self, attr_dict):
"""Sets attributes of this user object. :type attr_dict: dict :param attr_dict: Parameters to set, with attribute keys. :rtype: :class:`.Base` :return: The current object. """
|
for key in attr_dict:
if key == self._id_attribute:
setattr(self, self._id_attribute, attr_dict[key])
else:
setattr(self, u"_" + key, attr_dict[key])
return self
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def security_warnings(request, PROXY_ALLOWED_HOSTS=()):
""" Detects insecure settings and reports them to the client-side context. """
|
warnings = []
PROXY_ALLOWED_HOSTS = PROXY_ALLOWED_HOSTS or getattr(settings, 'PROXY_ALLOWED_HOSTS', ())
if PROXY_ALLOWED_HOSTS and '*' in PROXY_ALLOWED_HOSTS:
warnings.append(dict(title=_('Insecure setting detected.'),
description=_('A wildcard is included in the PROXY_ALLOWED_HOSTS setting.')))
return dict(warnings=warnings)
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def conv_units(val, meta):
""" Format and convert units to be more human readable @return new val with converted units """
|
if not val or not meta:
return val
try:
val = float(val)
except ValueError:
logging.error("unable to apply convert units for %s" % val)
return val
suf = 0
while val > 1024 and suf < 4:
val /= 1024
suf += 1
return "%.2f%s" % (val, UNITS_SUFFIX[suf])
|
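UNITS_SUFFIX is defined elsewhere in the original module; a minimal sketch with an assumed suffix list shows the intended behaviour of the conversion loop.

UNITS_SUFFIX = ["B", "K", "M", "G", "T"]  # assumed; the real list lives elsewhere

def conv_units_demo(val):
    val = float(val)
    suf = 0
    while val > 1024 and suf < 4:
        val /= 1024
        suf += 1
    return "%.2f%s" % (val, UNITS_SUFFIX[suf])

print(conv_units_demo(1536))              # -> 1.50K
print(conv_units_demo(3 * 1024 ** 2))     # -> 3.00M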
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def align(self, arr):
""" Align columns, including column headers """
|
if arr is None:
return arr
c_hdrs = self._get_col_hdrs()
if self.show_col_hdr_in_cell:
for hdr in c_hdrs:
arr[hdr] = [":".join([hdr, str(col)]) for col in arr[hdr]]
if self.show_col_hdrs:
widths = [max(len(str(col))
for col in arr[hdr].tolist() + [hdr]) for hdr in c_hdrs]
else:
widths = [max(len(str(col))
for col in arr[hdr].tolist()) for hdr in c_hdrs]
# align column headers
c_hdrs = [c_hdr.ljust(width) for c_hdr, width in zip(c_hdrs, widths)]
# align data
for n_row in range(len(arr)):
arr[n_row] = tuple(col.ljust(width) for col, width in zip(arr[n_row], widths))
return arr, c_hdrs, widths
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def wrap(self, string, width):
""" Wrap lines according to width Place '\n' whenever necessary """
|
if not string or width <= 0:
logging.error("invalid string: %s or width: %s" % (string, width))
return False
tmp = ""
for line in string.splitlines():
if len(line) <= width:
tmp += line + "\n"
continue
cur = 0
length = len(line)
while cur + width < length:
cur = line[:cur+width].rfind(self.sep) + len(self.sep) - 1
line = line[:cur] + "\n" + line[cur+1:]
tmp += line + "\n\n"
return tmp
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _get_meta(self, row, col):
""" Get metadata for a particular cell """
|
if self.meta is None:
logging.error("unable to get meta: empty section")
return {}
if not row in self._get_row_hdrs() or\
not col in self._get_col_hdrs():
logging.error("unable to get meta: cell [%s,%s] does not exist"
% (row, col))
return {}
meta_str = self.meta[col][self.irt[row]]
try:
meta = ast.literal_eval(meta_str)
if isinstance(meta, dict):
return meta
except (SyntaxError, ValueError) as e:
logging.error("unable to parse meta string - %s: %s"
% (meta_str, e))
return {}
|
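For reference, this is the kind of round trip ast.literal_eval performs on a per-cell metadata string; the example string below is made up.

import ast

meta_str = "{'units': 'B', 'factor': 1024}"   # hypothetical cell metadata
meta = ast.literal_eval(meta_str)
print(isinstance(meta, dict), meta["units"])  # -> True B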
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def in_function_call(self):
"""Return either completion information or a call signature for the function definition that we are on currently."""
|
#The last thing to do before we can form completions etc. is perform
#a real-time update of the in-memory versions of the modules.
if settings.real_time_update:
cache.rt.update(self._user_context)
return self._evaluator.in_function_call()
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def goto_definitions(self):
""" Return the definition of a the symbol under the cursor via exact match. Goes to that definition with a buffer. """
|
element = self._evaluator.get_definition()
if element is not None:
return BaseDefinition(self._user_context, element)
else:
return None
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def order_module_dependencies(modules, parser):
"""Orders the specified list of modules based on their inter-dependencies."""
|
result = []
for modk in modules:
if modk not in result:
result.append(modk)
#We also need to look up the dependencies of each of these modules
recursed = list(result)
for i in range(len(result)):
module = result[i]
_process_module_order(parser, module, i, recursed)
return recursed
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _process_module_order(parser, module, i, result):
"""Adds the module and its dependencies to the result list."""
|
#Some code might decide to use the fortpy module methods for general
#development, ignore it since we know it will be present in the end.
if module == "fortpy" or module == "fpy_auxiliary":
return
#See if the parser has already loaded this module.
if module not in parser.modules:
parser.load_dependency(module, True, True, False)
#It is possible that the parser couldn't find it, if so
#we can't get a self-consistent ordering.
if module in parser.modules:
modneeds = parser.modules[module].needs
for modn in modneeds:
if modn not in result:
#Since this module depends on the other, insert the other
#above it in the list.
result.insert(i, modn)
else:
x = result.index(modn)
if x > i:
#We need to move this module higher up in the food chain
#because it is needed sooner.
result.remove(modn)
result.insert(i, modn)
newi = result.index(modn)
_process_module_order(parser, modn, newi, result)
else:
raise ValueError("unable to find module {}.".format(module))
|
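A minimal, self-contained sketch of the same ordering goal, using a plain dependency dictionary and a depth-first walk instead of the parser-driven insertion above; the module names and the needs mapping are invented, and cycles are assumed absent.

def order_modules(needs, targets):
    """Return targets plus their dependencies, dependencies first."""
    ordered, seen = [], set()
    def visit(name):
        if name in seen:
            return
        seen.add(name)
        for dep in needs.get(name, []):
            visit(dep)
        ordered.append(name)
    for t in targets:
        visit(t)
    return ordered

print(order_modules({"solver": ["linalg"], "linalg": ["kinds"]}, ["solver"]))
# -> ['kinds', 'linalg', 'solver']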
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def list_dependencies(self, module, result):
"""Lists the names of all the modules that the specified module depends on."""
|
if result is None:
result = {}
#We will try at least once to load each module that we don't have
if module not in self.modules:
self.load_dependency(module, True, True, False)
if module in self.modules and module not in result:
result[module] = self.modules[module].filepath
for depend in self.modules[module].dependencies:
name = depend.split(".")[0].lower()
if name not in result:
self.list_dependencies(name, result)
return result
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _parse_dependencies(self, pmodules, dependencies, recursive, greedy):
"""Parses the dependencies of the modules in the list pmodules. :arg pmodules: a list of modules that were parsed from a *.f90 file. :arg dependencies: when true, the dependency's dependencies will be loaded. :arg recursive: specifies whether to continue loading dependencies to completion; i.e. up the chain until we have every module that any module needs to run. :arg greedy: when true, """
|
#See if we need to also load dependencies for the modules
if dependencies:
allkeys = [ module.name.lower() for module in pmodules ]
for key in allkeys:
for depend in self.modules[key].collection("dependencies"):
base = depend.split(".")[0]
if self.verbose and base.lower() not in self.modules:
msg.info("DEPENDENCY: {}".format(base), 2)
self.load_dependency(base, dependencies and recursive, recursive, greedy)
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _parse_docstrings(self, filepath):
"""Looks for additional docstring specifications in the correctly named XML files in the same directory as the module."""
|
xmlpath = self.get_xmldoc_path(filepath)
if self.tramp.exists(xmlpath):
xmlstring = self.tramp.read(xmlpath)
self.modulep.docparser.parsexml(xmlstring, self.modules, xmlpath)
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def get_xmldoc_path(self, filepath):
"""Returns the full path to a possible XML documentation file for the specified code filepath."""
|
segs = filepath.split(".")
segs.pop()
return ".".join(segs) + ".xml"
|
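A standalone version of the path derivation above; the example path is only illustrative.

def xmldoc_path(filepath):
    segs = filepath.split(".")
    segs.pop()                            # drop the extension
    return ".".join(segs) + ".xml"

print(xmldoc_path("src/crystal_utils.f90"))   # -> src/crystal_utils.xml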
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _get_mod_mtime(self, filepath):
"""Gets the modified time of the file or its accompanying XML file, whichever is greater. """
|
file_mtime = self.tramp.getmtime(filepath)
xmlpath = self.get_xmldoc_path(filepath)
if self.tramp.exists(xmlpath):
xml_mtime = self.tramp.getmtime(xmlpath)
if xml_mtime > file_mtime:
file_mtime = xml_mtime
return file_mtime
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _check_parse_modtime(self, filepath, fname):
"""Checks whether the modules in the specified file path need to be reparsed because the file was changed since it was last loaded."""
|
#We also want to perform a reparse if the XML documentation file for the
#module changed, since the docs are also cached.
file_mtime = self._get_mod_mtime(filepath)
#If we have parsed this file and have its modules in memory, its
#filepath will be in self._parsed. Otherwise we can load it from
#file or from a cached pickle version.
if filepath.lower() in self._parsed:
#Get the name of the first module in that file from the modulefiles
#list. Find out when it was last modified.
module_mtime = None
if fname in self._modulefiles:
if len(self._modulefiles[fname]) == 0:
msg.warn("Module file {} has no modules defined inside of it!".format(fname))
return None
modulename = self._modulefiles[fname][0]
if modulename in self.modules:
#Make sure that if there are modules with the same name but different
#files, that we are working with the correct one.
if filepath.lower() != self.modules[modulename].filepath:
msg.warn("module {} parsed ".format(modulename) +
"from {}, not {}".format(self.modules[modulename].filepath,
filepath))
module_mtime = self.modules[modulename].change_time
if module_mtime is not None:
if module_mtime < file_mtime:
#The file has been modified since we reloaded the module.
#Return the two times we used for the comparison so the
#module file can be reloaded.
return [module_mtime, file_mtime]
else:
return None
else:
#The file has never been parsed by this CodeParser. We can
#either do a full parse or a pickle load.
return [file_mtime]
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def reparse(self, filepath):
"""Reparses the specified module file from disk, overwriting any cached representations etc. of the module."""
|
#The easiest way to do this is to touch the file and then call
#the regular parse method so that the cache becomes invalidated.
self.tramp.touch(filepath)
self.parse(filepath)
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _add_current_codedir(self, path):
"""Adds the directory of the file at the specified path as a base path to find other files in. """
|
dirpath = self.tramp.dirname(path)
if dirpath not in self.basepaths:
self.basepaths.append(dirpath)
self.rescan()
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def isense_parse(self, filepath, modulename):
"""Parses the specified file from either memory, cached disk or full disk depending on whether the fetch is via SSH or not and how long it has been since we last checked the modification time of the file. """
|
#We only want to check whether the file has been modified for reload
from datetime import datetime
if modulename not in self._last_isense_check:
self._last_isense_check[modulename] = datetime.utcnow()
self.parse(filepath, True)
else:
elapsed = (datetime.utcnow() - self._last_isense_check[modulename]).seconds
if elapsed > 60:
self.parse(filepath, True)
self._last_isense_check[modulename] = datetime.utcnow()
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def parse(self, filepath, dependencies=False, recursive=False, greedy=False):
"""Parses the fortran code in the specified file. :arg dependencies: if true, all folder paths will be searched for modules that have been referenced but aren't loaded in the parser. :arg greedy: if true, when a module cannot be found using a file name of module_name.f90, all modules in all folders are searched."""
|
#If we have already parsed this file path, we should check to see if the
#module file has changed and needs to be reparsed.
abspath = self.tramp.abspath(filepath)
self._add_current_codedir(abspath)
fname = filepath.split("/")[-1].lower()
mtime_check = self._check_parse_modtime(abspath, fname)
if mtime_check is None:
return
#Keep track of parsing times if we are running in verbose mode.
if self.verbose:
start_time = clock()
msg.okay("WORKING on {0}".format(abspath), 2)
if fname not in self._modulefiles:
self._modulefiles[fname] = []
if fname not in self._programfiles:
self._programfiles[fname] = []
#Check if we can load the file from a pickle instead of doing a time
#consuming file system parse.
pickle_load = False
pprograms = []
if len(mtime_check) == 1 and settings.use_filesystem_cache:
#We use the pickler to load the file since a cached version might
#be good enough.
pmodules = self.serialize.load_module(abspath, mtime_check[0], self)
if pmodules is not None:
for module in pmodules:
self.modules[module.name.lower()] = module
self._modulefiles[fname].append(module.name.lower())
pickle_load = True
else:
#We have to do a full load from the file system.
pmodules, pprograms = self._parse_from_file(abspath, fname,
dependencies, recursive, greedy)
else:
#We have to do a full load from the file system.
pmodules, pprograms = self._parse_from_file(abspath, fname,
dependencies, recursive, greedy)
#Add the filename to the list of files that have been parsed.
self._parsed.append(abspath.lower())
if not pickle_load and len(pmodules) > 0 and settings.use_filesystem_cache:
self.serialize.save_module(abspath, pmodules)
if self.verbose:
msg.info("PARSED: {} modules and {} ".format(len(pmodules), len(pprograms)) +
"programs in {} in {}".format(fname, secondsToStr(clock() - start_time)), 2)
for module in pmodules:
msg.gen("\tMODULE {}".format(module.name), 2)
for program in pprograms:
msg.gen("\tPROGRAM {}".format(program.name), 2)
if len(pmodules) > 0 or len(pprograms) > 0:
msg.blank()
self._parse_dependencies(pmodules, dependencies, recursive, greedy)
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def rescan(self):
"""Rescans the base paths to find new code files."""
|
self._pathfiles = {}
for path in self.basepaths:
self.scan_path(path)
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def load_dependency(self, module_name, dependencies, recursive, greedy, ismapping = False):
"""Loads the module with the specified name if it isn't already loaded."""
|
key = module_name.lower()
if key not in self.modules:
if key == "fortpy":
#Manually specify the correct path to the fortpy.f90 that shipped with
#the distribution
from fortpy.utility import get_fortpy_templates_dir
from os import path
fpy_path = path.join(get_fortpy_templates_dir(), "fortpy.f90")
self.parse(fpy_path, False, False)
return
fkey = key + ".f90"
if fkey in self._pathfiles:
self.parse(self._pathfiles[fkey], dependencies, recursive)
elif greedy:
#The default naming doesn't match for this module
#we will load all modules until we find the right
#one
self._load_greedy(key, dependencies, recursive)
elif key in self.mappings and self.mappings[key] in self._pathfiles:
#See if they have a mapping specified to a code file for this module name.
if self.verbose:
msg.info("MAPPING: using {} as the file".format(self.mappings[key]) +
" name for module {}".format(key))
self.parse(self._pathfiles[self.mappings[key]], dependencies, recursive)
elif key not in ["mkl_vsl_type", "mkl_vsl", "iso_c_binding"]:
#The parsing can't continue without the necessary dependency modules.
msg.err(("could not find module {}. Enable greedy search or"
" add a module filename mapping.".format(key)))
if self.austere:
exit(1)
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _load_greedy(self, module_name, dependencies, recursive):
"""Keeps loading modules in the filepaths dictionary until all have been loaded or the module is found."""
|
found = module_name in self.modules
allmodules = list(self._pathfiles.keys())
i = 0
while not found and i < len(allmodules):
current = allmodules[i]
if not current in self._modulefiles:
#We haven't tried to parse this file yet
self.parse(self._pathfiles[current], dependencies and recursive)
found = module_name in self.modules
i += 1
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def scan_path(self, path, result = None):
"""Determines which valid fortran files reside in the base path. :arg path: the path to the folder to list f90 files in. :arg result: an optional dictionary to add results to in addition to populating the private member dictionary of the parser. """
|
files = []
#Find all the files in the directory
for (dirpath, dirnames, filenames) in self.tramp.walk(path):
files.extend(filenames)
break
#Check if the .fpyignore file exists in the folder.
patterns = [".#*"]
if ".fpyignore" in files:
for line in self.tramp.read(os.path.join(path, ".fpyignore")).split('\n'):
sline = line.strip()
if len(sline) > 0 and sline[0] != '#':
patterns.append(sline)
#Filter them to find the fortran code files
from fnmatch import fnmatch
for fname in files:
if fnmatch(fname, "*.f90"):
if all([not fnmatch(fname, p) for p in patterns]):
self._pathfiles[fname.lower()] = os.path.join(path, fname)
if result is not None:
result[fname.lower()] = os.path.join(path, fname)
|
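The fnmatch filtering above can be illustrated in isolation; the file names and ignore patterns below are invented.

from fnmatch import fnmatch

patterns = [".#*", "test_*"]              # e.g. the default plus .fpyignore entries
files = ["crystal.f90", ".#crystal.f90", "test_crystal.f90", "notes.txt"]
kept = [f for f in files
        if fnmatch(f, "*.f90") and all(not fnmatch(f, p) for p in patterns)]
print(kept)                               # -> ['crystal.f90']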
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def tree_find(self, symbol, origin, attribute):
"""Finds the code element corresponding to specified symbol by searching all modules in the parser. :arg symbol: the name of the code element to find. :arg origin: an instance of a Module element who owns the text that is generate the find search. :arg attribute: one of ['dependencies', 'publics', 'members', 'types', 'executables', 'interfaces'] that specifies which collection in the module should house the symbol's element. """
|
#The symbol must be accessible to the origin module, otherwise
#it wouldn't compile. Start there, first looking at the origin
#itself and then the other modules that it depends on.
#Since we will be referring to this multiple times, might as
#well get a pointer to it.
oattr = origin.collection(attribute)
base = None
lorigin = None
if symbol in oattr:
base = oattr[symbol]
lorigin = origin
else:
for module in origin.dependencies:
usespec = module.split(".")
if len(usespec) > 1:
if usespec[1] == symbol:
#The dependency is to a specific element in the module,
#and it matches.
lorigin = self.get(usespec[0])
else:
lorigin = None
else:
#The dependency is to the entire module!
lorigin = self.get(usespec[0])
#If we have code for the origin, we can search for the
#actual base object that we are interested in
if lorigin is not None:
lattr = lorigin.collection(attribute)
if symbol in lattr:
base = lattr[symbol]
break
#By now, we either have the item we were after or we don't have
#code for the module it needs
return (base, lorigin)
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def get_executable(self, fullname):
"""Gets the executable corresponding to the specified full name. :arg fullname: a string with modulename.executable. """
|
result = None
modname, exname = fullname.split(".")
module = self.get(modname)
if module is not None:
if exname in module.executables:
result = module.executables[exname]
return result
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def get_interface(self, fullname):
"""Gets the interface corresponding to the specified full name. :arg fullname: a string with modulename.interface. """
|
result = None
[modname, iname] = fullname.split(".")
module = self.get(modname)
if module is not None:
if iname in module.interfaces:
result = module.interfaces[iname]
return result
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def get(self, name):
"""Gets the module with the given name if it exists in this code parser."""
|
if name not in self.modules:
self.load_dependency(name, False, False, False)
if name in self.modules:
return self.modules[name]
else:
return None
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def get_bucket(self, key, rate=None, capacity=None, **kwargs):
"""Fetch a Bucket for the given key. rate and capacity might be overridden from the Throttler defaults. Args: rate (float):
Units regenerated by second, or None to keep Throttler defaults capacity (int):
Maximum units available, or None to keep Throttler defaults """
|
return buckets.Bucket(
key=key,
rate=rate or self.rate,
capacity=capacity or self.capacity,
storage=self.storage,
**kwargs)
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def consume(self, key, amount=1, rate=None, capacity=None, **kwargs):
"""Consume an amount for a given key. Non-default rate/capacity can be given to override Throttler defaults. Returns: bool: whether the units could be consumed """
|
bucket = self.get_bucket(key, rate, capacity, **kwargs)
return bucket.consume(amount)
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def throttle(self, key, amount=1, rate=None, capacity=None, exc_class=Throttled, **kwargs):
"""Consume an amount for a given key, or raise a Throttled exception."""
|
if not self.consume(key, amount, rate, capacity, **kwargs):
raise exc_class("Request of %d unit for %s exceeds capacity."
% (amount, key))
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def leak(self):
"""Leak the adequate amount of data from the bucket. This should be called before any consumption takes place. Returns: int: the new capacity of the bucket """
|
capacity, last_leak = self.storage.mget(self.key_amount, self.key_last_leak,
coherent=True)
now = time.time()
if last_leak:
elapsed = now - last_leak
decrement = elapsed * self.rate
new_capacity = max(int(capacity - decrement), 0)
else:
new_capacity = 0
self.storage.mset({
self.key_amount: new_capacity,
self.key_last_leak: now,
})
return new_capacity
|
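A worked example of the leak arithmetic with assumed numbers: 8 stored units, a rate of 2 units per second, and 3 seconds since the last leak.

capacity, rate, elapsed = 8, 2.0, 3.0     # stored units, units/second, seconds
new_capacity = max(int(capacity - elapsed * rate), 0)
print(new_capacity)                       # -> 2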
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def consume(self, amount=1):
"""Consume one or more units from the bucket."""
|
# First, cleanup old stock
current = self.leak()
if current + amount > self.capacity:
return False
self._incr(amount)
return True
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def register(self, entry_point):
"""Register a converter :param string entry_point: converter to register (entry point syntax) :raise: ValueError if already registered """
|
if entry_point in self.registered_converters:
raise ValueError('Already registered')
self.registered_converters.insert(0, entry_point)
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def parse():
"""Parses all the modules in the library specified by the script args. """
|
from fortpy.code import CodeParser
c = CodeParser()
if args["verbose"]:
c.verbose = True
f90files = {}
c.scan_path(args["source"], f90files)
for fname, fpath in f90files.items():
if fname not in c._modulefiles:
c._modulefiles[fname] = []
c._parse_from_file(fpath, fname, args["recursive"], args["recursive"], False)
return c
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _check_pointers(parser):
"""Checks the pointer best-practice conditions."""
|
from fortpy.stats.bp import check_pointers
check_pointers(parser, args["source"], args["filter"], args["recursive"])
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def setup_regex(self):
"""Regex definitions for parsing the code elements."""
|
self._RX_INTERFACE = (r"\n\s*interface\s+(?P<name>[a-z0-9_]+)(\s\((?P<symbol>[.\w+*=/-]+)\))?"
r"(?P<contents>.+?)"
r"end\s*interface\s+(?P=name)?")
self.RE_INTERFACE = re.compile(self._RX_INTERFACE, re.I | re.DOTALL)
|
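A quick check of the interface regex above against a toy Fortran snippet; the snippet itself is invented.

import re

_RX_INTERFACE = (r"\n\s*interface\s+(?P<name>[a-z0-9_]+)(\s\((?P<symbol>[.\w+*=/-]+)\))?"
                 r"(?P<contents>.+?)"
                 r"end\s*interface\s+(?P=name)?")
RE_INTERFACE = re.compile(_RX_INTERFACE, re.I | re.DOTALL)

code = "\ninterface norm\n   module procedure norm_real, norm_complex\nend interface norm\n"
m = RE_INTERFACE.search(code)
print(m.group("name"))                    # -> norm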
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def update_docs(self, iface, module):
"""Updates the documentation for the specified interface using the module predocs."""
|
#We need to look in the parent module docstrings for this types decorating tags.
key = "{}.{}".format(module.name, iface.name)
if key in module.predocs:
iface.docstring = self.docparser.to_doc(module.predocs[key][0], iface.name)
iface.docstart, iface.docend = (module.predocs[key][1], module.predocs[key][2])
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def makefile(identifier, dependencies, makepath, compileid, precompile=False, inclfortpy=True, parser=None, executable=True, extralinks=None, inclfpyaux=False, makefpyaux=False, verbose=False):
"""Generates a makefile to create the unit testing executable for the specified test identifier. :arg identifier: the id of the test/library that this makefile should be made for. :arg dependencies: a list of the module names that need to be included in the compilation. :arg makepath: the path to the file to save the Makefile in. :arg compileid: the 'module.executable' that this Makefile is being produced for. :arg precompile: when True, the precompiler flags will be added to the makefile. :arg inclfortpy: when True, the fortpy module will be added first to the list of modules to compile for the executable/library. :arg parser: if the module file names are different from the module names, specify a code parser to use for converting one to the other. :arg executable: when true and executable is compiled for rule 'all', else the library is the default and the executable is set as a different rule for 'identifier'.x. :arg extralinks: a list of additional libraries to link in with the explicitly compiled f90 files. These aren't checked at all, just added to the linklist. :arg verbose: when True, the full compilation header will be printed with flags and module information; otherwise it won't. """
|
lines = []
#Append the general variables
lines.append("EXENAME\t\t= {}.x".format(identifier))
lines.append("SHELL\t\t= /bin/bash")
lines.append("UNAME\t\t= $(shell uname)")
lines.append("HOSTNAME\t= $(shell hostname)")
lines.append("LOG\t\t= compile.{}.log".format(identifier if identifier is not None else "default"))
lines.append("")
#Now the standard entries for ifort. We will just have the ifort include
#file so that the MPI and other options can be tested too.
lines.append(_make_compiler_include(precompile, extralinks))
lines.append(".SILENT:")
lines.append("")
#Append all the dependent modules to the makefile
lines.append("LIBMODULESF90\t= \\")
for modk in dependencies:
if modk not in ["fortpy", "fpy_auxiliary", identifier]:
if parser is not None:
lines.append("\t\t{} \\".format(_get_mapping(parser, modk)))
else:
lines.append("\t\t{} \\".format(modk))
if makefpyaux:
lines.append("\t\tfpy_auxiliary.f90 \\")
lines.append("")
lines.append("MAINF90\t\t= {}.f90".format(identifier))
lines.append("SRCF90\t\t= $(LIBMODULESF90) $(MAINF90)")
lines.append("OBJSF90\t\t= $(SRCF90:.f90=.o)")
lines.append("SLIBF90\t\t= $(LIBMODULESF90:.f90=.o)")
lines.append("")
#Add explicitly defined libraries that should be included when linking
#the unit testing executable.
linklibs = True
_add_explicit_includes(lines, dependencies, extralinks)
if inclfortpy or inclfpyaux:
import sys
if len(sys.modules["config"].includes) == 0:
lines.append("LIBS\t\t= \\")
if inclfortpy:
lines.append("\t\tfortpy.o \\")
if inclfpyaux:
lines.append("\t\tfpy_aux.so \\")
lines.append("")
#We need to add the error handling commands to make debugging compiling easier.
lines.append(_make_error())
lines.append("")
main = "$(EXENAME)" if executable == True else "{}.{}".format(identifier, executable)
lines.append("all: info {}".format(main))
lines.append(_make_info(compileid, verbose))
lines.append(_make_exe(linklibs, identifier, verbose))
from os import path
makedir, makef = path.split(makepath)
lines[-1] += " make -f '{}'".format(makef)
with open(makepath, 'w') as f:
f.writelines("\n".join(lines))
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _add_explicit_includes(lines, dependencies=None, extralinks=None):
"""Adds any relevant libraries that need to be explicitly included according to the fortpy configuration file. Libraries are appended to the specified collection of lines. Returns true if relevant libraries were added. """
|
from fortpy import config
import sys
from os import path
includes = sys.modules["config"].includes
linklibs = False
if extralinks is not None and len(extralinks) > 0:
for i, link in enumerate(extralinks):
lines.append("LBD{0:d} = {1}".format(i, link))
lines.append("")
if len(includes) > 0:
lines.append("LIBS\t\t= \\")
for library in includes:
addlib = False
if "modules" in library:
#We need to loop over the modules specified for the library and see
#if any of them are in our list of modules.
for libmod in library["modules"]:
if dependencies is None or libmod.lower() in dependencies:
addlib = True
break
else:
addlib = True
if addlib:
linklibs = True
lines.append("\t\t{} \\".format(library["path"]))
#These links specify explicit libraries to include in the final compilation.
if extralinks is not None:
for i in range(len(extralinks)):
if path.isfile(extralinks[i]):
lines.append("\t\t$(LBD{0:d}) \\".format(i))
return linklibs or (extralinks is not None and len(extralinks) > 0)
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _get_mapping(parser, mapped):
"""Gets the original file name for a module that was mapped when the module name does not coincide with the file name that the module was defined in."""
|
if mapped in parser.mappings:
return parser.mappings[mapped]
else:
return mapped + ".f90"
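A minimal illustration of the fallback behaviour, using a stand-in parser object; the real parser exposes a much richer interface, but only its mappings attribute matters here.

class _StubParser:
    # Hypothetical mapping: module name -> file that defines it.
    mappings = {"symmetry": "symmetry_module.f90"}

print(_get_mapping(_StubParser(), "symmetry"))   # -> symmetry_module.f90
print(_get_mapping(_StubParser(), "lattice"))    # -> lattice.f90 (falls back to <module>.f90)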
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def patch(self, overrides):
""" Patches the config with the given overrides. Example: If the current dictionary looks like this: a: 1, b: { c: 3, d: 4 } and `patch` is called with the following overrides: b: { d: 2, e: 4 }, c: 5 then, the following will be the resulting dictionary: a: 1, b: { c: 3, d: 2, e: 4 }, c: 5 """
|
overrides = overrides or {}
for key, value in iteritems(overrides):
current = self.get(key)
if isinstance(value, dict) and isinstance(current, dict):
current.patch(value)
else:
self[key] = value
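A self-contained sketch of the merge semantics described in the docstring; the real config object is assumed to be a dict subclass carrying this same method, so a tiny stand-in is used here.

class Config(dict):
    def patch(self, overrides):
        overrides = overrides or {}
        for key, value in overrides.items():
            current = self.get(key)
            if isinstance(value, dict) and isinstance(current, Config):
                current.patch(value)   # merge nested dictionaries recursively
            else:
                self[key] = value      # scalars and new keys are simply overwritten

cfg = Config(a=1, b=Config(c=3, d=4))
cfg.patch({'b': {'d': 2, 'e': 4}, 'c': 5})
assert cfg == {'a': 1, 'b': {'c': 3, 'd': 2, 'e': 4}, 'c': 5}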
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def update_controller_info(self):
""" Pulls controller information. :returns: True if successfull, otherwise False. :rtype: boolean """
|
# Read the controller information.
self.controller_info = customer_details(self._user_token)
self.controller_status = status_schedule(self._user_token)
if self.controller_info is None or self.controller_status is None:
return False
# Only supports one controller right now.
# Use the first one from the array.
self.current_controller = self.controller_info['controllers'][0]
self.status = self.current_controller['status']
self.controller_id = self.current_controller['controller_id']
self.customer_id = self.controller_info['customer_id']
self.user_id = self.controller_status['user_id']
self.num_relays = len(self.controller_status['relays'])
self.relays = self.controller_status['relays']
self.name = self.controller_status['name']
self.watering_time = self.controller_status['watering_time']
self.sensors = self.controller_status['sensors']
try:
self.running = self.controller_status['running']
except KeyError:
self.running = None
return True
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def controller(self):
""" Check if multiple controllers are connected. :returns: Return the controller_id of the active controller. :rtype: string """
|
if hasattr(self, 'controller_id'):
if len(self.controller_info['controllers']) > 1:
raise TypeError(
'Only one controller per account is supported.'
)
return self.controller_id
raise AttributeError('No controllers assigned to this account.')
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def relay_info(self, relay, attribute=None):
""" Return information about a relay. :param relay: The relay being queried. :type relay: int :param attribute: The attribute being queried, or all attributes for that relay if None is specified. :type attribute: string or None :returns: The attribute being queried or None if not found. :rtype: string or int """
|
# Check if the relay number is valid.
if (relay < 0) or (relay > (self.num_relays - 1)):
# Invalid relay index specified.
return None
else:
if attribute is None:
# Return all the relay attributes.
return self.relays[relay]
else:
try:
return self.relays[relay][attribute]
except KeyError:
# Invalid key specified.
return None
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def suspend_zone(self, days, zone=None):
""" Suspend or unsuspend a zone or all zones for an amount of time. :param days: Number of days to suspend the zone(s) :type days: int :param zone: The zone to suspend. If no zone is specified then suspend all zones :type zone: int or None :returns: The response from set_zones() or None if there was an error. :rtype: None or string """
|
if zone is None:
zone_cmd = 'suspendall'
relay_id = None
else:
if zone < 0 or zone > (len(self.relays) - 1):
return None
else:
zone_cmd = 'suspend'
relay_id = self.relays[zone]['relay_id']
# If days is 0 then remove suspension
if days <= 0:
time_cmd = 0
else:
# 1 day = 60 * 60 * 24 seconds = 86400
time_cmd = time.mktime(time.localtime()) + (days * 86400)
return set_zones(self._user_token, zone_cmd, relay_id, time_cmd)
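Hypothetical calls against an already-constructed controller instance (the constructor is not shown in this excerpt, so the object name is assumed).

controller.suspend_zone(3)            # suspend every zone for three days
controller.suspend_zone(2, zone=1)    # suspend only zone 1 for two days
controller.suspend_zone(0, zone=1)    # days <= 0 lifts the suspension again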
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def run_zone(self, minutes, zone=None):
""" Run or stop a zone or all zones for an amount of time. :param minutes: The number of minutes to run. :type minutes: int :param zone: The zone number to run. If no zone is specified then run all zones. :type zone: int or None :returns: The response from set_zones() or None if there was an error. :rtype: None or string """
|
if zone is None:
zone_cmd = 'runall'
relay_id = None
else:
if zone < 0 or zone > (len(self.relays) - 1):
return None
else:
zone_cmd = 'run'
relay_id = self.relays[zone]['relay_id']
if minutes <= 0:
time_cmd = 0
if zone is None:
zone_cmd = 'stopall'
else:
zone_cmd = 'stop'
else:
time_cmd = minutes * 60
return set_zones(self._user_token, zone_cmd, relay_id, time_cmd)
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def list_running_zones(self):
""" Returns the currently active relay. :returns: Returns the running relay number or None if no relays are active. :rtype: string """
|
self.update_controller_info()
if self.running is None or not self.running:
return None
return int(self.running[0]['relay'])
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def is_zone_running(self, zone):
""" Returns the state of the specified zone. :param zone: The zone to check. :type zone: int :returns: Returns True if the zone is currently running, otherwise returns False if the zone is not running. :rtype: boolean """
|
self.update_controller_info()
if self.running is None or not self.running:
return False
if int(self.running[0]['relay']) == zone:
return True
return False
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def time_remaining(self, zone):
""" Returns the amount of watering time left in seconds. :param zone: The zone to check. :type zone: int :returns: If the zone is not running returns 0. If the zone doesn't exist returns None. Otherwise returns number of seconds left in the watering cycle. :rtype: None or seconds left in the waterting cycle. """
|
self.update_controller_info()
if zone < 0 or zone > (self.num_relays-1):
return None
if self.is_zone_running(zone):
return int(self.running[0]['time_left'])
return 0
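A small monitoring sketch tying the three methods above together; again, `controller` is assumed to be an existing instance of this class.

zone = 2
if controller.is_zone_running(zone):
    print("zone", zone, "finishes in", controller.time_remaining(zone), "seconds")
else:
    controller.run_zone(10, zone=zone)   # start a 10 minute manual cycle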
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def cache(self):
"""Memoize access to the cache backend."""
|
if self._cache is None:
self._cache = django_cache.get_cache(self.cache_name)
return self._cache
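The generic shape of this lazy-attribute idiom, with a stand-in factory in place of the Django cache lookup.

class Client:
    def __init__(self, cache_name="default"):
        self.cache_name = cache_name
        self._cache = None

    @property
    def cache(self):
        if self._cache is None:
            self._cache = {}   # stand-in for django_cache.get_cache(self.cache_name)
        return self._cache

client = Client()
assert client.cache is client.cache   # the backend is resolved only once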
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def element(self):
"""Returns the instance of the element who owns the first line number for the operation in the cached source code."""
|
#We assume here that the entire operation is associated with a single
#code element. Since the sequence matcher groups operations by contiguous
#lines of code to change, this is a safe assumption.
if self._element is None:
line = self.icached[0]
#If we are inserting a new line, the location at the start of the line
#that used to be there interferes with the element finder.
if self.mode == "insert":
line -= 1
self._element = self.context.module.get_element(line, 0)
return self._element
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def docelement(self):
"""Returns the instance of the element whose body owns the docstring in the current operation. """
|
#This is needed since the decorating documentation
#for types and executables is in the body of the module, but when they
#get edited, the edit belongs to the type/executable because the character
#falls within the absstart and end attributes.
if self._docelement is None:
if isinstance(self.element, Module):
self._docelement = self.element
else:
ichar = self.element.module.charindex(self.icached[0], 1)
if (ichar > self.element.docstart and ichar <= self.element.docend):
self._docelement = self.element.parent
else:
self._docelement = self.element
return self._docelement
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def handle(self):
"""Handles the real time update of some code from the cached representation of the module. """
|
#If we have more statements in the buffer than the cached, it doesn't matter,
#we just run the first few replacements of the cache concurrently and do
#what's left over from the buffer.
#REVIEW
if self.mode == "insert": #then self.icached[0] == self.icached[1]:
#We are inserting the lines from the buffer into the cached version
for ib in range(len(self.buffered)):
self.state = (ib, None)
self.parser.parse(self)
self._update_extent()
elif self.mode == "delete": #then self.ibuffer[0] == self.ibuffer[1]:
#We are deleting lines from the cached version
for ic in range(len(self.cached)):
self.state = (None, ic)
self.parser.parse(self)
self._update_extent()
else: # mode == 'replace'
#Need lines from both the buffer and the cached version
#First we run all the statements in cached as deletions
for ic in range(len(self.cached)):
self.state = (None, ic)
self.parser.parse(self, "delete")
self._update_extent()
#Then run all the buffer statements as insertions.
for ib in range(len(self.buffered)):
self.state = (ib, None)
self.parser.parse(self, "insert")
self._update_extent()
self._handle_docstrings()
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _handle_docstrings(self):
"""Searches through the lines affected by this operation to find blocks of adjacent docstrings to parse for the current element. """
|
#Docstrings have to be continuous sets of lines that start with !!
#When they change in any way (i.e. any of the three modes), we
#have to reparse the entire block because it has XML dependencies
#Because of that, the cached version of the docstrings is actually
#pointless and we only need to focus on the buffered.
blocks = self._docstring_getblocks()
if len(blocks) == 0:
return
xmldict = self._docstring_parse(blocks)
delta = 0
if isinstance(self.docelement, Module):
delta += self.docparser.rt_update_module(xmldict, self.docelement)
else:
#We just need to handle the type and executable internal defs.
if self.docelement.name in xmldict:
docs = self.docparser.to_doc(xmldict[self.docelement.name][0],
self.docelement.name)
self.docparser.process_memberdocs(docs, self.docelement, False)
#Also update the docstrings for any embedded types or executables.
if isinstance(self.docelement, Executable):
delta += self.docparser.process_embedded(xmldict,
self.docelement, False)
#Finally, we need to handle the overall character length change
#that this update caused to the element first and then for the
#operation as a whole for updating the module and its children.
buffertot = sum([len(self.context.bufferstr[i]) for i in self._doclines])
cachedtot = 0
for i in range(self.icached[0],self.icached[1]):
if self.docparser.RE_DOCS.match(self.context.cachedstr[i]):
cachedtot += len(self.context.cachedstr[i])
self.length = buffertot - cachedtot
if delta == 0:
#The update must have been to members variables of the module or the
#executables/types. The element who owns the members is going to get
#missed when the module updates its children.
self.docelement.end += self.length
else:
#All the individual elements have been updated already, so just
#set the length change for this operation.
self.docdelta = delta
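A rough illustration of the docstring detection the block above relies on; the actual RE_DOCS pattern belongs to the doc parser, so the regex here is only an assumption about lines that begin with '!!'.

import re

RE_DOCS = re.compile(r"\s*!!(?P<docstring>.*)")   # assumed shape of the real pattern
for line in ["  !! <summary>Finds the lattice symmetry group.</summary>", "  call do_work(a, b)"]:
    match = RE_DOCS.match(line)
    print(match.group("docstring").strip() if match else "not a docstring line")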
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _docstring_parse(self, blocks):
"""Parses the XML from the specified blocks of docstrings."""
|
result = {}
for block, docline, doclength, key in blocks:
doctext = "<doc>{}</doc>".format(" ".join(block))
try:
docs = ET.XML(doctext)
docstart = self.parser.charindex(docline, 0, self.context)
if not key in result:
result[key] = [list(docs), docstart, docstart + doclength]
else:
#If there are docblocks separated by whitespace in the
#same element we can't easily keep track of the start and
#end character indices anymore.
result[key][0].extend(list(docs))
except ET.ParseError:
msg.warn(doctext)
return result
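For reference, the <doc> wrapping trick used above in isolation: adjacent docstring fragments only become well-formed XML once a synthetic root element is added around them.

import xml.etree.ElementTree as ET

block = ['<summary>Adds two vectors.</summary>',
         '<parameter name="a">first operand</parameter>']
docs = ET.XML("<doc>{}</doc>".format(" ".join(block)))
print([child.tag for child in docs])   # ['summary', 'parameter']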
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _docstring_getblocks(self):
"""Gets the longest continuous block of docstrings from the buffer code string if any of those lines are docstring lines. """
|
#If there are no lines to look at, we have nothing to do here.
if self.ibuffer[0] == self.ibuffer[1]:
return []
lines = self.context.bufferstr[self.ibuffer[0]:self.ibuffer[1]]
docblock = []
result = []
self._doclines = []
#We need to keep track of the line number for the start of the
#documentation strings.
docline = 0
doclength = 0
first = self.docparser.RE_DOCS.match(lines[0])
if first is not None:
docblock.append(first.group("docstring"))
docline = self.ibuffer[0]
self._doclines.append(docline)
doclength += len(lines[0]) + 1 # + 1 for \n removed by split.
#We need to search backwards in the main buffer string for
#additional tags to add to the block
i = self.ibuffer[0] - 1
while i > 0:
current = self.context.bufferstr[i]
docmatch = self.docparser.RE_DOCS.match(current)
if docmatch is not None:
docblock.append(docmatch.group("docstring"))
docline = i
doclength += len(current) + 1
else:
break
i -= 1
#Reverse the docblock list since we were going backwards and appending.
if len(docblock) > 0:
docblock.reverse()
#Now handle the lines following the first line. Also handle the
#possibility of multiple, separate blocks that are still valid XML.
        #We have to keep going until we have exceeded the operational changes
#or found the decorating element.
i = self.ibuffer[0] + 1
while (i < len(self.context.bufferstr) and
(i < self.ibuffer[1] or len(docblock) > 0)):
line = self.context.bufferstr[i]
docmatch = self.docparser.RE_DOCS.match(line)
if docmatch is not None:
docblock.append(docmatch.group("docstring"))
doclength += len(line)
if docline == 0:
docline = i
#Only track actual documentation lines that are within the
#operations list of lines.
if i < self.ibuffer[1]:
self._doclines.append(i)
elif len(docblock) > 0:
key = self._docstring_key(line)
result.append((docblock, docline, doclength, key))
docblock = []
docline = 0
doclength = 0
#We need to exit the loop if we have exceeded the length of
#the operational changes
if len(docblock) == 0 and i > self.ibuffer[1]:
break
i += 1
return result
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _docstring_key(self, line):
"""Returns the key to use for the docblock immediately preceding the specified line."""
|
decormatch = self.docparser.RE_DECOR.match(line)
if decormatch is not None:
key = "{}.{}".format(self.docelement.name, decormatch.group("name"))
else:
key = self.element.name
return key
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _update_extent(self):
"""Updates the extent of the element being altered by this operation to include the code that has changed."""
|
#For new instances, their length is being updated by the module
#updater and will include *all* statements, so we don't want to
#keep changing the endpoints.
if self.bar_extent:
return
original = self.element.end
if self.mode == "insert":
#The end value needs to increase by the length of the current
#statement being executed.
self.element.end += self.curlength
elif self.mode == "delete":
#Reduce end by statement length
self.element.end -= self.curlength
elif self.mode == "replace":
#Check whether we are currently doing the delete portion of the
#replacement or the insert portion.
if self.state[0] is None:
self.element.end -= self.curlength
else:
self.element.end += self.curlength
#Keep track of the total effect of all statements in this operation
#so that it is easy to update the module once they are all done.
self.length += self.element.end - original
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _get_buffered(self):
"""Gets a list of the statements that are new for the real time update."""
|
lines = self.context.bufferstr[self.ibuffer[0]:self.ibuffer[1]]
return self._get_statements(lines, self.ibuffer[0])
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _get_cached(self):
"""Gets a list of statements that the operation will affect during the real time update."""
|
lines = self.context.cachedstr[self.icached[0]:self.icached[1]]
return self._get_statements(lines, self.icached[0])
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def update(self, context):
"""Updates all references in the cached representation of the module with their latest code from the specified source code string that was extracted from the emacs buffer. :arg context: the buffer context information. """
|
#Get a list of all the operations that need to be performed and then
#execute them.
self._operations = self._get_operations(context)
for i in range(len(self._operations)):
self._operations[i].handle()
self.update_extent(self._operations[i])
#Last of all, we update the string content of the cached version of
#the module to have the latest source code.
if len(self._operations) > 0:
context.module.update_refstring(context.refstring)
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _get_operations(self, context):
"""Returns a list of operations that need to be performed to turn the cached source code into the one in the buffer."""
|
#Most of the time, the real-time update is going to fire with
#incomplete statements that don't result in any changes being made
        #to the module instances. The SequenceMatcher caches hashes for the
        #second argument. Logically, we want to turn the cached version into
        #the buffer version; however, the buffer is the string that keeps
        #changing.
        #In order to optimize the real-time update, we *switch* the two strings
#when we set the sequence and then fix the references on the operations
#after the fact.
if context.module.changed or self.unset:
self.matcher.set_seq2(context.cachedstr)
self.unset = False
#Set the changed flag back to false now that the sequencer has
#reloaded it.
context.module.changed = False
self.matcher.set_seq1(context.bufferstr)
opcodes = self.matcher.get_opcodes()
result = []
#Index i keeps track of how many operations were actually added because
#they constituted a change we need to take care of.
i = 0
for code in opcodes:
if code[0] != "equal":
#Replacements don't have a mode change. All of the operations
#switch the order of the line indices for the two strings.
if code[0] == "insert":
newcode = ("delete", code[3], code[4], code[1], code[2])
elif code[0] == "delete":
newcode = ("insert", code[3], code[4], code[1], code[2])
else:
newcode = ("replace", code[3], code[4], code[1], code[2])
op = Operation(context, self.parser, newcode, i)
result.append(op)
i += 1
return result
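The index/mode swap can be seen with difflib directly; a tiny sketch assuming two line lists standing in for the cached and buffered sources.

from difflib import SequenceMatcher

buffered = ["a", "x", "b"]          # what the editor buffer now contains
cached = ["a", "b"]                 # what the parsed module still reflects
matcher = SequenceMatcher(None, buffered, cached)   # seq1 = buffer, seq2 = cache
for tag, i1, i2, j1, j2 in matcher.get_opcodes():
    if tag == "delete":             # extra buffer lines -> insert them into the cache
        print(("insert", j1, j2, i1, i2))
    elif tag == "insert":           # lines gone from the buffer -> delete from the cache
        print(("delete", j1, j2, i1, i2))
    elif tag == "replace":
        print(("replace", j1, j2, i1, i2))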
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def update_instance_extent(self, instance, module, operation):
"""Updates a new instance that was added to a module to be complete if the end token is present in any remaining, overlapping operations. """
|
#Essentially, we want to look in the rest of the statements that are
#part of the current operation to see how many more of them pertain
#to the new instance that was added.
#New signatures only result in instances being added if mode is "insert"
#or "replace". In both cases, the important code is in the buffered
#statements, *not* the cached version. Iterate the remaining statements
#in the buffer and look for the end_token for the instance. If we don't
#find it, check for overlap between the operations' index specifiers.
instance.end -= operation.curlength
end_token = instance.end_token
(ibuffer, length) = self._find_end_token(end_token, operation)
cum_length = length
opstack = [operation]
while ibuffer is None and opstack[-1].index + 1 < len(self._operations):
#We didn't find a natural termination to the new instance. Look for
#overlap in the operations
noperation = self._operations[opstack[-1].index + 1]
#We only want to check the next operation if it is a neighbor
#in line numbers in the buffer.
if noperation.ibuffer[0] - opstack[-1].ibuffer[1] == 1:
(ibuffer, length) = self._find_end_token(end_token, noperation)
cum_length += length
opstack.append(noperation)
else:
break
if ibuffer is not None:
instance.incomplete = False
instance.end += cum_length
for op in opstack:
op.bar_extent = True
op.set_element(instance)
else:
#We set the element for the current operation to be the new instance
#for the rest of statements in its set.
operation.set_element(instance)
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _find_end_token(self, end_token, operation):
"""Looks for a statement in the operation's list that matches the specified end token. Returns the index of the statement in the operation that matches. """
|
ibuffer, icache = operation.state
length = operation.buffered[ibuffer][2]
result = None
for i in range(len(operation.buffered) - ibuffer - 1):
linenum, statement, charlength = operation.buffered[i + ibuffer + 1]
length += charlength
if end_token in statement.lower():
#We already have the absolute char index for the start of the
#instance; we just need to update the end.
result = (i + ibuffer + 1, length)
break
#If we didn't find a terminating statement, the full length of the operation
#is a good estimate for the extent of the instance.
if result is None:
result = (None, length)
return result
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def mget(self, *keys, **kwargs):
"""Retrieve values for a set of keys. Args: keys (str list):
the list of keys whose value should be retrieved Keyword arguements: default (object):
the value to use for non-existent keys coherent (bool):
whether all fetched values should be "coherent", i.e no other update was performed on any of those values while fetching from the database. Yields: object: values for the keys, in the order they were passed """
|
default = kwargs.get('default')
coherent = kwargs.get('coherent', False)
for key in keys:
yield self.get(key, default=default)
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def mset(self, values):
"""Set the value of several keys at once. Args: values (dict):
maps a key to its value. """
|
for key, value in values.items():
self.set(key, value)
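A hedged usage sketch, assuming a concrete store that implements get()/set() on top of this base class; note that mget is a generator, so it is wrapped in list() here.

store.mset({'alpha': 1, 'beta': 2})
values = list(store.mget('alpha', 'beta', 'gamma', default=0))
print(values)   # [1, 2, 0] -- missing keys fall back to the default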
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def names(self):
"""Returns a list of possible completions for the symbol under the cursor in the current user context."""
|
#This is where the context information is extremely useful in
#limiting the extent of the search space we need to examine.
if self._names is None:
attribute = self.get_attribute()
if self.context.module is not None:
symbol = self.context.symbol
fullsymbol = self.context.full_symbol
self._names = self._complete_el(symbol, attribute, fullsymbol)
else:
self._names = []
return self._names
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def bracket_complete(self):
"""Returns a function call signature for completion whenever a bracket '(' is pressed."""
|
#The important thing to keep track of here is that '(' can be
#pushed in the following places:
# - in a subroutine/function definition
# - when calling a function or subroutine, this includes calls within
# the argument list of a function, e.g. function(a, fnb(c,d), e)
# - when specifying the dimensions of an array for element selection
# - inside of if/do/while blocks.
#If there is a \s immediately preceding the bracket, then short_symbol
        #will be the null string and we are most likely doing arithmetic of some
#sort or a block statement.
if self.context.short_symbol == "":
return {}
line = self.context.current_line.lower()[:self.context.pos[1]-1]
if "subroutine" in line or "function" in line:
#We are in the definition of the subroutine or function. They
#are choosing the parameter names so we don't offer any suggestions.
return {}
#Sometimes people do if() with the condition immediately after
#the keyword, this regex will catch that.
symbol = self.context.short_symbol.lower()
if symbol in ["if", "do", "while", "elseif", "case", "associate"]:
return {}
#All that should be left now are dimensions and legitimate function
#calls.
fullsymbol = self.context.short_full_symbol.lower()
return self._bracket_complete_sig(symbol, fullsymbol)
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _bracket_complete_sig(self, symbol, fullsymbol):
"""Returns the call signature and docstring for the executable immediately preceding a bracket '(' that was typed."""
|
if symbol != fullsymbol:
#We have a sym%sym%... chain and the completion just needs to
#be the signature of the member method.
target, targmod = self._get_chain_parent_symbol(symbol, fullsymbol)
if symbol in target.executables:
child = target.executables[symbol]
return self._compile_signature(child.target, child.name)
elif symbol in target.members:
#We are dealing with a dimension request on an array that
#is a member of the type.
child = target.members[symbol]
return self._bracket_dim_suggest(child)
else:
return {}
else:
#We must be dealing with a regular executable or builtin fxn
#or a regular variable dimension.
iexec = self._bracket_exact_exec(symbol)
if iexec is not None:
#It is indeed a function we are completing for.
return self._compile_signature(iexec, iexec.name)
else:
#We need to look at local and global variables to find the
#variable declaration and dimensionality.
ivar = self._bracket_exact_var(symbol)
return self._bracket_dim_suggest(ivar)
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _bracket_dim_suggest(self, variable):
"""Returns a dictionary of documentation for helping complete the dimensions of a variable."""
|
if variable is not None:
#Look for <dimension> descriptors that are children of the variable
#in its docstrings.
dims = variable.doc_children("dimension", ["member", "parameter", "local"])
descript = str(variable)
if len(dims) > 0:
descript += " | " + " ".join([DocElement.format_dimension(d) for d in dims])
return dict(
params=[variable.dimension],
index=0,
call_name=variable.name,
description=descript,
)
else:
return []
|