text_prompt
stringlengths 100
17.7k
⌀ | code_prompt
stringlengths 7
9.86k
⌀ |
---|---|
<SYSTEM_TASK:>
Appends the Fortran code to write the wrapper executable to the specified 'lines' list.
<END_TASK>
<USER_TASK:>
Description:
def _write_executable_f90(self, execname, lines):
    """Appends the Fortran code to write the wrapper executable to the
    specified 'lines' list.

    :arg execname: the name of the executable in self.module.executables
      to write.
    :arg lines: the list of code strings being accumulated for the wrapper.
    """
    executable = self.module.executables[execname]
    #The 14 in the formatting indent comes from 4 spaces, 2 from "_c", 1 from the spacing
    #between 'subroutine' and name of the executable, 10 from subroutine, 1 from the ("
    cparams = present_params(_ctypes_ex_parameters(executable), len(execname) + 14)
    lines.append(" subroutine {}_c({}) BIND(C)".format(execname, cparams))
    #Next, we add the variables declarations, the call to the original executable and then
    #the handling of the output variables.
    lines.append(" " + "\n ".join(_ctypes_ex_variables(executable)))
    lines.append(" " + "\n ".join(_ctypes_ex_compatvars(executable)))
    #Add assignment/allocate statements for the intent(in*) parameters' *local* variable
    #declarations so that we can match the signature exactly.
    lines.extend(_ctypes_ex_assign(executable))
    if type(executable).__name__ == "Subroutine":
        prefix = "call "
    else:
        prefix = "{}_f = ".format(execname)
    #len() works on strings directly; wrapping them in list() first was redundant.
    spacing = len(prefix) + len(execname) + 4
    lines.append(" {}{}({})".format(prefix, execname,
                                    present_params(executable.paramorder, spacing, 90)))
    lines.append("")
    lines.extend(_ctypes_ex_clean(executable))
    lines.append(" end subroutine {}_c\n".format(execname))
<SYSTEM_TASK:>
Adds the module and its dependencies to the result list in dependency order.
<END_TASK>
<USER_TASK:>
Description:
def _process_module_needs(self, modules):
"""Adds the module and its dependencies to the result list in dependency order.""" |
result = list(modules)
for i, module in enumerate(modules):
#It is possible that the parser couldn't find it, if so
#we can't create the executable!
if module in self.module.parent.modules:
modneeds = self.module.parent.modules[module].needs
for modn in modneeds:
if modn not in result:
#Since this module depends on the other, insert the other
#above it in the list.
result.insert(i, modn)
else:
x = result.index(modn)
if x > i:
#We need to move this module higher up in the food chain
#because it is needed sooner.
result.remove(modn)
result.insert(i, modn)
newi = result.index(modn)
else:
raise ValueError("Unable to find module {}.".format(module))
return result |
<SYSTEM_TASK:>
Returns the absolute path of the 'relpath' relative to the specified code directory.
<END_TASK>
<USER_TASK:>
Description:
def coderelpath(coderoot, relpath):
    """Returns the absolute path of 'relpath' interpreted relative to the
    specified code directory.

    :arg coderoot: the directory that 'relpath' is resolved against.
    :arg relpath: the (possibly relative) path to resolve.
    """
    from os import chdir, getcwd, path
    cd = getcwd()
    chdir(coderoot)
    try:
        result = path.abspath(relpath)
    finally:
        #Always restore the original working directory, even if abspath
        #raises, so callers never see a changed cwd.
        chdir(cd)
    return result
<SYSTEM_TASK:>
Initializes the connection to the server via SSH.
<END_TASK>
<USER_TASK:>
Description:
def _setup_ssh(self):
    """Initializes the connection to the server via SSH."""
    global paramiko
    #BUGFIX: the original tested 'paramiko is none' (lowercase), which is a
    #NameError at runtime; the sentinel singleton is None.
    if paramiko is None:
        #Lazy import: paramiko is only loaded when SSH is actually used.
        import paramiko
    self.ssh = paramiko.SSHClient()
    self.ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())
    self.ssh.connect(self.server, username=self.user, pkey=self.pkey)
<SYSTEM_TASK:>
Returns the absolute path to the specified relative or user-relative
<END_TASK>
<USER_TASK:>
Description:
def abspath(self, path):
    """Returns the absolute path to the specified relative or user-relative
    path; ssh paths are already fully qualified and are returned as-is."""
    #SSH paths carry the server prefix, so they are absolute by construction.
    if not self.is_ssh(path):
        return os.path.abspath(path)
    return path
<SYSTEM_TASK:>
Returns the full path to the parent directory of the specified
<END_TASK>
<USER_TASK:>
Description:
def dirname(self, path):
    """Returns the full path to the parent directory of the specified
    file path; works for both local and ssh paths."""
    if not self.is_ssh(path):
        return os.path.dirname(path)
    #Strip the tramp prefix, take the parent of the remote path, then
    #re-wrap it so the result is still an ssh path.
    parent = os.path.dirname(self._get_remote(path))
    return self._get_tramp_path(parent)
<SYSTEM_TASK:>
Touches the specified file so that its modified time changes.
<END_TASK>
<USER_TASK:>
Description:
def touch(self, filepath):
    """Touches the specified file so that its modified time changes
    (creating the file if it does not exist yet)."""
    if self.is_ssh(filepath):
        self._check_ssh()
        remotepath = self._get_remote(filepath)
        stdin, stdout, stderr = self.ssh.exec_command("touch {}".format(remotepath))
        stdin.close()
    else:
        #Use the standard library instead of shelling out to 'touch': this
        #is portable (works on Windows) and avoids shell-quoting problems
        #with unusual file names.
        with open(filepath, "a"):
            os.utime(filepath, None)
<SYSTEM_TASK:>
Replaces the user root ~ with the full path on the file system.
<END_TASK>
<USER_TASK:>
Description:
def expanduser(self, filepath, ssh=False):
    """Replaces the user root ~ with the full path on the file system.
    Works for local disks and remote servers. For remote servers, set
    ssh=True."""
    if not ssh:
        return os.path.expanduser(filepath)
    #Ask the remote shell for the home directory, then substitute it in
    #and re-wrap the result as a tramp path.
    self._check_ssh()
    stdin, stdout, stderr = self.ssh.exec_command("cd; pwd")
    stdin.close()
    home = stdout.read().split()[0]
    return self._get_tramp_path(filepath.replace("~", home))
<SYSTEM_TASK:>
Gets the last time that the file was modified.
<END_TASK>
<USER_TASK:>
Description:
def getmtime(self, filepath):
    """Gets the last time that the file was modified."""
    if self.is_ssh(filepath):
        #Remote files are stat'ed over the SFTP channel.
        self._check_ftp()
        return self.ftp.stat(self._get_remote(filepath)).st_mtime
    return os.path.getmtime(filepath)
<SYSTEM_TASK:>
Returns the entire file as a string even if it is on a remote
<END_TASK>
<USER_TASK:>
Description:
def read(self, filepath):
    """Returns the entire file as a string even if it is on a remote
    server."""
    target = self._read_check(filepath)
    contents = ""
    if os.path.isfile(target):
        with open(target) as handle:
            contents = handle.read()
        #If we fetched this file via SSH, remove the temporary local copy.
        if self.is_ssh(filepath):
            os.remove(target)
    return contents
<SYSTEM_TASK:>
Performs an os.walk on a local or SSH filepath.
<END_TASK>
<USER_TASK:>
Description:
def walk(self, dirpath):
    """Performs an os.walk on a local or SSH filepath."""
    if not self.is_ssh(dirpath):
        return os.walk(dirpath)
    #Remote trees are traversed lazily over the SFTP channel.
    self._check_ftp()
    return self._sftp_walk(self._get_remote(dirpath))
<SYSTEM_TASK:>
Performs the same function as os.walk but over the SSH channel.
<END_TASK>
<USER_TASK:>
Description:
def _sftp_walk(self, remotepath):
    """Performs the same function as os.walk but over the SSH channel.

    :arg remotepath: the remote directory to traverse.
    Yields (path, folders, files) tuples like os.walk, one directory at
    a time.
    """
    #Get all the files and folders in the current directory over SFTP.
    path = remotepath # We need this instance of path for the yield to work.
    files=[]
    folders=[]
    for f in self.ftp.listdir_attr(remotepath):
        if S_ISDIR(f.st_mode):
            folders.append(self._get_tramp_path(f.filename))
        else:
            files.append(f.filename)
    #We use yield so that if there are really large folders with
    #complicated structures, we don't wait forever while the SFTP
    #keeps traversing down the tree.
    yield path, folders, files
    #Now call this method recursively for each of sub-directories.
    #NOTE(review): 'folders' holds tramp-prefixed names (via _get_tramp_path),
    #yet they are joined onto the bare remote path here — confirm that
    #os.path.join yields a valid remote path in that case.
    for folder in folders:
        new_path = os.path.join(remotepath, folder)
        for x in self._sftp_walk(new_path):
            yield x
<SYSTEM_TASK:>
Returns an md5 hash for the specified file path.
<END_TASK>
<USER_TASK:>
Description:
def _get_hashed_path(self, path):
"""Returns an md5 hash for the specified file path.""" |
return self._get_path('%s.pkl' % hashlib.md5(path.encode("utf-8")).hexdigest()) |
<SYSTEM_TASK:>
Returns the private key for quick authentication on the SSH server.
<END_TASK>
<USER_TASK:>
Description:
def pkey(self):
    """Returns the (lazily loaded, cached) private key for quick
    authentication on the SSH server."""
    cached = self._pkey
    if cached is None:
        #First access: load the key and remember it for next time.
        cached = self._get_pkey()
        self._pkey = cached
    return cached
<SYSTEM_TASK:>
Runs the main command or sub command based on user input
<END_TASK>
<USER_TASK:>
Description:
def run(self, args=None):
    """
    Runs the main command or sub command based on user input.

    :arg args: an already-parsed argparse namespace; when falsy, sys.argv
      is parsed instead.
    """
    if not args:
        args = self.parse(sys.argv[1:])
    if getattr(args, 'verbose', False):
        #A 'verbose' flag on the namespace turns on debug logging.
        self.logger.setLevel(logging.DEBUG)
    try:
        if hasattr(args, 'run'):
            #A sub-command was selected; dispatch to its handler.
            args.run(self, args)
        else:
            self.__main__(args) # pylint: disable-msg=E1101
    except Exception as e: # pylint: disable-msg=W0703
        import traceback
        #Full traceback only at debug level; users see the short message.
        self.logger.debug(traceback.format_exc())
        self.logger.error(str(e))
        if self.raise_exceptions:
            raise
        #Exit non-zero so shell callers can detect the failure.
        sys.exit(2)
<SYSTEM_TASK:>
Returns the number of values recorded on this single line. If the
<END_TASK>
<USER_TASK:>
Description:
def nvalues(self):
    """Returns the number of values recorded on this single line. If the
    number is variable, it returns -1. The result is computed once and
    cached in self._nvalues."""
    if self._nvalues is None:
        self._nvalues = 0
        for val in self.values:
            #Fixed-width entries are stored as ints; anything else (e.g. a
            #variable-count specifier) makes the whole line variable-length.
            #BUGFIX: the original compared type(val) == type(int), which
            #tests whether val is a *class* (type(int) is the metaclass
            #'type'), never an int. The sibling parse() method uses
            #isinstance(..., int) for the same values list.
            if isinstance(val, int):
                self._nvalues += val
            else:
                self._nvalues = -1
                break
    return self._nvalues
<SYSTEM_TASK:>
Returns the lines that this template line should add to the input file.
<END_TASK>
<USER_TASK:>
Description:
def write(self, valuedict):
    """Returns the lines that this template line should add to the input file.

    :arg valuedict: dictionary of values keyed by template identifier.
    """
    #Resolve the value: an explicit entry wins, then the default, then a
    #'from' reference to another tag's value.
    if self.identifier in valuedict:
        value = valuedict[self.identifier]
    elif self.default is not None:
        value = self.default
    elif self.fromtag is not None and self.fromtag in valuedict:
        if self.operator == "count":
            #'count' stores the number of entries in the referenced tag
            #rather than its contents.
            value = len(valuedict[self.fromtag])
        else:
            msg.err("referenced 'from' attribute/operator {} not in xml dictionary.".format(self.fromtag))
            exit(1)
    else:
        msg.err("a required line {} had no value or default specified.".format(self.identifier))
        exit(1)
    #Before we generate the result, validate the choices if they exist.
    #NOTE(review): this iterates 'value' directly; a scalar value (e.g. the
    #int produced by the 'count' operator) would raise TypeError here —
    #confirm choices are only configured for list/string-valued lines.
    if len(self.choices) > 0:
        for single in value:
            if str(single) not in self.choices:
                msg.warn("failed choices validation for {} in {} (line {})".format(
                    single, self.choices, self.identifier))
    result = []
    #Get the string representation of the value.
    if isinstance(value, list):
        sval = " ".join([ str(val) for val in value])
    else:
        sval = str(value)
    #Variable-length or long lines put the comment on its own line.
    if self.comment != "" and (self.nvalues < 0 or self.nvalues > 5):
        #We will put the comments on a separate line from the actual values.
        result.append(self.comment)
        result.append(sval)
    else:
        result.append("{} {}".format(sval, self.comment))
    return result
<SYSTEM_TASK:>
Parses the contents of the specified XML element using template info.
<END_TASK>
<USER_TASK:>
Description:
def parse(self, element):
    """Parses the contents of the specified XML element using template info.

    :arg element: the XML element from the input file being converted.
    """
    result = []
    if element.text is not None and element.tag == self.identifier:
        raw = element.text.split()
        offset, idx = 0, 0
        while idx < len(self.values):
            dtype = self.dtype[idx]
            count = self.values[idx]
            if isinstance(count, int):
                #A fixed number of entries of this type.
                cast = self._caster[dtype]
                result.extend([cast(raw[offset + i]) for i in range(count)])
                offset += count
                idx += 1
            else:
                #This is a variable argument line; consume everything that
                #remains as the current type.
                result.extend([self._caster[dtype](val) for val in raw[offset:]])
                break
    else:
        msg.warn("no results for parsing {} using line {}".format(element.tag, self.identifier))
    return result
<SYSTEM_TASK:>
Returns the specified value as int if possible.
<END_TASK>
<USER_TASK:>
Description:
def _cast_int(self, value):
"""Returns the specified value as int if possible.""" |
try:
return int(value)
except ValueError:
msg.err("Cannot convert {} to int for line {}.".format(value, self.identifier))
exit(1) |
<SYSTEM_TASK:>
Returns the specified value as float if possible.
<END_TASK>
<USER_TASK:>
Description:
def _cast_float(self, value):
"""Returns the specified value as float if possible.""" |
try:
return float(value)
except ValueError:
msg.err("Cannot convert {} to float for line {}.".format(value, self.identifier))
exit(1) |
<SYSTEM_TASK:>
Loads all the child line elements from the XML group element.
<END_TASK>
<USER_TASK:>
Description:
def _load(self, element, commentchar):
    """Loads all the child line elements from the XML group element.

    :arg element: the XML group element whose children define the lines.
    :arg commentchar: the comment character for the template's format.
    """
    for child in element:
        #Children without an id cannot be keyed and are skipped with a warning.
        if "id" not in child.attrib:
            msg.warn("no id element in {}. Ignored. (group._load)".format(child))
            continue
        tline = TemplateLine(child, self, commentchar)
        self.order.append(tline.identifier)
        self.lines[tline.identifier] = tline
<SYSTEM_TASK:>
Extracts the values from the specified XML element that is being converted.
<END_TASK>
<USER_TASK:>
Description:
def parse(self, element):
    """Extracts the values from the specified XML element that is being
    converted: each known child is parsed by its matching template line."""
    #All the children of this element are what we are trying to parse.
    parsed = []
    for child in element:
        if child.tag in self.lines:
            parsed.append({child.tag: self.lines[child.tag].parse(child)})
    return parsed
<SYSTEM_TASK:>
Generates the lines for the converted input file using the specified
<END_TASK>
<USER_TASK:>
Description:
def write(self, valuedict):
    """Generates the lines for the converted input file using the specified
    value dictionary."""
    result = []
    if self.identifier not in valuedict:
        return result
    values = valuedict[self.identifier]
    if self.comment != "":
        result.append(self.comment)
    if self.repeat is not None and type(values) == type([]):
        if self.repeat.isdigit():
            #Repeat a fixed number of times, consuming one entry per pass.
            for i in range(int(self.repeat)):
                result.extend(self._write_iterate(values[i]))
        else:
            #We are repeating for as many values as we have in the value
            #entry for the group in the dictionary.
            for value in values:
                result.extend(self._write_iterate(value))
    elif type(values) == type({}):
        #This group doesn't get repeated, so the values variable must be a
        #dictionary; run it once. (Intentionally mirrors the original:
        #this reassignment replaces any comment line appended above.)
        result = self._write_iterate(values)
    return result
<SYSTEM_TASK:>
Generates the lines for a single pass through the group.
<END_TASK>
<USER_TASK:>
Description:
def _write_iterate(self, values):
"""Generates the lines for a single pass through the group.""" |
result = []
for key in self.order:
result.append(self.lines[key].write(values))
if len(result) > 1:
return result
else:
return result[0] |
<SYSTEM_TASK:>
Extracts the XML template data from the file.
<END_TASK>
<USER_TASK:>
Description:
def _load(self):
    """Extracts the XML template data from the file at self.path.

    Exits the process with an error message when the file is missing or
    is not a fortpy template for this instance's conversion direction.
    """
    if os.path.exists(self.path):
        root = ET.parse(self.path).getroot()
        #Only accept templates whose root fortpy tag declares template mode
        #and the same conversion direction as this instance.
        if (root.tag == "fortpy" and "mode" in root.attrib and
            root.attrib["mode"] == "template" and "direction" in root.attrib and
            root.attrib["direction"] == self.direction):
            #First, we need instances of the template contents for each of the
            #versions listed in the fortpy tag.
            for v in _get_xml_version(root):
                self.versions[v] = TemplateContents()
            #Now we can update the contents objects using the XML data.
            self._load_entries(root)
            #See if a custom name was specified for the auto-converted
            #files.
            if "autoname" in root.attrib:
                self.name = root.attrib["autoname"]
        else:
            msg.err("the specified template {} ".format(self.path) +
                    "is missing the mode and direction attributes.")
            exit(1)
    else:
        msg.err("could not find the template {}.".format(self.path))
        exit(1)
<SYSTEM_TASK:>
Generates the lines for the converted input file from the valuedict.
<END_TASK>
<USER_TASK:>
Description:
def write(self, valuedict, version):
    """Generates the lines for the converted input file from the valuedict.

    :arg valuedict: a dictionary of values where the keys are ids in the
      template and the values obey their template rules.
    :arg version: the target version of the output file.
    """
    output = []
    #An unknown version produces no output rather than an error.
    if version in self.versions:
        contents = self.versions[version]
        for tag in contents.order:
            output.extend(contents.entries[tag].write(valuedict))
    return output
<SYSTEM_TASK:>
Loads all the child entries of the input template from the
<END_TASK>
<USER_TASK:>
Description:
def _load_entries(self, root):
"""Loads all the child entries of the input template from the
specified root element.""" |
mdict = {
"comments": self._comment,
"line": self._line,
"group": self._group
}
for entry in root:
mdict[entry.tag](entry) |
<SYSTEM_TASK:>
Extracts the character to use for comments in the input file.
<END_TASK>
<USER_TASK:>
Description:
def _comment(self, element):
    """Extracts the character to use for comments in the input file and
    stores it on every version the element applies to."""
    for version in _get_xml_version(element):
        self.versions[version].comment = element.text
<SYSTEM_TASK:>
Parses the XML element as a single line entry in the input file.
<END_TASK>
<USER_TASK:>
Description:
def _line(self, element):
    """Parses the XML element as a single line entry in the input file."""
    for v in _get_xml_version(element):
        #Lines without an id cannot be keyed; warn once per version.
        if "id" not in element.attrib:
            msg.warn("no id element in {}. Ignored. (_line)".format(element))
            continue
        entry = TemplateLine(element, None, self.versions[v].comment)
        self.versions[v].entries[entry.identifier] = entry
        self.versions[v].order.append(entry.identifier)
<SYSTEM_TASK:>
Converts the specified file using the relevant template.
<END_TASK>
<USER_TASK:>
Description:
def convert(self, path, version, target = None):
    """Converts the specified file using the relevant template.

    :arg path: the full path to the file to convert.
    :arg version: the new version of the file.
    :arg target: the optional path to save the file under. If not
      specified, the file is saved based on the template file name.
    """
    #Extract the template and values from the XML input file, then render
    #them in the keywordless format.
    values, template = self.parse(path)
    lines = template.write(values, version)
    #Default the output location next to the input, named by the template.
    if target is None:
        target = os.path.join(os.path.dirname(path), template.name)
    with open(os.path.expanduser(target), 'w') as outfile:
        outfile.write("\n".join(lines))
<SYSTEM_TASK:>
Extracts a dictionary of values from the XML file at the specified path.
<END_TASK>
<USER_TASK:>
Description:
def parse(self, path):
    """Extracts a dictionary of values from the XML file at the specified path.

    :returns: a (values, template) tuple; values is None when the template
      could not be loaded for the file.
    """
    #Load the template that will be used for parsing the values.
    expath, template, root = self._load_template(path)
    #BUGFIX: 'values' was previously only assigned inside the if-branch,
    #so a missing template raised UnboundLocalError on the return below.
    values = None
    if expath is not None:
        values = template.parse(root)
    return (values, template)
<SYSTEM_TASK:>
Converts the specified source file to a new version number.
<END_TASK>
<USER_TASK:>
Description:
def convert(self, path, version, target):
    """Converts the specified source file to a new version number.

    :arg path: the file whose stored representation should be converted.
    :arg version: the version number stamped into the output file.
    :arg target: the path to write the converted file to.
    """
    source = self.comparer.get_representation(path)
    #The version stamp is embedded as a fortpy XML comment on the first line.
    lines = [ '# <fortpy version="{}"></fortpy>\n'.format(version) ]
    for line in self.comparer.template.contents[version].preamble:
        lines.append(line.write(source.preamble, source.version, source.stored) + "\n")
    #Each body template line is emitted once per value set in the source body.
    for line in self.comparer.template.contents[version].body:
        for valueset in source.body:
            lines.append(line.write(valueset, source.version, source.stored) + "\n")
    with open(os.path.expanduser(target), 'w') as f:
        f.writelines(lines)
<SYSTEM_TASK:>
Returns the full string of the file contents from the cache for
<END_TASK>
<USER_TASK:>
Description:
def cachedstr(self):
    """Returns the contents (as a list of lines) from the cache for the
    file that we are currently providing intellisense for."""
    if self._cachedstr is None:
        #Without a parsed module there is nothing to split into lines.
        if self.module is None:
            self._cachedstr = []
        else:
            self._cachedstr = self.module.refstring.splitlines()
    return self._cachedstr
<SYSTEM_TASK:>
Returns the symbol under the cursor looking both directions as part
<END_TASK>
<USER_TASK:>
Description:
def exact_match(self):
    """Returns the symbol under the cursor, scanning in both directions,
    as part of a definition lookup for an exact match.
    """
    #No grouping or anything else fancy is needed here: walk backward and
    #forward from the cursor until a character that can't be part of a
    #variable or function name is found.
    if self._exact_match is None:
        line = self.current_line
        stops = ['(', ')', '\n', ' ', '=', '%', ',']
        begin = None
        finish = None
        scan = self.pos[1] - 1
        while scan >= 0 and begin is None:
            if line[scan] in stops:
                begin = scan + 1
            scan -= 1
        scan = self.pos[1]
        while scan < len(line) and finish is None:
            if line[scan] in stops:
                finish = scan
            scan += 1
        #A None bound slices from the start / to the end of the line.
        self._exact_match = line[begin:finish].lower()
    return self._exact_match
<SYSTEM_TASK:>
Gets the full symbol excluding the character under the cursor.
<END_TASK>
<USER_TASK:>
Description:
def short_full_symbol(self):
    """Gets the full symbol excluding the character under the cursor."""
    if self._short_full_symbol is None:
        #plus=False excludes the cursor character; brackets=True strips
        #matched bracket pairs before the regex runs.
        extracted = self._symbol_extract(cache.RE_FULL_CURSOR, False, True)
        self._short_full_symbol = extracted
    return self._short_full_symbol
<SYSTEM_TASK:>
Gets the symbol under the current cursor.
<END_TASK>
<USER_TASK:>
Description:
def symbol(self):
    """Gets the symbol under the current cursor."""
    if self._symbol is None:
        #Default extraction: include the cursor character, no bracket
        #stripping.
        extracted = self._symbol_extract(cache.RE_CURSOR)
        self._symbol = extracted
    return self._symbol
<SYSTEM_TASK:>
Returns the symbol under the cursor AND additional contextual
<END_TASK>
<USER_TASK:>
Description:
def full_symbol(self):
    """Returns the symbol under the cursor AND additional contextual
    symbols in the case of %-separated lists of type members."""
    if self._full_symbol is None:
        #Bracket pairs are stripped so array-indexed members still match.
        extracted = self._symbol_extract(cache.RE_FULL_CURSOR, brackets=True)
        self._full_symbol = extracted
    return self._full_symbol
<SYSTEM_TASK:>
Extracts a symbol or full symbol from the current line,
<END_TASK>
<USER_TASK:>
Description:
def _symbol_extract(self, regex, plus = True, brackets=False):
"""Extracts a symbol or full symbol from the current line,
optionally including the character under the cursor.
:arg regex: the compiled regular expression to use for extraction.
:arg plus: when true, the character under the cursor *is* included.
:arg brackets: when true, matching pairs of brackets are first removed
before the regex is run.
""" |
charplus = self.pos[1] + (1 if plus else -1)
consider = self.current_line[:charplus][::-1]
#We want to remove matching pairs of brackets so that derived types
#that have arrays still get intellisense.
if brackets==True:
#The string has already been reversed, just run through it.
rightb = []
lastchar = None
for i in range(len(consider)):
if consider[i] == ")":
rightb.append(i)
elif consider[i] == "(" and len(rightb) > 0:
lastchar = i
rightb.pop()
if lastchar is not None:
consider = '%' + consider[lastchar+1:]
rematch = regex.match(consider)
if rematch is not None:
return rematch.group("symbol")[::-1]
else:
return "" |
<SYSTEM_TASK:>
Determines the index of the parameter in a call list using
<END_TASK>
<USER_TASK:>
Description:
def call_arg_index(self):
    """Determines the index of the parameter in a call list using
    string manipulation and context information.

    Returns None when the cursor is not inside a recognized sub/fun call
    (or when bracket parsing fails).
    """
    #The function name we are calling should be in el_name by now.
    if self._call_index is None:
        if (self.el_section == "body" and
            self.el_call in [ "sub", "fun" ]):
            #Get hold of the element instance of the function being called so
            #we can examine its parameters.
            fncall = self.el_name
            if fncall in self.current_line[:self.pos[1]]:
                args = self.current_line[:self.pos[1]].split(fncall)[1]
            else:
                args = ""
            #This handles the case of the bracket-complete.
            #(Note: this early return bypasses the _call_index cache.)
            if args == "":
                return 0
            #The nester requires each start bracket to have an end bracket.
            if args[-1] != ")":
                args += ")"
            #Pyparsing handles calls where functions are being called as
            #the values for parameters like function(a, fun(c,d), r).
            try:
                nl = cache.nester.parseString(args).asList()[0]
                clean = [n for n in nl if not isinstance(n, list) and n != ","]
                #We need to specially handle the case where they have started typing the
                #name of the first parameter but haven't put a ',' in yet. In that case,
                #we are still on the first argument.
                if clean[-1][-1] != ',':
                    self._call_index = len(clean) - 1
                else:
                    self._call_index = len(clean)
            except:
                #NOTE(review): bare except deliberately swallows pyparsing
                #errors on malformed fragments; the warning is the only trace.
                msg.warn("ARG INDEX: lookup failed on bracket parsing.")
    return self._call_index
<SYSTEM_TASK:>
Sets up the constant regex strings etc. that can be used to
<END_TASK>
<USER_TASK:>
Description:
def _setup_regex(self):
    """Sets up the constant regex strings etc. that can be used to
    parse the strings for determining context."""
    #Mirror the pre-compiled patterns from the cache module onto this
    #instance for convenient attribute access.
    for attr in ("RE_COMMENTS", "RE_MODULE", "RE_TYPE",
                 "RE_EXEC", "RE_MEMBERS", "RE_DEPEND"):
        setattr(self, attr, getattr(cache, attr))
<SYSTEM_TASK:>
Finds values for all the important attributes that determine the
<END_TASK>
<USER_TASK:>
Description:
def _contextualize(self):
"""Finds values for all the important attributes that determine the
user's context.""" |
line, column = self.pos
#Get the module top-level information
self._get_module(line)
if self.module is None:
return
#Use the position of the cursor in the file to decide which
#element we are working on.
self.element = self.module.get_element(line, column)
#Now all that's left is to contextualize the line that the
#cursor is on.
self._deep_match(line, column) |
<SYSTEM_TASK:>
Finds the name of the module and retrieves it from the parser cache.
<END_TASK>
<USER_TASK:>
Description:
def _get_module(self, line):
    """Finds the name of the module and retrieves it from the parser cache.

    :arg line: the cursor line number, used to decide whether we are past
      the CONTAINS separator. Indexing convention (0- vs 1-based) is not
      visible here — TODO confirm against callers.
    """
    #Finding the module name is trivial; start at the beginning of
    #the buffer and iterate lines until we find the module statement.
    for sline in self._source:
        if len(sline) > 0 and sline[0] != "!":
            rmatch = self.RE_MODULE.match(sline)
            if rmatch is not None:
                self.modulename = rmatch.group("name")
                break
    else:
        #for-else: this only runs when the loop finished without 'break' —
        #we don't even have the start of a module in this code file.
        return
    #Before we carry on with the rest of the context, find the separating
    #CONTAINS keyword so we know whether to look for types or subs/funcs.
    #If the code parser hasn't ever parsed this module, parse it now.
    self.parser.isense_parse(self._orig_path, self.modulename)
    self.module = self.parser.modules[self.modulename]
    if line > self.module.contains_index:
        self.section = "contains"
<SYSTEM_TASK:>
Checks the contents of executables, types and modules for member
<END_TASK>
<USER_TASK:>
Description:
def _deep_match(self, line, column):
    """Checks the contents of executables, types and modules for member
    definitions and updates the context.

    :arg line: the index of the cursor line in self._source.
    :arg column: the cursor column on that line.
    """
    #Now we just try each of the possibilities for the current line.
    if self._match_member(line, column):
        self.el_section = "vars"
        self.el_type = ValueElement
        self.el_name = self.col_match.group("names")
        return
    if isinstance(self.element, Executable) or isinstance(self.element, Module):
        #We are inside of a subroutine or function definition.
        #It is either params, vars or body. We already tested for variable
        #declarations in the match_member test. Check now to see if we are
        #on the line that defines the function.
        self._match_exec(line)
        if self.col_match:
            self.el_section = "params"
            self.el_call = "assign"
            return
        #This regex incorrectly grabs things like 'if' as functions because
        #they do actually look like functions... We need to filter the list
        #with special keywords before we claim victory. TODO
        self.col_match = self.RE_DEPEND.match(self._source[line])
        if self.col_match:
            self.el_section = "body"
            self.el_name = self.col_match.group("exec")
            if self.col_match.group("sub") and "call" in self.col_match.group("sub"):
                self.el_call = "sub"
                self.el_type = Subroutine
            else:
                self.el_call = "fun"
                self.el_type = Function
            return
    #If we are inside a type, we either got the member variable declaration
    #already, or it is a pointer to a method inside of the module. Add the
    #test for that later. TODO
    # if isinstance(self.element, CustomType):
    #Fallback: treat the line as body code and classify the kind of call.
    self.el_section = "body"
    self.el_type = None
    #We just need to figure out what kind of a call is being made
    #at this column position; the only thing left is:
    if " = " in self._source[line]:
        #Left of '=' the user is editing the target name; right of it,
        #the assigned expression.
        eqi = self._source[line].index("=")
        if column < eqi:
            self.el_call = "name"
        else:
            self.el_call = "assign"
        self.el_name = self._source[line].split("=")[0].strip()
    elif re.match("\s*call\s+", self._source[line]):
        self.el_call = "sub"
        self.el_name = self._source[line].split("call ")[1]
    else:
        #The only thing left (and the hardest to nail down) is
        #the arithmetic catch-all type.
        self.el_call = "arith"
        self.el_name = "[arith]"
<SYSTEM_TASK:>
Looks at line 'i' for a subroutine or function definition.
<END_TASK>
<USER_TASK:>
Description:
def _match_exec(self, i):
"""Looks at line 'i' for a subroutine or function definition.""" |
self.col_match = self.RE_EXEC.match(self._source[i])
if self.col_match is not None:
if self.col_match.group("codetype") == "function":
self.el_type = Function
else:
self.el_type = Subroutine
self.el_name = self.col_match.group("name")
return True
else:
return False |
<SYSTEM_TASK:>
Looks at line 'i' to see if the line matches a module member def.
<END_TASK>
<USER_TASK:>
Description:
def _match_member(self, i, column):
"""Looks at line 'i' to see if the line matches a module member def.""" |
self.col_match = self.RE_MEMBERS.match(self._source[i])
if self.col_match is not None:
if column < self._source[i].index(":"):
self.el_call = "name"
else:
self.el_call = "assign"
return True
else:
return False |
<SYSTEM_TASK:>
Looks at line 'i' to see if the line matches a module user type def.
<END_TASK>
<USER_TASK:>
Description:
def _match_type(self, i):
"""Looks at line 'i' to see if the line matches a module user type def.""" |
self.col_match = self.RE_TYPE.match(self._source[i])
if self.col_match is not None:
self.section = "types"
self.el_type = CustomType
self.el_name = self.col_match.group("name")
return True
else:
return False |
<SYSTEM_TASK:>
Add 'fake' data migrations for existing tables from legacy GeoNode versions
<END_TASK>
<USER_TASK:>
Description:
def upgradedb(options):
    """
    Add 'fake' data migrations for existing tables from legacy GeoNode versions.

    :arg options: task options mapping; the 'version' entry selects which
      legacy version is being upgraded from.
    """
    version = options.get('version')
    if version in ['1.1', '1.2']:
        sh("python manage.py migrate maps 0001 --fake")
        sh("python manage.py migrate avatar 0001 --fake")
    elif version is None:
        #Parenthesized print works identically on Python 2 and 3; the bare
        #print statement used previously is a syntax error on Python 3.
        print("Please specify your GeoNode version")
    else:
        print("Upgrades from version %s are not yet supported." % version)
<SYSTEM_TASK:>
Creates a tarball to use for building the system elsewhere
<END_TASK>
<USER_TASK:>
Description:
def package(options):
    """
    Creates a tarball to use for building the system elsewhere.

    :arg options: paver task options (not read directly by this task).
    """
    import pkg_resources
    import tarfile
    import geonode
    version = geonode.get_version()
    # Use GeoNode's version for the package name.
    pkgname = 'GeoNode-%s-all' % version
    # Create the output directory.
    out_pkg = path(pkgname)
    out_pkg_tar = path("%s.tar.gz" % pkgname)
    # Create a distribution in zip format for the geonode python package.
    dist_dir = path('dist')
    dist_dir.rmtree()
    sh('python setup.py sdist --formats=zip')
    with pushd('package'):
        # Delete old tar files in that directory.
        for f in glob.glob('GeoNode*.tar.gz'):
            old_package = path(f)
            if old_package != out_pkg_tar:
                old_package.remove()
        # Skip rebuilding when this version's package already exists.
        if out_pkg_tar.exists():
            info('There is already a package for version %s' % version)
            return
        # Clean anything that is in the output package tree.
        out_pkg.rmtree()
        out_pkg.makedirs()
        support_folder = path('support')
        install_file = path('install.sh')
        # And copy the default files from the package folder.
        justcopy(support_folder, out_pkg / 'support')
        justcopy(install_file, out_pkg)
        # Pull in the sdist zips built above.
        # NOTE(review): 'geoshape-0.1.zip' hard-codes a version — presumably
        # updated by hand; confirm it tracks the actual rogue/geoshape build.
        geonode_dist = path('..') / 'dist' / 'GeoNode-%s.zip' % version
        justcopy(geonode_dist, out_pkg)
        rogue_dist = path('../..') / 'dist' / 'geoshape-0.1.zip'
        justcopy(rogue_dist, out_pkg)
        # Create a tar file with all files in the output package folder.
        tar = tarfile.open(out_pkg_tar, "w:gz")
        for file in out_pkg.walkfiles():
            tar.add(file)
        # Add the README with the license and important links to documentation.
        tar.add('README', arcname=('%s/README.md' % out_pkg))
        tar.close()
        # Remove all the files in the temporary output package directory.
        out_pkg.rmtree()
        # Report the info about the new package.
        info("%s created" % out_pkg_tar.abspath())
<SYSTEM_TASK:>
Parse `targetname` as if it were a comet.
<END_TASK>
<USER_TASK:>
Description:
def parse_comet(self):
    """Parse `targetname` as if it were a comet.

    :return: (string or None, string or None, string or None);
       The designation, prefix/number, and name of the comet as derived
       from `self.targetname` are extracted into a tuple; each element that
       does not exist is set to `None`. Parenthesis in `self.targetname`
       will be ignored.

    :example: the following table shows the result of the parsing:

    +--------------------------------+--------------------------------+
    |targetname                      |(desig, prefixnumber, name)     |
    +================================+================================+
    |1P/Halley                       |(None, '1P', 'Halley')          |
    +--------------------------------+--------------------------------+
    |3D/Biela                        |(None, '3D', 'Biela')           |
    +--------------------------------+--------------------------------+
    |9P/Tempel 1                     |(None, '9P', 'Tempel 1')        |
    +--------------------------------+--------------------------------+
    |73P/Schwassmann Wachmann 3 C    |(None, '73P',                   |
    |                                |'Schwassmann Wachmann 3 C')     |
    +--------------------------------+--------------------------------+
    |73P-C/Schwassmann Wachmann 3 C  |(None, '73P-C',                 |
    |                                |'Schwassmann Wachmann 3 C')     |
    +--------------------------------+--------------------------------+
    |73P-BB                          |(None, '73P-BB', None)          |
    +--------------------------------+--------------------------------+
    |322P                            |(None, '322P', None)            |
    +--------------------------------+--------------------------------+
    |X/1106 C1                       |('1106 C1', 'X', None)          |
    +--------------------------------+--------------------------------+
    |P/1994 N2 (McNaught-Hartley)    |('1994 N2', 'P',                |
    |                                |'McNaught-Hartley')             |
    +--------------------------------+--------------------------------+
    |P/2001 YX127 (LINEAR)           |('2001 YX127', 'P', 'LINEAR')   |
    +--------------------------------+--------------------------------+
    |C/-146 P1                       |('-146 P1', 'C', None)          |
    +--------------------------------+--------------------------------+
    |C/2001 A2-A (LINEAR)            |('2001 A2-A', 'C', 'LINEAR')    |
    +--------------------------------+--------------------------------+
    |C/2013 US10                     |('2013 US10', 'C', None)        |
    +--------------------------------+--------------------------------+
    |C/2015 V2 (Johnson)             |('2015 V2', 'C', 'Johnson')     |
    +--------------------------------+--------------------------------+
    |C/2016 KA (Catalina)            |('2016 KA', 'C', 'Catalina')    |
    +--------------------------------+--------------------------------+
    """
    import re

    # The alternation has three top-level branches; re.findall returns one
    # tuple per match whose groups map to:
    #   [0-2] prefix/number ('1P', '73P-C', 'C/', ...)
    #   [3-4] provisional designation ('1994 N2', '-146 P1', ...)
    #   [5-6] name ('Halley', 'McNaught-Hartley', ...)
    pat = ('^(([1-9]+[PDCXAI](-[A-Z]{1,2})?)|[PDCXAI]/)' + # prefix [0,1,2]
           '|([-]?[0-9]{3,4}[ _][A-Z]{1,2}([0-9]{1,3})?(-[1-9A-Z]{0,2})?)' +
           # designation [3,4]
           ('|(([A-Z][a-z]?[A-Z]*[a-z]*[ -]?[A-Z]?[1-9]*[a-z]*)' +
            '( [1-9A-Z]{1,2})*)') # name [5,6]
           )
    m = re.findall(pat, self.targetname.strip())
    # print(m)

    prefixnumber = None
    desig = None
    name = None

    if len(m) > 0:
        # Walk every match; later matches may fill in components that
        # earlier matches did not provide.
        for el in m:
            # prefix/number
            if len(el[0]) > 0:
                prefixnumber = el[0].replace('/', '')
            # designation
            if len(el[3]) > 0:
                desig = el[3].replace('_', ' ')
            # name (require at least two characters to avoid picking up
            # stray single letters such as fragment identifiers)
            if len(el[5]) > 0:
                if len(el[5]) > 1:
                    name = el[5]

    return (desig, prefixnumber, name)
<SYSTEM_TASK:>
`True` if `targetname` appears to be a comet orbit record number.
<END_TASK>
<USER_TASK:>
Description:
def isorbit_record(self):
    """`True` if `targetname` appears to be a comet orbit record number.

    NAIF record numbers are 6 digits, begin with a '9' and can
    change at any time.
    """
    import re
    # A full match of exactly '9' followed by five digits.
    return re.match('^9[0-9]{5}$', self.targetname.strip()) is not None
<SYSTEM_TASK:>
`True` if `targetname` appears to be a comet.
<END_TASK>
<USER_TASK:>
Description:
def iscomet(self):
    """`True` if `targetname` appears to be a comet.

    Honors the explicit `comet`/`asteroid` flags first; only when both
    are unset does it fall back to parsing the target name.
    """
    # treat this object as comet if there is a prefix/number
    if self.comet is not None:
        return self.comet
    elif self.asteroid is not None:
        return not self.asteroid
    else:
        # Parse once instead of twice (the original called parse_comet()
        # two times); a designation or a prefix/number is enough.
        desig, prefixnumber, _ = self.parse_comet()
        return desig is not None or prefixnumber is not None
<SYSTEM_TASK:>
`True` if `targetname` appears to be an asteroid.
<END_TASK>
<USER_TASK:>
Description:
def isasteroid(self):
    """`True` if `targetname` appears to be an asteroid.

    Honors the explicit `asteroid`/`comet` flags first; only when both
    are unset does it fall back to parsing the target name.
    """
    if self.asteroid is not None:
        return self.asteroid
    elif self.comet is not None:
        return not self.comet
    else:
        # Bug fix: the original `any(...) is not None` was always True,
        # because `any` returns a bool which is never None.  Report
        # whether any parsed component was actually found instead.
        return any(field is not None for field in self.parse_asteroid())
<SYSTEM_TASK:>
Set a range of epochs, all times are UT
<END_TASK>
<USER_TASK:>
Description:
def set_epochrange(self, start_epoch, stop_epoch, step_size):
    """Store a range of epochs to query; all times are UT.

    :param start_epoch: str; start epoch, 'YYYY-MM-DD [HH-MM-SS]'
    :param stop_epoch: str; final epoch, 'YYYY-MM-DD [HH-MM-SS]'
    :param step_size: str; epoch step size, e.g. '1d' (1 day),
        '10m' (10 minutes), ...
    :return: None

    Dates are mandatory; if no time is given, midnight is assumed.
    """
    # Simply record the requested range; validation happens at query time.
    (self.start_epoch, self.stop_epoch, self.step_size) = (
        start_epoch, stop_epoch, step_size)
    return None
<SYSTEM_TASK:>
Set a list of discrete epochs, epochs have to be given as Julian
<END_TASK>
<USER_TASK:>
Description:
def set_discreteepochs(self, discreteepochs):
    """Store a list of discrete epochs, given as Julian Dates.

    :param discreteepochs: array_like; list or 1D array of floats or
        strings (a scalar is wrapped into a one-element list)
    :return: None
    """
    # Accept a bare scalar for convenience by boxing it.
    if isinstance(discreteepochs, (list, np.ndarray)):
        epochs = discreteepochs
    else:
        epochs = [discreteepochs]
    self.discreteepochs = list(epochs)
<SYSTEM_TASK:>
Add the following array of urls to the resource base urls
<END_TASK>
<USER_TASK:>
Description:
def prepend_urls(self):
    """ Add the following array of urls to the resource base urls.

    Exposes view/download endpoints addressed either by file name or by
    primary key (both 'verb/<name>' and '<pk-or-name>/verb' forms), plus
    detail views addressed by name or numeric id.
    """
    return [
        # view/<name> and download/<name> (verb-first forms)
        url(r"^(?P<resource_name>%s)/view/(?P<name>[\w\d_.-]+)%s$" % (self._meta.resource_name, trailing_slash()),
            self.wrap_view('view'), name="api_fileitem_view"),
        url(r"^(?P<resource_name>%s)/download/(?P<name>[\w\d_.-]+)%s$" % (
            self._meta.resource_name, trailing_slash()), self.wrap_view('download'), name="api_fileitem_download"),
        # <pk>/download and <name>/download (verb-last forms)
        url(r"^(?P<resource_name>%s)/(?P<pk>\w[\w/-]*)/download%s$" % (self._meta.resource_name, trailing_slash()),
            self.wrap_view('download'), name="api_fileitem_download"),
        url(r"^(?P<resource_name>%s)/(?P<name>[\w\d_.-]+)/download%s$"
            % (self._meta.resource_name, trailing_slash()), self.wrap_view('download'),
            name="api_fileitem_download"),
        # <pk>/view and <name>/view
        url(r"^(?P<resource_name>%s)/(?P<pk>\w[\w/-]*)/view%s$"
            % (self._meta.resource_name, trailing_slash()), self.wrap_view('view'), name="api_fileitem_view"),
        url(r"^(?P<resource_name>%s)/(?P<name>[\w\d_.-]+)/view%s$"
            % (self._meta.resource_name, trailing_slash()), self.wrap_view('view'), name="api_fileitem_view"),
        # detail views by name or by numeric id
        url(r"^(?P<resource_name>%s)/(?P<name>[\w\d_.-]+)/$"
            % self._meta.resource_name, self.wrap_view('dispatch_detail'), name="api_dispatch_detail_name"),
        url(r"^(?P<resource_name>%s)/(?P<id>[\d]+)/$"
            % self._meta.resource_name, self.wrap_view('dispatch_detail'), name="api_dispatch_detail"),
    ]
<SYSTEM_TASK:>
Sets attributes of this user object.
<END_TASK>
<USER_TASK:>
Description:
def set(self, attr_dict):
    """Sets attributes of this user object.

    :type attr_dict: dict
    :param attr_dict: Parameters to set, with attribute keys.

    :rtype: :class:`.Base`
    :return: The current object.
    """
    # The id attribute keeps its public name; everything else is stored
    # under a leading-underscore (non-public) name.
    for key, value in attr_dict.items():
        target = key if key == self._id_attribute else u"_" + key
        setattr(self, target, value)
    return self
<SYSTEM_TASK:>
Detects insecure settings and reports them to the client-side context.
<END_TASK>
<USER_TASK:>
Description:
def security_warnings(request, PROXY_ALLOWED_HOSTS=()):
    """ Detects insecure settings and reports them to the client-side context. """
    # Fall back to the Django setting only when no explicit value is given
    # (lazy `or` keeps settings untouched for a non-empty argument).
    hosts = PROXY_ALLOWED_HOSTS or getattr(settings, 'PROXY_ALLOWED_HOSTS', ())
    warnings = []
    if hosts and '*' in hosts:
        warnings.append(dict(
            title=_('Insecure setting detected.'),
            description=_('A wildcard is included in the PROXY_ALLOWED_HOSTS setting.')))
    return dict(warnings=warnings)
<SYSTEM_TASK:>
Format and convert units to be more human readable
<END_TASK>
<USER_TASK:>
Description:
def conv_units(val, meta):
    """
    Format and convert units to be more human readable.

    Repeatedly divides by 1024 (up to four times) and appends the
    matching suffix from UNITS_SUFFIX.

    @return new val with converted units, or the original val when it is
            falsy, meta is falsy, or val is not numeric
    """
    if not val or not meta:
        return val
    try:
        amount = float(val)
    except ValueError:
        # Non-numeric input: log and hand the value back untouched.
        logging.error("unable to apply convert units for %s" % val)
        return val
    idx = 0
    while amount > 1024 and idx < 4:
        amount /= 1024
        idx += 1
    return "%.2f%s" % (amount, UNITS_SUFFIX[idx])
<SYSTEM_TASK:>
Align columns, including column headers
<END_TASK>
<USER_TASK:>
Description:
def align(self, arr):
    """
    Align columns, including column headers.

    :param arr: record-style array (numpy recarray-like) indexable both
        by column header and by row number
    :return: tuple (aligned arr, padded column headers, column widths)

    NOTE: the tuple-unpacking lambdas below are Python 2-only syntax;
    this module cannot run unmodified on Python 3.
    """
    if arr is None:
        return arr
    c_hdrs = self._get_col_hdrs()
    # Optionally embed the header into every cell as "header:value".
    if self.show_col_hdr_in_cell:
        for hdr in c_hdrs:
            arr[hdr] = map(lambda col: ":".join([hdr, str(col)]), arr[hdr])
    # Column width is the widest cell; include the header itself only
    # when headers are going to be displayed.
    if self.show_col_hdrs:
        widths = [max(len(str(col))
                  for col in arr[hdr].tolist() + [hdr]) for hdr in c_hdrs]
    else:
        widths = [max(len(str(col))
                  for col in arr[hdr].tolist()) for hdr in c_hdrs]
    # align column headers
    c_hdrs = map(lambda (c_hdr, width): c_hdr.ljust(width),
                 zip(c_hdrs, widths))
    # align data (each row becomes a tuple of left-justified strings)
    for n_row in range(len(arr)):
        arr[n_row] = tuple(map(lambda (col, width): col.ljust(width),
                               zip(arr[n_row], widths)))
    return arr, c_hdrs, widths
<SYSTEM_TASK:>
Wrap lines according to width
<END_TASK>
<USER_TASK:>
Description:
def wrap(self, string, width):
    """
    Wrap lines so none exceeds `width` characters, breaking at the
    last occurrence of `self.sep` within each window.

    Short lines keep a single trailing newline; wrapped (long) lines
    get an extra blank line appended.  Returns False on empty input
    or a non-positive width.
    """
    if not string or width <= 0:
        logging.error("invalid string: %s or width: %s" % (string, width))
        return False
    pieces = []
    for line in string.splitlines():
        if len(line) <= width:
            pieces.append(line + "\n")
            continue
        # Repeatedly find the last separator inside the current window
        # and replace the character after it with a newline.
        pos = 0
        total = len(line)
        while pos + width < total:
            pos = line[:pos + width].rfind(self.sep) + len(self.sep) - 1
            line = line[:pos] + "\n" + line[pos + 1:]
        pieces.append(line + "\n\n")
    return "".join(pieces)
<SYSTEM_TASK:>
Get metadata for a particular cell
<END_TASK>
<USER_TASK:>
Description:
def _get_meta(self, row, col):
"""
Get metadata for a particular cell
""" |
if self.meta is None:
logging.error("unable to get meta: empty section")
return {}
if not row in self._get_row_hdrs() or\
not col in self._get_col_hdrs():
logging.error("unable to get meta: cell [%s,%s] does not exist"
% (row, col))
return {}
meta_str = self.meta[col][self.irt[row]]
try:
meta = ast.literal_eval(meta_str)
if isinstance(meta, dict):
return meta
except (SyntaxError, ValueError), e:
logging.error("unable to parse meta string - %s: %s"
% (meta_str, e))
return {} |
<SYSTEM_TASK:>
Return either completion information or a call signature for
<END_TASK>
<USER_TASK:>
Description:
def in_function_call(self):
    """Return either completion information or a call signature for
    the function definition that we are on currently."""
    # Bring the in-memory module representations up to date first so
    # the evaluator works against the latest source text.
    if settings.real_time_update:
        cache.rt.update(self._user_context)
    return self._evaluator.in_function_call()
<SYSTEM_TASK:>
Return the definition of the symbol under the cursor via exact match.
<END_TASK>
<USER_TASK:>
Description:
def goto_definitions(self):
    """
    Return the definition of the symbol under the cursor via exact match.
    Goes to that definition with a buffer.

    :return: a BaseDefinition wrapping the matched element, or None when
        no definition was found.
    """
    element = self._evaluator.get_definition()
    return None if element is None else BaseDefinition(self._user_context, element)
<SYSTEM_TASK:>
Orders the specified list of modules based on their inter-dependencies.
<END_TASK>
<USER_TASK:>
Description:
def order_module_dependencies(modules, parser):
    """Orders the specified list of modules based on their inter-dependencies."""
    # De-duplicate while preserving the caller's ordering.
    unique = []
    for name in modules:
        if name not in unique:
            unique.append(name)

    # Resolve the dependencies of each module in turn;
    # _process_module_order mutates `ordered` in place so that every
    # dependency ends up above the module that needs it.
    ordered = list(unique)
    for position, name in enumerate(unique):
        _process_module_order(parser, name, position, ordered)
    return ordered
<SYSTEM_TASK:>
Adds the module and its dependencies to the result list.
<END_TASK>
<USER_TASK:>
Description:
def _process_module_order(parser, module, i, result):
    """Adds the module and its dependencies to the result list.

    :arg parser: the CodeParser used to look up (and lazily load) modules.
    :arg module: the name of the module whose needs are being resolved.
    :arg i: index of `module` within `result`; its dependencies are
        inserted at, or moved above, this position.
    :arg result: the ordered list of module names, mutated in place.
    :raises ValueError: when the module cannot be found by the parser.
    """
    #Some code might decide to use the fortpy module methods for general
    #development, ignore it since we know it will be present in the end.
    if module == "fortpy" or module == "fpy_auxiliary":
        return

    #See if the parser has alread loaded this module.
    if module not in parser.modules:
        parser.load_dependency(module, True, True, False)

    #It is possible that the parser couldn't find it, if so
    #we can't get a self-consistent ordering.
    if module in parser.modules:
        modneeds = parser.modules[module].needs
        for modn in modneeds:
            if modn not in result:
                #Since this module depends on the other, insert the other
                #above it in the list.
                result.insert(i, modn)
            else:
                x = result.index(modn)
                if x > i:
                    #We need to move this module higher up in the food chain
                    #because it is needed sooner.
                    result.remove(modn)
                    result.insert(i, modn)
            #Recurse so the dependency's own needs are placed above it too.
            newi = result.index(modn)
            _process_module_order(parser, modn, newi, result)
    else:
        raise ValueError("unable to find module {}.".format(module))
<SYSTEM_TASK:>
Lists the names of all the modules that the specified module depends
<END_TASK>
<USER_TASK:>
Description:
def list_dependencies(self, module, result):
    """Lists the names of all the modules that the specified module depends
    on.

    :arg module: the name of the module to resolve.
    :arg result: dict of module name -> file path accumulated so far;
        pass None to start a fresh traversal.
    :return: the (possibly newly created) result dict.
    """
    if result is None:
        result = {}

    # We will try at least once to load each module that we don't have.
    if module not in self.modules:
        self.load_dependency(module, True, True, False)

    if module in self.modules and module not in result:
        result[module] = self.modules[module].filepath
        # Recurse into this module's own dependency list; entries look
        # like "modname" or "modname.member", so keep only the module part.
        for dependency in self.modules[module].dependencies:
            depname = dependency.split(".")[0].lower()
            if depname not in result:
                self.list_dependencies(depname, result)

    return result
<SYSTEM_TASK:>
Parses the dependencies of the modules in the list pmodules.
<END_TASK>
<USER_TASK:>
Description:
def _parse_dependencies(self, pmodules, dependencies, recursive, greedy):
"""Parses the dependencies of the modules in the list pmodules.
:arg pmodules: a list of modules that were parsed from a *.f90 file.
:arg dependencies: when true, the dependency's dependencies will be loaded.
:arg recursive: specifies whether to continue loading dependencies to
completion; i.e. up the chain until we have every module that any
module needs to run.
:arg greedy: when true,
""" |
#See if we need to also load dependencies for the modules
if dependencies:
allkeys = [ module.name.lower() for module in pmodules ]
for key in allkeys:
for depend in self.modules[key].collection("dependencies"):
base = depend.split(".")[0]
if self.verbose and base.lower() not in self.modules:
msg.info("DEPENDENCY: {}".format(base), 2)
self.load_dependency(base, dependencies and recursive, recursive, greedy) |
<SYSTEM_TASK:>
Looks for additional docstring specifications in the correctly named
<END_TASK>
<USER_TASK:>
Description:
def _parse_docstrings(self, filepath):
"""Looks for additional docstring specifications in the correctly named
XML files in the same directory as the module.""" |
xmlpath = self.get_xmldoc_path(filepath)
if self.tramp.exists(xmlpath):
xmlstring = self.tramp.read(xmlpath)
self.modulep.docparser.parsexml(xmlstring, self.modules, xmlpath) |
<SYSTEM_TASK:>
Returns the full path to a possible XML documentation file for the
<END_TASK>
<USER_TASK:>
Description:
def get_xmldoc_path(self, filepath):
    """Returns the full path to a possible XML documentation file for the
    specified code filepath."""
    # Drop the final extension segment and substitute ".xml".
    return ".".join(filepath.split(".")[:-1]) + ".xml"
<SYSTEM_TASK:>
Gets the modified time of the file or its accompanying XML file,
<END_TASK>
<USER_TASK:>
Description:
def _get_mod_mtime(self, filepath):
"""Gets the modified time of the file or its accompanying XML file,
whichever is greater.
""" |
file_mtime = self.tramp.getmtime(filepath)
xmlpath = self.get_xmldoc_path(filepath)
if self.tramp.exists(xmlpath):
xml_mtime = self.tramp.getmtime(xmlpath)
if xml_mtime > file_mtime:
file_mtime = xml_mtime
return file_mtime |
<SYSTEM_TASK:>
Checks whether the modules in the specified file path need
<END_TASK>
<USER_TASK:>
Description:
def _check_parse_modtime(self, filepath, fname):
    """Checks whether the modules in the specified file path need
    to be reparsed because the file was changed since it was
    last loaded.

    :arg filepath: full (absolute) path to the code file.
    :arg fname: the bare file name (with extension), lower case.
    :returns: None when the in-memory modules are still current;
        [module_mtime, file_mtime] when the file changed after parsing;
        [file_mtime] when this parser has never parsed the file.
    """
    #We also want to perform a reparse if the XML documentation file for the
    #module changed, since the docs are also cached.
    file_mtime = self._get_mod_mtime(filepath)

    #If we have parsed this file and have its modules in memory, its
    #filepath will be in self._parsed. Otherwise we can load it from
    #file or from a cached pickle version.
    if filepath.lower() in self._parsed:
        #Get the name of the first module in that file from the modulefiles
        #list. Find out when it was last modified.
        module_mtime = None
        if fname in self._modulefiles:
            if len(self._modulefiles[fname]) == 0:
                msg.warn("Module file {} has no modules defined inside of it!".format(fname))
                return None
            modulename = self._modulefiles[fname][0]
            if modulename in self.modules:
                #Make sure that if there are modules with the same name but different
                #files, that we are working with the correct one.
                if filepath.lower() != self.modules[modulename].filepath:
                    msg.warn("module {} parsed ".format(modulename) +
                             "from {}, not {}".format(self.modules[modulename].filepath,
                                                      filepath))
                module_mtime = self.modules[modulename].change_time

        #NOTE: if module_mtime stays None here, we implicitly return None
        #(treat the cached version as current).
        if module_mtime is not None:
            if module_mtime < file_mtime:
                #The file has been modified since we reloaded the module.
                #Return the two times we used for the comparison so the
                #module file can be reloaded.
                return [module_mtime, file_mtime]
            else:
                return None
    else:
        #The file has never been parsed by this CodeParser. We can
        #either do a full parse or a pickle load.
        return [file_mtime]
<SYSTEM_TASK:>
Reparses the specified module file from disk, overwriting any
<END_TASK>
<USER_TASK:>
Description:
def reparse(self, filepath):
    """Reparses the specified module file from disk, overwriting any
    cached representations etc. of the module."""
    # Touching the file bumps its modification time, which invalidates
    # every cache layer; the regular parse then does a fresh load.
    self.tramp.touch(filepath)
    self.parse(filepath)
<SYSTEM_TASK:>
Adds the directory of the file at the specified path as a base
<END_TASK>
<USER_TASK:>
Description:
def _add_current_codedir(self, path):
"""Adds the directory of the file at the specified path as a base
path to find other files in.
""" |
dirpath = self.tramp.dirname(path)
if dirpath not in self.basepaths:
self.basepaths.append(dirpath)
self.rescan() |
<SYSTEM_TASK:>
Parses the specified file from either memory, cached disk or full disk
<END_TASK>
<USER_TASK:>
Description:
def isense_parse(self, filepath, modulename):
    """Parses the specified file from either memory, cached disk or full disk
    depending on whether the fetch is via SSH or not and how long it has been
    since we last checked the modification time of the file.
    """
    from datetime import datetime
    # Only re-check the file's modification time at most once a minute.
    last_check = self._last_isense_check.get(modulename)
    if last_check is None:
        self._last_isense_check[modulename] = datetime.utcnow()
        self.parse(filepath, True)
    elif (datetime.utcnow() - last_check).seconds > 60:
        self.parse(filepath, True)
        self._last_isense_check[modulename] = datetime.utcnow()
<SYSTEM_TASK:>
Parses the fortran code in the specified file.
<END_TASK>
<USER_TASK:>
Description:
def parse(self, filepath, dependencies=False, recursive=False, greedy=False):
    """Parses the fortran code in the specified file.

    :arg filepath: path to the *.f90 file to parse.
    :arg dependencies: if true, all folder paths will be searched for modules
      that have been referenced but aren't loaded in the parser.
    :arg recursive: if true, dependencies of dependencies are loaded until
      the full chain is satisfied.
    :arg greedy: if true, when a module cannot be found using a file name
      of module_name.f90, all modules in all folders are searched."""
    #If we have already parsed this file path, we should check to see if the
    #module file has changed and needs to be reparsed.
    abspath = self.tramp.abspath(filepath)
    self._add_current_codedir(abspath)
    fname = filepath.split("/")[-1].lower()
    mtime_check = self._check_parse_modtime(abspath, fname)

    #None means the cached in-memory version is still current.
    if mtime_check is None:
        return

    #Keep track of parsing times if we are running in verbose mode.
    if self.verbose:
        start_time = clock()
        msg.okay("WORKING on {0}".format(abspath), 2)

    if fname not in self._modulefiles:
        self._modulefiles[fname] = []
    if fname not in self._programfiles:
        self._programfiles[fname] = []

    #Check if we can load the file from a pickle instead of doing a time
    #consuming file system parse.
    pickle_load = False
    pprograms = []
    #A single-element mtime_check means the file was never parsed here,
    #so a cached pickle (if any) may be used instead of a full parse.
    if len(mtime_check) == 1 and settings.use_filesystem_cache:
        #We use the pickler to load the file since a cached version might
        #be good enough.
        pmodules = self.serialize.load_module(abspath, mtime_check[0], self)
        if pmodules is not None:
            for module in pmodules:
                self.modules[module.name.lower()] = module
                self._modulefiles[fname].append(module.name.lower())
            pickle_load = True
        else:
            #We have to do a full load from the file system.
            pmodules, pprograms = self._parse_from_file(abspath, fname,
                                                        dependencies, recursive, greedy)
    else:
        #We have to do a full load from the file system.
        pmodules, pprograms = self._parse_from_file(abspath, fname,
                                                    dependencies, recursive, greedy)

    #Add the filename to the list of files that have been parsed.
    self._parsed.append(abspath.lower())

    #Cache the freshly parsed modules so the next run can skip the parse.
    if not pickle_load and len(pmodules) > 0 and settings.use_filesystem_cache:
        self.serialize.save_module(abspath, pmodules)

    if self.verbose:
        msg.info("PARSED: {} modules and {} ".format(len(pmodules), len(pprograms)) +
                 "programs in {} in {}".format(fname, secondsToStr(clock() - start_time)), 2)
        for module in pmodules:
            msg.gen("\tMODULE {}".format(module.name), 2)
        for program in pprograms:
            msg.gen("\tPROGRAM {}".format(program.name), 2)
        if len(pmodules) > 0 or len(pprograms) > 0:
            msg.blank()

    self._parse_dependencies(pmodules, dependencies, recursive, greedy)
<SYSTEM_TASK:>
Rescans the base paths to find new code files.
<END_TASK>
<USER_TASK:>
Description:
def rescan(self):
    """Rescans the base paths to find new code files."""
    # Start from a clean slate so removed files disappear too.
    self._pathfiles = {}
    for basepath in self.basepaths:
        self.scan_path(basepath)
<SYSTEM_TASK:>
Loads the module with the specified name if it isn't already loaded.
<END_TASK>
<USER_TASK:>
Description:
def load_dependency(self, module_name, dependencies, recursive, greedy, ismapping = False):
    """Loads the module with the specified name if it isn't already loaded.

    :arg module_name: the name of the module to locate and parse.
    :arg dependencies: when true, the module's own dependencies are loaded.
    :arg recursive: when true, dependencies are followed to completion.
    :arg greedy: when true, every known file is parsed until the module is
      found if the default 'module_name.f90' naming fails.
    :arg ismapping: NOTE(review): unused in this body -- confirm whether
      callers rely on it before removing.
    """
    key = module_name.lower()
    if key not in self.modules:
        if key == "fortpy":
            #Manually specify the correct path to the fortpy.f90 that shipped with
            #the distribution
            from fortpy.utility import get_fortpy_templates_dir
            from os import path
            fpy_path = path.join(get_fortpy_templates_dir(), "fortpy.f90")
            self.parse(fpy_path, False, False)
            return

        #First try the conventional file name "modulename.f90".
        fkey = key + ".f90"
        if fkey in self._pathfiles:
            self.parse(self._pathfiles[fkey], dependencies, recursive)
        elif greedy:
            #The default naming doesn't match for this module
            #we will load all modules until we find the right
            #one
            self._load_greedy(key, dependencies, recursive)
        elif key in self.mappings and self.mappings[key] in self._pathfiles:
            #See if they have a mapping specified to a code file for this module name.
            if self.verbose:
                msg.info("MAPPING: using {} as the file".format(self.mappings[key]) +
                         " name for module {}".format(key))
            self.parse(self._pathfiles[self.mappings[key]], dependencies, recursive)
        elif key not in ["mkl_vsl_type", "mkl_vsl", "iso_c_binding"]:
            #The parsing can't continue without the necessary dependency modules.
            msg.err(("could not find module {}. Enable greedy search or"
                     " add a module filename mapping.".format(key)))
            #In austere mode a missing dependency is fatal.
            if self.austere:
                exit(1)
<SYSTEM_TASK:>
Keeps loading modules in the filepaths dictionary until all have
<END_TASK>
<USER_TASK:>
Description:
def _load_greedy(self, module_name, dependencies, recursive):
"""Keeps loading modules in the filepaths dictionary until all have
been loaded or the module is found.""" |
found = module_name in self.modules
allmodules = list(self._pathfiles.keys())
i = 0
while not found and i < len(allmodules):
current = allmodules[i]
if not current in self._modulefiles:
#We haven't tried to parse this file yet
self.parse(self._pathfiles[current], dependencies and recursive)
found = module_name in self.modules
i += 1 |
<SYSTEM_TASK:>
Determines which valid fortran files reside in the base path.
<END_TASK>
<USER_TASK:>
Description:
def scan_path(self, path, result = None):
    """Determines which valid fortran files reside in the base path.

    :arg path: the path to the folder to list f90 files in.
    :arg result: an optional dictionary to add results to in addition
      to populating the private member dictionary of the parser.
    """
    from fnmatch import fnmatch
    # Only the files directly inside `path` matter: take the first
    # level of the walk and stop.
    files = []
    for (dirpath, dirnames, filenames) in self.tramp.walk(path):
        files.extend(filenames)
        break

    # Emacs lock files are always ignored; a .fpyignore file in the
    # folder contributes extra glob patterns (one per line, '#' comments).
    patterns = [".#*"]
    if ".fpyignore" in files:
        for line in self.tramp.read(os.path.join(path, ".fpyignore")).split('\n'):
            stripped = line.strip()
            if stripped and stripped[0] != '#':
                patterns.append(stripped)

    # Keep *.f90 files that match none of the ignore patterns.
    for fname in files:
        if not fnmatch(fname, "*.f90"):
            continue
        if any(fnmatch(fname, pattern) for pattern in patterns):
            continue
        fullpath = os.path.join(path, fname)
        self._pathfiles[fname.lower()] = fullpath
        if result is not None:
            result[fname.lower()] = fullpath
<SYSTEM_TASK:>
Finds the code element corresponding to specified symbol
<END_TASK>
<USER_TASK:>
Description:
def tree_find(self, symbol, origin, attribute):
    """Finds the code element corresponding to specified symbol
    by searching all modules in the parser.

    :arg symbol: the name of the code element to find.
    :arg origin: an instance of a Module element who owns the text
      that is generate the find search.
    :arg attribute: one of ['dependencies', 'publics', 'members',
      'types', 'executables', 'interfaces'] that specifies which collection
      in the module should house the symbol's element.
    :returns: tuple (element, module) -- the matched code element and the
      module that owns it; either may be None when the symbol (or the
      code for its module) could not be found.
    """
    #The symbol must be accessible to the origin module, otherwise
    #it wouldn't compile. Start there, first looking at the origin
    #itself and then the other modules that it depends on.

    #Since we will be referring to this multiple times, might as
    #well get a pointer to it.
    oattr = origin.collection(attribute)
    base = None
    lorigin = None
    if symbol in oattr:
        base = oattr[symbol]
        lorigin = origin
    else:
        for module in origin.dependencies:
            #Dependency entries are either "modname" or "modname.symbol".
            usespec = module.split(".")
            if len(usespec) > 1:
                if usespec[1] == symbol:
                    #The dependency is to a specific element in the module,
                    #and it matches.
                    lorigin = self.get(usespec[0])
                else:
                    lorigin = None
            else:
                #The dependency is to the entire module!
                lorigin = self.get(usespec[0])
            #If we have code for the origin, we can search for the
            #actual base object that we are interested in
            if lorigin is not None:
                lattr = lorigin.collection(attribute)
                if symbol in lattr:
                    base = lattr[symbol]
                    break

    #By now, we either have the item we were after or we don't have
    #code for the module it needs
    return (base, lorigin)
<SYSTEM_TASK:>
Gets the executable corresponding to the specified full name.
<END_TASK>
<USER_TASK:>
Description:
def get_executable(self, fullname):
    """Gets the executable corresponding to the specified full name.

    :arg fullname: a string with modulename.executable.
    :return: the executable element, or None when the module or the
        executable cannot be found.
    """
    modname, exname = fullname.split(".")
    module = self.get(modname)
    if module is not None and exname in module.executables:
        return module.executables[exname]
    return None
<SYSTEM_TASK:>
Gets the interface corresponding to the specified full name.
<END_TASK>
<USER_TASK:>
Description:
def get_interface(self, fullname):
    """Gets the interface corresponding to the specified full name.

    :arg fullname: a string with modulename.interface.
    :return: the interface element, or None when the module or the
        interface cannot be found.
    """
    modname, iname = fullname.split(".")
    module = self.get(modname)
    if module is not None and iname in module.interfaces:
        return module.interfaces[iname]
    return None
<SYSTEM_TASK:>
Gets the module with the given name if it exists in
<END_TASK>
<USER_TASK:>
Description:
def get(self, name):
    """Gets the module with the given name if it exists in
    this code parser."""
    # Make one attempt to locate and parse the module on demand.
    if name not in self.modules:
        self.load_dependency(name, False, False, False)
    if name in self.modules:
        return self.modules[name]
    return None
<SYSTEM_TASK:>
Fetch a Bucket for the given key.
<END_TASK>
<USER_TASK:>
Description:
def get_bucket(self, key, rate=None, capacity=None, **kwargs):
    """Fetch a Bucket for the given key.

    rate and capacity might be overridden from the Throttler defaults.

    Args:
        rate (float): Units regenerated by second, or None to keep
            Throttler defaults
        capacity (int): Maximum units available, or None to keep Throttler
            defaults
    """
    # NOTE(review): a rate/capacity of 0 falls back to the defaults
    # because of the `or` short-circuit -- confirm that is intended.
    # NOTE(review): 'storate' looks like a typo for 'storage' on both the
    # keyword and the attribute; verify against buckets.Bucket.__init__
    # and this class's constructor before renaming either side.
    return buckets.Bucket(
        key=key,
        rate=rate or self.rate,
        capacity=capacity or self.capacity,
        storate=self.storate,
        **kwargs)
<SYSTEM_TASK:>
Consume an amount for a given key.
<END_TASK>
<USER_TASK:>
Description:
def consume(self, key, amount=1, rate=None, capacity=None, **kwargs):
    """Consume an amount for a given key.

    Non-default rate/capacity can be given to override Throttler defaults.

    Returns:
        bool: whether the units could be consumed
    """
    # Delegate to the per-key bucket; it performs the leak + capacity check.
    return self.get_bucket(key, rate, capacity, **kwargs).consume(amount)
<SYSTEM_TASK:>
Consume an amount for a given key, or raise a Throttled exception.
<END_TASK>
<USER_TASK:>
Description:
def throttle(self, key, amount=1, rate=None, capacity=None,
             exc_class=Throttled, **kwargs):
    """Consume an amount for a given key, or raise a Throttled exception."""
    consumed = self.consume(key, amount, rate, capacity, **kwargs)
    if not consumed:
        raise exc_class("Request of %d unit for %s exceeds capacity."
                        % (amount, key))
<SYSTEM_TASK:>
Leak the adequate amount of data from the bucket.
<END_TASK>
<USER_TASK:>
Description:
def leak(self):
    """Leak the adequate amount of data from the bucket.

    This should be called before any consumption takes place.

    Returns:
        int: the new capacity of the bucket
    """
    capacity, last_leak = self.storage.mget(self.key_amount, self.key_last_leak,
                                            coherent=True)
    now = time.time()
    if last_leak:
        # Units drain linearly with the time elapsed since the last leak.
        elapsed = now - last_leak
        new_capacity = max(int(capacity - elapsed * self.rate), 0)
    else:
        # No leak ever recorded: treat the bucket as freshly drained.
        new_capacity = 0
    self.storage.mset({
        self.key_amount: new_capacity,
        self.key_last_leak: now,
    })
    return new_capacity
<SYSTEM_TASK:>
Consume one or more units from the bucket.
<END_TASK>
<USER_TASK:>
Description:
def consume(self, amount=1):
    """Consume one or more units from the bucket."""
    # Drain stale stock first so the capacity check is accurate.
    available = self.leak()
    if available + amount > self.capacity:
        return False
    self._incr(amount)
    return True
<SYSTEM_TASK:>
Register a converter
<END_TASK>
<USER_TASK:>
Description:
def register(self, entry_point):
    """Register a converter

    :param string entry_point: converter to register (entry point syntax)

    :raise: ValueError if already registered
    """
    converters = self.registered_converters
    if entry_point in converters:
        raise ValueError('Already registered')
    # Newest converters take precedence, so push to the front.
    converters.insert(0, entry_point)
<SYSTEM_TASK:>
Parses all the modules in the library specified by the script args.
<END_TASK>
<USER_TASK:>
Description:
def parse():
    """Parses all the modules in the library specified by the script args.
    """
    from fortpy.code import CodeParser
    cparser = CodeParser()
    if args["verbose"]:
        cparser.verbose = True

    # Collect every *.f90 file under the source folder and parse each one,
    # registering its modules with the parser.
    found = {}
    cparser.scan_path(args["source"], found)
    for fname, fpath in found.items():
        if fname not in cparser._modulefiles:
            cparser._modulefiles[fname] = []
        cparser._parse_from_file(fpath, fname, args["recursive"], args["recursive"], False)
    return cparser
<SYSTEM_TASK:>
Updates the documentation for the specified interface using the module predocs.
<END_TASK>
<USER_TASK:>
Description:
def update_docs(self, iface, module):
    """Updates the documentation for the specified interface using the module predocs."""
    # Decorating tags for this interface live in the parent module's
    # predoc collection, keyed by "modulename.interfacename".
    key = "{}.{}".format(module.name, iface.name)
    if key not in module.predocs:
        return
    entry = module.predocs[key]
    iface.docstring = self.docparser.to_doc(entry[0], iface.name)
    iface.docstart = entry[1]
    iface.docend = entry[2]
<SYSTEM_TASK:>
Generates a makefile to create the unit testing executable
<END_TASK>
<USER_TASK:>
Description:
def makefile(identifier, dependencies, makepath, compileid,
             precompile=False, inclfortpy=True, parser=None,
             executable=True, extralinks=None, inclfpyaux=False,
             makefpyaux=False, verbose=False):
    """Generates a makefile to create the unit testing executable
    for the specified test identifier.

    :arg identifier: the id of the test/library that this makefile should be made for.
    :arg dependencies: a list of the module names that need to be included in the compilation.
    :arg makepath: the path to the file to save the Makefile in.
    :arg compileid: the 'module.executable' that this Makefile is being produced for.
    :arg precompile: when True, the precompiler flags will be added to the makefile.
    :arg inclfortpy: when True, the fortpy module will be added first to the list of modules
      to compile for the executable/library.
    :arg parser: if the module file names are different from the module names, specify a
      code parser to use for converting one to the other.
    :arg executable: when true and executable is compiled for rule 'all', else the library
      is the default and the executable is set as a different rule for 'identifier'.x.
    :arg extralinks: a list of additional libraries to link in with the explicitly compiled
      f90 files. These aren't checked at all, just added to the linklist.
    :arg verbose: when True, the full compilation header will be printed with flags and module
      information; otherwise it won't.
    """
    lines = []
    #Append the general variables
    lines.append("EXENAME\t\t= {}.x".format(identifier))
    lines.append("SHELL\t\t= /bin/bash")
    lines.append("UNAME\t\t= $(shell uname)")
    lines.append("HOSTNAME\t= $(shell hostname)")
    lines.append("LOG\t\t= compile.{}.log".format(identifier if identifier is not None else "default"))
    lines.append("")
    #Now the standard entries for ifort. We will just have the ifort include
    #file so that the MPI and other options can be tested to.
    lines.append(_make_compiler_include(precompile, extralinks))
    lines.append(".SILENT:")
    lines.append("")
    #Append all the dependent modules to the makefile
    lines.append("LIBMODULESF90\t= \\")
    for modk in dependencies:
        # fortpy/fpy_auxiliary and the target itself are handled separately
        # below, so they are excluded from the library module list.
        if modk not in ["fortpy", "fpy_auxiliary", identifier]:
            if parser is not None:
                # Translate module name -> source file name via the parser.
                lines.append("\t\t{} \\".format(_get_mapping(parser, modk)))
            else:
                lines.append("\t\t{} \\".format(modk))
    if makefpyaux:
        lines.append("\t\tfpy_auxiliary.f90 \\")
    lines.append("")
    lines.append("MAINF90\t\t= {}.f90".format(identifier))
    lines.append("SRCF90\t\t= $(LIBMODULESF90) $(MAINF90)")
    lines.append("OBJSF90\t\t= $(SRCF90:.f90=.o)")
    lines.append("SLIBF90\t\t= $(LIBMODULESF90:.f90=.o)")
    lines.append("")
    #Add explicitly defined libraries that should be included when linking
    #the unit testing executable.
    # NOTE(review): _add_explicit_includes returns whether any libraries were
    # actually added, but the return value is discarded and linklibs is
    # hard-coded True -- confirm this is intentional.
    linklibs = True
    _add_explicit_includes(lines, dependencies, extralinks)
    if inclfortpy or inclfpyaux:
        import sys
        # Only emit the LIBS header here if _add_explicit_includes didn't
        # (it emits one when the config declares explicit includes).
        if len(sys.modules["config"].includes) == 0:
            lines.append("LIBS\t\t= \\")
        if inclfortpy:
            lines.append("\t\tfortpy.o \\")
        if inclfpyaux:
            lines.append("\t\tfpy_aux.so \\")
        lines.append("")
    #We need to add the error handling commands to make debugging compiling easier.
    lines.append(_make_error())
    lines.append("")
    # 'all' builds the executable unless a library target was requested.
    main = "$(EXENAME)" if executable == True else "{}.{}".format(identifier, executable)
    lines.append("all: info {}".format(main))
    lines.append(_make_info(compileid, verbose))
    lines.append(_make_exe(linklibs, identifier, verbose))
    from os import path
    makedir, makef = path.split(makepath)
    # Append the self-referencing make invocation to the last rule emitted
    # by _make_exe (order-dependent: must follow the _make_exe append).
    lines[-1] += " make -f '{}'".format(makef)
    with open(makepath, 'w') as f:
        f.writelines("\n".join(lines))
<SYSTEM_TASK:>
Adds any relevant libraries that need to be explicitly included according
<END_TASK>
<USER_TASK:>
Description:
def _add_explicit_includes(lines, dependencies=None, extralinks=None):
"""Adds any relevant libraries that need to be explicitly included according
to the fortpy configuration file. Libraries are appended to the specified
collection of lines. Returns true if relevant libraries were added.
""" |
from fortpy import config
import sys
from os import path
includes = sys.modules["config"].includes
linklibs = False
if extralinks is not None and len(extralinks) > 0:
for i, link in enumerate(extralinks):
lines.append("LBD{0:d} = {1}".format(i, link))
lines.append("")
if len(includes) > 0:
lines.append("LIBS\t\t= \\")
for library in includes:
addlib = False
if "modules" in library:
#We need to loop over the modules specified for the library and see
#if any of them are in our list of modules.
for libmod in library["modules"]:
if dependencies is None or libmod.lower() in dependencies:
addlib = True
break
else:
addlib = True
if addlib:
linklibs = True
lines.append("\t\t{} \\".format(library["path"]))
#These links specify explicit libraries to include in the final compilation.
if extralinks is not None:
for i in range(len(extralinks)):
if path.isfile(extralinks[i]):
lines.append("\t\t$(LBD{0:d}) \\".format(i))
return linklibs or (extralinks is not None and len(extralinks) > 0) |
<SYSTEM_TASK:>
Gets the original file name for a module that was mapped
<END_TASK>
<USER_TASK:>
Description:
def _get_mapping(parser, mapped):
"""Gets the original file name for a module that was mapped
when the module name does not coincide with the file name
that the module was defined in.""" |
if mapped in parser.mappings:
return parser.mappings[mapped]
else:
return mapped + ".f90" |
<SYSTEM_TASK:>
Patches the config with the given overrides.
<END_TASK>
<USER_TASK:>
Description:
def patch(self, overrides):
    """Recursively merge ``overrides`` into this config.

    Nested dictionaries are merged key by key (existing keys that are not
    overridden survive); any other value simply replaces the current one.

    Example:
        Patching ``{a: 1, b: {c: 3, d: 4}}`` with ``{b: {d: 2, e: 4}, c: 5}``
        yields ``{a: 1, b: {c: 3, d: 2, e: 4}, c: 5}``.
    """
    for key, value in iteritems(overrides or {}):
        existing = self.get(key)
        # Merge dict-into-dict recursively; anything else is a plain
        # overwrite of the current value.
        if isinstance(value, dict) and isinstance(existing, dict):
            existing.patch(value)
        else:
            self[key] = value
<SYSTEM_TASK:>
Pulls controller information.
<END_TASK>
<USER_TASK:>
Description:
def update_controller_info(self):
    """Refresh the cached controller details and status.

    Fetches the customer details and status schedule with the stored user
    token and caches them on the instance. Only a single controller per
    account is supported: the first entry in the controllers array is used.

    :returns: True if both requests succeeded, otherwise False.
    :rtype: boolean
    """
    self.controller_info = customer_details(self._user_token)
    self.controller_status = status_schedule(self._user_token)
    if self.controller_info is None or self.controller_status is None:
        return False

    info = self.controller_info
    stat = self.controller_status

    # Only one controller is supported for now; use the first in the array.
    first = info['controllers'][0]
    self.current_controller = first
    self.status = first['status']
    self.controller_id = first['controller_id']
    self.customer_id = info['customer_id']

    self.user_id = stat['user_id']
    self.relays = stat['relays']
    self.num_relays = len(self.relays)
    self.name = stat['name']
    self.watering_time = stat['watering_time']
    self.sensors = stat['sensors']
    # 'running' may be absent from the status payload.
    self.running = stat.get('running', None)
    return True
<SYSTEM_TASK:>
Check if multiple controllers are connected.
<END_TASK>
<USER_TASK:>
Description:
def controller(self):
    """Return the id of the active controller.

    :returns: the controller_id of the single assigned controller.
    :rtype: string
    :raises AttributeError: if no controllers are assigned to the account.
    :raises TypeError: if more than one controller is assigned.
    """
    if not hasattr(self, 'controller_id'):
        raise AttributeError('No controllers assigned to this account.')
    # Multiple controllers on one account are not supported.
    if len(self.controller_info['controllers']) > 1:
        raise TypeError(
            'Only one controller per account is supported.'
        )
    return self.controller_id
<SYSTEM_TASK:>
Return information about a relay.
<END_TASK>
<USER_TASK:>
Description:
def relay_info(self, relay, attribute=None):
    """Look up information about a relay.

    :param relay: index of the relay being queried.
    :type relay: int
    :param attribute: the attribute to fetch, or None for all attributes of
      that relay.
    :type attribute: string or None
    :returns: the requested attribute (or the whole relay record), or None
      when the relay index or attribute name is invalid.
    :rtype: string or int
    """
    # Reject out-of-range relay indices.
    if not 0 <= relay < self.num_relays:
        return None
    record = self.relays[relay]
    if attribute is None:
        return record
    try:
        return record[attribute]
    except KeyError:
        # Unknown attribute name.
        return None
<SYSTEM_TASK:>
Suspend or unsuspend a zone or all zones for an amount of time.
<END_TASK>
<USER_TASK:>
Description:
def suspend_zone(self, days, zone=None):
    """Suspend (or unsuspend) one zone or every zone.

    :param days: number of days to suspend; 0 or less lifts the suspension.
    :type days: int
    :param zone: index of the zone to suspend, or None for all zones.
    :type zone: int or None
    :returns: the response from set_zones() or None on an invalid zone.
    :rtype: None or string
    """
    if zone is None:
        command, relay_id = 'suspendall', None
    elif 0 <= zone <= len(self.relays) - 1:
        command, relay_id = 'suspend', self.relays[zone]['relay_id']
    else:
        # Invalid zone index.
        return None

    if days <= 0:
        # Non-positive days removes any existing suspension.
        until = 0
    else:
        # Absolute epoch time at which the suspension ends
        # (1 day = 60 * 60 * 24 = 86400 seconds).
        until = time.mktime(time.localtime()) + (days * 86400)
    return set_zones(self._user_token, command, relay_id, until)
<SYSTEM_TASK:>
Run or stop a zone or all zones for an amount of time.
<END_TASK>
<USER_TASK:>
Description:
def run_zone(self, minutes, zone=None):
    """Run (or stop) one zone or every zone for a number of minutes.

    :param minutes: how long to run; 0 or less stops the zone(s) instead.
    :type minutes: int
    :param zone: index of the zone to run, or None for all zones.
    :type zone: int or None
    :returns: the response from set_zones() or None on an invalid zone.
    :rtype: None or string
    """
    if zone is not None and not 0 <= zone <= len(self.relays) - 1:
        # Invalid zone index.
        return None
    relay_id = None if zone is None else self.relays[zone]['relay_id']

    if minutes <= 0:
        # A non-positive duration means stop rather than run.
        command = 'stopall' if zone is None else 'stop'
        duration = 0
    else:
        command = 'runall' if zone is None else 'run'
        duration = minutes * 60
    return set_zones(self._user_token, command, relay_id, duration)
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.