text_prompt (string, 100–17.7k chars) | code_prompt (string, 7–9.86k chars)
---|---|
<SYSTEM_TASK:>
Find patterns of exceptions in a file.
<END_TASK>
<USER_TASK:>
Description:
def findPatternsInFile(codes, patternFinder):
"""
Find patterns of exceptions in a file.
@param codes: code of the file to check
@param patternFinder: a visitor for pattern checking and save results
""" |
tree = ast.parse(codes)
patternFinder.visit(tree) |
<SYSTEM_TASK:>
Find patterns of exceptions in a file or folder.
<END_TASK>
<USER_TASK:>
Description:
def findAllExceptions(pathToCheck):
"""
Find patterns of exceptions in a file or folder.
@param pathToCheck: path of the file or folder to check
@return: patterns of special functions and classes
""" |
finder = PatternFinder()
if os.path.isfile(pathToCheck):
with open(pathToCheck) as f:
findPatternsInFile(f.read(), finder)
else:
for path, dirs, files in os.walk(pathToCheck):
for file in files:
_, extname = os.path.splitext(file)
if extname == ".py":
pathFile = os.path.join(path, file)
with open(pathFile) as f:
findPatternsInFile(f.read(), finder)
return finder.patternsFunc, finder.patternsClass |
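A minimal usage sketch (not from the source), assuming the two helpers above are importable from this module; the path is hypothetical.

# Hypothetical driver: collect getattr-based name patterns from a source tree.
funcPatterns, classPatterns = findAllExceptions("myproject/")
print("function name prefixes:", sorted(funcPatterns))
print("class name prefixes:", sorted(classPatterns))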
<SYSTEM_TASK:>
Be invoked when visiting a node of function call.
<END_TASK>
<USER_TASK:>
Description:
def visit_Call(self, nodeCall):
"""
Be invoked when visiting a node of function call.
@param nodeCall: currently visiting node
""" |
super(PatternFinder, self).generic_visit(nodeCall)
# Capture assignment like 'f = getattr(...)'.
if hasattr(nodeCall.func, "func"):
# In this case, the statement should be
# 'f = getattr(...)()'.
nodeCall = nodeCall.func
# Make sure the function's name is 'getattr'.
if not hasattr(nodeCall.func, "id"):
return
if nodeCall.func.id != "getattr":
return
# Capture 'f = getattr(foo, "bar_%s" % baz )' or
# 'f = getattr(foo, "bar_" + baz )'.
nodeArgument = nodeCall.args[1]
if not isinstance(nodeArgument, ast.BinOp):
return
operation = nodeArgument.op
if type(operation) not in [ast.Mod, ast.Add]:
return
nodePattern = nodeArgument.left
if not isinstance(nodePattern, ast.Str):
return
pattern = nodePattern.s
if not ((type(operation) == ast.Add and pattern.endswith("_")) or
(pattern.count("%s") == 1 and pattern.endswith("_%s"))):
return
pattern = pattern.replace("%s", "")
if pattern[:1].isalpha() and not pattern[:1].islower():
self.patternsClass.add(pattern)
else:
self.patternsFunc.add(pattern) |
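To make the detection rule concrete, here is a hedged sketch (not from the source) that feeds an invented code sample through the helpers above; it assumes PatternFinder subclasses ast.NodeVisitor and a Python version where ast.Str is still available (3.11 or earlier).

sample = (
    "handler = getattr(obj, 'render_%s' % kind)\n"
    "factory = getattr(obj, 'Make_' + name)()\n"
)
finder = PatternFinder()            # visitor defined above
findPatternsInFile(sample, finder)
print(finder.patternsFunc)          # expected: {'render_'}
print(finder.patternsClass)         # expected: {'Make_'}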
<SYSTEM_TASK:>
Returns a duplicate of this instance.
<END_TASK>
<USER_TASK:>
Description:
def copy(self):
"""
Returns a duplicate of this instance.
:return <Query>
""" |
options = {
'op': self.__op,
'caseSensitive': self.__caseSensitive,
'value': copy.copy(self.__value),
'inverted': self.__inverted,
'functions': copy.copy(self.__functions),
'math': copy.copy(self.__math)
}
return orb.Query(self.__model, self.__column, **options) |
<SYSTEM_TASK:>
Returns an inverted copy of this query.
<END_TASK>
<USER_TASK:>
Description:
def inverted(self):
"""
Returns an inverted copy of this query.
:return <orb.Query>
""" |
out = self.copy()
out.setInverted(not self.isInverted())
return out |
<SYSTEM_TASK:>
Creates a new Query object from the given JSON data.
<END_TASK>
<USER_TASK:>
Description:
def fromJSON(jdata):
"""
Creates a new Query object from the given JSON data.
:param jdata | <dict>
:return <orb.Query> || <orb.QueryCompound>
""" |
if jdata['type'] == 'compound':
queries = [orb.Query.fromJSON(jquery) for jquery in jdata['queries']]
out = orb.QueryCompound(*queries)
out.setOp(orb.QueryCompound.Op(jdata['op']))
return out
else:
if jdata.get('model'):
model = orb.schema.model(jdata.get('model'))
if not model:
raise orb.errors.ModelNotFound(schema=jdata.get('model'))
else:
column = (model, jdata['column'])
else:
column = (jdata['column'],)
query = orb.Query(*column)
query.setOp(orb.Query.Op(jdata.get('op', 'Is')))
query.setInverted(jdata.get('inverted', False))
query.setCaseSensitive(jdata.get('caseSensitive', False))
query.setValue(jdata.get('value'))
# restore the function information
for func in jdata.get('functions', []):
query.addFunction(orb.Query.Function(func))
# restore the math information
for entry in jdata.get('math', []):
query.addMath(orb.Query.Math(entry.get('op')), entry.get('value'))
return query |
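A hedged sketch of the JSON shape this method consumes, based only on the keys read above; the model and column names are invented and assume a registered orb schema.

# Hypothetical payload mirroring the keys fromJSON() reads above.
jdata = {
    'type': 'query',            # anything other than 'compound'
    'model': 'User',            # invented model name
    'column': 'username',
    'op': 'Is',
    'value': 'bob.smith',
    'inverted': False,
    'caseSensitive': False,
    'functions': [],
    'math': [],
}
q = orb.Query.fromJSON(jdata)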
<SYSTEM_TASK:>
Returns any columns used within this query.
<END_TASK>
<USER_TASK:>
Description:
def columns(self, model=None):
"""
Returns any columns used within this query.
:return [<orb.Column>, ..]
""" |
for query in self.__queries:
for column in query.columns(model=model):
yield column |
<SYSTEM_TASK:>
Expands any shortcuts that were created for this query. Shortcuts
<END_TASK>
<USER_TASK:>
Description:
def expand(self, model=None, ignoreFilter=False):
"""
Expands any shortcuts that were created for this query. Shortcuts
provide the user access to joined methods using the '.' accessor to
access individual columns for referenced tables.
:param model | <orb.Model>
:usage |>>> from orb import Query as Q
|>>> # lookup the 'username' of foreign key 'user'
|>>> Q('user.username') == 'bob.smith'
:return <orb.Query> || <orb.QueryCompound>
""" |
queries = []
current_records = None
for query in self.__queries:
sub_q = query.expand(model)
if not sub_q:
continue
# chain together joins into sub-queries
if ((isinstance(sub_q, orb.Query) and isinstance(sub_q.value(), orb.Query)) and
sub_q.value().model(model) != sub_q.model(model)):
sub_model = sub_q.value().model(model)
sub_col = sub_q.value().column()
new_records = sub_model.select(columns=[sub_col])
sub_q = sub_q.copy()
sub_q.setOp(sub_q.Op.IsIn)
sub_q.setValue(new_records)
if current_records is not None and current_records.model() == sub_q.model(model):
new_records = new_records.refine(createNew=False, where=sub_q)
else:
queries.append(sub_q)
current_records = new_records
# update the existing recordset in the chain
elif (current_records is not None and
(
(isinstance(sub_q, orb.Query) and current_records.model() == query.model(model)) or
(isinstance(sub_q, orb.QueryCompound) and current_records.model() in sub_q.models(model))
)):
current_records.refine(createNew=False, where=sub_q)
# clear out the chain and move on to the next query set
else:
current_records = None
queries.append(query)
return QueryCompound(*queries, op=self.op()) |
<SYSTEM_TASK:>
Negates this instance and returns it.
<END_TASK>
<USER_TASK:>
Description:
def negated(self):
"""
Negates this instance and returns it.
:return self
""" |
op = QueryCompound.Op.And if self.__op == QueryCompound.Op.Or else QueryCompound.Op.Or
return QueryCompound(*self.__queries, op=op) |
<SYSTEM_TASK:>
Creates a new compound query using the
<END_TASK>
<USER_TASK:>
Description:
def or_(self, other):
"""
Creates a new compound query using the
QueryCompound.Op.Or type.
:param other <Query> || <QueryCompound>
:return <QueryCompound>
:sa or_
:usage |>>> from orb import Query as Q
|>>> query = (Q('test') != 1).or_(Q('name') == 'Eric')
|>>> print query
|(test isNot 1 or name is Eric)
""" |
if not isinstance(other, (Query, QueryCompound)) or other.isNull():
return self.copy()
elif self.isNull():
return other.copy()
else:
# grow this if the operators are the same
if self.__op == QueryCompound.Op.Or:
queries = list(self.__queries) + [other]
return QueryCompound(*queries, op=QueryCompound.Op.Or)
else:
return QueryCompound(self, other, op=QueryCompound.Op.Or) |
<SYSTEM_TASK:>
Returns the tables that this query is referencing.
<END_TASK>
<USER_TASK:>
Description:
def models(self, model=None):
"""
Returns the tables that this query is referencing.
:return [ <subclass of Table>, .. ]
""" |
for query in self.__queries:
if isinstance(query, orb.Query):
yield query.model(model)
else:
for model in query.models(model):
yield model |
<SYSTEM_TASK:>
Returns the handlers defined in the static_maps.yml file located
<END_TASK>
<USER_TASK:>
Description:
def get_handlers(self):
""" Returns the handlers defined on the static_maps.yml file located
at the app config directory.
Returns: An array of static handlers to be added to the app.
""" |
handlers = []
self.static_root = self.application.get_app_component(
).get_component_path()
if self.conf:
if 'maps' in self.conf:
if self.conf['maps'] is None:
logger.warning("Maps configuration is empty. Finish the"
"static maps configuration.")
return handlers
for map_item in self.conf['maps']:
logger.debug("Mapping %s handlers." % map_item['name'])
self.static_maps[map_item['name']] = {}
self.static_maps[
map_item['name']]['root'] = self.static_root
if 'root' in map_item:
if os.path.isabs(map_item['root']):
self.static_maps[
map_item['name']]['root'] = map_item['root']
else:
self.static_maps[
map_item['name']]['root'] = os.path.abspath(
os.path.join(self.static_root,
map_item['root']))
if 'handlers' in map_item:
if map_item['handlers'] is None:
logger.warning("There is no handles mapped in the"
" static maps config file.")
else:
handlers = handlers + self.get_static_handlers(
map_item)
else:
logger.warning("No static maps configurations were provided.")
return handlers |
<SYSTEM_TASK:>
Called when a binary operation is found.
<END_TASK>
<USER_TASK:>
Description:
def visit_binop(self, node):
"""
Called when a binary operation is found.
Only check for string formatting operations.
@param node: currently checking node
""" |
if node.op != "%":
return
pattern = node.left.as_string()
# If the pattern's not a constant string, we don't know whether a
# dictionary or a tuple makes sense, so don't try to guess.
if not (pattern.startswith("'") or pattern.startswith('"')):
return
# If the pattern has things like %(foo)s, then the values can't be a
# tuple, so don't check for it.
if "%(" in pattern:
return
valueString = node.right.as_string()
tupleUsed = valueString.startswith('(')
if tupleUsed:
return
self.add_message('W9501', node=node) |
<SYSTEM_TASK:>
An interface called when visiting a function or a method.
<END_TASK>
<USER_TASK:>
Description:
def visit_functiondef(self, node):
"""
An interface called when visiting a function or a method.
@param node: the current node
""" |
if not node.is_method():
# We only check methods.
return
name = node.name
if isTestModule(node.root().name):
if name.startswith('test'):
if not name.startswith('test_'):
self.add_message('C9303', node=node)
return
else:
# Test names start with 'test_NAME' and can be like
# test_SOME_NAME or test_render_SomeCondition.
return
if name[0].isupper():
self.add_message('C9302', node=node)
return
if name.startswith('___'):
self.add_message('C9302', node=node)
return
if name.endswith('___'):
self.add_message('C9302', node=node)
return
if name.startswith('__'):
if name.endswith('___'):
# Too many trailing underscores.
self.add_message('C9302', node=node)
return
if name.endswith('_') and not name.endswith('__'):
# Too few trailing underscores.
self.add_message('C9302', node=node)
return
if name.endswith('__'):
# This is a reserved name and we don't do any checks on it.
return
name = name[2:-2]
if name.startswith('_'):
name = name[1:]
if name.endswith('_'):
self.add_message('C9302', node=node)
return
if '_' in name:
# This has an underscore in the main name.
prefix = self._getMethodNamePrefix(node)
if prefix:
# There are other names with the same prefix, so this should be
# a dispatched method.
return
self.add_message('C9302', node=node)
return
if isTestModule(node.name) and self.moduleContainsTestCase(node):
self._checkTestMethodName(node) |
<SYSTEM_TASK:>
Return the prefix of this method based on sibling methods.
<END_TASK>
<USER_TASK:>
Description:
def _getMethodNamePrefix(self, node):
"""
Return the prefix of this method based on sibling methods.
@param node: the current node
""" |
targetName = node.name
for sibling in node.parent.nodes_of_class(type(node)):
if sibling is node:
# This sibling is the node itself, so skip it.
continue
prefix = self._getCommonStart(targetName, sibling.name)
if not prefix.rstrip('_'):
# We ignore prefixes which are just underscores.
continue
return prefix
return '' |
<SYSTEM_TASK:>
Return the common prefix of the 2 strings.
<END_TASK>
<USER_TASK:>
Description:
def _getCommonStart(self, left, right):
"""
Return the common prefix of the 2 strings.
@param left: one string
@param right: another string
""" |
prefix = []
for a, b in zip(left, right):
if a == b:
prefix.append(a)
else:
break
return ''.join(prefix) |
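For illustration, the same common-prefix logic as a standalone, runnable snippet (not from the source):

def common_start(left, right):
    prefix = []
    for a, b in zip(left, right):
        if a != b:
            break
        prefix.append(a)
    return ''.join(prefix)

assert common_start('test_render_page', 'test_render_form') == 'test_render_'
assert common_start('foo', 'bar') == ''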
<SYSTEM_TASK:>
Given a username and a raw, unhashed password, get the corresponding
<END_TASK>
<USER_TASK:>
Description:
def get_user_by_password(self, username, password):
"""
Given a username and a raw, unhashed password, get the corresponding
user; returns None if no match is found.
""" |
try:
user = self.get(username=username)
except User.DoesNotExist:
return None
if bcrypt.hashpw(password, user.pass_hash) == user.pass_hash:
return user
else:
return None |
<SYSTEM_TASK:>
Middleware class factory that redirects if the user is not logged in.
<END_TASK>
<USER_TASK:>
Description:
def AuthenticatedOrRedirect(invocation):
"""
Middleware class factory that redirects if the user is not logged in.
Otherwise, nothing is affected.
""" |
class AuthenticatedOrRedirect(GiottoInputMiddleware):
def http(self, request):
if request.user:
return request
return Redirection(invocation)
def cmd(self, request):
if request.user:
return request
return Redirection(invocation)
return AuthenticatedOrRedirect |
<SYSTEM_TASK:>
Defines a decorator that can be used to filter
<END_TASK>
<USER_TASK:>
Description:
def queryFilter(self, function=None):
"""
Defines a decorator that can be used to filter
queries. It will assume the function being associated
with the decorator will take a query as an input and
return a modified query to use.
:usage
class MyModel(orb.Model):
objects = orb.ReverseLookup('Object')
@classmethod
@objects.queryFilter()
def objectsFilter(cls, query, **context):
return orb.Query()
:param function: <callable>
:return: <wrapper>
""" |
if function is not None:
self.__query_filter = function
return function
def wrapper(func):
self.__query_filter = func
return func
return wrapper |
<SYSTEM_TASK:>
Decorator factory for creating callables for native functions.
<END_TASK>
<USER_TASK:>
Description:
def Function(
library: CDLL,
name_or_ordinal: 'Union[str, int, None]'=None,
proto_factory: ('Union[ctypes.CFUNCTYPE, ctypes.WINFUNCTYPE,'
' ctypes.PYFUNCTYPE]')=CFUNCTYPE,
use_errno: bool=False,
use_last_error: bool=False,
) -> 'Callable':
"""
Decorator factory for creating callables for native functions.
Decorator factory for constructing relatively-nicely-looking callables that
call into existing native functions exposed from a dynamically-linkable
library.
:param library:
The library to look at
:param name_or_ordinal:
Typically the name of the symbol to load from the library. In rare
cases it may also be the index of the function inside the library.
:param proto_factory:
The prototype factory.
:param use_errno:
Passed directly to the prototype factory.
:param use_last_error:
Passed directly to the prototype factory.
:returns:
A decorator for a function with particular, special annotations.
.. note::
Since nested functions have hard-to-reach documentation, the
function returned from ``native()`` is documented below.
""" |
def decorator(fn: 'Callable') -> 'Callable':
metadata = _ctypes_metadata(fn)
prototype = proto_factory(
metadata.restype, *metadata.argtypes,
use_errno=use_errno, use_last_error=use_last_error)
func_spec = (name_or_ordinal or fn.__name__, library)
return prototype(func_spec, metadata.paramflags)
return decorator |
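A self-contained sketch (not from the source) of the ctypes machinery the decorator wraps: build a prototype with a factory, then instantiate it with a (symbol_name, library) tuple. It assumes a POSIX libc is available.

import ctypes
from ctypes.util import find_library

# Bind libc's strlen directly, without the decorator.
libc = ctypes.CDLL(find_library("c"))
strlen_proto = ctypes.CFUNCTYPE(ctypes.c_size_t, ctypes.c_char_p)
strlen = strlen_proto(("strlen", libc))
print(strlen(b"hello"))  # 5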
<SYSTEM_TASK:>
Lookup the name of a given vendor.
<END_TASK>
<USER_TASK:>
Description:
def lookup_vendor_name(self, vendor_id):
"""
Lookup the name of a given vendor.
:param vendor_id:
PCI vendor identifier
:ptype vendor_id:
int
:returns:
Name of the PCI vendor.
.. note::
Lookup respects various flag properties that impact the behavior
in case the name cannot be found in the local database. Refer to
the documentation of each of the ``flag_`` properties.
""" |
buf = ctypes.create_string_buffer(1024)
_logger.debug("Performing the lookup on vendor %#06x", vendor_id)
flags = self._flags | pci_lookup_mode.PCI_LOOKUP_VENDOR
pci_lookup_name1(self._access, buf, ctypes.sizeof(buf), flags,
vendor_id)
return buf.value.decode("utf-8") |
<SYSTEM_TASK:>
Lookup the name of a given device.
<END_TASK>
<USER_TASK:>
Description:
def lookup_device_name(self, vendor_id, device_id):
"""
Lookup the name of a given device.
:param vendor_id:
PCI vendor identifier
:ptype vendor_id:
int
:param device_id:
PCI device identifier
:ptype device_id:
int
:returns:
Name of the PCI device.
.. note::
Lookup respects various flag properties that impact the behavior
in case the name cannot be found in the local database. Refer to
the documentation of each of the ``flag_`` properties.
""" |
buf = ctypes.create_string_buffer(1024)
_logger.debug("Performing the lookup on vendor:device %#06x:%#06x",
vendor_id, device_id)
flags = self._flags | pci_lookup_mode.PCI_LOOKUP_DEVICE
pci_lookup_name2(self._access, buf, ctypes.sizeof(buf), flags,
vendor_id, device_id)
return buf.value.decode("utf-8") |
<SYSTEM_TASK:>
Lookup the name of a given subsystem device.
<END_TASK>
<USER_TASK:>
Description:
def lookup_subsystem_device_name(
self, vendor_id, device_id, subvendor_id, subdevice_id):
"""
Lookup the name of a given subsystem device.
:param vendor_id:
PCI vendor identifier
:ptype vendor_id:
int
:param device_id:
PCI device identifier
:ptype device_id:
int
:param subvendor_id:
PCI subvendor identifier
:ptype subvendor_id:
int
:param subdevice_id:
PCI subdevice identifier
:ptype subdevice_id:
int
:returns:
Name of the PCI subsystem device.
.. note::
Lookup respects various flag properties that impact the behavior
in case the name cannot be found in the local database. Refer to
the documentation of each of the ``flag_`` properties.
""" |
buf = ctypes.create_string_buffer(1024)
_logger.debug("Performing the lookup on vendor:device "
"subvendor:subdevice %#06x:%#06x %#06x:%#06x",
vendor_id, device_id, subvendor_id, subdevice_id)
flags = self._flags | pci_lookup_mode.PCI_LOOKUP_SUBSYSTEM
flags |= pci_lookup_mode.PCI_LOOKUP_DEVICE
pci_lookup_name4(self._access, buf, ctypes.sizeof(buf), flags,
vendor_id, device_id, subvendor_id, subdevice_id)
return buf.value.decode("utf-8") |
<SYSTEM_TASK:>
Since werkzeug request objects are immutable, this is needed to create an
<END_TASK>
<USER_TASK:>
Description:
def make_duplicate_request(request):
"""
Since werkzeug request objects are immutable, this is needed to create an
identical request object with mutable values so it can be retried after a
POST failure.
""" |
class FakeRequest(object):
method = 'GET'
path = request.path
headers = request.headers
GET = request.GET
POST = request.POST
user = getattr(request, 'user', None)
cookies = request.cookies
is_xhr = request.is_xhr
return FakeRequest() |
<SYSTEM_TASK:>
WSGI middleware for catching errors and rendering the error page.
<END_TASK>
<USER_TASK:>
Description:
def fancy_error_template_middleware(app):
"""
WSGI middleware for catching errors and rendering the error page.
""" |
def application(environ, start_response):
try:
return app(environ, start_response)
except Exception as exc:
sio = StringIO()
traceback.print_exc(file=sio)
sio.seek(0)
response = Response(
status=500,
body=render_error_page(500, exc, traceback=sio.read()),
content_type="text/html"
)
return response(environ, start_response)
return application |
<SYSTEM_TASK:>
Checks if the user browser from the given user agent is mobile.
<END_TASK>
<USER_TASK:>
Description:
def is_mobile(user_agent):
""" Checks if the user browser from the given user agent is mobile.
Args:
user_agent: A given user agent.
Returns: True if the browser from the user agent is mobile.
""" |
if user_agent:
b = reg_b.search(user_agent)
v = reg_v.search(user_agent[0:4])
return b or v
return False |
<SYSTEM_TASK:>
Goes through the expand options associated with this context and
<END_TASK>
<USER_TASK:>
Description:
def expandtree(self, model=None):
"""
Goes through the expand options associated with this context and
returns a trie of data.
:param model: subclass of <orb.Model> || None
:return: <dict>
""" |
if model and not self.columns:
schema = model.schema()
defaults = schema.columns(flags=orb.Column.Flags.AutoExpand).keys()
defaults += schema.collectors(flags=orb.Collector.Flags.AutoExpand).keys()
else:
defaults = []
expand = self.expand or defaults
if not expand:
return {}
def build_tree(parts, tree):
tree.setdefault(parts[0], {})
if len(parts) > 1:
build_tree(parts[1:], tree[parts[0]])
tree = {}
for branch in expand:
build_tree(branch.split('.'), tree)
return tree |
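A standalone sketch (not from the source) of the build_tree trie construction used above, with invented expand strings:

def build_tree(parts, tree):
    tree.setdefault(parts[0], {})
    if len(parts) > 1:
        build_tree(parts[1:], tree[parts[0]])

tree = {}
for branch in ('user.group.name', 'user.email', 'comments'):
    build_tree(branch.split('.'), tree)
print(tree)  # {'user': {'group': {'name': {}}, 'email': {}}, 'comments': {}}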
<SYSTEM_TASK:>
Returns whether or not this option set has been modified.
<END_TASK>
<USER_TASK:>
Description:
def isNull(self):
"""
Returns whether or not this option set has been modified.
:return <bool>
""" |
check = self.raw_values.copy()
scope = check.pop('scope', {})
return len(check) == 0 and len(scope) == 0 |
<SYSTEM_TASK:>
Create a module directory structure into the target directory.
<END_TASK>
<USER_TASK:>
Description:
def create_module(module, target):
""" Create a module directory structure into the target directory. """ |
module_x = module.split('.')
cur_path = ''
for path in module_x:
cur_path = os.path.join(cur_path, path)
if not os.path.isdir(os.path.join(target, cur_path)):
os.mkdir(os.path.join(target, cur_path))
if not os.path.exists(os.path.join(target, cur_path, '__init__.py')):
touch(os.path.join(target, cur_path, '__init__.py'))
return cur_path |
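A usage sketch (not from the source), assuming create_module and its touch() helper are importable; the package name and target directory are invented.

import os

# The target directory must already exist, so create it first.
os.makedirs('build', exist_ok=True)
print(create_module('foo.bar.baz', 'build'))  # foo/bar/baz on POSIX
# Each level under build/ now contains an (empty) __init__.py.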
<SYSTEM_TASK:>
Return the extension if the filename has it. None if not.
<END_TASK>
<USER_TASK:>
Description:
def get_file_extension(filename):
""" Return the extension if the filename has it. None if not.
:param filename: The filename.
:return: Extension or None.
""" |
filename_x = filename.split('.')
if len(filename_x) > 1:
if filename_x[-1].strip() != '':
return filename_x[-1]
return None |
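Expected behaviour of the helper above, shown as a few asserts (not from the source):

assert get_file_extension('report.tar.gz') == 'gz'
assert get_file_extension('Makefile') is None
assert get_file_extension('archive.') is None  # trailing dot: empty extension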
<SYSTEM_TASK:>
Writes a given data to a file located at the given path.
<END_TASK>
<USER_TASK:>
Description:
def write(path, data, binary=False):
""" Writes a given data to a file located at the given path. """ |
mode = "w"
if binary:
mode = "wb"
with open(path, mode) as f:
f.write(data)
f.close() |
<SYSTEM_TASK:>
Reads a file located at the given path.
<END_TASK>
<USER_TASK:>
Description:
def read(path):
""" Reads a file located at the given path. """ |
data = None
with open(path, 'r') as f:
data = f.read()
f.close()
return data |
<SYSTEM_TASK:>
Creates a file located at the given path.
<END_TASK>
<USER_TASK:>
Description:
def touch(path):
""" Creates a file located at the given path. """ |
with open(path, 'a') as f:
os.utime(path, None)
f.close() |
<SYSTEM_TASK:>
Loads JSON data for this column type.
<END_TASK>
<USER_TASK:>
Description:
def loadJSON(self, jdata):
"""
Loads JSON data for this column type.
:param jdata: <dict>
""" |
super(StringColumn, self).loadJSON(jdata)
# load additional info
self.__maxLength = jdata.get('maxLength') or self.__maxLength |
<SYSTEM_TASK:>
Validates the value provided is a valid email address,
<END_TASK>
<USER_TASK:>
Description:
def validate(self, value):
"""
Validates the value provided is a valid email address,
at least, on paper.
:param value: <str>
:return: <bool>
""" |
if isinstance(value, (str, unicode)) and not re.match(self.__pattern, value):
raise orb.errors.ColumnValidationError(self, 'The email provided is not valid.')
else:
return super(EmailColumn, self).validate(value) |
<SYSTEM_TASK:>
Returns the rules for this password based on the configured
<END_TASK>
<USER_TASK:>
Description:
def rules(self):
"""
Returns the rules for this password based on the configured
options.
:return: <str>
""" |
rules = ['Passwords need to be at least {0} characters long'.format(self.__minlength)]
if self.__requireUppercase:
rules.append('have at least one uppercase letter')
if self.__requireLowercase:
rules.append('have at least one lowercase letter')
if self.__requireNumber:
rules.append('have at least one number')
if self.__requireWildcard:
rules.append('have at least one non-alphanumeric character')
if len(rules) == 1:
return rules[0]
else:
return ', '.join(rules[:-1]) + ' and ' + rules[-1] |
<SYSTEM_TASK:>
Generates a new token for this column based on its bit length. This method
<END_TASK>
<USER_TASK:>
Description:
def generate(self):
"""
Generates a new token for this column based on its bit length. This method
will not ensure uniqueness in the model itself, that should be checked against
the model records in the database first.
:return: <str>
""" |
try:
model = self.schema().model()
except AttributeError:
return os.urandom(self.__bits).encode('hex')
else:
while True:
token = os.urandom(self.__bits).encode('hex')
if model.select(where=orb.Query(self) == token).count() == 0:
return token |
<SYSTEM_TASK:>
Start monitoring for hanging threads.
<END_TASK>
<USER_TASK:>
Description:
def start_monitoring(seconds_frozen=SECONDS_FROZEN,
test_interval=TEST_INTERVAL):
"""Start monitoring for hanging threads.
seconds_frozen - How long a thread must hang before its stack
trace is printed - default(10)
test_interval - Sleep time of the monitoring thread (in milliseconds)
- default(100)
""" |
thread = StoppableThread(target=monitor, args=(seconds_frozen,
test_interval))
thread.daemon = True
thread.start()
return thread |
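A hedged usage sketch (not from the source), assuming start_monitoring is importable from this module; the sleepy worker is invented.

import threading
import time

# Start the monitor, then block a worker for longer than seconds_frozen so
# its stack trace is reported to stderr.
start_monitoring(seconds_frozen=2, test_interval=100)

def sleepy():
    time.sleep(5)  # looks "hung" to the monitor

worker = threading.Thread(target=sleepy)
worker.start()
worker.join()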
<SYSTEM_TASK:>
Monitoring thread function.
<END_TASK>
<USER_TASK:>
Description:
def monitor(seconds_frozen, test_interval):
"""Monitoring thread function.
Checks if thread is hanging for time defined by
``seconds_frozen`` parameter every ``test_interval`` milliseconds.
""" |
current_thread = threading.current_thread()
hanging_threads = set()
old_threads = {} # Threads found on previous iteration.
while not current_thread.is_stopped():
new_threads = get_current_frames()
# Report died threads.
for thread_id in old_threads.keys():
if thread_id not in new_threads and thread_id in hanging_threads:
log_died_thread(thread_id)
# Process live threads.
time.sleep(test_interval/1000.)
now = time.time()
then = now - seconds_frozen
for thread_id, thread_data in new_threads.items():
# Don't report the monitor thread.
if thread_id == current_thread.ident:
continue
frame = thread_data['frame']
# If the thread is new or its stack has changed, then update the time.
if (thread_id not in old_threads or
frame != old_threads[thread_id]['frame']):
thread_data['time'] = now
# If the thread was hanging then report awaked thread.
if thread_id in hanging_threads:
hanging_threads.remove(thread_id)
log_awaked_thread(thread_id)
else:
# If stack is not changed then keep old time.
last_change_time = old_threads[thread_id]['time']
thread_data['time'] = last_change_time
# Check if this is a new hanging thread.
if (thread_id not in hanging_threads and
last_change_time < then):
# Gotcha!
hanging_threads.add(thread_id)
# Report the hanged thread.
log_hanged_thread(thread_id, frame)
old_threads = new_threads |
<SYSTEM_TASK:>
Return current threads prepared for
<END_TASK>
<USER_TASK:>
Description:
def get_current_frames():
"""Return current threads prepared for
further processing.
""" |
return dict(
(thread_id, {'frame': thread2list(frame), 'time': None})
for thread_id, frame in sys._current_frames().items()
) |
<SYSTEM_TASK:>
Return info about frame.
<END_TASK>
<USER_TASK:>
Description:
def frame2string(frame):
"""Return info about frame.
Keyword arg:
frame
Return string in format:
File {file name}, line {line number}, in
{name of parent of code object} {newline}
Line from file at line number
""" |
lineno = frame.f_lineno # or f_lasti
co = frame.f_code
filename = co.co_filename
name = co.co_name
s = '\tFile "{0}", line {1}, in {2}'.format(filename, lineno, name)
line = linecache.getline(filename, lineno, frame.f_globals).lstrip()
return s + '\n\t\t' + line |
<SYSTEM_TASK:>
Return list with string frame representation of each frame of
<END_TASK>
<USER_TASK:>
Description:
def thread2list(frame):
"""Return list with string frame representation of each frame of
thread.
""" |
l = []
while frame:
l.insert(0, frame2string(frame))
frame = frame.f_back
return l |
<SYSTEM_TASK:>
Write formatted log message to stderr.
<END_TASK>
<USER_TASK:>
Description:
def write_log(title, message=''):
"""Write formatted log message to stderr.""" |
sys.stderr.write(''.join([
title.center(40).center(60, '-'), '\n', message
])) |
<SYSTEM_TASK:>
A request comes from the controller; a request is returned.
<END_TASK>
<USER_TASK:>
Description:
def execute_input_middleware_stream(self, request, controller):
"""
A request comes from the controller; a (possibly modified) request is returned.
The controller arg is the controller object.
""" |
start_request = request
# either 'http' or 'cmd' or 'irc'
controller_name = "".join(controller.get_controller_name().split('-')[:1])
middlewares = list(self.pre_input_middleware) + list(self.input_middleware)
for m in middlewares:
to_execute = getattr(m(controller), controller_name)
if to_execute:
result = to_execute(request)
if GiottoControl in type(result).mro():
# a middleware class returned a control object (redirection, et al.)
# ignore all other middleware classes
return request, result
request = result
return start_request, request |
<SYSTEM_TASK:>
This only gets called internally from the get_suggestion method.
<END_TASK>
<USER_TASK:>
Description:
def _get_suggestions(self, filter_word=None):
"""
This only gets called internally from the get_suggestion method.
""" |
keys = self.manifest.keys()
words = []
for key in keys:
if isinstance(self.manifest[key], Manifest):
# if this key is another manifest, append a slash to the
# suggestion so the user knows there are more items under this key
words.append(key + '/')
else:
words.append(key)
if filter_word:
words = [x for x in words if x.startswith(filter_word)]
return words |
<SYSTEM_TASK:>
Returns suggestions for a path. Used in tab completion from the command
<END_TASK>
<USER_TASK:>
Description:
def get_suggestion(self, front_path):
"""
Returns suggestions for a path. Used in tab completion from the command
line.
""" |
if '/' in front_path:
# traverse the manifest, return the new manifest, then
# get those suggestions with the remaining word
splitted = front_path.split('/')
new_manifest = self.manifest
pre_path = ''
for item in splitted:
try:
new_manifest = new_manifest[item]
except KeyError:
partial_word = item
break
else:
pre_path += item + '/'
if isinstance(new_manifest, Program):
return []
matches = new_manifest._get_suggestions(partial_word)
return [pre_path + match for match in matches]
else:
return self._get_suggestions(front_path or None) |
<SYSTEM_TASK:>
Given an invocation string, determine which part is the path, the program,
<END_TASK>
<USER_TASK:>
Description:
def parse_invocation(self, invocation, controller_tag):
"""
Given an invocation string, determine which part is the path, the program,
and the args.
""" |
if invocation.endswith('/'):
invocation = invocation[:-1]
if not invocation.startswith('/'):
invocation = '/' + invocation
if invocation == '':
invocation = '/'
all_programs = self.get_urls(controllers=[controller_tag])
matching_paths = set()
for program_path in sorted(all_programs):
if invocation.startswith(program_path):
matching_paths.add(program_path)
longest = ""
for path in matching_paths:
longest = path if len(path) > len(longest) else longest
matching_path = longest
program = self.get_program(matching_path, controller=controller_tag)
if not matching_path:
raise ProgramNotFound("Can't find %s" % invocation)
program_name = matching_path.split('/')[-1]
path = "/".join(matching_path.split('/')[:-1]) + '/'
args_fragment = invocation[len(matching_path):]
superformat = None
if args_fragment.startswith('.'):
# args_fragment will be something like ".html/arg1/arg2" or just ".html"
superformat = args_fragment.split('/')[0][1:]
args = args_fragment.split('/')[1:]
args_fragment = '/'.join(args)
else:
args = args_fragment.split("/")[1:] if args_fragment else []
args_fragment = args_fragment[1:] if (args_fragment and args_fragment[0] =='/') else args_fragment
return {
'program': program,
'program_name': program_name,
'superformat': superformat,
'superformat_mime': super_accept_to_mimetype(superformat),
'args': args,
'raw_args': args_fragment,
'path': path,
'invocation': invocation,
} |
<SYSTEM_TASK:>
Returns a new instance copy of this column.
<END_TASK>
<USER_TASK:>
Description:
def copy(self):
"""
Returns a new instance copy of this column.
:return: <orb.Column>
""" |
out = type(self)(
name=self.__name,
field=self.__field,
display=self.__display,
flags=self.__flags,
default=self.__default,
defaultOrder=self.__defaultOrder,
getter=self.__gettermethod,
setter=self.__settermethod,
queryFilter=self.__query_filter,
shortcut=self.__shortcut,
readPermit=self.__readPermit,
writePermit=self.__writePermit,
order=self.__order
)
return out |
<SYSTEM_TASK:>
Performs some database math on the given field. This will be database specific
<END_TASK>
<USER_TASK:>
Description:
def dbMath(self, typ, field, op, value):
"""
Performs some database math on the given field. This will be database specific
implementations and should return the resulting database operation.
:param typ: <str>
:param field: <str>
:param op: <orb.Query.Math>
:param value: <variant>
:return: <str>
""" |
ops = orb.Query.Math(op)
format = self.MathMap.get(typ, {}).get(ops) or self.MathMap.get('Default').get(ops) or '{field}'
return format.format(field=field, value=value) |
<SYSTEM_TASK:>
Returns the database object type based on the given connection type.
<END_TASK>
<USER_TASK:>
Description:
def dbType(self, typ):
"""
Returns the database object type based on the given connection type.
:param typ: <str>
:return: <str>
""" |
return self.TypeMap.get(typ, self.TypeMap.get('Default')) |
<SYSTEM_TASK:>
Returns the default value for this column to return
<END_TASK>
<USER_TASK:>
Description:
def default(self):
"""
Returns the default value for this column to return
when generating new instances.
:return <variant>
""" |
if isinstance(self.__default, (str, unicode)):
return self.valueFromString(self.__default)
else:
return self.__default |
<SYSTEM_TASK:>
Returns the field name that this column will have inside the database.
<END_TASK>
<USER_TASK:>
Description:
def field(self):
"""
Returns the field name that this column will have inside the database.
:return <str>
""" |
if not self.__field:
default_field = inflection.underscore(self.__name)
if isinstance(self, orb.ReferenceColumn):
default_field += '_id'
self.__field = default_field
return self.__field or default_field |
<SYSTEM_TASK:>
Returns the first schema within the list that this column is a member
<END_TASK>
<USER_TASK:>
Description:
def firstMemberSchema(self, schemas):
"""
Returns the first schema within the list that this column is a member
of.
:param schemas | [<orb.TableSchema>, ..]
:return <orb.TableSchema> || None
""" |
for schema in schemas:
if schema.hasColumn(self):
return schema
return self.schema() |
<SYSTEM_TASK:>
Returns whether or not this column is a member of any of the given
<END_TASK>
<USER_TASK:>
Description:
def isMemberOf(self, schemas):
"""
Returns whether or not this column is a member of any of the given
schemas.
:param schemas | [<orb.TableSchema>, ..] || <orb.TableSchema>
:return <bool>
""" |
if type(schemas) not in (tuple, list, set):
schemas = (schemas,)
for schema in schemas:
if schema.hasColumn(self):
return True
return False |
<SYSTEM_TASK:>
Initializes the information for this class from the given JSON data blob.
<END_TASK>
<USER_TASK:>
Description:
def loadJSON(self, jdata):
"""
Initializes the information for this class from the given JSON data blob.
:param jdata: <dict>
""" |
# required params
self.__name = jdata['name']
self.__field = jdata['field']
# optional fields
self.__display = jdata.get('display') or self.__display
self.__flags = jdata.get('flags') or self.__flags
self.__defaultOrder = jdata.get('defaultOrder') or self.__defaultOrder
self.__default = jdata.get('default') or self.__default |
<SYSTEM_TASK:>
Sets whether or not this flag should be on.
<END_TASK>
<USER_TASK:>
Description:
def setFlag(self, flag, state=True):
"""
Sets whether or not this flag should be on.
:param flag | <Column.Flags>
state | <bool>
""" |
if state:
self.__flags |= flag
else:
self.__flags &= ~flag |
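A standalone sketch (not from the source) of the bitwise bookkeeping setFlag performs, with invented flag values:

REQUIRED = 0x01  # invented flag values
UNIQUE = 0x02

flags = 0
flags |= REQUIRED              # state=True: turn the flag on
flags |= UNIQUE
flags &= ~REQUIRED             # state=False: turn it back off
print(bool(flags & UNIQUE))    # True
print(bool(flags & REQUIRED))  # False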
<SYSTEM_TASK:>
Validates the inputted value against this columns rules. If the inputted value does not pass, then
<END_TASK>
<USER_TASK:>
Description:
def validate(self, value):
"""
Validates the inputted value against this column's rules. If the inputted value does not pass, then
a validation error will be raised. Override this method in column sub-classes for more
specialized validation.
:param value | <variant>
:return <bool> success
""" |
# check for the required flag
if self.testFlag(self.Flags.Required) and not self.testFlag(self.Flags.AutoAssign):
if self.isNull(value):
msg = '{0} is a required column.'.format(self.name())
raise orb.errors.ColumnValidationError(self, msg)
# otherwise, we're good
return True |
<SYSTEM_TASK:>
Generates a new column from the given json data. This should
<END_TASK>
<USER_TASK:>
Description:
def fromJSON(cls, jdata):
"""
Generates a new column from the given json data. This should
be already loaded into a Python dictionary, not a JSON string.
:param jdata | <dict>
:return <orb.Column> || None
""" |
cls_type = jdata.get('type')
col_cls = cls.byName(cls_type)
if not col_cls:
raise orb.errors.ColumnTypeNotFound(cls_type)
else:
col = col_cls()
col.loadJSON(jdata)
return col |
<SYSTEM_TASK:>
Returns the different inherited schemas for this instance.
<END_TASK>
<USER_TASK:>
Description:
def ancestry(self):
"""
Returns the different inherited schemas for this instance.
:return [<TableSchema>, ..]
""" |
if not self.inherits():
return []
schema = orb.system.schema(self.inherits())
if not schema:
return []
return schema.ancestry() + [schema] |
<SYSTEM_TASK:>
Adds the inputted column to this table schema.
<END_TASK>
<USER_TASK:>
Description:
def addColumn(self, column):
"""
Adds the inputted column to this table schema.
:param column | <orb.Column>
""" |
column.setSchema(self)
self.__columns[column.name()] = column |
<SYSTEM_TASK:>
Adds the inputted index to this table schema.
<END_TASK>
<USER_TASK:>
Description:
def addIndex(self, index):
"""
Adds the inputted index to this table schema.
:param index | <orb.Index>
""" |
index.setSchema(self)
self.__indexes[index.name()] = index |
<SYSTEM_TASK:>
Adds the inputted collector reference to this table schema.
<END_TASK>
<USER_TASK:>
Description:
def addCollector(self, collector):
"""
Adds the inputted collector reference to this table schema.
:param collector | <orb.Collector>
""" |
collector.setSchema(self)
self.__collectors[collector.name()] = collector |
<SYSTEM_TASK:>
Returns the collector that matches the inputted name.
<END_TASK>
<USER_TASK:>
Description:
def collector(self, name, recurse=True):
"""
Returns the collector that matches the inputted name.
:return <orb.Collector> || None
""" |
return self.collectors(recurse=recurse).get(name) |
<SYSTEM_TASK:>
Returns a list of the collectors for this instance.
<END_TASK>
<USER_TASK:>
Description:
def collectors(self, recurse=True, flags=0):
"""
Returns a list of the collectors for this instance.
:return {<str> name: <orb.Collector>, ..}
""" |
output = {}
if recurse and self.inherits():
schema = orb.system.schema(self.inherits())
if not schema:
raise orb.errors.ModelNotFound(schema=self.inherits())
else:
iflags = (flags & ~orb.Collector.Flags.Virtual) if flags else ~orb.Collector.Flags.Virtual
output.update(schema.collectors(recurse=recurse, flags=iflags))
output.update({c.name(): c for c in self.__collectors.values() if not flags or c.testFlag(flags)})
return output |
<SYSTEM_TASK:>
Returns the inheritance tree for this schema, traversing up the hierarchy for the inherited schema instances.
<END_TASK>
<USER_TASK:>
Description:
def inheritanceTree(self):
"""
Returns the inheritance tree for this schema, traversing up the hierarchy for the inherited schema instances.
:return: <generator>
""" |
inherits = self.inherits()
while inherits:
ischema = orb.system.schema(inherits)
if not ischema:
raise orb.errors.ModelNotFound(schema=inherits)
yield ischema
inherits = ischema.inherits() |
<SYSTEM_TASK:>
Returns the namespace that should be used for this schema, when specified.
<END_TASK>
<USER_TASK:>
Description:
def namespace(self, **context):
"""
Returns the namespace that should be used for this schema, when specified.
:return: <str>
""" |
context = orb.Context(**context)
if context.forceNamespace:
return context.namespace or self.__namespace
else:
return self.__namespace or context.namespace |
<SYSTEM_TASK:>
Convert a list of kwargs into a dictionary. Duplicates of the same keyword
<END_TASK>
<USER_TASK:>
Description:
def parse_kwargs(kwargs):
"""
Convert a list of kwargs into a dictionary. Duplicates of the same keyword
get added to a list within the dictionary.
>>> parse_kwargs(['--var1=1', '--var2=2', '--var1=3'])
{'var1': ['1', '3'], 'var2': '2'}
""" |
d = defaultdict(list)
for k, v in ((k.lstrip('-'), v) for k,v in (a.split('=') for a in kwargs)):
d[k].append(v)
ret = {}
for k, v in d.items():
# replace single item lists with just the item.
if len(v) == 1 and type(v) is list:
ret[k] = v[0]
else:
ret[k] = v
return ret |
<SYSTEM_TASK:>
Preprocess items in a dictionary or list and prepare them to be json serialized.
<END_TASK>
<USER_TASK:>
Description:
def pre_process_json(obj):
"""
Preprocess items in a dictionary or list and prepare them to be json serialized.
""" |
if type(obj) is dict:
new_dict = {}
for key, value in obj.items():
new_dict[key] = pre_process_json(value)
return new_dict
elif type(obj) is list:
new_list = []
for item in obj:
new_list.append(pre_process_json(item))
return new_list
elif hasattr(obj, 'todict'):
return dict(obj.todict())
else:
try:
json.dumps(obj)
except TypeError:
try:
json.dumps(obj.__dict__)
except TypeError:
return str(obj)
else:
return obj.__dict__
else:
return obj |
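A usage sketch (not from the source), assuming pre_process_json is importable; the payload is invented.

import json

class Point(object):
    def __init__(self, x, y):
        self.x = x
        self.y = y

# A plain object is not JSON serializable, so its __dict__ is used instead;
# plain values and lists pass through recursively.
payload = {'count': 3, 'origin': Point(1, 2), 'tags': ['a', 'b']}
print(json.dumps(pre_process_json(payload)))
# {"count": 3, "origin": {"x": 1, "y": 2}, "tags": ["a", "b"]}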
<SYSTEM_TASK:>
Build the giotto settings object. This function gets called
<END_TASK>
<USER_TASK:>
Description:
def initialize(module_name=None):
"""
Build the giotto settings object. This function gets called
at the very beginning of every request cycle.
""" |
import giotto
from giotto.utils import random_string, switchout_keyvalue
from django.conf import settings
setattr(giotto, '_config', GiottoSettings())
if not module_name:
# For testing. No settings will be set.
return
project_module = importlib.import_module(module_name)
project_path = os.path.dirname(project_module.__file__)
setattr(giotto._config, 'project_path', project_path)
try:
secrets = importlib.import_module("%s.controllers.secrets" % module_name)
except ImportError:
secrets = None
try:
machine = importlib.import_module("%s.controllers.machine" % module_name)
except ImportError:
machine = None
config = importlib.import_module("%s.controllers.config" % module_name)
if config:
for item in dir(config):
setting_value = getattr(config, item)
setattr(giotto._config, item, setting_value)
if secrets:
for item in dir(secrets):
setting_value = getattr(secrets, item)
setattr(giotto._config, item, setting_value)
else:
logging.warning("No secrets.py found")
if machine:
for item in dir(machine):
setting_value = getattr(machine, item)
setattr(giotto._config, item, setting_value)
else:
logging.warning("No machine.py found")
settings.configure(
SECRET_KEY=random_string(32),
DATABASES=get_config('DATABASES'),
INSTALLED_APPS=(module_name, 'giotto')
)
ss = get_config('session_store', None)
if ss:
class_ = switchout_keyvalue(ss)
setattr(giotto._config, "session_store", class_())
cache_engine = get_config("cache", None)
if hasattr(cache_engine, 'lower'):
# session engine was passed in as string, exchange for engine object.
class_ = switchout_keyvalue(cache_engine)
e = class_(host=get_config("cache_host", "localhost"))
setattr(giotto._config, "cache_engine", e) |
<SYSTEM_TASK:>
Use this function to get values from the config object.
<END_TASK>
<USER_TASK:>
Description:
def get_config(item, default=None):
"""
Use this function to get values from the config object.
""" |
import giotto
return getattr(giotto._config, item, default) or default |
<SYSTEM_TASK:>
check filter permissions
<END_TASK>
<USER_TASK:>
Description:
def has_object_permission(self, request, view, obj):
"""
check filter permissions
""" |
user = request.user
if not user.is_superuser and not user.is_anonymous():
valid = False
try:
ct = ContentType.objects.get_for_model(obj)
fpm = FilterPermissionModel.objects.get(user=user,
content_type=ct)
myq = QSerializer(base64=True).loads(fpm.filter)
try:
myobj = obj.__class__.objects.filter(myq).distinct().get(pk=obj.pk)
if myobj:
valid = True
except ObjectDoesNotExist:
valid = False
except ObjectDoesNotExist:
valid = True
finally:
return valid
else:
return True |
<SYSTEM_TASK:>
Remove a checker from the list of registered checkers.
<END_TASK>
<USER_TASK:>
Description:
def unregisterChecker(self, checker):
"""
Remove a checker from the list of registered checkers.
@param checker: the checker to remove
""" |
self.linter._checkers[checker.name].remove(checker)
if checker in self.linter._reports:
del self.linter._reports[checker]
if checker in self.linter.options_providers:
self.linter.options_providers.remove(checker) |
<SYSTEM_TASK:>
Find checkers which generate no allowed messages.
<END_TASK>
<USER_TASK:>
Description:
def findUselessCheckers(self, allowedMessages):
"""
Find checkers which generate no allowed messages.
@param allowedMessages: allowed messages
@return: useless checkers, remove them from pylint
""" |
uselessCheckers = []
for checkerName in self.linter._checkers:
for checker in list(self.linter._checkers[checkerName]):
messagesOfChecker = set(checker.msgs)
if not messagesOfChecker.intersection(allowedMessages):
uselessCheckers.append(checker)
return uselessCheckers |
<SYSTEM_TASK:>
Unregister useless checkers to speed up twistedchecker.
<END_TASK>
<USER_TASK:>
Description:
def restrictCheckers(self, allowedMessages):
"""
Unregister useless checkers to speed up twistedchecker.
@param allowedMessages: output messages allowed in twistedchecker
""" |
uselessCheckers = self.findUselessCheckers(allowedMessages)
# Unregister these checkers
for checker in uselessCheckers:
self.unregisterChecker(checker) |
<SYSTEM_TASK:>
Allow name exceptions by given patterns.
<END_TASK>
<USER_TASK:>
Description:
def allowPatternsForNameChecking(self, patternsFunc, patternsClass):
"""
Allow name exceptions by given patterns.
@param patternsFunc: patterns of special function names
@param patternsClass: patterns of special class names
""" |
cfgParser = self.linter.cfgfile_parser
nameChecker = self.getCheckerByName(NameChecker)
if not nameChecker:
return
if patternsFunc:
regexFuncAdd = "|((%s).+)$" % "|".join(patternsFunc)
else:
regexFuncAdd = ""
if patternsClass:
regexClassAdd = "|((%s).+)$" % "|".join(patternsClass)
else:
regexClassAdd = ""
# Modify regex for function, method and class name.
regexMethod = cfgParser.get("BASIC", "method-rgx") + regexFuncAdd
regexFunction = cfgParser.get("BASIC", "function-rgx") + regexFuncAdd
regexClass = cfgParser.get("BASIC", "class-rgx") + regexClassAdd
# Save to config parser.
cfgParser.set("BASIC", "method-rgx", regexMethod)
cfgParser.set("BASIC", "function-rgx", regexFunction)
cfgParser.set("BASIC", "class-rgx", regexClass)
# Save to name checker.
nameChecker.config.method_rgx = re.compile(regexMethod)
nameChecker.config.function_rgx = re.compile(regexFunction)
nameChecker.config.class_rgx = re.compile(regexClass) |
<SYSTEM_TASK:>
Transform a list of modules into a list of paths.
<END_TASK>
<USER_TASK:>
Description:
def getPathList(self, filesOrModules):
"""
Transform a list of modules into a list of paths.
@param filesOrModules: a list of modules (may be foo/bar.py or
foo.bar)
""" |
pathList = []
for fileOrMod in filesOrModules:
if not os.path.exists(fileOrMod):
# Maybe the given module is not a path;
# try to transform it into one.
try:
filepath = file_from_modpath(fileOrMod.split('.'))
except (ImportError, SyntaxError):
# Could not load this module.
continue
if not os.path.exists(filepath):
# Could not find this module in file system.
continue
if os.path.basename(filepath) == "__init__.py":
filepath = os.path.dirname(filepath)
else:
filepath = fileOrMod
pathList.append(filepath)
return pathList |
<SYSTEM_TASK:>
Find name exceptions in codes and allow them to be ignored
<END_TASK>
<USER_TASK:>
Description:
def setNameExceptions(self, filesOrModules):
"""
Find name exceptions in codes and allow them to be ignored
in checking.
@param filesOrModules: a list of modules (may be foo/bar.py or
foo.bar)
""" |
pathList = self.getPathList(filesOrModules)
for path in pathList:
patternsFunc, patternsClass = findAllExceptions(path)
self.allowPatternsForNameChecking(patternsFunc, patternsClass) |
<SYSTEM_TASK:>
Set up the environment and run pylint.
<END_TASK>
<USER_TASK:>
Description:
def run(self, args):
"""
Set up the environment and run pylint.
@param args: arguments will be passed to pylint
@type args: list of string
""" |
# set output stream.
if self.outputStream:
self.linter.reporter.set_output(self.outputStream)
try:
args = self.linter.load_command_line_configuration(args)
except SystemExit as exc:
if exc.code == 2: # bad options
exc.code = 32
raise
if not args:
self.displayHelp()
# Check for 'strict-epydoc' option.
if self.allowOptions and not self.linter.option_value("strict-epydoc"):
for msg in ["W9203", "W9205"]:
self.linter.disable(msg)
# insert current working directory to the python path to have a correct
# behaviour.
sys.path.insert(0, os.getcwd())
# set exceptions for name checking.
self.setNameExceptions(args)
# check for diff option.
self.diffOption = self.linter.option_value("diff")
if self.diffOption:
self.prepareDiff()
# check codes.
self.linter.check(args)
# show diff of warnings if diff option on.
if self.diffOption:
diffCount = self.showDiffResults()
exitCode = 1 if diffCount else 0
sys.exit(exitCode)
sys.exit(self.linter.msg_status) |
<SYSTEM_TASK:>
Prepare to run the checker and get diff results.
<END_TASK>
<USER_TASK:>
Description:
def prepareDiff(self):
"""
Prepare to run the checker and get diff results.
""" |
self.streamForDiff = NativeStringIO()
self.linter.reporter.set_output(self.streamForDiff) |
<SYSTEM_TASK:>
Show results when diff option on.
<END_TASK>
<USER_TASK:>
Description:
def showDiffResults(self):
"""
Show results when diff option on.
""" |
try:
oldWarnings = self.parseWarnings(self._readDiffFile())
except:
sys.stderr.write(self.errorResultRead % self.diffOption)
return 1
newWarnings = self.parseWarnings(self.streamForDiff.getvalue())
diffWarnings = self.generateDiff(oldWarnings, newWarnings)
if diffWarnings:
diffResult = self.formatWarnings(diffWarnings)
self.outputStream.write(diffResult + "\n")
return len(diffWarnings)
else:
return 0 |
<SYSTEM_TASK:>
Read content of diff file.
<END_TASK>
<USER_TASK:>
Description:
def _readDiffFile(self):
"""
Read content of diff file.
This is here to help with testing.
@return: File content.
@rtype: c{str}
""" |
with open(self.diffOption) as f:
content = f.read()
return content |
<SYSTEM_TASK:>
Generate diff between given two lists of warnings.
<END_TASK>
<USER_TASK:>
Description:
def generateDiff(self, oldWarnings, newWarnings):
"""
Generate diff between given two lists of warnings.
@param oldWarnings: parsed old warnings
@param newWarnings: parsed new warnings
@return: a dict object of diff
""" |
diffWarnings = {}
for modulename in newWarnings:
diffInModule = (
newWarnings[modulename] -
oldWarnings.get(modulename, set()))
if diffInModule:
diffWarnings[modulename] = diffInModule
return diffWarnings |
<SYSTEM_TASK:>
Transform a result string into a dict object.
<END_TASK>
<USER_TASK:>
Description:
def parseWarnings(self, result):
"""
Transform a result string into a dict object.
@param result: warnings output as a single string
@return: a dict of warnings
""" |
warnings = {}
currentModule = None
warningsCurrentModule = []
for line in result.splitlines():
if line.startswith(self.prefixModuleName):
# Save results for previous module
if currentModule:
warnings[currentModule] = set(warningsCurrentModule)
# Initial results for current module
moduleName = line.replace(self.prefixModuleName, "")
currentModule = moduleName
warningsCurrentModule = []
elif re.search(self.regexLineStart, line):
warningsCurrentModule.append(line)
else:
if warningsCurrentModule:
warningsCurrentModule[-1] += "\n" + line
# Save warnings for last module
if currentModule:
warnings[currentModule] = set(warningsCurrentModule)
return warnings |
<SYSTEM_TASK:>
Format warnings to a list of results.
<END_TASK>
<USER_TASK:>
Description:
def formatWarnings(self, warnings):
"""
Format warnings to a list of results.
@param warnings: a dict of warnings produced by parseWarnings
@return: a list of warnings in string
""" |
lines = []
for modulename in sorted(warnings):
lines.append(self.prefixModuleName + modulename)
lines.extend(sorted(warnings[modulename],
key=lambda x: x.split(":")[1]))
return "\n".join(lines) |
<SYSTEM_TASK:>
Remove records with length-outliers above 3 standard deviations from the median.
<END_TASK>
<USER_TASK:>
Description:
def remove_length_outliers(df, columnname):
"""Remove records with length-outliers above 3 standard deviations from the median.""" |
return df[df[columnname] < (np.median(df[columnname]) + 3 * np.std(df[columnname]))] |
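A usage sketch with invented read lengths (not from the source), assuming the function above and a pandas/numpy environment:

import pandas as pd

# One extreme value lies more than three standard deviations above the
# median length and is dropped.
df = pd.DataFrame({"lengths": [900, 950, 980, 990, 1000, 1000,
                               1020, 1050, 1100, 10000]})
filtered = remove_length_outliers(df, "lengths")
print(len(df), "->", len(filtered))  # 10 -> 9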
<SYSTEM_TASK:>
Calculate average basecall quality of a read.
<END_TASK>
<USER_TASK:>
Description:
def ave_qual(quals, qround=False, tab=errs_tab(128)):
"""Calculate average basecall quality of a read.
Receive the integer quality scores of a read and return the average
quality for that read: first convert Phred scores to probabilities,
calculate the average error probability, and convert the average back
to the Phred scale.
""" |
if quals:
mq = -10 * log(sum([tab[q] for q in quals]) / len(quals), 10)
if qround:
return round(mq)
else:
return mq
else:
return None |
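A self-contained sketch (not from the source) of the Phred arithmetic the docstring describes; errs_tab is not shown in this excerpt and is assumed to be a precomputed table of 10**(-q/10) error probabilities.

from math import log10

quals = [20, 30, 10]                       # invented per-base qualities
probs = [10 ** (-q / 10) for q in quals]   # [0.01, 0.001, 0.1]
avg_q = -10 * log10(sum(probs) / len(probs))
print(round(avg_q, 1))                     # 14.3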
<SYSTEM_TASK:>
Call calculation functions and write stats file.
<END_TASK>
<USER_TASK:>
Description:
def write_stats(datadfs, outputfile, names=[]):
"""Call calculation functions and write stats file.
This function takes a list of DataFrames,
and will create a column for each in the tab separated output.
""" |
if outputfile == 'stdout':
output = sys.stdout
else:
output = open(outputfile, 'wt')
stats = [Stats(df) for df in datadfs]
features = {
"Number of reads": "number_of_reads",
"Total bases": "number_of_bases",
"Total bases aligned": "number_of_bases_aligned",
"Median read length": "median_read_length",
"Mean read length": "mean_read_length",
"Read length N50": "n50",
"Average percent identity": "average_identity",
"Median percent identity": "median_identity",
"Active channels": "active_channels",
"Mean read quality": "mean_qual",
"Median read quality": "median_qual",
}
max_len = max([len(k) for k in features.keys()])
try:
max_num = max(max([len(str(s.number_of_bases)) for s in stats]),
max([len(str(n)) for n in names])) + 6
except ValueError:
max_num = max([len(str(s.number_of_bases)) for s in stats]) + 6
output.write("{:<{}}{}\n".format('General summary:', max_len,
" ".join(['{:>{}}'.format(n, max_num) for n in names])))
for f in sorted(features.keys()):
try:
output.write("{f:{pad}}{v}\n".format(
f=f + ':',
pad=max_len,
v=feature_list(stats, features[f], padding=max_num)))
except KeyError:
pass
if all(["quals" in df for df in datadfs]):
long_features = {
"Top 5 longest reads and their mean basecall quality score":
["top5_lengths", range(1, 6)],
"Top 5 highest mean basecall quality scores and their read lengths":
["top5_quals", range(1, 6)],
"Number, percentage and megabases of reads above quality cutoffs":
["reads_above_qual", [">Q" + str(q) for q in stats[0].qualgroups]],
}
for lf in sorted(long_features.keys()):
output.write(lf + "\n")
for i in range(5):
output.write("{}:\t{}\n".format(
long_features[lf][1][i], feature_list(stats, long_features[lf][0], index=i))) |
<SYSTEM_TASK:>
A function to override report_error in pycodestyle.
<END_TASK>
<USER_TASK:>
Description:
def errorRecorder(self, lineNumber, offset, text, check):
"""
A function to override report_error in pycodestyle and record output warnings.
@param lineNumber: line number
@param offset: column offset
@param text: warning message
@param check: check object in pycodestyle
""" |
code = text.split(" ")[0]
lineOffset = self.report.line_offset
self.warnings.append((lineOffset + lineNumber,
offset + 1, code, text)) |
<SYSTEM_TASK:>
Run pycodestyle checker and record warnings.
<END_TASK>
<USER_TASK:>
Description:
def run(self):
"""
Run pycodestyle checker and record warnings.
""" |
# Set a stream to replace stdout, and get results in it
stdoutBak = sys.stdout
streamResult = StringIO()
sys.stdout = streamResult
try:
pycodestyle.Checker.check_all(self)
finally:
sys.stdout = stdoutBak |
<SYSTEM_TASK:>
Map pycodestyle results to messages in pylint, then output them.
<END_TASK>
<USER_TASK:>
Description:
def _outputMessages(self, warnings, node):
"""
Map pycodestyle results to messages in pylint, then output them.
        @param warnings: a list of tuples of
            (line number, column offset, pycodestyle message id, message text)
        @param node: the node to which the resulting pylint messages are attached
""" |
if not warnings:
# No warnings were found
return
for warning in warnings:
linenum, offset, msgidInPyCodeStyle, text = warning
if text.startswith(msgidInPyCodeStyle):
# If the PyCodeStyle code is at the start of the text, trim it out
text = text[len(msgidInPyCodeStyle) + 1:]
if msgidInPyCodeStyle in self.mapPyCodeStyleMessages:
msgid, patternArguments = self.mapPyCodeStyleMessages[msgidInPyCodeStyle]
if (not self.pycodestyleEnabled and
msgid in self.standardPyCodeStyleMessages):
continue
arguments = []
if patternArguments:
matchResult = re.search(patternArguments, text)
if matchResult:
arguments = matchResult.groups()
self.add_message(msgid, line=linenum, args=arguments, node=node) |
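The mapPyCodeStyleMessages table pairs each pycodestyle code with a pylint message id and an optional regex used to extract message arguments. A hypothetical fragment (the ids and patterns here are illustrative, not the checker's real table):

mapPyCodeStyleMessages = {
    # pycodestyle code -> (pylint message id, regex for argument extraction or None)
    "E501": ("W9501", r"\((\d+) > (\d+) characters\)"),  # line too long -> (actual, limit)
    "W291": ("W9291", None),                             # trailing whitespace, no arguments
}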
<SYSTEM_TASK:>
Returns the proper log level core based on a given string
<END_TASK>
<USER_TASK:>
Description:
def log_level_from_string(str_level):
""" Returns the proper log level core based on a given string
:param str_level: Log level string
:return: The log level code
""" |
levels = {
'CRITICAL': logging.CRITICAL,
'ERROR': logging.ERROR,
'WARNING': logging.WARNING,
'INFO': logging.INFO,
'DEBUG': logging.DEBUG,
}
try:
return levels[str_level.upper()]
except KeyError:
pass
except AttributeError:
if str_level in [logging.DEBUG, logging.INFO, logging.WARNING,
logging.ERROR, logging.CRITICAL]:
return str_level
return logging.NOTSET |
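A few usage examples, relying only on the behaviour visible above:

import logging

# String names are case-insensitive; unknown values fall back to NOTSET,
# and an already-numeric level is passed through unchanged.
assert log_level_from_string("debug") == logging.DEBUG
assert log_level_from_string("Warning") == logging.WARNING
assert log_level_from_string("nonsense") == logging.NOTSET
assert log_level_from_string(logging.ERROR) == logging.ERROR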
<SYSTEM_TASK:>
Breaks a package string in module and class.
<END_TASK>
<USER_TASK:>
Description:
def get_config_from_package(package):
""" Breaks a package string in module and class.
:param package: A package string.
:return: A config dict with class and module.
""" |
package_x = package.split('.')
package_conf = {}
package_conf['class'] = package_x[-1]
    package_conf['module'] = '.'.join(package_x[:-1])
return package_conf |
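A quick usage example with a hypothetical dotted path:

conf = get_config_from_package("my.package.MyClass")
# conf == {'class': 'MyClass', 'module': 'my.package'}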
<SYSTEM_TASK:>
Returns a class from a module and a class name parameters.
<END_TASK>
<USER_TASK:>
Description:
def get_class_from_module(module, class_name):
""" Returns a class from a module and a class name parameters.
This function is used by get_class_from_config and get_class_from_name.
Example:
>>> get_class_from_module("my.module", "MyClass")
:param basestring module: The module name.
:param basestring class_name: The class name.
:return: The class resolved by the module and class name provided.
""" |
import importlib
module = importlib.import_module(module)
return getattr(module, class_name) |
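Combined with get_config_from_package above, a dotted path resolves to an actual class; here a standard-library class is used purely as a stand-in target:

conf = get_config_from_package("collections.OrderedDict")
cls = get_class_from_module(conf['module'], conf['class'])
assert cls.__name__ == "OrderedDict"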
<SYSTEM_TASK:>
Returns the parsed structure from a yaml config file.
<END_TASK>
<USER_TASK:>
Description:
def load_yaml_config_file(path):
""" Returns the parsed structure from a yaml config file.
:param path: Path where the yaml file is located.
:return: The yaml configuration represented by the yaml file.
""" |
result = None
    with open(path, 'r') as stream:
        result = yaml.safe_load(stream)
return result |
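A small illustration of what safe_load returns for a typical file (the contents here are made up):

import yaml

# Hypothetical configuration text and the structure yaml.safe_load produces from it.
text = "log:\n  level: DEBUG\napp:\n  port: 8888\n"
assert yaml.safe_load(text) == {"log": {"level": "DEBUG"}, "app": {"port": 8888}}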
<SYSTEM_TASK:>
Populates config with data from the configuration data dict. It handles
<END_TASK>
<USER_TASK:>
Description:
def process_config(config, config_data):
""" Populates config with data from the configuration data dict. It handles
components, data, log, management and session sections from the
configuration data.
:param config: The config reference of the object that will hold the
configuration data from the config_data.
:param config_data: The configuration data loaded from a configuration
file.
""" |
if 'components' in config_data:
process_components_config_section(config, config_data['components'])
if 'data' in config_data:
process_data_config_section(config, config_data['data'])
if 'log' in config_data:
process_log_config_section(config, config_data['log'])
if 'management' in config_data:
process_management_config_section(config, config_data['management'])
if 'session' in config_data:
process_session_config_section(config, config_data['session']) |
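A hypothetical config_data dict, shaped as load_yaml_config_file would return it; only the sections that are present get dispatched to their processors:

config_data = {
    "log": {"level": "DEBUG"},
    "components": [{"id": "internal", "enabled": True}],
}
# process_config(config, config_data) would call process_log_config_section and
# process_components_config_section only, leaving data, management and session untouched.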
<SYSTEM_TASK:>
Populates config with data from the configuration data dict. It handles
<END_TASK>
<USER_TASK:>
Description:
def process_app_config(config, config_data):
""" Populates config with data from the configuration data dict. It handles
everything that process_config does plus application section.
:param config: The config reference of the object that will hold the
configuration data from the config_data.
:param config_data: The configuration data loaded from a configuration
file.
""" |
process_config(config, config_data)
    # If 'apps' is in the config data, firenado is running in multi-app mode
if 'apps' in config_data:
config.app['multi'] = True
process_apps_config_session(config, config_data['apps'])
else:
        # Otherwise the single app definition lives in the 'app' section of
        # the firenado config file
if 'app' in config_data:
process_app_config_section(config, config_data['app']) |
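The two layouts this function distinguishes look roughly like this (keys are illustrative):

multi_app_data = {"apps": [{"id": "blog"}, {"id": "admin"}]}   # sets config.app['multi'] = True
single_app_data = {"app": {"id": "blog", "port": 8888}}        # handled by process_app_config_section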
<SYSTEM_TASK:>
Processes the app section from a configuration data dict.
<END_TASK>
<USER_TASK:>
Description:
def process_app_config_section(config, app_config):
""" Processes the app section from a configuration data dict.
:param config: The config reference of the object that will hold the
configuration data from the config_data.
:param app_config: App section from a config data dict.
""" |
if 'addresses' in app_config:
config.app['addresses'] = app_config['addresses']
if 'component' in app_config:
config.app['component'] = app_config['component']
if 'data' in app_config:
if 'sources' in app_config['data']:
config.app['data']['sources'] = app_config['data']['sources']
if 'id' in app_config:
config.app['id'] = app_config['id']
if 'login' in app_config:
if 'urls' in app_config['login']:
for url in app_config['login']['urls']:
config.app['login']['urls'][url['name']] = url['value']
if 'pythonpath' in app_config:
config.app['pythonpath'] = app_config['pythonpath']
if 'port' in app_config:
config.app['port'] = app_config['port']
if 'process' in app_config:
if 'num_processes' in app_config['process']:
config.app['process']['num_processes'] = app_config[
'process']['num_processes']
if 'url_root_path' in app_config:
root_url = app_config['url_root_path'].strip()
if root_url[0] == "/":
root_url = root_url[1:]
if root_url == "":
root_url = None
config.app['url_root_path'] = root_url
if 'settings' in app_config:
config.app['settings'] = app_config['settings']
if 'socket' in app_config:
config.app['socket'] = app_config['socket']
if 'static_path' in app_config:
config.app['static_path'] = app_config['static_path']
if 'static_url_prefix' in app_config:
config.app['static_url_prefix'] = app_config['static_url_prefix']
if 'type' in app_config:
config.app['type'] = app_config['type']
if 'types' in app_config:
for app_type in app_config['types']:
app_type['launcher'] = get_config_from_package(
app_type['launcher'])
config.app['types'][app_type['name']] = app_type
if 'wait_before_shutdown' in app_config:
config.app['wait_before_shutdown'] = app_config['wait_before_shutdown'] |
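A hypothetical 'app' section exercising a few of the keys handled above:

app_config = {
    "component": "myapp",
    "port": 8888,
    "url_root_path": "/api",      # stored as "api": the leading slash is stripped
    "settings": {"debug": True},
}
# process_app_config_section(config, app_config) copies each of these values
# into the corresponding config.app entries.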
<SYSTEM_TASK:>
Processes the components section from a configuration data dict.
<END_TASK>
<USER_TASK:>
Description:
def process_components_config_section(config, components_config):
""" Processes the components section from a configuration data dict.
:param config: The config reference of the object that will hold the
configuration data from the config_data.
:param components_config: Data section from a config data dict.
""" |
for component_config in components_config:
if 'id' not in component_config:
raise Exception('The component %s was defined without an id.' %
component_config)
component_id = component_config['id']
if component_id not in config.components:
config.components[component_id] = {}
config.components[component_id]['enabled'] = False
config.components[component_id]['config'] = {}
if 'class' in component_config:
class_config_x = component_config['class'].split('.')
config.components[component_id]['class'] = class_config_x[-1]
config.components[component_id]['module'] = '.'.join(
class_config_x[:-1])
if 'enabled' in component_config:
config.components[component_id]['enabled'] = bool(
component_config['enabled']) |
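A hypothetical 'components' section; each entry needs at least an id, and a dotted class path is split into module and class for later loading:

components_config = [
    {"id": "internal", "enabled": True},
    {"id": "toolbox", "class": "mylib.toolbox.ToolboxComponent", "enabled": False},
]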
<SYSTEM_TASK:>
Processes the data configuration section from the configuration
<END_TASK>
<USER_TASK:>
Description:
def process_data_config_section(config, data_config):
""" Processes the data configuration section from the configuration
data dict.
:param config: The config reference of the object that will hold the
configuration data from the config_data.
:param data_config: Data configuration section from a config data dict.
""" |
if 'connectors' in data_config:
for connector in data_config['connectors']:
config.data['connectors'][
connector['name']] = get_config_from_package(
connector['class'])
if 'sources' in data_config:
if data_config['sources']:
for source in data_config['sources']:
config.data['sources'][source['name']] = source
del config.data['sources'][source['name']]['name'] |
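A hypothetical 'data' section: connectors map a name to a dotted connector class, while sources end up keyed by name with the 'name' key itself removed:

data_config = {
    "connectors": [{"name": "sqlalchemy", "class": "my.data.SqlalchemyConnector"}],
    "sources": [{"name": "default", "connector": "sqlalchemy", "url": "sqlite://"}],
}
# After processing:
# config.data['sources']['default'] == {'connector': 'sqlalchemy', 'url': 'sqlite://'}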
<SYSTEM_TASK:>
Processes the log section from a configuration data dict.
<END_TASK>
<USER_TASK:>
Description:
def process_log_config_section(config, log_config):
""" Processes the log section from a configuration data dict.
:param config: The config reference of the object that will hold the
configuration data from the config_data.
:param log_config: Log section from a config data dict.
""" |
if 'format' in log_config:
config.log['format'] = log_config['format']
if 'level' in log_config:
config.log['level'] = log_level_from_string(log_config['level']) |
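A hypothetical 'log' section; the level string is translated through log_level_from_string shown earlier:

log_config = {"format": "%(asctime)s %(levelname)s %(message)s", "level": "DEBUG"}
# After processing: config.log['level'] == logging.DEBUG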