_id (string, 2-7 chars) | title (string, 1-88 chars) | partition (3 classes) | text (string, 75-19.8k chars) | language (1 class) | meta_information (dict)
---|---|---|---|---|---|
q278200
|
HitClusterizer.set_hit_dtype
|
test
|
def set_hit_dtype(self, hit_dtype):
''' Set the data type of the hits.
Fields that are not mentioned here are NOT copied into the clustered hits array.
Clusterizer has to know the hit data type to produce the clustered hit result with the same data types.
Parameters:
-----------
hit_dtype : numpy.dtype or equivalent
Defines the dtype of the hit array.
Example:
--------
hit_dtype = [("column", np.uint16), ("row", np.uint16)], where
"column", "row" is the field name of the input hit array.
'''
if not hit_dtype:
hit_dtype = np.dtype([])
else:
hit_dtype = np.dtype(hit_dtype)
cluster_hits_descr = hit_dtype.descr
# Add default back to description
for dtype_name, dtype in self._default_cluster_hits_descr:
if self._hit_fields_mapping[dtype_name] not in hit_dtype.fields:
cluster_hits_descr.append((dtype_name, dtype))
self._cluster_hits_descr = cluster_hits_descr
self._init_arrays(size=0)
|
python
|
{
"resource": ""
}
|
q278201
|
HitClusterizer.set_cluster_dtype
|
test
|
def set_cluster_dtype(self, cluster_dtype):
''' Set the data type of the cluster.
Parameters:
-----------
cluster_dtype : numpy.dtype or equivalent
Defines the dtype of the cluster array.
'''
if not cluster_dtype:
cluster_dtype = np.dtype([])
else:
cluster_dtype = np.dtype(cluster_dtype)
cluster_descr = cluster_dtype.descr
for dtype_name, dtype in self._default_cluster_descr:
if self._cluster_fields_mapping[dtype_name] not in cluster_dtype.fields:
cluster_descr.append((dtype_name, dtype))
self._cluster_descr = cluster_descr
self._init_arrays(size=0)
|
python
|
{
"resource": ""
}
|
q278202
|
HitClusterizer._check_struct_compatibility
|
test
|
def _check_struct_compatibility(self, hits):
    ''' Takes the hit array and checks that the important data fields have the same data type as the clustered hits array and that the field names are correct.'''
for key, _ in self._cluster_hits_descr:
if key in self._hit_fields_mapping_inverse:
mapped_key = self._hit_fields_mapping_inverse[key]
else:
mapped_key = key
# Only check hit fields that contain hit information
if mapped_key in ['cluster_ID', 'is_seed', 'cluster_size', 'n_cluster']:
continue
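        # the field must exist in the input hits and its dtype must match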
if key not in hits.dtype.names:
raise TypeError('Required hit field "%s" not found.' % key)
if self._cluster_hits.dtype[mapped_key] != hits.dtype[key]:
raise TypeError('The dtype for hit data field "%s" does not match. Got/expected: %s/%s.' % (key, hits.dtype[key], self._cluster_hits.dtype[mapped_key]))
additional_hit_fields = set(hits.dtype.names) - set([key for key, val in self._cluster_hits_descr])
if additional_hit_fields:
logging.warning('Found additional hit fields: %s' % ", ".join(additional_hit_fields))
|
python
|
{
"resource": ""
}
|
q278203
|
add_ruleclause_name
|
test
|
def add_ruleclause_name(self, ns_name, rid) -> bool:
"""Create a tree.Rule"""
ns_name.parser_tree = parsing.Rule(self.value(rid))
return True
|
python
|
{
"resource": ""
}
|
q278204
|
add_rules
|
test
|
def add_rules(self, bnf, r) -> bool:
"""Attach a parser tree to the dict of rules"""
bnf[r.rulename] = r.parser_tree
return True
|
python
|
{
"resource": ""
}
|
q278205
|
add_rule
|
test
|
def add_rule(self, rule, rn, alts) -> bool:
"""Add the rule name"""
rule.rulename = self.value(rn)
rule.parser_tree = alts.parser_tree
return True
|
python
|
{
"resource": ""
}
|
q278206
|
add_sequences
|
test
|
def add_sequences(self, sequences, cla) -> bool:
"""Create a tree.Seq"""
if not hasattr(sequences, 'parser_tree'):
# forward sublevel of sequence as is
sequences.parser_tree = cla.parser_tree
else:
oldnode = sequences
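        # flatten an existing Seq instead of nesting Seq nodes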
if isinstance(oldnode.parser_tree, parsing.Seq):
oldpt = list(oldnode.parser_tree.ptlist)
else:
oldpt = [oldnode.parser_tree]
oldpt.append(cla.parser_tree)
sequences.parser_tree = parsing.Seq(*tuple(oldpt))
return True
|
python
|
{
"resource": ""
}
|
q278207
|
add_alt
|
test
|
def add_alt(self, alternatives, alt) -> bool:
"""Create a tree.Alt"""
if not hasattr(alternatives, 'parser_tree'):
# forward sublevel of alt as is
if hasattr(alt, 'parser_tree'):
alternatives.parser_tree = alt.parser_tree
else:
alternatives.parser_tree = alt
else:
oldnode = alternatives
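        # flatten an existing Alt instead of nesting Alt nodes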
if isinstance(oldnode.parser_tree, parsing.Alt):
oldpt = list(oldnode.parser_tree.ptlist)
else:
oldpt = [oldnode.parser_tree]
oldpt.append(alt.parser_tree)
alternatives.parser_tree = parsing.Alt(*tuple(oldpt))
return True
|
python
|
{
"resource": ""
}
|
q278208
|
add_range
|
test
|
def add_range(self, sequence, begin, end):
"""Add a read_range primitive"""
sequence.parser_tree = parsing.Range(self.value(begin).strip("'"),
self.value(end).strip("'"))
return True
|
python
|
{
"resource": ""
}
|
q278209
|
add_rpt
|
test
|
def add_rpt(self, sequence, mod, pt):
"""Add a repeater to the previous sequence"""
modstr = self.value(mod)
if modstr == '!!':
# cursor on the REPEATER
self._stream.restore_context()
# log the error
self.diagnostic.notify(
error.Severity.ERROR,
"Cannot repeat a lookahead rule",
error.LocationInfo.from_stream(self._stream, is_error=True)
)
raise self.diagnostic
if modstr == '!':
# cursor on the REPEATER
self._stream.restore_context()
# log the error
self.diagnostic.notify(
error.Severity.ERROR,
"Cannot repeat a negated rule",
error.LocationInfo.from_stream(self._stream, is_error=True)
)
raise self.diagnostic
oldnode = sequence
sequence.parser_tree = pt.functor(oldnode.parser_tree)
return True
|
python
|
{
"resource": ""
}
|
q278210
|
add_capture
|
test
|
def add_capture(self, sequence, cpt):
"""Create a tree.Capture"""
cpt_value = self.value(cpt)
sequence.parser_tree = parsing.Capture(cpt_value, sequence.parser_tree)
return True
|
python
|
{
"resource": ""
}
|
q278211
|
add_bind
|
test
|
def add_bind(self, sequence, cpt):
"""Create a tree.Bind"""
cpt_value = self.value(cpt)
sequence.parser_tree = parsing.Bind(cpt_value, sequence.parser_tree)
return True
|
python
|
{
"resource": ""
}
|
q278212
|
add_hook
|
test
|
def add_hook(self, sequence, h):
"""Create a tree.Hook"""
sequence.parser_tree = parsing.Hook(h.name, h.listparam)
return True
|
python
|
{
"resource": ""
}
|
q278213
|
param_num
|
test
|
def param_num(self, param, n):
"""Parse a int in parameter list"""
param.pair = (int(self.value(n)), int)
return True
|
python
|
{
"resource": ""
}
|
q278214
|
param_str
|
test
|
def param_str(self, param, s):
"""Parse a str in parameter list"""
param.pair = (self.value(s).strip('"'), str)
return True
|
python
|
{
"resource": ""
}
|
q278215
|
param_char
|
test
|
def param_char(self, param, c):
"""Parse a char in parameter list"""
param.pair = (self.value(c).strip("'"), str)
return True
|
python
|
{
"resource": ""
}
|
q278216
|
param_id
|
test
|
def param_id(self, param, i):
"""Parse a node name in parameter list"""
param.pair = (self.value(i), parsing.Node)
return True
|
python
|
{
"resource": ""
}
|
q278217
|
hook_name
|
test
|
def hook_name(self, hook, n):
"""Parse a hook name"""
hook.name = self.value(n)
hook.listparam = []
return True
|
python
|
{
"resource": ""
}
|
q278218
|
hook_param
|
test
|
def hook_param(self, hook, p):
"""Parse a hook parameter"""
hook.listparam.append(p.pair)
return True
|
python
|
{
"resource": ""
}
|
q278219
|
EBNF.get_rules
|
test
|
def get_rules(self) -> parsing.Node:
"""
    Parse the DSL and provide a dictionary of all resulting rules.
    Called by the MetaGrammar class.
TODO: could be done in the rules property of parsing.BasicParser???
"""
res = None
try:
res = self.eval_rule('bnf_dsl')
if not res:
# we fail to parse, but error is not set
self.diagnostic.notify(
error.Severity.ERROR,
"Parse error in '%s' in EBNF bnf" % self._lastRule,
error.LocationInfo.from_maxstream(self._stream)
)
raise self.diagnostic
except error.Diagnostic as d:
d.notify(
error.Severity.ERROR,
"Parse error in '%s' in EBNF bnf" % self._lastRule
)
raise d
return res
|
python
|
{
"resource": ""
}
|
q278220
|
ignore_cxx
|
test
|
def ignore_cxx(self) -> bool:
"""Consume comments and whitespace characters."""
self._stream.save_context()
while not self.read_eof():
idxref = self._stream.index
if self._stream.peek_char in " \t\v\f\r\n":
while (not self.read_eof()
and self._stream.peek_char in " \t\v\f\r\n"):
self._stream.incpos()
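        # line comment: consume characters up to the end of line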
if self.peek_text("//"):
while not self.read_eof() and not self.peek_char("\n"):
self._stream.incpos()
if not self.read_char("\n") and self.read_eof():
return self._stream.validate_context()
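        # block comment: consume characters up to the closing */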
if self.peek_text("/*"):
while not self.read_eof() and not self.peek_text("*/"):
self._stream.incpos()
if not self.read_text("*/") and self.read_eof():
return self._stream.restore_context()
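        # no progress in this iteration: nothing more to skip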
if idxref == self._stream.index:
break
return self._stream.validate_context()
|
python
|
{
"resource": ""
}
|
q278221
|
StateRegister.add_state
|
test
|
def add_state(self, s: State):
"""
    All states in the register have a uid.
"""
ids = id(s)
uid = len(self.states)
if ids not in self.states:
self.states[ids] = (uid, s)
|
python
|
{
"resource": ""
}
|
q278222
|
StateRegister.to_dot
|
test
|
def to_dot(self) -> str:
"""
Provide a '.dot' representation of all State in the register.
"""
txt = ""
txt += "digraph S%d {\n" % id(self)
if self.label is not None:
txt += '\tlabel="%s";\n' % (self.label + '\l').replace('\n', '\l')
txt += "\trankdir=LR;\n"
#txt += '\tlabelloc="t";\n'
txt += '\tgraph [labeljust=l, labelloc=t, nojustify=true];\n'
txt += "\tesep=1;\n"
txt += '\tranksep="equally";\n'
txt += "\tnode [shape = circle];\n"
txt += "\tsplines = ortho;\n"
for s in self.states.values():
txt += s[1].to_dot()
txt += "}\n"
return txt
|
python
|
{
"resource": ""
}
|
q278223
|
StateRegister.to_dot_file
|
test
|
def to_dot_file(self, fname: str):
"""
write a '.dot' file.
"""
with open(fname, 'w') as f:
f.write(self.to_dot())
|
python
|
{
"resource": ""
}
|
q278224
|
StateRegister.to_png_file
|
test
|
def to_png_file(self, fname: str):
"""
write a '.png' file.
"""
cmd = pipes.Template()
cmd.append('dot -Tpng > %s' % fname, '-.')
with cmd.open('pipefile', 'w') as f:
f.write(self.to_dot())
|
python
|
{
"resource": ""
}
|
q278225
|
StateRegister.to_fmt
|
test
|
def to_fmt(self) -> str:
"""
Provide a useful representation of the register.
"""
infos = fmt.end(";\n", [])
s = fmt.sep(', ', [])
for ids in sorted(self.states.keys()):
s.lsdata.append(str(ids))
infos.lsdata.append(fmt.block('(', ')', [s]))
infos.lsdata.append("events:" + repr(self.events))
infos.lsdata.append(
"named_events:" + repr(list(self.named_events.keys()))
)
infos.lsdata.append("uid_events:" + repr(list(self.uid_events.keys())))
return infos
|
python
|
{
"resource": ""
}
|
q278226
|
State.nextstate
|
test
|
def nextstate(self, newstate, treenode=None, user_data=None):
"""
Manage transition of state.
"""
if newstate is None:
return self
if isinstance(newstate, State) and id(newstate) != id(self):
return newstate
elif isinstance(newstate, StateEvent):
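        # record that the named event fired, then follow the transition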
self.state_register.named_events[newstate.name] = True
return newstate.st
elif isinstance(newstate, StatePrecond):
return newstate.st
elif isinstance(newstate, StateHook):
# final API using PSL
newstate.call(treenode, user_data)
return newstate.st
return self
|
python
|
{
"resource": ""
}
|
q278227
|
LivingContext.resetLivingState
|
test
|
def resetLivingState(self):
"""Only one Living State on the S0 of each StateRegister"""
# TODO: add some test to control number of instanciation of LivingState
# clean all living state on S0
must_delete = []
    for idx, ls in enumerate(self.ls):
# TODO: alive by default on False, change to True on the first match
ids = id(ls[1].thestate())
if ids == id(ls[0]) and (ls[1].have_finish or not ls[1].alive):
must_delete.append(idx)
elif ls[1].alive:
ls[1].alive = False
for delete in reversed(must_delete):
self.ls.pop(delete)
self.init_all()
|
python
|
{
"resource": ""
}
|
q278228
|
Inference.infer_block
|
test
|
def infer_block(self, body, diagnostic=None):
"""
    Inferring the type of a block means typing each of its sub-elements.
"""
# RootBlockStmt has his own .infer_node (created via infer_type)
for e in body:
e.infer_node = InferNode(parent=self.infer_node)
e.infer_type(diagnostic=diagnostic)
|
python
|
{
"resource": ""
}
|
q278229
|
Inference.infer_subexpr
|
test
|
def infer_subexpr(self, expr, diagnostic=None):
"""
Infer type on the subexpr
"""
expr.infer_node = InferNode(parent=self.infer_node)
expr.infer_type(diagnostic=diagnostic)
|
python
|
{
"resource": ""
}
|
q278230
|
Inference.infer_id
|
test
|
def infer_id(self, ident, diagnostic=None):
"""
Infer type from an ID!
    - check if the ID is declared in the scope
    - if not, the ID has a polymorphic type
"""
# check if ID is declared
#defined = self.type_node.get_by_symbol_name(ident)
defined = self.infer_node.scope_node.get_by_symbol_name(ident)
if len(defined) > 0:
# set from matchings declarations
#self.type_node.update(defined)
self.infer_node.scope_node.update(defined)
else:
diagnostic.notify(
Severity.ERROR,
"%s never declared" % self.value,
self.info
)
|
python
|
{
"resource": ""
}
|
q278231
|
Inference.infer_literal
|
test
|
def infer_literal(self, args, diagnostic=None):
"""
    Infer type from a LITERAL!
    The type of a literal depends on the language.
    We adopt a basic convention.
"""
literal, t = args
#self.type_node.add(EvalCtx.from_sig(Val(literal, t)))
self.infer_node.scope_node.add(EvalCtx.from_sig(Val(literal, t)))
|
python
|
{
"resource": ""
}
|
q278232
|
dump_nodes
|
test
|
def dump_nodes(self):
"""
Dump tag,rule,id and value cache. For debug.
example::
R = [
#dump_nodes
]
"""
print("DUMP NODE LOCAL INFOS")
try:
print("map Id->node name")
for k, v in self.id_cache.items():
print("[%d]=%s" % (k, v))
print("map tag->capture infos")
for k, v in self.tag_cache.items():
print("[%s]=%s" % (k, v))
print("map nodes->tag resolution")
for k, v in self.rule_nodes.items():
txt = "['%s']=%d" % (k, id(v))
if k in self.tag_cache:
tag = self.tag_cache[k]
txt += " tag <%s>" % tag
k = "%d:%d" % (tag._begin, tag._end)
if k in self._stream.value_cache:
txt += " cache <%s>" % self._stream.value_cache[k]
print(txt)
except Exception as err:
print("RECV Exception %s" % err)
import sys
sys.stdout.flush()
return True
|
python
|
{
"resource": ""
}
|
q278233
|
parserrule_topython
|
test
|
def parserrule_topython(parser: parsing.BasicParser,
rulename: str) -> ast.FunctionDef:
"""Generates code for a rule.
def rulename(self):
<code for the rule>
return True
"""
visitor = RuleVisitor()
rule = parser._rules[rulename]
fn_args = ast.arguments([ast.arg('self', None)], None, None, [], None,
None, [], [])
body = visitor._clause(rule_topython(rule))
body.append(ast.Return(ast.Name('True', ast.Load())))
return ast.FunctionDef(rulename, fn_args, body, [], None)
|
python
|
{
"resource": ""
}
|
q278234
|
RuleVisitor.__exit_scope
|
test
|
def __exit_scope(self) -> ast.stmt:
"""Create the appropriate scope exiting statement.
The documentation only shows one level and always uses
'return False' in examples.
'raise AltFalse()' within a try.
'break' within a loop.
'return False' otherwise.
"""
if self.in_optional:
return ast.Pass()
if self.in_try:
return ast.Raise(
ast.Call(ast.Name('AltFalse', ast.Load()), [], [], None, None),
None)
if self.in_loop:
return ast.Break()
return ast.Return(ast.Name('False', ast.Load()))
|
python
|
{
"resource": ""
}
|
q278235
|
RuleVisitor._clause
|
test
|
def _clause(self, pt: parsing.ParserTree) -> [ast.stmt]:
"""Normalize a test expression into a statements list.
    Statement lists are returned as-is.
Expression is packaged as:
if not expr:
return False
"""
if isinstance(pt, list):
return pt
return [ast.If(ast.UnaryOp(ast.Not(), pt),
[self.__exit_scope()],
[])]
|
python
|
{
"resource": ""
}
|
q278236
|
RuleVisitor.visit_Call
|
test
|
def visit_Call(self, node: parsing.Call) -> ast.expr:
"""Generates python code calling the function.
fn(*args)
"""
return ast.Call(
ast.Attribute(
ast.Name('self', ast.Load),
node.callObject.__name__,
ast.Load()),
[ast.Str(param) for param in node.params],
[],
None,
None)
|
python
|
{
"resource": ""
}
|
q278237
|
RuleVisitor.visit_CallTrue
|
test
|
def visit_CallTrue(self, node: parsing.CallTrue) -> ast.expr:
"""Generates python code calling the function and returning True.
lambda: fn(*args) or True
"""
return ast.Lambda(
ast.arguments([], None, None, [], None, None, [], []),
ast.BoolOp(
ast.Or(),
[
self.visit_Call(node),
ast.Name('True', ast.Load())]))
|
python
|
{
"resource": ""
}
|
q278238
|
RuleVisitor.visit_Hook
|
test
|
def visit_Hook(self, node: parsing.Hook) -> ast.expr:
"""Generates python code calling a hook.
self.evalHook('hookname', self.ruleNodes[-1])
"""
return ast.Call(
ast.Attribute(
ast.Name('self', ast.Load()), 'evalHook', ast.Load()),
[
ast.Str(node.name),
ast.Subscript(
ast.Attribute(
ast.Name('self', ast.Load()), 'ruleNodes', ast.Load()),
ast.Index(ast.UnaryOp(ast.USub(), ast.Num(1))),
ast.Load())],
[],
None,
None)
|
python
|
{
"resource": ""
}
|
q278239
|
RuleVisitor.visit_Rule
|
test
|
def visit_Rule(self, node: parsing.Rule) -> ast.expr:
"""Generates python code calling a rule.
self.evalRule('rulename')
"""
return ast.Call(
ast.Attribute(ast.Name('self', ast.Load()),
'evalRule', ast.Load()),
[ast.Str(node.name)], [], None, None)
|
python
|
{
"resource": ""
}
|
q278240
|
RuleVisitor.visit_Capture
|
test
|
def visit_Capture(self, node: parsing.Capture) -> [ast.stmt] or ast.expr:
"""Generates python code to capture text consumed by a clause.
#If all clauses can be inlined
self.beginTag('tagname') and clause and self.endTag('tagname')
if not self.beginTag('tagname'):
return False
<code for the clause>
if not self.endTag('tagname'):
return False
"""
begintag = ast.Attribute(
ast.Name('self', ast.Load()), 'beginTag', ast.Load())
endtag = ast.Attribute(
ast.Name('self', ast.Load()), 'endTag', ast.Load())
begin = ast.Call(begintag, [ast.Str(node.tagname)], [], None, None)
end = ast.Call(endtag, [ast.Str(node.tagname)], [], None, None)
result = [begin, self.visit(node.pt), end]
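    # for/else: the else branch runs only when every part is an expression,
    # so the capture can be inlined as a single 'and' chain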
for clause in result:
if not isinstance(clause, ast.expr):
break
else:
return ast.BoolOp(ast.And(), result)
res = []
for stmt in map(self._clause, result):
res.extend(stmt)
return res
|
python
|
{
"resource": ""
}
|
q278241
|
RuleVisitor.visit_Scope
|
test
|
def visit_Scope(self, node: parsing.Capture) -> [ast.stmt] or ast.expr:
"""Generates python code for a scope.
if not self.begin():
return False
res = self.pt()
if not self.end():
return False
return res
"""
    # scope generation is not implemented yet: emit a placeholder name
    return ast.Name('scope_not_implemented', ast.Load())
|
python
|
{
"resource": ""
}
|
q278242
|
RuleVisitor.visit_Alt
|
test
|
def visit_Alt(self, node: parsing.Alt) -> [ast.stmt]:
"""Generates python code for alternatives.
try:
try:
<code for clause> #raise AltFalse when alternative is False
raise AltTrue()
except AltFalse:
pass
return False
except AltTrue:
pass
"""
clauses = [self.visit(clause) for clause in node.ptlist]
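    # if every alternative compiled to an expression, inline them as one 'or'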
for clause in clauses:
if not isinstance(clause, ast.expr):
break
else:
return ast.BoolOp(ast.Or(), clauses)
res = ast.Try([], [ast.ExceptHandler(
ast.Name('AltTrue', ast.Load()), None, [ast.Pass()])], [], [])
alt_true = [ast.Raise(ast.Call(
ast.Name('AltTrue', ast.Load()), [], [], None, None), None)]
alt_false = [ast.ExceptHandler(
ast.Name('AltFalse', ast.Load()), None, [ast.Pass()])]
self.in_try += 1
for clause in node.ptlist:
res.body.append(
ast.Try(self._clause(self.visit(clause)) + alt_true,
alt_false, [], []))
self.in_try -= 1
res.body.append(self.__exit_scope())
return [res]
|
python
|
{
"resource": ""
}
|
q278243
|
RuleVisitor.visit_Seq
|
test
|
def visit_Seq(self, node: parsing.Seq) -> [ast.stmt] or ast.expr:
"""Generates python code for clauses.
    #Consecutive clauses which can be inlined are combined with 'and'
clause and clause
if not clause:
return False
if not clause:
return False
"""
exprs, stmts = [], []
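    # group consecutive inlinable expressions; flush them when a statement appears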
for clause in node.ptlist:
clause_ast = self.visit(clause)
if isinstance(clause_ast, ast.expr):
exprs.append(clause_ast)
else:
if exprs:
stmts.extend(self.combine_exprs_for_clauses(exprs))
exprs = []
stmts.extend(self._clause(clause_ast))
if not stmts:
return ast.BoolOp(ast.And(), exprs)
if exprs:
stmts.extend(self.combine_exprs_for_clauses(exprs))
return stmts
|
python
|
{
"resource": ""
}
|
q278244
|
RuleVisitor.visit_RepOptional
|
test
|
def visit_RepOptional(self, node: parsing.RepOptional) -> ([ast.stmt] or
ast.expr):
"""Generates python code for an optional clause.
<code for the clause>
"""
cl_ast = self.visit(node.pt)
if isinstance(cl_ast, ast.expr):
return ast.BoolOp(ast.Or(), [cl_ast, ast.Name('True', ast.Load())])
self.in_optional += 1
cl_ast = self.visit(node.pt)
self.in_optional -= 1
return cl_ast
|
python
|
{
"resource": ""
}
|
q278245
|
RuleVisitor.visit_Rep0N
|
test
|
def visit_Rep0N(self, node: parsing.Rep0N) -> [ast.stmt]:
"""Generates python code for a clause repeated 0 or more times.
#If all clauses can be inlined
while clause:
pass
while True:
<code for the clause>
"""
cl_ast = self.visit(node.pt)
if isinstance(cl_ast, ast.expr):
return [ast.While(cl_ast, [ast.Pass()], [])]
self.in_loop += 1
clause = self._clause(self.visit(node.pt))
self.in_loop -= 1
return [ast.While(ast.Name('True', ast.Load()), clause, [])]
|
python
|
{
"resource": ""
}
|
q278246
|
RuleVisitor.visit_Rep1N
|
test
|
def visit_Rep1N(self, node: parsing.Rep0N) -> [ast.stmt]:
"""Generates python code for a clause repeated 1 or more times.
<code for the clause>
while True:
<code for the clause>
"""
clause = self.visit(node.pt)
if isinstance(clause, ast.expr):
return (self._clause(clause) + self.visit_Rep0N(node))
self.in_loop += 1
clause = self._clause(self.visit(node.pt))
self.in_loop -= 1
return self._clause(self.visit(node.pt)) + [
ast.While(ast.Name('True', ast.Load()), clause, [])]
|
python
|
{
"resource": ""
}
|
q278247
|
catend
|
test
|
def catend(dst: str, src: str, indent) -> str:
"""cat two strings but handle \n for tabulation"""
res = dst
txtsrc = src
if not isinstance(src, str):
txtsrc = str(src)
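    # after a newline, emit the indentation prefix before the next character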
for c in list(txtsrc):
if len(res) > 0 and res[-1] == '\n':
res += (indentable.char_indent * indentable.num_indent) * \
(indent - 1) + c
else:
res += c
return res
|
python
|
{
"resource": ""
}
|
q278248
|
list_set_indent
|
test
|
def list_set_indent(lst: list, indent: int=1):
"""recurs into list for indentation"""
for i in lst:
if isinstance(i, indentable):
i.set_indent(indent)
if isinstance(i, list):
list_set_indent(i, indent)
|
python
|
{
"resource": ""
}
|
q278249
|
list_to_str
|
test
|
def list_to_str(lst: list, content: str, indent: int=1):
"""recurs into list for string computing """
for i in lst:
if isinstance(i, indentable):
content = i.to_str(content, indent)
elif isinstance(i, list):
content = list_to_str(i, content, indent)
elif isinstance(i, str):
content = catend(content, i, indent)
return content
|
python
|
{
"resource": ""
}
|
q278250
|
echo_nodes
|
test
|
def echo_nodes(self, *rest):
"""
Print nodes.
example::
R = [
In : node #echo("coucou", 12, node)
]
"""
txt = ""
for thing in rest:
if isinstance(thing, Node):
txt += self.value(thing)
else:
txt += str(thing)
print(txt)
return True
|
python
|
{
"resource": ""
}
|
q278251
|
populate_from_sequence
|
test
|
def populate_from_sequence(seq: list, r: ref(Edge), sr: state.StateRegister):
""" function that connect each other one sequence of MatchExpr. """
base_state = r
# we need to detect the last state of the sequence
idxlast = len(seq) - 1
idx = 0
for m in seq:
# alternatives are represented by builtin list
if isinstance(m, list):
            # so recursively connect all states of each alternative sequence.
for item in m:
populate_from_sequence(item, r, sr)
elif isinstance(m, MatchExpr):
            # from the current state, is there an existing edge for this event?
eX = r().get_next_edge(m)
if eX is None:
sX = None
if idx != idxlast:
sX = state.State(sr)
sX.matchDefault(base_state().s)
else:
# last state of sequence return to the base
sX = base_state().s
eX = Edge(sX)
r().next_edge[id(sX)] = eX
m.attach(r().s, sX, sr)
r = ref(eX)
idx += 1
|
python
|
{
"resource": ""
}
|
q278252
|
populate_state_register
|
test
|
def populate_state_register(all_seq: [list], sr: state.StateRegister) -> Edge:
""" function that create a state for all instance
of MatchExpr in the given list and connect each others.
"""
# Basic State
s0 = state.State(sr)
    # loop on itself
s0.matchDefault(s0)
# this is default
sr.set_default_state(s0)
# use Edge to store connection
e0 = Edge(s0)
for seq in all_seq:
r = ref(e0)
# merge all sequences into one tree automata
populate_from_sequence(seq, r, sr)
# return edge for debug purpose
return e0
|
python
|
{
"resource": ""
}
|
q278253
|
MatchBlock.build_state_tree
|
test
|
def build_state_tree(self, tree: list, sr: state.StateRegister):
""" main function for creating a bottom-up tree automata
for a block of matching statements.
"""
all_seq = []
    # for each statement, populate a list of MatchExpr
    # instances from deepest to nearest.
for stmt in self.stmts:
part_seq = list()
stmt.build_state_tree(part_seq)
all_seq.append(part_seq)
    # Walk over all MatchExpr instances
    # and create State instances in the StateRegister
self.root_edge = populate_state_register(all_seq, sr)
|
python
|
{
"resource": ""
}
|
q278254
|
pred_eq
|
test
|
def pred_eq(self, n, val):
"""
    Test if a node set with setint or setstr equals a certain value
example::
R = [
__scope__:n
['a' #setint(n, 12) | 'b' #setint(n, 14)]
C
[#eq(n, 12) D]
]
"""
v1 = n.value
v2 = val
if hasattr(val, 'value'):
v2 = val.value
if isinstance(v1, int) and not isinstance(v2, int):
return v1 == int(v2)
return v1 == v2
|
python
|
{
"resource": ""
}
|
q278255
|
from_string
|
test
|
def from_string(bnf: str, entry=None, *optional_inherit) -> Grammar:
"""
Create a Grammar from a string
"""
inherit = [Grammar] + list(optional_inherit)
scope = {'grammar': bnf, 'entry': entry}
return build_grammar(tuple(inherit), scope)
|
python
|
{
"resource": ""
}
|
q278256
|
from_file
|
test
|
def from_file(fn: str, entry=None, *optional_inherit) -> Grammar:
"""
Create a Grammar from a file
"""
import os.path
if os.path.exists(fn):
        with open(fn, 'r') as f:
            bnf = f.read()
inherit = [Grammar] + list(optional_inherit)
scope = {'grammar': bnf, 'entry': entry, 'source': fn}
return build_grammar(tuple(inherit), scope)
raise Exception("File not Found!")
|
python
|
{
"resource": ""
}
|
q278257
|
Grammar.parse
|
test
|
def parse(self, source: str=None, entry: str=None) -> parsing.Node:
"""Parse source using the grammar"""
self.from_string = True
if source is not None:
self.parsed_stream(source)
if entry is None:
entry = self.entry
if entry is None:
raise ValueError("No entry rule name defined for {}".format(
self.__class__.__name__))
return self._do_parse(entry)
|
python
|
{
"resource": ""
}
|
q278258
|
Grammar.parse_file
|
test
|
def parse_file(self, filename: str, entry: str=None) -> parsing.Node:
"""Parse filename using the grammar"""
self.from_string = False
import os.path
with open(filename, 'r') as f:
self.parsed_stream(f.read(), os.path.abspath(filename))
if entry is None:
entry = self.entry
if entry is None:
raise ValueError("No entry rule name defined for {}".format(
self.__class__.__name__))
return self._do_parse(entry)
|
python
|
{
"resource": ""
}
|
q278259
|
set_node
|
test
|
def set_node(self, dst, src):
"""
Basically copy one node to another.
    useful to transmit a node from a terminal
    rule as the result of the current rule.
example::
R = [
In : node #set(_, node)
]
    here the node returned by the rule In is
    also the node returned by the rule R
"""
if not isinstance(src, Node):
dst.value = src
else:
dst.set(src)
idsrc = id(src)
iddst = id(dst)
if iddst not in self.id_cache:
print("DST: %s" % repr(dst))
print("RULE_NODES %s" % repr(self.rule_nodes))
print("IDCACHE %s" % repr(self.id_cache))
if idsrc in self.id_cache:
k = self.id_cache[idsrc]
k2 = self.id_cache[iddst]
if k in self.rule_nodes:
self.tag_cache[k2] = self.tag_cache[k]
return True
|
python
|
{
"resource": ""
}
|
q278260
|
set_node_as_int
|
test
|
def set_node_as_int(self, dst, src):
"""
Set a node to a value captured from another node
example::
R = [
In : node #setcapture(_, node)
]
"""
dst.value = self.value(src)
return True
|
python
|
{
"resource": ""
}
|
q278261
|
get_subnode
|
test
|
def get_subnode(self, dst, ast, expr):
"""
get the value of subnode
example::
R = [
__scope__:big getsomethingbig:>big
#get(_, big, '.val') // copy big.val into _
]
"""
dst.value = eval('ast' + expr)
return True
|
python
|
{
"resource": ""
}
|
q278262
|
default_serializer
|
test
|
def default_serializer(o):
"""Default serializer for json."""
defs = (
((datetime.date, datetime.time),
lambda x: x.isoformat(), ),
((datetime.datetime, ),
lambda x: dt2utc_timestamp(x), ),
)
for types, fun in defs:
if isinstance(o, types):
return fun(o)
|
python
|
{
"resource": ""
}
|
q278263
|
get
|
test
|
def get(query, from_date, limit=0, **kwargs):
"""Get deposits."""
dep_generator = _get_depositions()
total_depids = 1 # Count of depositions is hard to determine
# If limit provided, serve only first n=limit items
if limit > 0:
dep_generator = islice(dep_generator, limit)
total_depids = limit
return total_depids, dep_generator
|
python
|
{
"resource": ""
}
|
q278264
|
dump
|
test
|
def dump(deposition, from_date, with_json=True, latest_only=False, **kwargs):
"""Dump the deposition object as dictionary."""
# Serialize the __getstate__ and fall back to default serializer
dep_json = json.dumps(deposition.__getstate__(),
default=default_serializer)
dep_dict = json.loads(dep_json)
dep_dict['_p'] = {}
dep_dict['_p']['id'] = deposition.id
dep_dict['_p']['created'] = dt2utc_timestamp(deposition.created)
dep_dict['_p']['modified'] = dt2utc_timestamp(deposition.modified)
dep_dict['_p']['user_id'] = deposition.user_id
dep_dict['_p']['state'] = deposition.state
dep_dict['_p']['has_sip'] = deposition.has_sip()
dep_dict['_p']['submitted'] = deposition.submitted
return dep_dict
|
python
|
{
"resource": ""
}
|
q278265
|
_get_recids_invenio12
|
test
|
def _get_recids_invenio12(from_date):
"""Get BibDocs for Invenio 1."""
from invenio.dbquery import run_sql
return (id[0] for id in run_sql(
'select id_bibrec from '
'bibrec_bibdoc as r join bibdoc as d on r.id_bibdoc=d.id '
'where d.modification_date >=%s',
(from_date, ), run_on_slave=True))
|
python
|
{
"resource": ""
}
|
q278266
|
_get_recids_invenio2
|
test
|
def _get_recids_invenio2(from_date):
"""Get BibDocs for Invenio 2."""
from invenio.legacy.dbquery import run_sql
return (id[0] for id in run_sql(
'select id_bibrec from '
'bibrec_bibdoc as r join bibdoc as d on r.id_bibdoc=d.id '
'where d.modification_date >=%s',
(from_date, ), run_on_slave=True))
|
python
|
{
"resource": ""
}
|
q278267
|
_import_bibdoc
|
test
|
def _import_bibdoc():
"""Import BibDocFile."""
try:
from invenio.bibdocfile import BibRecDocs, BibDoc
except ImportError:
from invenio.legacy.bibdocfile.api import BibRecDocs, BibDoc
return BibRecDocs, BibDoc
|
python
|
{
"resource": ""
}
|
q278268
|
dump_bibdoc
|
test
|
def dump_bibdoc(recid, from_date, **kwargs):
"""Dump all BibDoc metadata.
:param docid: BibDoc ID
:param from_date: Dump only BibDoc revisions newer than this date.
:returns: List of version of the BibDoc formatted as a dict
"""
BibRecDocs, BibDoc = _import_bibdoc()
bibdocfile_dump = []
date = datetime.datetime.strptime(from_date, '%Y-%m-%d %H:%M:%S')
for bibdoc in BibRecDocs(recid).list_bibdocs():
for version in bibdoc.list_versions():
bibdoc_version = bibdoc.list_version_files(version)
for f in bibdoc_version:
if f.is_icon() or f.md < date:
# Don't care about icons
# Don't care about files not modified since from_date
continue
bibdocfile_dump.append(dict(
bibdocid=f.get_bibdocid(),
checksum=f.get_checksum(),
comment=f.get_comment(),
copyright=(
f.get_copyright() if hasattr(f, 'get_copyright')
else None),
creation_date=datetime_toutc(f.cd).isoformat(),
description=f.get_description(),
encoding=f.encoding,
etag=f.etag,
flags=f.flags,
format=f.get_format(),
full_name=f.get_full_name(),
full_path=f.get_full_path(),
hidden=f.hidden,
license=(
                    f.get_license() if hasattr(f, 'get_license') else None),
modification_date=datetime_toutc(f.md).isoformat(),
name=f.get_name(),
mime=f.mime,
path=f.get_path(),
recid=f.get_recid(),
recids_doctype=f.recids_doctypes,
size=f.get_size(),
status=f.get_status(),
subformat=f.get_subformat(),
superformat=f.get_superformat(),
type=f.get_type(),
url=f.get_url(),
version=f.get_version(),
))
return bibdocfile_dump
|
python
|
{
"resource": ""
}
|
q278269
|
get_check
|
test
|
def get_check():
"""Get bibdocs to check."""
try:
from invenio.dbquery import run_sql
except ImportError:
from invenio.legacy.dbquery import run_sql
return (
run_sql('select count(id) from bibdoc', run_on_slave=True)[0][0],
[id[0] for id in run_sql('select id from bibdoc', run_on_slave=True)],
)
|
python
|
{
"resource": ""
}
|
q278270
|
check
|
test
|
def check(id_):
"""Check bibdocs."""
BibRecDocs, BibDoc = _import_bibdoc()
try:
BibDoc(id_).list_all_files()
except Exception:
click.secho("BibDoc {0} failed check.".format(id_), fg='red')
|
python
|
{
"resource": ""
}
|
q278271
|
dump
|
test
|
def dump(obj, from_date, with_json=True, latest_only=False, **kwargs):
"""Dump the oauth2server tokens."""
return dict(id=obj.id,
client_id=obj.client_id,
user_id=obj.user_id,
token_type=obj.token_type,
access_token=obj.access_token,
refresh_token=obj.refresh_token,
expires=dt2iso_or_empty(obj.expires),
_scopes=obj._scopes,
is_personal=obj.is_personal,
is_internal=obj.is_internal)
|
python
|
{
"resource": ""
}
|
q278272
|
get
|
test
|
def get(*args, **kwargs):
"""Get UserEXT objects."""
try:
from invenio.modules.accounts.models import UserEXT
except ImportError:
from invenio_accounts.models import UserEXT
q = UserEXT.query
return q.count(), q.all()
|
python
|
{
"resource": ""
}
|
q278273
|
dump
|
test
|
def dump(u, from_date, with_json=True, latest_only=False, **kwargs):
"""Dump the UserEXt objects as a list of dictionaries.
:param u: UserEXT to be dumped.
:type u: `invenio_accounts.models.UserEXT [Invenio2.x]`
:returns: User serialized to dictionary.
:rtype: dict
"""
return dict(id=u.id, method=u.method, id_user=u.id_user)
|
python
|
{
"resource": ""
}
|
q278274
|
get
|
test
|
def get(*args, **kwargs):
"""Get communities."""
from invenio.modules.communities.models import FeaturedCommunity
q = FeaturedCommunity.query
return q.count(), q.all()
|
python
|
{
"resource": ""
}
|
q278275
|
_get_modified_recids_invenio12
|
test
|
def _get_modified_recids_invenio12(from_date):
"""Get record ids for Invenio 1."""
from invenio.search_engine import search_pattern
from invenio.dbquery import run_sql
return set((id[0] for id in run_sql(
'select id from bibrec where modification_date >= %s',
(from_date, ), run_on_slave=True))), search_pattern
|
python
|
{
"resource": ""
}
|
q278276
|
_get_modified_recids_invenio2
|
test
|
def _get_modified_recids_invenio2(from_date):
"""Get record ids for Invenio 2."""
from invenio.legacy.search_engine import search_pattern
from invenio.modules.records.models import Record
date = datetime.datetime.strptime(from_date, '%Y-%m-%d %H:%M:%S')
return set(
(x[0]
for x in Record.query.filter(Record.modification_date >= date).values(
Record.id))), search_pattern
|
python
|
{
"resource": ""
}
|
q278277
|
_get_collection_restrictions
|
test
|
def _get_collection_restrictions(collection):
"""Get all restrictions for a given collection, users and fireroles."""
try:
from invenio.dbquery import run_sql
from invenio.access_control_firerole import compile_role_definition
except ImportError:
from invenio.modules.access.firerole import compile_role_definition
from invenio.legacy.dbquery import run_sql
res = run_sql(
'SELECT r.firerole_def_src, email '
'FROM accROLE as r '
'JOIN accROLE_accACTION_accARGUMENT ON r.id=id_accROLE '
'JOIN accARGUMENT AS a ON a.id=id_accARGUMENT '
'JOIN user_accROLE AS u ON r.id=u.id_accROLE '
'JOIN user ON user.id=u.id_user '
'WHERE a.keyword="collection" AND '
'a.value=%s AND '
'id_accACTION=(select id from accACTION where name="viewrestrcoll")',
(collection, ), run_on_slave=True
)
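    # aggregate compiled fireroles and user emails across all returned rows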
fireroles = set()
users = set()
for f, u in res:
fireroles.add(compile_role_definition(f))
users.add(u)
return {'fireroles': list(fireroles), 'users': users}
|
python
|
{
"resource": ""
}
|
q278278
|
get_record_revisions
|
test
|
def get_record_revisions(recid, from_date):
"""Get record revisions."""
try:
from invenio.dbquery import run_sql
except ImportError:
from invenio.legacy.dbquery import run_sql
return run_sql(
'SELECT job_date, marcxml '
'FROM hstRECORD WHERE id_bibrec = %s AND job_date >= %s '
'ORDER BY job_date ASC', (recid, from_date),
run_on_slave=True)
|
python
|
{
"resource": ""
}
|
q278279
|
get_record_collections
|
test
|
def get_record_collections(recid):
"""Get all collections the record belong to."""
try:
from invenio.search_engine import (
get_all_collections_of_a_record,
get_restricted_collections_for_recid)
except ImportError:
from invenio.legacy.search_engine import (
get_all_collections_of_a_record,
get_restricted_collections_for_recid)
collections = {
'all':
get_all_collections_of_a_record(recid, recreate_cache_if_needed=False),
}
collections['restricted'] = dict(
(coll, _get_collection_restrictions(coll))
for coll in get_restricted_collections_for_recid(
recid, recreate_cache_if_needed=False))
return collections
|
python
|
{
"resource": ""
}
|
q278280
|
dump_record_json
|
test
|
def dump_record_json(marcxml):
"""Dump JSON of record."""
try:
from invenio.modules.records.api import Record
d = Record.create(marcxml, 'marc')
return d.dumps(clean=True)
except ImportError:
from invenio.bibfield import create_record
d = create_record(marcxml, master_format='marc')
return d.dumps()
|
python
|
{
"resource": ""
}
|
q278281
|
get
|
test
|
def get(query, from_date, **kwargs):
"""Get recids matching query and with changes."""
recids, search_pattern = get_modified_recids(from_date)
recids = recids.union(get_modified_bibdoc_recids(from_date))
if query:
recids = recids.intersection(
set(search_pattern(p=query.encode('utf-8'))))
return len(recids), recids
|
python
|
{
"resource": ""
}
|
q278282
|
dump
|
test
|
def dump(recid,
from_date,
with_json=False,
latest_only=False,
with_collections=False,
**kwargs):
"""Dump MARCXML and JSON representation of a record.
:param recid: Record identifier
:param from_date: Dump only revisions from this date onwards.
:param with_json: If ``True`` use old ``Record.create`` to generate the
JSON representation of the record.
:param latest_only: Dump only the last revision of the record metadata.
:param with_collections: If ``True`` dump the list of collections that the
record belongs to.
:returns: List of versions of the record.
"""
# Grab latest only
if latest_only:
revision_iter = [get_record_revisions(recid, from_date)[-1]]
else:
revision_iter = get_record_revisions(recid, from_date)
# Dump revisions
record_dump = dict(
record=[],
files=[],
recid=recid,
collections=get_record_collections(recid)
if with_collections else None, )
for revision_date, revision_marcxml in revision_iter:
marcxml = zlib.decompress(revision_marcxml)
record_dump['record'].append(
dict(
modification_datetime=datetime_toutc(revision_date)
.isoformat(),
marcxml=marcxml,
json=dump_record_json(marcxml) if with_json else None, ))
record_dump['files'] = dump_bibdoc(recid, from_date)
return record_dump
|
python
|
{
"resource": ""
}
|
q278283
|
dump
|
test
|
def dump(ra, from_date, with_json=True, latest_only=False, **kwargs):
"""Dump the remote accounts as a list of dictionaries.
:param ra: Remote account to be dumped.
:type ra: `invenio_oauthclient.models.RemoteAccount [Invenio2.x]`
:returns: Remote accounts serialized to dictionary.
:rtype: dict
"""
return dict(id=ra.id, user_id=ra.user_id, client_id=ra.client_id,
extra_data=ra.extra_data)
|
python
|
{
"resource": ""
}
|
q278284
|
load_common
|
test
|
def load_common(model_cls, data):
"""Helper function for loading JSON data verbatim into model."""
obj = model_cls(**data)
db.session.add(obj)
db.session.commit()
|
python
|
{
"resource": ""
}
|
q278285
|
collect_things_entry_points
|
test
|
def collect_things_entry_points():
"""Collect entry points."""
things = dict()
for entry_point in iter_entry_points(group='invenio_migrator.things'):
things[entry_point.name] = entry_point.load()
return things
|
python
|
{
"resource": ""
}
|
q278286
|
init_app_context
|
test
|
def init_app_context():
"""Initialize app context for Invenio 2.x."""
try:
from invenio.base.factory import create_app
app = create_app()
app.test_request_context('/').push()
app.preprocess_request()
except ImportError:
pass
|
python
|
{
"resource": ""
}
|
q278287
|
memoize
|
test
|
def memoize(func):
"""Cache for heavy function calls."""
cache = {}
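    # the cache key is the string form of the positional and keyword arguments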
@wraps(func)
def wrap(*args, **kwargs):
key = '{0}{1}'.format(args, kwargs)
if key not in cache:
cache[key] = func(*args, **kwargs)
return cache[key]
return wrap
|
python
|
{
"resource": ""
}
|
q278288
|
_get_run_sql
|
test
|
def _get_run_sql():
"""Import ``run_sql``."""
try:
from invenio.dbquery import run_sql
except ImportError:
from invenio.legacy.dbquery import run_sql
return run_sql
|
python
|
{
"resource": ""
}
|
q278289
|
get_connected_roles
|
test
|
def get_connected_roles(action_id):
"""Get roles connected to an action."""
try:
from invenio.access_control_admin import compile_role_definition
except ImportError:
from invenio.modules.access.firerole import compile_role_definition
run_sql = _get_run_sql()
roles = {}
res = run_sql(
'select r.id, r.name, r.description, r.firerole_def_src, '
'a.keyword, a.value, email from accROLE as r '
'join accROLE_accACTION_accARGUMENT on r.id=id_accROLE '
'join accARGUMENT as a on a.id=id_accARGUMENT '
'join user_accROLE as u on r.id=u.id_accROLE '
'join user on user.id=u.id_user '
'where id_accACTION=%s', (action_id, )
)
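    # group rows by role id, collecting parameter values and user emails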
for r in res:
role = roles.setdefault(
r[0], {
'id': r[0],
'name': r[1],
'description': r[2],
'firerole_def': r[3],
'compiled_firerole_def': compile_role_definition(r[3]),
'users': set(),
'parameters': {}
}
)
param = role['parameters'].setdefault(r[4], set())
param.add(r[5])
role['users'].add(r[6])
return six.itervalues(roles)
|
python
|
{
"resource": ""
}
|
q278290
|
get
|
test
|
def get(query, *args, **kwargs):
"""Get action definitions to dump."""
run_sql = _get_run_sql()
actions = [
        dict(id=row[0],
             name=row[1],
             description=row[2],
             allowedkeywords=row[3],
             optional=row[4])
for action in query.split(',') for row in run_sql(
'select id, name, description, allowedkeywords, optional '
'from accACTION where name like %s', (action, ),
run_on_slave=True)
]
return len(actions), actions
|
python
|
{
"resource": ""
}
|
q278291
|
dump
|
test
|
def dump(rt, from_date, with_json=True, latest_only=False, **kwargs):
"""Dump the remote tokens as a list of dictionaries.
    :param rt: Remote token to be dumped.
    :type rt: `invenio_oauthclient.models.RemoteToken [Invenio2.x]`
:returns: Remote tokens serialized to dictionary.
:rtype: dict
"""
return dict(id_remote_account=rt.id_remote_account,
token_type=rt.token_type,
access_token=rt.access_token,
secret=rt.secret)
|
python
|
{
"resource": ""
}
|
q278292
|
load_token
|
test
|
def load_token(data):
"""Load the oauth2server token from data dump."""
from invenio_oauth2server.models import Token
data['expires'] = iso2dt_or_none(data['expires'])
load_common(Token, data)
|
python
|
{
"resource": ""
}
|
q278293
|
import_record
|
test
|
def import_record(data, source_type=None, latest_only=False):
"""Migrate a record from a migration dump.
:param data: Dictionary for representing a single record and files.
:param source_type: Determines if the MARCXML or the JSON dump is used.
Default: ``marcxml``.
    :param latest_only: Determines if only the latest revision should be loaded.
"""
source_type = source_type or 'marcxml'
assert source_type in ['marcxml', 'json']
recorddump = current_migrator.records_dump_cls(
data,
source_type=source_type,
pid_fetchers=current_migrator.records_pid_fetchers,
)
try:
current_migrator.records_dumploader_cls.create(recorddump)
db.session.commit()
except Exception:
db.session.rollback()
raise
|
python
|
{
"resource": ""
}
|
q278294
|
config_imp_or_default
|
test
|
def config_imp_or_default(app, config_var_imp, default):
"""Import config var import path or use default value."""
imp = app.config.get(config_var_imp)
return import_string(imp) if imp else default
|
python
|
{
"resource": ""
}
|
q278295
|
dump
|
test
|
def dump(obj, from_date, with_json=True, latest_only=False, **kwargs):
"""Dump the oauth2server Client."""
return dict(name=obj.name,
description=obj.description,
website=obj.website,
user_id=obj.user_id,
client_id=obj.client_id,
client_secret=obj.client_secret,
is_confidential=obj.is_confidential,
is_internal=obj.is_internal,
_redirect_uris=obj._redirect_uris,
_default_scopes=obj._default_scopes)
|
python
|
{
"resource": ""
}
|
q278296
|
_get_users_invenio12
|
test
|
def _get_users_invenio12(*args, **kwargs):
"""Get user accounts Invenio 1."""
from invenio.dbquery import run_sql, deserialize_via_marshal
User = namedtuple('User', [
'id', 'email', 'password', 'password_salt', 'note', 'full_name',
'settings', 'nickname', 'last_login'
])
users = run_sql(
'SELECT id, email, password, note, settings, nickname, last_login'
' FROM user',
run_on_slave=True)
return len(users), [
User(
id=user[0],
email=user[1],
password=user[2].decode('latin1'),
password_salt=user[1],
note=user[3],
full_name=user[5],
settings=deserialize_via_marshal(user[4]) if user[4] else {},
# we don't have proper nicknames on Invenio v1
nickname='id_{0}'.format(user[0]),
last_login=user[6]) for user in users
]
|
python
|
{
"resource": ""
}
|
q278297
|
_get_users_invenio2
|
test
|
def _get_users_invenio2(*args, **kwargs):
"""Get user accounts from Invenio 2."""
from invenio.modules.accounts.models import User
q = User.query
return q.count(), q.all()
|
python
|
{
"resource": ""
}
|
q278298
|
dump
|
test
|
def dump(u, *args, **kwargs):
"""Dump the users as a list of dictionaries.
:param u: User to be dumped.
:type u: `invenio.modules.accounts.models.User [Invenio2.x]` or namedtuple.
:returns: User serialized to dictionary.
:rtype: dict
"""
return dict(
id=u.id,
email=u.email,
password=u.password,
password_salt=u.password_salt,
note=u.note,
full_name=u.full_name if hasattr(u, 'full_name') else '{0} {1}'.format(
u.given_names, u.family_name),
settings=u.settings,
nickname=u.nickname,
last_login=dt2iso_or_empty(u.last_login))
|
python
|
{
"resource": ""
}
|
q278299
|
load_deposit
|
test
|
def load_deposit(data):
"""Load the raw JSON dump of the Deposition.
Uses Record API in order to bypass all Deposit-specific initialization,
which are to be done after the final stage of deposit migration.
:param data: Dictionary containing deposition data.
:type data: dict
"""
from invenio_db import db
deposit, dep_pid = create_record_and_pid(data)
deposit = create_files_and_sip(deposit, dep_pid)
db.session.commit()
|
python
|
{
"resource": ""
}
|