function (string, lengths 11–56k) | repo_name (string, lengths 5–60) | features (sequence)
---|---|---|
def revert_image(self):
log.info('download-revert', name=self.name)
if self._download_state == ImageDownload.DOWNLOAD_SUCCEEDED:
pass # TODO: Implement
self._image_state = ImageDownload.IMAGE_INACTIVE
returnValue('TODO: Implement this') | opencord/voltha | [
73,
117,
73,
17,
1484694318
] |
def monitor_state_to_activate_state(self, state):
if ':' in state:
state = state.split(':')[-1]
result = {
'enabling-software': ImageDownload.IMAGE_ACTIVATE, # currently enabling the software
'software-enabled': ImageDownload.IMAGE_ACTIVE, # successfully enabled the required software
'enable-software-failed': ImageDownload.IMAGE_INACTIVE, # unsuccessfully attempted to enable the required software revision
'activating-software': ImageDownload.IMAGE_ACTIVATE, # currently activating the software
'software-activated': ImageDownload.IMAGE_ACTIVE, # successfully activated the required software. The job terminated successfully
'activate-software-failed': ImageDownload.IMAGE_INACTIVE, # unsuccessfully attempted to activate the required software revision
'committing-software': ImageDownload.IMAGE_ACTIVATE, # currently committing the software
'software-committed': ImageDownload.IMAGE_ACTIVATE, # successfully committed the required software. The job terminated successfully
'commit-software-failed': ImageDownload.IMAGE_INACTIVE, # unsuccessfully attempted to commit the required software revision
}.get(state.lower(), None)
log.info('download-activate-state', result=result, state=state, name=self.name)
assert result is not None, 'Invalid state'
return result | opencord/voltha | [
73,
117,
73,
17,
1484694318
] |
def execute(self):
msg = self.xpybuild(shouldFail=False, args=[], buildfile='root.xpybuild.py')
self.startProcess(self.output+'/build-output/test', [], stdout='test.out', stderr='test.err') | xpybuild/xpybuild | [
7,
4,
7,
5,
1486396682
] |
def LOG_CRITICAL(msg, *args, **kwargs): # pragma: no cover
logger.critical(msg, *args, **kwargs)
logging.shutdown()
sys.exit(1) | p4lang/p4c-bm | [
23,
30,
23,
3,
1438962502
] |
def LOG_INFO(msg, *args, **kwargs): # pragma: no cover
logger.info(msg, *args, **kwargs) | p4lang/p4c-bm | [
23,
30,
23,
3,
1438962502
] |
def decorate(func):
_STATIC_VARS.append((func, varname, value))
setattr(func, varname, copy(value))
return func | p4lang/p4c-bm | [
23,
30,
23,
3,
1438962502
] |
def reset_static_vars():
for(func, varname, value) in _STATIC_VARS:
setattr(func, varname, copy(value)) | p4lang/p4c-bm | [
23,
30,
23,
3,
1438962502
] |
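decorate above is the inner function of a static-variable decorator used throughout this backend (e.g. dump_one_parser.parse_state_id, field_list_to_learn_id.ids). The enclosing factory is not part of this dump; a minimal sketch of the presumed pattern, with static_var and bump invented here purely for illustration:

from copy import copy

_STATIC_VARS = []

def static_var(varname, value):
    # presumed decorator factory wrapping the decorate() helper shown above
    def decorate(func):
        _STATIC_VARS.append((func, varname, value))
        setattr(func, varname, copy(value))
        return func
    return decorate

@static_var("counter", 0)
def bump():
    bump.counter += 1
    return bump.counter

assert bump() == 1 and bump() == 2
for func, varname, value in _STATIC_VARS:  # what reset_static_vars() does
    setattr(func, varname, copy(value))
assert bump() == 1  # counter restored to its initial value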
def find_idx(name):
for idx, field in enumerate(fields):
if name == field:
return idx
return -1 | p4lang/p4c-bm | [
23,
30,
23,
3,
1438962502
] |
def add_pragmas(json_item, p4_object):
json_item["pragmas"] = list(p4_object._pragmas) | p4lang/p4c-bm | [
23,
30,
23,
3,
1438962502
] |
def get_padding_name(i):
return "_padding_{}".format(i) | p4lang/p4c-bm | [
23,
30,
23,
3,
1438962502
] |
def dump_headers(json_dict, hlir, keep_pragmas=False):
headers = []
id_ = 0
for name, p4_header_instance in hlir.p4_header_instances.items():
if p4_header_instance.virtual:
continue
header_instance_dict = OrderedDict()
header_instance_dict["name"] = name
header_instance_dict["id"] = id_
id_ += 1
header_instance_dict["header_type"] =\
p4_header_instance.header_type.name
header_instance_dict["metadata"] = p4_header_instance.metadata
if keep_pragmas:
add_pragmas(header_instance_dict, p4_header_instance)
for p4_field in p4_header_instance.fields:
if p4_field.default is not None and p4_field.default != 0:
LOG_CRITICAL(
"In file '{}' at line {}: "
"non-zero metadata initialization is not supported by this "
"backend; field '{}' cannot be initialized to {}".format(
p4_header_instance.filename, p4_header_instance.lineno,
str(p4_field), p4_field.default))
headers.append(header_instance_dict)
json_dict["headers"] = headers | p4lang/p4c-bm | [
23,
30,
23,
3,
1438962502
] |
def __init__(self, name, size, header_type):
self.name = name
self.size = size
self.header_type = header_type
self.ids = [] | p4lang/p4c-bm | [
23,
30,
23,
3,
1438962502
] |
def field_suffix(p4_field):
suffix = p4_field.name
if suffix == "valid":
suffix = "$valid$"
return suffix | p4lang/p4c-bm | [
23,
30,
23,
3,
1438962502
] |
def header_type_field_offset(p4_header_type, fname):
for idx, f in enumerate(p4_header_type.layout):
if f == fname:
return idx
LOG_CRITICAL("No field {} in header type {}".format( # pragma: no cover
fname, p4_header_type.name)) | p4lang/p4c-bm | [
23,
30,
23,
3,
1438962502
] |
def make_expression(op, L, R):
e = OrderedDict(
[("type", "expression"), ("value", OrderedDict(
[("op", op), ("left", L), ("right", R)]))])
return e | p4lang/p4c-bm | [
23,
30,
23,
3,
1438962502
] |
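Combining two operand dicts produces the nested expression node used in the JSON output; the field and hexstr operands below are illustrative values, not taken from a real program:

left = {"type": "field", "value": ["ipv4", "ttl"]}
right = {"type": "hexstr", "value": "0x1"}
expr = make_expression("-", left, right)
# expr == {"type": "expression",
#          "value": {"op": "-", "left": left, "right": right}}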
def format_hexstr(i):
# Python appends a L at the end of a long number representation, which we
# need to remove
return hex(i).rstrip("L") | p4lang/p4c-bm | [
23,
30,
23,
3,
1438962502
] |
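The trailing 'L' only appears on Python 2 longs, so the rstrip is a no-op elsewhere; two illustrative values:

assert format_hexstr(10) == "0xa"
# Python 2: hex(2 ** 64) == '0x10000000000000000L', so the 'L' is stripped
assert format_hexstr(2 ** 64) == "0x10000000000000000"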
def is_register_ref(obj):
try:
return (type(obj) is p4.p4_register_ref)
except AttributeError:
return False | p4lang/p4c-bm | [
23,
30,
23,
3,
1438962502
] |
def build_match_value(widths, value):
res = ""
for width in reversed(widths):
mask = (1 << width) - 1
val = value & mask
num_bytes = (width + 7) / 8
res = "{0:0{1}x}".format(val, 2 * num_bytes) + res
value = value >> width
return "0x" + res | p4lang/p4c-bm | [
23,
30,
23,
3,
1438962502
] |
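A short worked example of build_match_value with assumed widths and values: the last width in the list occupies the least-significant bits of value, and each field is zero-padded to whole bytes (the (width + 7) / 8 byte count relies on Python 2 integer division):

# a 16-bit field followed by an 8-bit field; the 8-bit field sits in the low bits
value = (0x0800 << 8) | 0x11
assert build_match_value([16, 8], value) == "0x080011"
# a 12-bit field is padded to 2 bytes
assert build_match_value([12], 0xabc) == "0x0abc"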
def dump_one_parser(parser_name, parser_id, p4_start_state, keep_pragmas=False):
parser_dict = OrderedDict()
parser_dict["name"] = parser_name
parser_dict["id"] = parser_id
parser_dict["init_state"] = p4_start_state.name
parse_states = []
accessible_parse_states = set()
accessible_parse_states_ordered = []
def find_accessible_parse_states(parse_state):
if parse_state in accessible_parse_states:
return
accessible_parse_states.add(parse_state)
accessible_parse_states_ordered.append(parse_state)
for _, next_state in parse_state.branch_to.items():
if isinstance(next_state, p4.p4_parse_state):
find_accessible_parse_states(next_state)
find_accessible_parse_states(p4_start_state)
for p4_parse_state in accessible_parse_states_ordered:
parse_state_dict = OrderedDict()
parse_state_dict["name"] = p4_parse_state.name
parse_state_dict["id"] = dump_one_parser.parse_state_id
dump_one_parser.parse_state_id += 1
parser_ops = []
for parser_op in p4_parse_state.call_sequence:
parser_op_dict = OrderedDict()
op_type = parser_op[0]
parameters = []
if op_type == p4.parse_call.extract:
parser_op_dict["op"] = "extract"
header = parser_op[1]
param_dict = OrderedDict()
if header.virtual:
param_dict["type"] = "stack"
param_dict["value"] = header.base_name
else:
param_dict["type"] = "regular"
param_dict["value"] = header.name
parameters.append(param_dict)
elif op_type == p4.parse_call.set:
parser_op_dict["op"] = "set"
dest_field, src = parser_op[1], parser_op[2]
assert type(dest_field) is p4.p4_field, \
    "parser assignment target should be a field"
dest_dict = OrderedDict()
src_dict = OrderedDict()
dest_dict["type"] = "field"
dest_dict["value"] = format_field_ref(dest_field)
parameters.append(dest_dict)
if type(src) is int or type(src) is long:
src_dict["type"] = "hexstr"
src_dict["value"] = format_hexstr(src)
elif type(src) is p4.p4_field:
src_dict = format_field_ref_expression(src, False)
elif type(src) is tuple:
src_dict["type"] = "lookahead"
src_dict["value"] = list(src)
elif type(src) is p4.p4_expression:
src_dict["type"] = "expression"
src_dict["value"] = dump_expression(src)
else: # pragma: no cover
LOG_CRITICAL("invalid src type for set_metadata: %s",
type(src))
parameters.append(src_dict)
else: # pragma: no cover
LOG_CRITICAL("invalid parser operation: %s", op_type)
parser_op_dict["parameters"] = parameters
parser_ops.append(parser_op_dict)
parse_state_dict["parser_ops"] = parser_ops
transition_key = []
field_widths = []
for switch_ref in p4_parse_state.branch_on:
switch_ref_dict = OrderedDict()
if type(switch_ref) is p4.p4_field:
field_widths.append(switch_ref.width)
header = switch_ref.instance
if header.virtual:
switch_ref_dict["type"] = "stack_field"
else:
switch_ref_dict["type"] = "field"
switch_ref_dict["value"] = format_field_ref(switch_ref)
elif type(switch_ref) is tuple:
field_widths.append(switch_ref[1])
switch_ref_dict["type"] = "lookahead"
switch_ref_dict["value"] = list(switch_ref)
else: # pragma: no cover
LOG_CRITICAL("not supported")
transition_key.append(switch_ref_dict)
parse_state_dict["transition_key"] = transition_key
transitions = []
for branch_case, next_state in p4_parse_state.branch_to.items():
transition_dict = OrderedDict()
value, mask, type_ = None, None, None
if branch_case == p4.P4_DEFAULT:
type_ = "default"
elif type(branch_case) is int:
type_ = "hexstr"
value = build_match_value(field_widths, branch_case)
elif type(branch_case) is tuple:
type_ = "hexstr"
value, mask = (build_match_value(field_widths, branch_case[0]),
build_match_value(field_widths, branch_case[1]))
elif type(branch_case) is p4.p4_parse_value_set:
type_ = "parse_vset"
value = branch_case.name
# mask not supported yet in compiler, even though it is
# supported in bmv2
mask = None
vset_bits = sum(field_widths)
if value in dump_parsers.vset_widths:
curr_bits = dump_parsers.vset_widths[value]
if curr_bits != vset_bits: # pragma: no cover
LOG_CRITICAL("when parser value set used multiple "
"times, widths cannot clash")
else:
dump_parsers.vset_widths[value] = vset_bits
else: # pragma: no cover
LOG_CRITICAL("invalid parser branching")
transition_dict["type"] = type_
transition_dict["value"] = value
transition_dict["mask"] = mask
if isinstance(next_state, p4.p4_parse_state):
transition_dict["next_state"] = next_state.name
else:
# we do not support control flows here anymore
transition_dict["next_state"] = None
transitions.append(transition_dict)
parse_state_dict["transitions"] = transitions
if keep_pragmas:
add_pragmas(parse_state_dict, p4_parse_state)
parse_states.append(parse_state_dict)
parser_dict["parse_states"] = parse_states
return parser_dict | p4lang/p4c-bm | [
23,
30,
23,
3,
1438962502
] |
def dump_parsers(json_dict, hlir, keep_pragmas=False):
parsers = []
parser_id = 0
for name, p4_parse_state in hlir.p4_parse_states.items():
new_name = None
if name == "start":
new_name = "parser"
elif "packet_entry" in p4_parse_state._pragmas:
new_name = name
if new_name:
parsers.append(dump_one_parser(
new_name, parser_id, p4_parse_state, keep_pragmas=keep_pragmas))
parser_id += 1
json_dict["parsers"] = parsers | p4lang/p4c-bm | [
23,
30,
23,
3,
1438962502
] |
def process_forced_header_ordering(hlir, ordering):
p4_ordering = []
for hdr_name in ordering:
if hdr_name in hlir.p4_header_instances:
p4_ordering.append(hlir.p4_header_instances[hdr_name])
elif hdr_name + "[0]" in hlir.p4_header_instances:
hdr_0 = hlir.p4_header_instances[hdr_name + "[0]"]
for index in xrange(hdr_0.max_index + 1):
indexed_name = hdr_name + "[" + str(index) + "]"
p4_ordering.append(hlir.p4_header_instances[indexed_name])
else:
return None
return p4_ordering | p4lang/p4c-bm | [
23,
30,
23,
3,
1438962502
] |
def __init__(self, parse_state, prev_hdr_node, tag_stacks_index):
self.current_state = parse_state
self.prev_hdr_node = prev_hdr_node
self.stacks = frozenset(tag_stacks_index.items()) | p4lang/p4c-bm | [
23,
30,
23,
3,
1438962502
] |
def __hash__(self):
return hash((self.current_state, self.prev_hdr_node, self.stacks)) | p4lang/p4c-bm | [
23,
30,
23,
3,
1438962502
] |
def __str__(self): # pragma: no cover
return "{}, {}, {}".format(
self.current_state, self.prev_hdr_node, self.stacks) | p4lang/p4c-bm | [
23,
30,
23,
3,
1438962502
] |
def walk_rec(hlir, parse_state, prev_hdr_node, tag_stacks_index, visited,
recursion_states):
assert(isinstance(parse_state, p4.p4_parse_state))
rec_state = State(parse_state, prev_hdr_node, tag_stacks_index)
if rec_state in recursion_states:
return
recursion_states.add(rec_state)
for call in parse_state.call_sequence:
call_type = call[0]
if call_type == p4.parse_call.extract:
hdr = call[1]
if hdr.virtual:
base_name = hdr.base_name
current_index = tag_stacks_index[base_name]
if current_index > hdr.max_index:
return
tag_stacks_index[base_name] += 1
name = base_name + "[%d]" % current_index
hdr = hlir.p4_header_instances[name]
# takes care of loops in parser (e.g. for TLV parsing)
elif parse_state in visited:
return
if hdr not in header_graph:
header_graph.add_node(hdr)
hdr_node = header_graph.get_node(hdr)
if prev_hdr_node:
prev_hdr_node.add_edge_to(hdr_node)
else:
header_graph.root = hdr
prev_hdr_node = hdr_node
for branch_case, next_state in parse_state.branch_to.items():
if not next_state:
continue
if not isinstance(next_state, p4.p4_parse_state):
continue
walk_rec(hlir, next_state, prev_hdr_node,
tag_stacks_index.copy(), visited | {parse_state},
recursion_states) | p4lang/p4c-bm | [
23,
30,
23,
3,
1438962502
] |
def dump_one_deparser(deparser_name, deparser_id, p4_start_state, hlir):
deparser_dict = OrderedDict()
deparser_dict["name"] = deparser_name
deparser_dict["id"] = deparser_id
header_topo_sorting = produce_parser_topo_sorting(hlir, p4_start_state)
deparser_order = [hdr.name for hdr in header_topo_sorting]
deparser_dict["order"] = deparser_order
dump_one_deparser.header_set.update(set(header_topo_sorting))
return deparser_dict | p4lang/p4c-bm | [
23,
30,
23,
3,
1438962502
] |
def dump_deparsers(json_dict, hlir, p4_v1_1=False):
deparsers = []
deparser_id = 0
for name, p4_parse_state in hlir.p4_parse_states.items():
new_name = None
if name == "start":
new_name = "deparser"
elif "packet_entry" in p4_parse_state._pragmas:
new_name = name
if new_name:
deparsers.append(
dump_one_deparser(new_name, deparser_id, p4_parse_state, hlir))
deparser_id += 1
check_added_headers_in_parse_graph(hlir, dump_one_deparser.header_set,
p4_v1_1=p4_v1_1)
json_dict["deparsers"] = deparsers | p4lang/p4c-bm | [
23,
30,
23,
3,
1438962502
] |
def get_nodes(pipe_ptr, node_set):
if pipe_ptr is None:
return
if pipe_ptr in node_set:
return
node_set.add(pipe_ptr)
for next_node in pipe_ptr.next_.values():
get_nodes(next_node, node_set) | p4lang/p4c-bm | [
23,
30,
23,
3,
1438962502
] |
def get_table_match_type(p4_table):
match_types = []
for _, m_type, _ in p4_table.match_fields:
match_types.append(match_type_to_str(m_type))
if len(match_types) == 0:
match_type = "exact"
elif "range" in match_types:
match_type = "range"
elif "ternary" in match_types:
match_type = "ternary"
elif match_types.count("lpm") >= 2: # pragma: no cover
LOG_CRITICAL("cannot have 2 different lpm matches in a single table")
elif "lpm" in match_types:
match_type = "lpm"
else:
# that includes the case when we only have one valid match and
# nothing else
match_type = "exact"
return match_type | p4lang/p4c-bm | [
23,
30,
23,
3,
1438962502
] |
def dump_action_profile(pipe_name, action_profiles, p4_action_profile,
keep_pragmas=False):
# check that the same action profile is not referenced across multiple
control flows. This is somewhat of an artificial restriction imposed by the
# pipeline abstraction in the JSON
if p4_action_profile in dump_action_profile.referenced:
if dump_action_profile.referenced[p4_action_profile] != pipe_name:
LOG_CRITICAL("action profile {} cannot be referenced in different "
"control flows".format(p4_action_profile.name))
else:
dump_action_profile.referenced[p4_action_profile] = pipe_name
act_prof_dict = OrderedDict()
act_prof_dict["name"] = p4_action_profile.name
act_prof_dict["id"] = dump_action_profile.act_prof_id
dump_action_profile.act_prof_id += 1
act_prof_dict["max_size"] = p4_action_profile.size
if p4_action_profile.selector is not None:
p4_selector = p4_action_profile.selector
selector = OrderedDict()
if type(p4_selector.selection_key.algorithm) is list:
# we already print a warning for this case in dump_calculations
selector["algo"] = p4_selector.selection_key.algorithm[0]
else:
selector["algo"] = p4_selector.selection_key.algorithm
elements = []
assert(len(p4_selector.selection_key.input) == 1)
for field in p4_selector.selection_key.input[0].fields:
element_dict = OrderedDict()
if type(field) is not p4.p4_field: # pragma: no cover
LOG_CRITICAL("only fields supported in field lists")
element_dict["type"] = "field"
element_dict["value"] = format_field_ref(field)
elements.append(element_dict)
selector["input"] = elements
act_prof_dict["selector"] = selector
if keep_pragmas:
add_pragmas(act_prof_dict, p4_action_profile)
action_profiles.append(act_prof_dict) | p4lang/p4c-bm | [
23,
30,
23,
3,
1438962502
] |
def dump_one_pipeline(json_dict, pipe_name, pipe_ptr, hlir, keep_pragmas=False):
def get_table_name(p4_table):
if not p4_table:
return None
return p4_table.name
def table_has_counters(p4_table):
for name, counter in hlir.p4_counters.items():
if counter.binding == (p4.P4_DIRECT, p4_table):
return True
return False
def table_direct_meters(p4_table):
for name, meter in hlir.p4_meters.items():
if meter.binding == (p4.P4_DIRECT, p4_table):
return name
return None
pipeline_dict = OrderedDict()
pipeline_dict["name"] = pipe_name
pipeline_dict["id"] = dump_one_pipeline.pipeline_id
dump_one_pipeline.pipeline_id += 1
pipeline_dict["init_table"] = get_table_name(pipe_ptr)
node_set = set()
get_nodes(pipe_ptr, node_set)
tables = []
action_profiles = []
for name, table in hlir.p4_tables.items():
if table not in node_set:
continue
table_dict = OrderedDict()
table_dict["name"] = name
table_dict["id"] = dump_one_pipeline.table_id
dump_one_pipeline.table_id += 1
match_type = get_table_match_type(table)
table_dict["match_type"] = match_type
table_dict["type"] = get_table_type(table)
if table_dict["type"] == "indirect" or\
table_dict["type"] == "indirect_ws":
table_dict["action_profile"] = table.action_profile.name
dump_action_profile(pipe_name, action_profiles,
table.action_profile, keep_pragmas=keep_pragmas)
table_dict["max_size"] = table.max_size if table.max_size else 16384
# TODO(antonin): update counters to be the same as direct meters, but
# that would make the JSON non-backwards compatible
table_dict["with_counters"] = table_has_counters(table)
table_dict["direct_meters"] = table_direct_meters(table)
table_dict["support_timeout"] = table.support_timeout
key = []
for field_ref, m_type, mask in table.match_fields:
key_field = OrderedDict()
match_type = match_type_to_str(m_type)
key_field["match_type"] = match_type
if match_type != "valid":
assert(isinstance(field_ref, p4.p4_field))
field_width = field_ref.width
if field_width == p4.P4_AUTO_WIDTH:
LOG_CRITICAL(
"Cannot match on field '{}' as matching on "
"variable-length fields is not supported".format(
field_ref))
if match_type == "valid":
if isinstance(field_ref, p4.p4_field):
header_ref = field_ref.instance
else:
header_ref = field_ref
assert(type(header_ref) is p4.p4_header_instance)
key_field["target"] = header_ref.name
else:
key_field["target"] = format_field_ref(field_ref)
if mask:
if match_type == "valid":
LOG_WARNING("a field mask does not make much sense for a "
"valid match")
field_width = 1
else:
assert(isinstance(field_ref, p4.p4_field))
field_width = field_ref.width
# re-using this function (used by parser)
mask = build_match_value([field_width], mask)
LOG_INFO("you are using a mask in a match table, "
"this is still an experimental feature")
else:
mask = None # should already be the case
key_field["mask"] = mask
key.append(key_field)
table_dict["key"] = key
table_dict["actions"] = [a.name for a in table.actions]
next_tables = OrderedDict()
if "hit" in table.next_:
next_tables["__HIT__"] = get_table_name(table.next_["hit"])
next_tables["__MISS__"] = get_table_name(table.next_["miss"])
else:
for a, nt in table.next_.items():
next_tables[a.name] = get_table_name(nt)
table_dict["next_tables"] = next_tables
# temporarily not covered by tests, because not part of P4 spec
if hasattr(table, "default_action") and\
table.default_action is not None:
LOG_INFO("you are using the default_entry table attribute, "
"this is still an experimental feature")
action, data = table.default_action
default_entry = OrderedDict()
for j_action in json_dict["actions"]:
if j_action["name"] == action.name:
default_entry["action_id"] = j_action["id"]
default_entry["action_const"] = True
if data is not None:
default_entry["action_data"] = [format_hexstr(i) for i in data]
default_entry["action_entry_const"] = False
table_dict["default_entry"] = default_entry
# TODO: temporary, to ensure backwards compatibility
if hasattr(table, "base_default_next"):
table_dict["base_default_next"] = get_table_name(
table.base_default_next)
else: # pragma: no cover
LOG_WARNING("Your 'p4-hlir' is out-of-date, consider updating")
if keep_pragmas:
add_pragmas(table_dict, table)
tables.append(table_dict)
pipeline_dict["tables"] = tables
pipeline_dict["action_profiles"] = action_profiles
conditionals = []
for name, cnode in hlir.p4_conditional_nodes.items():
if cnode not in node_set:
continue
conditional_dict = OrderedDict()
conditional_dict["name"] = name
conditional_dict["id"] = dump_one_pipeline.condition_id
dump_one_pipeline.condition_id += 1
conditional_dict["expression"] = dump_expression(cnode.condition)
conditional_dict["true_next"] = get_table_name(cnode.next_[True])
conditional_dict["false_next"] = get_table_name(cnode.next_[False])
if keep_pragmas:
add_pragmas(conditional_dict, cnode)
conditionals.append(conditional_dict)
pipeline_dict["conditionals"] = conditionals
return pipeline_dict | p4lang/p4c-bm | [
23,
30,
23,
3,
1438962502
] |
def index_OrderedDict(self, kf):
idx = 0
for k, v in self.items():
if(k == kf):
return idx
idx += 1 | p4lang/p4c-bm | [
23,
30,
23,
3,
1438962502
] |
def field_list_to_learn_id(p4_field_list):
ids = field_list_to_learn_id.ids
if p4_field_list in ids:
return ids[p4_field_list]
idx = len(ids) + 1
ids[p4_field_list] = idx
return idx | p4lang/p4c-bm | [
23,
30,
23,
3,
1438962502
] |
def field_list_to_id(p4_field_list):
ids = field_list_to_id.ids
if p4_field_list in ids:
return ids[p4_field_list]
idx = len(ids) + 1
ids[p4_field_list] = idx
return idx | p4lang/p4c-bm | [
23,
30,
23,
3,
1438962502
] |
def dump_actions(json_dict, hlir, p4_v1_1=False, keep_pragmas=False):
actions = []
action_id = 0
table_actions_set = get_p4_action_set(hlir)
for action in table_actions_set:
action_dict = OrderedDict()
action_dict["name"] = action.name
action_dict["id"] = action_id
action_id += 1
runtime_data = []
param_with_bit_widths = OrderedDict()
for param, width in zip(action.signature, action.signature_widths):
if not width: # pragma: no cover
LOG_CRITICAL(
"unused parameter '{}' in action '{}' definition".format(
param, action.name))
param_with_bit_widths[param] = width
param_dict = OrderedDict()
param_dict["name"] = param
param_dict["bitwidth"] = width
runtime_data.append(param_dict)
action_dict["runtime_data"] = runtime_data
def is_stack_ref(call_idx, arg_idx, primitive_name):
# legacy case
if not hasattr(action, "stack_indices"): # pragma: no cover
return (primitive_name in {"push", "pop"} and arg_idx == 0)
stack_indices = action.stack_indices[call_idx]
return (arg_idx in stack_indices)
primitives = []
for call_idx, call in enumerate(action.flat_call_sequence):
primitive_dict = OrderedDict()
if p4_v1_1 and type(call[0]) is p4.p4_extern_method:
primitive_name = "_" + call[0].parent.extern_type.name \
+ "_" + call[0].name
primitive_dict["op"] = primitive_name
args = [call[0].parent.name] + call[1]
else:
primitive_name = call[0].name
primitive_dict["op"] = primitive_name
args = call[1]
# backwards compatibility with older P4 programs
if primitive_name == "modify_field" and len(args) == 3:
LOG_WARNING(
"Your P4 program uses the modify_field() action primitive "
"with 3 arguments (aka masked modify), bmv2 does not "
"support it anymore and this compiler will replace your "
"modify_field(a, b, c) with "
"modify_field(a, (a & ~c) | (b & c))")
Lexpr = p4.p4_expression(args[0], "&",
p4.p4_expression(None, "~", args[2]))
Rexpr = p4.p4_expression(args[1], "&", args[2])
new_arg = p4.p4_expression(Lexpr, "|", Rexpr)
args = [args[0], new_arg]
primitive_args = []
for arg_idx, arg in enumerate(args):
arg_dict = OrderedDict()
if type(arg) is int or type(arg) is long:
arg_dict["type"] = "hexstr"
arg_dict["value"] = format_hexstr(arg)
elif type(arg) is p4.p4_sized_integer:
# TODO(antonin)
arg_dict["type"] = "hexstr"
arg_dict["value"] = format_hexstr(arg)
elif type(arg) is p4.p4_field:
arg_dict["type"] = "field"
arg_dict["value"] = format_field_ref(arg)
elif type(arg) is p4.p4_header_instance:
arg_dict["type"] = "header"
arg_dict["value"] = arg.name
elif p4_v1_1 and type(arg) is p4.p4_header_stack:
arg_dict["type"] = "header_stack"
arg_dict["value"] = re.sub(r'\[.*\]', '', arg.name)
elif type(arg) is p4.p4_signature_ref:
arg_dict["type"] = "runtime_data"
arg_dict["value"] = arg.idx
elif type(arg) is p4.p4_field_list:
# hack for generate_digest calls
if primitive_name == "generate_digest":
id_ = field_list_to_learn_id(arg)
elif "clone" in primitive_name or\
primitive_name in {"resubmit", "recirculate"}:
id_ = field_list_to_id(arg)
arg_dict["type"] = "hexstr"
arg_dict["value"] = format_hexstr(id_)
elif type(arg) is p4.p4_field_list_calculation:
arg_dict["type"] = "calculation"
arg_dict["value"] = arg.name
elif type(arg) is p4.p4_meter:
arg_dict["type"] = "meter_array"
arg_dict["value"] = arg.name
elif type(arg) is p4.p4_counter:
arg_dict["type"] = "counter_array"
arg_dict["value"] = arg.name
elif type(arg) is p4.p4_register:
arg_dict["type"] = "register_array"
arg_dict["value"] = arg.name
elif type(arg) is p4.p4_expression:
arg_dict["type"] = "expression"
arg_dict["value"] = dump_expression(arg)
elif is_register_ref(arg):
arg_dict["type"] = "register"
arg_dict["value"] = format_register_ref(arg)
elif p4_v1_1 and type(call[0]) is p4.p4_extern_method:
if arg == call[0].parent.name:
arg_dict["type"] = "extern"
arg_dict["value"] = arg
else: # pragma: no cover
LOG_CRITICAL("action arg type is not supported: %s",
type(arg))
if (not p4_v1_1)\
and is_stack_ref(call_idx, arg_idx, primitive_name):
assert(arg_dict["type"] == "header")
arg_dict["type"] = "header_stack"
arg_dict["value"] = re.sub(r'\[.*\]', '', arg_dict["value"])
primitive_args.append(arg_dict)
primitive_dict["parameters"] = primitive_args
primitives.append(primitive_dict)
action_dict["primitives"] = primitives
if keep_pragmas:
add_pragmas(action_dict, action)
actions.append(action_dict)
json_dict["actions"] = actions | p4lang/p4c-bm | [
23,
30,
23,
3,
1438962502
] |
def dump_checksums(json_dict, hlir):
checksums = []
id_ = 0
for name, p4_header_instance in hlir.p4_header_instances.items():
for field_instance in p4_header_instance.fields:
field_ref = format_field_ref(field_instance)
field_name = '.'.join(field_ref)
for calculation in field_instance.calculation:
checksum_dict = OrderedDict()
type_, calc, if_cond = calculation
if type_ == "verify": # pragma: no cover
LOG_WARNING(
"The P4 program defines a checksum verification on "
"field '{}'; as of now bmv2 ignores all checksum "
"verifications; checksum updates are processed "
"correctly.".format(field_name))
continue
different_width = (calc.output_width != field_instance.width)
if different_width: # pragma: no cover
LOG_CRITICAL(
"For checksum on field '{}', the field width is "
"different from the calulation output width."
.format(field_name))
# if we want the name to be unique, it has to (at least) include
# the name of the calculation; however, do we really need the
# name to be unique?
checksum_dict["name"] = "|".join([field_name, calc.name])
checksum_dict["id"] = id_
id_ += 1
checksum_dict["target"] = field_ref
checksum_dict["type"] = "generic"
checksum_dict["calculation"] = calc.name
checksum_dict["if_cond"] = None
if if_cond is not None:
assert(type(if_cond) is p4.p4_expression)
checksum_dict["if_cond"] = dump_expression(if_cond)
checksums.append(checksum_dict)
json_dict["checksums"] = checksums | p4lang/p4c-bm | [
23,
30,
23,
3,
1438962502
] |
def dump_learn_lists(json_dict, hlir):
learn_lists = []
learn_list_ids = field_list_to_learn_id.ids
for p4_field_list, id_ in learn_list_ids.items():
learn_list_dict = OrderedDict()
learn_list_dict["id"] = id_
learn_list_dict["name"] = p4_field_list.name
elements = []
for field in p4_field_list.fields:
element_dict = OrderedDict()
if type(field) is not p4.p4_field: # pragma: no cover
LOG_CRITICAL("only fields supported in field lists for now")
element_dict["type"] = "field"
element_dict["value"] = format_field_ref(field)
elements.append(element_dict)
learn_list_dict["elements"] = elements
learn_lists.append(learn_list_dict)
learn_lists.sort(key=lambda field_list: field_list["id"])
json_dict["learn_lists"] = learn_lists | p4lang/p4c-bm | [
23,
30,
23,
3,
1438962502
] |
def dump_meters(json_dict, hlir, keep_pragmas=False):
meters = []
id_ = 0
for name, p4_meter in hlir.p4_meters.items():
meter_dict = OrderedDict()
meter_dict["name"] = name
meter_dict["id"] = id_
id_ += 1
if p4_meter.binding and (p4_meter.binding[0] == p4.P4_DIRECT):
meter_dict["is_direct"] = True
meter_dict["binding"] = p4_meter.binding[1].name
meter_dict["size"] = p4_meter.binding[1].max_size
meter_dict["result_target"] = format_field_ref(p4_meter.result)
else:
meter_dict["is_direct"] = False
meter_dict["size"] = p4_meter.instance_count
meter_dict["rate_count"] = 2 # 2 rate, 3 colors
if p4_meter.type == p4.P4_COUNTER_BYTES:
type_ = "bytes"
elif p4_meter.type == p4.P4_COUNTER_PACKETS:
type_ = "packets"
else: # pragma: no cover
LOG_CRITICAL("invalid meter type")
meter_dict["type"] = type_
if keep_pragmas:
add_pragmas(meter_dict, p4_meter)
meters.append(meter_dict)
json_dict["meter_arrays"] = meters | p4lang/p4c-bm | [
23,
30,
23,
3,
1438962502
] |
def dump_registers(json_dict, hlir, keep_pragmas=False):
registers = []
id_ = 0
for name, p4_register in hlir.p4_registers.items():
if p4_register.binding and (p4_register.binding[0] == p4.P4_DIRECT):
LOG_CRITICAL("'{}' is a direct register; direct registers are not "
"supported by bmv2".format(name))
register_dict = OrderedDict()
register_dict["name"] = name
register_dict["id"] = id_
id_ += 1
if p4_register.layout is not None: # pragma: no cover
LOG_CRITICAL("registers with layout not supported")
register_dict["bitwidth"] = p4_register.width
register_dict["size"] = p4_register.instance_count
if keep_pragmas:
add_pragmas(register_dict, p4_register)
registers.append(register_dict)
json_dict["register_arrays"] = registers | p4lang/p4c-bm | [
23,
30,
23,
3,
1438962502
] |
def dump_force_arith(json_dict, hlir):
force_arith = []
headers = ["standard_metadata", "intrinsic_metadata"]
for header_name in headers:
if header_name not in hlir.p4_header_instances:
continue
p4_header_instance = hlir.p4_header_instances[header_name]
p4_header_type = p4_header_instance.header_type
for field, _ in p4_header_type.layout.items():
force_arith.append([header_name, field])
json_dict["force_arith"] = force_arith | p4lang/p4c-bm | [
23,
30,
23,
3,
1438962502
] |
def dump_extern_instances(json_dict, hlir):
extern_instances = []
id_ = 0
for name, p4_extern_instance in hlir.p4_extern_instances.items():
extern_instance_dict = OrderedDict()
extern_instance_dict["name"] = name
extern_instance_dict["id"] = id_
extern_instance_dict["type"] = p4_extern_instance.extern_type.name
id_ += 1
attributes = []
for attribute, attr in p4_extern_instance.attributes.items():
attr_type = p4_extern_instance.extern_type.attributes[attribute].\
value_type.type_name
if attr_type != "bit" and attr_type != "int": # pragma: no cover
LOG_CRITICAL(
"Attribute type '{}' not supported for the "
"extern type '{}'. Supported values are bit and int".
format(attr_type, p4_extern_instance.extern_type.name))
attribute_dict = OrderedDict()
attribute_dict["name"] = attribute
attribute_dict["type"] = "hexstr"
attribute_dict["value"] = hex(attr)
attributes.append(attribute_dict)
extern_instance_dict["attribute_values"] = attributes
extern_instances.append(extern_instance_dict)
json_dict["extern_instances"] = extern_instances | p4lang/p4c-bm | [
23,
30,
23,
3,
1438962502
] |
def setUp(self):
super(AwsVpcS3EndpointTest, self).setUp()
self.mock_vpc = mock.Mock()
self.mock_vpc.region = REGION
self.mock_run_cmd = self.enter_context(
mock.patch.object(aws_vpc_endpoint.AwsVpcS3Endpoint, '_RunCommand')) | GoogleCloudPlatform/PerfKitBenchmarker | [
1785,
474,
1785,
248,
1405617806
] |
def testEndPointIdNoVpc(self):
# initialize with no VPC means no immediate lookups done
endpoint = self._InitEndpoint(None)
self.assertIsNone(endpoint.id)
endpoint._RunCommand.assert_not_called() | GoogleCloudPlatform/PerfKitBenchmarker | [
1785,
474,
1785,
248,
1405617806
] |
def testCreate(self):
# shows that a call to .Create() will get the routing table info followed
# by the create-vpc-endpoint call
endpoint = self._InitEndpoint(VPC_ID)
self.mock_run_cmd.reset_mock()
self.mock_run_cmd.side_effect = [
[], # query for endpoint id
[ROUTE_TABLE_ID], # query for route tables
CREATE_RES, # _Create()
[ENDPOINT_ID], # _Exists()
]
endpoint.Create()
calls = endpoint._RunCommand.call_args_list
self.assertEqual(mock.call(QUERY_ENDPOINTS_CMD), calls[0])
self.assertEqual(mock.call(DESCRIBE_ROUTES_CMD), calls[1])
self.assertEqual(mock.call(CREATE_ENDPOINT_CMD), calls[2])
self.assertEqual(mock.call(QUERY_ENDPOINTS_CMD), calls[3])
self.assertEqual(ENDPOINT_ID, endpoint.id) | GoogleCloudPlatform/PerfKitBenchmarker | [
1785,
474,
1785,
248,
1405617806
] |
def install(self, env):
self.install_packages(env) | arenadata/ambari | [
3,
7,
3,
3,
1478181309
] |
def pre_upgrade_restart(self, env, upgrade_type=None):
import params
env.set_params(params)
if params.version and compare_versions(format_stack_version(params.version), '4.1.0.0') >= 0:
stack_select.select_packages(params.version)
# This is extremely important since it should only be called if crossing the IOP 4.2 boundary.
if params.version and params.upgrade_direction:
src_version = dst_version = None
if params.upgrade_direction == Direction.UPGRADE:
src_version = upgrade_summary.get_source_version("KAFKA", default_version = params.version)
dst_version = upgrade_summary.get_target_version("KAFKA", default_version = params.version)
else:
# These represent the original values during the UPGRADE direction
src_version = upgrade_summary.get_target_version("KAFKA", default_version = params.version)
dst_version = upgrade_summary.get_source_version("KAFKA", default_version = params.version)
if compare_versions(src_version, '4.2.0.0') < 0 and compare_versions(dst_version, '4.2.0.0') >= 0:
# Upgrade from IOP 4.1 to 4.2, Calling the acl migration script requires the configs to be present.
self.configure(env, upgrade_type=upgrade_type)
upgrade.run_migration(env, upgrade_type) | arenadata/ambari | [
3,
7,
3,
3,
1478181309
] |
def stop(self, env, upgrade_type=None):
import params
env.set_params(params)
ensure_base_directories()
daemon_cmd = format('source {params.conf_dir}/kafka-env.sh; {params.kafka_bin} stop')
Execute(daemon_cmd,
user=params.kafka_user,
)
File (params.kafka_pid_file,
action = "delete"
) | arenadata/ambari | [
3,
7,
3,
3,
1478181309
] |
def verify_ids(doc_iter, es_host, index, doc_type=None, step=100000, ):
'''verify how many docs from the input iterator/list overlap with existing docs.'''
index = index
doc_type = doc_type
es = get_es(es_host)
q = {'query': {'ids': {"values": []}}}
total_cnt = 0
found_cnt = 0
out = []
for doc_batch in iter_n(doc_iter, n=step):
id_li = [doc['_id'] for doc in doc_batch]
# id_li = [doc['_id'].replace('chr', '') for doc in doc_batch]
q['query']['ids']['values'] = id_li
xres = es.search(index=index, doc_type=doc_type, body=q, _source=False)
found_cnt += xres['hits']['total']
total_cnt += len(id_li)
out.extend([x['_id'] for x in xres['hits']['hits']])
return out | biothings/biothings.api | [
39,
25,
39,
80,
1452637246
] |
def wrapper(func):
'''this wrapper allows passing index and doc_type from wrapped method.'''
def outter_fn(*args, **kwargs):
self = args[0]
index = kwargs.pop('index', self._index) # pylint: disable=protected-access
doc_type = kwargs.pop('doc_type', self._doc_type) # pylint: disable=protected-access
self._index = index # pylint: disable=protected-access
self._doc_type = doc_type # pylint: disable=protected-access
return func(*args, **kwargs)
outter_fn.__doc__ = func.__doc__
return outter_fn | biothings/biothings.api | [
39,
25,
39,
80,
1452637246
] |
def __init__(self, client, index_name):
self.client = client # synchronous
self.index_name = index_name # MUST exist | biothings/biothings.api | [
39,
25,
39,
80,
1452637246
] |
def doc_type(self):
if int(self.client.info()['version']['number'].split('.')[0]) < 7:
mappings = self.client.indices.get_mapping(self.index_name)
mappings = mappings[self.index_name]["mappings"]
return next(iter(mappings.keys()))
return None | biothings/biothings.api | [
39,
25,
39,
80,
1452637246
] |
def __init__(self, index, doc_type='_doc', es_host='localhost:9200',
step=500, step_size=10, # elasticsearch.helpers.bulk
number_of_shards=1, number_of_replicas=0,
check_index=True, **kwargs):
self.es_host = es_host
self._es = get_es(es_host, **kwargs)
self._host_major_ver = int(self._es.info()['version']['number'].split('.')[0])
if check_index:
# if index is actually an alias, resolve the alias to
# the real underlying index
try:
res = self._es.indices.get_alias(index=index)
# this was an alias
assert len(res) == 1, "Expecting '%s' to be an alias, but got nothing..." % index
self._index = list(res.keys())[0]
except NotFoundError:
# this was a real index name
self._index = index
self._doc_type = None
if doc_type:
self._doc_type = doc_type
else:
# assuming index exists, get mapping to discover doc_type
try:
m = self.get_mapping()
assert len(m) == 1, "Expected only one doc type, got: %s" % m.keys()
self._doc_type = list(m).pop()
except Exception as e: # pylint: disable=broad-except
if check_index:
logging.info("Failed to guess doc_type: %s", e)
self.number_of_shards = number_of_shards # set number_of_shards when create_index
self.number_of_replicas = int(number_of_replicas) # set number_of_replicas when create_index
self.step = step or 500 # the bulk size when doing bulk operation.
self.step_size = (step_size or 10) * 1048576 # MB -> bytes
self.s = None # number of records to skip, useful to continue indexing after an error. | biothings/biothings.api | [
39,
25,
39,
80,
1452637246
] |
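The methods that follow form a thin indexing wrapper around the elasticsearch client; a minimal usage sketch, with host, index name, mapping and documents all assumed for illustration (the constructor contacts the cluster to resolve aliases and, if needed, guess the doc type):

indexer = ESIndexer(index="mygenes", doc_type="_doc", es_host="localhost:9200")
if not indexer.exists_index():
    indexer.create_index(mapping={"properties": {"symbol": {"type": "keyword"}}})
indexer.index_bulk([{"_id": "1017", "symbol": "CDK2"}])
doc = indexer.get_biothing("1017", only_source=True)
# doc == {"symbol": "CDK2", "_id": "1017"}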
def get_biothing(self, bid, only_source=False, **kwargs):
rawdoc = self._es.get(index=self._index, id=bid, doc_type=self._doc_type, **kwargs)
if not only_source:
return rawdoc
else:
doc = rawdoc['_source']
doc["_id"] = rawdoc["_id"]
return doc | biothings/biothings.api | [
39,
25,
39,
80,
1452637246
] |
def exists(self, bid):
"""return True/False if a biothing id exists or not."""
try:
doc = self.get_biothing(bid, stored_fields=None)
return doc['found']
except NotFoundError:
return False | biothings/biothings.api | [
39,
25,
39,
80,
1452637246
] |
def mexists(self, bid_list):
q = {
"query": {
"ids": {
"values": bid_list
}
}
}
res = self._es.search(index=self._index, doc_type=self._doc_type, body=q, stored_fields=None, size=len(bid_list))
# id_set = set([doc['_id'] for doc in res['hits']['hits']]) # TODO: Confirm this line
id_set = {doc['_id'] for doc in res['hits']['hits']}
return [(bid, bid in id_set) for bid in bid_list] | biothings/biothings.api | [
39,
25,
39,
80,
1452637246
] |
def count(self, q=None, raw=False):
try:
_res = self._es.count(index=self._index, doc_type=self._doc_type, body=q)
return _res if raw else _res['count']
except NotFoundError:
return None | biothings/biothings.api | [
39,
25,
39,
80,
1452637246
] |
def count_src(self, src):
if isinstance(src, str):
src = [src]
cnt_d = {}
for _src in src:
q = {
"query": {
"constant_score": {
"filter": {
"exists": {"field": _src}
}
}
}
}
cnt_d[_src] = self.count(q)
return cnt_d | biothings/biothings.api | [
39,
25,
39,
80,
1452637246
] |
def create_index(self, mapping=None, extra_settings=None):
if not self._es.indices.exists(index=self._index):
body = {
'settings': {
'number_of_shards': self.number_of_shards,
"number_of_replicas": self.number_of_replicas,
}
}
extra_settings = extra_settings or {}
body["settings"].update(extra_settings)
if mapping:
# the mapping is passed in for elasticsearch 6
# if the remote server is of elasticsearch version 7 or later
# drop the doc_type first level key as it is no longer supported
self._populate_es_version()
if self._host_major_ver > 6:
if len(mapping) == 1 and next(iter(mapping)) not in ('properties', 'dynamic', '_meta'):
mapping = next(iter(mapping.values()))
mapping = {"mappings": mapping}
body.update(mapping)
self._es.indices.create(index=self._index, body=body) | biothings/biothings.api | [
39,
25,
39,
80,
1452637246
] |
def exists_index(self):
return self._es.indices.exists(index=self._index) | biothings/biothings.api | [
39,
25,
39,
80,
1452637246
] |
def index_bulk(self, docs, step=None, action='index'):
self._populate_es_version()
index_name = self._index
doc_type = self._doc_type
step = step or self.step
def _get_bulk(doc):
# keep original doc
ndoc = copy.copy(doc)
ndoc.update({
"_index": index_name,
"_type": doc_type,
"_op_type": action,
})
if self._host_major_ver > 6:
ndoc.pop("_type")
return ndoc
actions = (_get_bulk(doc) for doc in docs)
num_ok, errors = helpers.bulk(self._es, actions, chunk_size=step, max_chunk_bytes=self.step_size)
if errors:
raise ElasticsearchException("%d errors while bulk-indexing: %s" % (len(errors), [str(e) for e in errors]))
return num_ok, errors | biothings/biothings.api | [
39,
25,
39,
80,
1452637246
] |
def delete_docs(self, ids, step=None):
'''delete a list of docs in bulk.'''
index_name = self._index
doc_type = self._doc_type
step = step or self.step
def _get_bulk(_id):
if self._host_major_ver >= 7:
doc = {
'_op_type': 'delete',
"_index": index_name,
"_id": _id
}
else:
doc = {
'_op_type': 'delete',
"_index": index_name,
"_type": doc_type,
"_id": _id
}
return doc
actions = (_get_bulk(_id) for _id in ids)
return helpers.bulk(self._es, actions, chunk_size=step, stats_only=True, raise_on_error=False) | biothings/biothings.api | [
39,
25,
39,
80,
1452637246
] |
def update(self, id, extra_doc, upsert=True): # pylint: disable=redefined-builtin
'''update an existing doc with extra_doc.
allows setting upsert=True to insert new docs.
'''
body = {'doc': extra_doc}
if upsert:
body['doc_as_upsert'] = True
return self._es.update(index=self._index, doc_type=self._doc_type, id=id, body=body) | biothings/biothings.api | [
39,
25,
39,
80,
1452637246
] |
def _get_bulk(doc):
if self._host_major_ver >= 7:
doc = {
'_op_type': 'update',
"_index": index_name,
"_id": doc['_id'],
"doc": doc
}
else:
doc = {
'_op_type': 'update',
"_index": index_name,
"_type": doc_type,
"_id": doc['_id'],
"doc": doc
}
if upsert:
doc['doc_as_upsert'] = True
return doc | biothings/biothings.api | [
39,
25,
39,
80,
1452637246
] |
def get_mapping(self):
"""return the current index mapping"""
if self._host_major_ver <= 6:
m = self._es.indices.get_mapping(
index=self._index,
doc_type=self._doc_type,
)
return m[self._index]["mappings"]
elif self._host_major_ver == 7:
m = self._es.indices.get_mapping(
index=self._index
)
# fake the mapping doc_type
m = {
self._doc_type: m[self._index]["mappings"]
}
return m
else:
raise RuntimeError(f"Server Elasticsearch version is {self._host_major_ver} "
"which is unsupported when using old ESIndexer class") | biothings/biothings.api | [
39,
25,
39,
80,
1452637246
] |
def get_mapping_meta(self):
"""return the current _meta field."""
m = self.get_mapping()
doc_type = self._doc_type
if doc_type is None:
# fetch doc_type from mapping
assert len(m) == 1, "More than one doc_type found, not supported when self._doc_type " + \
"is not initialized"
doc_type = list(m.keys())[0]
return {"_meta": m[doc_type]["_meta"]} | biothings/biothings.api | [
39,
25,
39,
80,
1452637246
] |
def build_index(self, collection, verbose=True, query=None, bulk=True, update=False, allow_upsert=True):
index_name = self._index
# update some settings for bulk indexing
body = {
"index": {
"refresh_interval": "-1", # disable refresh temporarily
"auto_expand_replicas": "0-all",
}
}
self._es.indices.put_settings(body=body, index=index_name)
try:
self._build_index_sequential(collection, verbose, query=query, bulk=bulk, update=update, allow_upsert=allow_upsert)
finally:
# restore some settings after bulk indexing is done.
body = {
"index": {
"refresh_interval": "1s" # default settings
}
}
self._es.indices.put_settings(body=body, index=index_name)
try:
self._es.indices.flush()
self._es.indices.refresh()
except: # pylint: disable=bare-except # noqa
pass
time.sleep(1)
src_cnt = collection.count(query)
es_cnt = self.count()
if src_cnt != es_cnt:
raise IndexerException("Total count of documents does not match [{}, should be {}]".format(es_cnt, src_cnt))
return es_cnt | biothings/biothings.api | [
39,
25,
39,
80,
1452637246
] |
def rate_control(cnt, t):
delay = 0
if t > 90:
delay = 30
elif t > 60:
delay = 10
if delay:
time.sleep(delay) | biothings/biothings.api | [
39,
25,
39,
80,
1452637246
] |
def optimize(self, max_num_segments=1):
'''optimize the default index.'''
params = {
"wait_for_merge": False,
"max_num_segments": max_num_segments,
}
return self._es.indices.forcemerge(index=self._index, params=params) | biothings/biothings.api | [
39,
25,
39,
80,
1452637246
] |
def doc_feeder_using_helper(self, step=None, verbose=True, query=None, scroll='10m', **kwargs):
# verbose unimplemented
step = step or self.step
q = query if query else {'query': {'match_all': {}}}
for rawdoc in helpers.scan(client=self._es, query=q, scroll=scroll, index=self._index,
doc_type=self._doc_type, **kwargs):
if rawdoc.get('_source', False):
doc = rawdoc['_source']
doc["_id"] = rawdoc["_id"]
yield doc
else:
yield rawdoc | biothings/biothings.api | [
39,
25,
39,
80,
1452637246
] |
def doc_feeder(self, step=None, verbose=True, query=None, scroll='10m', only_source=True, **kwargs):
step = step or self.step
q = query if query else {'query': {'match_all': {}}}
_q_cnt = self.count(q=q, raw=True)
n = _q_cnt['count']
n_shards = _q_cnt['_shards']['total']
assert n_shards == _q_cnt['_shards']['successful']
# Not sure if scroll size is per shard anymore in the new ES...should check this
_size = int(step / n_shards)
assert _size * n_shards == step
cnt = 0
# t0 = time.time()
# if verbose:
# t1 = time.time()
res = self._es.search(index=self._index, doc_type=self._doc_type, body=q,
size=_size, search_type='scan', scroll=scroll, **kwargs)
# double check initial scroll request returns no hits
# assert len(res['hits']['hits']) == 0
assert not res['hits']['hits']
while True:
# if verbose:
# t1 = time.time()
res = self._es.scroll(scroll_id=res['_scroll_id'], scroll=scroll)
# if len(res['hits']['hits']) == 0:
if not res['hits']['hits']:
break
else:
for rawdoc in res['hits']['hits']:
if rawdoc.get('_source', False) and only_source:
doc = rawdoc['_source']
doc["_id"] = rawdoc["_id"]
yield doc
else:
yield rawdoc
cnt += 1
assert cnt == n, "Error: scroll query terminated early [{}, {}], please retry.\nLast response:\n{}".format(cnt, n, res) | biothings/biothings.api | [
39,
25,
39,
80,
1452637246
] |
def get_id_list(self, step=None, verbose=True):
step = step or self.step
cur = self.doc_feeder(step=step, _source=False, verbose=verbose)
for doc in cur:
yield doc['_id'] | biothings/biothings.api | [
39,
25,
39,
80,
1452637246
] |
def get_docs(self, ids, step=None, only_source=True, **mget_args):
''' Return matching docs for given ids iterable, if not found return None.
A generator is returned to the matched docs. If only_source is False,
the entire document is returned, otherwise only the source is returned. '''
# chunkify
step = step or self.step
for chunk in iter_n(ids, step):
if self._host_major_ver > 6:
chunk_res = self._es.mget(body={"ids": chunk}, index=self._index,
**mget_args)
else:
chunk_res = self._es.mget(body={"ids": chunk}, index=self._index,
doc_type=self._doc_type, **mget_args)
for rawdoc in chunk_res['docs']:
if (('found' not in rawdoc) or (('found' in rawdoc) and not rawdoc['found'])):
continue
elif not only_source:
yield rawdoc
else:
doc = rawdoc['_source']
doc["_id"] = rawdoc["_id"]
yield doc | biothings/biothings.api | [
39,
25,
39,
80,
1452637246
] |
def snapshot(self, repo, snapshot, mode=None, **params):
body = {
"indices": self._index,
"include_global_state": False
# there is no reason to include global state in our application
# we want to separate the staging env from the production env
# (global state includes index templates and ingest pipeline)
# but this doesn't mean this setting has to be here
# maybe move this line to where it belongs later
}
if mode == "purge":
# Note: this works, just for small one when deletion is done instantly
try:
self._es.snapshot.get(repo, snapshot)
# if we can get it, we have to delete it
self._es.snapshot.delete(repo, snapshot)
except NotFoundError:
# ok, nothing to delete/purge
pass
try:
return self._es.snapshot.create(repo, snapshot, body=body, params=params)
except RequestError as e:
try:
err_msg = e.info['error']['reason']
except KeyError:
err_msg = e.error
raise IndexerException("Can't snapshot '%s': %s" % (self._index, err_msg)) | biothings/biothings.api | [
39,
25,
39,
80,
1452637246
] |
def get_repository(self, repo_name):
try:
return self._es.snapshot.get_repository(repo_name)
except NotFoundError:
raise IndexerException("Repository '%s' doesn't exist" % repo_name) | biothings/biothings.api | [
39,
25,
39,
80,
1452637246
] |
def get_snapshot_status(self, repo, snapshot):
return self._es.snapshot.status(repo, snapshot) | biothings/biothings.api | [
39,
25,
39,
80,
1452637246
] |
def generate_es_mapping(inspect_doc, init=True, level=0):
"""Generate an ES mapping according to "inspect_doc", which is
produced by biothings.utils.inspect module"""
map_tpl = {
int: {"type": "integer"},
bool: {"type": "boolean"},
float: {"type": "float"},
str: {"type": "keyword", "normalizer": "keyword_lowercase_normalizer"}, # not splittable (like an ID for instance)
splitstr: {"type": "text"},
}
# inspect_doc, if it's been jsonified, contains keys with type as string,
# such as "<class 'str'>". This is not a real type and we need to convert them
# back to actual types. This is transparent if inspect_doc is already in the proper format
pat = re.compile(r"<class '(\w+)'>")
def str2type(k):
if isinstance(k, str):
mat = pat.findall(k)
if mat:
return eval(mat[0]) # actual type
else:
return k
else:
return k
inspect_doc = dict_walk(inspect_doc, str2type)
mapping = {}
errors = []
none_type = type(None)
if init and "_id" not in inspect_doc:
errors.append("No _id key found, document won't be indexed. (doc: %s)" % inspect_doc)
for rootk in inspect_doc:
if rootk == "_id":
keys = list(inspect_doc[rootk].keys())
if str in keys and splitstr in keys:
keys.remove(str)
if not len(keys) == 1 or (keys[0] != str and keys[0] != splitstr):
errors.append("_id fields should all be a string type (got: %s)" % keys)
# it was just a check, it's not part of the mapping
continue
if rootk == "_stats":
continue
if isinstance(rootk, type(None)): # if rootk == type(None):
# value can be null, just skip it
continue
# some inspect reports have True as value, others have a dict (all will have a dict eventually)
if inspect_doc[rootk] is True:
inspect_doc[rootk] = {}
keys = list(inspect_doc[rootk].keys())
# if dict, it can be a dict containing the type (no explore needed) or a dict
# containing more keys (explore needed)
if list in keys:
# we explore directly the list w/ inspect_doc[rootk][list] as param.
# (similar to skipping list type, as there's no such list type in ES mapping)
# careful: there could be a list of lists, in which case we move further into the structure
# to skip them
toexplore = inspect_doc[rootk][list]
while list in toexplore:
toexplore = toexplore[list]
if len(toexplore) > 1:
# we want to make sure that, whatever the structure, the types involved were the same
# Exception: None is allowed with other types (translates to 'null' in ES)
# other_types = set([k for k in toexplore.keys() if k != list and isinstance(k, type) and k is not type(None)]) # TODO: Confirm this line
other_types = {k for k in toexplore.keys() if k != list and isinstance(k, type) and not isinstance(k, none_type)}
# some mixes are allowed by ES
if {int, float}.issubset(other_types):
other_types.discard(int) # float > int
toexplore.pop(int)
if len(other_types) > 1:
raise Exception("Mixing types for key '%s': %s" % (rootk, other_types))
res = generate_es_mapping(toexplore, init=False, level=level+1)
# is it the only key or do we have more? (i.e. some docs have data as "x",
# some others have list("x"))
# list was either a list of values (end of tree) or a list of dict. Depending
# on that, we add "properties" (when list of dict) or not (when list of values)
if type in set(map(type, inspect_doc[rootk][list])):
mapping[rootk] = res
else:
mapping[rootk] = {"properties": {}}
mapping[rootk]["properties"] = res
elif set(map(type, keys)) == {type}:
# it's a type declaration, no explore
# typs = list(map(type, [k for k in keys if k is not type(None)])) # TODO: Confirm this line
typs = list(map(type, [k for k in keys if not isinstance(k, none_type)]))
if len(typs) > 1:
errors.append("More than one type (key:%s,types:%s)" % (repr(rootk), repr(keys)))
try:
typ = list(inspect_doc[rootk].keys())
# there can still be more than one type, if we have a None combined with
# the "correct" one. We allow None as a combined type, but we want to ignore
# it when we want to find the mapping
if len(typ) == 1:
typ = typ[0]
else:
# typ = [t for t in typ if t is not type(None)][0] # TODO: Confirm this line
typ = [t for t in typ if not isinstance(t, none_type)][0]
if typ is nan or typ is inf:
raise TypeError(typ)
mapping[rootk] = map_tpl[typ]
except KeyError:
errors.append("Can't find map type %s for key %s" % (inspect_doc[rootk], rootk))
except TypeError:
errors.append("Type %s for key %s isn't allowed in ES mapping" % (typ, rootk))
elif inspect_doc[rootk] == {}:
typ = rootk
return map_tpl[typ]
else:
mapping[rootk] = {"properties": {}}
mapping[rootk]["properties"] = generate_es_mapping(inspect_doc[rootk], init=False, level=level+1)
if errors:
raise MappingError("Error while generating mapping", errors)
return mapping | biothings/biothings.api | [
39,
25,
39,
80,
1452637246
] |
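A small worked example of the mapping generation, with the inspect doc shape assumed from the code above (field names map to dicts keyed by Python types; _id is only validated, never emitted):

# illustrative inspect doc, as produced by biothings.utils.inspect
inspect_doc = {"_id": {str: {}}, "taxid": {int: {}}, "symbol": {str: {}}}
mapping = generate_es_mapping(inspect_doc)
# mapping == {"taxid": {"type": "integer"},
#             "symbol": {"type": "keyword",
#                        "normalizer": "keyword_lowercase_normalizer"}}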
def get_hub_db_conn():
return Database() | biothings/biothings.api | [
39,
25,
39,
80,
1452637246
] |
def get_src_dump():
db = Database()
return db[db.CONFIG.DATA_SRC_DUMP_COLLECTION] | biothings/biothings.api | [
39,
25,
39,
80,
1452637246
] |
def get_src_build():
db = Database()
return db[db.CONFIG.DATA_SRC_BUILD_COLLECTION] | biothings/biothings.api | [
39,
25,
39,
80,
1452637246
] |
def get_data_plugin():
db = Database()
return db[db.CONFIG.DATA_PLUGIN_COLLECTION] | biothings/biothings.api | [
39,
25,
39,
80,
1452637246
] |
def get_cmd():
db = Database()
return db[db.CONFIG.CMD_COLLECTION] | biothings/biothings.api | [
39,
25,
39,
80,
1452637246
] |
def get_hub_config():
db = Database()
return db[getattr(db.CONFIG, "HUB_CONFIG_COLLECTION", "hub_config")] | biothings/biothings.api | [
39,
25,
39,
80,
1452637246
] |
def get_last_command():
cmds = list(sorted(get_cmd()._read().values(), key=lambda cmd: cmd["_id"]))
return cmds[-1] if cmds else None | biothings/biothings.api | [
39,
25,
39,
80,
1452637246
] |
def __init__(self):
super(Database, self).__init__()
self.name = self.CONFIG.DATA_HUB_DB_DATABASE
self.host = self.CONFIG.HUB_DB_BACKEND["host"]
self.client = Elasticsearch(self.host, serializer=_HubDBEncoder())
if not self.client.indices.exists(index=self.name):
self.client.indices.create(index=self.name, body={
'settings': {
'number_of_shards': 1,
"number_of_replicas": 0,
},
'mappings': {
"enabled": False
}
}) | biothings/biothings.api | [
39,
25,
39,
80,
1452637246
] |
def address(self):
return self.host | biothings/biothings.api | [
39,
25,
39,
80,
1452637246
] |
def _exists(self, _id):
return self.client.exists(self.name, _id) | biothings/biothings.api | [
39,
25,
39,
80,
1452637246
] |
def _write(self, _id, doc):
assert doc.pop("_id", None) in (_id, None)
self.client.index(self.name, doc, id=_id)
self.client.indices.refresh(self.name) | biothings/biothings.api | [
39,
25,
39,
80,
1452637246
] |
def create_collection(self, colname):
return self[colname] | biothings/biothings.api | [
39,
25,
39,
80,
1452637246
] |
def __init__(self, colname, db):
self.name = colname
self.db = db
if not self.db._exists(colname):
self._write({}) | biothings/biothings.api | [
39,
25,
39,
80,
1452637246
] |
def _read(self):
return self.db._read(self.name) | biothings/biothings.api | [
39,
25,
39,
80,
1452637246
] |
def _exists_one(self, _id):
return str(_id) in self._read() | biothings/biothings.api | [
39,
25,
39,
80,
1452637246
] |
def func(collection):
collection[str(doc["_id"])] = doc | biothings/biothings.api | [
39,
25,
39,
80,
1452637246
] |
def __getitem__(self, _id):
return self.find_one({"_id": _id}) | biothings/biothings.api | [
39,
25,
39,
80,
1452637246
] |
def find(self, filter=None, projection=None, *args, **kwargs):
if args or kwargs:
raise NotImplementedError()
results = []
logger = logging.getLogger(__name__)
for doc in self._read().values():
_doc = dict(traverse(doc)) # dotdict
_doc.update(dict(traverse(doc, True)))
for k, v in (filter or {}).items():
if isinstance(v, dict) and "$exists" in v:
logger.error("Ignored filter: {'%s': %s}", k, v)
continue
if _doc.get(k) != v:
break
else: # no break
results.append(_pyobj(doc))
if projection: # used by BuildManager.build_info
logger.error("Ignored projection: %s", projection)
return results | biothings/biothings.api | [
39,
25,
39,
80,
1452637246
] |
def replace_one(self, filter, replacement, upsert=False, *args, **kwargs):
if args or kwargs:
raise NotImplementedError()
doc = self.find_one(filter) or {}
if not (doc or upsert):
raise ValueError("No Match.")
_id = doc.get("_id") or filter["_id"]
replacement["_id"] = _id
self._write_one(replacement) | biothings/biothings.api | [
39,
25,
39,
80,
1452637246
] |
def _update_one(self, doc, update, *args, **kwargs):
if args or kwargs:
raise NotImplementedError()
if not len(update) == 1:
raise ValueError("Invalid operator.")
if next(iter(update)) not in ("$set", "$unset", "$push", "$addToSet", "$pull"):
raise NotImplementedError(next(iter(update)))
# https://docs.mongodb.com/manual/reference/operator/update/set/
# https://docs.mongodb.com/manual/reference/operator/update/unset/
# https://docs.mongodb.com/manual/reference/operator/update/push/
# https://docs.mongodb.com/manual/reference/operator/update/addToSet/
# https://docs.mongodb.com/manual/reference/operator/update/pull/
if "$set" in update:
_update = json.loads(to_json(update["$set"]))
_update = parse_dot_fields(_update)
doc = update_dict_recur(doc, _update)
elif "$unset" in update:
for dotk, v in traverse(doc):
if dotk in update["$unset"]:
v["__REMOVE__"] = True
doc = merge({}, doc)
elif "$push" in update:
for key, val in update["$push"].items():
if "." in key: # not all mongo operators are fully implemented
raise NotImplementedError("nested key in $push: %s" % key)
doc.setdefault(key, []).append(val)
elif "$addToSet" in update:
for key, val in update["$addToSet"].items():
if "." in key: # not all mongo operators are fully implemented
raise NotImplementedError("nested key in $addToSet: %s" % key)
field = doc.setdefault(key, [])
if val not in field:
field.append(val)
else: # "$pull" in update:
for key, val in update["$pull"].items():
if "." in key: # not all mongo operators are fully implemented
raise NotImplementedError("nested key in $pull: %s" % key)
if not isinstance(val, (str, int)):
raise NotImplementedError("value or condition in $pull: %s" % val)
if isinstance(doc.get(key), list):
doc[key][:] = [x for x in doc[key] if x != val]
self._write_one(doc) | biothings/biothings.api | [
39,
25,
39,
80,
1452637246
] |
def update_many(self, filter, update, upsert=False, *args, **kwargs):
docs = self.find(filter)
if not docs and upsert:
if any("." in k for k in filter):
raise ValueError("dotfield in upsert.")
docs = [filter]
for doc in docs:
self._update_one(doc, update, *args, **kwargs) | biothings/biothings.api | [
39,
25,
39,
80,
1452637246
] |
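A usage sketch of the Mongo-style subset implemented above; the collection comes from one of the accessors defined earlier (get_src_dump), while the document id and field values are assumptions:

col = get_src_dump()  # hub-db collection backed by the ES-based Database above
col.update_many({"_id": "mysource"}, {"$set": {"download.status": "success"}}, upsert=True)
col.update_many({"_id": "mysource"}, {"$addToSet": {"tags": "weekly"}})
col.update_many({"_id": "mysource"}, {"$pull": {"tags": "weekly"}})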
def update(self, *args, **kwargs):
# In the future,
# Use replace_one(), update_one(), or update_many() instead.
self.update_many(*args, **kwargs) | biothings/biothings.api | [
39,
25,
39,
80,
1452637246
] |
def remove(self, query):
# In the future,
# Use delete_one() or delete_many() instead.
docs = self.find(query)
collection = self._read()
for doc in docs:
del collection[doc["_id"]]
self._write(collection) | biothings/biothings.api | [
39,
25,
39,
80,
1452637246
] |
def _pyobj(doc): # ES doc -> Python object
for _, _doc in traverse(doc):
if isinstance(_doc, dict):
for k, v in list(_doc.items()):
_doc[k] = _eval(v)
elif isinstance(_doc, list):
_doc[:] = map(_eval, _doc)
return doc | biothings/biothings.api | [
39,
25,
39,
80,
1452637246
] |