Columns: source_code (string, length 45–7.24k) · function_name (string, length 1–111) · messages (list)
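Each record below pairs a Python function body (its name replaced with `x`) with the target name, plus the same pair rendered as a chat-style messages list. A minimal sketch of consuming records of this shape — the file name and JSON-lines layout are assumptions for illustration, not part of this dump:

import json

# Hypothetical path; assumes one record per line with the three columns
# listed above: source_code, function_name, messages.
with open('function_naming.jsonl') as f:
    for line in f:
        record = json.loads(line)
        # messages = [system instruction, user turn with the code,
        #             assistant turn with the suggested name]
        target = record['messages'][-1]['content']
        assert target == record['function_name']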
def x(self, node_set, depth=0):
    tab = '    '
    result = list()
    for n in node_set:
        repr = (n if self.nodes[n]['type'] == 'variable' else
            f"{n}{inspect.signature(self.nodes[n]['lambda_fn'])}")
        result.append(f'{tab * depth}{repr}')
        result.extend(self.traverse_nodes(self.successors(n), depth=depth + 1))
    return result
traverse_nodes
[ { "content": "Given the source code of a python function, suggest a fitting name for the function.", "role": "system" }, { "content": "def x(self, node_set, depth=0):\n tab = ' '\n result = list()\n for n in node_set:\n repr = (n if self.nodes[n]['type'] == 'variable' else\n f\"{n}{inspect.signature(self.nodes[n]['lambda_fn'])}\")\n result.append(f'{tab * depth}{repr}')\n result.extend(self.traverse_nodes(self.successors(n), depth=depth + 1))\n return result\n", "role": "user" }, { "content": "traverse_nodes", "role": "assistant" } ]
def x(cls, file: str, lambdas):
    with open(file, 'r') as f:
        data = json.load(f)
    return cls.from_dict(data, lambdas)
from_json_and_lambdas
[ { "content": "Given the source code of a python function, suggest a fitting name for the function.", "role": "system" }, { "content": "def x(cls, file: str, lambdas):\n with open(file, 'r') as f:\n data = json.load(f)\n return cls.from_dict(data, lambdas)\n", "role": "user" }, { "content": "from_json_and_lambdas", "role": "assistant" } ]
def x(cls, data: Dict, lambdas):
    functions = {d['name']: d for d in data['functions']}
    G = nx.DiGraph()
    scope_tree = nx.DiGraph()

    def x(basename: str, parent: str, index: int, is_loop_index: bool=False):
        G.add_node(f'{parent}::{basename}_{index}', type='variable', color=
            'maroon', parent=parent, label=f'{basename}_{index}', basename=
            basename, is_loop_index=is_loop_index, padding=15, value=None)

    def x(cur_scope, var_name):
        if cur_scope.parent is None:
            return cur_scope.name, -1
        new_scope = cur_scope.parent
        if var_name in new_scope.variables:
            return new_scope.name, new_scope.variables[var_name]
        else:
            return find_correct_scope(new_scope, var_name)

    def x(scope, loop_index_variable: Optional[str]=None):
        for stmt in scope.body:
            if 'name' in stmt:
                stmt_type = functions[stmt['name']]['type']
                if stmt_type in ('assign', 'condition', 'decision'):
                    if stmt_type == 'assign':
                        if 'body' in functions[stmt['name']]:
                            stmt_type = 'literal'
                    output = stmt['output']
                    scope.variables[output['variable']] = output['index']
                    add_variable_node(output['variable'], scope.name,
                        output['index'])
                    G.add_edge(stmt['name'],
                        f"{scope.name}::{output['variable']}_{output['index']}")
                    ordered_inputs = list()
                    for input in stmt.get('input', []):
                        parent = scope.name
                        index = input['index']
                        base_name = input['variable']
                        if index == -1 and base_name != loop_index_variable:
                            parent, index = find_correct_scope(scope, base_name)
                        add_variable_node(base_name, parent, index,
                            base_name == loop_index_variable)
                        node_name = f'{parent}::{base_name}_{index}'
                        ordered_inputs.append(node_name)
                        G.add_edge(node_name, stmt['name'])
                    G.add_node(stmt['name'], type='function', lambda_fn=
                        getattr(lambdas, stmt['name']), func_inputs=
                        ordered_inputs, shape='rectangle', parent=scope.name,
                        label=stmt_type[0].upper(), padding=10)
                elif stmt_type == 'loop_plate':
                    index_variable = functions[stmt['name']]['index_variable']
                    scope_tree.add_node(stmt['name'], color='blue')
                    scope_tree.add_edge(scope.name, stmt['name'])
                    new_scope = ScopeNode(functions[stmt['name']], parent=scope)
                    process_container(new_scope, loop_index_variable=
                        index_variable)
                else:
                    pass
            elif 'function' in stmt and stmt['function'] != 'print':
                scope_tree.add_node(stmt['function'], color='green')
                scope_tree.add_edge(scope.name, stmt['function'])
                new_scope = ScopeNode(functions[stmt['function']], parent=scope)
                process_container(new_scope)
    root = data['start']
    scope_tree.add_node(root, color='green')
    cur_scope = ScopeNode(functions[root])
    process_container(cur_scope)
    return cls(G, scope_tree)
from_dict
[ { "content": "Given the source code of a python function, suggest a fitting name for the function.", "role": "system" }, { "content": "def x(cls, data: Dict, lambdas):\n functions = {d['name']: d for d in data['functions']}\n G = nx.DiGraph()\n scope_tree = nx.DiGraph()\n\n def x(basename: str, parent: str, index: int, is_loop_index: bool=False):\n G.add_node(f'{parent}::{basename}_{index}', type='variable', color=\n 'maroon', parent=parent, label=f'{basename}_{index}', basename=\n basename, is_loop_index=is_loop_index, padding=15, value=None)\n\n def x(cur_scope, var_name):\n if cur_scope.parent is None:\n return cur_scope.name, -1\n new_scope = cur_scope.parent\n if var_name in new_scope.variables:\n return new_scope.name, new_scope.variables[var_name]\n else:\n return find_correct_scope(new_scope, var_name)\n\n def x(scope, loop_index_variable: Optional[str]=None):\n for stmt in scope.body:\n if 'name' in stmt:\n stmt_type = functions[stmt['name']]['type']\n if stmt_type in ('assign', 'condition', 'decision'):\n if stmt_type == 'assign':\n if 'body' in functions[stmt['name']]:\n stmt_type = 'literal'\n output = stmt['output']\n scope.variables[output['variable']] = output['index']\n add_variable_node(output['variable'], scope.name,\n output['index'])\n G.add_edge(stmt['name'],\n f\"{scope.name}::{output['variable']}_{output['index']}\"\n )\n ordered_inputs = list()\n for input in stmt.get('input', []):\n parent = scope.name\n index = input['index']\n base_name = input['variable']\n if index == -1 and base_name != loop_index_variable:\n parent, index = find_correct_scope(scope, base_name\n )\n add_variable_node(base_name, parent, index, \n base_name == loop_index_variable)\n node_name = f'{parent}::{base_name}_{index}'\n ordered_inputs.append(node_name)\n G.add_edge(node_name, stmt['name'])\n G.add_node(stmt['name'], type='function', lambda_fn=\n getattr(lambdas, stmt['name']), func_inputs=\n ordered_inputs, shape='rectangle', parent=scope.\n name, label=stmt_type[0].upper(), padding=10)\n elif stmt_type == 'loop_plate':\n index_variable = functions[stmt['name']]['index_variable']\n scope_tree.add_node(stmt['name'], color='blue')\n scope_tree.add_edge(scope.name, stmt['name'])\n new_scope = ScopeNode(functions[stmt['name']], parent=scope\n )\n process_container(new_scope, loop_index_variable=\n index_variable)\n else:\n pass\n elif 'function' in stmt and stmt['function'] != 'print':\n scope_tree.add_node(stmt['function'], color='green')\n scope_tree.add_edge(scope.name, stmt['function'])\n new_scope = ScopeNode(functions[stmt['function']], parent=scope\n )\n process_container(new_scope)\n root = data['start']\n scope_tree.add_node(root, color='green')\n cur_scope = ScopeNode(functions[root])\n process_container(cur_scope)\n return cls(G, scope_tree)\n", "role": "user" }, { "content": "from_dict", "role": "assistant" } ]
def x(cls, python_file, lambdas_path, json_filename: str, stem: str):
    with open(python_file, 'r') as f:
        pySrc = f.read()
    return cls.from_python_src(pySrc, lambdas_path, json_filename, stem)
from_python_file
[ { "content": "Given the source code of a python function, suggest a fitting name for the function.", "role": "system" }, { "content": "def x(cls, python_file, lambdas_path, json_filename: str, stem: str):\n with open(python_file, 'r') as f:\n pySrc = f.read()\n return cls.from_python_src(pySrc, lambdas_path, json_filename, stem)\n", "role": "user" }, { "content": "from_python_file", "role": "assistant" } ]
def x(cls, pySrc, lambdas_path, json_filename: str, stem: str, save_file:
        bool=False):
    asts = [ast.parse(pySrc)]
    pgm_dict = genPGM.create_pgm_dict(lambdas_path, asts, json_filename, {
        'FileName': f'{stem}.py'})
    lambdas = importlib.__import__(stem + '_lambdas')
    return cls.from_dict(pgm_dict, lambdas)
from_python_src
[ { "content": "Given the source code of a python function, suggest a fitting name for the function.", "role": "system" }, { "content": "def x(cls, pySrc, lambdas_path, json_filename: str, stem: str, save_file:\n bool=False):\n asts = [ast.parse(pySrc)]\n pgm_dict = genPGM.create_pgm_dict(lambdas_path, asts, json_filename, {\n 'FileName': f'{stem}.py'})\n lambdas = importlib.__import__(stem + '_lambdas')\n return cls.from_dict(pgm_dict, lambdas)\n", "role": "user" }, { "content": "from_python_src", "role": "assistant" } ]
def x(cls, fortran_file: str, tmpdir: str='.'):
    stem = Path(fortran_file).stem
    if tmpdir == '.' and '/' in fortran_file:
        tmpdir = Path(fortran_file).parent
    preprocessed_fortran_file = f'{tmpdir}/{stem}_preprocessed.f'
    lambdas_path = f'{tmpdir}/{stem}_lambdas.py'
    json_filename = stem + '.json'
    with open(fortran_file, 'r') as f:
        inputLines = f.readlines()
    with open(preprocessed_fortran_file, 'w') as f:
        f.write(preprocessor.process(inputLines))
    xml_string = sp.run(['java', 'fortran.ofp.FrontEnd', '--class',
        'fortran.ofp.XMLPrinter', '--verbosity', '0',
        preprocessed_fortran_file], stdout=sp.PIPE).stdout
    trees = [ET.fromstring(xml_string)]
    comments = get_comments.get_comments(preprocessed_fortran_file)
    os.remove(preprocessed_fortran_file)
    xml_to_json_translator = translate.XMLToJSONTranslator()
    outputDict = xml_to_json_translator.analyze(trees, comments)
    pySrc = pyTranslate.create_python_source_list(outputDict)[0][0]
    G = cls.from_python_src(pySrc, lambdas_path, json_filename, stem)
    return G
from_fortran_file
[ { "content": "Given the source code of a python function, suggest a fitting name for the function.", "role": "system" }, { "content": "def x(cls, fortran_file: str, tmpdir: str='.'):\n stem = Path(fortran_file).stem\n if tmpdir == '.' and '/' in fortran_file:\n tmpdir = Path(fortran_file).parent\n preprocessed_fortran_file = f'{tmpdir}/{stem}_preprocessed.f'\n lambdas_path = f'{tmpdir}/{stem}_lambdas.py'\n json_filename = stem + '.json'\n with open(fortran_file, 'r') as f:\n inputLines = f.readlines()\n with open(preprocessed_fortran_file, 'w') as f:\n f.write(preprocessor.process(inputLines))\n xml_string = sp.run(['java', 'fortran.ofp.FrontEnd', '--class',\n 'fortran.ofp.XMLPrinter', '--verbosity', '0',\n preprocessed_fortran_file], stdout=sp.PIPE).stdout\n trees = [ET.fromstring(xml_string)]\n comments = get_comments.get_comments(preprocessed_fortran_file)\n os.remove(preprocessed_fortran_file)\n xml_to_json_translator = translate.XMLToJSONTranslator()\n outputDict = xml_to_json_translator.analyze(trees, comments)\n pySrc = pyTranslate.create_python_source_list(outputDict)[0][0]\n G = cls.from_python_src(pySrc, lambdas_path, json_filename, stem)\n return G\n", "role": "user" }, { "content": "from_fortran_file", "role": "assistant" } ]
def x(cls, fortran_src: str, dir: str='.'):
    import tempfile
    fp = tempfile.NamedTemporaryFile('w+t', delete=False, dir=dir)
    fp.writelines(fortran_src)
    fp.close()
    G = cls.from_fortran_file(fp.name, dir)
    os.remove(fp.name)
    return G
from_fortran_src
[ { "content": "Given the source code of a python function, suggest a fitting name for the function.", "role": "system" }, { "content": "def x(cls, fortran_src: str, dir: str='.'):\n import tempfile\n fp = tempfile.NamedTemporaryFile('w+t', delete=False, dir=dir)\n fp.writelines(fortran_src)\n fp.close()\n G = cls.from_fortran_file(fp.name, dir)\n os.remove(fp.name)\n return G\n", "role": "user" }, { "content": "from_fortran_src", "role": "assistant" } ]
def x(self):
    for n in self.nodes():
        if self.nodes[n]['type'] == 'variable':
            self.nodes[n]['value'] = None
        elif self.nodes[n]['type'] == 'function':
            self.nodes[n]['func_visited'] = False
clear
[ { "content": "Given the source code of a python function, suggest a fitting name for the function.", "role": "system" }, { "content": "def x(self):\n for n in self.nodes():\n if self.nodes[n]['type'] == 'variable':\n self.nodes[n]['value'] = None\n elif self.nodes[n]['type'] == 'function':\n self.nodes[n]['func_visited'] = False\n", "role": "user" }, { "content": "clear", "role": "assistant" } ]
def x(self, other):
    if not isinstance(other, GroundedFunctionNetwork):
        raise TypeError(
            f'Expected GroundedFunctionNetwork, but got {type(other)}')

    def x(var):
        return var[var.find('::') + 2:var.rfind('_')]

    def x(graph, shortname):
        return [v for v in graph.nodes() if shortname in v]
    this_var_nodes = [shortname(n) for n, d in self.nodes(data=True) if d[
        'type'] == 'variable']
    other_var_nodes = [shortname(n) for n, d in other.nodes(data=True) if d[
        'type'] == 'variable']
    shared_vars = set(this_var_nodes).intersection(set(other_var_nodes))
    full_shared_vars = {full_var for shared_var in shared_vars for full_var in
        shortname_vars(self, shared_var)}
    return ForwardInfluenceBlanket(self, full_shared_vars)
to_FIB
[ { "content": "Given the source code of a python function, suggest a fitting name for the function.", "role": "system" }, { "content": "def x(self, other):\n if not isinstance(other, GroundedFunctionNetwork):\n raise TypeError(\n f'Expected GroundedFunctionNetwork, but got {type(other)}')\n\n def x(var):\n return var[var.find('::') + 2:var.rfind('_')]\n\n def x(graph, shortname):\n return [v for v in graph.nodes() if shortname in v]\n this_var_nodes = [shortname(n) for n, d in self.nodes(data=True) if d[\n 'type'] == 'variable']\n other_var_nodes = [shortname(n) for n, d in other.nodes(data=True) if d\n ['type'] == 'variable']\n shared_vars = set(this_var_nodes).intersection(set(other_var_nodes))\n full_shared_vars = {full_var for shared_var in shared_vars for full_var in\n shortname_vars(self, shared_var)}\n return ForwardInfluenceBlanket(self, full_shared_vars)\n", "role": "user" }, { "content": "to_FIB", "role": "assistant" } ]
def x(self):
    A = nx.nx_agraph.to_agraph(self)
    A.graph_attr.update({'dpi': 227, 'fontsize': 20, 'fontname': 'Menlo',
        'rankdir': 'TB'})
    A.node_attr.update({'fontname': 'Menlo'})

    def x(cluster_name, root_graph):
        subgraph_nodes = [n[0] for n in self.nodes(data=True) if n[1][
            'parent'] == cluster_name]
        root_graph.add_nodes_from(subgraph_nodes)
        subgraph = root_graph.add_subgraph(subgraph_nodes, name=
            f'cluster_{cluster_name}', label=cluster_name, style=
            'bold, rounded')
        for n in self.scope_tree.successors(cluster_name):
            build_tree(n, subgraph)
    root = [n for n, d in self.scope_tree.in_degree() if d == 0][0]
    build_tree(root, A)
    return A
to_agraph
[ { "content": "Given the source code of a python function, suggest a fitting name for the function.", "role": "system" }, { "content": "def x(self):\n A = nx.nx_agraph.to_agraph(self)\n A.graph_attr.update({'dpi': 227, 'fontsize': 20, 'fontname': 'Menlo',\n 'rankdir': 'TB'})\n A.node_attr.update({'fontname': 'Menlo'})\n\n def x(cluster_name, root_graph):\n subgraph_nodes = [n[0] for n in self.nodes(data=True) if n[1][\n 'parent'] == cluster_name]\n root_graph.add_nodes_from(subgraph_nodes)\n subgraph = root_graph.add_subgraph(subgraph_nodes, name=\n f'cluster_{cluster_name}', label=cluster_name, style=\n 'bold, rounded')\n for n in self.scope_tree.successors(cluster_name):\n build_tree(n, subgraph)\n root = [n for n, d in self.scope_tree.in_degree() if d == 0][0]\n build_tree(root, A)\n return A\n", "role": "user" }, { "content": "to_agraph", "role": "assistant" } ]
def x(self):
    CAG = self.to_CAG()
    A = nx.nx_agraph.to_agraph(CAG)
    A.graph_attr.update({'dpi': 227, 'fontsize': 20, 'fontname': 'Menlo'})
    A.node_attr.update({'shape': 'rectangle', 'color': '#650021', 'style':
        'rounded', 'fontname': 'Gill Sans'})
    A.edge_attr.update({'color': '#650021', 'arrowsize': 0.5})
    return A
to_CAG_agraph
[ { "content": "Given the source code of a python function, suggest a fitting name for the function.", "role": "system" }, { "content": "def x(self):\n CAG = self.to_CAG()\n A = nx.nx_agraph.to_agraph(CAG)\n A.graph_attr.update({'dpi': 227, 'fontsize': 20, 'fontname': 'Menlo'})\n A.node_attr.update({'shape': 'rectangle', 'color': '#650021', 'style':\n 'rounded', 'fontname': 'Gill Sans'})\n A.edge_attr.update({'color': '#650021', 'arrowsize': 0.5})\n return A\n", "role": "user" }, { "content": "to_CAG_agraph", "role": "assistant" } ]
def x(self):
    A = nx.nx_agraph.to_agraph(self.call_graph)
    A.graph_attr.update({'dpi': 227, 'fontsize': 20, 'fontname': 'Menlo'})
    A.node_attr.update({'shape': 'rectangle', 'color': '#650021', 'style':
        'rounded'})
    A.edge_attr.update({'color': '#650021', 'arrowsize': 0.5})
    return A
to_call_agraph
[ { "content": "Given the source code of a python function, suggest a fitting name for the function.", "role": "system" }, { "content": "def x(self):\n A = nx.nx_agraph.to_agraph(self.call_graph)\n A.graph_attr.update({'dpi': 227, 'fontsize': 20, 'fontname': 'Menlo'})\n A.node_attr.update({'shape': 'rectangle', 'color': '#650021', 'style':\n 'rounded'})\n A.edge_attr.update({'color': '#650021', 'arrowsize': 0.5})\n return A\n", "role": "user" }, { "content": "to_call_agraph", "role": "assistant" } ]
def x(self, inputs: Dict[str, Union[float, Iterable]], covers: Dict[str,
        Union[float, Iterable]], torch_size: Optional[int]=None) ->Union[float,
        Iterable]:
    if len(covers) != len(self.cover_nodes):
        raise ValueError('Incorrect number of cover values.')
    for node_name, val in covers.items():
        self.nodes[node_name]['value'] = val
    return super().run(inputs, torch_size)
run
[ { "content": "Given the source code of a python function, suggest a fitting name for the function.", "role": "system" }, { "content": "def x(self, inputs: Dict[str, Union[float, Iterable]], covers: Dict[str,\n Union[float, Iterable]], torch_size: Optional[int]=None) ->Union[float,\n Iterable]:\n if len(covers) != len(self.cover_nodes):\n raise ValueError('Incorrect number of cover values.')\n for node_name, val in covers.items():\n self.nodes[node_name]['value'] = val\n return super().run(inputs, torch_size)\n", "role": "user" }, { "content": "run", "role": "assistant" } ]
def x(self, sizes, bounds, presets, covers, use_torch=False, num_samples=10):
    args = self.inputs
    Si = self.sobol_analysis(num_samples, {'num_vars': len(args), 'names':
        args, 'bounds': [bounds[arg] for arg in args]}, covers)
    S2 = Si['S2']
    s2_max, v1, v2 = get_max_s2_sensitivity(S2)
    x_var = args[v1]
    y_var = args[v2]
    search_space = [(x_var, bounds[x_var]), (y_var, bounds[y_var])]
    preset_vals = {arg: presets[arg] for i, arg in enumerate(args) if i !=
        v1 and i != v2}
    X = np.linspace(*search_space[0][1], sizes[0])
    Y = np.linspace(*search_space[1][1], sizes[1])
    if use_torch:
        Xm, Ym = torch.meshgrid(torch.tensor(X), torch.tensor(Y))
        inputs = {n: torch.full_like(Xm, v) for n, v in presets.items()}
        inputs.update({search_space[0][0]: Xm, search_space[1][0]: Ym})
        Z = self.run(inputs, covers).numpy()
    else:
        Xm, Ym = np.meshgrid(X, Y)
        Z = np.zeros((len(X), len(Y)))
        for x, y in itertools.product(range(len(X)), range(len(Y))):
            inputs = {n: v for n, v in presets.items()}
            inputs.update({search_space[0][0]: x, search_space[1][0]: y})
            Z[x][y] = self.run(inputs, covers)
    return X, Y, Z, x_var, y_var
S2_surface
[ { "content": "Given the source code of a python function, suggest a fitting name for the function.", "role": "system" }, { "content": "def x(self, sizes, bounds, presets, covers, use_torch=False, num_samples=10):\n args = self.inputs\n Si = self.sobol_analysis(num_samples, {'num_vars': len(args), 'names':\n args, 'bounds': [bounds[arg] for arg in args]}, covers)\n S2 = Si['S2']\n s2_max, v1, v2 = get_max_s2_sensitivity(S2)\n x_var = args[v1]\n y_var = args[v2]\n search_space = [(x_var, bounds[x_var]), (y_var, bounds[y_var])]\n preset_vals = {arg: presets[arg] for i, arg in enumerate(args) if i !=\n v1 and i != v2}\n X = np.linspace(*search_space[0][1], sizes[0])\n Y = np.linspace(*search_space[1][1], sizes[1])\n if use_torch:\n Xm, Ym = torch.meshgrid(torch.tensor(X), torch.tensor(Y))\n inputs = {n: torch.full_like(Xm, v) for n, v in presets.items()}\n inputs.update({search_space[0][0]: Xm, search_space[1][0]: Ym})\n Z = self.run(inputs, covers).numpy()\n else:\n Xm, Ym = np.meshgrid(X, Y)\n Z = np.zeros((len(X), len(Y)))\n for x, y in itertools.product(range(len(X)), range(len(Y))):\n inputs = {n: v for n, v in presets.items()}\n inputs.update({search_space[0][0]: x, search_space[1][0]: y})\n Z[x][y] = self.run(inputs, covers)\n return X, Y, Z, x_var, y_var\n", "role": "user" }, { "content": "S2_surface", "role": "assistant" } ]
def x(self, root, state) ->List[Dict]:
    val = {'tag': root.tag, 'args': []}
    for node in root:
        val['args'] += self.parseTree(node, state)
    return [val]
process_direct_map
[ { "content": "Given the source code of a python function, suggest a fitting name for the function.", "role": "system" }, { "content": "def x(self, root, state) ->List[Dict]:\n val = {'tag': root.tag, 'args': []}\n for node in root:\n val['args'] += self.parseTree(node, state)\n return [val]\n", "role": "user" }, { "content": "process_direct_map", "role": "assistant" } ]
def x(self, root, state: ParseState) ->List[Dict]:
    if root.tag in self.AST_TAG_HANDLERS:
        return self.AST_TAG_HANDLERS[root.tag](root, state)
    elif root.tag in self.libRtns:
        return self.process_libRtn(root, state)
    else:
        prog = []
        for node in root:
            prog += self.parseTree(node, state)
        return prog
parseTree
[ { "content": "Given the source code of a python function, suggest a fitting name for the function.", "role": "system" }, { "content": "def x(self, root, state: ParseState) ->List[Dict]:\n if root.tag in self.AST_TAG_HANDLERS:\n return self.AST_TAG_HANDLERS[root.tag](root, state)\n elif root.tag in self.libRtns:\n return self.process_libRtn(root, state)\n else:\n prog = []\n for node in root:\n prog += self.parseTree(node, state)\n return prog\n", "role": "user" }, { "content": "parseTree", "role": "assistant" } ]
def x(self, root):
    for element in root.iter():
        if element.tag == 'function':
            self.functionList.append(element.attrib['name'])
loadFunction
[ { "content": "Given the source code of a python function, suggest a fitting name for the function.", "role": "system" }, { "content": "def x(self, root):\n for element in root.iter():\n if element.tag == 'function':\n self.functionList.append(element.attrib['name'])\n", "role": "user" }, { "content": "loadFunction", "role": "assistant" } ]
def x(self, trees: List[ET.ElementTree], comments: OrderedDict) ->Dict:
    outputDict = {}
    ast = []
    for tree in trees:
        self.loadFunction(tree)
    for tree in trees:
        ast += self.parseTree(tree, ParseState())
    """
    Find the entry point for the Fortran file.
    The entry point for a conventional Fortran file is always the PROGRAM
    section. This 'if' statement checks for the presence of a PROGRAM
    segment.

    If not found, the entry point can be any of the functions or
    subroutines in the file. So, all the functions and subroutines of the
    program are listed and included as the possible entry point.
    """
    if self.entryPoint:
        entry = {'program': self.entryPoint[0]}
    else:
        entry = {}
    if self.functionList:
        entry['function'] = self.functionList
    if self.subroutineList:
        entry['subroutine'] = self.subroutineList
    outputDict['ast'] = ast
    outputDict['functionList'] = self.functionList
    outputDict['comments'] = comments
    return outputDict
analyze
[ { "content": "Given the source code of a python function, suggest a fitting name for the function.", "role": "system" }, { "content": "def x(self, trees: List[ET.ElementTree], comments: OrderedDict) ->Dict:\n outputDict = {}\n ast = []\n for tree in trees:\n self.loadFunction(tree)\n for tree in trees:\n ast += self.parseTree(tree, ParseState())\n \"\"\"\n Find the entry point for the Fortran file.\n The entry point for a conventional Fortran file is always the PROGRAM\n section. This 'if' statement checks for the presence of a PROGRAM\n segment.\n\n If not found, the entry point can be any of the functions or\n subroutines in the file. So, all the functions and subroutines of the\n program are listed and included as the possible entry point.\n \"\"\"\n if self.entryPoint:\n entry = {'program': self.entryPoint[0]}\n else:\n entry = {}\n if self.functionList:\n entry['function'] = self.functionList\n if self.subroutineList:\n entry['subroutine'] = self.subroutineList\n outputDict['ast'] = ast\n outputDict['functionList'] = self.functionList\n outputDict['comments'] = comments\n return outputDict\n", "role": "user" }, { "content": "analyze", "role": "assistant" } ]
def x():
    parser = ArgumentParser(description=
        'Dynamic Bayes Net Executable Model', formatter_class=
        ArgumentDefaultsHelpFormatter)

    def x(short_arg: str, long_arg: str, help: str):
        parser.add_argument('-' + short_arg, '--' + long_arg, help=help,
            action='store_true')
    subparsers = parser.add_subparsers()
    parser_execute = subparsers.add_parser('execute', help='Model execution')
    parser_execute.set_defaults(func=execute)
    parser_execute.add_argument('--input_dressed_cag', help=
        'Path to the input dressed cag', type=str, default='delphi_model.pkl')
    parser_execute.add_argument('--steps', help=
        'Number of time steps to take', type=partial(positive_int, 'steps'),
        default=5)
    parser_execute.add_argument('--samples', help=
        'Number of sequences to sample', type=partial(positive_int,
        'samples'), default=100)
    parser_execute.add_argument('--output_sequences', help=
        'Output file containing sampled sequences', type=str, default=
        'dbn_sampled_sequences.csv')
    parser_execute.add_argument('--input_variables', help=
        'Path to the variables of the input dressed cag', type=str,
        default='bmi_config.txt')
    args = parser.parse_args()
    if len(sys.argv) == 1:
        parser.print_help()
    else:
        args.func(args)
main
[ { "content": "Given the source code of a python function, suggest a fitting name for the function.", "role": "system" }, { "content": "def x():\n parser = ArgumentParser(description=\n 'Dynamic Bayes Net Executable Model', formatter_class=\n ArgumentDefaultsHelpFormatter)\n\n def x(short_arg: str, long_arg: str, help: str):\n parser.add_argument('-' + short_arg, '--' + long_arg, help=help,\n action='store_true')\n subparsers = parser.add_subparsers()\n parser_execute = subparsers.add_parser('execute', help='Model execution')\n parser_execute.set_defaults(func=execute)\n parser_execute.add_argument('--input_dressed_cag', help=\n 'Path to the input dressed cag', type=str, default='delphi_model.pkl')\n parser_execute.add_argument('--steps', help=\n 'Number of time steps to take', type=partial(positive_int, 'steps'),\n default=5)\n parser_execute.add_argument('--samples', help=\n 'Number of sequences to sample', type=partial(positive_int,\n 'samples'), default=100)\n parser_execute.add_argument('--output_sequences', help=\n 'Output file containing sampled sequences', type=str, default=\n 'dbn_sampled_sequences.csv')\n parser_execute.add_argument('--input_variables', help=\n 'Path to the variables of the input dressed cag', type=str, default\n ='bmi_config.txt')\n args = parser.parse_args()\n if len(sys.argv) == 1:\n parser.print_help()\n else:\n args.func(args)\n", "role": "user" }, { "content": "main", "role": "assistant" } ]
def x():
    df = pd.read_csv('south_sudan_data_fao.csv')
    gb = df.groupby('Element')
    d = [{'events': [{k: [{e: [process_variable_name(k, e)]} for e in list(
        set(gb.get_group(k)['Item'].tolist()))]} for k in gb.groups.keys()]}]
    yaml = YAML()
    yaml.default_flow_style = False
    with open('fao_variable_ontology.yml', 'w') as f:
        yaml.dump(d, f)
construct_FAO_ontology
[ { "content": "Given the source code of a python function, suggest a fitting name for the function.", "role": "system" }, { "content": "def x():\n df = pd.read_csv('south_sudan_data_fao.csv')\n gb = df.groupby('Element')\n d = [{'events': [{k: [{e: [process_variable_name(k, e)]} for e in list(\n set(gb.get_group(k)['Item'].tolist()))]} for k in gb.groups.keys()]}]\n yaml = YAML()\n yaml.default_flow_style = False\n with open('fao_variable_ontology.yml', 'w') as f:\n yaml.dump(d, f)\n", "role": "user" }, { "content": "construct_FAO_ontology", "role": "assistant" } ]
def x(G: AnalysisGraph, source: str, target: str):
    return create_statement_inspection_table(G[source][target][
        'InfluenceStatements'])
inspect_edge
[ { "content": "Given the source code of a python function, suggest a fitting name for the function.", "role": "system" }, { "content": "def x(G: AnalysisGraph, source: str, target: str):\n return create_statement_inspection_table(G[source][target][\n 'InfluenceStatements'])\n", "role": "user" }, { "content": "inspect_edge", "role": "assistant" } ]
def x(G: AnalysisGraph, source: str, target: str) ->List[str]:
    return chain.from_iterable([[repr(e.text) for e in s.evidence] for s in
        G.edges[source, target]['InfluenceStatements']])
get_edge_sentences
[ { "content": "Given the source code of a python function, suggest a fitting name for the function.", "role": "system" }, { "content": "def x(G: AnalysisGraph, source: str, target: str) ->List[str]:\n return chain.from_iterable([[repr(e.text) for e in s.evidence] for s in\n G.edges[source, target]['InfluenceStatements']])\n", "role": "user" }, { "content": "get_edge_sentences", "role": "assistant" } ]
def x(type_str):
    if type_str == 'container':
        return NodeType.CONTAINER
    elif type_str == 'loop_plate':
        return NodeType.LOOP
    elif type_str == 'assign':
        return NodeType.ASSIGN
    elif type_str == 'condition':
        return NodeType.CONDITION
    elif type_str == 'decision':
        return NodeType.DECISION
    else:
        raise ValueError('Unrecognized type string: ', type_str)
get_node_type
[ { "content": "Given the source code of a python function, suggest a fitting name for the function.", "role": "system" }, { "content": "def x(type_str):\n if type_str == 'container':\n return NodeType.CONTAINER\n elif type_str == 'loop_plate':\n return NodeType.LOOP\n elif type_str == 'assign':\n return NodeType.ASSIGN\n elif type_str == 'condition':\n return NodeType.CONDITION\n elif type_str == 'decision':\n return NodeType.DECISION\n else:\n raise ValueError('Unrecognized type string: ', type_str)\n", "role": "user" }, { "content": "get_node_type", "role": "assistant" } ]
def x(type_list):
    out_format_list = []
    for type_item in type_list:
        item_format = default_output_format(type_item)
        out_format_list.append(item_format)
    return out_format_list
list_output_formats
[ { "content": "Given the source code of a python function, suggest a fitting name for the function.", "role": "system" }, { "content": "def x(type_list):\n out_format_list = []\n for type_item in type_list:\n item_format = default_output_format(type_item)\n out_format_list.append(item_format)\n return out_format_list\n", "role": "user" }, { "content": "list_output_formats", "role": "assistant" } ]
def x(type_list):
    data_type = []
    for item in type_list:
        match = re.match('(\\d+)(.+)', item)
        if not match:
            reps = 1
            if item[0] in 'FfEegG':
                data_type.append('REAL')
            elif item[0] in 'Ii':
                data_type.append('INTEGER')
        else:
            reps = match.group(1)
            fmt = match.group(2)
            if '(' in fmt and ',' in fmt:
                fmt = fmt[1:-1].split(',')
            elif '(' in fmt:
                fmt = [fmt[1:-1]]
            else:
                fmt = [fmt]
            for i in range(int(reps)):
                for ft in fmt:
                    if ft[0] in 'FfEegG':
                        data_type.append('REAL')
                    elif ft[0] in 'Ii':
                        data_type.append('INTEGER')
    return data_type
list_data_type
[ { "content": "Given the source code of a python function, suggest a fitting name for the function.", "role": "system" }, { "content": "def x(type_list):\n data_type = []\n for item in type_list:\n match = re.match('(\\\\d+)(.+)', item)\n if not match:\n reps = 1\n if item[0] in 'FfEegG':\n data_type.append('REAL')\n elif item[0] in 'Ii':\n data_type.append('INTEGER')\n else:\n reps = match.group(1)\n fmt = match.group(2)\n if '(' in fmt and ',' in fmt:\n fmt = fmt[1:-1].split(',')\n elif '(' in fmt:\n fmt = [fmt[1:-1]]\n else:\n fmt = [fmt]\n for i in range(int(reps)):\n for ft in fmt:\n if ft[0] in 'FfEegG':\n data_type.append('REAL')\n elif ft[0] in 'Ii':\n data_type.append('INTEGER')\n return data_type\n", "role": "user" }, { "content": "list_data_type", "role": "assistant" } ]
def x(self):
    format_list = self._format_list
    self._re_cvt = self.match_input_fmt(format_list)
    regexp0_str = ''.join([subs[0] for subs in self._re_cvt])
    self._regexp_str = regexp0_str
    self._re = re.compile(regexp0_str)
    self._match_exps = [subs[1] for subs in self._re_cvt if subs[1] is not None]
    self._divisors = [subs[2] for subs in self._re_cvt if subs[2] is not None]
    self._in_cvt_fns = [subs[3] for subs in self._re_cvt if subs[3] is not None]
    self._read_line_init = True
init_read_line
[ { "content": "Given the source code of a python function, suggest a fitting name for the function.", "role": "system" }, { "content": "def x(self):\n format_list = self._format_list\n self._re_cvt = self.match_input_fmt(format_list)\n regexp0_str = ''.join([subs[0] for subs in self._re_cvt])\n self._regexp_str = regexp0_str\n self._re = re.compile(regexp0_str)\n self._match_exps = [subs[1] for subs in self._re_cvt if subs[1] is not None\n ]\n self._divisors = [subs[2] for subs in self._re_cvt if subs[2] is not None]\n self._in_cvt_fns = [subs[3] for subs in self._re_cvt if subs[3] is not None\n ]\n self._read_line_init = True\n", "role": "user" }, { "content": "init_read_line", "role": "assistant" } ]
def x(self):
    format_list = self._format_list
    output_info = self.gen_output_fmt(format_list)
    self._output_fmt = ''.join([sub[0] for sub in output_info])
    self._out_gen_fmt = [sub[1] for sub in output_info if sub[1] is not None]
    self._out_widths = [sub[2] for sub in output_info if sub[2] is not None]
    self._write_line_init = True
init_write_line
[ { "content": "Given the source code of a python function, suggest a fitting name for the function.", "role": "system" }, { "content": "def x(self):\n format_list = self._format_list\n output_info = self.gen_output_fmt(format_list)\n self._output_fmt = ''.join([sub[0] for sub in output_info])\n self._out_gen_fmt = [sub[1] for sub in output_info if sub[1] is not None]\n self._out_widths = [sub[2] for sub in output_info if sub[2] is not None]\n self._write_line_init = True\n", "role": "user" }, { "content": "init_write_line", "role": "assistant" } ]
def x(self, line):
    if not self._read_line_init:
        self.init_read_line()
    match = self._re.match(line)
    assert match is not None, f'Format mismatch (line = {line})'
    matched_values = []
    for i in range(self._re.groups):
        cvt_re = self._match_exps[i]
        cvt_div = self._divisors[i]
        cvt_fn = self._in_cvt_fns[i]
        match_str = match.group(i + 1)
        match0 = re.match(cvt_re, match_str)
        if match0 is not None:
            if cvt_fn == 'float':
                if '.' in match_str:
                    val = float(match_str)
                else:
                    val = int(match_str) / cvt_div
            elif cvt_fn == 'int':
                val = int(match_str)
            else:
                sys.stderr.write(
                    f'Unrecognized conversion function: {cvt_fn}\n')
        else:
            sys.stderr.write(f'Format conversion failed: {match_str}\n')
        matched_values.append(val)
    return tuple(matched_values)
read_line
[ { "content": "Given the source code of a python function, suggest a fitting name for the function.", "role": "system" }, { "content": "def x(self, line):\n if not self._read_line_init:\n self.init_read_line()\n match = self._re.match(line)\n assert match is not None, f'Format mismatch (line = {line})'\n matched_values = []\n for i in range(self._re.groups):\n cvt_re = self._match_exps[i]\n cvt_div = self._divisors[i]\n cvt_fn = self._in_cvt_fns[i]\n match_str = match.group(i + 1)\n match0 = re.match(cvt_re, match_str)\n if match0 is not None:\n if cvt_fn == 'float':\n if '.' in match_str:\n val = float(match_str)\n else:\n val = int(match_str) / cvt_div\n elif cvt_fn == 'int':\n val = int(match_str)\n else:\n sys.stderr.write(\n f'Unrecognized conversion function: {cvt_fn}\\n')\n else:\n sys.stderr.write(f'Format conversion failed: {match_str}\\n')\n matched_values.append(val)\n return tuple(matched_values)\n", "role": "user" }, { "content": "read_line", "role": "assistant" } ]
def x(self, values):
    if not self._write_line_init:
        self.init_write_line()
    if len(self._out_widths) > len(values):
        raise For2PyError(
            f'ERROR: too few values for format {self._format_list}\n')
    out_strs = []
    for i in range(len(self._out_widths)):
        out_fmt = self._out_gen_fmt[i]
        out_width = self._out_widths[i]
        out_val = out_fmt.format(values[i])
        if len(out_val) > out_width:
            out_val = '*' * out_width
        out_strs.append(out_val)
    out_str_exp = '"' + self._output_fmt + '".format' + str(tuple(out_strs))
    out_str = eval(out_str_exp)
    return out_str + '\n'
write_line
[ { "content": "Given the source code of a python function, suggest a fitting name for the function.", "role": "system" }, { "content": "def x(self, values):\n if not self._write_line_init:\n self.init_write_line()\n if len(self._out_widths) > len(values):\n raise For2PyError(\n f'ERROR: too few values for format {self._format_list}\\n')\n out_strs = []\n for i in range(len(self._out_widths)):\n out_fmt = self._out_gen_fmt[i]\n out_width = self._out_widths[i]\n out_val = out_fmt.format(values[i])\n if len(out_val) > out_width:\n out_val = '*' * out_width\n out_strs.append(out_val)\n out_str_exp = '\"' + self._output_fmt + '\".format' + str(tuple(out_strs))\n out_str = eval(out_str_exp)\n return out_str + '\\n'\n", "role": "user" }, { "content": "write_line", "role": "assistant" } ]
def x(self, fmt_list):
    rexp_list = []
    for fmt in fmt_list:
        rexp_list.extend(self.match_input_fmt_1(fmt))
    return rexp_list
match_input_fmt
[ { "content": "Given the source code of a python function, suggest a fitting name for the function.", "role": "system" }, { "content": "def x(self, fmt_list):\n rexp_list = []\n for fmt in fmt_list:\n rexp_list.extend(self.match_input_fmt_1(fmt))\n return rexp_list\n", "role": "user" }, { "content": "match_input_fmt", "role": "assistant" } ]
def x(self, fmt):
    fmt = fmt.strip()
    match = re.match('(\\d+)(.+)', fmt)
    if match is None:
        reps = 1
    else:
        reps = int(match.group(1))
        fmt = match.group(2)
    if fmt[0] == '(':
        fmt = fmt[1:-1]
        fmt_list = fmt.split(',')
        rexp = self.match_input_fmt(fmt_list)
    elif fmt[0] in 'iI':
        sz = fmt[1:]
        xtract_rexp = '(.{' + sz + '})'
        leading_sp = ' *'
        optional_sign = '-?'
        rexp0 = '\\d+'
        rexp1 = leading_sp + optional_sign + rexp0
        divisor = 1
        rexp = [(xtract_rexp, rexp1, divisor, 'int')]
    elif fmt[0] in 'xX':
        xtract_rexp = '.'
        rexp = [(xtract_rexp, None, None, None)]
    elif fmt[0] in 'fF':
        idx0 = fmt.find('.')
        sz = fmt[1:idx0]
        divisor = 10 ** int(fmt[idx0 + 1:])
        xtract_rexp = '(.{' + sz + '})'
        leading_sp = ' *'
        optional_sign = '-?'
        rexp0 = '\\d+(\\.\\d+)?'
        rexp1 = leading_sp + optional_sign + rexp0
        rexp = [(xtract_rexp, rexp1, divisor, 'float')]
    else:
        raise For2PyError(f'ERROR: Unrecognized format specifier {fmt}\n')
    rexp *= reps
    return rexp
match_input_fmt_1
[ { "content": "Given the source code of a python function, suggest a fitting name for the function.", "role": "system" }, { "content": "def x(self, fmt):\n fmt = fmt.strip()\n match = re.match('(\\\\d+)(.+)', fmt)\n if match is None:\n reps = 1\n else:\n reps = int(match.group(1))\n fmt = match.group(2)\n if fmt[0] == '(':\n fmt = fmt[1:-1]\n fmt_list = fmt.split(',')\n rexp = self.match_input_fmt(fmt_list)\n elif fmt[0] in 'iI':\n sz = fmt[1:]\n xtract_rexp = '(.{' + sz + '})'\n leading_sp = ' *'\n optional_sign = '-?'\n rexp0 = '\\\\d+'\n rexp1 = leading_sp + optional_sign + rexp0\n divisor = 1\n rexp = [(xtract_rexp, rexp1, divisor, 'int')]\n elif fmt[0] in 'xX':\n xtract_rexp = '.'\n rexp = [(xtract_rexp, None, None, None)]\n elif fmt[0] in 'fF':\n idx0 = fmt.find('.')\n sz = fmt[1:idx0]\n divisor = 10 ** int(fmt[idx0 + 1:])\n xtract_rexp = '(.{' + sz + '})'\n leading_sp = ' *'\n optional_sign = '-?'\n rexp0 = '\\\\d+(\\\\.\\\\d+)?'\n rexp1 = leading_sp + optional_sign + rexp0\n rexp = [(xtract_rexp, rexp1, divisor, 'float')]\n else:\n raise For2PyError(f'ERROR: Unrecognized format specifier {fmt}\\n')\n rexp *= reps\n return rexp\n", "role": "user" }, { "content": "match_input_fmt_1", "role": "assistant" } ]
def x(self, fmt_list):
    rexp_list = []
    for fmt in fmt_list:
        rexp_list.extend(self.gen_output_fmt_1(fmt))
    return rexp_list
gen_output_fmt
[ { "content": "Given the source code of a python function, suggest a fitting name for the function.", "role": "system" }, { "content": "def x(self, fmt_list):\n rexp_list = []\n for fmt in fmt_list:\n rexp_list.extend(self.gen_output_fmt_1(fmt))\n return rexp_list\n", "role": "user" }, { "content": "gen_output_fmt", "role": "assistant" } ]
def x(self, fmt):
    fmt = fmt.strip()
    match = re.match('(\\d+)(.+)', fmt)
    if match is None:
        reps = 1
    else:
        reps = int(match.group(1))
        fmt = match.group(2)
    if fmt[0] == '(':
        fmt = fmt[1:-1]
        fmt_list = fmt.split(',')
        rexp = self.gen_output_fmt(fmt_list)
    elif fmt[0] in 'iI':
        sz = fmt[1:]
        gen_fmt = '{}'
        cvt_fmt = '{:' + str(sz) + 'd}'
        rexp = [(gen_fmt, cvt_fmt, int(sz))]
    elif fmt[0] in 'xX':
        gen_fmt = ' '
        rexp = [(gen_fmt, None, None)]
    elif fmt[0] in 'eEfFgG':
        idx0 = fmt.find('.')
        sz = fmt[1:idx0]
        suffix = fmt[idx0 + 1:]
        m = re.match('(\\d+).*', suffix)
        assert m is not None, f"Improper format? '{fmt}'"
        prec = m.group(1)
        gen_fmt = '{}'
        cvt_fmt = '{:' + sz + '.' + prec + fmt[0] + '}'
        rexp = [(gen_fmt, cvt_fmt, int(sz))]
    elif fmt[0] in 'pP':
        rest_of_fmt = fmt[1:]
        rexp = self.gen_output_fmt_1(rest_of_fmt)
    elif fmt[0] in '\'"':
        sz = len(fmt) - 2
        gen_fmt = fmt[1:-1]
        rexp = [(gen_fmt, None, None)]
    elif fmt[0] == '/':
        gen_fmt = '\\n' * len(fmt)
        rexp = [(gen_fmt, None, None)]
    else:
        raise For2PyError(f'ERROR: Unrecognized format specifier {fmt}\n')
    rexp *= reps
    return rexp
gen_output_fmt_1
[ { "content": "Given the source code of a python function, suggest a fitting name for the function.", "role": "system" }, { "content": "def x(self, fmt):\n fmt = fmt.strip()\n match = re.match('(\\\\d+)(.+)', fmt)\n if match is None:\n reps = 1\n else:\n reps = int(match.group(1))\n fmt = match.group(2)\n if fmt[0] == '(':\n fmt = fmt[1:-1]\n fmt_list = fmt.split(',')\n rexp = self.gen_output_fmt(fmt_list)\n elif fmt[0] in 'iI':\n sz = fmt[1:]\n gen_fmt = '{}'\n cvt_fmt = '{:' + str(sz) + 'd}'\n rexp = [(gen_fmt, cvt_fmt, int(sz))]\n elif fmt[0] in 'xX':\n gen_fmt = ' '\n rexp = [(gen_fmt, None, None)]\n elif fmt[0] in 'eEfFgG':\n idx0 = fmt.find('.')\n sz = fmt[1:idx0]\n suffix = fmt[idx0 + 1:]\n m = re.match('(\\\\d+).*', suffix)\n assert m is not None, f\"Improper format? '{fmt}'\"\n prec = m.group(1)\n gen_fmt = '{}'\n cvt_fmt = '{:' + sz + '.' + prec + fmt[0] + '}'\n rexp = [(gen_fmt, cvt_fmt, int(sz))]\n elif fmt[0] in 'pP':\n rest_of_fmt = fmt[1:]\n rexp = self.gen_output_fmt_1(rest_of_fmt)\n elif fmt[0] in '\\'\"':\n sz = len(fmt) - 2\n gen_fmt = fmt[1:-1]\n rexp = [(gen_fmt, None, None)]\n elif fmt[0] == '/':\n gen_fmt = '\\\\n' * len(fmt)\n rexp = [(gen_fmt, None, None)]\n else:\n raise For2PyError(f'ERROR: Unrecognized format specifier {fmt}\\n')\n rexp *= reps\n return rexp\n", "role": "user" }, { "content": "gen_output_fmt_1", "role": "assistant" } ]
def x(gb, rs: np.ndarray, e: Tuple[str, str, Dict]) ->gaussian_kde:
    adjective_response_dict = {}
    all_θs = []
    σ_X = σ_Y = 0.1
    for stmt in e[2]['InfluenceStatements']:
        for ev in stmt.evidence:
            if ev.annotations.get('subj_adjectives') is not None:
                for subj_adjective in ev.annotations['subj_adjectives']:
                    if (subj_adjective in gb.groups and subj_adjective
                            not in adjective_response_dict):
                        adjective_response_dict[subj_adjective] = get_respdevs(
                            gb.get_group(subj_adjective))
                    rs_subj = stmt.subj_delta['polarity'
                        ] * adjective_response_dict.get(subj_adjective, rs)
                    for obj_adjective in ev.annotations['obj_adjectives']:
                        if (obj_adjective in gb.groups and obj_adjective
                                not in adjective_response_dict):
                            adjective_response_dict[obj_adjective
                                ] = get_respdevs(gb.get_group(obj_adjective))
                        rs_obj = stmt.obj_delta['polarity'
                            ] * adjective_response_dict.get(obj_adjective, rs)
                        xs1, ys1 = np.meshgrid(rs_subj, rs_obj, indexing='xy')
                        θs = np.arctan2(σ_Y * ys1.flatten(), xs1.flatten())
                        all_θs.append(θs)
    xs1, ys1 = np.meshgrid(stmt.subj_delta['polarity'] * rs,
        stmt.obj_delta['polarity'] * rs, indexing='xy')
    θs = np.arctan2(σ_Y * ys1.flatten(), σ_X * xs1.flatten())
    if len(all_θs) == 0:
        all_θs.append(θs)
        return gaussian_kde(all_θs)
    else:
        return gaussian_kde(np.concatenate(all_θs))
constructConditionalPDF
[ { "content": "Given the source code of a python function, suggest a fitting name for the function.", "role": "system" }, { "content": "def x(gb, rs: np.ndarray, e: Tuple[str, str, Dict]) ->gaussian_kde:\n adjective_response_dict = {}\n all_θs = []\n σ_X = σ_Y = 0.1\n for stmt in e[2]['InfluenceStatements']:\n for ev in stmt.evidence:\n if ev.annotations.get('subj_adjectives') is not None:\n for subj_adjective in ev.annotations['subj_adjectives']:\n if (subj_adjective in gb.groups and subj_adjective not in\n adjective_response_dict):\n adjective_response_dict[subj_adjective] = get_respdevs(\n gb.get_group(subj_adjective))\n rs_subj = stmt.subj_delta['polarity'\n ] * adjective_response_dict.get(subj_adjective, rs)\n for obj_adjective in ev.annotations['obj_adjectives']:\n if (obj_adjective in gb.groups and obj_adjective not in\n adjective_response_dict):\n adjective_response_dict[obj_adjective\n ] = get_respdevs(gb.get_group(obj_adjective))\n rs_obj = stmt.obj_delta['polarity'\n ] * adjective_response_dict.get(obj_adjective, rs)\n xs1, ys1 = np.meshgrid(rs_subj, rs_obj, indexing='xy')\n θs = np.arctan2(σ_Y * ys1.flatten(), xs1.flatten())\n all_θs.append(θs)\n xs1, ys1 = np.meshgrid(stmt.subj_delta['polarity'] * rs, stmt.\n obj_delta['polarity'] * rs, indexing='xy')\n θs = np.arctan2(σ_Y * ys1.flatten(), σ_X * xs1.flatten())\n if len(all_θs) == 0:\n all_θs.append(θs)\n return gaussian_kde(all_θs)\n else:\n return gaussian_kde(np.concatenate(all_θs))\n", "role": "user" }, { "content": "constructConditionalPDF", "role": "assistant" } ]
def x(x: str):
    xs = x.replace('\\/', '|').split('/')
    xs = [x.replace('|', '/') for x in xs]
    if xs[0] == 'FAO':
        return ' '.join(xs[2:]), xs[0]
    else:
        return xs[-1], xs[0]
get_variable_and_source
[ { "content": "Given the source code of a python function, suggest a fitting name for the function.", "role": "system" }, { "content": "def x(x: str):\n xs = x.replace('\\\\/', '|').split('/')\n xs = [x.replace('|', '/') for x in xs]\n if xs[0] == 'FAO':\n return ' '.join(xs[2:]), xs[0]\n else:\n return xs[-1], xs[0]\n", "role": "user" }, { "content": "get_variable_and_source", "role": "assistant" } ]
def x(n: int=1) ->Dict[str, List[str]]:
    df = pd.read_sql_table('concept_to_indicator_mapping', con=engine)
    gb = df.groupby('Concept')
    _dict = {k: [get_variable_and_source(x) for x in take(n, v['Indicator']
        .values)] for k, v in gb}
    return _dict
construct_concept_to_indicator_mapping
[ { "content": "Given the source code of a python function, suggest a fitting name for the function.", "role": "system" }, { "content": "def x(n: int=1) ->Dict[str, List[str]]:\n df = pd.read_sql_table('concept_to_indicator_mapping', con=engine)\n gb = df.groupby('Concept')\n _dict = {k: [get_variable_and_source(x) for x in take(n, v['Indicator']\n .values)] for k, v in gb}\n return _dict\n", "role": "user" }, { "content": "construct_concept_to_indicator_mapping", "role": "assistant" } ]
def x(self, builder: Builder):
    self.config = builder.configuration.mortality
    self.population_view = builder.population.get_view(['alive'], query=
        "alive == 'alive'")
    self.randomness = builder.randomness.get_stream('mortality')
    self.mortality_rate = builder.value.register_rate_producer(
        'mortality_rate', source=self.base_mortality_rate)
    builder.event.register_listener('time_step', self.determine_deaths)
setup
[ { "content": "Given the source code of a python function, suggest a fitting name for the function.", "role": "system" }, { "content": "def x(self, builder: Builder):\n self.config = builder.configuration.mortality\n self.population_view = builder.population.get_view(['alive'], query=\n \"alive == 'alive'\")\n self.randomness = builder.randomness.get_stream('mortality')\n self.mortality_rate = builder.value.register_rate_producer('mortality_rate'\n , source=self.base_mortality_rate)\n builder.event.register_listener('time_step', self.determine_deaths)\n", "role": "user" }, { "content": "setup", "role": "assistant" } ]
def x(self, index: pd.Index) ->pd.Series:
    return pd.Series(self.config.mortality_rate, index=index)
base_mortality_rate
[ { "content": "Given the source code of a python function, suggest a fitting name for the function.", "role": "system" }, { "content": "def x(self, index: pd.Index) ->pd.Series:\n return pd.Series(self.config.mortality_rate, index=index)\n", "role": "user" }, { "content": "base_mortality_rate", "role": "assistant" } ]
def x(self, event: Event):
    effective_rate = self.mortality_rate(event.index)
    effective_probability = 1 - np.exp(-effective_rate)
    draw = self.randomness.get_draw(event.index)
    affected_simulants = draw < effective_probability
    self.population_view.update(pd.Series('dead', index=event.index[
        affected_simulants]))
determine_deaths
[ { "content": "Given the source code of a python function, suggest a fitting name for the function.", "role": "system" }, { "content": "def x(self, event: Event):\n effective_rate = self.mortality_rate(event.index)\n effective_probability = 1 - np.exp(-effective_rate)\n draw = self.randomness.get_draw(event.index)\n affected_simulants = draw < effective_probability\n self.population_view.update(pd.Series('dead', index=event.index[\n affected_simulants]))\n", "role": "user" }, { "content": "determine_deaths", "role": "assistant" } ]
def x(component_list: Sequence[str]) ->List[Tuple[str, Tuple[str]]]:
    components = []
    for c in component_list:
        path, args_plus = c.split('(')
        cleaned_args = _clean_args(args_plus[:-1].split(','), path)
        components.append((path, cleaned_args))
    return components
prep_components
[ { "content": "Given the source code of a python function, suggest a fitting name for the function.", "role": "system" }, { "content": "def x(component_list: Sequence[str]) ->List[Tuple[str, Tuple[str]]]:\n components = []\n for c in component_list:\n path, args_plus = c.split('(')\n cleaned_args = _clean_args(args_plus[:-1].split(','), path)\n components.append((path, cleaned_args))\n return components\n", "role": "user" }, { "content": "prep_components", "role": "assistant" } ]
def x(self, component_config: Union[ConfigTree, List]) ->List:
    if isinstance(component_config, ConfigTree):
        component_list = self.parse_component_config(component_config.to_dict())
    else:
        component_list = component_config
    component_list = _prep_components(component_list)
    return _import_and_instantiate_components(component_list)
get_components
[ { "content": "Given the source code of a python function, suggest a fitting name for the function.", "role": "system" }, { "content": "def x(self, component_config: Union[ConfigTree, List]) ->List:\n if isinstance(component_config, ConfigTree):\n component_list = self.parse_component_config(component_config.to_dict()\n )\n else:\n component_list = component_config\n component_list = _prep_components(component_list)\n return _import_and_instantiate_components(component_list)\n", "role": "user" }, { "content": "get_components", "role": "assistant" } ]
def x(self, component_config: Dict[str, Union[Dict, List]]) ->List[str]:
    return _parse_component_config(component_config)
parse_component_config
[ { "content": "Given the source code of a python function, suggest a fitting name for the function.", "role": "system" }, { "content": "def x(self, component_config: Dict[str, Union[Dict, List]]) ->List[str]:\n return _parse_component_config(component_config)\n", "role": "user" }, { "content": "parse_component_config", "role": "assistant" } ]
def x(self):
    searchunks = self._split_file()
    if searchunks:
        usage = self._parse_file(searchunks)
        if 'CPU' in usage:
            return False
        self._sarinfo = usage
        del usage
        return True
    else:
        return False
load_file
[ { "content": "Given the source code of a python function, suggest a fitting name for the function.", "role": "system" }, { "content": "def x(self):\n searchunks = self._split_file()\n if searchunks:\n usage = self._parse_file(searchunks)\n if 'CPU' in usage:\n return False\n self._sarinfo = usage\n del usage\n return True\n else:\n return False\n", "role": "user" }, { "content": "load_file", "role": "assistant" } ]
def x(self):
    try:
        test = self._sarinfo['CPU']
        del test
    except KeyError:
        file_parsed = self.load_file()
        if file_parsed:
            return self._sarinfo
        else:
            return False
    except:
        traceback.print_exc()
        return False
    return self._sarinfo
get_sar_info
[ { "content": "Given the source code of a python function, suggest a fitting name for the function.", "role": "system" }, { "content": "def x(self):\n try:\n test = self._sarinfo['CPU']\n del test\n except KeyError:\n file_parsed = self.load_file()\n if file_parsed:\n return self._sarinfo\n else:\n return False\n except:\n traceback.print_exc()\n return False\n return self._sarinfo\n", "role": "user" }, { "content": "get_sar_info", "role": "assistant" } ]
def x(self, data=''):
    if self.__filename and os.access(self.__filename, os.R_OK) or data != '':
        fhandle = None
        if data == '':
            try:
                fhandle = os.open(self.__filename, os.O_RDONLY)
            except OSError:
                print("Couldn't open file %s" % self.__filename)
                fhandle = None
        if fhandle or data != '':
            datalength = 0
            if platform.system() == 'Windows':
                dataprot = mmap.ACCESS_READ
            else:
                dataprot = mmap.PROT_READ
            if data != '':
                fhandle = -1
                datalength = len(data)
                if platform.system() == 'Windows':
                    dataprot = mmap.ACCESS_READ | mmap.ACCESS_WRITE
                else:
                    dataprot = mmap.PROT_READ | mmap.PROT_WRITE
            try:
                if platform.system() == 'Windows':
                    sarmap = mmap.mmap(fhandle, length=datalength, access=
                        dataprot)
                else:
                    sarmap = mmap.mmap(fhandle, length=datalength, prot=
                        dataprot)
                if data != '':
                    sarmap.write(data)
                    sarmap.flush()
                    sarmap.seek(0, os.SEEK_SET)
            except (TypeError, IndexError):
                if data == '':
                    os.close(fhandle)
                traceback.print_exc()
                return False
            searchunks = []
            oldchunkpos = 0
            dlpos = sarmap.find('\n\n', 0)
            size = 0
            if data == '':
                size = sarmap.size()
            else:
                len(data)
            while dlpos > -1:
                tempchunk = sarmap.read(dlpos - oldchunkpos)
                searchunks.append(tempchunk.strip())
                oldchunkpos += dlpos - oldchunkpos + 2
                try:
                    sarmap.seek(2, os.SEEK_CUR)
                except ValueError:
                    print('Out of bounds (%s)!\n' % sarmap.tell())
                dlpos = sarmap.find('\n\n')
            if oldchunkpos < size:
                tempchunk = sarmap[oldchunkpos:]
                searchunks.append(tempchunk.strip())
            sarmap.close()
            if fhandle != -1:
                os.close(fhandle)
            if searchunks:
                return searchunks
            else:
                return False
    return False
split_file
[ { "content": "Given the source code of a python function, suggest a fitting name for the function.", "role": "system" }, { "content": "def x(self, data=''):\n if self.__filename and os.access(self.__filename, os.R_OK) or data != '':\n fhandle = None\n if data == '':\n try:\n fhandle = os.open(self.__filename, os.O_RDONLY)\n except OSError:\n print(\"Couldn't open file %s\" % self.__filename)\n fhandle = None\n if fhandle or data != '':\n datalength = 0\n if platform.system() == 'Windows':\n dataprot = mmap.ACCESS_READ\n else:\n dataprot = mmap.PROT_READ\n if data != '':\n fhandle = -1\n datalength = len(data)\n if platform.system() == 'Windows':\n dataprot = mmap.ACCESS_READ | mmap.ACCESS_WRITE\n else:\n dataprot = mmap.PROT_READ | mmap.PROT_WRITE\n try:\n if platform.system() == 'Windows':\n sarmap = mmap.mmap(fhandle, length=datalength, access=\n dataprot)\n else:\n sarmap = mmap.mmap(fhandle, length=datalength, prot=\n dataprot)\n if data != '':\n sarmap.write(data)\n sarmap.flush()\n sarmap.seek(0, os.SEEK_SET)\n except (TypeError, IndexError):\n if data == '':\n os.close(fhandle)\n traceback.print_exc()\n return False\n searchunks = []\n oldchunkpos = 0\n dlpos = sarmap.find('\\n\\n', 0)\n size = 0\n if data == '':\n size = sarmap.size()\n else:\n len(data)\n while dlpos > -1:\n tempchunk = sarmap.read(dlpos - oldchunkpos)\n searchunks.append(tempchunk.strip())\n oldchunkpos += dlpos - oldchunkpos + 2\n try:\n sarmap.seek(2, os.SEEK_CUR)\n except ValueError:\n print('Out of bounds (%s)!\\n' % sarmap.tell())\n dlpos = sarmap.find('\\n\\n')\n if oldchunkpos < size:\n tempchunk = sarmap[oldchunkpos:]\n searchunks.append(tempchunk.strip())\n sarmap.close()\n if fhandle != -1:\n os.close(fhandle)\n if searchunks:\n return searchunks\n else:\n return False\n return False\n", "role": "user" }, { "content": "split_file", "role": "assistant" } ]
def x(self, sar_parts):
    usage = {}
    output = {}
    if type(sar_parts) is list:
        restart_pattern = re.compile(PATTERN_RESTART)
        """ !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! """
        """ ********** ATTENTION ******* """
        """ THERE CAN BE MORE THAN ONE SAME SECTION IN ONE FILE """
        """ IF SYSTEM WAS REBOOTED DURING THE DAY """
        """ !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! """
        for PATTERNSNAME in ALL_PATTERNS:
            patterns = ALL_PATTERNS[PATTERNSNAME]
            rgxpattern = re.compile(patterns['PATTERN'])
            for part in sar_parts:
                if rgxpattern.search(part):
                    if PATTERNSNAME in usage:
                        usage[PATTERNSNAME] += '\n' + part
                    else:
                        usage[PATTERNSNAME] = part
                    try:
                        first_line = part.split('\n')[0]
                    except IndexError:
                        first_line = part
                    self.__fields[PATTERNSNAME] = self.__find_column(
                        patterns['FIELDS'], first_line)
                if restart_pattern.search(part):
                    pieces = part.split()
                    self.__restart_times.append(pieces[0])
                    del pieces
        del sar_parts
        for PATTERNSNAME in ALL_PATTERNS:
            patterns = ALL_PATTERNS[PATTERNSNAME]
            output[PATTERNSNAME] = self.__split_info(usage[PATTERNSNAME],
                PATTERNSNAME, patterns)
        del usage
        return output
    return output
parse_file
[ { "content": "Given the source code of a python function, suggest a fitting name for the function.", "role": "system" }, { "content": "def x(self, sar_parts):\n usage = {}\n output = {}\n if type(sar_parts) is list:\n restart_pattern = re.compile(PATTERN_RESTART)\n \"\"\" !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! \"\"\"\n \"\"\" ********** ATTENTION ******* \"\"\"\n \"\"\" THERE CAN BE MORE THAN ONE SAME SECTION IN ONE FILE \"\"\"\n \"\"\" IF SYSTEM WAS REBOOTED DURING THE DAY \"\"\"\n \"\"\" !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! \"\"\"\n for PATTERNSNAME in ALL_PATTERNS:\n patterns = ALL_PATTERNS[PATTERNSNAME]\n rgxpattern = re.compile(patterns['PATTERN'])\n for part in sar_parts:\n if rgxpattern.search(part):\n if PATTERNSNAME in usage:\n usage[PATTERNSNAME] += '\\n' + part\n else:\n usage[PATTERNSNAME] = part\n try:\n first_line = part.split('\\n')[0]\n except IndexError:\n first_line = part\n self.__fields[PATTERNSNAME] = self.__find_column(\n patterns['FIELDS'], first_line)\n if restart_pattern.search(part):\n pieces = part.split()\n self.__restart_times.append(pieces[0])\n del pieces\n del sar_parts\n for PATTERNSNAME in ALL_PATTERNS:\n patterns = ALL_PATTERNS[PATTERNSNAME]\n output[PATTERNSNAME] = self.__split_info(usage[PATTERNSNAME],\n PATTERNSNAME, patterns)\n del usage\n return output\n return output\n", "role": "user" }, { "content": "parse_file", "role": "assistant" } ]
def x(self, column_names, part_first_line):
    part_parts = part_first_line.split()
    return_dict = {}
    for counter, piece in enumerate(part_parts):
        for colname in column_names:
            pattern_re = re.compile(colname)
            if pattern_re.search(piece):
                return_dict[colname] = counter
                break
    for colname in column_names:
        return_dict.setdefault(colname, None)
    return return_dict
find_column
[ { "content": "Given the source code of a python function, suggest a fitting name for the function.", "role": "system" }, { "content": "def x(self, column_names, part_first_line):\n part_parts = part_first_line.split()\n return_dict = {}\n counter = 0\n for piece in part_parts:\n for colname in column_names:\n pattern_re = re.compile(colname)\n if pattern_re.search(piece):\n return_dict[colname] = counter\n break\n counter += 1\n for colver in column_names:\n try:\n tempval = return_dict[colver]\n del tempval\n except KeyError:\n return_dict[colver] = None\n return return_dict\n", "role": "user" }, { "content": "find_column", "role": "assistant" } ]
def x(self, info_part, patternsname, patterns):
    pattern = patterns['PATTERN']
    if pattern == '':
        return False
    return_dict = {}
    pattern_re = re.compile(pattern)
    for part_line in info_part.split('\n'):
        if part_line.strip() != '' and not pattern_re.search(part_line):
            is_24hr = True
            is_AM = False
            if part_line[9:11] == 'AM':
                is_24hr = False
                is_AM = True
            elif part_line[9:11] == 'PM':
                is_24hr = False
                is_AM = False
            if is_24hr is False:
                part_line = '%s_%s XX %s' % (part_line[:8], part_line[9:11],
                    part_line[12:])
            elems = part_line.split()
            full_time = elems[0].strip()
            if full_time != 'Average:':
                if is_24hr is False:
                    full_time = full_time[:-3]
                    if full_time[:2] == '12':
                        if is_AM is True:
                            full_time = '%s:%s' % ('00', full_time[3:])
                        is_AM = not is_AM
                    if is_AM is False and full_time[0:2] != '00':
                        hours = int(full_time[:2]) + 12
                        hours = '%02d' % (hours,)
                        full_time = '%s:%s' % (hours, full_time[3:])
                if full_time not in return_dict:
                    return_dict[full_time] = {}
                fields = self.__fields[patternsname]
                pairs = patterns['PAIRS']
                # dict.iterkeys() is Python 2 only; iterate the dict directly
                for sectionname in pairs:
                    value = elems[fields[pairs[sectionname]]]
                    if sectionname in ('membuffer', 'memcache', 'memfree',
                            'memused', 'swapfree', 'swapused'):
                        value = int(value)
                    else:
                        value = float(value)
                    if patternsname == 'CPU':
                        cpuid = elems[1 if is_24hr is True else 2]
                        if cpuid not in return_dict[full_time]:
                            return_dict[full_time][cpuid] = {}
                        return_dict[full_time][cpuid][sectionname] = value
                    else:
                        return_dict[full_time][sectionname] = value
    return return_dict
split_info
[ { "content": "Given the source code of a python function, suggest a fitting name for the function.", "role": "system" }, { "content": "def x(self, info_part, patternsname, patterns):\n pattern = patterns['PATTERN']\n if pattern == '':\n return False\n return_dict = {}\n pattern_re = re.compile(pattern)\n for part_line in info_part.split('\\n'):\n if part_line.strip() != '' and not pattern_re.search(part_line):\n is_24hr = True\n is_AM = False\n if part_line[9:11] == 'AM':\n is_24hr = False\n is_AM = True\n elif part_line[9:11] == 'PM':\n is_24hr = False\n is_AM = False\n if is_24hr is False:\n part_line = '%s_%s XX %s' % (part_line[:8], part_line[9:11],\n part_line[12:])\n elems = part_line.split()\n full_time = elems[0].strip()\n if full_time != 'Average:':\n if is_24hr is False:\n full_time = full_time[:-3]\n if full_time[:2] == '12':\n if is_AM is True:\n full_time = '%s:%s' % ('00', full_time[3:])\n is_AM = not is_AM\n if is_AM is False and full_time[0:2] != '00':\n hours = int(full_time[:2]) + 12\n hours = '%02d' % (hours,)\n full_time = '%s:%s' % (hours, full_time[3:])\n try:\n blah = return_dict[full_time]\n del blah\n except KeyError:\n return_dict[full_time] = {}\n fields = self.__fields[patternsname]\n pairs = patterns['PAIRS']\n for sectionname in pairs.iterkeys():\n value = elems[fields[pairs[sectionname]]]\n if (sectionname == 'membuffer' or sectionname ==\n 'memcache' or sectionname == 'memfree' or \n sectionname == 'memused' or sectionname ==\n 'swapfree' or sectionname == 'swapused'):\n value = int(value)\n else:\n value = float(value)\n if patternsname == 'CPU':\n cpuid = elems[1 if is_24hr is True else 2]\n try:\n blah = return_dict[full_time][cpuid]\n del blah\n except KeyError:\n return_dict[full_time][cpuid] = {}\n return_dict[full_time][cpuid][sectionname] = value\n else:\n return_dict[full_time][sectionname] = value\n return return_dict\n", "role": "user" }, { "content": "split_info", "role": "assistant" } ]
def x(self):
    if os.access(self.__filename, os.R_OK):
        try:
            sar_file = open(self.__filename, 'r')
        except OSError:
            traceback.print_exc()
            return False
        except Exception:
            traceback.print_exc()
            return False
        firstline = sar_file.readline()
        info = firstline.split()
        sar_file.close()
        try:
            self.__file_date = info[3]
        except IndexError:
            # list indexing raises IndexError, not KeyError, when the header
            # line is shorter than expected
            self.__file_date = ''
            return False
        except Exception:
            traceback.print_exc()
            return False
        return True
    return False
get_filedate
[ { "content": "Given the source code of a python function, suggest a fitting name for the function.", "role": "system" }, { "content": "def x(self):\n if os.access(self.__filename, os.R_OK):\n try:\n sar_file = open(self.__filename, 'r')\n except OSError:\n traceback.print_exc()\n return False\n except:\n traceback.print_exc()\n return False\n firstline = sar_file.readline()\n info = firstline.split()\n sar_file.close()\n try:\n self.__file_date = info[3]\n except KeyError:\n self.__file_date = ''\n return False\n except:\n traceback.print_exc()\n return False\n return True\n return False\n", "role": "user" }, { "content": "get_filedate", "role": "assistant" } ]
def x(index, event_time, transition_set, population_view): if len(transition_set) == 0 or index.empty: return outputs, decisions = transition_set.choose_new_state(index) groups = _groupby_new_state(index, outputs, decisions) if groups: for output, affected_index in sorted(groups, key=lambda x: str(x[0])): if output == 'null_transition': pass elif isinstance(output, Transient): if not isinstance(output, State): raise ValueError('Invalid transition output: {}'.format (output)) output.transition_effect(affected_index, event_time, population_view) output.next_state(affected_index, event_time, population_view) elif isinstance(output, State): output.transition_effect(affected_index, event_time, population_view) else: raise ValueError('Invalid transition output: {}'.format(output) )
next_state
[ { "content": "Given the source code of a python function, suggest a fitting name for the function.", "role": "system" }, { "content": "def x(index, event_time, transition_set, population_view):\n if len(transition_set) == 0 or index.empty:\n return\n outputs, decisions = transition_set.choose_new_state(index)\n groups = _groupby_new_state(index, outputs, decisions)\n if groups:\n for output, affected_index in sorted(groups, key=lambda x: str(x[0])):\n if output == 'null_transition':\n pass\n elif isinstance(output, Transient):\n if not isinstance(output, State):\n raise ValueError('Invalid transition output: {}'.format\n (output))\n output.transition_effect(affected_index, event_time,\n population_view)\n output.next_state(affected_index, event_time, population_view)\n elif isinstance(output, State):\n output.transition_effect(affected_index, event_time,\n population_view)\n else:\n raise ValueError('Invalid transition output: {}'.format(output)\n )\n", "role": "user" }, { "content": "next_state", "role": "assistant" } ]
def x(index, outputs, decisions): output_map = {o: i for i, o in enumerate(outputs)} groups = pd.Series(index).groupby([output_map[d] for d in decisions]) results = [(outputs[i], pd.Index(sub_group.values)) for i, sub_group in groups] selected_outputs = [o for o, _ in results] for output in outputs: if output not in selected_outputs: results.append((output, pd.Index([]))) return results
groupby_new_state
[ { "content": "Given the source code of a python function, suggest a fitting name for the function.", "role": "system" }, { "content": "def x(index, outputs, decisions):\n output_map = {o: i for i, o in enumerate(outputs)}\n groups = pd.Series(index).groupby([output_map[d] for d in decisions])\n results = [(outputs[i], pd.Index(sub_group.values)) for i, sub_group in\n groups]\n selected_outputs = [o for o, _ in results]\n for output in outputs:\n if output not in selected_outputs:\n results.append((output, pd.Index([])))\n return results\n", "role": "user" }, { "content": "groupby_new_state", "role": "assistant" } ]
def x(self, index, event_time, population_view): return _next_state(index, event_time, self.transition_set, population_view)
next_state
[ { "content": "Given the source code of a python function, suggest a fitting name for the function.", "role": "system" }, { "content": "def x(self, index, event_time, population_view):\n return _next_state(index, event_time, self.transition_set, population_view)\n", "role": "user" }, { "content": "next_state", "role": "assistant" } ]
def x(self, index, event_time, population_view): population_view.update(pd.Series(self.state_id, index=index)) self._transition_side_effect(index, event_time)
transition_effect
[ { "content": "Given the source code of a python function, suggest a fitting name for the function.", "role": "system" }, { "content": "def x(self, index, event_time, population_view):\n population_view.update(pd.Series(self.state_id, index=index))\n self._transition_side_effect(index, event_time)\n", "role": "user" }, { "content": "transition_effect", "role": "assistant" } ]
def x(self, output, probability_func=lambda index: np.ones(len(index), dtype=float), triggered=Trigger.NOT_TRIGGERED): t = Transition(self, output, probability_func=probability_func, triggered=triggered) self.transition_set.append(t) return t
add_transition
[ { "content": "Given the source code of a python function, suggest a fitting name for the function.", "role": "system" }, { "content": "def x(self, output, probability_func=lambda index: np.ones(len(index),\n dtype=float), triggered=Trigger.NOT_TRIGGERED):\n t = Transition(self, output, probability_func=probability_func,\n triggered=triggered)\n self.transition_set.append(t)\n return t\n", "role": "user" }, { "content": "add_transition", "role": "assistant" } ]
def x(self, builder): builder.components.add_components(self.transitions) self.random = builder.randomness.get_stream(self.key)
setup
[ { "content": "Given the source code of a python function, suggest a fitting name for the function.", "role": "system" }, { "content": "def x(self, builder):\n builder.components.add_components(self.transitions)\n self.random = builder.randomness.get_stream(self.key)\n", "role": "user" }, { "content": "setup", "role": "assistant" } ]
def x(self, index): outputs, probabilities = zip(*[(transition.output_state, np.array( transition.probability(index))) for transition in self.transitions]) probabilities = np.transpose(probabilities) outputs, probabilities = self._normalize_probabilities(outputs, probabilities) return outputs, self.random.choice(index, outputs, probabilities)
choose_new_state
[ { "content": "Given the source code of a python function, suggest a fitting name for the function.", "role": "system" }, { "content": "def x(self, index):\n outputs, probabilities = zip(*[(transition.output_state, np.array(\n transition.probability(index))) for transition in self.transitions])\n probabilities = np.transpose(probabilities)\n outputs, probabilities = self._normalize_probabilities(outputs,\n probabilities)\n return outputs, self.random.choice(index, outputs, probabilities)\n", "role": "user" }, { "content": "choose_new_state", "role": "assistant" } ]
def x(self, outputs, probabilities): outputs = list(outputs) total = np.sum(probabilities, axis=1) if self.allow_null_transition or not np.any(total): if np.any(total > 1 + 1e-08): raise ValueError( 'Null transition requested with un-normalized probability weights: {}' .format(probabilities)) total[total > 1] = 1 probabilities = np.concatenate([probabilities, (1 - total)[:, np. newaxis]], axis=1) outputs.append('null_transition') return outputs, probabilities / np.sum(probabilities, axis=1)[:, np.newaxis ]
normalize_probabilities
[ { "content": "Given the source code of a python function, suggest a fitting name for the function.", "role": "system" }, { "content": "def x(self, outputs, probabilities):\n outputs = list(outputs)\n total = np.sum(probabilities, axis=1)\n if self.allow_null_transition or not np.any(total):\n if np.any(total > 1 + 1e-08):\n raise ValueError(\n 'Null transition requested with un-normalized probability weights: {}'\n .format(probabilities))\n total[total > 1] = 1\n probabilities = np.concatenate([probabilities, (1 - total)[:, np.\n newaxis]], axis=1)\n outputs.append('null_transition')\n return outputs, probabilities / np.sum(probabilities, axis=1)[:, np.newaxis\n ]\n", "role": "user" }, { "content": "normalize_probabilities", "role": "assistant" } ]
def x(self, builder): builder.components.add_components(self.states) self.population_view = builder.population.get_view([self.state_column])
setup
[ { "content": "Given the source code of a python function, suggest a fitting name for the function.", "role": "system" }, { "content": "def x(self, builder):\n builder.components.add_components(self.states)\n self.population_view = builder.population.get_view([self.state_column])\n", "role": "user" }, { "content": "setup", "role": "assistant" } ]
def x(self, index, event_time): for state, affected in self._get_state_pops(index): if not affected.empty: state.next_state(affected.index, event_time, self. population_view.subview([self.state_column]))
transition
[ { "content": "Given the source code of a python function, suggest a fitting name for the function.", "role": "system" }, { "content": "def x(self, index, event_time):\n for state, affected in self._get_state_pops(index):\n if not affected.empty:\n state.next_state(affected.index, event_time, self.\n population_view.subview([self.state_column]))\n", "role": "user" }, { "content": "transition", "role": "assistant" } ]
def x(self): from graphviz import Digraph dot = Digraph(format='png') for state in self.states: if isinstance(state, TransientState): dot.node(state.state_id, style='dashed') else: dot.node(state.state_id) for transition in state.transition_set: dot.edge(state.state_id, transition.output.state_id, transition .label()) return dot
to_dot
[ { "content": "Given the source code of a python function, suggest a fitting name for the function.", "role": "system" }, { "content": "def x(self):\n from graphviz import Digraph\n dot = Digraph(format='png')\n for state in self.states:\n if isinstance(state, TransientState):\n dot.node(state.state_id, style='dashed')\n else:\n dot.node(state.state_id)\n for transition in state.transition_set:\n dot.edge(state.state_id, transition.output.state_id, transition\n .label())\n return dot\n", "role": "user" }, { "content": "to_dot", "role": "assistant" } ]
def x():
    from sar import parser
    sysstat_dir = '/var/log/sa'
    single_file = '%s/%s' % (sysstat_dir, 'sar21')
    insar = parser.Parser(single_file)
    print('SAR file date: %s' % insar.get_filedate())
    print('Content:\n')
    pprint.pprint(insar.get_sar_info())
    print('-' * 78)
    """
    from sar import multiparser
    multi_file = ('./%s' % ('sarcombined'))
    inmulti = multiparser.Multiparser(multi_file)
    inmulti.load_file()
    print("Content:\n")
    pprint.pprint(inmulti.get_sar_info())
    """
main
[ { "content": "Given the source code of a python function, suggest a fitting name for the function.", "role": "system" }, { "content": "def x():\n from sar import parser\n sysstat_dir = '/var/log/sa'\n single_file = '%s/%s' % (sysstat_dir, 'sar21')\n insar = parser.Parser(single_file)\n print('SAR file date: %s' % insar.get_filedate())\n print('Content:\\n')\n pprint.pprint(insar.get_sar_info())\n print('-' * 78)\n \"\"\"\n from sar import multiparser\n multi_file = ('./%s' % ('sarcombined'))\n inmulti = multiparser.Multiparser(multi_file)\n inmulti.load_file()\n print(\"Content:\n\")\n pprint.pprint(inmulti.get_sar_info())\n \"\"\"\n", "role": "user" }, { "content": "main", "role": "assistant" } ]
def x(self, builder, configuration): self._managers = _setup_components(builder, self._managers, configuration) self._components = _setup_components(builder, self._components, configuration)
setup_components
[ { "content": "Given the source code of a python function, suggest a fitting name for the function.", "role": "system" }, { "content": "def x(self, builder, configuration):\n self._managers = _setup_components(builder, self._managers, configuration)\n self._components = _setup_components(builder, self._components,\n configuration)\n", "role": "user" }, { "content": "setup_components", "role": "assistant" } ]
def x(catalog, filter_in=None, filter_out=None, meta_field=None, exclude_meta_fields=None, only_time_series=False): filter_in = filter_in or {} filter_out = filter_out or {} catalog = read_catalog_obj(catalog) if filter_in or filter_out: filtered_datasets = [dataset for dataset in catalog['dataset'] if _filter_dictionary(dataset, filter_in.get('dataset'), filter_out.get('dataset'))] else: filtered_datasets = catalog['dataset'] if only_time_series: filtered_datasets = [dataset for dataset in filtered_datasets if dataset_has_time_series(dataset)] if meta_field: return [dataset[meta_field] for dataset in filtered_datasets if meta_field in dataset] if exclude_meta_fields: meta_filtered_datasets = [] for dataset in filtered_datasets: dataset_meta_filtered = dataset.copy() for excluded_meta_field in exclude_meta_fields: dataset_meta_filtered.pop(excluded_meta_field, None) meta_filtered_datasets.append(dataset_meta_filtered) return meta_filtered_datasets else: return filtered_datasets
get_datasets
[ { "content": "Given the source code of a python function, suggest a fitting name for the function.", "role": "system" }, { "content": "def x(catalog, filter_in=None, filter_out=None, meta_field=None,\n exclude_meta_fields=None, only_time_series=False):\n filter_in = filter_in or {}\n filter_out = filter_out or {}\n catalog = read_catalog_obj(catalog)\n if filter_in or filter_out:\n filtered_datasets = [dataset for dataset in catalog['dataset'] if\n _filter_dictionary(dataset, filter_in.get('dataset'),\n filter_out.get('dataset'))]\n else:\n filtered_datasets = catalog['dataset']\n if only_time_series:\n filtered_datasets = [dataset for dataset in filtered_datasets if\n dataset_has_time_series(dataset)]\n if meta_field:\n return [dataset[meta_field] for dataset in filtered_datasets if \n meta_field in dataset]\n if exclude_meta_fields:\n meta_filtered_datasets = []\n for dataset in filtered_datasets:\n dataset_meta_filtered = dataset.copy()\n for excluded_meta_field in exclude_meta_fields:\n dataset_meta_filtered.pop(excluded_meta_field, None)\n meta_filtered_datasets.append(dataset_meta_filtered)\n return meta_filtered_datasets\n else:\n return filtered_datasets\n", "role": "user" }, { "content": "get_datasets", "role": "assistant" } ]
def x(catalog, filter_in=None, filter_out=None, meta_field=None, exclude_meta_fields=None, only_time_series=False): filter_in = filter_in or {} filter_out = filter_out or {} catalog = read_catalog_obj(catalog) distributions = [] for dataset in get_datasets(catalog, filter_in, filter_out): for distribution in dataset.get('distribution', []): distribution['dataset_identifier'] = dataset['identifier'] distributions.append(distribution) filtered_distributions = [distribution for distribution in distributions if _filter_dictionary(distribution, filter_in.get( 'distribution'), filter_out.get('distribution'))] if only_time_series: filtered_distributions = [distribution for distribution in filtered_distributions if distribution_has_time_index(distribution) ] if meta_field: return [distribution[meta_field] for distribution in filtered_distributions if meta_field in distribution] if exclude_meta_fields: meta_filtered_distributions = [] for distribution in filtered_distributions: distribution_meta_filtered = distribution.copy() for excluded_meta_field in exclude_meta_fields: distribution_meta_filtered.pop(excluded_meta_field, None) meta_filtered_distributions.append(distribution_meta_filtered) return meta_filtered_distributions else: return filtered_distributions
get_distributions
[ { "content": "Given the source code of a python function, suggest a fitting name for the function.", "role": "system" }, { "content": "def x(catalog, filter_in=None, filter_out=None, meta_field=None,\n exclude_meta_fields=None, only_time_series=False):\n filter_in = filter_in or {}\n filter_out = filter_out or {}\n catalog = read_catalog_obj(catalog)\n distributions = []\n for dataset in get_datasets(catalog, filter_in, filter_out):\n for distribution in dataset.get('distribution', []):\n distribution['dataset_identifier'] = dataset['identifier']\n distributions.append(distribution)\n filtered_distributions = [distribution for distribution in\n distributions if _filter_dictionary(distribution, filter_in.get(\n 'distribution'), filter_out.get('distribution'))]\n if only_time_series:\n filtered_distributions = [distribution for distribution in\n filtered_distributions if distribution_has_time_index(distribution)\n ]\n if meta_field:\n return [distribution[meta_field] for distribution in\n filtered_distributions if meta_field in distribution]\n if exclude_meta_fields:\n meta_filtered_distributions = []\n for distribution in filtered_distributions:\n distribution_meta_filtered = distribution.copy()\n for excluded_meta_field in exclude_meta_fields:\n distribution_meta_filtered.pop(excluded_meta_field, None)\n meta_filtered_distributions.append(distribution_meta_filtered)\n return meta_filtered_distributions\n else:\n return filtered_distributions\n", "role": "user" }, { "content": "get_distributions", "role": "assistant" } ]
def x(catalog, filter_in=None, filter_out=None, meta_field=None, only_time_series=False, distribution_identifier=None): filter_in = filter_in or {} filter_out = filter_out or {} catalog = read_catalog_obj(catalog) if distribution_identifier: if 'distribution' not in filter_in: filter_in['distribution'] = {} filter_in['distribution']['identifier'] = distribution_identifier fields = [] for distribution in get_distributions(catalog, filter_in, filter_out, only_time_series=only_time_series): distribution_fields = distribution.get('field', []) if isinstance(distribution_fields, list): for field in distribution_fields: if not only_time_series or field_is_time_series(field, distribution): field['dataset_identifier'] = distribution[ 'dataset_identifier'] field['distribution_identifier'] = distribution.get( 'identifier') fields.append(field) filtered_fields = [field for field in fields if _filter_dictionary( field, filter_in.get('field'), filter_out.get('field'))] if meta_field: return [field[meta_field] for field in filtered_fields if meta_field in field] else: return filtered_fields
get_fields
[ { "content": "Given the source code of a python function, suggest a fitting name for the function.", "role": "system" }, { "content": "def x(catalog, filter_in=None, filter_out=None, meta_field=None,\n only_time_series=False, distribution_identifier=None):\n filter_in = filter_in or {}\n filter_out = filter_out or {}\n catalog = read_catalog_obj(catalog)\n if distribution_identifier:\n if 'distribution' not in filter_in:\n filter_in['distribution'] = {}\n filter_in['distribution']['identifier'] = distribution_identifier\n fields = []\n for distribution in get_distributions(catalog, filter_in, filter_out,\n only_time_series=only_time_series):\n distribution_fields = distribution.get('field', [])\n if isinstance(distribution_fields, list):\n for field in distribution_fields:\n if not only_time_series or field_is_time_series(field,\n distribution):\n field['dataset_identifier'] = distribution[\n 'dataset_identifier']\n field['distribution_identifier'] = distribution.get(\n 'identifier')\n fields.append(field)\n filtered_fields = [field for field in fields if _filter_dictionary(\n field, filter_in.get('field'), filter_out.get('field'))]\n if meta_field:\n return [field[meta_field] for field in filtered_fields if \n meta_field in field]\n else:\n return filtered_fields\n", "role": "user" }, { "content": "get_fields", "role": "assistant" } ]
def x(catalog, identifier=None, title=None): msg = "Se requiere un 'identifier' o 'title' para buscar el dataset." assert identifier or title, msg catalog = read_catalog_obj(catalog) if identifier: try: return _get_dataset_by_identifier(catalog, identifier) except BaseException: try: catalog._build_index() return _get_dataset_by_identifier(catalog, identifier) except BaseException: filtered_datasets = get_datasets(catalog, {'dataset': { 'identifier': identifier}}) elif title: filtered_datasets = get_datasets(catalog, {'dataset': {'title': title}} ) if len(filtered_datasets) > 1: if identifier: raise ce.DatasetIdRepetitionError(identifier, filtered_datasets) elif title: raise ce.DatasetTitleRepetitionError(title, filtered_datasets) elif len(filtered_datasets) == 0: return None else: return filtered_datasets[0]
get_dataset
[ { "content": "Given the source code of a python function, suggest a fitting name for the function.", "role": "system" }, { "content": "def x(catalog, identifier=None, title=None):\n msg = \"Se requiere un 'identifier' o 'title' para buscar el dataset.\"\n assert identifier or title, msg\n catalog = read_catalog_obj(catalog)\n if identifier:\n try:\n return _get_dataset_by_identifier(catalog, identifier)\n except BaseException:\n try:\n catalog._build_index()\n return _get_dataset_by_identifier(catalog, identifier)\n except BaseException:\n filtered_datasets = get_datasets(catalog, {'dataset': {\n 'identifier': identifier}})\n elif title:\n filtered_datasets = get_datasets(catalog, {'dataset': {'title': title}}\n )\n if len(filtered_datasets) > 1:\n if identifier:\n raise ce.DatasetIdRepetitionError(identifier, filtered_datasets)\n elif title:\n raise ce.DatasetTitleRepetitionError(title, filtered_datasets)\n elif len(filtered_datasets) == 0:\n return None\n else:\n return filtered_datasets[0]\n", "role": "user" }, { "content": "get_dataset", "role": "assistant" } ]
def x(catalog, identifier=None, title=None, dataset_identifier=None): msg = "Se requiere un 'identifier' o 'title' para buscar el distribution." assert identifier or title, msg catalog = read_catalog_obj(catalog) if identifier: try: return _get_distribution_by_identifier(catalog, identifier) except BaseException: try: catalog._build_index() return _get_distribution_by_identifier(catalog, identifier) except BaseException: filtered_distributions = get_distributions(catalog, { 'distribution': {'identifier': identifier}}) elif title and dataset_identifier: filtered_distributions = get_distributions(catalog, {'dataset': { 'identifier': dataset_identifier}, 'distribution': {'title': title}}) elif title: filtered_distributions = get_distributions(catalog, {'distribution': {'title': title}}) if len(filtered_distributions) > 1: if identifier: raise ce.DistributionIdRepetitionError(identifier, filtered_distributions) elif title and dataset_identifier: raise ce.DistributionTitleRepetitionError(title, filtered_distributions) elif title: return filtered_distributions elif len(filtered_distributions) == 0: return None else: return filtered_distributions[0]
get_distribution
[ { "content": "Given the source code of a python function, suggest a fitting name for the function.", "role": "system" }, { "content": "def x(catalog, identifier=None, title=None, dataset_identifier=None):\n msg = \"Se requiere un 'identifier' o 'title' para buscar el distribution.\"\n assert identifier or title, msg\n catalog = read_catalog_obj(catalog)\n if identifier:\n try:\n return _get_distribution_by_identifier(catalog, identifier)\n except BaseException:\n try:\n catalog._build_index()\n return _get_distribution_by_identifier(catalog, identifier)\n except BaseException:\n filtered_distributions = get_distributions(catalog, {\n 'distribution': {'identifier': identifier}})\n elif title and dataset_identifier:\n filtered_distributions = get_distributions(catalog, {'dataset': {\n 'identifier': dataset_identifier}, 'distribution': {'title':\n title}})\n elif title:\n filtered_distributions = get_distributions(catalog, {'distribution':\n {'title': title}})\n if len(filtered_distributions) > 1:\n if identifier:\n raise ce.DistributionIdRepetitionError(identifier,\n filtered_distributions)\n elif title and dataset_identifier:\n raise ce.DistributionTitleRepetitionError(title,\n filtered_distributions)\n elif title:\n return filtered_distributions\n elif len(filtered_distributions) == 0:\n return None\n else:\n return filtered_distributions[0]\n", "role": "user" }, { "content": "get_distribution", "role": "assistant" } ]
def x(catalog, identifier=None, title=None, distribution_identifier=None): msg = "Se requiere un 'id' o 'title' para buscar el field." assert identifier or title, msg if identifier: try: return _get_field_by_identifier(catalog, identifier) except BaseException: try: catalog._build_index() return _get_field_by_identifier(catalog, identifier) except BaseException: filtered_fields = get_fields(catalog, {'field': {'id': identifier}}) elif title and distribution_identifier: filtered_fields = get_fields(catalog, {'distribution': { 'identifier': distribution_identifier}, 'field': {'title': title}}) elif title: filtered_fields = get_fields(catalog, {'field': {'title': title}}) if len(filtered_fields) > 1: if identifier: raise ce.FieldIdRepetitionError(identifier, filtered_fields) elif title and distribution_identifier: raise ce.FieldTitleRepetitionError(title, filtered_fields) elif title: return filtered_fields elif len(filtered_fields) == 0: return None else: return filtered_fields[0]
get_field
[ { "content": "Given the source code of a python function, suggest a fitting name for the function.", "role": "system" }, { "content": "def x(catalog, identifier=None, title=None, distribution_identifier=None):\n msg = \"Se requiere un 'id' o 'title' para buscar el field.\"\n assert identifier or title, msg\n if identifier:\n try:\n return _get_field_by_identifier(catalog, identifier)\n except BaseException:\n try:\n catalog._build_index()\n return _get_field_by_identifier(catalog, identifier)\n except BaseException:\n filtered_fields = get_fields(catalog, {'field': {'id':\n identifier}})\n elif title and distribution_identifier:\n filtered_fields = get_fields(catalog, {'distribution': {\n 'identifier': distribution_identifier}, 'field': {'title': title}})\n elif title:\n filtered_fields = get_fields(catalog, {'field': {'title': title}})\n if len(filtered_fields) > 1:\n if identifier:\n raise ce.FieldIdRepetitionError(identifier, filtered_fields)\n elif title and distribution_identifier:\n raise ce.FieldTitleRepetitionError(title, filtered_fields)\n elif title:\n return filtered_fields\n elif len(filtered_fields) == 0:\n return None\n else:\n return filtered_fields[0]\n", "role": "user" }, { "content": "get_field", "role": "assistant" } ]
def x(catalog, exclude_meta_fields=None): exclude_meta_fields = exclude_meta_fields or [] catalog_dict_copy = catalog.copy() del catalog_dict_copy['dataset'] for excluded_meta_field in exclude_meta_fields: catalog_dict_copy.pop(excluded_meta_field, None) return catalog_dict_copy
get_catalog_metadata
[ { "content": "Given the source code of a python function, suggest a fitting name for the function.", "role": "system" }, { "content": "def x(catalog, exclude_meta_fields=None):\n exclude_meta_fields = exclude_meta_fields or []\n catalog_dict_copy = catalog.copy()\n del catalog_dict_copy['dataset']\n for excluded_meta_field in exclude_meta_fields:\n catalog_dict_copy.pop(excluded_meta_field, None)\n return catalog_dict_copy\n", "role": "user" }, { "content": "get_catalog_metadata", "role": "assistant" } ]
def x(dataset): text_template = ( '\n# {title}\n\n{description}\n\n## Recursos del dataset\n\n{distributions}\n' ) if 'distribution' in dataset: distributions = ''.join(map(distribution_to_markdown, dataset[ 'distribution'])) else: distributions = '' text = text_template.format(title=dataset['title'], description=dataset .get('description', ''), distributions=distributions) return text
dataset_to_markdown
[ { "content": "Given the source code of a python function, suggest a fitting name for the function.", "role": "system" }, { "content": "def x(dataset):\n text_template = (\n '\\n# {title}\\n\\n{description}\\n\\n## Recursos del dataset\\n\\n{distributions}\\n'\n )\n if 'distribution' in dataset:\n distributions = ''.join(map(distribution_to_markdown, dataset[\n 'distribution']))\n else:\n distributions = ''\n text = text_template.format(title=dataset['title'], description=dataset\n .get('description', ''), distributions=distributions)\n return text\n", "role": "user" }, { "content": "dataset_to_markdown", "role": "assistant" } ]
def x(distribution): text_template = ( '\n### {title}\n\n{description}\n\n#### Campos del recurso\n\n{fields}\n' ) if 'field' in distribution: fields = '- ' + '\n- '.join(map(field_to_markdown, distribution[ 'field'])) else: fields = '' text = text_template.format(title=distribution['title'], description= distribution.get('description', ''), fields=fields) return text
distribution_to_markdown
[ { "content": "Given the source code of a python function, suggest a fitting name for the function.", "role": "system" }, { "content": "def x(distribution):\n text_template = (\n '\\n### {title}\\n\\n{description}\\n\\n#### Campos del recurso\\n\\n{fields}\\n'\n )\n if 'field' in distribution:\n fields = '- ' + '\\n- '.join(map(field_to_markdown, distribution[\n 'field']))\n else:\n fields = ''\n text = text_template.format(title=distribution['title'], description=\n distribution.get('description', ''), fields=fields)\n return text\n", "role": "user" }, { "content": "distribution_to_markdown", "role": "assistant" } ]
def x(field): if 'title' in field: field_title = '**{}**'.format(field['title']) else: raise Exception('Es necesario un `title` para describir un campo.') field_type = ' ({})'.format(field['type']) if 'type' in field else '' field_desc = ': {}'.format(field['description'] ) if 'description' in field else '' text_template = '{title}{type}{description}' text = text_template.format(title=field_title, type=field_type, description=field_desc) return text
field_to_markdown
[ { "content": "Given the source code of a python function, suggest a fitting name for the function.", "role": "system" }, { "content": "def x(field):\n if 'title' in field:\n field_title = '**{}**'.format(field['title'])\n else:\n raise Exception('Es necesario un `title` para describir un campo.')\n field_type = ' ({})'.format(field['type']) if 'type' in field else ''\n field_desc = ': {}'.format(field['description']\n ) if 'description' in field else ''\n text_template = '{title}{type}{description}'\n text = text_template.format(title=field_title, type=field_type,\n description=field_desc)\n return text\n", "role": "user" }, { "content": "field_to_markdown", "role": "assistant" } ]
def x(sequence, every=None, size=None, name='Items'): from ipywidgets import IntProgress, HTML, VBox from IPython.display import display is_iterator = False if size is None: try: size = len(sequence) except TypeError: is_iterator = True if size is not None: if every is None: if size <= 200: every = 1 else: every = int(size / 200) else: assert every is not None, 'sequence is iterator, set every' if is_iterator: progress = IntProgress(min=0, max=1, value=1) progress.bar_style = 'info' else: progress = IntProgress(min=0, max=size, value=0) label = HTML() box = VBox(children=[label, progress]) display(box) index = 0 try: for index, record in enumerate(sequence, 1): if index == 1 or index % every == 0: if is_iterator: label.value = '{name}: {index} / ?'.format(name=name, index=index) else: progress.value = index label.value = u'{name}: {index} / {size}'.format(name= name, index=index, size=size) yield record except Exception as e: progress.bar_style = 'danger' raise else: progress.bar_style = 'success' progress.value = index label.value = '{name}: {index}'.format(name=name, index=str(index or '?'))
log_progress
[ { "content": "Given the source code of a python function, suggest a fitting name for the function.", "role": "system" }, { "content": "def x(sequence, every=None, size=None, name='Items'):\n from ipywidgets import IntProgress, HTML, VBox\n from IPython.display import display\n is_iterator = False\n if size is None:\n try:\n size = len(sequence)\n except TypeError:\n is_iterator = True\n if size is not None:\n if every is None:\n if size <= 200:\n every = 1\n else:\n every = int(size / 200)\n else:\n assert every is not None, 'sequence is iterator, set every'\n if is_iterator:\n progress = IntProgress(min=0, max=1, value=1)\n progress.bar_style = 'info'\n else:\n progress = IntProgress(min=0, max=size, value=0)\n label = HTML()\n box = VBox(children=[label, progress])\n display(box)\n index = 0\n try:\n for index, record in enumerate(sequence, 1):\n if index == 1 or index % every == 0:\n if is_iterator:\n label.value = '{name}: {index} / ?'.format(name=name,\n index=index)\n else:\n progress.value = index\n label.value = u'{name}: {index} / {size}'.format(name=\n name, index=index, size=size)\n yield record\n except Exception as e:\n progress.bar_style = 'danger'\n raise\n else:\n progress.bar_style = 'success'\n progress.value = index\n label.value = '{name}: {index}'.format(name=name, index=str(index or\n '?'))\n", "role": "user" }, { "content": "log_progress", "role": "assistant" } ]
def x(): try: datajson_file = sys.argv[1] dj_instance = DataJson() bool_res = dj_instance.is_valid_catalog(datajson_file) full_res = dj_instance.validate_catalog(datajson_file) pretty_full_res = json.dumps(full_res, indent=4, separators=(',', ': ') ) logger.info(bool_res) logger.info(pretty_full_res) except IndexError as errmsg: format_str = ( '\n{}: pydatajson.py fue ejecutado como script sin proveer un argumento\n' ) logger.error(format_str.format(errmsg))
main
[ { "content": "Given the source code of a python function, suggest a fitting name for the function.", "role": "system" }, { "content": "def x():\n try:\n datajson_file = sys.argv[1]\n dj_instance = DataJson()\n bool_res = dj_instance.is_valid_catalog(datajson_file)\n full_res = dj_instance.validate_catalog(datajson_file)\n pretty_full_res = json.dumps(full_res, indent=4, separators=(',', ': ')\n )\n logger.info(bool_res)\n logger.info(pretty_full_res)\n except IndexError as errmsg:\n format_str = (\n '\\n{}: pydatajson.py fue ejecutado como script sin proveer un argumento\\n'\n )\n logger.error(format_str.format(errmsg))\n", "role": "user" }, { "content": "main", "role": "assistant" } ]
def x(self): datasets_index = {} distributions_index = {} fields_index = {} for dataset_index, dataset in enumerate(self.datasets): if 'identifier' in dataset: datasets_index[dataset['identifier']] = {'dataset_index': dataset_index} for distribution_index, distribution in enumerate(dataset.get( 'distribution', [])): if 'identifier' in distribution: distributions_index[distribution['identifier']] = { 'distribution_index': distribution_index, 'dataset_identifier': dataset['identifier']} for field_index, field in enumerate(distribution.get( 'field', [])): if 'id' in field: fields_index[field['id']] = {'field_index': field_index, 'dataset_identifier': dataset[ 'identifier'], 'distribution_identifier': distribution['identifier']} setattr(self, '_distributions_index', distributions_index) setattr(self, '_datasets_index', datasets_index) setattr(self, '_fields_index', fields_index)
build_index
[ { "content": "Given the source code of a python function, suggest a fitting name for the function.", "role": "system" }, { "content": "def x(self):\n datasets_index = {}\n distributions_index = {}\n fields_index = {}\n for dataset_index, dataset in enumerate(self.datasets):\n if 'identifier' in dataset:\n datasets_index[dataset['identifier']] = {'dataset_index':\n dataset_index}\n for distribution_index, distribution in enumerate(dataset.get(\n 'distribution', [])):\n if 'identifier' in distribution:\n distributions_index[distribution['identifier']] = {\n 'distribution_index': distribution_index,\n 'dataset_identifier': dataset['identifier']}\n for field_index, field in enumerate(distribution.get(\n 'field', [])):\n if 'id' in field:\n fields_index[field['id']] = {'field_index':\n field_index, 'dataset_identifier': dataset[\n 'identifier'], 'distribution_identifier':\n distribution['identifier']}\n setattr(self, '_distributions_index', distributions_index)\n setattr(self, '_datasets_index', datasets_index)\n setattr(self, '_fields_index', fields_index)\n", "role": "user" }, { "content": "build_index", "role": "assistant" } ]
def x(self, catalog=None): catalog = catalog or self return validation.is_valid_catalog(catalog, validator=self.validator)
is_valid_catalog
[ { "content": "Given the source code of a python function, suggest a fitting name for the function.", "role": "system" }, { "content": "def x(self, catalog=None):\n catalog = catalog or self\n return validation.is_valid_catalog(catalog, validator=self.validator)\n", "role": "user" }, { "content": "is_valid_catalog", "role": "assistant" } ]
def x(error, response): new_response = response.copy() new_response['status'] = 'ERROR' error_info = {'error_code': 1 if error.validator == 'required' else 2, 'message': error.message, 'validator': error.validator, 'validator_value': error.validator_value, 'path': list(error.path), 'instance': None if error.validator == 'required' else error.instance} if len(error.path) >= 2 and error.path[0] == 'dataset': position = new_response['error']['dataset'][error.path[1]] else: position = new_response['error']['catalog'] position['status'] = 'ERROR' position['errors'].append(error_info) return new_response
update_validation_response
[ { "content": "Given the source code of a python function, suggest a fitting name for the function.", "role": "system" }, { "content": "def x(error, response):\n new_response = response.copy()\n new_response['status'] = 'ERROR'\n error_info = {'error_code': 1 if error.validator == 'required' else 2,\n 'message': error.message, 'validator': error.validator,\n 'validator_value': error.validator_value, 'path': list(error.path),\n 'instance': None if error.validator == 'required' else error.instance}\n if len(error.path) >= 2 and error.path[0] == 'dataset':\n position = new_response['error']['dataset'][error.path[1]]\n else:\n position = new_response['error']['catalog']\n position['status'] = 'ERROR'\n position['errors'].append(error_info)\n return new_response\n", "role": "user" }, { "content": "update_validation_response", "role": "assistant" } ]
def x(self, catalog=None, only_errors=False, fmt='dict', export_path=None): catalog = catalog or self return validation.validate_catalog(catalog, only_errors, fmt, export_path, validator=self.validator)
validate_catalog
[ { "content": "Given the source code of a python function, suggest a fitting name for the function.", "role": "system" }, { "content": "def x(self, catalog=None, only_errors=False, fmt='dict', export_path=None):\n catalog = catalog or self\n return validation.validate_catalog(catalog, only_errors, fmt,\n export_path, validator=self.validator)\n", "role": "user" }, { "content": "validate_catalog", "role": "assistant" } ]
def x(cls, dataset, catalog_homepage=None): publisher_name = helpers.traverse_dict(dataset, ['publisher', 'name']) languages = cls._stringify_list(dataset.get('language')) super_themes = cls._stringify_list(dataset.get('superTheme')) themes = cls._stringify_list(dataset.get('theme')) def x(distribution): title = distribution.get('title') url = distribution.get('downloadURL') return '"{}": {}'.format(title, url) distributions = [d for d in dataset['distribution'] if isinstance(d, dict)] distributions_list = None if isinstance(distributions, list): distributions_strings = [_stringify_distribution(d) for d in distributions] distributions_list = '\n\n'.join(distributions_strings) distributions_formats = json.dumps(helpers. count_distribution_formats_dataset(dataset)) fields = OrderedDict() fields['dataset_identifier'] = dataset.get('identifier') fields['dataset_title'] = dataset.get('title') fields['dataset_accrualPeriodicity'] = dataset.get('accrualPeriodicity') fields['dataset_description'] = dataset.get('description') fields['dataset_publisher_name'] = publisher_name fields['dataset_superTheme'] = super_themes fields['dataset_theme'] = themes fields['dataset_landingPage'] = dataset.get('landingPage') fields['dataset_landingPage_generated'] = cls._generate_landingPage( catalog_homepage, dataset.get('identifier')) fields['dataset_issued'] = dataset.get('issued') fields['dataset_modified'] = dataset.get('modified') fields['distributions_formats'] = distributions_formats fields['distributions_list'] = distributions_list fields['dataset_license'] = dataset.get('license') fields['dataset_language'] = languages fields['dataset_spatial'] = dataset.get('spatial') fields['dataset_temporal'] = dataset.get('temporal') return fields
dataset_report_helper
[ { "content": "Given the source code of a python function, suggest a fitting name for the function.", "role": "system" }, { "content": "def x(cls, dataset, catalog_homepage=None):\n publisher_name = helpers.traverse_dict(dataset, ['publisher', 'name'])\n languages = cls._stringify_list(dataset.get('language'))\n super_themes = cls._stringify_list(dataset.get('superTheme'))\n themes = cls._stringify_list(dataset.get('theme'))\n\n def x(distribution):\n title = distribution.get('title')\n url = distribution.get('downloadURL')\n return '\"{}\": {}'.format(title, url)\n distributions = [d for d in dataset['distribution'] if isinstance(d, dict)]\n distributions_list = None\n if isinstance(distributions, list):\n distributions_strings = [_stringify_distribution(d) for d in\n distributions]\n distributions_list = '\\n\\n'.join(distributions_strings)\n distributions_formats = json.dumps(helpers.\n count_distribution_formats_dataset(dataset))\n fields = OrderedDict()\n fields['dataset_identifier'] = dataset.get('identifier')\n fields['dataset_title'] = dataset.get('title')\n fields['dataset_accrualPeriodicity'] = dataset.get('accrualPeriodicity')\n fields['dataset_description'] = dataset.get('description')\n fields['dataset_publisher_name'] = publisher_name\n fields['dataset_superTheme'] = super_themes\n fields['dataset_theme'] = themes\n fields['dataset_landingPage'] = dataset.get('landingPage')\n fields['dataset_landingPage_generated'] = cls._generate_landingPage(\n catalog_homepage, dataset.get('identifier'))\n fields['dataset_issued'] = dataset.get('issued')\n fields['dataset_modified'] = dataset.get('modified')\n fields['distributions_formats'] = distributions_formats\n fields['distributions_list'] = distributions_list\n fields['dataset_license'] = dataset.get('license')\n fields['dataset_language'] = languages\n fields['dataset_spatial'] = dataset.get('spatial')\n fields['dataset_temporal'] = dataset.get('temporal')\n return fields\n", "role": "user" }, { "content": "dataset_report_helper", "role": "assistant" } ]
def x(catalog, catalog_validation, url, catalog_id, catalog_org): fields = OrderedDict() fields['catalog_metadata_url'] = url fields['catalog_federation_id'] = catalog_id fields['catalog_federation_org'] = catalog_org fields['catalog_title'] = catalog.get('title') fields['catalog_description'] = catalog.get('description') fields['valid_catalog_metadata'] = 1 if catalog_validation['status' ] == 'OK' else 0 return fields
catalog_report_helper
[ { "content": "Given the source code of a python function, suggest a fitting name for the function.", "role": "system" }, { "content": "def x(catalog, catalog_validation, url, catalog_id, catalog_org):\n fields = OrderedDict()\n fields['catalog_metadata_url'] = url\n fields['catalog_federation_id'] = catalog_id\n fields['catalog_federation_org'] = catalog_org\n fields['catalog_title'] = catalog.get('title')\n fields['catalog_description'] = catalog.get('description')\n fields['valid_catalog_metadata'] = 1 if catalog_validation['status'\n ] == 'OK' else 0\n return fields\n", "role": "user" }, { "content": "catalog_report_helper", "role": "assistant" } ]
def x(self, dataset, dataset_validation, dataset_index, catalog_fields, harvest='none', report=None, catalog_homepage=None): good_qa, notes = self._dataset_qa(dataset) dataset_report = OrderedDict(catalog_fields) dataset_report['valid_dataset_metadata'] = 1 if dataset_validation['status' ] == 'OK' else 0 dataset_report['dataset_index'] = dataset_index if isinstance(harvest, list): dataset_report['harvest'] = 1 if dataset['title'] in harvest else 0 elif harvest == 'all': dataset_report['harvest'] = 1 elif harvest == 'none': dataset_report['harvest'] = 0 elif harvest == 'valid': dataset_report['harvest'] = int(dataset_report[ 'valid_dataset_metadata']) elif harvest == 'good': valid_metadata = int(dataset_report['valid_dataset_metadata']) == 1 dataset_report['harvest'] = 1 if valid_metadata and good_qa else 0 elif harvest == 'report': if not report: raise ValueError( """ Usted eligio 'report' como criterio de harvest, pero no proveyo un valor para el argumento 'report'. Por favor, intentelo nuevamente.""" ) datasets_to_harvest = self._extract_datasets_to_harvest(report) dataset_report['harvest'] = 1 if (dataset_report[ 'catalog_metadata_url'], dataset.get('title') ) in datasets_to_harvest else 0 else: raise ValueError( """ {} no es un criterio de harvest reconocido. Pruebe con 'all', 'none', 'valid' o 'report'.""" .format(harvest)) dataset_report.update(self._dataset_report_helper(dataset, catalog_homepage=catalog_homepage)) dataset_report['notas'] = '\n\n'.join(notes) return dataset_report.copy()
dataset_report
[ { "content": "Given the source code of a python function, suggest a fitting name for the function.", "role": "system" }, { "content": "def x(self, dataset, dataset_validation, dataset_index, catalog_fields,\n harvest='none', report=None, catalog_homepage=None):\n good_qa, notes = self._dataset_qa(dataset)\n dataset_report = OrderedDict(catalog_fields)\n dataset_report['valid_dataset_metadata'] = 1 if dataset_validation['status'\n ] == 'OK' else 0\n dataset_report['dataset_index'] = dataset_index\n if isinstance(harvest, list):\n dataset_report['harvest'] = 1 if dataset['title'] in harvest else 0\n elif harvest == 'all':\n dataset_report['harvest'] = 1\n elif harvest == 'none':\n dataset_report['harvest'] = 0\n elif harvest == 'valid':\n dataset_report['harvest'] = int(dataset_report[\n 'valid_dataset_metadata'])\n elif harvest == 'good':\n valid_metadata = int(dataset_report['valid_dataset_metadata']) == 1\n dataset_report['harvest'] = 1 if valid_metadata and good_qa else 0\n elif harvest == 'report':\n if not report:\n raise ValueError(\n \"\"\"\nUsted eligio 'report' como criterio de harvest, pero no proveyo un valor para\nel argumento 'report'. Por favor, intentelo nuevamente.\"\"\"\n )\n datasets_to_harvest = self._extract_datasets_to_harvest(report)\n dataset_report['harvest'] = 1 if (dataset_report[\n 'catalog_metadata_url'], dataset.get('title')\n ) in datasets_to_harvest else 0\n else:\n raise ValueError(\n \"\"\"\n{} no es un criterio de harvest reconocido. Pruebe con 'all', 'none', 'valid' o\n'report'.\"\"\"\n .format(harvest))\n dataset_report.update(self._dataset_report_helper(dataset,\n catalog_homepage=catalog_homepage))\n dataset_report['notas'] = '\\n\\n'.join(notes)\n return dataset_report.copy()\n", "role": "user" }, { "content": "dataset_report", "role": "assistant" } ]
def x(self, dataset): has_data_format = helpers.dataset_has_data_distributions(dataset) has_title = 'title' in dataset has_description = 'description' in dataset if has_title: has_min_title = len(dataset['title']) >= MIN_DATASET_TITLE else: has_min_title = False if has_description: has_min_desc = len(dataset['description']) >= MIN_DATASET_DESCRIPTION else: has_min_desc = False harvest = (has_title and has_description and has_data_format and has_min_title and has_min_desc) notes = [] if not has_data_format: notes.append('No tiene distribuciones con datos.') if not has_title: notes.append('Dataset sin titulo {}'.format(dataset)) elif not has_min_title: notes.append('Titulo tiene menos de {} caracteres'.format( MIN_DATASET_TITLE)) if not has_description: notes.append('Dataset sin descripcion {}'.format(dataset)) elif not has_min_desc: notes.append('Descripcion tiene menos de {} caracteres'.format( MIN_DATASET_DESCRIPTION)) return harvest, notes
dataset_qa
[ { "content": "Given the source code of a python function, suggest a fitting name for the function.", "role": "system" }, { "content": "def x(self, dataset):\n has_data_format = helpers.dataset_has_data_distributions(dataset)\n has_title = 'title' in dataset\n has_description = 'description' in dataset\n if has_title:\n has_min_title = len(dataset['title']) >= MIN_DATASET_TITLE\n else:\n has_min_title = False\n if has_description:\n has_min_desc = len(dataset['description']) >= MIN_DATASET_DESCRIPTION\n else:\n has_min_desc = False\n harvest = (has_title and has_description and has_data_format and\n has_min_title and has_min_desc)\n notes = []\n if not has_data_format:\n notes.append('No tiene distribuciones con datos.')\n if not has_title:\n notes.append('Dataset sin titulo {}'.format(dataset))\n elif not has_min_title:\n notes.append('Titulo tiene menos de {} caracteres'.format(\n MIN_DATASET_TITLE))\n if not has_description:\n notes.append('Dataset sin descripcion {}'.format(dataset))\n elif not has_min_desc:\n notes.append('Descripcion tiene menos de {} caracteres'.format(\n MIN_DATASET_DESCRIPTION))\n return harvest, notes\n", "role": "user" }, { "content": "dataset_qa", "role": "assistant" } ]
def x(self, catalog, harvest='none', report=None, catalog_id=None, catalog_homepage=None, catalog_org=None): url = catalog if isinstance(catalog, string_types) else None catalog = readers.read_catalog(catalog) validation = self.validate_catalog(catalog) catalog_validation = validation['error']['catalog'] datasets_validations = validation['error']['dataset'] catalog_fields = self._catalog_report_helper(catalog, catalog_validation, url, catalog_id, catalog_org) if 'dataset' in catalog and isinstance(catalog['dataset'], list): datasets = [(d if isinstance(d, dict) else {}) for d in catalog[ 'dataset']] else: datasets = [] catalog_report = [self._dataset_report(dataset, datasets_validations[ index], index, catalog_fields, harvest, report=report, catalog_homepage=catalog_homepage) for index, dataset in enumerate( datasets)] return catalog_report
catalog_report
[ { "content": "Given the source code of a python function, suggest a fitting name for the function.", "role": "system" }, { "content": "def x(self, catalog, harvest='none', report=None, catalog_id=None,\n catalog_homepage=None, catalog_org=None):\n url = catalog if isinstance(catalog, string_types) else None\n catalog = readers.read_catalog(catalog)\n validation = self.validate_catalog(catalog)\n catalog_validation = validation['error']['catalog']\n datasets_validations = validation['error']['dataset']\n catalog_fields = self._catalog_report_helper(catalog,\n catalog_validation, url, catalog_id, catalog_org)\n if 'dataset' in catalog and isinstance(catalog['dataset'], list):\n datasets = [(d if isinstance(d, dict) else {}) for d in catalog[\n 'dataset']]\n else:\n datasets = []\n catalog_report = [self._dataset_report(dataset, datasets_validations[\n index], index, catalog_fields, harvest, report=report,\n catalog_homepage=catalog_homepage) for index, dataset in enumerate(\n datasets)]\n return catalog_report\n", "role": "user" }, { "content": "catalog_report", "role": "assistant" } ]
def x(self, catalogs, harvest='valid', report=None, export_path=None, catalog_ids=None, catalog_homepages=None, catalog_orgs=None): assert isinstance(catalogs, string_types + (dict, list)) if isinstance(catalogs, list): assert not catalog_ids or len(catalogs) == len(catalog_ids) assert not catalog_orgs or len(catalogs) == len(catalog_orgs) assert not catalog_homepages or len(catalogs) == len(catalog_homepages) if isinstance(catalogs, string_types + (dict,)): catalogs = [catalogs] catalogs = list(map(readers.read_catalog_obj, catalogs)) if not catalog_ids: catalog_ids = [] for catalog in catalogs: catalog_ids.append(catalog.get('identifier', '')) if isinstance(catalog_ids, string_types + (dict,)): catalog_ids = [catalog_ids] * len(catalogs) if not catalog_orgs or isinstance(catalog_orgs, string_types + (dict,)): catalog_orgs = [catalog_orgs] * len(catalogs) if not catalog_homepages or isinstance(catalog_homepages, string_types + (dict,)): catalog_homepages = [catalog_homepages] * len(catalogs) catalogs_reports = [self.catalog_report(catalog, harvest, report, catalog_id=catalog_id, catalog_homepage=catalog_homepage, catalog_org=catalog_org) for catalog, catalog_id, catalog_org, catalog_homepage in zip(catalogs, catalog_ids, catalog_orgs, catalog_homepages)] full_report = [] for report in catalogs_reports: full_report.extend(report) if export_path: alignment = Alignment(wrap_text=True, shrink_to_fit=True, vertical= 'center') column_styles = {'dataset_title': {'width': 35}, 'dataset_description': {'width': 35}, 'dataset_publisher_name': {'width': 35}, 'dataset_issued': {'width': 20}, 'dataset_modified': {'width': 20}, 'distributions_formats': { 'width': 15}, 'distributions_list': {'width': 90}, 'notas': { 'width': 50}} cell_styles = [{'alignment': Alignment(vertical='center')}, {'row': 1, 'font': Font(bold=True)}, {'col': 'dataset_title', 'alignment': alignment}, {'col': 'dataset_description', 'alignment': alignment}, {'col': 'dataset_publisher_name', 'alignment': alignment}, {'col': 'distributions_formats', 'alignment': alignment}, {'col': 'distributions_list', 'alignment': alignment}, {'col': 'notas', 'alignment': alignment}] writers.write_table(table=full_report, path=export_path, column_styles=column_styles, cell_styles=cell_styles) else: return full_report
generate_datasets_report
[ { "content": "Given the source code of a python function, suggest a fitting name for the function.", "role": "system" }, { "content": "def x(self, catalogs, harvest='valid', report=None, export_path=None,\n catalog_ids=None, catalog_homepages=None, catalog_orgs=None):\n assert isinstance(catalogs, string_types + (dict, list))\n if isinstance(catalogs, list):\n assert not catalog_ids or len(catalogs) == len(catalog_ids)\n assert not catalog_orgs or len(catalogs) == len(catalog_orgs)\n assert not catalog_homepages or len(catalogs) == len(catalog_homepages)\n if isinstance(catalogs, string_types + (dict,)):\n catalogs = [catalogs]\n catalogs = list(map(readers.read_catalog_obj, catalogs))\n if not catalog_ids:\n catalog_ids = []\n for catalog in catalogs:\n catalog_ids.append(catalog.get('identifier', ''))\n if isinstance(catalog_ids, string_types + (dict,)):\n catalog_ids = [catalog_ids] * len(catalogs)\n if not catalog_orgs or isinstance(catalog_orgs, string_types + (dict,)):\n catalog_orgs = [catalog_orgs] * len(catalogs)\n if not catalog_homepages or isinstance(catalog_homepages, string_types +\n (dict,)):\n catalog_homepages = [catalog_homepages] * len(catalogs)\n catalogs_reports = [self.catalog_report(catalog, harvest, report,\n catalog_id=catalog_id, catalog_homepage=catalog_homepage,\n catalog_org=catalog_org) for catalog, catalog_id, catalog_org,\n catalog_homepage in zip(catalogs, catalog_ids, catalog_orgs,\n catalog_homepages)]\n full_report = []\n for report in catalogs_reports:\n full_report.extend(report)\n if export_path:\n alignment = Alignment(wrap_text=True, shrink_to_fit=True, vertical=\n 'center')\n column_styles = {'dataset_title': {'width': 35},\n 'dataset_description': {'width': 35}, 'dataset_publisher_name':\n {'width': 35}, 'dataset_issued': {'width': 20},\n 'dataset_modified': {'width': 20}, 'distributions_formats': {\n 'width': 15}, 'distributions_list': {'width': 90}, 'notas': {\n 'width': 50}}\n cell_styles = [{'alignment': Alignment(vertical='center')}, {'row':\n 1, 'font': Font(bold=True)}, {'col': 'dataset_title',\n 'alignment': alignment}, {'col': 'dataset_description',\n 'alignment': alignment}, {'col': 'dataset_publisher_name',\n 'alignment': alignment}, {'col': 'distributions_formats',\n 'alignment': alignment}, {'col': 'distributions_list',\n 'alignment': alignment}, {'col': 'notas', 'alignment': alignment}]\n writers.write_table(table=full_report, path=export_path,\n column_styles=column_styles, cell_styles=cell_styles)\n else:\n return full_report\n", "role": "user" }, { "content": "generate_datasets_report", "role": "assistant" } ]
def x(self, catalogs=None, harvest='valid', report=None, export_path=None): if isinstance(catalogs, string_types + (dict,)): catalogs = [catalogs] if harvest == 'report': if not report: raise ValueError( """ Usted eligio 'report' como criterio de harvest, pero no proveyo un valor para el argumento 'report'. Por favor, intentelo nuevamente.""" ) datasets_report = readers.read_table(report) elif harvest in ['valid', 'none', 'all']: assert isinstance(catalogs, string_types + (dict, list)) datasets_report = self.generate_datasets_report(catalogs, harvest) else: raise ValueError( """ {} no es un criterio de harvest reconocido. Pruebe con 'all', 'none', 'valid' o 'report'.""" .format(harvest)) config_keys = ['catalog_federation_id', 'catalog_federation_org', 'dataset_identifier'] config_translator = {'catalog_federation_id': 'catalog_id', 'catalog_federation_org': 'dataset_organization'} translated_keys = [config_translator.get(k, k) for k in config_keys] harvester_config = [OrderedDict([(config_translator.get(k, k), v) for k, v in dataset.items() if k in config_keys]) for dataset in datasets_report if bool(int(dataset['harvest']))] required_keys = set(translated_keys) for row in harvester_config: row_keys = set(row.keys()) msg = 'Hay una fila con claves {} y debe tener claves {}'.format( row_keys, required_keys) assert row_keys == required_keys, msg if export_path: writers.write_table(harvester_config, export_path) else: return harvester_config
generate_harvester_config
[ { "content": "Given the source code of a python function, suggest a fitting name for the function.", "role": "system" }, { "content": "def x(self, catalogs=None, harvest='valid', report=None, export_path=None):\n if isinstance(catalogs, string_types + (dict,)):\n catalogs = [catalogs]\n if harvest == 'report':\n if not report:\n raise ValueError(\n \"\"\"\nUsted eligio 'report' como criterio de harvest, pero no proveyo un valor para\nel argumento 'report'. Por favor, intentelo nuevamente.\"\"\"\n )\n datasets_report = readers.read_table(report)\n elif harvest in ['valid', 'none', 'all']:\n assert isinstance(catalogs, string_types + (dict, list))\n datasets_report = self.generate_datasets_report(catalogs, harvest)\n else:\n raise ValueError(\n \"\"\"\n{} no es un criterio de harvest reconocido. Pruebe con 'all', 'none', 'valid' o\n'report'.\"\"\"\n .format(harvest))\n config_keys = ['catalog_federation_id', 'catalog_federation_org',\n 'dataset_identifier']\n config_translator = {'catalog_federation_id': 'catalog_id',\n 'catalog_federation_org': 'dataset_organization'}\n translated_keys = [config_translator.get(k, k) for k in config_keys]\n harvester_config = [OrderedDict([(config_translator.get(k, k), v) for k,\n v in dataset.items() if k in config_keys]) for dataset in\n datasets_report if bool(int(dataset['harvest']))]\n required_keys = set(translated_keys)\n for row in harvester_config:\n row_keys = set(row.keys())\n msg = 'Hay una fila con claves {} y debe tener claves {}'.format(\n row_keys, required_keys)\n assert row_keys == required_keys, msg\n if export_path:\n writers.write_table(harvester_config, export_path)\n else:\n return harvester_config\n", "role": "user" }, { "content": "generate_harvester_config", "role": "assistant" } ]
def x(self, catalogs, harvest='all', report=None, export_path=None): assert isinstance(catalogs, string_types + (dict, list)) if isinstance(catalogs, string_types + (dict,)): catalogs = [catalogs] harvestable_catalogs = [readers.read_catalog(c) for c in catalogs] catalogs_urls = [(catalog if isinstance(catalog, string_types) else None) for catalog in catalogs] if harvest == 'all': pass elif harvest == 'none': for catalog in harvestable_catalogs: catalog['dataset'] = [] elif harvest == 'valid': report = self.generate_datasets_report(catalogs, harvest) return self.generate_harvestable_catalogs(catalogs=catalogs, harvest='report', report=report, export_path=export_path) elif harvest == 'report': if not report: raise ValueError( """ Usted eligio 'report' como criterio de harvest, pero no proveyo un valor para el argumento 'report'. Por favor, intentelo nuevamente.""" ) datasets_to_harvest = self._extract_datasets_to_harvest(report) for idx_cat, catalog in enumerate(harvestable_catalogs): catalog_url = catalogs_urls[idx_cat] if 'dataset' in catalog and isinstance(catalog['dataset'], list): catalog['dataset'] = [dataset for dataset in catalog[ 'dataset'] if (catalog_url, dataset.get('title')) in datasets_to_harvest] else: catalog['dataset'] = [] else: raise ValueError( """ {} no es un criterio de harvest reconocido. Pruebe con 'all', 'none', 'valid' o 'report'.""" .format(harvest)) if export_path and os.path.isdir(export_path): for idx, catalog in enumerate(harvestable_catalogs): filename = os.path.join(export_path, 'catalog_{}'.format(idx)) writers.write_json(catalog, filename) elif export_path: writers.write_json(harvestable_catalogs, export_path) else: return harvestable_catalogs
generate_harvestable_catalogs
[ { "content": "Given the source code of a python function, suggest a fitting name for the function.", "role": "system" }, { "content": "def x(self, catalogs, harvest='all', report=None, export_path=None):\n assert isinstance(catalogs, string_types + (dict, list))\n if isinstance(catalogs, string_types + (dict,)):\n catalogs = [catalogs]\n harvestable_catalogs = [readers.read_catalog(c) for c in catalogs]\n catalogs_urls = [(catalog if isinstance(catalog, string_types) else\n None) for catalog in catalogs]\n if harvest == 'all':\n pass\n elif harvest == 'none':\n for catalog in harvestable_catalogs:\n catalog['dataset'] = []\n elif harvest == 'valid':\n report = self.generate_datasets_report(catalogs, harvest)\n return self.generate_harvestable_catalogs(catalogs=catalogs,\n harvest='report', report=report, export_path=export_path)\n elif harvest == 'report':\n if not report:\n raise ValueError(\n \"\"\"\nUsted eligio 'report' como criterio de harvest, pero no proveyo un valor para\nel argumento 'report'. Por favor, intentelo nuevamente.\"\"\"\n )\n datasets_to_harvest = self._extract_datasets_to_harvest(report)\n for idx_cat, catalog in enumerate(harvestable_catalogs):\n catalog_url = catalogs_urls[idx_cat]\n if 'dataset' in catalog and isinstance(catalog['dataset'], list):\n catalog['dataset'] = [dataset for dataset in catalog[\n 'dataset'] if (catalog_url, dataset.get('title')) in\n datasets_to_harvest]\n else:\n catalog['dataset'] = []\n else:\n raise ValueError(\n \"\"\"\n{} no es un criterio de harvest reconocido. Pruebe con 'all', 'none', 'valid' o\n'report'.\"\"\"\n .format(harvest))\n if export_path and os.path.isdir(export_path):\n for idx, catalog in enumerate(harvestable_catalogs):\n filename = os.path.join(export_path, 'catalog_{}'.format(idx))\n writers.write_json(catalog, filename)\n elif export_path:\n writers.write_json(harvestable_catalogs, export_path)\n else:\n return harvestable_catalogs\n", "role": "user" }, { "content": "generate_harvestable_catalogs", "role": "assistant" } ]
def x(self, catalog, export_path=None): catalog = readers.read_catalog(catalog) if 'dataset' in catalog and isinstance(catalog['dataset'], list): datasets = [(d if isinstance(d, dict) else {}) for d in catalog[ 'dataset']] else: datasets = [] validation = self.validate_catalog(catalog)['error']['dataset'] def x(index, dataset): info = OrderedDict() info['indice'] = index info['titulo'] = dataset.get('title') info['identificador'] = dataset.get('identifier') info['estado_metadatos'] = validation[index]['status'] info['cant_errores'] = len(validation[index]['errors']) info['cant_distribuciones'] = len(dataset['distribution']) return info summary = [info_dataset(i, ds) for i, ds in enumerate(datasets)] if export_path: writers.write_table(summary, export_path) else: return summary
generate_datasets_summary
[ { "content": "Given the source code of a python function, suggest a fitting name for the function.", "role": "system" }, { "content": "def x(self, catalog, export_path=None):\n catalog = readers.read_catalog(catalog)\n if 'dataset' in catalog and isinstance(catalog['dataset'], list):\n datasets = [(d if isinstance(d, dict) else {}) for d in catalog[\n 'dataset']]\n else:\n datasets = []\n validation = self.validate_catalog(catalog)['error']['dataset']\n\n def x(index, dataset):\n info = OrderedDict()\n info['indice'] = index\n info['titulo'] = dataset.get('title')\n info['identificador'] = dataset.get('identifier')\n info['estado_metadatos'] = validation[index]['status']\n info['cant_errores'] = len(validation[index]['errors'])\n info['cant_distribuciones'] = len(dataset['distribution'])\n return info\n summary = [info_dataset(i, ds) for i, ds in enumerate(datasets)]\n if export_path:\n writers.write_table(summary, export_path)\n else:\n return summary\n", "role": "user" }, { "content": "generate_datasets_summary", "role": "assistant" } ]
def x(cls, report): assert isinstance(report, string_types + (list,)) if isinstance(report, list) and all([(isinstance(x, tuple) and len(x) == 2) for x in report]): return report table = readers.read_table(report) table_keys = table[0].keys() expected_keys = ['catalog_metadata_url', 'dataset_title', 'dataset_accrualPeriodicity'] for key in expected_keys: if key not in table_keys: raise KeyError( """ El reporte no contiene la clave obligatoria {}. Pruebe con otro archivo. """ .format(key)) if 'harvest' in table_keys: datasets_to_harvest = [(row['catalog_metadata_url'], row[ 'dataset_title']) for row in table if int(row['harvest'])] else: datasets_to_harvest = [(row['catalog_metadata_url'], row[ 'dataset_title']) for row in table] return datasets_to_harvest
extract_datasets_to_harvest
[ { "content": "Given the source code of a python function, suggest a fitting name for the function.", "role": "system" }, { "content": "def x(cls, report):\n assert isinstance(report, string_types + (list,))\n if isinstance(report, list) and all([(isinstance(x, tuple) and len(x) ==\n 2) for x in report]):\n return report\n table = readers.read_table(report)\n table_keys = table[0].keys()\n expected_keys = ['catalog_metadata_url', 'dataset_title',\n 'dataset_accrualPeriodicity']\n for key in expected_keys:\n if key not in table_keys:\n raise KeyError(\n \"\"\"\nEl reporte no contiene la clave obligatoria {}. Pruebe con otro archivo.\n\"\"\"\n .format(key))\n if 'harvest' in table_keys:\n datasets_to_harvest = [(row['catalog_metadata_url'], row[\n 'dataset_title']) for row in table if int(row['harvest'])]\n else:\n datasets_to_harvest = [(row['catalog_metadata_url'], row[\n 'dataset_title']) for row in table]\n return datasets_to_harvest\n", "role": "user" }, { "content": "extract_datasets_to_harvest", "role": "assistant" } ]
def x(self, dataset_identifier, export_path=None, catalog=None): catalog = DataJson(catalog) or self dataset = catalog.get_dataset(dataset_identifier) text = documentation.dataset_to_markdown(dataset) if export_path: with open(export_path, 'wb') as f: f.write(text.encode('utf-8')) else: return text
generate_dataset_documentation
[ { "content": "Given the source code of a python function, suggest a fitting name for the function.", "role": "system" }, { "content": "def x(self, dataset_identifier, export_path=None, catalog=None):\n catalog = DataJson(catalog) or self\n dataset = catalog.get_dataset(dataset_identifier)\n text = documentation.dataset_to_markdown(dataset)\n if export_path:\n with open(export_path, 'wb') as f:\n f.write(text.encode('utf-8'))\n else:\n return text\n", "role": "user" }, { "content": "generate_dataset_documentation", "role": "assistant" } ]
def x(path: str) ->Callable: module_path, _, class_name = path.rpartition('.') return getattr(import_module(module_path), class_name)
import_by_path
[ { "content": "Given the source code of a python function, suggest a fitting name for the function.", "role": "system" }, { "content": "def x(path: str) ->Callable:\n module_path, _, class_name = path.rpartition('.')\n return getattr(import_module(module_path), class_name)\n", "role": "user" }, { "content": "import_by_path", "role": "assistant" } ]
def x(catalog, default_values=None, catalog_format=None): unknown_catalog_repr_msg = ( """No se pudo inferir una representación válida de un catálogo del parámetro provisto: {}.""" .format(catalog)) assert isinstance(catalog, string_types + (dict,) ), unknown_catalog_repr_msg if isinstance(catalog, dict): catalog_dict = catalog else: suffix = catalog.split('.')[-1].strip('/') if suffix in ('json', 'xlsx'): catalog_format = catalog_format or suffix if catalog_format == 'xlsx': try: catalog_dict = read_xlsx_catalog(catalog) except (openpyxl_exceptions + (ValueError, AssertionError, IOError, BadZipfile)) as e: raise ce.NonParseableCatalog(catalog, str(e)) elif catalog_format == 'json': try: catalog_dict = read_json(catalog) except (ValueError, TypeError, IOError) as e: raise ce.NonParseableCatalog(catalog, str(e)) elif catalog_format == 'ckan': catalog_dict = read_ckan_catalog(catalog) else: catalog_dict = read_suffixless_catalog(catalog) if default_values: _apply_default_values(catalog_dict, default_values) return catalog_dict
read_catalog
[ { "content": "Given the source code of a python function, suggest a fitting name for the function.", "role": "system" }, { "content": "def x(catalog, default_values=None, catalog_format=None):\n unknown_catalog_repr_msg = (\n \"\"\"No se pudo inferir una representación válida de un catálogo del\n parámetro provisto: {}.\"\"\"\n .format(catalog))\n assert isinstance(catalog, string_types + (dict,)\n ), unknown_catalog_repr_msg\n if isinstance(catalog, dict):\n catalog_dict = catalog\n else:\n suffix = catalog.split('.')[-1].strip('/')\n if suffix in ('json', 'xlsx'):\n catalog_format = catalog_format or suffix\n if catalog_format == 'xlsx':\n try:\n catalog_dict = read_xlsx_catalog(catalog)\n except (openpyxl_exceptions + (ValueError, AssertionError,\n IOError, BadZipfile)) as e:\n raise ce.NonParseableCatalog(catalog, str(e))\n elif catalog_format == 'json':\n try:\n catalog_dict = read_json(catalog)\n except (ValueError, TypeError, IOError) as e:\n raise ce.NonParseableCatalog(catalog, str(e))\n elif catalog_format == 'ckan':\n catalog_dict = read_ckan_catalog(catalog)\n else:\n catalog_dict = read_suffixless_catalog(catalog)\n if default_values:\n _apply_default_values(catalog_dict, default_values)\n return catalog_dict\n", "role": "user" }, { "content": "read_catalog", "role": "assistant" } ]
def x(catalog, default_values): for field, default_value in iteritems(default_values): class_metadata = field.split('_')[0] field_json_path = field.split('_')[1:] if class_metadata == 'catalog': _set_default_value(catalog, field_json_path, default_value) elif class_metadata == 'dataset': for dataset in catalog['dataset']: _set_default_value(dataset, field_json_path, default_value) elif class_metadata == 'distribution': for dataset in catalog['dataset']: for distribution in dataset['distribution']: _set_default_value(distribution, field_json_path, default_value) elif class_metadata == 'field': for dataset in catalog['dataset']: for distribution in dataset['distribution']: if distribution.get('field'): for field in distribution['field']: _set_default_value(field, field_json_path, default_value)
apply_default_values
[ { "content": "Given the source code of a python function, suggest a fitting name for the function.", "role": "system" }, { "content": "def x(catalog, default_values):\n for field, default_value in iteritems(default_values):\n class_metadata = field.split('_')[0]\n field_json_path = field.split('_')[1:]\n if class_metadata == 'catalog':\n _set_default_value(catalog, field_json_path, default_value)\n elif class_metadata == 'dataset':\n for dataset in catalog['dataset']:\n _set_default_value(dataset, field_json_path, default_value)\n elif class_metadata == 'distribution':\n for dataset in catalog['dataset']:\n for distribution in dataset['distribution']:\n _set_default_value(distribution, field_json_path,\n default_value)\n elif class_metadata == 'field':\n for dataset in catalog['dataset']:\n for distribution in dataset['distribution']:\n if distribution.get('field'):\n for field in distribution['field']:\n _set_default_value(field, field_json_path,\n default_value)\n", "role": "user" }, { "content": "apply_default_values", "role": "assistant" } ]
def x(dict_obj, keys, value): variable = dict_obj if len(keys) == 1: if not variable.get(keys[0]): variable[keys[0]] = value else: for idx, field in enumerate(keys): if idx < len(keys) - 1: variable = variable[field] if not variable.get(keys[-1]): variable[keys[-1]] = value
set_default_value
[ { "content": "Given the source code of a python function, suggest a fitting name for the function.", "role": "system" }, { "content": "def x(dict_obj, keys, value):\n variable = dict_obj\n if len(keys) == 1:\n if not variable.get(keys[0]):\n variable[keys[0]] = value\n else:\n for idx, field in enumerate(keys):\n if idx < len(keys) - 1:\n variable = variable[field]\n if not variable.get(keys[-1]):\n variable[keys[-1]] = value\n", "role": "user" }, { "content": "set_default_value", "role": "assistant" } ]
def x(json_path_or_url): assert isinstance(json_path_or_url, string_types) parsed_url = urlparse(json_path_or_url) if parsed_url.scheme in ['http', 'https']: res = requests.get(json_path_or_url, verify=False) json_dict = json.loads(res.content, encoding='utf-8') else: path_start = parsed_url.path.split('.')[0] if path_start == 'www' or path_start.isdigit(): warnings.warn( """ La dirección del archivo JSON ingresada parece una URL, pero no comienza con 'http' o 'https' así que será tratada como una dirección local. ¿Tal vez quiso decir 'http://{}'?""" .format(json_path_or_url).encode('utf-8')) with io.open(json_path_or_url, encoding='utf-8') as json_file: json_dict = json.load(json_file) return json_dict
read_json
[ { "content": "Given the source code of a python function, suggest a fitting name for the function.", "role": "system" }, { "content": "def x(json_path_or_url):\n assert isinstance(json_path_or_url, string_types)\n parsed_url = urlparse(json_path_or_url)\n if parsed_url.scheme in ['http', 'https']:\n res = requests.get(json_path_or_url, verify=False)\n json_dict = json.loads(res.content, encoding='utf-8')\n else:\n path_start = parsed_url.path.split('.')[0]\n if path_start == 'www' or path_start.isdigit():\n warnings.warn(\n \"\"\"\nLa dirección del archivo JSON ingresada parece una URL, pero no comienza\ncon 'http' o 'https' así que será tratada como una dirección local. ¿Tal vez\nquiso decir 'http://{}'?\"\"\"\n .format(json_path_or_url).encode('utf-8'))\n with io.open(json_path_or_url, encoding='utf-8') as json_file:\n json_dict = json.load(json_file)\n return json_dict\n", "role": "user" }, { "content": "read_json", "role": "assistant" } ]
def x(xlsx_path_or_url, logger=None): logger = logger or pydj_logger assert isinstance(xlsx_path_or_url, string_types) parsed_url = urlparse(xlsx_path_or_url) if parsed_url.scheme in ['http', 'https']: res = requests.get(xlsx_path_or_url, verify=False) tmpfilename = '.tmpfile.xlsx' with io.open(tmpfilename, 'wb') as tmpfile: tmpfile.write(res.content) catalog_dict = read_local_xlsx_catalog(tmpfilename, logger) os.remove(tmpfilename) else: path_start = parsed_url.path.split('.')[0] if path_start == 'www' or path_start.isdigit(): warnings.warn( """ La dirección del archivo JSON ingresada parece una URL, pero no comienza con 'http' o 'https' así que será tratada como una dirección local. ¿Tal vez quiso decir 'http://{}'? """ .format(xlsx_path_or_url).encode('utf8')) catalog_dict = read_local_xlsx_catalog(xlsx_path_or_url) return catalog_dict
read_xlsx_catalog
[ { "content": "Given the source code of a python function, suggest a fitting name for the function.", "role": "system" }, { "content": "def x(xlsx_path_or_url, logger=None):\n logger = logger or pydj_logger\n assert isinstance(xlsx_path_or_url, string_types)\n parsed_url = urlparse(xlsx_path_or_url)\n if parsed_url.scheme in ['http', 'https']:\n res = requests.get(xlsx_path_or_url, verify=False)\n tmpfilename = '.tmpfile.xlsx'\n with io.open(tmpfilename, 'wb') as tmpfile:\n tmpfile.write(res.content)\n catalog_dict = read_local_xlsx_catalog(tmpfilename, logger)\n os.remove(tmpfilename)\n else:\n path_start = parsed_url.path.split('.')[0]\n if path_start == 'www' or path_start.isdigit():\n warnings.warn(\n \"\"\"\nLa dirección del archivo JSON ingresada parece una URL, pero no comienza\ncon 'http' o 'https' así que será tratada como una dirección local. ¿Tal vez\nquiso decir 'http://{}'?\n \"\"\"\n .format(xlsx_path_or_url).encode('utf8'))\n catalog_dict = read_local_xlsx_catalog(xlsx_path_or_url)\n return catalog_dict\n", "role": "user" }, { "content": "read_xlsx_catalog", "role": "assistant" } ]
def x(catalog_or_dataset): level = catalog_or_dataset keys = [k for k in ['publisher_name', 'publisher_mbox'] if k in level] if keys: level['publisher'] = {key.replace('publisher_', ''): level.pop(key) for key in keys} return level
make_publisher
[ { "content": "Given the source code of a python function, suggest a fitting name for the function.", "role": "system" }, { "content": "def x(catalog_or_dataset):\n level = catalog_or_dataset\n keys = [k for k in ['publisher_name', 'publisher_mbox'] if k in level]\n if keys:\n level['publisher'] = {key.replace('publisher_', ''): level.pop(key) for\n key in keys}\n return level\n", "role": "user" }, { "content": "make_publisher", "role": "assistant" } ]
def x(dataset): keys = [k for k in ['contactPoint_fn', 'contactPoint_hasEmail'] if k in dataset] if keys: dataset['contactPoint'] = {key.replace('contactPoint_', ''): dataset.pop(key) for key in keys} return dataset
make_contact_point
[ { "content": "Given the source code of a python function, suggest a fitting name for the function.", "role": "system" }, { "content": "def x(dataset):\n keys = [k for k in ['contactPoint_fn', 'contactPoint_hasEmail'] if k in\n dataset]\n if keys:\n dataset['contactPoint'] = {key.replace('contactPoint_', ''):\n dataset.pop(key) for key in keys}\n return dataset\n", "role": "user" }, { "content": "make_contact_point", "role": "assistant" } ]
def x(catalog, dataset_identifier, dataset_title, logger=None): logger = logger or pydj_logger matching_datasets = [] for idx, dataset in enumerate(catalog['catalog_dataset']): if dataset['dataset_identifier'] == dataset_identifier: if dataset['dataset_title'] == dataset_title: matching_datasets.append(idx) else: logger.warning(ce.DatasetUnexpectedTitle(dataset_identifier, dataset['dataset_title'], dataset_title)) no_dsets_msg = 'No hay ningun dataset con el identifier {}'.format( dataset_identifier) many_dsets_msg = 'Hay mas de un dataset con el identifier {}: {}'.format( dataset_identifier, matching_datasets) if len(matching_datasets) == 0: logger.error(no_dsets_msg) return None elif len(matching_datasets) > 1: logger.error(many_dsets_msg) return None else: return matching_datasets[0]
get_dataset_index
[ { "content": "Given the source code of a python function, suggest a fitting name for the function.", "role": "system" }, { "content": "def x(catalog, dataset_identifier, dataset_title, logger=None):\n logger = logger or pydj_logger\n matching_datasets = []\n for idx, dataset in enumerate(catalog['catalog_dataset']):\n if dataset['dataset_identifier'] == dataset_identifier:\n if dataset['dataset_title'] == dataset_title:\n matching_datasets.append(idx)\n else:\n logger.warning(ce.DatasetUnexpectedTitle(dataset_identifier,\n dataset['dataset_title'], dataset_title))\n no_dsets_msg = 'No hay ningun dataset con el identifier {}'.format(\n dataset_identifier)\n many_dsets_msg = 'Hay mas de un dataset con el identifier {}: {}'.format(\n dataset_identifier, matching_datasets)\n if len(matching_datasets) == 0:\n logger.error(no_dsets_msg)\n return None\n elif len(matching_datasets) > 1:\n logger.error(many_dsets_msg)\n return None\n else:\n return matching_datasets[0]\n", "role": "user" }, { "content": "get_dataset_index", "role": "assistant" } ]
def x(catalog, dataset_identifier, dataset_title, distribution_identifier, distribution_title, logger=None): logger = logger or pydj_logger dataset_index = _get_dataset_index(catalog, dataset_identifier, dataset_title) if dataset_index is None: return None, None else: dataset = catalog['catalog_dataset'][dataset_index] matching_distributions = [] for idx, distribution in enumerate(dataset['dataset_distribution']): if distribution['distribution_identifier'] == distribution_identifier: if distribution['distribution_title'] == distribution_title: matching_distributions.append(idx) else: logger.warning(ce.DistributionUnexpectedTitle( distribution_identifier, distribution[ 'distribution_title'], distribution_title)) if len(matching_distributions) == 0: logger.warning(ce.DistributionTitleNonExistentError( distribution_title, dataset_identifier)) return dataset_index, None elif len(matching_distributions) > 1: logger.warning(ce.DistributionTitleRepetitionError( distribution_title, dataset_identifier, matching_distributions)) return dataset_index, None else: distribution_index = matching_distributions[0] return dataset_index, distribution_index
get_distribution_indexes
[ { "content": "Given the source code of a python function, suggest a fitting name for the function.", "role": "system" }, { "content": "def x(catalog, dataset_identifier, dataset_title, distribution_identifier,\n distribution_title, logger=None):\n logger = logger or pydj_logger\n dataset_index = _get_dataset_index(catalog, dataset_identifier,\n dataset_title)\n if dataset_index is None:\n return None, None\n else:\n dataset = catalog['catalog_dataset'][dataset_index]\n matching_distributions = []\n for idx, distribution in enumerate(dataset['dataset_distribution']):\n if distribution['distribution_identifier'] == distribution_identifier:\n if distribution['distribution_title'] == distribution_title:\n matching_distributions.append(idx)\n else:\n logger.warning(ce.DistributionUnexpectedTitle(\n distribution_identifier, distribution[\n 'distribution_title'], distribution_title))\n if len(matching_distributions) == 0:\n logger.warning(ce.DistributionTitleNonExistentError(\n distribution_title, dataset_identifier))\n return dataset_index, None\n elif len(matching_distributions) > 1:\n logger.warning(ce.DistributionTitleRepetitionError(\n distribution_title, dataset_identifier, matching_distributions))\n return dataset_index, None\n else:\n distribution_index = matching_distributions[0]\n return dataset_index, distribution_index\n", "role": "user" }, { "content": "get_distribution_indexes", "role": "assistant" } ]
def x(xlsx_path, logger=None):
    logger = logger or pydj_logger
    wb = pyxl.load_workbook(open(xlsx_path, 'rb'), data_only=True,
        read_only=True)
    ws_catalog = helpers.get_ws_case_insensitive(wb, 'catalog')
    ws_dataset = helpers.get_ws_case_insensitive(wb, 'dataset')
    ws_distribution = helpers.get_ws_case_insensitive(wb, 'distribution')
    ws_theme = helpers.get_ws_case_insensitive(wb, 'theme')
    ws_field = helpers.get_ws_case_insensitive(wb, 'field')
    catalogs = helpers.sheet_to_table(ws_catalog)
    assert len(catalogs) != 0, "No hay ningun catálogo en la hoja 'Catalog'"
    assert len(catalogs) < 2, "Hay mas de un catálogo en la hoja 'Catalog'"
    catalog = catalogs[0]
    catalog['catalog_dataset'] = helpers.sheet_to_table(ws_dataset)
    for dataset in catalog['catalog_dataset']:
        dataset['dataset_identifier'] = text_type(dataset['dataset_identifier']
            )
    catalog['catalog_themeTaxonomy'] = helpers.sheet_to_table(ws_theme)
    for dataset in catalog['catalog_dataset']:
        dataset['dataset_distribution'] = []
    distributions = helpers.sheet_to_table(ws_distribution)
    for distribution in distributions:
        distribution['dataset_identifier'] = text_type(distribution[
            'dataset_identifier'])
        distribution['distribution_identifier'] = text_type(distribution[
            'distribution_identifier'])
        dataset_index = _get_dataset_index(catalog, distribution[
            'dataset_identifier'], distribution['dataset_title'], logger)
        if dataset_index is None:
            logger.error(
                """La distribucion con ID '{}' y titulo '{}' no se
pudo asignar a un dataset, y no figurara en el data.json de salida."""
                .format(distribution['distribution_identifier'],
                distribution['distribution_title']))
        else:
            dataset = catalog['catalog_dataset'][dataset_index]
            dataset['dataset_distribution'].append(distribution)
    fields = helpers.sheet_to_table(ws_field)
    for idx, field in enumerate(fields):
        field['dataset_identifier'] = text_type(field['dataset_identifier'])
        field['distribution_identifier'] = text_type(field[
            'distribution_identifier'])
        dataset_index, distribution_index = _get_distribution_indexes(catalog,
            field['dataset_identifier'], field['dataset_title'], field[
            'distribution_identifier'], field['distribution_title'], logger)
        if dataset_index is None:
            logger.error(
                """No se encontro el dataset '{}' especificado para el campo
 '{}' (fila #{} de la hoja "Field"). Este campo no figurara en
 el data.json de salida."""
                .format(unidecode(field['dataset_title']), unidecode(field[
                'field_title']), idx + 2))
        elif distribution_index is None:
            logger.error(
                """No se encontro la distribucion '{}' especificada para el
 campo'{}' (fila #{} de la hoja "Field").
Este campo no figurara en el data.json de salida."""
                .format(unidecode(field['distribution_title']), unidecode(
                field['field_title']), idx + 2))
        else:
            dataset = catalog['catalog_dataset'][dataset_index]
            distribution = dataset['dataset_distribution'][distribution_index]
            if 'distribution_field' in distribution:
                distribution['distribution_field'].append(field)
            else:
                distribution['distribution_field'] = [field]
    if 'catalog_language' in catalog:
        catalog['catalog_language'] = helpers.string_to_list(catalog[
            'catalog_language'], filter_empty=True)
    for dataset in catalog['catalog_dataset']:
        array_fields = ['dataset_superTheme', 'dataset_theme',
            'dataset_tags', 'dataset_keyword', 'dataset_language']
        for field in array_fields:
            if field in dataset:
                dataset[field] = helpers.string_to_list(dataset[field],
                    filter_empty=True)
    _remove_prefixes_and_unprefixed_fields(catalog, 'catalog_')
    for theme in catalog['themeTaxonomy']:
        _remove_prefixes_and_unprefixed_fields(theme, 'theme_')
    for dataset in catalog['dataset']:
        _remove_prefixes_and_unprefixed_fields(dataset, 'dataset_')
    for dataset in catalog['dataset']:
        for distribution in dataset['distribution']:
            _remove_prefixes_and_unprefixed_fields(distribution,
                'distribution_')
    for dataset in catalog['dataset']:
        for distribution in dataset['distribution']:
            if 'field' in distribution:
                for field in distribution['field']:
                    _remove_prefixes_and_unprefixed_fields(field, 'field_')
    catalog = _make_publisher(catalog)
    for dataset in catalog['dataset']:
        dataset = _make_publisher(dataset)
        dataset = _make_contact_point(dataset)
    return catalog
read_local_xlsx_catalog
[ { "content": "Given the source code of a python function, suggest a fitting name for the function.", "role": "system" }, { "content": "def x(xlsx_path, logger=None):\n logger = logger or pydj_logger\n wb = pyxl.load_workbook(open(xlsx_path, 'rb'), data_only=True,\n read_only=True)\n ws_catalog = helpers.get_ws_case_insensitive(wb, 'catalog')\n ws_dataset = helpers.get_ws_case_insensitive(wb, 'dataset')\n ws_distribution = helpers.get_ws_case_insensitive(wb, 'distribution')\n ws_theme = helpers.get_ws_case_insensitive(wb, 'theme')\n ws_field = helpers.get_ws_case_insensitive(wb, 'field')\n catalogs = helpers.sheet_to_table(ws_catalog)\n assert len(catalogs) != 0, \"No hay ningun catálogo en la hoja 'Catalog'\"\n assert len(catalogs) < 2, \"Hay mas de un catálogo en la hoja 'Catalog'\"\n catalog = catalogs[0]\n catalog['catalog_dataset'] = helpers.sheet_to_table(ws_dataset)\n for dataset in catalog['catalog_dataset']:\n dataset['dataset_identifier'] = text_type(dataset['dataset_identifier']\n )\n catalog['catalog_themeTaxonomy'] = helpers.sheet_to_table(ws_theme)\n for dataset in catalog['catalog_dataset']:\n dataset['dataset_distribution'] = []\n distributions = helpers.sheet_to_table(ws_distribution)\n for distribution in distributions:\n distribution['dataset_identifier'] = text_type(distribution[\n 'dataset_identifier'])\n distribution['distribution_identifier'] = text_type(distribution[\n 'distribution_identifier'])\n dataset_index = _get_dataset_index(catalog, distribution[\n 'dataset_identifier'], distribution['dataset_title'], logger)\n if dataset_index is None:\n logger.error(\n \"\"\"La distribucion con ID '{}' y titulo '{}' no se\npudo asignar a un dataset, y no figurara en el data.json de salida.\"\"\"\n .format(distribution['distribution_identifier'],\n distribution['distribution_title']))\n else:\n dataset = catalog['catalog_dataset'][dataset_index]\n dataset['dataset_distribution'].append(distribution)\n fields = helpers.sheet_to_table(ws_field)\n for idx, field in enumerate(fields):\n field['dataset_identifier'] = text_type(field['dataset_identifier'])\n field['distribution_identifier'] = text_type(field[\n 'distribution_identifier'])\n dataset_index, distribution_index = _get_distribution_indexes(catalog,\n field['dataset_identifier'], field['dataset_title'], field[\n 'distribution_identifier'], field['distribution_title'], logger)\n if dataset_index is None:\n logger.error(\n \"\"\"No se encontro el dataset '{}' especificado para el campo\n '{}' (fila #{} de la hoja \"Field\"). Este campo no figurara en\n el data.json de salida.\"\"\"\n .format(unidecode(field['dataset_title']), unidecode(field[\n 'field_title']), idx + 2))\n elif distribution_index is None:\n logger.error(\n \"\"\"No se encontro la distribucion '{}' especificada para el\n campo'{}' (fila #{} de la hoja \"Field\"). 
Este campo no figurara\n en el data.json de salida.\"\"\"\n .format(unidecode(field['distribution_title']), unidecode(\n field['field_title']), idx + 2))\n else:\n dataset = catalog['catalog_dataset'][dataset_index]\n distribution = dataset['dataset_distribution'][distribution_index]\n if 'distribution_field' in distribution:\n distribution['distribution_field'].append(field)\n else:\n distribution['distribution_field'] = [field]\n if 'catalog_language' in catalog:\n catalog['catalog_language'] = helpers.string_to_list(catalog[\n 'catalog_language'], filter_empty=True)\n for dataset in catalog['catalog_dataset']:\n array_fields = ['dataset_superTheme', 'dataset_theme',\n 'dataset_tags', 'dataset_keyword', 'dataset_language']\n for field in array_fields:\n if field in dataset:\n dataset[field] = helpers.string_to_list(dataset[field],\n filter_empty=True)\n _remove_prefixes_and_unprefixed_fields(catalog, 'catalog_')\n for theme in catalog['themeTaxonomy']:\n _remove_prefixes_and_unprefixed_fields(theme, 'theme_')\n for dataset in catalog['dataset']:\n _remove_prefixes_and_unprefixed_fields(dataset, 'dataset_')\n for dataset in catalog['dataset']:\n for distribution in dataset['distribution']:\n _remove_prefixes_and_unprefixed_fields(distribution,\n 'distribution_')\n for dataset in catalog['dataset']:\n for distribution in dataset['distribution']:\n if 'field' in distribution:\n for field in distribution['field']:\n _remove_prefixes_and_unprefixed_fields(field, 'field_')\n catalog = _make_publisher(catalog)\n for dataset in catalog['dataset']:\n dataset = _make_publisher(dataset)\n dataset = _make_contact_point(dataset)\n return catalog\n", "role": "user" }, { "content": "read_local_xlsx_catalog", "role": "assistant" } ]