Columns: function (string, length 11 to 56k), repo_name (string, length 5 to 60), features (sequence)
def test_incompatible_higher_rank_inputs_raises(self, use_edges, use_receiver_nodes, use_sender_nodes, use_globals, field): """An exception should be raised if the inputs have incompatible shapes.""" input_graph = self._get_shaped_input_graph() input_graph = input_graph.replace( **{field: tf.transpose(getattr(input_graph, field), [0, 2, 1, 3])}) network = blocks.EdgeBlock( functools.partial(snt.Conv2D, output_channels=10, kernel_shape=[3, 3]), use_edges=use_edges, use_receiver_nodes=use_receiver_nodes, use_sender_nodes=use_sender_nodes, use_globals=use_globals ) with self.assertRaisesRegexp(ValueError, "in both shapes must be equal"): network(input_graph)
deepmind/graph_nets
[ 5225, 778, 5225, 5, 1535703568 ]
def test_incompatible_higher_rank_inputs_no_raise(self, use_edges, use_receiver_nodes, use_sender_nodes, use_globals, field): """No exception should occur if a differently shaped field is not used.""" input_graph = self._get_shaped_input_graph() input_graph = input_graph.replace( **{field: tf.transpose(getattr(input_graph, field), [0, 2, 1, 3])}) network = blocks.EdgeBlock( functools.partial(snt.Conv2D, output_channels=10, kernel_shape=[3, 3]), use_edges=use_edges, use_receiver_nodes=use_receiver_nodes, use_sender_nodes=use_sender_nodes, use_globals=use_globals ) self._assert_build_and_run(network, input_graph)
deepmind/graph_nets
[ 5225, 778, 5225, 5, 1535703568 ]
def test_unused_field_can_be_none( self, use_edges, use_nodes, use_globals, none_field): """Checks that the computation can handle unused fields left as None.""" input_graph = self._get_input_graph([none_field]) edge_block = blocks.EdgeBlock( edge_model_fn=self._edge_model_fn, use_edges=use_edges, use_receiver_nodes=use_nodes, use_sender_nodes=use_nodes, use_globals=use_globals) output_graph = edge_block(input_graph) model_inputs = [] if use_edges: model_inputs.append(input_graph.edges) if use_nodes: model_inputs.append(blocks.broadcast_receiver_nodes_to_edges(input_graph)) model_inputs.append(blocks.broadcast_sender_nodes_to_edges(input_graph)) if use_globals: model_inputs.append(blocks.broadcast_globals_to_edges(input_graph)) model_inputs = tf.concat(model_inputs, axis=-1) self.assertEqual(input_graph.nodes, output_graph.nodes) self.assertEqual(input_graph.globals, output_graph.globals) with tf.Session() as sess: actual_edges, model_inputs_out = sess.run( (output_graph.edges, model_inputs)) expected_output_edges = model_inputs_out * self._scale self.assertNDArrayNear(expected_output_edges, actual_edges, err=1e-4)
deepmind/graph_nets
[ 5225, 778, 5225, 5, 1535703568 ]
def setUp(self): super(NodeBlockTest, self).setUp() self._scale = 10. self._node_model_fn = lambda: lambda features: features * self._scale
deepmind/graph_nets
[ 5225, 778, 5225, 5, 1535703568 ]
def test_output_values( self, use_received_edges, use_sent_edges, use_nodes, use_globals, received_edges_reducer, sent_edges_reducer): """Compares the output of a NodeBlock to an explicit computation.""" input_graph = self._get_input_graph() node_block = blocks.NodeBlock( node_model_fn=self._node_model_fn, use_received_edges=use_received_edges, use_sent_edges=use_sent_edges, use_nodes=use_nodes, use_globals=use_globals, received_edges_reducer=received_edges_reducer, sent_edges_reducer=sent_edges_reducer) output_graph = node_block(input_graph) model_inputs = [] if use_received_edges: model_inputs.append( blocks.ReceivedEdgesToNodesAggregator( received_edges_reducer)(input_graph)) if use_sent_edges: model_inputs.append( blocks.SentEdgesToNodesAggregator(sent_edges_reducer)(input_graph)) if use_nodes: model_inputs.append(input_graph.nodes) if use_globals: model_inputs.append(blocks.broadcast_globals_to_nodes(input_graph)) model_inputs = tf.concat(model_inputs, axis=-1) self.assertEqual(input_graph.edges, output_graph.edges) self.assertEqual(input_graph.globals, output_graph.globals) with tf.Session() as sess: output_graph_out, model_inputs_out = sess.run( (output_graph, model_inputs)) expected_output_nodes = model_inputs_out * self._scale self.assertNDArrayNear( expected_output_nodes, output_graph_out.nodes, err=1e-4)
deepmind/graph_nets
[ 5225, 778, 5225, 5, 1535703568 ]
def test_created_variables(self, use_received_edges, use_sent_edges, use_nodes, use_globals, expected_first_dim_w): """Verifies the variable names and shapes created by a NodeBlock.""" output_size = 10 expected_var_shapes_dict = { "node_block/mlp/linear_0/b:0": [output_size], "node_block/mlp/linear_0/w:0": [expected_first_dim_w, output_size]} input_graph = self._get_input_graph() node_block = blocks.NodeBlock( node_model_fn=functools.partial(snt.nets.MLP, output_sizes=[output_size]), use_received_edges=use_received_edges, use_sent_edges=use_sent_edges, use_nodes=use_nodes, use_globals=use_globals) node_block(input_graph) variables = node_block.get_variables() var_shapes_dict = {var.name: var.get_shape().as_list() for var in variables} self.assertDictEqual(expected_var_shapes_dict, var_shapes_dict)
deepmind/graph_nets
[ 5225, 778, 5225, 5, 1535703568 ]
def test_missing_field_raises_exception( self, use_received_edges, use_sent_edges, use_nodes, use_globals, none_fields): """Checks that missing a required field raises an exception.""" input_graph = self._get_input_graph(none_fields) node_block = blocks.NodeBlock( node_model_fn=self._node_model_fn, use_received_edges=use_received_edges, use_sent_edges=use_sent_edges, use_nodes=use_nodes, use_globals=use_globals) with self.assertRaisesRegexp(ValueError, "field cannot be None"): node_block(input_graph)
deepmind/graph_nets
[ 5225, 778, 5225, 5, 1535703568 ]
def test_missing_aggregation_raises_exception( self, use_received_edges, use_sent_edges, received_edges_reducer, sent_edges_reducer): """Checks that missing a required aggregation argument raises an error.""" with self.assertRaisesRegexp(ValueError, "should not be None"): blocks.NodeBlock( node_model_fn=self._node_model_fn, use_received_edges=use_received_edges, use_sent_edges=use_sent_edges, use_nodes=False, use_globals=False, received_edges_reducer=received_edges_reducer, sent_edges_reducer=sent_edges_reducer)
deepmind/graph_nets
[ 5225, 778, 5225, 5, 1535703568 ]
def test_incompatible_higher_rank_inputs_raises(self, use_received_edges, use_sent_edges, use_nodes, use_globals, field): """An exception should be raised if the inputs have incompatible shapes.""" input_graph = self._get_shaped_input_graph() input_graph = input_graph.replace( **{field: tf.transpose(getattr(input_graph, field), [0, 2, 1, 3])}) network = blocks.NodeBlock( functools.partial(snt.Conv2D, output_channels=10, kernel_shape=[3, 3]), use_received_edges=use_received_edges, use_sent_edges=use_sent_edges, use_nodes=use_nodes, use_globals=use_globals ) with self.assertRaisesRegexp(ValueError, "in both shapes must be equal"): network(input_graph)
deepmind/graph_nets
[ 5225, 778, 5225, 5, 1535703568 ]
def test_incompatible_higher_rank_inputs_no_raise(self, use_received_edges, use_sent_edges, use_nodes, use_globals, field): """No exception should occur if a differently shaped field is not used.""" input_graph = self._get_shaped_input_graph() input_graph = input_graph.replace( **{field: tf.transpose(getattr(input_graph, field), [0, 2, 1, 3])}) network = blocks.NodeBlock( functools.partial(snt.Conv2D, output_channels=10, kernel_shape=[3, 3]), use_received_edges=use_received_edges, use_sent_edges=use_sent_edges, use_nodes=use_nodes, use_globals=use_globals ) self._assert_build_and_run(network, input_graph)
deepmind/graph_nets
[ 5225, 778, 5225, 5, 1535703568 ]
def test_unused_field_can_be_none( self, use_edges, use_nodes, use_globals, none_field): """Checks that the computation can handle unused fields left as None.""" input_graph = self._get_input_graph([none_field]) node_block = blocks.NodeBlock( node_model_fn=self._node_model_fn, use_received_edges=use_edges, use_sent_edges=use_edges, use_nodes=use_nodes, use_globals=use_globals) output_graph = node_block(input_graph) model_inputs = [] if use_edges: model_inputs.append( blocks.ReceivedEdgesToNodesAggregator( tf.unsorted_segment_sum)(input_graph)) model_inputs.append( blocks.SentEdgesToNodesAggregator( tf.unsorted_segment_sum)(input_graph)) if use_nodes: model_inputs.append(input_graph.nodes) if use_globals: model_inputs.append(blocks.broadcast_globals_to_nodes(input_graph)) model_inputs = tf.concat(model_inputs, axis=-1) self.assertEqual(input_graph.edges, output_graph.edges) self.assertEqual(input_graph.globals, output_graph.globals) with tf.Session() as sess: actual_nodes, model_inputs_out = sess.run( (output_graph.nodes, model_inputs)) expected_output_nodes = model_inputs_out * self._scale self.assertNDArrayNear(expected_output_nodes, actual_nodes, err=1e-4)
deepmind/graph_nets
[ 5225, 778, 5225, 5, 1535703568 ]
def setUp(self): super(GlobalBlockTest, self).setUp() self._scale = 10. self._global_model_fn = lambda: lambda features: features * self._scale
deepmind/graph_nets
[ 5225, 778, 5225, 5, 1535703568 ]
def test_output_values( self, use_edges, use_nodes, use_globals, edges_reducer, nodes_reducer): """Compares the output of a GlobalBlock to an explicit computation.""" input_graph = self._get_input_graph() global_block = blocks.GlobalBlock( global_model_fn=self._global_model_fn, use_edges=use_edges, use_nodes=use_nodes, use_globals=use_globals, edges_reducer=edges_reducer, nodes_reducer=nodes_reducer) output_graph = global_block(input_graph) model_inputs = [] if use_edges: model_inputs.append( blocks.EdgesToGlobalsAggregator(edges_reducer)(input_graph)) if use_nodes: model_inputs.append( blocks.NodesToGlobalsAggregator(nodes_reducer)(input_graph)) if use_globals: model_inputs.append(input_graph.globals) model_inputs = tf.concat(model_inputs, axis=-1) self.assertEqual(input_graph.edges, output_graph.edges) self.assertEqual(input_graph.nodes, output_graph.nodes) with tf.Session() as sess: output_graph_out, model_inputs_out = sess.run( (output_graph, model_inputs)) expected_output_globals = model_inputs_out * self._scale self.assertNDArrayNear( expected_output_globals, output_graph_out.globals, err=1e-4)
deepmind/graph_nets
[ 5225, 778, 5225, 5, 1535703568 ]
def test_created_variables(self, use_edges, use_nodes, use_globals, expected_first_dim_w): """Verifies the variable names and shapes created by a GlobalBlock.""" output_size = 10 expected_var_shapes_dict = { "global_block/mlp/linear_0/b:0": [output_size], "global_block/mlp/linear_0/w:0": [expected_first_dim_w, output_size]} input_graph = self._get_input_graph() global_block = blocks.GlobalBlock( global_model_fn=functools.partial(snt.nets.MLP, output_sizes=[output_size]), use_edges=use_edges, use_nodes=use_nodes, use_globals=use_globals) global_block(input_graph) variables = global_block.get_variables() var_shapes_dict = {var.name: var.get_shape().as_list() for var in variables} self.assertDictEqual(expected_var_shapes_dict, var_shapes_dict)
deepmind/graph_nets
[ 5225, 778, 5225, 5, 1535703568 ]
def test_missing_field_raises_exception( self, use_edges, use_nodes, use_globals, none_field): """Checks that missing a required field raises an exception.""" input_graph = self._get_input_graph([none_field]) global_block = blocks.GlobalBlock( global_model_fn=self._global_model_fn, use_edges=use_edges, use_nodes=use_nodes, use_globals=use_globals) with self.assertRaisesRegexp(ValueError, "field cannot be None"): global_block(input_graph)
deepmind/graph_nets
[ 5225, 778, 5225, 5, 1535703568 ]
def test_unused_field_can_be_none( self, use_edges, use_nodes, use_globals, none_field): """Checks that the computation can handle unused fields left as None.""" input_graph = self._get_input_graph([none_field]) global_block = blocks.GlobalBlock( global_model_fn=self._global_model_fn, use_edges=use_edges, use_nodes=use_nodes, use_globals=use_globals) output_graph = global_block(input_graph) model_inputs = [] if use_edges: model_inputs.append( blocks.EdgesToGlobalsAggregator(tf.unsorted_segment_sum)(input_graph)) if use_nodes: model_inputs.append( blocks.NodesToGlobalsAggregator(tf.unsorted_segment_sum)(input_graph)) if use_globals: model_inputs.append(input_graph.globals) model_inputs = tf.concat(model_inputs, axis=-1) self.assertEqual(input_graph.edges, output_graph.edges) self.assertEqual(input_graph.nodes, output_graph.nodes) with tf.Session() as sess: actual_globals, model_inputs_out = sess.run( (output_graph.globals, model_inputs)) expected_output_globals = model_inputs_out * self._scale self.assertNDArrayNear(expected_output_globals, actual_globals, err=1e-4)
deepmind/graph_nets
[ 5225, 778, 5225, 5, 1535703568 ]
def test_incompatible_higher_rank_inputs_raises(self, use_edges, use_nodes, use_globals, field): """An exception should be raised if the inputs have incompatible shapes.""" input_graph = self._get_shaped_input_graph() input_graph = input_graph.replace( **{field: tf.transpose(getattr(input_graph, field), [0, 2, 1, 3])}) network = blocks.GlobalBlock( functools.partial(snt.Conv2D, output_channels=10, kernel_shape=[3, 3]), use_edges=use_edges, use_nodes=use_nodes, use_globals=use_globals ) with self.assertRaisesRegexp(ValueError, "in both shapes must be equal"): network(input_graph)
deepmind/graph_nets
[ 5225, 778, 5225, 5, 1535703568 ]
def test_incompatible_higher_rank_inputs_no_raise(self, use_edges, use_nodes, use_globals, field): """No exception should occur if a differently shaped field is not used.""" input_graph = self._get_shaped_input_graph() input_graph = input_graph.replace( **{field: tf.transpose(getattr(input_graph, field), [0, 2, 1, 3])}) network = blocks.GlobalBlock( functools.partial(snt.Conv2D, output_channels=10, kernel_shape=[3, 3]), use_edges=use_edges, use_nodes=use_nodes, use_globals=use_globals ) self._assert_build_and_run(network, input_graph)
deepmind/graph_nets
[ 5225, 778, 5225, 5, 1535703568 ]
def test_missing_aggregation_raises_exception( self, use_edges, use_nodes, edges_reducer, nodes_reducer): """Checks that missing a required aggregation argument raises an error.""" with self.assertRaisesRegexp(ValueError, "should not be None"): blocks.GlobalBlock( global_model_fn=self._global_model_fn, use_edges=use_edges, use_nodes=use_nodes, use_globals=False, edges_reducer=edges_reducer, nodes_reducer=nodes_reducer)
deepmind/graph_nets
[ 5225, 778, 5225, 5, 1535703568 ]
def test_dynamic_batch_sizes(self, block_constructor): """Checks that all batch sizes are as expected through a GraphNetwork.""" input_graph = self._get_input_graph() placeholders = input_graph.map(_mask_leading_dimension, graphs.ALL_FIELDS) model = block_constructor( functools.partial(snt.nets.MLP, output_sizes=[10])) output = model(placeholders) with tf.Session() as sess: sess.run(tf.global_variables_initializer()) other_input_graph = utils_np.data_dicts_to_graphs_tuple( [SMALL_GRAPH_1, SMALL_GRAPH_2]) actual = sess.run(output, {placeholders: other_input_graph}) for k, v in other_input_graph._asdict().items(): self.assertEqual(v.shape[0], getattr(actual, k).shape[0])
deepmind/graph_nets
[ 5225, 778, 5225, 5, 1535703568 ]
def test_dtypes(self, data_dtype, indices_dtype, block_constructor): """Checks that all the output types are as expected for blocks.""" input_graph = self._get_input_graph() input_graph = input_graph.map(lambda v: tf.cast(v, data_dtype), ["nodes", "edges", "globals"]) input_graph = input_graph.map(lambda v: tf.cast(v, indices_dtype), ["receivers", "senders"]) model = block_constructor( functools.partial(snt.nets.MLP, output_sizes=[10])) output = model(input_graph) for field in ["nodes", "globals", "edges"]: self.assertEqual(data_dtype, getattr(output, field).dtype) for field in ["receivers", "senders"]: self.assertEqual(indices_dtype, getattr(output, field).dtype)
deepmind/graph_nets
[ 5225, 778, 5225, 5, 1535703568 ]
def read_projects_to_test(workflows_path): with open(os.path.join(workflows_path, "projects_to_test.txt"), "r") as f: file_content = f.read() projects = file_content.splitlines() return projects
google/CFU-Playground
[ 357, 91, 357, 130, 1615325898 ]
def read_proj_excluded_targets(projects_path, proj_name): proj_path = os.path.join(projects_path, proj_name) try: with open(os.path.join(proj_path, "ci", "ci_exclude_targets.txt"), "r") as f: file_content = f.read() excluded_targets = file_content.splitlines() except IOError: excluded_targets = "" return excluded_targets
google/CFU-Playground
[ 357, 91, 357, 130, 1615325898 ]
def get_proj_supported_targets(all_targets, excluded_targets): proj_targets = [x for x in all_targets if x not in excluded_targets] return proj_targets
google/CFU-Playground
[ 357, 91, 357, 130, 1615325898 ]
def list_to_json_str(final_list): json_str = json.dumps(final_list) return json_str
google/CFU-Playground
[ 357, 91, 357, 130, 1615325898 ]
def main(config="../../config.yaml", namespace=""): # obtain config if isinstance(config, str): config = load_job_config(config) parties = config.parties guest = parties.guest[0] host = parties.host[0] guest_train_data = {"name": "breast_hetero_guest", "namespace": f"experiment{namespace}"} host_train_data = {"name": "breast_hetero_host", "namespace": f"experiment{namespace}"} pipeline = PipeLine().set_initiator(role='guest', party_id=guest).set_roles(guest=guest, host=host) reader_0 = Reader(name="reader_0") reader_0.get_party_instance(role='guest', party_id=guest).component_param(table=guest_train_data) reader_0.get_party_instance(role='host', party_id=host).component_param(table=host_train_data) data_transform_0 = DataTransform(name="data_transform_0") data_transform_0.get_party_instance(role='guest', party_id=guest).component_param(with_label=True) data_transform_0.get_party_instance(role='host', party_id=host).component_param(with_label=False) pipeline.add_component(reader_0) pipeline.add_component(data_transform_0, data=Data(data=reader_0.output.data)) pipeline.compile() pipeline.fit()
FederatedAI/FATE
[ 4887, 1449, 4887, 637, 1548325963 ]
def test_info_all(self): request = "statistics" nodes_info = self.as_connection.info_all(request) assert nodes_info is not None assert type(nodes_info) == dict
aerospike/aerospike-client-python
[ 122, 108, 122, 67, 1405113896 ]
def test_info_all_with_None_policy(self): request = "statistics" nodes_info = self.as_connection.info_all(request, None) assert nodes_info is not None assert type(nodes_info) == dict
aerospike/aerospike-client-python
[ 122, 108, 122, 67, 1405113896 ]
def test_positive_info_all(self, container_type, container_name): """ Test to see whether a namespace, set, and bin exist after a key is added """ key = ('test', 'demo', 'list_key') rec = {'names': ['John', 'Marlen', 'Steve']} self.as_connection.put(key, rec) response = self.as_connection.info_all(container_type) self.as_connection.remove(key) found = False for keys in response.keys(): for value in response[keys]: if value is not None: if container_name in value: found = True assert found
aerospike/aerospike-client-python
[ 122, 108, 122, 67, 1405113896 ]
def test_info_all_for_invalid_request(self): request = "fake_request_string_not_real" hosts = [host for host in self.connection_config['hosts']] nodes_info = self.as_connection.info_all(request) assert isinstance(nodes_info, dict) assert nodes_info.values() is not None
aerospike/aerospike-client-python
[ 122, 108, 122, 67, 1405113896 ]
def test_info_all_without_parameters(self): with pytest.raises(TypeError) as err_info: self.as_connection.info_all()
aerospike/aerospike-client-python
[ 122, 108, 122, 67, 1405113896 ]
def __init__(self): super(MidonetPluginV2, self).__init__() # Instantiate MidoNet API client conf = cfg.CONF.MIDONET neutron_extensions.append_api_extensions_path(extensions.__path__) self.api_cli = client.MidonetClient(conf.midonet_uri, conf.username, conf.password, project_id=conf.project_id) self.setup_rpc() self.repair_quotas_table() self.base_binding_dict = { portbindings.VIF_TYPE: portbindings.VIF_TYPE_MIDONET, portbindings.VNIC_TYPE: portbindings.VNIC_NORMAL, portbindings.VIF_DETAILS: { # TODO(rkukura): Replace with new VIF security details portbindings.CAP_PORT_FILTER: 'security-group' in self.supported_extension_aliases}} self.network_scheduler = importutils.import_object( cfg.CONF.network_scheduler_driver )
midokura/python-neutron-plugin-midonet
[ 2, 3, 2, 1, 1408526215 ]
def repair_quotas_table(self): query = ("CREATE TABLE `quotas` ( `id` varchar(36) NOT NULL, " "`tenant_id` varchar(255) DEFAULT NULL, " "`resource` varchar(255) DEFAULT NULL, " "`limit` int(11) DEFAULT NULL, " "PRIMARY KEY (`id`) ) ENGINE=InnoDB DEFAULT CHARSET=utf8;") session = db.get_session() try: session.execute(query) except sa_exc.OperationalError: # If the table already exists, then this is expected. pass
midokura/python-neutron-plugin-midonet
[ 2, 3, 2, 1, 1408526215 ]
def create_network(self, context, network): """Create Neutron network. Create a new Neutron network and its corresponding MidoNet bridge. """ LOG.info(_('MidonetPluginV2.create_network called: network=%r'), network) net = self._process_create_network(context, network) try: self.api_cli.create_network(net) except Exception as ex: LOG.error(_("Failed to create a network %(net_id)s in Midonet:" "%(err)s"), {"net_id": net["id"], "err": ex}) with excutils.save_and_reraise_exception(): super(MidonetPluginV2, self).delete_network(context, net['id']) LOG.info(_("MidonetPluginV2.create_network exiting: net=%r"), net) return net
midokura/python-neutron-plugin-midonet
[ 2, 3, 2, 1, 1408526215 ]
def update_network(self, context, id, network): """Update Neutron network. Update an existing Neutron network and its corresponding MidoNet bridge. """ LOG.info(_("MidonetPluginV2.update_network called: id=%(id)r, " "network=%(network)r"), {'id': id, 'network': network}) with context.session.begin(subtransactions=True): net = super(MidonetPluginV2, self).update_network( context, id, network) self._process_l3_update(context, net, network['network']) self.api_cli.update_network(id, net) LOG.info(_("MidonetPluginV2.update_network exiting: net=%r"), net) return net
midokura/python-neutron-plugin-midonet
[ 2, 3, 2, 1, 1408526215 ]
def delete_network(self, context, id): """Delete a network and its corresponding MidoNet bridge.""" LOG.info(_("MidonetPluginV2.delete_network called: id=%r"), id) with context.session.begin(subtransactions=True): self._process_l3_delete(context, id) super(MidonetPluginV2, self).delete_network(context, id) self.api_cli.delete_network(id) LOG.info(_("MidonetPluginV2.delete_network exiting: id=%r"), id)
midokura/python-neutron-plugin-midonet
[ 2, 3, 2, 1, 1408526215 ]
def create_subnet(self, context, subnet): """Create Neutron subnet. Creates a Neutron subnet and a DHCP entry in MidoNet bridge. """ LOG.info(_("MidonetPluginV2.create_subnet called: subnet=%r"), subnet) sn_entry = super(MidonetPluginV2, self).create_subnet(context, subnet) try: self.api_cli.create_subnet(sn_entry) except Exception as ex: LOG.error(_("Failed to create a subnet %(s_id)s in Midonet:" "%(err)s"), {"s_id": sn_entry["id"], "err": ex}) with excutils.save_and_reraise_exception(): super(MidonetPluginV2, self).delete_subnet(context, sn_entry['id']) LOG.info(_("MidonetPluginV2.create_subnet exiting: sn_entry=%r"), sn_entry) return sn_entry
midokura/python-neutron-plugin-midonet
[ 2, 3, 2, 1, 1408526215 ]
def delete_subnet(self, context, id): """Delete Neutron subnet. Delete neutron network and its corresponding MidoNet bridge. """ LOG.info(_("MidonetPluginV2.delete_subnet called: id=%s"), id) with context.session.begin(subtransactions=True): super(MidonetPluginV2, self).delete_subnet(context, id) self.api_cli.delete_subnet(id) LOG.info(_("MidonetPluginV2.delete_subnet exiting"))
midokura/python-neutron-plugin-midonet
[ 2, 3, 2, 1, 1408526215 ]
def update_subnet(self, context, id, subnet): """Update the subnet with new info. """ LOG.info(_("MidonetPluginV2.update_subnet called: id=%s"), id) with context.session.begin(subtransactions=True): s = super(MidonetPluginV2, self).update_subnet(context, id, subnet) self.api_cli.update_subnet(id, s) return s
midokura/python-neutron-plugin-midonet
[ 2, 3, 2, 1, 1408526215 ]
def create_port(self, context, port): """Create a L2 port in Neutron/MidoNet.""" LOG.info(_("MidonetPluginV2.create_port called: port=%r"), port) new_port = self._process_create_port(context, port) try: self.api_cli.create_port(new_port) except Exception as ex: LOG.error(_("Failed to create a port %(new_port)s: %(err)s"), {"new_port": new_port, "err": ex}) with excutils.save_and_reraise_exception(): super(MidonetPluginV2, self).delete_port(context, new_port['id']) LOG.info(_("MidonetPluginV2.create_port exiting: port=%r"), new_port) return new_port
midokura/python-neutron-plugin-midonet
[ 2, 3, 2, 1, 1408526215 ]
def delete_port(self, context, id, l3_port_check=True): """Delete a neutron port and corresponding MidoNet bridge port.""" LOG.info(_("MidonetPluginV2.delete_port called: id=%(id)s " "l3_port_check=%(l3_port_check)r"), {'id': id, 'l3_port_check': l3_port_check}) # if needed, check to see if this is a port owned by # and l3-router. If so, we should prevent deletion. if l3_port_check: self.prevent_l3_port_deletion(context, id) with context.session.begin(subtransactions=True): super(MidonetPluginV2, self).disassociate_floatingips( context, id, do_notify=False) super(MidonetPluginV2, self).delete_port(context, id) self.api_cli.delete_port(id) LOG.info(_("MidonetPluginV2.delete_port exiting: id=%r"), id)
midokura/python-neutron-plugin-midonet
[ 2, 3, 2, 1, 1408526215 ]
def update_port(self, context, id, port): """Handle port update, including security groups and fixed IPs.""" LOG.info(_("MidonetPluginV2.update_port called: id=%(id)s " "port=%(port)r"), {'id': id, 'port': port}) with context.session.begin(subtransactions=True): # update the port DB p = super(MidonetPluginV2, self).update_port(context, id, port) self._process_port_update(context, id, port, p) self._process_portbindings_create_and_update(context, port['port'], p) self.api_cli.update_port(id, p) LOG.info(_("MidonetPluginV2.update_port exiting: p=%r"), p) return p
midokura/python-neutron-plugin-midonet
[ 2, 3, 2, 1, 1408526215 ]
def create_router(self, context, router): """Handle router creation. When a new Neutron router is created, its corresponding MidoNet router is also created. In MidoNet, this router is initialized with chains for inbound and outbound traffic, which will be used to hold other chains that include various rules, such as NAT. :param router: Router information provided to create a new router. """ LOG.info(_("MidonetPluginV2.create_router called: router=%(router)s"), {"router": router}) r = super(MidonetPluginV2, self).create_router(context, router) try: self.api_cli.create_router(r) except Exception as ex: LOG.error(_("Failed to create a router %(r_id)s in Midonet:" "%(err)s"), {"r_id": r["id"], "err": ex}) with excutils.save_and_reraise_exception(): super(MidonetPluginV2, self).delete_router(context, r['id']) LOG.info(_("MidonetPluginV2.create_router exiting: " "router=%(router)s."), {"router": r}) return r
midokura/python-neutron-plugin-midonet
[ 2, 3, 2, 1, 1408526215 ]
def update_router(self, context, id, router): """Handle router updates.""" LOG.info(_("MidonetPluginV2.update_router called: id=%(id)s " "router=%(router)r"), {"id": id, "router": router}) with context.session.begin(subtransactions=True): r = super(MidonetPluginV2, self).update_router(context, id, router) self.api_cli.update_router(id, r) LOG.info(_("MidonetPluginV2.update_router exiting: router=%r"), r) return r
midokura/python-neutron-plugin-midonet
[ 2, 3, 2, 1, 1408526215 ]
def delete_router(self, context, id): """Handler for router deletion. Deleting a router on Neutron simply means deleting its corresponding router in MidoNet. :param id: router ID to remove """ LOG.info(_("MidonetPluginV2.delete_router called: id=%s"), id) with context.session.begin(subtransactions=True): super(MidonetPluginV2, self).delete_router(context, id) self.api_cli.delete_router(id) LOG.info(_("MidonetPluginV2.delete_router exiting: id=%s"), id)
midokura/python-neutron-plugin-midonet
[ 2, 3, 2, 1, 1408526215 ]
def add_router_interface(self, context, router_id, interface_info): """Handle router linking with network.""" LOG.info(_("MidonetPluginV2.add_router_interface called: " "router_id=%(router_id)s " "interface_info=%(interface_info)r"), {'router_id': router_id, 'interface_info': interface_info}) info = super(MidonetPluginV2, self).add_router_interface( context, router_id, interface_info) try: self.api_cli.add_router_interface(router_id, info) except Exception: LOG.error(_("Failed to create MidoNet resources to add router " "interface. info=%(info)s, router_id=%(router_id)s"), {"info": info, "router_id": router_id}) with excutils.save_and_reraise_exception(): self.remove_router_interface(context, router_id, info) LOG.info(_("MidonetPluginV2.add_router_interface exiting: info=%r"), info) return info
midokura/python-neutron-plugin-midonet
[ 2, 3, 2, 1, 1408526215 ]
def remove_router_interface(self, context, router_id, interface_info): """Handle router un-linking with network.""" LOG.info(_("MidonetPluginV2.remove_router_interface called: " "router_id=%(router_id)s " "interface_info=%(interface_info)r"), {'router_id': router_id, 'interface_info': interface_info}) with context.session.begin(subtransactions=True): info = super(MidonetPluginV2, self).remove_router_interface( context, router_id, interface_info) self.api_cli.remove_router_interface(router_id, interface_info) LOG.info(_("MidonetPluginV2.remove_router_interface exiting: " "info=%r"), info) return info
midokura/python-neutron-plugin-midonet
[ 2, 3, 2, 1, 1408526215 ]
def create_floatingip(self, context, floatingip): """Handle floating IP creation.""" LOG.info(_("MidonetPluginV2.create_floatingip called: ip=%r"), floatingip) fip = super(MidonetPluginV2, self).create_floatingip(context, floatingip) try: self.api_cli.create_floating_ip(fip) except Exception as ex: LOG.error(_("Failed to create floating ip %(fip)s: %(err)s"), {"fip": fip, "err": ex}) with excutils.save_and_reraise_exception(): # Try removing the fip self.delete_floatingip(context, fip['id']) LOG.info(_("MidonetPluginV2.create_floatingip exiting: fip=%r"), fip) return fip
midokura/python-neutron-plugin-midonet
[ 2, 3, 2, 1, 1408526215 ]
def delete_floatingip(self, context, id): """Handle floating IP deletion.""" LOG.info(_("MidonetPluginV2.delete_floatingip called: id=%s"), id) with context.session.begin(subtransactions=True): super(MidonetPluginV2, self).delete_floatingip(context, id) self.api_cli.delete_floating_ip(id) LOG.info(_("MidonetPluginV2.delete_floatingip exiting: id=%r"), id)
midokura/python-neutron-plugin-midonet
[ 2, 3, 2, 1, 1408526215 ]
def update_floatingip(self, context, id, floatingip): """Handle floating IP association and disassociation.""" LOG.info(_("MidonetPluginV2.update_floatingip called: id=%(id)s " "floatingip=%(floatingip)s "), {'id': id, 'floatingip': floatingip}) with context.session.begin(subtransactions=True): fip = super(MidonetPluginV2, self).update_floatingip(context, id, floatingip) # Update status based on association if fip.get('port_id') is None: fip['status'] = n_const.FLOATINGIP_STATUS_DOWN else: fip['status'] = n_const.FLOATINGIP_STATUS_ACTIVE self.update_floatingip_status(context, id, fip['status']) self.api_cli.update_floating_ip(id, fip) LOG.info(_("MidonetPluginV2.update_floating_ip exiting: fip=%s"), fip) return fip
midokura/python-neutron-plugin-midonet
[ 2, 3, 2, 1, 1408526215 ]
def create_security_group(self, context, security_group, default_sg=False): """Create security group. Create a new security group, including the default security group. In MidoNet, this means creating a pair of chains, inbound and outbound, as well as a new port group. """ LOG.info(_("MidonetPluginV2.create_security_group called: " "security_group=%(security_group)s " "default_sg=%(default_sg)s "), {'security_group': security_group, 'default_sg': default_sg}) sg = security_group.get('security_group') tenant_id = self._get_tenant_id_for_create(context, sg) if not default_sg: self._ensure_default_security_group(context, tenant_id) # Create the Neutron sg first sg = super(MidonetPluginV2, self).create_security_group( context, security_group, default_sg) try: # Process the MidoNet side self.api_cli.create_security_group(sg) except Exception: LOG.error(_("Failed to create MidoNet resources for sg %(sg)r"), {"sg": sg}) with excutils.save_and_reraise_exception(): super(MidonetPluginV2, self).delete_security_group(context, sg['id']) LOG.info(_("MidonetPluginV2.create_security_group exiting: sg=%r"), sg) return sg
midokura/python-neutron-plugin-midonet
[ 2, 3, 2, 1, 1408526215 ]
def delete_security_group(self, context, id): """Delete chains for Neutron security group.""" LOG.info(_("MidonetPluginV2.delete_security_group called: id=%s"), id) sg = super(MidonetPluginV2, self).get_security_group(context, id) if not sg: raise ext_sg.SecurityGroupNotFound(id=id) if sg["name"] == 'default' and not context.is_admin: raise ext_sg.SecurityGroupCannotRemoveDefault() with context.session.begin(subtransactions=True): super(MidonetPluginV2, self).delete_security_group(context, id) self.api_cli.delete_security_group(id) LOG.info(_("MidonetPluginV2.delete_security_group exiting: id=%r"), id)
midokura/python-neutron-plugin-midonet
[ 2, 3, 2, 1, 1408526215 ]
def create_security_group_rule(self, context, security_group_rule): """Create a security group rule Create a security group rule in the Neutron DB and corresponding MidoNet resources in its data store. """ LOG.info(_("MidonetPluginV2.create_security_group_rule called: " "security_group_rule=%(security_group_rule)r"), {'security_group_rule': security_group_rule}) rule = super(MidonetPluginV2, self).create_security_group_rule( context, security_group_rule) try: self.api_cli.create_security_group_rule(rule) except Exception as ex: LOG.error(_('Failed to create security group rule %(sg)s,' 'error: %(err)s'), {'sg': rule, 'err': ex}) with excutils.save_and_reraise_exception(): super(MidonetPluginV2, self).delete_security_group_rule( context, rule['id']) LOG.info(_("MidonetPluginV2.create_security_group_rule exiting: " "rule=%r"), rule) return rule
midokura/python-neutron-plugin-midonet
[ 2, 3, 2, 1, 1408526215 ]
def create_security_group_rule_bulk(self, context, security_group_rules): """Create multiple security group rules Create multiple security group rules in the Neutron DB and corresponding MidoNet resources in its data store. """ LOG.info(_("MidonetPluginV2.create_security_group_rule_bulk called: " "security_group_rules=%(security_group_rules)r"), {'security_group_rules': security_group_rules}) rules = super( MidonetPluginV2, self).create_security_group_rule_bulk_native( context, security_group_rules) try: self.api_cli.create_security_group_rule_bulk(rules) except Exception as ex: LOG.error(_("Failed to create bulk security group rules %(sg)s, " "error: %(err)s"), {"sg": rules, "err": ex}) with excutils.save_and_reraise_exception(): for rule in rules: super(MidonetPluginV2, self).delete_security_group_rule( context, rule['id']) LOG.info(_("MidonetPluginV2.create_security_group_rule_bulk exiting: " "rules=%r"), rules) return rules
midokura/python-neutron-plugin-midonet
[ 2, 3, 2, 1, 1408526215 ]
def delete_security_group_rule(self, context, sg_rule_id): """Delete a security group rule Delete a security group rule from the Neutron DB and corresponding MidoNet resources from its data store. """ LOG.info(_("MidonetPluginV2.delete_security_group_rule called: " "sg_rule_id=%s"), sg_rule_id) with context.session.begin(subtransactions=True): super(MidonetPluginV2, self).delete_security_group_rule(context, sg_rule_id) self.api_cli.delete_security_group_rule(sg_rule_id) LOG.info(_("MidonetPluginV2.delete_security_group_rule exiting: " "id=%r"), sg_rule_id)
midokura/python-neutron-plugin-midonet
[ 2, 3, 2, 1, 1408526215 ]
def create_vip(self, context, vip): LOG.debug("MidonetPluginV2.create_vip called: %(vip)r", {'vip': vip}) with context.session.begin(subtransactions=True): v = super(MidonetPluginV2, self).create_vip(context, vip) self.api_cli.create_vip(v) v['status'] = constants.ACTIVE self.update_status(context, loadbalancer_db.Vip, v['id'], v['status']) LOG.debug("MidonetPluginV2.create_vip exiting: id=%r", v['id']) return v
midokura/python-neutron-plugin-midonet
[ 2, 3, 2, 1, 1408526215 ]
def delete_vip(self, context, id): LOG.debug("MidonetPluginV2.delete_vip called: id=%(id)r", {'id': id}) with context.session.begin(subtransactions=True): super(MidonetPluginV2, self).delete_vip(context, id) self.api_cli.delete_vip(id) LOG.debug("MidonetPluginV2.delete_vip exiting: id=%(id)r", {'id': id})
midokura/python-neutron-plugin-midonet
[ 2, 3, 2, 1, 1408526215 ]
def update_vip(self, context, id, vip): LOG.debug("MidonetPluginV2.update_vip called: id=%(id)r, " "vip=%(vip)r", {'id': id, 'vip': vip}) with context.session.begin(subtransactions=True): v = super(MidonetPluginV2, self).update_vip(context, id, vip) self.api_cli.update_vip(id, v) LOG.debug("MidonetPluginV2.update_vip exiting: id=%(id)r, " "vip=%(vip)r", {'id': id, 'vip': v}) return v
midokura/python-neutron-plugin-midonet
[ 2, 3, 2, 1, 1408526215 ]
def create_pool(self, context, pool): LOG.debug("MidonetPluginV2.create_pool called: %(pool)r", {'pool': pool}) router_id = pool['pool'].get(rsi.ROUTER_ID) if not router_id: msg = _("router_id is required for pool") raise n_exc.BadRequest(resource='router', msg=msg) if self._get_resource_router_id_binding(context, loadbalancer_db.Pool, router_id=router_id): msg = _("A pool is already associated with the router") raise n_exc.BadRequest(resource='router', msg=msg) with context.session.begin(subtransactions=True): p = super(MidonetPluginV2, self).create_pool(context, pool) res = { 'id': p['id'], rsi.ROUTER_ID: router_id } self._process_create_resource_router_id(context, res, loadbalancer_db.Pool) p[rsi.ROUTER_ID] = router_id self.api_cli.create_pool(p) p['status'] = constants.ACTIVE self.update_status(context, loadbalancer_db.Pool, p['id'], p['status']) LOG.debug("MidonetPluginV2.create_pool exiting: %(pool)r", {'pool': p}) return p
midokura/python-neutron-plugin-midonet
[ 2, 3, 2, 1, 1408526215 ]
def update_pool(self, context, id, pool): LOG.debug("MidonetPluginV2.update_pool called: id=%(id)r, " "pool=%(pool)r", {'id': id, 'pool': pool}) with context.session.begin(subtransactions=True): p = super(MidonetPluginV2, self).update_pool(context, id, pool) self.api_cli.update_pool(id, p) LOG.debug("MidonetPluginV2.update_pool exiting: id=%(id)r, " "pool=%(pool)r", {'id': id, 'pool': pool}) return p
midokura/python-neutron-plugin-midonet
[ 2, 3, 2, 1, 1408526215 ]
def delete_pool(self, context, id): LOG.debug("MidonetPluginV2.delete_pool called: %(id)r", {'id': id}) with context.session.begin(subtransactions=True): self._delete_resource_router_id_binding(context, id, loadbalancer_db.Pool) super(MidonetPluginV2, self).delete_pool(context, id) self.api_cli.delete_pool(id) LOG.debug("MidonetPluginV2.delete_pool exiting: %(id)r", {'id': id})
midokura/python-neutron-plugin-midonet
[ 2, 3, 2, 1, 1408526215 ]
def create_member(self, context, member): LOG.debug("MidonetPluginV2.create_member called: %(member)r", {'member': member}) with context.session.begin(subtransactions=True): m = super(MidonetPluginV2, self).create_member(context, member) self.api_cli.create_member(m) m['status'] = constants.ACTIVE self.update_status(context, loadbalancer_db.Member, m['id'], m['status']) LOG.debug("MidonetPluginV2.create_member exiting: %(member)r", {'member': m}) return m
midokura/python-neutron-plugin-midonet
[ 2, 3, 2, 1, 1408526215 ]
def update_member(self, context, id, member): LOG.debug("MidonetPluginV2.update_member called: id=%(id)r, " "member=%(member)r", {'id': id, 'member': member}) with context.session.begin(subtransactions=True): m = super(MidonetPluginV2, self).update_member(context, id, member) self.api_cli.update_member(id, m) LOG.debug("MidonetPluginV2.update_member exiting: id=%(id)r, " "member=%(member)r", {'id': id, 'member': m}) return m
midokura/python-neutron-plugin-midonet
[ 2, 3, 2, 1, 1408526215 ]
def delete_member(self, context, id): LOG.debug("MidonetPluginV2.delete_member called: %(id)r", {'id': id}) with context.session.begin(subtransactions=True): super(MidonetPluginV2, self).delete_member(context, id) self.api_cli.delete_member(id) LOG.debug("MidonetPluginV2.delete_member exiting: %(id)r", {'id': id})
midokura/python-neutron-plugin-midonet
[ 2, 3, 2, 1, 1408526215 ]
def create_health_monitor(self, context, health_monitor): LOG.debug("MidonetPluginV2.create_health_monitor called: " " %(health_monitor)r", {'health_monitor': health_monitor}) with context.session.begin(subtransactions=True): hm = super(MidonetPluginV2, self).create_health_monitor( context, health_monitor) self.api_cli.create_health_monitor(hm) LOG.debug("MidonetPluginV2.create_health_monitor exiting: " "%(health_monitor)r", {'health_monitor': hm}) return hm
midokura/python-neutron-plugin-midonet
[ 2, 3, 2, 1, 1408526215 ]
def update_health_monitor(self, context, id, health_monitor): LOG.debug("MidonetPluginV2.update_health_monitor called: id=%(id)r, " "health_monitor=%(health_monitor)r", {'id': id, 'health_monitor': health_monitor}) with context.session.begin(subtransactions=True): hm = super(MidonetPluginV2, self).update_health_monitor( context, id, health_monitor) self.api_cli.update_health_monitor(id, hm) LOG.debug("MidonetPluginV2.update_health_monitor exiting: id=%(id)r, " "health_monitor=%(health_monitor)r", {'id': id, 'health_monitor': hm}) return hm
midokura/python-neutron-plugin-midonet
[ 2, 3, 2, 1, 1408526215 ]
def delete_health_monitor(self, context, id): LOG.debug("MidonetPluginV2.delete_health_monitor called: %(id)r", {'id': id}) with context.session.begin(subtransactions=True): super(MidonetPluginV2, self).delete_health_monitor(context, id) self.api_cli.delete_health_monitor(id) LOG.debug("MidonetPluginV2.delete_health_monitor exiting: %(id)r", {'id': id})
midokura/python-neutron-plugin-midonet
[ 2, 3, 2, 1, 1408526215 ]
def create_pool_health_monitor(self, context, health_monitor, pool_id): LOG.debug("MidonetPluginV2.create_pool_health_monitor called: " "hm=%(health_monitor)r, pool_id=%(pool_id)r", {'health_monitor': health_monitor, 'pool_id': pool_id}) pool = self.get_pool(context, pool_id) monitors = pool.get('health_monitors') if len(monitors) > 0: msg = _("MidoNet right now can only support one monitor per pool") raise n_exc.BadRequest(resource='pool_health_monitor', msg=msg) hm = health_monitor['health_monitor'] with context.session.begin(subtransactions=True): monitors = super(MidonetPluginV2, self).create_pool_health_monitor( context, health_monitor, pool_id) self.api_cli.create_pool_health_monitor(hm, pool_id) LOG.debug("MidonetPluginV2.create_pool_health_monitor exiting: " "%(health_monitor)r, %(pool_id)r", {'health_monitor': health_monitor, 'pool_id': pool_id}) return monitors
midokura/python-neutron-plugin-midonet
[ 2, 3, 2, 1, 1408526215 ]
def _mobilenet_v2(net, depth_multiplier, output_stride, reuse=None, scope=None, final_endpoint=None): """Auxiliary function to add support for 'reuse' to mobilenet_v2. Args: net: Input tensor of shape [batch_size, height, width, channels]. depth_multiplier: Float multiplier for the depth (number of channels) for all convolution ops. The value must be greater than zero. Typical usage will be to set this value in (0, 1) to reduce the number of parameters or computation cost of the model. output_stride: An integer that specifies the requested ratio of input to output spatial resolution. If not None, then we invoke atrous convolution if necessary to prevent the network from reducing the spatial resolution of the activation maps. Allowed values are 8 (accurate fully convolutional mode), 16 (fast fully convolutional mode), 32 (classification mode). reuse: Reuse model variables. scope: Optional variable scope. final_endpoint: The endpoint to construct the network up to. Returns: Features extracted by MobileNetv2. """ with tf.variable_scope( scope, 'MobilenetV2', [net], reuse=reuse) as scope: return mobilenet_v2.mobilenet_base( net, conv_defs=mobilenet_v2.V2_DEF, depth_multiplier=depth_multiplier, min_depth=8 if depth_multiplier == 1.0 else 1, divisible_by=8 if depth_multiplier == 1.0 else 1, final_endpoint=final_endpoint or _MOBILENET_V2_FINAL_ENDPOINT, output_stride=output_stride, scope=scope)
cshallue/models
[ 6, 3, 6, 1, 1473384593 ]
def _preprocess_subtract_imagenet_mean(inputs): """Subtract Imagenet mean RGB value.""" mean_rgb = tf.reshape(_MEAN_RGB, [1, 1, 1, 3]) return inputs - mean_rgb
cshallue/models
[ 6, 3, 6, 1, 1473384593 ]
def mean_pixel(model_variant=None): """Gets mean pixel value. This function returns a different mean pixel value, depending on the input model_variant, which adopts different preprocessing functions. We currently handle the following preprocessing functions: (1) _preprocess_subtract_imagenet_mean. We simply return the mean pixel value. (2) _preprocess_zero_mean_unit_range. We return [127.5, 127.5, 127.5]. The return values are used in a way that the padded regions after pre-processing will contain value 0. Args: model_variant: Model variant (string) for feature extraction. For backwards compatibility, model_variant=None returns _MEAN_RGB. Returns: Mean pixel value. """ if model_variant in ['resnet_v1_50', 'resnet_v1_101'] or model_variant is None: return _MEAN_RGB else: return [127.5, 127.5, 127.5]
cshallue/models
[ 6, 3, 6, 1, 1473384593 ]
def __virtual__(): """ Confirm this module is on a Debian-based system """ # If your minion is running an OS which is Debian-based but does not have # an "os_family" grain of Debian, then the proper fix is NOT to check for # the minion's "os_family" grain here in the __virtual__. The correct fix # is to add the value from the minion's "os" grain to the _OS_FAMILY_MAP # dict in salt/grains/core.py, so that we assign the correct "os_family" # grain to the minion. if __grains__.get("os_family") == "Debian": return __virtualname__ return False, "The pkg module could not be loaded: unsupported OS family"
saltstack/salt
[ 13089, 5388, 13089, 3074, 1298233016 ]
def __init__(self, line, file=None): self.invalid = False self.comps = [] self.disabled = False self.comment = "" self.dist = "" self.type = "" self.uri = "" self.line = line self.architectures = [] self.file = file if not self.file: self.file = str(pathlib.Path(os.sep, "etc", "apt", "sources.list")) self._parse_sources(line)
saltstack/salt
[ 13089, 5388, 13089, 3074, 1298233016 ]
def _parse_sources(self, line): """ Parse lines from sources files """ self.disabled = False repo_line = self.line.strip().split() if not repo_line: self.invalid = True return False if repo_line[0].startswith("#"): repo_line.pop(0) self.disabled = True if repo_line[0] not in ["deb", "deb-src", "rpm", "rpm-src"]: self.invalid = True return False if repo_line[1].startswith("["): opts = re.search(r"\[.*\]", self.line).group(0).strip("[]") repo_line = [x for x in (line.strip("[]") for line in repo_line) if x] for opt in opts.split(): if opt.startswith("arch"): self.architectures.extend(opt.split("=", 1)[1].split(",")) try: repo_line.pop(repo_line.index(opt)) except ValueError: repo_line.pop(repo_line.index("[" + opt + "]")) self.type = repo_line[0] self.uri = repo_line[1] self.dist = repo_line[2] self.comps = repo_line[3:]
saltstack/salt
[ 13089, 5388, 13089, 3074, 1298233016 ]
def __init__(self): self.list = [] self.files = [ pathlib.Path(os.sep, "etc", "apt", "sources.list"), pathlib.Path(os.sep, "etc", "apt", "sources.list.d"), ] for file in self.files: if file.is_dir(): for fp in file.glob("**/*.list"): self.add_file(file=fp) else: self.add_file(file)
saltstack/salt
[ 13089, 5388, 13089, 3074, 1298233016 ]
def add_file(self, file): """ Add the lines of a file to self.list """ if file.is_file(): with salt.utils.files.fopen(file) as source: for line in source: self.list.append(SourceEntry(line, file=str(file))) else: log.debug("The apt sources file %s does not exist", file)
saltstack/salt
[ 13089, 5388, 13089, 3074, 1298233016 ]
def remove(self, source): """ remove a source from the list of sources """ self.list.remove(source)
saltstack/salt
[ 13089, 5388, 13089, 3074, 1298233016 ]
def _get_ppa_info_from_launchpad(owner_name, ppa_name): """ Idea from softwareproperties.ppa. Uses urllib2 which sacrifices server cert verification. This is used as fall-back code or for secure PPAs :param owner_name: :param ppa_name: :return: """ lp_url = "https://launchpad.net/api/1.0/~{}/+archive/{}".format( owner_name, ppa_name ) request = _Request(lp_url, headers={"Accept": "application/json"}) lp_page = _urlopen(request) return salt.utils.json.load(lp_page)
saltstack/salt
[ 13089, 5388, 13089, 3074, 1298233016 ]
def _call_apt(args, scope=True, **kwargs): """ Call apt* utilities. """ cmd = [] if ( scope and salt.utils.systemd.has_scope(__context__) and __salt__["config.get"]("systemd.scope", True) ): cmd.extend(["systemd-run", "--scope", "--description", '"{}"'.format(__name__)]) cmd.extend(args) params = { "output_loglevel": "trace", "python_shell": False, "env": salt.utils.environment.get_module_environment(globals()), } params.update(kwargs) cmd_ret = __salt__["cmd.run_all"](cmd, **params) count = 0 while "Could not get lock" in cmd_ret.get("stderr", "") and count < 10: count += 1 log.warning("Waiting for dpkg lock release: retrying... %s/100", count) time.sleep(2 ** count) cmd_ret = __salt__["cmd.run_all"](cmd, **params) return cmd_ret
saltstack/salt
[ 13089, 5388, 13089, 3074, 1298233016 ]
def normalize_name(name): """ Strips the architecture from the specified package name, if necessary. CLI Example: .. code-block:: bash salt '*' pkg.normalize_name zsh:amd64 """ try: pkgname, pkgarch = name.rsplit(PKG_ARCH_SEPARATOR, 1) except ValueError: pkgname = name pkgarch = __grains__["osarch"] return pkgname if pkgarch in (__grains__["osarch"], "all", "any") else name
saltstack/salt
[ 13089, 5388, 13089, 3074, 1298233016 ]
def latest_version(*names, **kwargs): """ Return the latest version of the named package available for upgrade or installation. If more than one package name is specified, a dict of name/version pairs is returned. If the latest version of a given package is already installed, an empty string will be returned for that package. A specific repo can be requested using the ``fromrepo`` keyword argument. cache_valid_time .. versionadded:: 2016.11.0 Skip refreshing the package database if refresh has already occurred within <value> seconds CLI Example: .. code-block:: bash salt '*' pkg.latest_version <package name> salt '*' pkg.latest_version <package name> fromrepo=unstable salt '*' pkg.latest_version <package1> <package2> <package3> ... """ refresh = salt.utils.data.is_true(kwargs.pop("refresh", True)) show_installed = salt.utils.data.is_true(kwargs.pop("show_installed", False)) if "repo" in kwargs: raise SaltInvocationError( "The 'repo' argument is invalid, use 'fromrepo' instead" ) fromrepo = kwargs.pop("fromrepo", None) cache_valid_time = kwargs.pop("cache_valid_time", 0) if not names: return "" ret = {} # Initialize the dict with empty strings for name in names: ret[name] = "" pkgs = list_pkgs(versions_as_list=True) repo = ["-o", "APT::Default-Release={}".format(fromrepo)] if fromrepo else None # Refresh before looking for the latest version available if refresh: refresh_db(cache_valid_time) for name in names: cmd = ["apt-cache", "-q", "policy", name] if repo is not None: cmd.extend(repo) out = _call_apt(cmd, scope=False) candidate = "" for line in salt.utils.itertools.split(out["stdout"], "\n"): if "Candidate" in line: comps = line.split() if len(comps) >= 2: candidate = comps[-1] if candidate.lower() == "(none)": candidate = "" break installed = pkgs.get(name, []) if not installed: ret[name] = candidate elif installed and show_installed: ret[name] = candidate elif candidate: # If there are no installed versions that are greater than or equal # to the install candidate, then the candidate is an upgrade, so # add it to the return dict if not any( salt.utils.versions.compare( ver1=x, oper=">=", ver2=candidate, cmp_func=version_cmp ) for x in installed ): ret[name] = candidate # Return a string if only one package name passed if len(names) == 1: return ret[names[0]] return ret
saltstack/salt
[ 13089, 5388, 13089, 3074, 1298233016 ]
def version(*names, **kwargs): """ Returns a string representing the package version or an empty string if not installed. If more than one package name is specified, a dict of name/version pairs is returned. CLI Example: .. code-block:: bash salt '*' pkg.version <package name> salt '*' pkg.version <package1> <package2> <package3> ... """ return __salt__["pkg_resource.version"](*names, **kwargs)
saltstack/salt
[ 13089, 5388, 13089, 3074, 1298233016 ]
def install( name=None, refresh=False, fromrepo=None, skip_verify=False, debconf=None, pkgs=None, sources=None, reinstall=False, downloadonly=False, ignore_epoch=False, **kwargs
saltstack/salt
[ 13089, 5388, 13089, 3074, 1298233016 ]
def _uninstall(action="remove", name=None, pkgs=None, **kwargs): """ remove and purge do identical things but with different apt-get commands, this function performs the common logic. """ try: pkg_params = __salt__["pkg_resource.parse_targets"](name, pkgs)[0] except MinionError as exc: raise CommandExecutionError(exc) old = list_pkgs() old_removed = list_pkgs(removed=True) targets = [x for x in pkg_params if x in old] if action == "purge": targets.extend([x for x in pkg_params if x in old_removed]) if not targets: return {} cmd = ["apt-get", "-q", "-y", action] cmd.extend(targets) env = _parse_env(kwargs.get("env")) env.update(DPKG_ENV_VARS.copy()) out = _call_apt(cmd, env=env) if out["retcode"] != 0 and out["stderr"]: errors = [out["stderr"]] else: errors = [] __context__.pop("pkg.list_pkgs", None) new = list_pkgs() new_removed = list_pkgs(removed=True) changes = salt.utils.data.compare_dicts(old, new) if action == "purge": ret = { "removed": salt.utils.data.compare_dicts(old_removed, new_removed), "installed": changes, } else: ret = changes if errors: raise CommandExecutionError( "Problem encountered removing package(s)", info={"errors": errors, "changes": ret}, ) return ret
saltstack/salt
[ 13089, 5388, 13089, 3074, 1298233016 ]
def remove(name=None, pkgs=None, **kwargs): """ .. versionchanged:: 2015.8.12,2016.3.3,2016.11.0 On minions running systemd>=205, `systemd-run(1)`_ is now used to isolate commands which modify installed packages from the ``salt-minion`` daemon's control group. This is done to keep systemd from killing any apt-get/dpkg commands spawned by Salt when the ``salt-minion`` service is restarted. (see ``KillMode`` in the `systemd.kill(5)`_ manpage for more information). If desired, usage of `systemd-run(1)`_ can be suppressed by setting a :mod:`config option <salt.modules.config.get>` called ``systemd.scope``, with a value of ``False`` (no quotes). .. _`systemd-run(1)`: https://www.freedesktop.org/software/systemd/man/systemd-run.html .. _`systemd.kill(5)`: https://www.freedesktop.org/software/systemd/man/systemd.kill.html Remove packages using ``apt-get remove``. name The name of the package to be deleted. Multiple Package Options: pkgs A list of packages to delete. Must be passed as a python list. The ``name`` parameter will be ignored if this option is passed. .. versionadded:: 0.16.0 Returns a dict containing the changes. CLI Example: .. code-block:: bash salt '*' pkg.remove <package name> salt '*' pkg.remove <package1>,<package2>,<package3> salt '*' pkg.remove pkgs='["foo", "bar"]' """ return _uninstall(action="remove", name=name, pkgs=pkgs, **kwargs)
saltstack/salt
[ 13089, 5388, 13089, 3074, 1298233016 ]
def upgrade(refresh=True, dist_upgrade=False, **kwargs): """ .. versionchanged:: 2015.8.12,2016.3.3,2016.11.0 On minions running systemd>=205, `systemd-run(1)`_ is now used to isolate commands which modify installed packages from the ``salt-minion`` daemon's control group. This is done to keep systemd from killing any apt-get/dpkg commands spawned by Salt when the ``salt-minion`` service is restarted. (see ``KillMode`` in the `systemd.kill(5)`_ manpage for more information). If desired, usage of `systemd-run(1)`_ can be suppressed by setting a :mod:`config option <salt.modules.config.get>` called ``systemd.scope``, with a value of ``False`` (no quotes). .. _`systemd-run(1)`: https://www.freedesktop.org/software/systemd/man/systemd-run.html .. _`systemd.kill(5)`: https://www.freedesktop.org/software/systemd/man/systemd.kill.html Upgrades all packages via ``apt-get upgrade`` or ``apt-get dist-upgrade`` if ``dist_upgrade`` is ``True``. Returns a dictionary containing the changes: .. code-block:: python {'<package>': {'old': '<old-version>', 'new': '<new-version>'}} dist_upgrade Whether to perform the upgrade using dist-upgrade vs upgrade. Default is to use upgrade. .. versionadded:: 2014.7.0 refresh : True If ``True``, the apt cache will be refreshed first. By default, this is ``True`` and a refresh is performed. cache_valid_time .. versionadded:: 2016.11.0 Skip refreshing the package database if refresh has already occurred within <value> seconds download_only (or downloadonly) Only download the packages, don't unpack or install them. Use downloadonly to be in line with yum and zypper module. .. versionadded:: 2018.3.0 force_conf_new Always install the new version of any configuration files. .. versionadded:: 2015.8.0 allow_downgrades Allow apt to downgrade packages without a prompt. .. versionadded:: 3005 CLI Example: .. code-block:: bash salt '*' pkg.upgrade """ cache_valid_time = kwargs.pop("cache_valid_time", 0) if salt.utils.data.is_true(refresh): refresh_db(cache_valid_time) old = list_pkgs() if "force_conf_new" in kwargs and kwargs["force_conf_new"]: dpkg_options = ["--force-confnew"] else: dpkg_options = ["--force-confold", "--force-confdef"] cmd = [ "apt-get", "-q", "-y", ] for option in dpkg_options: cmd.append("-o") cmd.append("DPkg::Options::={}".format(option)) if kwargs.get("force_yes", False): cmd.append("--force-yes") if kwargs.get("skip_verify", False): cmd.append("--allow-unauthenticated") if kwargs.get("download_only", False) or kwargs.get("downloadonly", False): cmd.append("--download-only") if kwargs.get("allow_downgrades", False): cmd.append("--allow-downgrades") cmd.append("dist-upgrade" if dist_upgrade else "upgrade") result = _call_apt(cmd, env=DPKG_ENV_VARS.copy()) __context__.pop("pkg.list_pkgs", None) new = list_pkgs() ret = salt.utils.data.compare_dicts(old, new) if result["retcode"] != 0: raise CommandExecutionError( "Problem encountered upgrading packages", info={"changes": ret, "result": result}, ) return ret
saltstack/salt
[ 13089, 5388, 13089, 3074, 1298233016 ]
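A minimal sketch of how `upgrade` derives its return value: `list_pkgs()` is snapshotted before and after the apt-get run and the two maps are diffed. The helper below is a stand-in that mimics what `salt.utils.data.compare_dicts` does here, not the real implementation, and the sample versions are made up:

# Stand-in for the before/after diff used by upgrade() (key order may vary).
def compare_pkg_maps(old, new):
    changes = {}
    for pkg in set(old) | set(new):
        old_ver = old.get(pkg, "")
        new_ver = new.get(pkg, "")
        if old_ver != new_ver:
            changes[pkg] = {"old": old_ver, "new": new_ver}
    return changes

old = {"bash": "5.1-2", "nginx": "1.18.0-1"}
new = {"bash": "5.1-2", "nginx": "1.18.0-2", "curl": "7.81.0-1"}
print(compare_pkg_maps(old, new))
# {'nginx': {'old': '1.18.0-1', 'new': '1.18.0-2'}, 'curl': {'old': '', 'new': '7.81.0-1'}}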
def unhold(name=None, pkgs=None, sources=None, **kwargs): # pylint: disable=W0613 """ .. versionadded:: 2014.7.0 Set package current in 'hold' state to install state, meaning it will be upgraded. name The name of the package, e.g., 'tmux' CLI Example: .. code-block:: bash salt '*' pkg.unhold <package name> pkgs A list of packages to unhold. Must be passed as a python list. CLI Example: .. code-block:: bash salt '*' pkg.unhold pkgs='["foo", "bar"]' """ if not name and not pkgs and not sources: raise SaltInvocationError("One of name, pkgs, or sources must be specified.") if pkgs and sources: raise SaltInvocationError("Only one of pkgs or sources can be specified.") targets = [] if pkgs: targets.extend(pkgs) elif sources: for source in sources: targets.append(next(iter(source))) else: targets.append(name) ret = {} for target in targets: if isinstance(target, dict): target = next(iter(target)) ret[target] = {"name": target, "changes": {}, "result": False, "comment": ""} state = get_selections(pattern=target) if not state: ret[target]["comment"] = "Package {} does not have a state.".format(target) elif salt.utils.data.is_true(state.get("hold", False)): if "test" in __opts__ and __opts__["test"]: ret[target].update(result=None) ret[target]["comment"] = "Package {} is set not to be held.".format( target ) else: result = set_selections(selection={"install": [target]}) ret[target].update(changes=result[target], result=True) ret[target]["comment"] = "Package {} is no longer being held.".format( target ) else: ret[target].update(result=True) ret[target]["comment"] = "Package {} is already set not to be held.".format( target ) return ret
saltstack/salt
[ 13089, 5388, 13089, 3074, 1298233016 ]
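A small sketch of the target normalization at the top of `unhold`: `pkgs` takes precedence, `sources` contribute their first key, and dict entries inside `pkgs` are reduced to the bare package name. The sample data is hypothetical:

# Simplified re-implementation of unhold()'s input normalization.
def normalize_targets(name=None, pkgs=None, sources=None):
    if not name and not pkgs and not sources:
        raise ValueError("One of name, pkgs, or sources must be specified.")
    if pkgs and sources:
        raise ValueError("Only one of pkgs or sources can be specified.")
    if pkgs:
        targets = list(pkgs)
    elif sources:
        targets = [next(iter(source)) for source in sources]
    else:
        targets = [name]
    return [next(iter(t)) if isinstance(t, dict) else t for t in targets]

print(normalize_targets(pkgs=["tmux", {"nginx": "1.18.0-1"}]))  # ['tmux', 'nginx']
print(normalize_targets(sources=[{"foo": "salt://foo.deb"}]))   # ['foo']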
def list_pkgs( versions_as_list=False, removed=False, purge_desired=False, **kwargs
saltstack/salt
[ 13089, 5388, 13089, 3074, 1298233016 ]
def _get_upgradable(dist_upgrade=True, **kwargs): """ Utility function to get upgradable packages Sample return data: { 'pkgname': '1.2.3-45', ... } """ cmd = ["apt-get", "--just-print"] if dist_upgrade: cmd.append("dist-upgrade") else: cmd.append("upgrade") try: cmd.extend(["-o", "APT::Default-Release={}".format(kwargs["fromrepo"])]) except KeyError: pass call = _call_apt(cmd) if call["retcode"] != 0: msg = "Failed to get upgrades" for key in ("stderr", "stdout"): if call[key]: msg += ": " + call[key] break raise CommandExecutionError(msg) else: out = call["stdout"] # rexp parses lines that look like the following: # Conf libxfont1 (1:1.4.5-1 Debian:testing [i386]) rexp = re.compile("(?m)^Conf " "([^ ]+) " r"\(([^ ]+)") # Package name # Version keys = ["name", "version"] _get = lambda l, k: l[keys.index(k)] upgrades = rexp.findall(out) ret = {} for line in upgrades: name = _get(line, "name") version_num = _get(line, "version") ret[name] = version_num return ret
saltstack/salt
[ 13089, 5388, 13089, 3074, 1298233016 ]
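A sketch of the "Conf <name> (<version> ..." parse in `_get_upgradable`, run against a fabricated fragment of `apt-get --just-print dist-upgrade` output (the first line follows the format quoted in the function's own comment):

import re

# Fabricated apt-get --just-print output; only the Conf lines are harvested.
sample = (
    "Inst libxfont1 [1:1.4.4-1] (1:1.4.5-1 Debian:testing [i386])\n"
    "Conf libxfont1 (1:1.4.5-1 Debian:testing [i386])\n"
    "Conf bash (5.1-2ubuntu1 Ubuntu:22.04 [amd64])\n"
)

rexp = re.compile(r"(?m)^Conf ([^ ]+) \(([^ ]+)")
print(dict(rexp.findall(sample)))
# {'libxfont1': '1:1.4.5-1', 'bash': '5.1-2ubuntu1'}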
def upgrade_available(name, **kwargs): """ Check whether or not an upgrade is available for a given package CLI Example: .. code-block:: bash salt '*' pkg.upgrade_available <package name> """ return latest_version(name) != ""
saltstack/salt
[ 13089, 5388, 13089, 3074, 1298233016 ]
def _split_repo_str(repo): """ Return APT source entry as a tuple. """ split = SourceEntry(repo) return split.type, split.architectures, split.uri, split.dist, split.comps
saltstack/salt
[ 13089, 5388, 13089, 3074, 1298233016 ]
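An illustrative stand-in for `_split_repo_str`, using a naive whitespace split of a simple one-line APT source. The real function delegates to aptsources' `SourceEntry`, which also handles `[arch=...]` options, comments, and odd whitespace; this sketch only shows the shape of the returned tuple:

# Naive parse of a plain "deb <uri> <dist> <comps...>" source line.
def split_repo_str(repo):
    parts = repo.split()
    repo_type, uri, dist, comps = parts[0], parts[1], parts[2], parts[3:]
    return repo_type, [], uri, dist, comps  # architectures left empty here

print(split_repo_str("deb http://archive.ubuntu.com/ubuntu jammy main universe"))
# ('deb', [], 'http://archive.ubuntu.com/ubuntu', 'jammy', ['main', 'universe'])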
def list_repo_pkgs(*args, **kwargs): # pylint: disable=unused-import """ .. versionadded:: 2017.7.0 Returns all available packages. Optionally, package names (and name globs) can be passed and the results will be filtered to packages matching those names. This function can be helpful in discovering the version or repo to specify in a :mod:`pkg.installed <salt.states.pkg.installed>` state. The return data will be a dictionary mapping package names to a list of version numbers, ordered from newest to oldest. For example: .. code-block:: python { 'bash': ['4.3-14ubuntu1.1', '4.3-14ubuntu1'], 'nginx': ['1.10.0-0ubuntu0.16.04.4', '1.9.15-0ubuntu1'] } CLI Examples: .. code-block:: bash salt '*' pkg.list_repo_pkgs salt '*' pkg.list_repo_pkgs foo bar baz """ if args: # Get only information about packages in args cmd = ["apt-cache", "show"] + [arg for arg in args] else: # Get information about all available packages cmd = ["apt-cache", "dump"] out = _call_apt(cmd, scope=False, ignore_retcode=True) ret = {} pkg_name = None skip_pkg = False new_pkg = re.compile("^Package: (.+)") for line in salt.utils.itertools.split(out["stdout"], "\n"): if not line.strip(): continue try: cur_pkg = new_pkg.match(line).group(1) except AttributeError: pass else: if cur_pkg != pkg_name: pkg_name = cur_pkg continue comps = line.strip().split(None, 1) if comps[0] == "Version:": ret.setdefault(pkg_name, []).append(comps[1]) return ret
saltstack/salt
[ 13089, 5388, 13089, 3074, 1298233016 ]
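A sketch of the `Package:`/`Version:` scan in `list_repo_pkgs`, applied to a fabricated `apt-cache show`-style dump; the version numbers reuse the docstring example above:

import re

sample = (
    "Package: bash\nVersion: 4.3-14ubuntu1.1\nDescription: GNU Bourne Again SHell\n\n"
    "Package: bash\nVersion: 4.3-14ubuntu1\n\n"
    "Package: nginx\nVersion: 1.10.0-0ubuntu0.16.04.4\n"
)

new_pkg = re.compile(r"^Package: (.+)")
ret, pkg_name = {}, None
for line in sample.splitlines():
    if not line.strip():
        continue
    match = new_pkg.match(line)
    if match:
        pkg_name = match.group(1)  # start of a new package stanza
        continue
    key, _, value = line.partition(": ")
    if key == "Version":
        ret.setdefault(pkg_name, []).append(value)

print(ret)
# {'bash': ['4.3-14ubuntu1.1', '4.3-14ubuntu1'], 'nginx': ['1.10.0-0ubuntu0.16.04.4']}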
def list_repos(**kwargs):
    """
    Lists all repos in the sources.list (and sources.list.d) files

    CLI Example:

    .. code-block:: bash

       salt '*' pkg.list_repos
       salt '*' pkg.list_repos disabled=True
    """
    repos = {}
    sources = SourcesList()
    for source in sources.list:
        if _skip_source(source):
            continue
        repo = {}
        repo["file"] = source.file
        repo["comps"] = getattr(source, "comps", [])
        repo["disabled"] = source.disabled
        repo["dist"] = source.dist
        repo["type"] = source.type
        repo["uri"] = source.uri
        repo["line"] = source.line.strip()
        repo["architectures"] = getattr(source, "architectures", [])
        repos.setdefault(source.uri, []).append(repo)
    return repos
saltstack/salt
[ 13089, 5388, 13089, 3074, 1298233016 ]
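A hypothetical example of the structure `list_repos` builds: one entry per source URI, each holding a list of per-line repo dicts (all values below are made up):

repos = {
    "http://archive.ubuntu.com/ubuntu": [
        {
            "file": "/etc/apt/sources.list",
            "type": "deb",
            "uri": "http://archive.ubuntu.com/ubuntu",
            "dist": "jammy",
            "comps": ["main", "restricted"],
            "architectures": [],
            "disabled": False,
            "line": "deb http://archive.ubuntu.com/ubuntu jammy main restricted",
        }
    ]
}
print(len(repos["http://archive.ubuntu.com/ubuntu"]))  # 1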
def del_repo(repo, **kwargs): """ Delete a repo from the sources.list / sources.list.d If the .list file is in the sources.list.d directory and the file that the repo exists in does not contain any other repo configuration, the file itself will be deleted. The repo passed in must be a fully formed repository definition string. CLI Examples: .. code-block:: bash salt '*' pkg.del_repo "myrepo definition" """ is_ppa = False if repo.startswith("ppa:") and __grains__["os"] in ("Ubuntu", "Mint", "neon"): # This is a PPA definition meaning special handling is needed # to derive the name. is_ppa = True dist = __grains__["lsb_distrib_codename"] if not HAS_SOFTWAREPROPERTIES: _warn_software_properties(repo) owner_name, ppa_name = repo[4:].split("/") if "ppa_auth" in kwargs: auth_info = "{}@".format(kwargs["ppa_auth"]) repo = LP_PVT_SRC_FORMAT.format(auth_info, dist, owner_name, ppa_name) else: repo = LP_SRC_FORMAT.format(owner_name, ppa_name, dist) else: if hasattr(softwareproperties.ppa, "PPAShortcutHandler"): repo = softwareproperties.ppa.PPAShortcutHandler(repo).expand(dist)[0] else: repo = softwareproperties.ppa.expand_ppa_line(repo, dist)[0] sources = SourcesList() repos = [s for s in sources.list if not s.invalid] if repos: deleted_from = dict() try: ( repo_type, repo_architectures, repo_uri, repo_dist, repo_comps, ) = _split_repo_str(repo) except SyntaxError: raise SaltInvocationError( "Error: repo '{}' not a well formatted definition".format(repo) ) for source in repos: if ( source.type == repo_type and source.architectures == repo_architectures and source.uri == repo_uri and source.dist == repo_dist ): s_comps = set(source.comps) r_comps = set(repo_comps) if s_comps.intersection(r_comps): deleted_from[source.file] = 0 source.comps = list(s_comps.difference(r_comps)) if not source.comps: try: sources.remove(source) except ValueError: pass # PPAs are special and can add deb-src where expand_ppa_line # doesn't always reflect this. Lets just cleanup here for good # measure if ( is_ppa and repo_type == "deb" and source.type == "deb-src" and source.uri == repo_uri and source.dist == repo_dist ): s_comps = set(source.comps) r_comps = set(repo_comps) if s_comps.intersection(r_comps): deleted_from[source.file] = 0 source.comps = list(s_comps.difference(r_comps)) if not source.comps: try: sources.remove(source) except ValueError: pass sources.save() if deleted_from: ret = "" for source in sources: if source.file in deleted_from: deleted_from[source.file] += 1 for repo_file, count in deleted_from.items(): msg = "Repo '{0}' has been removed from {1}.\n" if count == 0 and "sources.list.d/" in repo_file: if os.path.isfile(repo_file): msg = "File {1} containing repo '{0}' has been removed." try: os.remove(repo_file) except OSError: pass ret += msg.format(repo, repo_file) # explicit refresh after a repo is deleted refresh_db() return ret raise CommandExecutionError( "Repo {} doesn't exist in the sources.list(s)".format(repo) )
saltstack/salt
[ 13089, 5388, 13089, 3074, 1298233016 ]
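A sketch of the component bookkeeping at the heart of `del_repo`: components named in the repo string are stripped from the matching source line, and the line (or its `.list` file) is removed only when no components remain. Toy data below:

# Set arithmetic mirroring del_repo()'s s_comps / r_comps handling.
source_comps = {"main", "universe"}
repo_comps = {"universe"}

if source_comps & repo_comps:
    remaining = sorted(source_comps - repo_comps)
    if remaining:
        print("rewrite the source line with comps:", remaining)  # ['main']
    else:
        print("drop the source line entirely")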
def get_repo_keys(): """ .. versionadded:: 2017.7.0 List known repo key details. :return: A dictionary containing the repo keys. :rtype: dict CLI Examples: .. code-block:: bash salt '*' pkg.get_repo_keys """ ret = dict() repo_keys = list() # The double usage of '--with-fingerprint' is necessary in order to # retrieve the fingerprint of the subkey. cmd = [ "apt-key", "adv", "--batch", "--list-public-keys", "--with-fingerprint", "--with-fingerprint", "--with-colons", "--fixed-list-mode", ] cmd_ret = _call_apt(cmd, scope=False) if cmd_ret["retcode"] != 0: log.error(cmd_ret["stderr"]) return ret lines = [line for line in cmd_ret["stdout"].splitlines() if line.strip()] # Reference for the meaning of each item in the colon-separated # record can be found here: https://goo.gl/KIZbvp for line in lines: items = [ _convert_if_int(item.strip()) if item.strip() else None for item in line.split(":") ] key_props = dict() if len(items) < 2: log.debug("Skipping line: %s", line) continue if items[0] in ("pub", "sub"): key_props.update( { "algorithm": items[3], "bits": items[2], "capability": items[11], "date_creation": items[5], "date_expiration": items[6], "keyid": items[4], "validity": items[1], } ) if items[0] == "pub": repo_keys.append(key_props) else: repo_keys[-1]["subkey"] = key_props elif items[0] == "fpr": if repo_keys[-1].get("subkey", False): repo_keys[-1]["subkey"].update({"fingerprint": items[9]}) else: repo_keys[-1].update({"fingerprint": items[9]}) elif items[0] == "uid": repo_keys[-1].update({"uid": items[9], "uid_hash": items[7]}) for repo_key in repo_keys: ret[repo_key["keyid"]] = repo_key return ret
saltstack/salt
[ 13089, 5388, 13089, 3074, 1298233016 ]
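A simplified re-implementation of the colon-record parse in `get_repo_keys`, fed a fabricated, abbreviated "pub"/"fpr" pair laid out like gpg's `--with-colons` output (the real function also handles "sub" and "uid" records and integer conversion):

keys = {}
current = None
sample = [
    "pub:-:4096:1:3B4FE6ACC0B21F32:1336774248::::::scESC::::::::",
    "fpr:::::::::790BC7277767219C42C86F933B4FE6ACC0B21F32:",
]

for line in sample:
    items = [item or None for item in line.split(":")]
    if items[0] == "pub":
        current = {
            "validity": items[1],
            "bits": items[2],
            "algorithm": items[3],
            "keyid": items[4],
            "date_creation": items[5],
            "capability": items[11],
        }
        keys[current["keyid"]] = current
    elif items[0] == "fpr" and current is not None:
        current["fingerprint"] = items[9]

print(keys["3B4FE6ACC0B21F32"]["fingerprint"])
# 790BC7277767219C42C86F933B4FE6ACC0B21F32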
def del_repo_key(name=None, **kwargs):
    """
    .. versionadded:: 2015.8.0

    Remove a repo key using ``apt-key del``

    name
        Repo from which to remove the key. Unnecessary if ``keyid`` is passed.

    keyid
        The KeyID of the GPG key to remove

    keyid_ppa : False
        If set to ``True``, the repo's GPG key ID will be looked up from
        ppa.launchpad.net and removed.

        .. note::

            Setting this option to ``True`` requires that the ``name`` param
            also be passed.

    CLI Examples:

    .. code-block:: bash

        salt '*' pkg.del_repo_key keyid=0123ABCD
        salt '*' pkg.del_repo_key name='ppa:foo/bar' keyid_ppa=True
    """
    if kwargs.get("keyid_ppa", False):
        if isinstance(name, str) and name.startswith("ppa:"):
            owner_name, ppa_name = name[4:].split("/")
            ppa_info = _get_ppa_info_from_launchpad(owner_name, ppa_name)
            keyid = ppa_info["signing_key_fingerprint"][-8:]
        else:
            raise SaltInvocationError("keyid_ppa requires that a PPA be passed")
    else:
        if "keyid" in kwargs:
            keyid = kwargs.get("keyid")
        else:
            raise SaltInvocationError("keyid or keyid_ppa and PPA name must be passed")

    result = _call_apt(["apt-key", "del", keyid], scope=False)
    if result["retcode"] != 0:
        msg = "Failed to remove keyid {}".format(keyid)
        if result["stderr"]:
            msg += ": {}".format(result["stderr"])
        raise CommandExecutionError(msg)
    return keyid
saltstack/salt
[ 13089, 5388, 13089, 3074, 1298233016 ]
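A small sketch of the `keyid_ppa` path in `del_repo_key`: the short key ID handed to `apt-key del` is the last 8 characters of the PPA signing-key fingerprint returned by Launchpad (the fingerprint below is made up):

signing_key_fingerprint = "790BC7277767219C42C86F933B4FE6ACC0B21F32"
keyid = signing_key_fingerprint[-8:]
print(keyid)  # C0B21F32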
def file_list(*packages, **kwargs):
    """
    List the files that belong to a package. Not specifying any packages will
    return a list of _every_ file in the system's package database (not
    generally recommended).

    CLI Examples:

    .. code-block:: bash

        salt '*' pkg.file_list httpd
        salt '*' pkg.file_list httpd postfix
        salt '*' pkg.file_list
    """
    return __salt__["lowpkg.file_list"](*packages)
saltstack/salt
[ 13089, 5388, 13089, 3074, 1298233016 ]
def expand_repo_def(**kwargs): """ Take a repository definition and expand it to the full pkg repository dict that can be used for comparison. This is a helper function to make the Debian/Ubuntu apt sources sane for comparison in the pkgrepo states. This is designed to be called from pkgrepo states and will have little use being called on the CLI. """ if "repo" not in kwargs: raise SaltInvocationError("missing 'repo' argument") sanitized = {} repo = kwargs["repo"] if repo.startswith("ppa:") and __grains__["os"] in ("Ubuntu", "Mint", "neon"): dist = __grains__["lsb_distrib_codename"] owner_name, ppa_name = repo[4:].split("/", 1) if "ppa_auth" in kwargs: auth_info = "{}@".format(kwargs["ppa_auth"]) repo = LP_PVT_SRC_FORMAT.format(auth_info, owner_name, ppa_name, dist) else: if HAS_SOFTWAREPROPERTIES: if hasattr(softwareproperties.ppa, "PPAShortcutHandler"): repo = softwareproperties.ppa.PPAShortcutHandler(repo).expand(dist)[ 0 ] else: repo = softwareproperties.ppa.expand_ppa_line(repo, dist)[0] else: repo = LP_SRC_FORMAT.format(owner_name, ppa_name, dist) if "file" not in kwargs: filename = "/etc/apt/sources.list.d/{0}-{1}-{2}.list" kwargs["file"] = filename.format(owner_name, ppa_name, dist) source_entry = SourceEntry(repo) for list_args in ("architectures", "comps"): if list_args in kwargs: kwargs[list_args] = [ kwarg.strip() for kwarg in kwargs[list_args].split(",") ] for kwarg in _MODIFY_OK: if kwarg in kwargs: setattr(source_entry, kwarg, kwargs[kwarg]) source_list = SourcesList() source_entry = source_list.add( type=source_entry.type, uri=source_entry.uri, dist=source_entry.dist, orig_comps=getattr(source_entry, "comps", []), architectures=getattr(source_entry, "architectures", []), ) sanitized["file"] = source_entry.file sanitized["comps"] = getattr(source_entry, "comps", []) sanitized["disabled"] = source_entry.disabled sanitized["dist"] = source_entry.dist sanitized["type"] = source_entry.type sanitized["uri"] = source_entry.uri sanitized["line"] = source_entry.line.strip() sanitized["architectures"] = getattr(source_entry, "architectures", []) return sanitized
saltstack/salt
[ 13089, 5388, 13089, 3074, 1298233016 ]
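A sketch of the PPA branch in `expand_repo_def` when python-software-properties is not available: "ppa:owner/name" expands to a one-line deb source plus a predictable sources.list.d filename. `LP_SRC_FORMAT` below is an assumed stand-in for the module-level template and is shown only for illustration:

LP_SRC_FORMAT = "deb http://ppa.launchpad.net/{0}/{1}/ubuntu {2} main"  # assumed template

def expand_ppa(repo, dist):
    owner_name, ppa_name = repo[4:].split("/", 1)
    line = LP_SRC_FORMAT.format(owner_name, ppa_name, dist)
    filename = "/etc/apt/sources.list.d/{0}-{1}-{2}.list".format(owner_name, ppa_name, dist)
    return line, filename

print(expand_ppa("ppa:deadsnakes/ppa", "jammy"))
# ('deb http://ppa.launchpad.net/deadsnakes/ppa/ubuntu jammy main',
#  '/etc/apt/sources.list.d/deadsnakes-ppa-jammy.list')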
def get_selections(pattern=None, state=None): """ View package state from the dpkg database. Returns a dict of dicts containing the state, and package names: .. code-block:: python {'<host>': {'<state>': ['pkg1', ... ] }, ... } CLI Example: .. code-block:: bash salt '*' pkg.get_selections salt '*' pkg.get_selections 'python-*' salt '*' pkg.get_selections state=hold salt '*' pkg.get_selections 'openssh*' state=hold """ ret = {} cmd = ["dpkg", "--get-selections"] cmd.append(pattern if pattern else "*") stdout = __salt__["cmd.run_stdout"]( cmd, output_loglevel="trace", python_shell=False ) ret = _parse_selections(stdout) if state: return {state: ret.get(state, [])} return ret
saltstack/salt
[ 13089, 5388, 13089, 3074, 1298233016 ]
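A sketch of turning `dpkg --get-selections` output into the `{state: [pkgs]}` mapping that `get_selections` returns; the sample output is fabricated:

sample = (
    "openssh-server\t\t\t\thold\n"
    "openssh-client\t\t\t\thold\n"
    "tmux\t\t\t\t\tinstall\n"
)

selections = {}
for line in sample.splitlines():
    if not line.strip():
        continue
    pkg, state = line.split()  # "package<tabs>state"
    selections.setdefault(state, []).append(pkg)

print(selections)
# {'hold': ['openssh-server', 'openssh-client'], 'install': ['tmux']}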
def set_selections(path=None, selection=None, clear=False, saltenv="base"): """ Change package state in the dpkg database. The state can be any one of, documented in ``dpkg(1)``: - install - hold - deinstall - purge This command is commonly used to mark specific packages to be held from being upgraded, that is, to be kept at a certain version. When a state is changed to anything but being held, then it is typically followed by ``apt-get -u dselect-upgrade``. Note: Be careful with the ``clear`` argument, since it will start with setting all packages to deinstall state. Returns a dict of dicts containing the package names, and the new and old versions: .. code-block:: python {'<host>': {'<package>': {'new': '<new-state>', 'old': '<old-state>'} }, ... } CLI Example: .. code-block:: bash salt '*' pkg.set_selections selection='{"install": ["netcat"]}' salt '*' pkg.set_selections selection='{"hold": ["openssh-server", "openssh-client"]}' salt '*' pkg.set_selections salt://path/to/file salt '*' pkg.set_selections salt://path/to/file clear=True """ ret = {} if not path and not selection: return ret if path and selection: err = ( "The 'selection' and 'path' arguments to " "pkg.set_selections are mutually exclusive, and cannot be " "specified together" ) raise SaltInvocationError(err) if isinstance(selection, str): try: selection = salt.utils.yaml.safe_load(selection) except ( salt.utils.yaml.parser.ParserError, salt.utils.yaml.scanner.ScannerError, ) as exc: raise SaltInvocationError("Improperly-formatted selection: {}".format(exc)) if path: path = __salt__["cp.cache_file"](path, saltenv) with salt.utils.files.fopen(path, "r") as ifile: content = [salt.utils.stringutils.to_unicode(x) for x in ifile.readlines()] selection = _parse_selections(content) if selection: valid_states = ("install", "hold", "deinstall", "purge") bad_states = [x for x in selection if x not in valid_states] if bad_states: raise SaltInvocationError( "Invalid state(s): {}".format(", ".join(bad_states)) ) if clear: cmd = ["dpkg", "--clear-selections"] if not __opts__["test"]: result = _call_apt(cmd, scope=False) if result["retcode"] != 0: err = "Running dpkg --clear-selections failed: {}".format( result["stderr"] ) log.error(err) raise CommandExecutionError(err) sel_revmap = {} for _state, _pkgs in get_selections().items(): sel_revmap.update({_pkg: _state for _pkg in _pkgs}) for _state, _pkgs in selection.items(): for _pkg in _pkgs: if _state == sel_revmap.get(_pkg): continue cmd = ["dpkg", "--set-selections"] cmd_in = "{} {}".format(_pkg, _state) if not __opts__["test"]: result = _call_apt(cmd, scope=False, stdin=cmd_in) if result["retcode"] != 0: log.error("failed to set state %s for package %s", _state, _pkg) else: ret[_pkg] = {"old": sel_revmap.get(_pkg), "new": _state} return ret
saltstack/salt
[ 13089, 5388, 13089, 3074, 1298233016 ]
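A sketch of how `set_selections` drives dpkg: every (package, state) pair that differs from the current selection becomes one "pkg state" line written to the stdin of `dpkg --set-selections`, and unchanged packages are skipped. Toy data below:

selection = {"hold": ["openssh-server", "openssh-client"], "install": ["tmux"]}
current = {"openssh-server": "install", "openssh-client": "hold", "tmux": "install"}

stdin_lines = []
for state, pkgs in selection.items():
    for pkg in pkgs:
        if current.get(pkg) != state:  # only touch packages whose state changes
            stdin_lines.append("{} {}".format(pkg, state))

print(stdin_lines)  # ['openssh-server hold']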