function
stringlengths
11
56k
repo_name
stringlengths
5
60
features
sequence
def test_view_failed_delitem(attr):
    """Deleting a missing key from a view's mapping must raise KeyError and leave both parent and view untouched."""
    adata = gen_adata((10, 10))
    view = adata[5:7, :][:, :5]
    parent_hash = joblib.hash(adata)
    subset_hash = joblib.hash(view)

    with pytest.raises(KeyError):
        getattr(view, attr).__delitem__("not a key")

    # A failed deletion must not resolve the view nor mutate either object.
    assert view.is_view
    assert joblib.hash(adata) == parent_hash
    assert joblib.hash(view) == subset_hash
theislab/anndata
[ 355, 126, 355, 257, 1502460606 ]
def test_view_delitem(attr):
    """__delitem__ on a view resolves it to an actual copy without touching the parent."""
    adata = gen_adata((10, 10))
    getattr(adata, attr)["to_delete"] = np.ones((10, 10))
    # Shouldn’t be a subclass, should be an ndarray
    assert type(getattr(adata, attr)["to_delete"]) is np.ndarray

    view = adata[5:7, :][:, :5]
    parent_hash = joblib.hash(adata)
    subset_hash = joblib.hash(view)

    getattr(view, attr).__delitem__("to_delete")

    # Deletion materializes the view; only the view loses the key.
    assert not view.is_view
    assert "to_delete" not in getattr(view, attr)
    assert "to_delete" in getattr(adata, attr)
    assert joblib.hash(adata) == parent_hash
    assert joblib.hash(view) != subset_hash
theislab/anndata
[ 355, 126, 355, 257, 1502460606 ]
def test_view_delattr(attr, subset_func):
    """delattr on a view resets the attribute to its default and leaves the parent unchanged."""
    base = gen_adata((10, 10))
    orig_hash = joblib.hash(base)
    subset = base[subset_func(base.obs_names), subset_func(base.var_names)]
    empty = ad.AnnData(obs=subset.obs[[]], var=subset.var[[]])

    delattr(subset, attr)

    assert not subset.is_view
    # Should now have same value as default
    assert_equal(getattr(subset, attr), getattr(empty, attr))
    assert orig_hash == joblib.hash(base)  # Original should not be modified
theislab/anndata
[ 355, 126, 355, 257, 1502460606 ]
def test_view_setattr_machinery(attr, subset_func, subset_func2):
    # Tests that setting attributes on a view doesn't mess anything up too bad
    adata = gen_adata((10, 10))
    view = adata[subset_func(adata.obs_names), subset_func2(adata.var_names)]

    expected = view.copy()
    setattr(view, attr, getattr(expected, attr))
    assert_equal(expected, view, exact=True)
theislab/anndata
[ 355, 126, 355, 257, 1502460606 ]
def test_view_of_view(matrix_type, subset_func, subset_func2):
    """A view of a view should reference the root AnnData, and copying it should equal stepwise copies."""
    adata = gen_adata((30, 15), X_type=matrix_type)
    adata.raw = adata
    if subset_func is single_subset:
        pytest.xfail("Other subset generating functions have trouble with this")
    var_s1 = subset_func(adata.var_names, min_size=4)
    var_view1 = adata[:, var_s1]
    var_s2 = subset_func2(var_view1.var_names)
    var_view2 = var_view1[:, var_s2]
    # A view of a view must point back at the root object, not the intermediate view.
    assert var_view2._adata_ref is adata
    obs_s1 = subset_func(adata.obs_names, min_size=4)
    obs_view1 = adata[obs_s1, :]
    obs_s2 = subset_func2(obs_view1.obs_names)
    assert adata[obs_s1, :][:, var_s1][obs_s2, :]._adata_ref is adata
    # Copying at every step vs. copying once at the end must agree exactly.
    view_of_actual_copy = adata[:, var_s1].copy()[obs_s1, :].copy()[:, var_s2].copy()
    view_of_view_copy = adata[:, var_s1][obs_s1, :][:, var_s2].copy()
    assert_equal(view_of_actual_copy, view_of_view_copy, exact=True)
theislab/anndata
[ 355, 126, 355, 257, 1502460606 ]
def test_double_index(subset_func, subset_func2):
    """Indexing both axes at once must equal indexing them one after the other."""
    adata = gen_adata((10, 10))
    rows = subset_func(adata.obs_names)
    cols = subset_func2(adata.var_names)

    combined = adata[rows, cols]
    chained = adata[rows, :][:, cols]

    assert np.all(asarray(combined.X) == asarray(chained.X))
    assert np.all(combined.obs == chained.obs)
    assert np.all(combined.var == chained.var)
theislab/anndata
[ 355, 126, 355, 257, 1502460606 ]
def test_modify_uns_in_copy():
    # https://github.com/theislab/anndata/issues/571
    adata = ad.AnnData(np.ones((5, 5)), uns={"parent": {"key": "value"}})
    copied = adata[:3].copy()

    copied.uns["parent"]["key"] = "new_value"

    # Mutating nested uns in the copy must not leak back into the original.
    assert adata.uns["parent"]["key"] != copied.uns["parent"]["key"]
theislab/anndata
[ 355, 126, 355, 257, 1502460606 ]
def test_invalid_scalar_index(adata, index):
    """Out-of-range scalar indexing must raise IndexError with a helpful message."""
    # https://github.com/theislab/anndata/issues/619
    with pytest.raises(IndexError, match=r".*index.* out of range\."):
        _ = adata[index]
theislab/anndata
[ 355, 126, 355, 257, 1502460606 ]
def test_negative_scalar_index(adata, index: int, obs: bool):
    """A negative scalar index must select the same element as its positive equivalent."""
    pos_index = index + (adata.n_obs if obs else adata.n_vars)

    if obs:
        pos_subset, neg_subset = adata[pos_index], adata[index]
    else:
        pos_subset, neg_subset = adata[:, pos_index], adata[:, index]

    np.testing.assert_array_equal(pos_subset.obs_names, neg_subset.obs_names)
    np.testing.assert_array_equal(pos_subset.var_names, neg_subset.var_names)
theislab/anndata
[ 355, 126, 355, 257, 1502460606 ]
def test_deepcopy_subset(adata, spmat: type):
    """deepcopy of a copied subset must yield plain arrays/matrices, not *View wrappers."""
    adata.obsp["arr"] = np.zeros((adata.n_obs, adata.n_obs))
    adata.obsp["spmat"] = spmat((adata.n_obs, adata.n_obs))

    adata = deepcopy(adata[:10].copy())

    assert isinstance(adata.obsp["arr"], np.ndarray)
    assert not isinstance(adata.obsp["arr"], ArrayView)
    np.testing.assert_array_equal(adata.obsp["arr"].shape, (10, 10))

    assert isinstance(adata.obsp["spmat"], spmat)
    # The deepcopy must not retain the sparse view classes either.
    assert not isinstance(
        adata.obsp["spmat"],
        SparseCSRView if spmat is sparse.csr_matrix else SparseCSCView,
    )
    np.testing.assert_array_equal(adata.obsp["spmat"].shape, (10, 10))
theislab/anndata
[ 355, 126, 355, 257, 1502460606 ]
def test_view_mixin_copies_data(adata, array_type: type, attr):
    """.copy() on a view-wrapped array must return memory independent of the view."""
    N = 100
    adata = ad.AnnData(
        obs=pd.DataFrame(index=np.arange(N)), var=pd.DataFrame(index=np.arange(N))
    )
    X = array_type(sparse.eye(N, N).multiply(np.arange(1, N + 1)))
    if attr == "X":
        adata.X = X
    else:
        getattr(adata, attr)["arr"] = X
    view = adata[:50]
    if attr == "X":
        arr_view = view.X
    else:
        arr_view = getattr(view, attr)["arr"]
    arr_view_copy = arr_view.copy()
    if sparse.issparse(X):
        # Sparse copies must not share any of the three underlying buffers.
        assert not np.shares_memory(arr_view.indices, arr_view_copy.indices)
        assert not np.shares_memory(arr_view.indptr, arr_view_copy.indptr)
        assert not np.shares_memory(arr_view.data, arr_view_copy.data)
        arr_view_copy.data[0] = -5
        assert not np.array_equal(arr_view_copy.data, arr_view.data)
    else:
        # Dense copies must be fully independent as well.
        assert not np.shares_memory(arr_view, arr_view_copy)
        arr_view_copy[0, 0] = -5
        assert not np.array_equal(arr_view_copy, arr_view)
theislab/anndata
[ 355, 126, 355, 257, 1502460606 ]
def upgrade():
    """Add nullable ``service_type`` and ``source`` string columns to ``citizen_complaints``."""
    ### commands auto generated by Alembic - please adjust! ###
    for column_name in ('service_type', 'source'):
        op.add_column(
            'citizen_complaints',
            sa.Column(column_name, sa.String(length=255), nullable=True),
        )
    ### end Alembic commands ###
codeforamerica/comport
[ 22, 14, 22, 54, 1441228033 ]
def image(self, instance):
    """Render the instance's 130px photo as an HTML <img> tag."""
    return '<img src="{0}" />'.format(instance.photo_130)
ramusus/django-vkontakte-video
[ 4, 2, 4, 3, 1417606246 ]
def image_preview(self, obj):
    """Render a 30px-high preview of photo_160, linked to the full-size image."""
    url = obj.photo_160
    return u'<a href="{0}"><img src="{0}" height="30" /></a>'.format(url)
ramusus/django-vkontakte-video
[ 4, 2, 4, 3, 1417606246 ]
def image_preview(self, obj):
    """Render a 30px-high preview of photo_130, linked to the full-size image."""
    url = obj.photo_130
    return u'<a href="{0}"><img src="{0}" height="30" /></a>'.format(url)
ramusus/django-vkontakte-video
[ 4, 2, 4, 3, 1417606246 ]
def read_user_data( fn ):
    """
    Given a filename, returns the file's contents in a string.
    """
    # The with-statement closes the file on exit; the original's explicit
    # fh.close() inside the block was redundant and is removed.
    with open( fn ) as fh:
        return fh.read()
smithfarm/ceph-auto-aws
[ 24, 9, 24, 18, 1442047504 ]
def get_tag( ec, obj, tag ):
    """
    Get the tag object with the given name associated with the given
    resource object (note: the tag object is returned, not its value).
    Returns None if the tag is not set.
    Warning: EC2 tags are case-sensitive.
    """
    # Return the first match directly instead of the original's
    # found-flag/break bookkeeping.
    for t in get_tags( ec, obj.id ):
        if t.name == tag:
            return t
    return None
smithfarm/ceph-auto-aws
[ 24, 9, 24, 18, 1442047504 ]
def init_region( r ):
    """
    Takes a region string.  Connects to that region.  Returns
    EC2Connection and VPCConnection objects in a tuple.
    """
    # connect to region
    vpc_conn = vpc.connect_to_region( r )
    ec2_conn = ec2.connect_to_region( r )
    return ( vpc_conn, ec2_conn )
smithfarm/ceph-auto-aws
[ 24, 9, 24, 18, 1442047504 ]
def init_subnet( c, vpc_id, cidr ):
    """
    Takes VPCConnection object, which is actually a connection to a
    region, a VPC id, and a CIDR block string.  Looks for our subnet in
    that region.  If subnet does not exist, creates it.  Returns the
    subnet resource object on success, raises exception on failure.
    """
    # Return the existing subnet directly; the original's found-flag and
    # dead commented-out print are dropped.
    for s in c.get_all_subnets():
        if s.cidr_block == cidr:
            return s
    return c.create_subnet( vpc_id, cidr )
smithfarm/ceph-auto-aws
[ 24, 9, 24, 18, 1442047504 ]
def derive_ip_address( cidr_block, delegate, final8 ):
    """
    Given a CIDR block string, a delegate number, and an integer
    representing the final 8 bits of the IP address, construct and return
    the IP address derived from this values.

    For example, if cidr_block is 10.0.0.0/16, the delegate number is 10,
    and the final8 is 8, the derived IP address will be 10.0.10.8.
    """
    # Guard clause: reject inputs that do not start with two dotted octets.
    match = re.match( r'\d+\.\d+', cidr_block )
    if not match:
        raise SpinupError( "{} passed to derive_ip_address() is not a CIDR block!".format(cidr_block) )
    return '{}.{}.{}'.format( match.group(0), delegate, final8 )
smithfarm/ceph-auto-aws
[ 24, 9, 24, 18, 1442047504 ]
def get_master_instance( ec2_conn, subnet_id ):
    """
    Given EC2Connection object and Master Subnet id, check that there is
    just one instance running in that subnet - this is the Master.  Raise
    exception if the number of instances is != 1.  Return the Master
    instance object.
    """
    instances = ec2_conn.get_only_instances( filters={ "subnet-id": subnet_id } )
    # Exactly one instance is acceptable; anything else is an error.
    if 1 > len(instances):
        raise SpinupError( "There are no instances in the master subnet" )
    if 1 < len(instances):
        raise SpinupError( "There are too many instances in the master subnet" )
    return instances[0]
smithfarm/ceph-auto-aws
[ 24, 9, 24, 18, 1442047504 ]
def process_user_data( fn, vars = () ):
    """
    Given filename of user-data file and a sequence of environment
    variable names, replaces @@NAME@@ tokens with the values of those
    environment variables.  Returns the user-data string on success,
    raises SpinupError if a named variable is missing.
    """
    # Immutable default () replaces the original mutable default [],
    # avoiding the shared-mutable-default pitfall; callers are unaffected
    # because the sequence is only iterated.
    buf = read_user_data( fn )
    for e in vars:
        if e not in environ:
            raise SpinupError( "Missing environment variable {}!".format( e ) )
        buf = template_token_subst( buf, '@@'+e+'@@', environ[e] )
    return buf
smithfarm/ceph-auto-aws
[ 24, 9, 24, 18, 1442047504 ]
def make_reservation( ec, ami_id, **kwargs ): """ Given EC2Connection object, delegate number, AMI ID, as well as all the kwargs referred to below, make a reservation for an instance and return the registration object. """ # extract arguments to be passed to ec.run_instances() our_kwargs = { "key_name": kwargs['key_name'], "subnet_id": kwargs['subnet_id'], "instance_type": kwargs['instance_type'], "private_ip_address": kwargs['private_ip_address'] } # Master or minion? if kwargs['master']: our_kwargs['user_data'] = kwargs['user_data'] else: # perform token substitution in user-data string u = kwargs['user_data'] u = template_token_subst( u, '@@MASTER_IP@@', kwargs['master_ip'] ) u = template_token_subst( u, '@@DELEGATE@@', kwargs['delegate_no'] ) u = template_token_subst( u, '@@ROLE@@', kwargs['role'] ) u = template_token_subst( u, '@@NODE_NO@@', kwargs['node_no'] ) our_kwargs['user_data'] = u # Make the reservation. reservation = ec.run_instances( ami_id, **our_kwargs ) # Return the reservation object. return reservation
smithfarm/ceph-auto-aws
[ 24, 9, 24, 18, 1442047504 ]
def wait_for_available( ec2_conn, volume_id ):
    """
    Given a volume id, wait for its state to change to "available".
    Polls every 5 seconds; blocks indefinitely if the volume never
    becomes available.
    """
    print "Waiting for {} available state".format( volume_id )
    while True:
        volumes = ec2_conn.get_all_volumes( volume_ids=[ volume_id ] )
        print "Current status is {}".format( volumes[0].status )
        if volumes[0].status != 'available':
            print "Sleeping for 5 seconds"
            time.sleep(5)
        else:
            break
smithfarm/ceph-auto-aws
[ 24, 9, 24, 18, 1442047504 ]
def __init__(self, jid):
    # Wrap the raw jid (string) in a JID instance.
    self.jid = JID(jid)
IgnitedAndExploded/pyfire
[ 5, 1, 5, 2, 1311503726 ]
def __init__(self, jid, **kwds): super(Contact, self).__init__() # required if isinstance(jid, basestring): self.jid = JID(jid) elif isinstance(jid, JID): self.jid = jid self.jid.validate(raise_error=True) else: raise AttributeError("Needs valid jid either as string or JID instance") # optional self.approved = False self.ask = None self.name = None self.subscription = "none" self.groups = [] for k, v in kwds.iteritems(): setattr(self, k, v)
IgnitedAndExploded/pyfire
[ 5, 1, 5, 2, 1311503726 ]
def local_action_activate(x = None):
  '''{ "title": "Turn on", "desc": "Turn on." }'''
  # NOTE: the docstring above is runtime metadata read by Nodel -- do not edit.
  # Queue power-on then input selection (DIGITAL input 1); the 'delay'
  # values presumably pace the queue consumer -- confirm against the
  # queue worker's implementation.
  queue.put({'function': 'remote_action_PowerOn', 'delay': 120})
  queue.put({'function': 'remote_action_SetInput', 'arg':{"source":"DIGITAL", "number":1}, 'delay': 5})
  print 'Activated'
museumsvictoria/nodel-recipes
[ 10, 11, 10, 17, 1438606188 ]
def local_action_deactivate(x = None):
  '''{ "title": "Turn off", "desc": "Turn off." }'''
  # NOTE: the docstring above is runtime metadata read by Nodel -- do not edit.
  # Queue the power-off command; 'delay' presumably paces the queue
  # consumer -- confirm against the queue worker's implementation.
  queue.put({'function': 'remote_action_PowerOff', 'delay': 120})
  print 'Deactivated'
museumsvictoria/nodel-recipes
[ 10, 11, 10, 17, 1438606188 ]
def __init__(self):
    # Initialize base Thread machinery plus an Event used to signal shutdown.
    threading.Thread.__init__(self)
    self.event = threading.Event()
museumsvictoria/nodel-recipes
[ 10, 11, 10, 17, 1438606188 ]
def stop(self):
    # Signal the worker loop to exit by setting the shutdown event.
    self.event.set()
museumsvictoria/nodel-recipes
[ 10, 11, 10, 17, 1438606188 ]
def cleanup():
  # Shutdown hook: log, then stop the worker thread `th`.
  print 'shutdown'
  th.stop()
museumsvictoria/nodel-recipes
[ 10, 11, 10, 17, 1438606188 ]
def _make_node(Name, Fields, Attributes, Bases):
    """Dynamically create a node class called *Name* with the given fields,
    attributes and base classes, and publish it in this module's namespace."""
    def create_node(self, *args, **kwargs):
        # Accept either no arguments at all or exactly one per field.
        nbparam = len(args) + len(kwargs)
        assert nbparam in (0, len(Fields)), \
            "Bad argument number for {}: {}, expecting {}".\
            format(Name, nbparam, len(Fields))
        self._fields = Fields
        self._attributes = Attributes
        # Positional arguments map onto fields in declaration order.
        for argname, argval in zip(self._fields, args):
            setattr(self, argname, argval)
        # Keyword arguments must name declared fields.
        for argname, argval in kwargs.items():
            assert argname in Fields, \
                "Invalid Keyword argument for {}: {}".format(Name, argname)
            setattr(self, argname, argval)

    # Register the generated type as a module-level attribute.
    setattr(_sys.modules[__name__],
            Name,
            type(Name, Bases, {'__init__': create_node}))
ryfeus/lambda-packs
[ 1086, 234, 1086, 13, 1476901359 ]
def parse(*args, **kwargs):
    # Parse with the stdlib ast module, then convert the tree to gast form.
    return ast_to_gast(_ast.parse(*args, **kwargs))
ryfeus/lambda-packs
[ 1086, 234, 1086, 13, 1476901359 ]
def setUp(self):
    # Two small documents sharing the phrase 'cat dog' with different endings.
    self.docs = [
        'This cat dog is running happy.',
        'This cat dog runs sad.'
    ]
frnsys/broca
[ 73, 9, 73, 6, 1411051143 ]
def test_rake(self):
    """RAKE tokenizer should extract the expected keyword phrases."""
    expected_t_docs = [
        ['cat dog', 'running happy'],
        ['cat dog runs sad']
    ]
    t_docs = keyword.RAKETokenizer().tokenize(self.docs)

    # Order not necessarily preserved
    for i, output in enumerate(t_docs):
        self.assertEqual(set(output), set(expected_t_docs[i]))
frnsys/broca
[ 73, 9, 73, 6, 1411051143 ]
def test_pos(self):
    """POS tokenizer should extract only the noun-phrase keywords."""
    expected_t_docs = [
        ['cat dog'],
        ['cat dog']
    ]
    t_docs = keyword.POSTokenizer().tokenize(self.docs)
    self.assertEqual(t_docs, expected_t_docs)
frnsys/broca
[ 73, 9, 73, 6, 1411051143 ]
def test_rake_parallel(self):
    """Parallel RAKE (n_jobs=-1) must produce the same phrases as the serial run."""
    expected_t_docs = [
        ['cat dog', 'running happy'],
        ['cat dog runs sad']
    ]
    t_docs = keyword.RAKETokenizer(n_jobs=-1).tokenize(self.docs)

    # Order not necessarily preserved
    for i, output in enumerate(t_docs):
        self.assertEqual(set(output), set(expected_t_docs[i]))
frnsys/broca
[ 73, 9, 73, 6, 1411051143 ]
def setUp(self):
    # Same fixture docs as the keyword-tokenizer suite above in the file.
    self.docs = [
        'This cat dog is running happy.',
        'This cat dog runs sad.'
    ]
frnsys/broca
[ 73, 9, 73, 6, 1411051143 ]
def test_selected_attribute():
    """select_attribute_ should build an .../@attr xpath and return the attribute values."""
    node = xpath.XPathNode(element='element', selected_attribute='value')
    assert node.xpath == '//element/@value'
    tree = etree.XML("<top><container><element value='1'>"
                     "</element><element value='2'></element>"
                     "</container></top>")
    builder = xpath.xpb.container.element.select_attribute_('value')
    assert builder.xpath == './/container//element/@value'
    assert builder.apply_(tree) == ['1', '2']
    # Passing elem= applies the builder immediately instead of returning it.
    assert xpath.xpb.element.select_attribute_('value', elem=tree) == \
        ['1', '2']
IvanMalison/okcupyd
[ 105, 18, 105, 24, 1411441891 ]
def test_attribute_contains():
    """attribute_contains should match elements whose attribute holds the substring."""
    tree = etree.XML("<top><elem a='complete'></elem></top>")
    assert xpath.xpb.elem.attribute_contains('a', 'complet').apply_(tree) != []
IvanMalison/okcupyd
[ 105, 18, 105, 24, 1411441891 ]
def _not(_rcvr):
    # Primitive for #not: ignores the receiver and always answers the
    # false singleton (presumably installed on the true class -- confirm).
    return falseObject
SOM-st/PySOM
[ 24, 4, 24, 5, 1382259745 ]
def _and_and_if_true(_rcvr, arg):
    # If the argument is a block, evaluate it and answer the result;
    # otherwise answer the argument itself unchanged.
    if isinstance(arg, _Block):
        block_method = arg.get_method()
        return block_method.invoke_1(arg)
    return arg
SOM-st/PySOM
[ 24, 4, 24, 5, 1382259745 ]
def _if_true_if_false(_rcvr, true_block, _false_block):
    # Only the true branch is ever evaluated here (presumably the primitive
    # installed on the true class -- confirm); the false block is ignored.
    if isinstance(true_block, _Block):
        block_method = true_block.get_method()
        return block_method.invoke_1(true_block)
    return true_block
SOM-st/PySOM
[ 24, 4, 24, 5, 1382259745 ]
def create(kernel):
    """Build and return the shared advanced droid frame schematic template.

    Bug fix: the original configured ``result`` but fell off the end
    without returning it, so callers received None.
    """
    result = Intangible()

    result.template = "object/draft_schematic/droid/component/shared_advanced_droid_frame.iff"
    result.attribute_template_id = -1
    result.stfName("string_id_table","")

    return result
anhstudios/swganh
[ 62, 37, 62, 37, 1297996365 ]
def callback_function(data):
    #FILL IN HERE
    # Negate the incoming Twist's linear.x and angular.z and republish the
    # shared message on the module-level publisher.
    global publisher_name, msg
    msg.linear.x = -data.linear.x
    msg.angular.z = -data.angular.z
    publisher_name.publish(msg)
BARCproject/barc
[ 201, 290, 201, 11, 1443204258 ]
def __init__(self, config):
    """Open the capture device from config['camera']['device_index'] and
    apply the configured width/height."""
    self._config = config
    self._device = \
        self._new_capture_device(config['camera']['device_index'])
    self.set_dimensions(
        config['camera']['width'],
        config['camera']['height'],
    )
GNOME-MouseTrap/mousetrap
[ 4, 12, 4, 10, 1400179132 ]
def _new_capture_device(cls, device_index):
    """Open an OpenCV capture device; release it and raise IOError when it
    fails to open."""
    capture = cv2.VideoCapture(device_index)

    if not capture.isOpened():
        # Release immediately so the handle is not leaked on failure.
        capture.release()

        raise IOError(cls.S_CAPTURE_OPEN_ERROR % device_index)

    return capture
GNOME-MouseTrap/mousetrap
[ 4, 12, 4, 10, 1400179132 ]
def read_image(self):
    """Grab one frame from the device and wrap it in an Image; raise
    IOError when the read fails."""
    ret, image = self._device.read()

    if not ret:
        raise IOError(self.S_CAPTURE_READ_ERROR)

    return Image(self._config, image)
GNOME-MouseTrap/mousetrap
[ 4, 12, 4, 10, 1400179132 ]
def __init__(self, config):
    # Keep the config, the configured haar file locations, and a cache of
    # already-loaded classifiers keyed by cache_name.
    self._config = config
    self._haar_files = config['haar_files']
    self._haar_cache = {}
GNOME-MouseTrap/mousetrap
[ 4, 12, 4, 10, 1400179132 ]
def from_file(self, file_, cache_name=None):
    """Load a Haar cascade classifier from *file_* (resolved relative to
    this module's directory), memoizing it under *cache_name* when given."""
    import os

    # Cache hit: return the previously loaded classifier.
    if cache_name in self._haar_cache:
        return self._haar_cache[cache_name]

    current_dir = os.path.dirname(os.path.realpath(__file__))
    haar_file = os.path.join(current_dir, file_)
    haar = cv2.CascadeClassifier(haar_file)

    # The early return above already guarantees the key is absent, so the
    # original's second membership test was redundant; also use the
    # idiomatic "is not None".
    if cache_name is not None:
        self._haar_cache[cache_name] = haar

    return haar
GNOME-MouseTrap/mousetrap
[ 4, 12, 4, 10, 1400179132 ]
def get_detector(cls, config, name, scale_factor=1.1, min_neighbors=3):
    """Return a shared FeatureDetector for (name, scale_factor,
    min_neighbors), creating and memoizing it on first request."""
    key = (name, scale_factor, min_neighbors)

    if key in cls._INSTANCES:
        LOGGER.info("Reusing %s detector.", key)
        return cls._INSTANCES[key]

    cls._INSTANCES[key] = FeatureDetector(
        config, name, scale_factor, min_neighbors)

    return cls._INSTANCES[key]
GNOME-MouseTrap/mousetrap
[ 4, 12, 4, 10, 1400179132 ]
def clear_all_detection_caches(cls):
    # Flush every memoized detector's per-image detection cache.
    for instance in cls._INSTANCES.values():
        instance.clear_cache()
GNOME-MouseTrap/mousetrap
[ 4, 12, 4, 10, 1400179132 ]
def detect(self, image):
    """Detect this feature in *image*, caching both successes and failures
    per image object so repeated calls are cheap."""
    if image in self._detect_cache:
        message = "Detection cache hit: %(image)d -> %(result)s" % \
            {'image':id(image), 'result':self._detect_cache[image]}
        LOGGER.debug(message)

        # A cached failure is re-raised, chained to the original exception.
        if isinstance(self._detect_cache[image], FeatureNotFoundException):
            message = str(self._detect_cache[image])
            raise FeatureNotFoundException(message,
                    cause=self._detect_cache[image])

        return self._detect_cache[image]

    try:
        # Pipeline: detect all candidates, require at least one, keep the
        # first, crop its image, and compute its center.
        self._image = image
        self._detect_plural()
        self._exit_if_none_detected()
        self._unpack_first()
        self._extract_image()
        self._calculate_center()
        self._detect_cache[image] = self._single
        return self._detect_cache[image]
    except FeatureNotFoundException as exception:
        # Cache the failure too, then propagate it.
        self._detect_cache[image] = exception
        raise
GNOME-MouseTrap/mousetrap
[ 4, 12, 4, 10, 1400179132 ]
def _exit_if_none_detected(self):
    """Raise FeatureNotFoundException when no candidates were detected,
    logging only on found/not-found state transitions to avoid log spam."""
    if len(self._plural) == 0:
        message = _('Feature not detected: %s') % (self._name)

        # Log only the first failure after a success.
        if self._last_attempt_successful:
            self._last_attempt_successful = False
            LOGGER.info(message)

        raise FeatureNotFoundException(message)
    else:
        # Log only the first success after a failure.
        if not self._last_attempt_successful:
            self._last_attempt_successful = True
            message = _('Feature detected: %s') % (self._name)
            LOGGER.info(message)
GNOME-MouseTrap/mousetrap
[ 4, 12, 4, 10, 1400179132 ]
def _calculate_center(self):
    """Store the midpoint of the detected bounding box under 'center'.

    Bug fix: the original computed (x + width) // 2, i.e. the midpoint
    between the left edge coordinate and the width value, which is not
    the box center; the center is offset + extent // 2.
    """
    self._single["center"] = {
        "x": self._single["x"] + self._single["width"] // 2,
        "y": self._single["y"] + self._single["height"] // 2,
    }
GNOME-MouseTrap/mousetrap
[ 4, 12, 4, 10, 1400179132 ]
def clear_cache(self):
    # Drop all memoized per-image detection results.
    self._detect_cache.clear()
GNOME-MouseTrap/mousetrap
[ 4, 12, 4, 10, 1400179132 ]
def __init__(self, config):
    # Chain up, then keep the config for later use by this plugin.
    super(FeatureDetectorClearCachePlugin, self).__init__(config)
    self._config = config
GNOME-MouseTrap/mousetrap
[ 4, 12, 4, 10, 1400179132 ]
def __init__(self, msg):
    # Thin wrapper: delegate message handling to the base exception class.
    super(TransactionError, self).__init__(msg)
rpm-software-management/dnf
[ 1066, 367, 1066, 40, 1331307069 ]
def __init__(self, filename, errors):
    """
    :param filename: The name of the transaction file being replayed
    :param errors: a list of error classes or a string with an error description
    """

    # store args in case someone wants to read them from a caught exception
    self.filename = filename
    if isinstance(errors, (list, tuple)):
        self.errors = errors
    else:
        self.errors = [errors]

    if filename:
        # Bug fix: the message hard-coded "(unknown)" where the {filename}
        # placeholder belongs, so the .format(filename=...) call had no
        # effect; restore the placeholder.
        msg = _('The following problems occurred while replaying the transaction from file "{filename}":').format(filename=filename)
    else:
        msg = _('The following problems occurred while running a transaction:')

    # Append each individual error on its own indented line.
    for error in self.errors:
        msg += "\n  " + str(error)

    super(TransactionReplayError, self).__init__(msg)
rpm-software-management/dnf
[ 1066, 367, 1066, 40, 1331307069 ]
def __init__(self, filename, msg):
    # Same (filename, errors) shape as the parent replay error, with a
    # single message standing in for the error list.
    super(IncompatibleTransactionVersionError, self).__init__(filename, msg)
rpm-software-management/dnf
[ 1066, 367, 1066, 40, 1331307069 ]
def serialize_transaction(transaction):
    """
    Serializes a transaction to a data structure that is equivalent to the stored JSON format.

    :param transaction: the transaction to serialize (an instance of dnf.db.history.TransactionWrapper)
    """

    data = {
        "version": VERSION,
    }
    rpms = []
    groups = []
    environments = []

    # No transaction: emit just the version header.
    if transaction is None:
        return data

    # Each transaction item is exactly one of: package, group, environment.
    for tsi in transaction.packages():
        if tsi.is_package():
            rpms.append({
                "action": tsi.action_name,
                "nevra": tsi.nevra,
                "reason": libdnf.transaction.TransactionItemReasonToString(tsi.reason),
                "repo_id": tsi.from_repo
            })

        elif tsi.is_group():
            group = tsi.get_group()

            group_data = {
                "action": tsi.action_name,
                "id": group.getGroupId(),
                "packages": [],
                "package_types": libdnf.transaction.compsPackageTypeToString(group.getPackageTypes())
            }

            for pkg in group.getPackages():
                group_data["packages"].append({
                    "name": pkg.getName(),
                    "installed": pkg.getInstalled(),
                    "package_type": libdnf.transaction.compsPackageTypeToString(pkg.getPackageType())
                })

            groups.append(group_data)

        elif tsi.is_environment():
            env = tsi.get_environment()

            env_data = {
                "action": tsi.action_name,
                "id": env.getEnvironmentId(),
                "groups": [],
                "package_types": libdnf.transaction.compsPackageTypeToString(env.getPackageTypes())
            }

            for grp in env.getGroups():
                env_data["groups"].append({
                    "id": grp.getGroupId(),
                    "installed": grp.getInstalled(),
                    "group_type": libdnf.transaction.compsPackageTypeToString(grp.getGroupType())
                })

            environments.append(env_data)

    # Only emit sections that are non-empty, matching the stored format.
    if rpms:
        data["rpms"] = rpms
    if groups:
        data["groups"] = groups
    if environments:
        data["environments"] = environments

    return data
rpm-software-management/dnf
[ 1066, 367, 1066, 40, 1331307069 ]
def __init__( self, base, filename="", data=None, ignore_extras=False, ignore_installed=False, skip_unavailable=False
rpm-software-management/dnf
[ 1066, 367, 1066, 40, 1331307069 ]
def _load_from_file(self, fn):
    """Load and parse the replay JSON from *fn*, wrapping both parse and
    validation failures in TransactionReplayError."""
    self._filename = fn

    with open(fn, "r") as f:
        try:
            replay_data = json.load(f)
        except json.decoder.JSONDecodeError as e:
            raise TransactionReplayError(fn, str(e) + ".")

    try:
        self._load_from_data(replay_data)
    except TransactionError as e:
        raise TransactionReplayError(fn, e)
rpm-software-management/dnf
[ 1066, 367, 1066, 40, 1331307069 ]
def _raise_or_warn(self, warn_only, msg):
    # Collect *msg* as a warning when warn_only is set; otherwise fail hard.
    if warn_only:
        self._warnings.append(msg)
    else:
        raise TransactionError(msg)
rpm-software-management/dnf
[ 1066, 367, 1066, 40, 1331307069 ]
def _verify_toplevel_json(self, replay_data):
    """Validate the top-level replay structure: require a string "version"
    key and check it for compatibility."""
    fn = self._filename

    if "version" not in replay_data:
        # Bug fix: .format() was applied to the msgid *inside* _(), which
        # both defeats the translation lookup (the formatted string is not
        # the catalog key) and was a no-op here; format the translated
        # string instead.
        raise TransactionReplayError(fn, _('Missing key "{key}".').format(key="version"))

    self._assert_type(replay_data["version"], str, "version", "string")

    _check_version(replay_data["version"], fn)
rpm-software-management/dnf
[ 1066, 367, 1066, 40, 1331307069 ]
def _create_swdb_group(self, group_id, pkg_types, pkgs):
    """Build a swdb group object for *group_id* from replayed package
    records, or return None when the group is unavailable (subject to
    skip_unavailable)."""
    comps_group = self._base.comps._group_by_id(group_id)
    if not comps_group:
        self._raise_or_warn(self._skip_unavailable, _("Group id '%s' is not available.") % group_id)
        return None

    swdb_group = self._base.history.group.new(group_id, comps_group.name, comps_group.ui_name, pkg_types)
    try:
        for pkg in pkgs:
            # Validate each replayed package record before adding it.
            name = pkg["name"]
            self._assert_type(name, str, "groups.packages.name", "string")
            installed = pkg["installed"]
            self._assert_type(installed, bool, "groups.packages.installed", "boolean")
            package_type = pkg["package_type"]
            self._assert_type(package_type, str, "groups.packages.package_type", "string")

            try:
                swdb_group.addPackage(name, installed, libdnf.transaction.stringToCompsPackageType(package_type))
            except libdnf.error.Error as e:
                raise TransactionError(str(e))

    except KeyError as e:
        # A missing dict key means malformed replay data.
        raise TransactionError(
            _('Missing object key "{key}" in groups.packages.').format(key=e.args[0])
        )

    return swdb_group
rpm-software-management/dnf
[ 1066, 367, 1066, 40, 1331307069 ]
def _swdb_group_upgrade(self, group_id, pkg_types, pkgs):
    """Record a group upgrade in the swdb, honoring ignore_installed when
    the group is not currently installed."""
    if not self._base.history.group.get(group_id):
        self._raise_or_warn(
            self._ignore_installed, _("Group id '%s' is not installed.") % group_id)
        return

    swdb_group = self._create_swdb_group(group_id, pkg_types, pkgs)

    # None means the group was unavailable and the problem was only warned about.
    if swdb_group is not None:
        self._base.history.group.upgrade(swdb_group)
rpm-software-management/dnf
[ 1066, 367, 1066, 40, 1331307069 ]
def _create_swdb_environment(self, env_id, pkg_types, groups):
    """Build a swdb environment object for *env_id* from replayed group
    records, or return None when the environment is unavailable (subject
    to skip_unavailable)."""
    comps_env = self._base.comps._environment_by_id(env_id)
    if not comps_env:
        self._raise_or_warn(self._skip_unavailable, _("Environment id '%s' is not available.") % env_id)
        return None

    swdb_env = self._base.history.env.new(env_id, comps_env.name, comps_env.ui_name, pkg_types)
    try:
        for grp in groups:
            # Validate each replayed group record before adding it.
            id = grp["id"]
            self._assert_type(id, str, "environments.groups.id", "string")
            installed = grp["installed"]
            self._assert_type(installed, bool, "environments.groups.installed", "boolean")
            group_type = grp["group_type"]
            self._assert_type(group_type, str, "environments.groups.group_type", "string")

            try:
                group_type = libdnf.transaction.stringToCompsPackageType(group_type)
            except libdnf.error.Error as e:
                raise TransactionError(str(e))

            # Environments only support mandatory/optional group types.
            if group_type not in (
                libdnf.transaction.CompsPackageType_MANDATORY,
                libdnf.transaction.CompsPackageType_OPTIONAL
            ):
                raise TransactionError(
                    _('Invalid value "{group_type}" of environments.groups.group_type, '
                      'only "mandatory" or "optional" is supported.'
                      ).format(group_type=grp["group_type"])
                )

            swdb_env.addGroup(id, installed, group_type)

    except KeyError as e:
        # A missing dict key means malformed replay data.
        raise TransactionError(
            _('Missing object key "{key}" in environments.groups.').format(key=e.args[0])
        )

    return swdb_env
rpm-software-management/dnf
[ 1066, 367, 1066, 40, 1331307069 ]
def _swdb_environment_upgrade(self, env_id, pkg_types, groups):
    """Record an environment upgrade in the swdb, honoring ignore_installed
    when the environment is not currently installed."""
    if not self._base.history.env.get(env_id):
        self._raise_or_warn(self._ignore_installed,_("Environment id '%s' is not installed.") % env_id)
        return

    swdb_env = self._create_swdb_environment(env_id, pkg_types, groups)

    # None means the environment was unavailable and only warned about.
    if swdb_env is not None:
        self._base.history.env.upgrade(swdb_env)
rpm-software-management/dnf
[ 1066, 367, 1066, 40, 1331307069 ]
def get_data(self):
    """
    :returns: the loaded data of the transaction (the parsed replay
        structure as loaded from file or passed in directly)
    """
    return self._replay_data
rpm-software-management/dnf
[ 1066, 367, 1066, 40, 1331307069 ]
def run(self):
    """
    Replays the transaction.

    Errors are accumulated across all rpms, groups and environments and
    raised together as one TransactionReplayError at the end, so a single
    bad item does not abort the rest of the replay.
    """
    fn = self._filename
    errors = []

    # Replay package actions first.
    for pkg_data in self._rpms:
        try:
            self._replay_pkg_action(pkg_data)
        except TransactionError as e:
            errors.append(e)

    # Then group actions.
    for group_data in self._groups:
        try:
            action = group_data["action"]
            group_id = group_data["id"]

            try:
                pkg_types = libdnf.transaction.stringToCompsPackageType(group_data["package_types"])
            except libdnf.error.Error as e:
                errors.append(TransactionError(str(e)))
                continue

            if action == "Install":
                self._swdb_group_install(group_id, pkg_types, group_data["packages"])
            elif action == "Upgrade":
                self._swdb_group_upgrade(group_id, pkg_types, group_data["packages"])
            elif action == "Removed":
                self._swdb_group_remove(group_id, pkg_types, group_data["packages"])
            else:
                errors.append(TransactionError(
                    _('Unexpected value of group action "{action}" for group "{group}".')
                    .format(action=action, group=group_id)
                ))
        except KeyError as e:
            # Malformed replay data: a required key was missing.
            errors.append(TransactionError(
                _('Missing object key "{key}" in a group.').format(key=e.args[0])
            ))
        except TransactionError as e:
            errors.append(e)

    # Finally environment actions.
    for env_data in self._environments:
        try:
            action = env_data["action"]
            env_id = env_data["id"]

            try:
                pkg_types = libdnf.transaction.stringToCompsPackageType(env_data["package_types"])
            except libdnf.error.Error as e:
                errors.append(TransactionError(str(e)))
                continue

            if action == "Install":
                self._swdb_environment_install(env_id, pkg_types, env_data["groups"])
            elif action == "Upgrade":
                self._swdb_environment_upgrade(env_id, pkg_types, env_data["groups"])
            elif action == "Removed":
                self._swdb_environment_remove(env_id, pkg_types, env_data["groups"])
            else:
                errors.append(TransactionError(
                    _('Unexpected value of environment action "{action}" for environment "{env}".')
                    .format(action=action, env=env_id)
                ))
        except KeyError as e:
            # Malformed replay data: a required key was missing.
            errors.append(TransactionError(
                _('Missing object key "{key}" in an environment.').format(key=e.args[0])
            ))
        except TransactionError as e:
            errors.append(e)

    if errors:
        raise TransactionReplayError(fn, errors)
rpm-software-management/dnf
[ 1066, 367, 1066, 40, 1331307069 ]
def __init__(self, branch, package):
    # Track the pkgdb branch and package name; sp_obj (the spec-file
    # object) stays None until loaded elsewhere.
    Base.__init__(self)
    self.branch = branch
    self.package = package
    self.sp_obj = None
ingvagabund/gofed
[ 67, 28, 67, 88, 1413793073 ]
def getProvides(self):
    """Fetch a spec file from pkgdb and get provides from all its [sub]packages.

    Returns an empty dict when no spec-file object has been loaded yet.
    """
    # "is None" replaces the original "== None": identity comparison is
    # the idiomatic and safe None test.
    if self.sp_obj is None:
        return {}

    return self.sp_obj.getProvides()
ingvagabund/gofed
[ 67, 28, 67, 88, 1413793073 ]
def add_cases(request, run_ids, case_ids):
    """Add one or more cases to the selected test runs.

    :param run_ids: give one or more run IDs. It could be an integer, a
        string containing comma separated IDs, or a list of int each of them is
        a run ID.
    :type run_ids: int, str or list
    :param case_ids: give one or more case IDs. It could be an integer, a
        string containing comma separated IDs, or a list of int each of them is
        a case ID.
    :type case_ids: int, str or list
    :return: a list which is empty on success or a list of mappings with
        failure codes if a failure occured.
    :rtype: list

    Example::

        # Add case id 10 to run 1
        TestRun.add_cases(1, 10)
        # Add case ids list [10, 20] to run list [1, 2]
        TestRun.add_cases([1, 2], [10, 20])
        # Add case ids list '10, 20' to run list '1, 2' with String
        TestRun.add_cases('1, 2', '10, 20')
    """
    trs = TestRun.objects.filter(run_id__in=pre_process_ids(run_ids))
    tcs = TestCase.objects.filter(case_id__in=pre_process_ids(case_ids))

    # Cross product: every requested case is added to every requested run.
    for tr in trs.iterator():
        for tc in tcs.iterator():
            tr.add_case_run(case=tc)
Nitrate/Nitrate
[ 222, 99, 222, 60, 1413958586 ]
def remove_cases(request, run_ids, case_ids):
    """Remove one or more cases from the selected test runs.

    :param run_ids: one or more run IDs: an int, a comma-separated string
        of IDs, or a list of ints.
    :type run_ids: int, str or list
    :param case_ids: one or more case IDs, in the same accepted forms.
    :type case_ids: int, str or list
    :return: a list which is empty on success or a list of mappings with
        failure codes if a failure occured.
    :rtype: list

    Example::

        # Remove case 10 from run 1
        TestRun.remove_cases(1, 10)
        # Remove case ids list [10, 20] from run list [1, 2]
        TestRun.remove_cases([1, 2], [10, 20])
        # Remove case ids list '10, 20' from run list '1, 2' with String
        TestRun.remove_cases('1, 2', '10, 20')
    """
    # Parse the case id list once; it is the same for every run.
    case_id_list = pre_process_ids(case_ids)
    for run in TestRun.objects.filter(run_id__in=pre_process_ids(run_ids)).iterator():
        TestCaseRun.objects.filter(run=run, case__in=case_id_list).delete()
Nitrate/Nitrate
[ 222, 99, 222, 60, 1413958586 ]
def add_tag(request, run_ids, tags):
    """Add one or more tags to the selected test runs.

    :param run_ids: one or more run IDs: an int, a comma-separated string
        of IDs, or a list of ints.
    :type run_ids: int, str or list
    :param tags: tag name or a list of tag names to add.
    :type tags: str or list
    :return: a list which is empty on success or a list of mappings with
        failure codes if a failure occured.
    :rtype: list

    Example::

        # Add tag 'foobar' to run 1
        TestPlan.add_tag(1, 'foobar')
        # Add tag list ['foo', 'bar'] to run list [1, 2]
        TestPlan.add_tag([1, 2], ['foo', 'bar'])
        # Add tag list ['foo', 'bar'] to run list [1, 2] with String
        TestPlan.add_tag('1, 2', 'foo, bar')
    """
    matched_runs = TestRun.objects.filter(pk__in=pre_process_ids(value=run_ids))
    tag_names: List[str] = TestTag.string_to_list(tags)
    for name in tag_names:
        # get_or_create so previously unknown tag names are registered.
        tag, _ = TestTag.objects.get_or_create(name=name)
        run: TestRun
        for run in matched_runs.iterator():
            run.add_tag(tag=tag)
Nitrate/Nitrate
[ 222, 99, 222, 60, 1413958586 ]
def create(request, values):
    """Creates a new Test Run object and stores it in the database.

    :param dict values: a mapping containing these data to create a test run.

        * plan: (int) **Required** ID of test plan
        * build: (int)/(str) **Required** ID of Build
        * manager: (int) **Required** ID of run manager
        * summary: (str) **Required**
        * product: (int) **Required** ID of product
        * product_version: (int) **Required** ID of product version
        * default_tester: (int) optional ID of run default tester
        * plan_text_version: (int) optional
        * estimated_time: (str) optional, could be in format ``2h30m30s``, which is recommended or ``HH:MM:SS``.
        * notes: (str) optional
        * status: (int) optional 0:RUNNING 1:STOPPED (default 0)
        * case: list or (str) optional list of case ids to add to the run
        * tag: list or (str) optional list of tag to add to the run

    :return: a mapping representing newly created :class:`TestRun`.
    :rtype: dict

    .. versionchanged:: 4.5
       Argument ``errata_id`` is removed.

    Example::

        values = {
            'build': 2,
            'manager': 1,
            'plan': 1,
            'product': 1,
            'product_version': 2,
            'summary': 'Testing XML-RPC for TCMS',
        }
        TestRun.create(values)
    """
    from datetime import datetime

    from tcms.core import forms
    from tcms.testruns.forms import XMLRPCNewRunForm

    # product is needed below to populate form choices, so validate it early
    if not values.get("product"):
        raise ValueError("Value of product is required")
    # TODO: XMLRPC only accept HH:MM:SS rather than DdHhMm
    if values.get("estimated_time"):
        # normalize the user-supplied duration string before form validation
        values["estimated_time"] = pre_process_estimated_time(values.get("estimated_time"))
    if values.get("case"):
        values["case"] = pre_process_ids(value=values["case"])

    form = XMLRPCNewRunForm(values)
    # form choices (build, version, ...) depend on the selected product
    form.populate(product_id=values["product"])

    if form.is_valid():
        tr = TestRun.objects.create(
            product_version=form.cleaned_data["product_version"],
            plan_text_version=form.cleaned_data["plan_text_version"],
            # status 1 (STOPPED) stamps the stop time now; 0 (RUNNING) leaves it unset
            stop_date=form.cleaned_data["status"] and datetime.now() or None,
            summary=form.cleaned_data["summary"],
            notes=form.cleaned_data["notes"],
            estimated_time=form.cleaned_data["estimated_time"],
            plan=form.cleaned_data["plan"],
            build=form.cleaned_data["build"],
            manager=form.cleaned_data["manager"],
            default_tester=form.cleaned_data["default_tester"],
        )

        # link requested cases into the new run
        if form.cleaned_data["case"]:
            for c in form.cleaned_data["case"]:
                tr.add_case_run(case=c)
            del c

        # attach tags, creating any names not seen before
        if form.cleaned_data["tag"]:
            tags = form.cleaned_data["tag"]
            tags = [c.strip() for c in tags.split(",") if c]

            for tag in tags:
                t, c = TestTag.objects.get_or_create(name=tag)
                tr.add_tag(tag=t)
            del tag, t, c
    else:
        raise ValueError(forms.errors_to_list(form))
    return tr.serialize()
Nitrate/Nitrate
[ 222, 99, 222, 60, 1413958586 ]
def env_value(request, action, run_ids, env_value_ids):
    """
    Add or remove env values to the given runs, function is same as
    link_env_value or unlink_env_value.

    :param str action: what action to do, ``add`` or ``remove``.
    :param run_ids: give one or more run IDs. It could be an integer, a
        string containing comma separated IDs, or a list of int each of them is
        a run ID.
    :type run_ids: int, str or list
    :param env_value_ids: give one or more environment value IDs. It could be
        an integer, a string containing comma separated IDs, or a list of int
        each of them is a environment value ID.
    :type env_value_ids: int, str or list
    :return: a list which is empty on success or a list of mappings with
        failure codes if a failure occured.
    :rtype: list

    Example::

        # Add env value 20 to run id 8
        TestRun.env_value('add', 8, 20)
    """
    # Bug fix: return the operation's result. Previously the value was
    # dropped and this RPC always returned None, contradicting the
    # documented contract and the behavior of link_env_value.
    return __env_value_operation(request, action, run_ids, env_value_ids)
Nitrate/Nitrate
[ 222, 99, 222, 60, 1413958586 ]
def filter(request, values=None):
    """Performs a search and returns the resulting list of test runs.

    :param dict values: a mapping containing these criteria.

        * build: ForeignKey: TestBuild
        * cc: ForeignKey: Auth.User
        * env_value: ForeignKey: Environment Value
        * default_tester: ForeignKey: Auth.User
        * run_id: (int)
        * manager: ForeignKey: Auth.User
        * notes: (str)
        * plan: ForeignKey: TestPlan
        * summary: (str)
        * tag: ForeignKey: Tag
        * product_version: ForeignKey: Version

    :return: list of mappings of found :class:`TestRun`.
    :rtype: list

    Example::

        # Get all of runs contain 'TCMS' in summary
        TestRun.filter({'summary__icontain': 'TCMS'})
        # Get all of runs managed by xkuang
        TestRun.filter({'manager__username': 'xkuang'})
        # Get all of runs the manager name starts with x
        TestRun.filter({'manager__username__startswith': 'x'})
        # Get runs contain the case ID 1, 2, 3
        TestRun.filter({'case_run__case__case_id__in': [1, 2, 3]})
    """
    # Fix: avoid a mutable default argument (shared dict across calls);
    # use the None sentinel and substitute an empty criteria mapping.
    if values is None:
        values = {}
    return TestRun.to_xmlrpc(values)
Nitrate/Nitrate
[ 222, 99, 222, 60, 1413958586 ]
def filter_count(request, values=None):
    """Performs a search and returns the resulting count of runs.

    :param dict values: a mapping containing criteria. See also
        :meth:`TestRun.filter <tcms.xmlrpc.api.testrun.filter>`.
    :return: total matching runs.
    :rtype: int

    .. seealso::
       See examples of :meth:`TestRun.filter <tcms.xmlrpc.api.testrun.filter>`.
    """
    # Fix: avoid a mutable default argument (shared dict across calls);
    # use the None sentinel and substitute an empty criteria mapping.
    if values is None:
        values = {}
    return distinct_count(TestRun, values)
Nitrate/Nitrate
[ 222, 99, 222, 60, 1413958586 ]
def get(request, run_id):
    """Used to load an existing test run from the database.

    :param int run_id: test run ID.
    :return: a mapping representing found :class:`TestRun`.
    :rtype: dict

    Example::

        TestRun.get(1)
    """
    try:
        tr = TestRun.objects.get(run_id=run_id)
    except TestRun.DoesNotExist as error:
        # NOTE(review): the exception OBJECT is returned here rather than
        # raised, so a missing run yields a serialized error instead of an
        # XML-RPC fault. This looks like a bug, but changing it would alter
        # the RPC's observable behavior for existing clients — confirm the
        # intended contract before fixing.
        return error
    response = tr.serialize()
    # get the xmlrpc tags
    tag_ids = tr.tag.values_list("id", flat=True)
    query = {"id__in": tag_ids}
    tags = TestTag.to_xmlrpc(query)
    # cut 'id' attribute off, only leave 'name' here
    tags_without_id = [tag["name"] for tag in tags]
    # replace tag_id list in the serialize return data
    response["tag"] = tags_without_id
    return response
Nitrate/Nitrate
[ 222, 99, 222, 60, 1413958586 ]
def get_issues(request, run_ids):
    """Get the list of issues attached to this run.

    :param run_ids: one or more run IDs: an int, a comma-separated string
        of IDs, or a list of ints.
    :type run_ids: int, str or list
    :return: a list of mappings of :class:`Issue <tcms.issuetracker.models.Issue>`.
    :rtype: list[dict]

    Example::

        # Get issues belonging to ID 12345
        TestRun.get_issues(1)
        # Get issues belonging to run ids list [1, 2]
        TestRun.get_issues([1, 2])
        # Get issues belonging to run ids list 1 and 2 with string
        TestRun.get_issues('1, 2')
    """
    # Issues hang off case runs, hence the case_run__run__in lookup.
    run_id_list = pre_process_ids(run_ids)
    return Issue.to_xmlrpc({"case_run__run__in": run_id_list})
Nitrate/Nitrate
[ 222, 99, 222, 60, 1413958586 ]
def get_change_history(request, run_id):
    """Get the list of changes to the fields of this run.

    :param int run_id: run ID.
    :return: list of mapping with changed fields and their details.
    :rtype: list

    .. warning::
       NOT IMPLEMENTED - History is different than before.
    """
    # Deliberately unimplemented placeholder kept for API compatibility.
    raise NotImplementedError("Not implemented RPC method")  # pragma: no cover
Nitrate/Nitrate
[ 222, 99, 222, 60, 1413958586 ]
def get_completion_report(request, run_ids):
    """Get a report of the current status of the selected runs combined.

    :param run_ids: one or more run IDs: an int, a comma-separated string
        of IDs, or a list of ints.
    :type run_ids: int, str or list
    :return: A mapping containing counts and percentages of the combined
        totals of case-runs in the run. Counts only the most recently statused
        case-run for a given build and environment.
    :rtype: dict

    .. warning::
       NOT IMPLEMENTED
    """
    # Deliberately unimplemented placeholder kept for API compatibility.
    raise NotImplementedError("Not implemented RPC method")  # pragma: no cover
Nitrate/Nitrate
[ 222, 99, 222, 60, 1413958586 ]
def get_env_values(request, run_id):
    """Get the list of env values to this run.

    :param int run_id: run ID.
    :return: a list of mappings representing found :class:`TCMSEnvValue`.
    :rtype: List[dict]

    Example::

        TestRun.get_env_values(8)
    """
    # Imported locally to avoid a module import cycle with management models.
    from tcms.management.models import TCMSEnvValue

    # FIXME: return [] if run_id is None or ""
    return TCMSEnvValue.to_xmlrpc({"testrun__pk": run_id})
Nitrate/Nitrate
[ 222, 99, 222, 60, 1413958586 ]
def get_tags(request, run_id):
    """Get the list of tags attached to this run.

    :param int run_id: run ID.
    :return: a mapping representing found :class:`TestTag`.
    :rtype: dict

    Example::

        TestRun.get_tags(1)
    """
    run = TestRun.objects.get(run_id=run_id)
    attached_tag_ids = run.tag.values_list("id", flat=True)
    return TestTag.to_xmlrpc({"id__in": attached_tag_ids})
Nitrate/Nitrate
[ 222, 99, 222, 60, 1413958586 ]
def get_test_case_runs(request, run_id):
    """Get the list of cases that this run is linked to.

    :param int run_id: run ID.
    :return: a list of mappings of found :class:`TestCaseRun`.
    :rtype: list[dict]

    Example::

        # Get all of case runs
        TestRun.get_test_case_runs(1)
    """
    query = {"run__run_id": run_id}
    return TestCaseRun.to_xmlrpc(query)
Nitrate/Nitrate
[ 222, 99, 222, 60, 1413958586 ]
def get_test_cases(request, run_id):
    """Get the list of cases that this run is linked to.

    Each serialized case is augmented with its case-run id and case-run
    status taken from the corresponding :class:`TestCaseRun` row.

    :param int run_id: run ID.
    :return: a list of mappings of found :class:`TestCase`.
    :rtype: list[dict]

    Example::

        TestRun.get_test_cases(1)
    """
    serialized_cases = TestCase.to_xmlrpc(query={"case_run__run_id": run_id})

    # Fetch the per-case run info in one query, keyed by case id.
    rows = TestCaseRun.objects.filter(run_id=run_id).values(
        "case", "pk", "case_run_status__name"
    )
    by_case = {}
    for row in rows.iterator():
        by_case[row["case"]] = row

    for entry in serialized_cases:
        row = by_case[entry["case_id"]]
        entry["case_run_id"] = row["pk"]
        entry["case_run_status"] = row["case_run_status__name"]
    return serialized_cases
Nitrate/Nitrate
[ 222, 99, 222, 60, 1413958586 ]
def get_test_plan(request, run_id):
    """Get the plan that this run is associated with.

    :param int run_id: run ID.
    :return: a mapping of found :class:`TestPlan`.
    :rtype: dict

    Example::

        TestRun.get_test_plan(1)
    """
    # select_related avoids a second query for the plan row.
    run = TestRun.objects.select_related("plan").get(run_id=run_id)
    return run.plan.serialize()
Nitrate/Nitrate
[ 222, 99, 222, 60, 1413958586 ]
def remove_tag(request, run_ids, tags):
    """Remove a tag from a run.

    :param run_ids: one or more run IDs: an int, a comma-separated string
        of IDs, or a list of ints.
    :type run_ids: int, str or list
    :param tags: tag name or a list of tag names to remove.
    :type tags: str or list
    :return: a list which is empty on success.
    :rtype: list

    Example::

        # Remove tag 'foo' from run 1
        TestRun.remove_tag(1, 'foo')
        # Remove tag 'foo' and 'bar' from run list [1, 2]
        TestRun.remove_tag([1, 2], ['foo', 'bar'])
        # Remove tag 'foo' and 'bar' from run list '1, 2' with String
        TestRun.remove_tag('1, 2', 'foo, bar')
    """
    matched_runs = TestRun.objects.filter(run_id__in=pre_process_ids(value=run_ids))
    matched_tags = TestTag.objects.filter(name__in=TestTag.string_to_list(tags))
    run: TestRun
    for run in matched_runs.iterator():
        for tag in matched_tags.iterator():
            run.remove_tag(tag=tag)
Nitrate/Nitrate
[ 222, 99, 222, 60, 1413958586 ]
def update(request, run_ids, values):
    """Updates the fields of the selected test run.

    :param run_ids: give one or more run IDs. It could be an integer, a
        string containing comma separated IDs, or a list of int each of them is
        a run ID.
    :type run_ids: int, str or list
    :param dict values: a mapping containing these data to update specified
        runs.

        * plan: (int) TestPlan.plan_id
        * product: (int) Product.id
        * build: (int) Build.id
        * manager: (int) Auth.User.id
        * default_tester: Intege Auth.User.id
        * summary: (str)
        * estimated_time: (TimeDelta) in format ``2h30m30s`` which is recommended or ``HH:MM:SS``.
        * product_version: (int)
        * plan_text_version: (int)
        * notes: (str)
        * status: (int) 0:RUNNING 1:FINISHED

    :return: list of mappings of the updated test runs.
    :rtype: list[dict]

    .. versionchanged:: 4.5
       Argument ``errata_id`` is removed.

    Example::

        # Update status to finished for run 1 and 2
        TestRun.update([1, 2], {'status': 1})
    """
    from datetime import datetime

    from tcms.core import forms
    from tcms.testruns.forms import XMLRPCUpdateRunForm

    # product_version choices depend on the product, so product is mandatory
    # whenever product_version is being changed
    if values.get("product_version") and not values.get("product"):
        raise ValueError('Field "product" is required by product_version')

    if values.get("estimated_time"):
        # normalize the user-supplied duration string before form validation
        values["estimated_time"] = pre_process_estimated_time(values.get("estimated_time"))

    form = XMLRPCUpdateRunForm(values)
    if values.get("product_version"):
        form.populate(product_id=values["product"])

    if form.is_valid():
        trs = TestRun.objects.filter(pk__in=pre_process_ids(value=run_ids))
        # Accumulate only the fields that were actually supplied/validated,
        # so untouched fields keep their current values on bulk update.
        _values = dict()
        if form.cleaned_data["plan"]:
            _values["plan"] = form.cleaned_data["plan"]

        if form.cleaned_data["build"]:
            _values["build"] = form.cleaned_data["build"]

        if form.cleaned_data["manager"]:
            _values["manager"] = form.cleaned_data["manager"]

        if "default_tester" in values:
            default_tester = form.cleaned_data["default_tester"]
            # explicit falsy value clears the default tester
            if values.get("default_tester") and default_tester:
                _values["default_tester"] = default_tester
            else:
                _values["default_tester"] = None

        if form.cleaned_data["summary"]:
            _values["summary"] = form.cleaned_data["summary"]

        if values.get("estimated_time") is not None:
            _values["estimated_time"] = form.cleaned_data["estimated_time"]

        if form.cleaned_data["product_version"]:
            _values["product_version"] = form.cleaned_data["product_version"]

        if "notes" in values:
            # empty string / None explicitly clears the notes field
            if values["notes"] in (None, ""):
                _values["notes"] = values["notes"]
            if form.cleaned_data["notes"]:
                _values["notes"] = form.cleaned_data["notes"]

        if form.cleaned_data["plan_text_version"]:
            _values["plan_text_version"] = form.cleaned_data["plan_text_version"]

        # status is an int flag: 1 (FINISHED) stamps stop_date, 0 clears it
        if isinstance(form.cleaned_data["status"], int):
            if form.cleaned_data["status"]:
                _values["stop_date"] = datetime.now()
            else:
                _values["stop_date"] = None

        trs.update(**_values)
    else:
        raise ValueError(forms.errors_to_list(form))

    query = {"pk__in": trs.values_list("pk", flat=True)}
    return TestRun.to_xmlrpc(query)
Nitrate/Nitrate
[ 222, 99, 222, 60, 1413958586 ]
def link_env_value(request, run_ids, env_value_ids):
    """Link env values to the given runs.

    :param run_ids: one or more run IDs: an int, a comma-separated string
        of IDs, or a list of ints.
    :type run_ids: int, str or list
    :param env_value_ids: one or more environment value IDs, in the same
        accepted forms.
    :type env_value_ids: int, str or list
    :return: a list which is empty on success or a list of mappings with
        failure codes if a failure occured.
    :rtype: list

    Example::

        # Add env value 1 to run id 2
        TestRun.link_env_value(2, 1)
    """
    # Thin wrapper: delegate to the shared add/remove implementation.
    result = __env_value_operation(request, "add", run_ids, env_value_ids)
    return result
Nitrate/Nitrate
[ 222, 99, 222, 60, 1413958586 ]
def find_clang(conf):
    """
    Find the program clang, and if present, try to detect its version number.

    Stores the compiler command in ``conf.env.CC`` and sets
    ``conf.env.CC_NAME`` to ``'clang'``.
    """
    found = conf.find_program(['clang', 'cc'], var='CC')
    cc = conf.cmd_to_list(found)
    # clang understands gcc-style version probing, hence gcc=True
    conf.get_cc_version(cc, gcc=True)
    conf.env.CC_NAME = 'clang'
    conf.env.CC = cc
Gnomescroll/Gnomescroll
[ 29, 13, 29, 2, 1385070845 ]
def clang_common_flags(conf):
    """
    Common flags for clang on nearly all platforms.

    Populates ``conf.env`` with the compiler/linker flag templates and the
    file-name patterns shared by the platform-specific modifiers below.
    """
    env = conf.env

    # compiler invocation
    env['CC_SRC_F'] = []
    env['CC_TGT_F'] = ['-c', '-o']

    # linker: fall back to the compiler driver when none was configured
    if not env['LINK_CC']:
        env['LINK_CC'] = env['CC']
    env['CCLNK_SRC_F'] = []
    env['CCLNK_TGT_F'] = ['-o']

    # flag templates
    env['CPPPATH_ST'] = '-I%s'
    env['DEFINES_ST'] = '-D%s'
    env['LIB_ST'] = '-l%s'  # template for adding libs
    env['LIBPATH_ST'] = '-L%s'  # template for adding libpaths
    env['STLIB_ST'] = '-l%s'
    env['STLIBPATH_ST'] = '-L%s'
    env['RPATH_ST'] = '-Wl,-rpath,%s'
    env['SONAME_ST'] = '-Wl,-h,%s'
    env['SHLIB_MARKER'] = '-Wl,-Bdynamic'
    env['STLIB_MARKER'] = '-Wl,-Bstatic'

    # programs
    env['cprogram_PATTERN'] = '%s'

    # shared libraries
    env['CFLAGS_cshlib'] = ['-fPIC']
    env['LINKFLAGS_cshlib'] = ['-shared']
    env['cshlib_PATTERN'] = 'lib%s.so'

    # static libraries
    env['LINKFLAGS_cstlib'] = ['-Wl,-Bstatic']
    env['cstlib_PATTERN'] = 'lib%s.a'

    # OSX bundle support
    env['LINKFLAGS_MACBUNDLE'] = ['-bundle', '-undefined', 'dynamic_lookup']
    env['CFLAGS_MACBUNDLE'] = ['-fPIC']
    env['macbundle_PATTERN'] = '%s.bundle'
Gnomescroll/Gnomescroll
[ 29, 13, 29, 2, 1385070845 ]
def clang_modifier_win32(conf):
    """Configuration flags for executing clang on Windows"""
    env = conf.env
    env['cprogram_PATTERN'] = '%s.exe'

    env['cshlib_PATTERN'] = '%s.dll'
    env['implib_PATTERN'] = 'lib%s.dll.a'
    env['IMPLIB_ST'] = '-Wl,--out-implib,%s'

    # -fPIC is meaningless on win32; replace the common flags entirely
    env['CFLAGS_cshlib'] = []

    # TODO adding nonstandard defines like this DLL_EXPORT is not a good idea
    env.append_value('CFLAGS_cshlib', ['-DDLL_EXPORT'])

    # Auto-import is enabled by default even without this option,
    # but enabling it explicitly has the nice effect of suppressing the rather boring, debug-level messages
    # that the linker emits otherwise.
    env.append_value('LINKFLAGS', ['-Wl,--enable-auto-import'])
Gnomescroll/Gnomescroll
[ 29, 13, 29, 2, 1385070845 ]
def clang_modifier_cygwin(conf):
    """Configuration flags for executing clang on Cygwin"""
    # Start from the win32 settings, then override the Cygwin-specific bits.
    clang_modifier_win32(conf)
    env = conf.env
    env['cshlib_PATTERN'] = 'cyg%s.dll'
    env.append_value('LINKFLAGS_cshlib', ['-Wl,--enable-auto-image-base'])
    # drop the DLL_EXPORT define added by the win32 modifier
    env['CFLAGS_cshlib'] = []
Gnomescroll/Gnomescroll
[ 29, 13, 29, 2, 1385070845 ]
def clang_modifier_darwin(conf):
    """Configuration flags for executing clang on MacOS"""
    env = conf.env
    env['CFLAGS_cshlib'] = ['-fPIC', '-compatibility_version', '1', '-current_version', '1']
    env['LINKFLAGS_cshlib'] = ['-dynamiclib']
    env['cshlib_PATTERN'] = 'lib%s.dylib'

    env['FRAMEWORKPATH_ST'] = '-F%s'
    env['FRAMEWORK_ST'] = ['-framework']
    env['ARCH_ST'] = ['-arch']

    # ELF-style linker options from the common flags do not apply on Mach-O
    env['LINKFLAGS_cstlib'] = []
    env['SHLIB_MARKER'] = []
    env['STLIB_MARKER'] = []
    env['SONAME_ST'] = []
Gnomescroll/Gnomescroll
[ 29, 13, 29, 2, 1385070845 ]
def clang_modifier_aix(conf):
    """Configuration flags for executing clang on AIX"""
    env = conf.env
    # runtime linking is required for shared objects on AIX
    env['LINKFLAGS_cprogram'] = ['-Wl,-brtl']
    env['LINKFLAGS_cshlib'] = ['-shared', '-Wl,-brtl,-bexpfull']
    env['SHLIB_MARKER'] = []
Gnomescroll/Gnomescroll
[ 29, 13, 29, 2, 1385070845 ]
def clang_modifier_hpux(conf):
    """Configuration flags for executing clang on HP-UX"""
    env = conf.env
    env['SHLIB_MARKER'] = []
    env['CFLAGS_cshlib'] = ['-fPIC', '-DPIC']
    # HP-UX shared libraries use the .sl suffix
    env['cshlib_PATTERN'] = 'lib%s.sl'
Gnomescroll/Gnomescroll
[ 29, 13, 29, 2, 1385070845 ]
def clang_modifier_platform(conf):
    """Execute platform-specific functions based on *clang_modifier_+NAME*"""
    # The destination platform is detected automatically by looking at the
    # macros the compiler predefines; if not recognised, it falls back to
    # sys.platform. The hook looked up on the configuration context is
    # ``clang_modifier_<DEST_OS>``; when absent, nothing happens.
    hook = getattr(conf, 'clang_modifier_' + conf.env.DEST_OS, None)
    if hook:
        hook()
Gnomescroll/Gnomescroll
[ 29, 13, 29, 2, 1385070845 ]