Dataset schema:
    function: string (lengths 11 to 56k)
    repo_name: string (lengths 5 to 60)
    features: sequence
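The rows that follow are (function, repo_name, features) records. As a loose illustration only (it assumes the records are stored as JSON Lines with exactly these three fields; the file name is hypothetical), they could be iterated like this:

import json

# Hypothetical file name; assumes one JSON object per line with the
# "function", "repo_name" and "features" fields described above.
with open("functions.jsonl") as fh:
    for line in fh:
        row = json.loads(line)
        print(row["repo_name"], len(row["function"]), row["features"])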
def test_jointree_marginals3(self):
    ft = FactorTree.create_jointree(self.bn)
    resFactor = ft.marginals(["sprinkler"])
    np.testing.assert_array_almost_equal(resFactor.get_potential(),
                                         np.array([0.42, 0.58]))
SocialCognitiveSystems/PRIMO
[ 4, 3, 4, 10, 1486413504 ]
def test_jointree_marginals_trivial_evidence(self):
    ft = FactorTree.create_jointree(self.bn)
    ft.set_evidence({"slippery_road": "true"})
    resFactor = ft.marginals(["slippery_road"])
    np.testing.assert_array_almost_equal(resFactor.get_potential(),
                                         np.array([1.0, 0.0]))
SocialCognitiveSystems/PRIMO
[ 4, 3, 4, 10, 1486413504 ]
def test_jointree_evidence_trivial(self):
    ft = FactorTree.create_jointree(self.bn)
    ft.set_evidence({"wet_grass": "false"})
    resFactor = ft.marginals(["rain"])
    np.testing.assert_array_almost_equal(resFactor.get_potential(),
                                         np.array([0.158858, 0.841142]))
SocialCognitiveSystems/PRIMO
[ 4, 3, 4, 10, 1486413504 ]
def test_jointree_marginal_evidence_trivial_multiple_evidence(self):
    ft = FactorTree.create_jointree(self.bn)
    ft.set_evidence({"sprinkler": "true", "rain": "false"})
    resFactor = ft.marginals(["wet_grass"])
    np.testing.assert_array_almost_equal(resFactor.get_potential(),
                                         np.array([0.1, 0.9]))
SocialCognitiveSystems/PRIMO
[ 4, 3, 4, 10, 1486413504 ]
def test_jointree_marginal_evidence(self):
    ft = FactorTree.create_jointree(self.bn)
    ft.set_evidence({"winter": "true"})
    resFactor = ft.marginals(["wet_grass"])
    np.testing.assert_array_almost_equal(resFactor.get_potential(),
                                         np.array([0.668, 0.332]))
SocialCognitiveSystems/PRIMO
[ 4, 3, 4, 10, 1486413504 ]
def test_jointree_marginal_evidence_multiple_evidence(self):
    ft = FactorTree.create_jointree(self.bn)
    ft.set_evidence({"winter": "true", "rain": "false"})
    resFactor = ft.marginals(["wet_grass"])
    np.testing.assert_array_almost_equal(resFactor.get_potential(),
                                         np.array([0.02, 0.98]))
SocialCognitiveSystems/PRIMO
[ 4, 3, 4, 10, 1486413504 ]
def test_jointree_marginal_soft_evidence(self):
    bn = BayesianNetwork()
    cloth = DiscreteNode("cloth", ["green", "blue", "red"])
    sold = DiscreteNode("sold")
SocialCognitiveSystems/PRIMO
[ 4, 3, 4, 10, 1486413504 ]
def __init__(self, get_response=None):
    self.get_response = get_response
    super(MiddlewareMixin, self).__init__()
rdegges/django-sslify
[ 338, 46, 338, 10, 1335654320 ]
def _single_peak(values, relative_cutoff, minval, invalidate_distance):
    """Takes a single peak if it is high enough compared to all other peaks.

    Args:
        values: 1D tensor of values to take the peaks on.
        relative_cutoff: The fraction of the highest peak which all other peaks
            should be below.
        minval: The peak should have at least this value.
        invalidate_distance: Exclude values that are up to invalidate_distance
            away from the peak.

    Returns:
        The index of the single peak in `values`, or -1 if there is not a
        single peak that satisfies `relative_cutoff`.
    """
    relative_cutoff = tf.convert_to_tensor(relative_cutoff, tf.float32)

    # argmax is safe because the histogram is always non-empty.
    peak = tf.to_int32(tf.argmax(values))
    # Take values > minval away from the peak.
    other_values = tf.boolean_mask(
        values,
        tf.greater(
            tf.abs(tf.range(tf.shape(values)[0]) - peak), invalidate_distance))
    should_take_peak = tf.logical_and(
        tf.greater_equal(values[peak], minval),
        # values[peak] * relative_cutoff must be >= other_values.
        tf.reduce_all(
            tf.greater_equal(
                tf.to_float(values[peak]) * relative_cutoff,
                tf.to_float(other_values))))
    return tf.cond(should_take_peak, lambda: peak, lambda: -1)
tensorflow/moonlight
[ 311, 68, 311, 25, 1523981102 ]
def do_filter_peaks():
    """Process the peaks if they are non-empty.

    Returns:
        The filtered peaks. Peaks below the cutoff when compared to the
        highest peak are removed. If the peaks are invalid, then an empty
        list is returned.
    """
    histogram_size = tf.shape(staffline_distance_histogram)[0]
    peak_values = tf.to_float(tf.gather(staffline_distance_histogram, peaks))
    max_value = tf.reduce_max(peak_values)
    allowed_peaks = tf.greater_equal(peak_values,
                                     max_value * tf.constant(_PEAK_CUTOFF))
    # Check if there are too many detected staffline distances, and we should
    # return an empty list.
    allowed_peaks &= tf.less_equal(
        tf.reduce_sum(tf.to_int32(allowed_peaks)),
        _MAX_ALLOWED_UNIQUE_STAFFLINE_DISTANCES)
    # Check if any values sufficiently far away from the peaks are too high.
    # This means the peaks are not sharp enough and we should return an empty
    # list.
    far_from_peak = tf.greater(
        tf.reduce_min(
            tf.abs(tf.range(histogram_size)[None, :] - peaks[:, None]),
            axis=0),
        _STAFFLINE_DISTANCE_INVALIDATE_DISTANCE)
    allowed_peaks &= tf.less(
        tf.to_float(
            tf.reduce_max(
                tf.boolean_mask(staffline_distance_histogram,
                                far_from_peak))),
        max_value * tf.constant(_PEAK_CUTOFF))

    return tf.boolean_mask(peaks, allowed_peaks)
tensorflow/moonlight
[ 311, 68, 311, 25, 1523981102 ]
def _estimate_staffline_thickness(columns, values, lengths, staffline_distance):
    """Estimates the staffline thickness of a music score.

    Args:
        columns: 1D array. The column indices of each consecutive vertical run.
        values: 1D array. The value (0 or 1) of each vertical run.
        lengths: 1D array. The length of each vertical run.
        staffline_distance: A 1D tensor of the possible staffline distances in
            the image. One of the distances may be chosen arbitrarily.

    Returns:
        A scalar tensor with the staffline thickness for the entire page, or -1
        if it could not be estimated (staffline_distance is empty, or there are
        not enough runs to estimate the staffline thickness).
    """
    with tf.name_scope('estimate_staffline_thickness'):

        def do_estimate():
            """Compute the thickness if distance detection was successful."""
            run_pair_lengths = lengths[:-1] + lengths[1:]
            # Use the smallest staffline distance to estimate the staffline
            # thickness.
            keep_pair = tf.logical_and(
                tf.equal(columns[:-1], columns[1:]),
                tf.equal(run_pair_lengths, staffline_distance[0]))
            run_pair_lengths = tf.boolean_mask(run_pair_lengths, keep_pair)
            start_values = tf.boolean_mask(values[:-1], keep_pair)
            start_lengths = tf.boolean_mask(lengths[:-1], keep_pair)
            end_lengths = tf.boolean_mask(lengths[1:], keep_pair)
            staffline_thickness_values = tf.where(
                tf.not_equal(start_values, 0), start_lengths, end_lengths)
            staffline_thickness_histogram = tf.bincount(
                staffline_thickness_values,
                minlength=_MAX_STAFFLINE_DISTANCE_THICKNESS_VALUE,
                maxlength=_MAX_STAFFLINE_DISTANCE_THICKNESS_VALUE)
            return _single_peak(
                staffline_thickness_histogram,
                _PEAK_CUTOFF,
                minval=1,
                invalidate_distance=_STAFFLINE_THICKNESS_INVALIDATE_DISTANCE)

        return tf.cond(
            tf.greater(tf.shape(staffline_distance)[0], 0), do_estimate,
            lambda: tf.constant(-1, tf.int32))
tensorflow/moonlight
[ 311, 68, 311, 25, 1523981102 ]
def save(self, *args, **kwargs):
    """Perform descriptor validation and save object."""
    if self.descriptor_schema:
        try:
            validate_schema(self.descriptor, self.descriptor_schema.schema)
            self.descriptor_dirty = False
        except DirtyError:
            self.descriptor_dirty = True
    elif self.descriptor and self.descriptor != {}:
        raise ValueError(
            "`descriptor_schema` must be defined if `descriptor` is given"
        )
    super().save()
genialis/resolwe
[ 34, 27, 34, 7, 1428595640 ]
def duplicate(self, contributor):
    """Duplicate (make a copy) ``Collection`` objects."""
    return bulk_duplicate(collections=self, contributor=contributor)
genialis/resolwe
[ 34, 27, 34, 7, 1428595640 ]
def is_duplicate(self):
    """Return True if collection is a duplicate."""
    return bool(self.duplicated)
genialis/resolwe
[ 34, 27, 34, 7, 1428595640 ]
def __virtual__():
    return True
saltstack/salt
[ 13089, 5388, 13089, 3074, 1298233016 ]
def process_document_splitter_sample(
    project_id: str, location: str, processor_id: str, file_path: str
googleapis/python-documentai
[ 71, 29, 71, 3, 1575936569 ]
def page_refs_to_string(page_refs: dict) -> str:
    """Converts a page ref to a string describing the page or page range."""
    if len(page_refs) == 1:
        num = str(int(page_refs[0].page) + 1)
        return f"page {num} is"
    else:
        start = str(int(page_refs[0].page) + 1)
        end = str(int(page_refs[1].page) + 1)
        return f"pages {start} to {end} are"
googleapis/python-documentai
[ 71, 29, 71, 3, 1575936569 ]
def sample_complete_trial():
    # Create a client
    client = aiplatform_v1.VizierServiceClient()

    # Initialize request argument(s)
    request = aiplatform_v1.CompleteTrialRequest(
        name="name_value",
    )

    # Make the request
    response = client.complete_trial(request=request)

    # Handle the response
    print(response)
googleapis/python-aiplatform
[ 306, 205, 306, 52, 1600875819 ]
def requires_submit(func):
    """
    Decorator to ensure that a submit has been performed before
    calling the method.

    Args:
        func (callable): test function to be decorated.

    Returns:
        callable: the decorated function.
    """
    @functools.wraps(func)
    def _wrapper(self, *args, **kwargs):
        if self._future is None:
            raise JobError("Job not submitted yet!. You have to .submit() first!")
        return func(self, *args, **kwargs)
    return _wrapper
QISKit/qiskit-sdk-py
[ 3515, 1875, 3515, 1061, 1488560562 ]
def __init__(self, backend, job_id, fn, qobj):
    super().__init__(backend, job_id)
    self._fn = fn
    self._qobj = qobj
    self._future = None
QISKit/qiskit-sdk-py
[ 3515, 1875, 3515, 1061, 1488560562 ]
def result(self, timeout=None):
    # pylint: disable=arguments-differ
    """Get job result. The behavior is the same as the underlying
    concurrent Future objects,

    https://docs.python.org/3/library/concurrent.futures.html#future-objects

    Args:
        timeout (float): number of seconds to wait for results.

    Returns:
        qiskit.Result: Result object

    Raises:
        concurrent.futures.TimeoutError: if timeout occurred.
        concurrent.futures.CancelledError: if job cancelled before completed.
    """
    return self._future.result(timeout=timeout)
QISKit/qiskit-sdk-py
[ 3515, 1875, 3515, 1061, 1488560562 ]
def cancel(self):
    return self._future.cancel()
QISKit/qiskit-sdk-py
[ 3515, 1875, 3515, 1061, 1488560562 ]
def status(self):
    """Gets the status of the job by querying the Python's future

    Returns:
        qiskit.providers.JobStatus: The current JobStatus

    Raises:
        JobError: If the future is in unexpected state
        concurrent.futures.TimeoutError: if timeout occurred.
    """
    # The order is important here
    if self._future.running():
        _status = JobStatus.RUNNING
    elif self._future.cancelled():
        _status = JobStatus.CANCELLED
    elif self._future.done():
        _status = JobStatus.DONE if self._future.exception() is None else JobStatus.ERROR
    else:
        # Note: There is an undocumented Future state: PENDING, that seems to show up when
        # the job is enqueued, waiting for someone to pick it up. We need to deal with this
        # state but there's no public API for it, so we are assuming that if the job is not
        # in any of the previous states, is PENDING, ergo INITIALIZING for us.
        _status = JobStatus.INITIALIZING
    return _status
QISKit/qiskit-sdk-py
[ 3515, 1875, 3515, 1061, 1488560562 ]
def on_init(self, prefix='sqlite', id=None, db=None, **kwargs):
    """
    Adds processing to initialization

    :param prefix: the main keyword for configuration of this space
    :type prefix: str

    :param id: the unique identifier of the related space (optional)
    :type id: str

    :param db: name of the file that contains Sqlite data (optional)
    :type db: str

    Example::

        store = SqliteStore(context=context, prefix='sqlite')

    Here we create a new store powered by Sqlite, and use
    settings under the key ``sqlite`` in the context of this bot.
    """
    assert prefix
    self.prefix = prefix

    self.id = id if id else '*id'

    if db:
        self.context.set(self.prefix+'.db', db)
bernard357/shellbot
[ 10, 3, 10, 4, 1491172109 ]
def get_db(self):
    """
    Gets a handle on the database
    """
    db = self.context.get(self.prefix+'.db', 'store.db')
    return sqlite3.connect(db)
bernard357/shellbot
[ 10, 3, 10, 4, 1491172109 ]
def _set(self, key, value, handle=None):
    """
    Sets a permanent value

    :param key: name of the value
    :type key: str

    :param value: actual value
    :type value: any serializable type is accepted

    :param handle: an optional instance of a Sqlite database
    :type handle: a connection

    This functions stores or updates a value in the back-end storage
    system.

    Example::

        store._set('parameter_123', 'George')
    """
    handle = handle if handle else self.get_db()
    cursor = handle.cursor()
    cursor.execute("DELETE FROM store WHERE context=? AND key=?",
                   (self.id, key))
    cursor.execute("INSERT INTO store (context,key,value) VALUES (?,?,?)",
                   (self.id, key, value))
    handle.commit()
    cursor.close()
bernard357/shellbot
[ 10, 3, 10, 4, 1491172109 ]
def show_metrics(step_value, metric_values, loss_value=None):
    print('{}: {}nominal accuracy = {:.2f}%, '
          'verified = {:.2f}%, attack = {:.2f}%'.format(
              step_value,
              'loss = {}, '.format(loss_value) if loss_value is not None else '',
              metric_values.nominal_accuracy * 100.,
              metric_values.verified_accuracy * 100.,
              metric_values.attack_accuracy * 100.))
deepmind/interval-bound-propagation
[ 135, 31, 135, 2, 1542969398 ]
def main(unused_args):
    logging.info('Training IBP on %s...', FLAGS.dataset.upper())
    step = tf.train.get_or_create_global_step()

    # Learning rate.
    learning_rate = ibp.parse_learning_rate(step, FLAGS.learning_rate)

    # Dataset.
    input_bounds = (0., 1.)
    num_classes = 10
    if FLAGS.dataset == 'mnist':
        data_train, data_test = tf.keras.datasets.mnist.load_data()
    else:
        assert FLAGS.dataset == 'cifar10', (
            'Unknown dataset "{}"'.format(FLAGS.dataset))
        data_train, data_test = tf.keras.datasets.cifar10.load_data()
        data_train = (data_train[0], data_train[1].flatten())
        data_test = (data_test[0], data_test[1].flatten())
    data = ibp.build_dataset(data_train, batch_size=FLAGS.batch_size,
                             sequential=False)
    if FLAGS.dataset == 'cifar10':
        data = data._replace(image=ibp.randomize(
            data.image, (32, 32, 3), expand_shape=(40, 40, 3),
            crop_shape=(32, 32, 3), vertical_flip=True))

    # Base predictor network.
    original_predictor = ibp.DNN(num_classes, layers(FLAGS.model))
    predictor = original_predictor
    if FLAGS.dataset == 'cifar10':
        mean = (0.4914, 0.4822, 0.4465)
        std = (0.2023, 0.1994, 0.2010)
        predictor = ibp.add_image_normalization(original_predictor, mean, std)
    if FLAGS.crown_bound_init > 0 or FLAGS.crown_bound_final > 0:
        logging.info('Using CROWN-IBP loss.')
        model_wrapper = ibp.crown.VerifiableModelWrapper
        loss_helper = ibp.crown.create_classification_losses
    else:
        model_wrapper = ibp.VerifiableModelWrapper
        loss_helper = ibp.create_classification_losses
    predictor = model_wrapper(predictor)

    # Training.
    train_losses, train_loss, _ = loss_helper(
        step,
        data.image,
        data.label,
        predictor,
        FLAGS.epsilon_train,
        loss_weights={
            'nominal': {
                'init': FLAGS.nominal_xent_init,
                'final': FLAGS.nominal_xent_final,
                'warmup': FLAGS.verified_xent_init + FLAGS.nominal_xent_init
            },
            'attack': {
                'init': FLAGS.attack_xent_init,
                'final': FLAGS.attack_xent_final
            },
            'verified': {
                'init': FLAGS.verified_xent_init,
                'final': FLAGS.verified_xent_final,
                'warmup': 0.
            },
            'crown_bound': {
                'init': FLAGS.crown_bound_init,
                'final': FLAGS.crown_bound_final,
                'warmup': 0.
            },
        },
        warmup_steps=FLAGS.warmup_steps,
        rampup_steps=FLAGS.rampup_steps,
        input_bounds=input_bounds)
    saver = tf.train.Saver(original_predictor.get_variables())
    optimizer = tf.train.AdamOptimizer(learning_rate)
    update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
    with tf.control_dependencies(update_ops):
        train_op = optimizer.minimize(train_loss, step)

    # Test using while loop.
    def get_test_metrics(batch_size, attack_builder=ibp.UntargetedPGDAttack):
        """Returns the test metrics."""
        num_test_batches = len(data_test[0]) // batch_size
        assert len(data_test[0]) % batch_size == 0, (
            'Test data is not a multiple of batch size.')

        def cond(i, *unused_args):
            return i < num_test_batches

        def body(i, metrics):
            """Compute the sum of all metrics."""
            test_data = ibp.build_dataset(data_test, batch_size=batch_size,
                                          sequential=True)
            predictor(test_data.image, override=True, is_training=False)
            input_interval_bounds = ibp.IntervalBounds(
                tf.maximum(test_data.image - FLAGS.epsilon, input_bounds[0]),
                tf.minimum(test_data.image + FLAGS.epsilon, input_bounds[1]))
            predictor.propagate_bounds(input_interval_bounds)
            test_specification = ibp.ClassificationSpecification(
                test_data.label, num_classes)
            test_attack = attack_builder(predictor, test_specification,
                                         FLAGS.epsilon,
                                         input_bounds=input_bounds,
                                         optimizer_builder=ibp.UnrolledAdam)
            test_losses = ibp.Losses(predictor, test_specification, test_attack)
            test_losses(test_data.label)
            new_metrics = []
            for m, n in zip(metrics, test_losses.scalar_metrics):
                new_metrics.append(m + n)
            return i + 1, new_metrics

        total_count = tf.constant(0, dtype=tf.int32)
        total_metrics = [tf.constant(0, dtype=tf.float32)
                         for _ in range(len(ibp.ScalarMetrics._fields))]
        total_count, total_metrics = tf.while_loop(
            cond,
            body,
            loop_vars=[total_count, total_metrics],
            back_prop=False,
            parallel_iterations=1)
        total_count = tf.cast(total_count, tf.float32)
        test_metrics = []
        for m in total_metrics:
            test_metrics.append(m / total_count)
        return ibp.ScalarMetrics(*test_metrics)

    test_metrics = get_test_metrics(FLAGS.batch_size, ibp.UntargetedPGDAttack)
    summaries = []
    for f in test_metrics._fields:
        summaries.append(tf.summary.scalar(f, getattr(test_metrics, f)))
    test_summaries = tf.summary.merge(summaries)
    test_writer = tf.summary.FileWriter(os.path.join(FLAGS.output_dir, 'test'))

    # Run everything.
    tf_config = tf.ConfigProto()
    tf_config.gpu_options.allow_growth = True
    with tf.train.SingularMonitoredSession(config=tf_config) as sess:
        for _ in range(FLAGS.steps):
            iteration, loss_value, _ = sess.run(
                [step, train_losses.scalar_losses.nominal_cross_entropy,
                 train_op])
            if iteration % FLAGS.test_every_n == 0:
                metric_values, summary = sess.run([test_metrics, test_summaries])
                test_writer.add_summary(summary, iteration)
                show_metrics(iteration, metric_values, loss_value=loss_value)
        saver.save(sess._tf_sess(),  # pylint: disable=protected-access
                   os.path.join(FLAGS.output_dir, 'model'),
                   global_step=FLAGS.steps - 1)
deepmind/interval-bound-propagation
[ 135, 31, 135, 2, 1542969398 ]
def _module_dir(handle):
    """Returns the directory where to cache the module."""
    cache_dir = resolver.tfhub_cache_dir(use_temp=True)
    return resolver.create_local_module_dir(
        cache_dir,
        hashlib.sha1(handle.encode("utf8")).hexdigest())
tensorflow/hub
[ 3285, 1698, 3285, 3, 1520841342 ]
def is_supported(self, handle):
    # HTTP(S) handles are assumed to point to tarfiles.
    if not self.is_http_protocol(handle):
        return False
    # AUTO defaults to COMPRESSED
    load_format = resolver.model_load_format()
    return load_format in [
        resolver.ModelLoadFormat.COMPRESSED.value,
        resolver.ModelLoadFormat.AUTO.value
    ]
tensorflow/hub
[ 3285, 1698, 3285, 3, 1520841342 ]
def download(handle, tmp_dir):
    """Fetch a module via HTTP(S), handling redirect and download headers."""
    request = urllib.request.Request(
        self._append_compressed_format_query(handle))
    response = self._call_urlopen(request)
    return resolver.DownloadManager(handle).download_and_uncompress(
        response, tmp_dir)
tensorflow/hub
[ 3285, 1698, 3285, 3, 1520841342 ]
def _lock_file_timeout_sec(self):
    # This method is provided as a convenience to simplify testing.
    return LOCK_FILE_TIMEOUT_SEC
tensorflow/hub
[ 3285, 1698, 3285, 3, 1520841342 ]
def is_supported(self, handle):
    return handle.startswith("gs://") and _is_tarfile(handle)
tensorflow/hub
[ 3285, 1698, 3285, 3, 1520841342 ]
def download(handle, tmp_dir):
    return resolver.DownloadManager(handle).download_and_uncompress(
        tf.compat.v1.gfile.GFile(handle, "rb"), tmp_dir)
tensorflow/hub
[ 3285, 1698, 3285, 3, 1520841342 ]
def test_kdelta():
    assert np.isclose(kdelta(1, 1), 1.)
    assert np.isclose(kdelta(0, 1), 0.)
quantumlib/ReCirq
[ 232, 110, 232, 34, 1584057093 ]
def test_energy_from_opdm():
    """Build test assuming sampling functions work"""
    rhf_objective, molecule, parameters, obi, tbi = make_h6_1_3()
    unitary, energy, _ = rhf_func_generator(rhf_objective)
    parameters = np.array([0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9])
    initial_opdm = np.diag([1] * 3 + [0] * 3)
    final_opdm = unitary(parameters) @ initial_opdm @ unitary(
        parameters).conj().T
    test_energy = energy_from_opdm(final_opdm,
                                   constant=molecule.nuclear_repulsion,
                                   one_body_tensor=obi,
                                   two_body_tensor=tbi)
    true_energy = energy(parameters)
    assert np.allclose(test_energy, true_energy)
quantumlib/ReCirq
[ 232, 110, 232, 34, 1584057093 ]
def test_mcweeny():
    np.random.seed(82)
    opdm = np.array([[0.766034130, -0.27166330, -0.30936072, -0.08471057,
                      -0.04878244, -0.01285432],
                     [-0.27166330, 0.67657015, -0.37519640, -0.02101843,
                      -0.03568214, -0.05034585],
                     [-0.30936072, -0.37519640, 0.55896791, 0.04267370,
                      -0.02258184, -0.08783738],
                     [-0.08471057, -0.02101843, 0.04267370, 0.05450848,
                      0.11291253, 0.17131658],
                     [-0.04878244, -0.03568214, -0.02258184, 0.11291253,
                      0.26821219, 0.42351185],
                     [-0.01285432, -0.05034585, -0.08783738, 0.17131658,
                      0.42351185, 0.67570713]])
    for i, j in product(range(6), repeat=2):
        opdm[i, j] += np.random.randn() * 1.0E-3
    opdm = 0.5 * (opdm + opdm.T)
    pure_opdm = mcweeny_purification(opdm)
    w, _ = np.linalg.eigh(pure_opdm)
    assert len(np.where(w < -1.0E-9)[0]) == 0
quantumlib/ReCirq
[ 232, 110, 232, 34, 1584057093 ]
def GetNotebook():
    """Downloads the ipynb source of Colab notebook"""
    notebook = google_message.blocking_request(
        "get_ipynb", request="", timeout_sec=120)["ipynb"]
    return notebook
google/prog-edu-assistant
[ 25, 20, 25, 19, 1550714338 ]
def _accept(random_sample: float, cost_diff: float,
            temp: float) -> Tuple[bool, float]:
    """Calculates probability and draws if solution should be accepted.

    Based on exp(-Delta*E/T) formula.

    Args:
        random_sample: Uniformly distributed random number in the range [0, 1).
        cost_diff: Cost difference between new and previous solutions.
        temp: Current temperature.

    Returns:
        Tuple of boolean and float, with boolean equal to True if solution is
        accepted, and False otherwise. The float value is acceptance
        probability.
    """
    exponent = -cost_diff / temp
    if exponent >= 0.0:
        return True, 1.0

    probability = math.exp(exponent)
    return probability > random_sample, probability
quantumlib/Cirq
[ 3678, 836, 3678, 314, 1513294909 ]
def get_integration_folder():
    """
    returns the integration test folder
    """
    return os.path.abspath(os.path.join(os.path.dirname(__file__), os.pardir))
cloudfoundry-community/splunk-firehose-nozzle
[ 28, 30, 28, 14, 1467750745 ]
def get_all_affiliations(request):
    """
    return a dictionary of affiliation indicators.

    The first class affiliations:
    ["all_employee"]: employee or clinician (include student employee)
    ["employee"]: True if is current employee (not student employee, clinician)
    ["clinician"]: True if in uw affiliation clinical groups
    ["faculty"]: True if the user is currently faculty.
    ["instructor"]: True if is instructor in the past 6 years
    ["staff_employee"]: True if the user is currently staff.
    ["student"]: True if the user is currently an UW student.
    ["stud_employee"]: True if the user is currently a student employee.
    ["grad"]: True if the user is currently an UW graduate student.
    ["undergrad"]: True if the user is currently an UW undergraduate student.
    ["applicant"]: True if the user is currently a UW applicant
    ["pce"]: True if the user is an UW PCE student.
    ["grad_c2"]: True if the user takes UW PCE grad courses
    ["undergrad_c2"]: True if the user takes UW PCE undergrad courses
    ["seattle"]: True if the user is an UW Seattle student
    ["bothell"]: True if the user is an UW Bothell student
    ["tacoma"]: True if the user is an UW Tacoma student
    ["official_seattle"]: True if the user is Seattle employee
    ["official_bothell"]: True if the user is Bothell employee
    ["official_tacoma"]: True if the user is Tacoma employee
    ["official_pce"]: waiting on sws to add a field in Enrollment.
    ["class_level"]: class level in current term enrollment.
    ["latest_class_level"]: the class level in the latest enrollment.
    ["F1"]: F1 international student
    ["J1"]: J1 international student
    ["intl_stud"]: F1 or J1 international student
    ["hxt_viewer"]: Husky Experience Toolkit viewer
    ["no_1st_class_affi"]: not applicant, current employee, clinician,
        student, instructor

    The following are secondary affiliations (without 1st_class_aff):
    ["alumni"]: True if the user is currently an UW alumni and NOT
        current student, employee, applicant
    ["alum_asso"]: alumni association member
    ["retiree"]: True if the user is a retired staff and NOT current
        applicant, student, employee
    ["past_employee"]: True if the user is a former employee and NOT
        current student, applicant
    ["past_stud"]: True if the user is a former student and NOT
        current employee, applicant
    """
    if hasattr(request, 'myuw_user_affiliations'):
        return request.myuw_user_affiliations

    not_major_affi = (not is_applicant(request) and
                      not is_employee(request) and
                      not is_clinician(request) and
                      not is_instructor(request) and
                      not is_student(request))
    (is_sea_stud, is_undergrad, is_hxt_viewer) = get_is_hxt_viewer(request)
    data = {"class_level": None,
            "latest_class_level": get_latest_class_level(request),
            "grad": is_grad_student(request),
            "undergrad": is_undergrad,
            "applicant": is_applicant(request),
            "student": is_student(request),
            "pce": is_pce_student(request),
            "grad_c2": is_grad_c2(request),
            "undergrad_c2": is_undergrad_c2(request),
            "F1": False,
            "J1": False,
            "intl_stud": False,
            "2fa_permitted": is_2fa_permitted(request),
            "all_employee": is_employee(request) or is_clinician(request),
            "clinician": is_clinician(request),
            "employee": (is_employee(request) and
                         not is_student_employee(request)),
            "faculty": is_faculty(request),
            "instructor": is_instructor(request),
            "staff_employee": is_staff_employee(request),
            "stud_employee": is_student_employee(request),
            "seattle": is_sea_stud,
            "bothell": is_bothell_student(request),
            "tacoma": is_tacoma_student(request),
            "official_seattle": False,
            "official_bothell": False,
            "official_tacoma": False,
            "hxt_viewer": is_hxt_viewer,
            "alum_asso": is_alum_asso(request),
            "alumni": is_alumni(request) and not_major_affi,
            "retiree": is_retiree(request) and not_major_affi,
            "past_employee": is_prior_employee(request) and not_major_affi,
            "past_stud": is_prior_student(request) and not_major_affi,
            "no_1st_class_affi": not_major_affi,
            }

    campuses = []
    if data["student"]:
        data["class_level"] = get_cur_class_level(request)
        try:
            sws_person = get_profile_of_current_user(request)
            data["F1"] = sws_person.is_F1()
            data["J1"] = sws_person.is_J1()
            data["intl_stud"] = data["F1"] or data["J1"]
        except Exception:
            log_err(logger, "get_profile_of_current_user", traceback, request)

        # enhance student campus with current and future enrollments
        campuses = get_main_campus(request)
        if len(campuses) > 0:
            data["enrolled_stud"] = True
            data['seattle'] = data['seattle'] or ('Seattle' in campuses)
            data['bothell'] = data['bothell'] or ('Bothell' in campuses)
            data['tacoma'] = data['tacoma'] or ('Tacoma' in campuses)
        if data['seattle']:
            data["hxt_viewer"] = (data["hxt_viewer"] or
                                  data['seattle'] and data["undergrad"])

    if is_employee(request):
        # determine employee primary campus based on their mailstop
        try:
            employee_campus = get_employee_campus(request)
            data['official_seattle'] = ('Seattle' == employee_campus)
            data['official_bothell'] = ('Bothell' == employee_campus)
            data['official_tacoma'] = ('Tacoma' == employee_campus)
        except IndeterminateCampusException:
            pass

    request.myuw_user_affiliations = data
    return data
uw-it-aca/myuw
[ 13, 6, 13, 3, 1417029795 ]
def epsg_3857_from_proj4():
    """
    Return a gdal spatial reference object with 3857 crs using the
    ImportFromProj4 method.
    """
    spatial_ref = SpatialReference()
    spatial_ref.ImportFromProj4('+init=epsg:3857')
    return spatial_ref
ecometrica/gdal2mbtiles
[ 134, 24, 134, 12, 1353362021 ]
def epsg_3857_from_epsg():
    """
    Return a gdal spatial reference object with 3857 crs using the
    FromEPSG method.
    """
    spatial_ref = SpatialReference.FromEPSG(EPSG_WEB_MERCATOR)
    return spatial_ref
ecometrica/gdal2mbtiles
[ 134, 24, 134, 12, 1353362021 ]
def __init__(self):
    super(JoinsInRoomResource, self).__init__()
    self.last_cleared = datetime.utcnow()
    self.request = request
thenetcircle/dino
[ 139, 6, 139, 11, 1475559609 ]
def do_get_with_params(self, room_id: str = None, room_name: str = None):
    return self._do_get(room_id, room_name)
thenetcircle/dino
[ 139, 6, 139, 11, 1475559609 ]
def do_get(self):
    is_valid, msg, json = self.validate_json(self.request, silent=False)
    if not is_valid:
        logger.error('invalid json: %s' % msg)
        return dict()

    logger.debug('GET request: %s' % str(json))

    if 'room_ids' not in json and 'room_names' not in json:
        return dict()

    output = dict()
    if 'room_ids' in json:
        for room_id in json['room_ids']:
            output[room_id] = self.do_get_with_params(room_id=room_id)

    if 'room_names' in json:
        for room_name in json['room_names']:
            output[room_name] = self.do_get_with_params(room_name=b64d(room_name))

    return output
thenetcircle/dino
[ 139, 6, 139, 11, 1475559609 ]
def _get_last_cleared(self):
    return self.last_cleared
thenetcircle/dino
[ 139, 6, 139, 11, 1475559609 ]
def _start(self, *args, **kwargs):
    self._proc = mock.Mock()
    self._proc.stdin = None
    self._proc.stdout = None
    self._proc.stderr = None
haypo/trollius
[ 186, 27, 186, 6, 1429023346 ]
def setUp(self):
    self.loop = self.new_test_loop()
    self.set_event_loop(self.loop)
haypo/trollius
[ 186, 27, 186, 6, 1429023346 ]
def test_proc_exited(self):
    waiter = asyncio.Future(loop=self.loop)
    transport, protocol = self.create_transport(waiter)
    transport._process_exited(6)
    self.loop.run_until_complete(waiter)

    self.assertEqual(transport.get_returncode(), 6)

    self.assertTrue(protocol.connection_made.called)
    self.assertTrue(protocol.process_exited.called)
    self.assertTrue(protocol.connection_lost.called)
    self.assertEqual(protocol.connection_lost.call_args[0], (None,))

    self.assertFalse(transport._closed)
    self.assertIsNone(transport._loop)
    self.assertIsNone(transport._proc)
    self.assertIsNone(transport._protocol)

    # methods must raise ProcessLookupError if the process exited
    self.assertRaises(ProcessLookupError,
                      transport.send_signal, signal.SIGTERM)
    self.assertRaises(ProcessLookupError, transport.terminate)
    self.assertRaises(ProcessLookupError, transport.kill)

    transport.close()
haypo/trollius
[ 186, 27, 186, 6, 1429023346 ]
def test_stdin_stdout(self):
    args = PROGRAM_CAT

    @asyncio.coroutine
    def run(data):
        proc = yield From(asyncio.create_subprocess_exec(
            *args,
            stdin=subprocess.PIPE,
            stdout=subprocess.PIPE,
            loop=self.loop))

        # feed data
        proc.stdin.write(data)
        yield From(proc.stdin.drain())
        proc.stdin.close()

        # get output and exitcode
        data = yield From(proc.stdout.read())
        exitcode = yield From(proc.wait())
        raise Return(exitcode, data)

    task = run(b'some data')
    task = asyncio.wait_for(task, 60.0, loop=self.loop)
    exitcode, stdout = self.loop.run_until_complete(task)
    self.assertEqual(exitcode, 0)
    self.assertEqual(stdout, b'some data')
haypo/trollius
[ 186, 27, 186, 6, 1429023346 ]
def run(data):
    proc = yield From(asyncio.create_subprocess_exec(
        *args,
        stdin=subprocess.PIPE,
        stdout=subprocess.PIPE,
        loop=self.loop))
    stdout, stderr = yield From(proc.communicate(data))
    raise Return(proc.returncode, stdout)
haypo/trollius
[ 186, 27, 186, 6, 1429023346 ]
def test_shell(self):
    create = asyncio.create_subprocess_shell('exit 7',
                                             loop=self.loop)
    proc = self.loop.run_until_complete(create)
    exitcode = self.loop.run_until_complete(proc.wait())
    self.assertEqual(exitcode, 7)
haypo/trollius
[ 186, 27, 186, 6, 1429023346 ]
def test_start_new_session(self):
    def start_new_session():
        os.setsid()

    # start the new process in a new session
    create = asyncio.create_subprocess_shell('exit 8',
                                             preexec_fn=start_new_session,
                                             loop=self.loop)
    proc = self.loop.run_until_complete(create)
    exitcode = self.loop.run_until_complete(proc.wait())
    self.assertEqual(exitcode, 8)
haypo/trollius
[ 186, 27, 186, 6, 1429023346 ]
def test_terminate(self):
    args = PROGRAM_BLOCKED
    create = asyncio.create_subprocess_exec(*args, loop=self.loop)
    proc = self.loop.run_until_complete(create)
    proc.terminate()
    returncode = self.loop.run_until_complete(proc.wait())
    if sys.platform == 'win32':
        self.assertIsInstance(returncode, int)
        # expect 1 but sometimes get 0
    else:
        self.assertEqual(-signal.SIGTERM, returncode)
haypo/trollius
[ 186, 27, 186, 6, 1429023346 ]
def test_send_signal(self):
    code = '; '.join((
        'import sys, time',
        'print("sleeping")',
        'sys.stdout.flush()',
        'time.sleep(3600)'))
    args = [sys.executable, '-c', code]
    create = asyncio.create_subprocess_exec(*args,
                                            stdout=subprocess.PIPE,
                                            loop=self.loop)
    proc = self.loop.run_until_complete(create)

    @asyncio.coroutine
    def send_signal(proc):
        # basic synchronization to wait until the program is sleeping
        line = yield From(proc.stdout.readline())
        self.assertEqual(line, b'sleeping\n')

        proc.send_signal(signal.SIGHUP)
        returncode = yield From(proc.wait())
        raise Return(returncode)

    returncode = self.loop.run_until_complete(send_signal(proc))
    self.assertEqual(-signal.SIGHUP, returncode)
haypo/trollius
[ 186, 27, 186, 6, 1429023346 ]
def test_stdin_broken_pipe(self):
    proc, large_data = self.prepare_broken_pipe_test()

    @asyncio.coroutine
    def write_stdin(proc, data):
        proc.stdin.write(data)
        yield From(proc.stdin.drain())

    coro = write_stdin(proc, large_data)
    # drain() must raise BrokenPipeError or ConnectionResetError
    with test_utils.disable_logger():
        self.assertRaises((BrokenPipeError, ConnectionResetError),
                          self.loop.run_until_complete, coro)
    self.loop.run_until_complete(proc.wait())
haypo/trollius
[ 186, 27, 186, 6, 1429023346 ]
def test_pause_reading(self):
    limit = 10
    size = (limit * 2 + 1)

    @asyncio.coroutine
    def test_pause_reading():
        code = '\n'.join((
            'import sys',
            'sys.stdout.write("x" * %s)' % size,
            'sys.stdout.flush()',
        ))

        connect_read_pipe = self.loop.connect_read_pipe

        @asyncio.coroutine
        def connect_read_pipe_mock(*args, **kw):
            connect = connect_read_pipe(*args, **kw)
            transport, protocol = yield From(connect)
            transport.pause_reading = mock.Mock()
            transport.resume_reading = mock.Mock()
            raise Return(transport, protocol)

        self.loop.connect_read_pipe = connect_read_pipe_mock

        proc = yield From(asyncio.create_subprocess_exec(
            sys.executable, '-c', code,
            stdin=asyncio.subprocess.PIPE,
            stdout=asyncio.subprocess.PIPE,
            limit=limit,
            loop=self.loop))
        stdout_transport = proc._transport.get_pipe_transport(1)

        stdout, stderr = yield From(proc.communicate())

        # The child process produced more than limit bytes of output,
        # the stream reader transport should pause the protocol to not
        # allocate too much memory.
        raise Return(stdout, stdout_transport)

    # Issue #22685: Ensure that the stream reader pauses the protocol
    # when the child process produces too much data
    stdout, transport = self.loop.run_until_complete(test_pause_reading())

    self.assertEqual(stdout, b'x' * size)
    self.assertTrue(transport.pause_reading.called)
    self.assertTrue(transport.resume_reading.called)
haypo/trollius
[ 186, 27, 186, 6, 1429023346 ]
def len_message(message):
    code = 'import sys; data = sys.stdin.read(); print(len(data))'
    proc = yield From(asyncio.create_subprocess_exec(
        sys.executable, '-c', code,
        stdin=asyncio.subprocess.PIPE,
        stdout=asyncio.subprocess.PIPE,
        stderr=asyncio.subprocess.PIPE,
        close_fds=False,
        loop=self.loop))
    stdout, stderr = yield From(proc.communicate(message))
    exitcode = yield From(proc.wait())
    raise Return(stdout, exitcode)
haypo/trollius
[ 186, 27, 186, 6, 1429023346 ]
def test_cancel_process_wait(self):
    # Issue #23140: cancel Process.wait()

    @asyncio.coroutine
    def cancel_wait():
        proc = yield From(asyncio.create_subprocess_exec(
            *PROGRAM_BLOCKED,
            loop=self.loop))

        # Create an internal future waiting on the process exit
        task = self.loop.create_task(proc.wait())
        self.loop.call_soon(task.cancel)
        try:
            yield From(task)
        except asyncio.CancelledError:
            pass

        # Cancel the future
        task.cancel()

        # Kill the process and wait until it is done
        proc.kill()
        yield From(proc.wait())

    self.loop.run_until_complete(cancel_wait())
haypo/trollius
[ 186, 27, 186, 6, 1429023346 ]
def cancel_make_transport():
    coro = asyncio.create_subprocess_exec(*PROGRAM_BLOCKED,
                                          loop=self.loop)
    task = self.loop.create_task(coro)

    self.loop.call_soon(task.cancel)
    try:
        yield From(task)
    except asyncio.CancelledError:
        pass
haypo/trollius
[ 186, 27, 186, 6, 1429023346 ]
def test_cancel_post_init(self):
    @asyncio.coroutine
    def cancel_make_transport():
        coro = self.loop.subprocess_exec(asyncio.SubprocessProtocol,
                                         *PROGRAM_BLOCKED)
        task = self.loop.create_task(coro)

        self.loop.call_soon(task.cancel)
        try:
            yield From(task)
        except asyncio.CancelledError:
            pass

    # ignore the log:
    # "Exception during subprocess creation, kill the subprocess"
    with test_utils.disable_logger():
        self.loop.run_until_complete(cancel_make_transport())
        test_utils.run_briefly(self.loop)
haypo/trollius
[ 186, 27, 186, 6, 1429023346 ]
def kill_running():
    create = self.loop.subprocess_exec(asyncio.SubprocessProtocol,
                                       *PROGRAM_BLOCKED)
    transport, protocol = yield From(create)

    non_local = {'kill_called': False}

    def kill():
        non_local['kill_called'] = True
        orig_kill()

    proc = transport.get_extra_info('subprocess')
    orig_kill = proc.kill
    proc.kill = kill
    returncode = transport.get_returncode()
    transport.close()
    yield From(transport._wait())
    raise Return(returncode, non_local['kill_called'])
haypo/trollius
[ 186, 27, 186, 6, 1429023346 ]
def test_close_dont_kill_finished(self):
    @asyncio.coroutine
    def kill_running():
        create = self.loop.subprocess_exec(asyncio.SubprocessProtocol,
                                           *PROGRAM_BLOCKED)
        transport, protocol = yield From(create)
        proc = transport.get_extra_info('subprocess')

        # kill the process (but asyncio is not notified immediatly)
        proc.kill()
        proc.wait()

        proc.kill = mock.Mock()
        proc_returncode = proc.poll()
        transport_returncode = transport.get_returncode()
        transport.close()
        raise Return(proc_returncode, transport_returncode, proc.kill.called)

    # Ignore "Unknown child process pid ..." log of SafeChildWatcher,
    # emitted because the test already consumes the exit status:
    # proc.wait()
    with test_utils.disable_logger():
        result = self.loop.run_until_complete(kill_running())
        test_utils.run_briefly(self.loop)

    proc_returncode, transport_return_code, killed = result
    self.assertIsNotNone(proc_returncode)
    self.assertIsNone(transport_return_code)

    # transport.close() must not kill the process if it finished, even if
    # the transport was not notified yet
    self.assertFalse(killed)
haypo/trollius
[ 186, 27, 186, 6, 1429023346 ]
def setUp(self):
    policy = asyncio.get_event_loop_policy()
    self.loop = policy.new_event_loop()
    self.set_event_loop(self.loop)

    watcher = self.Watcher()
    watcher.attach_loop(self.loop)
    policy.set_child_watcher(watcher)
    self.addCleanup(policy.set_child_watcher, None)
haypo/trollius
[ 186, 27, 186, 6, 1429023346 ]
def setUp(self):
    self.loop = asyncio.ProactorEventLoop()
    self.set_event_loop(self.loop)
haypo/trollius
[ 186, 27, 186, 6, 1429023346 ]
def testSourceManifest(self):
    recipestr = """
sassoftware/conary
[ 47, 9, 47, 4, 1396904066 ]
def setup(r):
    # random files we have around in archive...
    r.addArchive('asdf.tar.gz', dir='/', package='asdf')
    r.addSource('sourcefile', dir='/var/test/')
sassoftware/conary
[ 47, 9, 47, 4, 1396904066 ]
def testISOArchiveJoliet(self):
    recipestr = """
sassoftware/conary
[ 47, 9, 47, 4, 1396904066 ]
def setup(r):
    r.addArchive('jcd.iso', dir='/')
    r.SetModes('%(bindir)s/touch', 0755)
sassoftware/conary
[ 47, 9, 47, 4, 1396904066 ]
def testISOArchiveRockRidge(self):
    recipestr = """
sassoftware/conary
[ 47, 9, 47, 4, 1396904066 ]
def setup(r):
    r.addArchive('rrcd.iso', dir='/')
    r.SetModes('%(bindir)s/touch', 0755)
sassoftware/conary
[ 47, 9, 47, 4, 1396904066 ]
def testSourceTest1(self):
    """
    Test build.source
    """
    recipestr = """
sassoftware/conary
[ 47, 9, 47, 4, 1396904066 ]
def setup(r):
    # test unpacking and extracting from an RPM
    r.addArchive('tmpwatch-2.9.0.tar.gz', rpm='tmpwatch-2.9.0-2.src.rpm')
    # test unpacking and extracting from a src bz2 RPM
    r.addSource('mkinitrd.spec', rpm='rpm-with-bzip-5.0.29-1.src.rpm')
    # test unpacking and extracting from a bz2 RPM
    r.addArchive('rpm-with-bzip-5.0.29-1.i386.rpm')
    # test unpacking and extracting from an lzma RPM
    r.addArchive('gnome-main-menu-0.9.10-26.x86_64.rpm')
    # test unpacking from a tar.xz (CNY-3207)
    r.addArchive('foo.tar.xz', dir='/')
    # test applying a patch
    r.addPatch('tmpwatch.fakebug.patch')
    # test taking that patch right back out with a string to extraArgs
    r.addPatch('tmpwatch.fakebug.patch', extraArgs='--reverse')
    # test putting the patch back in again with a list to extraArgs this time
    r.addPatch('tmpwatch.fakebug.patch', extraArgs=['--ignore-whitespace',])
    # test the dest= capability of addSource
    r.addSource('tmpwatch.fakebug.patch', dest='foo')
    r.addSource('tmpwatch.fakebug.patch', dir='/asdf', dest='foo')
    r.addSource('tmpwatch.fakebug.patch', dest='/asdf/foo2%%')
    r.addSource('tmpwatch.fakebug.patch', dest='/asdf/')
    # make sure spaces are OK
    r.addSource('name with spaces')
    r.addSource('localfoo')
    r.addSource('local name with spaces')
    r.addAction('ls foo')
    r.addAction('ls foo', dir='/asdf')
    r.addAction('ls foo2%%', dir='/asdf')
    r.addAction('ls tmpwatch.fakebug.patch', dir='/asdf')
    r.addArchive('tmpwatch-2.9.0.tar.gz', rpm='tmpwatch-2.9.0-2.src.rpm',
                 dir='/asdf')
    r.addAction('ls tmpwatch-2.9.0', dir='/asdf')
    # XXX I'm not sure what this was intended to show
    #r.addAction('ls tmpwatch-2.9.0', dir='%(destdir)s/asdf')
    r.addPatch('tmpwatch.fakebug.patch', dir='/asdf/tmpwatch-2.9.0')
sassoftware/conary
[ 47, 9, 47, 4, 1396904066 ]
def access_xz(*args):
    if args[0].endswith('/unlzma'):
        return False
    return realExists(*args)
sassoftware/conary
[ 47, 9, 47, 4, 1396904066 ]
def testSourceTestUnlzma(self):
    """
    Test build.source
    """
    recipestr = """
sassoftware/conary
[ 47, 9, 47, 4, 1396904066 ]
def setup(r):
    # test unpacking and extracting from an lzma RPM if xz not available
    r.addArchive('gnome-main-menu-0.9.10-26.x86_64.rpm', dir='/')
    del r.NonMultilibDirectories
sassoftware/conary
[ 47, 9, 47, 4, 1396904066 ]
def access_lzma(*args):
    if args[0].endswith('/xz'):
        return False
    return realExists(*args)
sassoftware/conary
[ 47, 9, 47, 4, 1396904066 ]
def testSourceTestMissinglzma(self):
    """
    Test build.source
    """
    recipestr = """
sassoftware/conary
[ 47, 9, 47, 4, 1396904066 ]
def setup(r):
    # test unpacking and extracting from an lzma RPM if xz not available
    r.addArchive('gnome-main-menu-0.9.10-26.x86_64.rpm', dir='/')
sassoftware/conary
[ 47, 9, 47, 4, 1396904066 ]
def access_nolzma(*args):
    if args[0].split(os.sep)[-1] in ('xz', 'unlzma'):
        return False
    return realExists(*args)
sassoftware/conary
[ 47, 9, 47, 4, 1396904066 ]
def testUnpackOldRpm30(self):
    # CNY-3210
    # Use a very old version of rpm, that does not have PAYLOADCOMPRESSOR set
    # Downloaded from
    # http://ftpsearch.kreonet.re.kr/pub/tools/utils/rpm/rpm/dist/rpm-3.0.x/
    destdir = os.path.join(self.workDir, 'dest')
    util.mkdirChain(destdir)
    rpmfile = os.path.join(self.cfg.sourceSearchDir, 'popt-1.5-4x.i386.rpm')
    source._extractFilesFromRPM(rpmfile, directory = destdir)
    self.assertTrue(os.path.exists(util.joinPaths(destdir,
                                                  '/usr/include/popt.h')))
sassoftware/conary
[ 47, 9, 47, 4, 1396904066 ]
def testSourcePerms(self):
    recipestr = """
sassoftware/conary
[ 47, 9, 47, 4, 1396904066 ]
def setup(r):
    # test not preserving world-writeable permissions in builddir
    r.addArchive('worldwriteable.tar.bz2', dir='test-1')
    r.Install('worldwriteable', '/ww/notworldwriteable')
    # test preserving world-writeable permissions in root proxy
    r.addArchive('worldwriteable.tar.bz2', dir='/ww/')
    # test missing intermediate directory in tarball CNY-3060
    r.addArchive('missing.tar', dir='/opt/f', preserveOwnership=True)
sassoftware/conary
[ 47, 9, 47, 4, 1396904066 ]
def testSourceTestSRPMCache(self):
    """
    Test SRPM lookaside handling (CNY-771)
    """
    recipe1 = """
sassoftware/conary
[ 47, 9, 47, 4, 1396904066 ]
def setup(r):
    r.macros.release = '1'
    r.macros.srpm = '%(name)s-%(version)s-%(release)s.src.rpm'
    r.addSource('bar', rpm='%(srpm)s')
    r.addSource('baz', rpm='%(srpm)s')
sassoftware/conary
[ 47, 9, 47, 4, 1396904066 ]
def setup(r):
    r.macros.release = '2'
    r.macros.srpm = '%(name)s-%(version)s-%(release)s.src.rpm'
    r.addSource('bar', rpm='%(srpm)s')
    r.addSource('baz', rpm='%(srpm)s')
sassoftware/conary
[ 47, 9, 47, 4, 1396904066 ]
def testSourceTestSigCheck(self):
    """
    Test signatures
    """
    # XXX use smaller bz2 file than distcc
    recipestr1 = """
sassoftware/conary
[ 47, 9, 47, 4, 1396904066 ]
def setup(r):
    r.addArchive('distcc-2.9.tar.bz2', keyid='A0B3E88B')
    r.addArchive('tmpwatch-2.9.0.tar.gz', rpm='tmpwatch-2.9.0-2.src.rpm',
                 keyid='sdds', dir='new-subdir')
sassoftware/conary
[ 47, 9, 47, 4, 1396904066 ]
def mockedDownloadPublicKey(slf):
    if slf.keyid == 'A0B3E88B':
        f = file(os.path.join(resources.get_archive(), '0xA0B3E88B.pgp'))
        return openpgpfile.parseAsciiArmorKey(f)
    raise source.SourceError("Failed to retrieve PGP key %s" % slf.keyid)
sassoftware/conary
[ 47, 9, 47, 4, 1396904066 ]
def setup(r):
    r.addArchive('distcc-2.9.tar.bz2', keyid='BADBAD')
sassoftware/conary
[ 47, 9, 47, 4, 1396904066 ]
def testSourceTestSigCheckFailedDownload(self):
    """
    Test a download failure for the key
    """
    recipestr1 = """
sassoftware/conary
[ 47, 9, 47, 4, 1396904066 ]
def setup(r):
    r.addArchive('distcc-2.9.tar.bz2', keyid='A0B3E88B')
    r.Create("/usr/foo", contents="Bar!!!\\n")
sassoftware/conary
[ 47, 9, 47, 4, 1396904066 ]
def mockedDoDownloadPublicKey(slf, keyServer, lc = listcounter):
    lc.append(None)
    if len(lc) < 7:
        raise transport.TransportError("Blah!")
    f = file(os.path.join(resources.get_archive(), '0xA0B3E88B.pgp'))
    data = openpgpfile.parseAsciiArmorKey(f)
    return data
sassoftware/conary
[ 47, 9, 47, 4, 1396904066 ]
def testDontCheckKeyOfCommitedSource(self):
    # We choose not to check the public key for sources already committed,
    # instead relying on the check only at the time of commit.
    recipestr1 = """
sassoftware/conary
[ 47, 9, 47, 4, 1396904066 ]
def setup(r):
    r.addArchive('distcc-2.9.tar.bz2', keyid='A0B3E88B')
    r.Create("/usr/foo", contents="Bar!!!\\n")
sassoftware/conary
[ 47, 9, 47, 4, 1396904066 ]
def _checkSignature(self, file):
    listcounter.append(None)
    return
sassoftware/conary
[ 47, 9, 47, 4, 1396904066 ]
def testSourceTestApplyMacros(self):
    """
    Test applymacros
    """
    recipestr1 = """
sassoftware/conary
[ 47, 9, 47, 4, 1396904066 ]
def cleanup(r, builddir, destdir):
    pass
sassoftware/conary
[ 47, 9, 47, 4, 1396904066 ]
def setup(r):
    # avoid cleanup
    r.addArchive('tmpwatch-2.9.0.tar.gz', rpm='tmpwatch-2.9.0-2.src.rpm')
    # test applying a patch
    r.macros.bugid = 'BUGID'
    r.addPatch('tmpwatch.fakebug.patch', macros=True)
sassoftware/conary
[ 47, 9, 47, 4, 1396904066 ]