Dataset columns: function (string, length 11 to 56k characters), repo_name (string, length 5 to 60 characters), features (sequence of integers).
def _after_run(self):
    self._metrics_processor.process_elapsed_time_metric()
GoogleCloudPlatform/datacatalog-connectors-hive
[ 20, 14, 20, 3, 1588024316 ]
def _make_assembled_entries(self, metadata_dict, atlas_entity_types=None):
    assembled_entries = self._assembled_entry_factory.\
        make_assembled_entries_list(metadata_dict, atlas_entity_types)
    return assembled_entries
GoogleCloudPlatform/datacatalog-connectors-hive
[ 20, 14, 20, 3, 1588024316 ]
def _ingest_metadata(self, tag_templates_dict, assembled_entries):
    metadata_ingestor = ingest.DataCatalogMetadataIngestor(
        self._project_id, self._location_id, self._ENTRY_GROUP_ID)
    managed_tag_template = self._tag_template_factory.\
        get_tag_template_path(self._ENTRY_GROUP_ID)
    metadata_ingestor.ingest_metadata(
        assembled_entries, tag_templates_dict,
        {'delete_tags': {'managed_tag_template': managed_tag_template}})
    entries_count = len(assembled_entries)
    logging.info('==== %s entries synchronized!', entries_count)
GoogleCloudPlatform/datacatalog-connectors-hive
[ 20, 14, 20, 3, 1588024316 ]
def __init__(self, context_name, env):
    super(VcloudCleanupContext, self).__init__(context_name, env)
cloudify-cosmo/tosca-vcloud-plugin
[ 4, 10, 4, 1, 1419926867 ]
def clean_all(cls, env):
    """
    Cleans *all* resources, including resources that were not
    created by the test
    """
    super(VcloudCleanupContext, cls).clean_all(env)
cloudify-cosmo/tosca-vcloud-plugin
[ 4, 10, 4, 1, 1419926867 ]
def __init__(self, cloudify_config, manager_blueprint_path, **kwargs):
    super(CloudifyVcloudInputsConfigReader, self).__init__(
        cloudify_config, manager_blueprint_path=manager_blueprint_path,
        **kwargs)
cloudify-cosmo/tosca-vcloud-plugin
[ 4, 10, 4, 1, 1419926867 ]
def vcloud_username(self):
    return self.config['vcloud_username']
cloudify-cosmo/tosca-vcloud-plugin
[ 4, 10, 4, 1, 1419926867 ]
def vcloud_password(self):
    return self.config['vcloud_password']
cloudify-cosmo/tosca-vcloud-plugin
[ 4, 10, 4, 1, 1419926867 ]
def vcloud_url(self):
    return self.config['vcloud_url']
cloudify-cosmo/tosca-vcloud-plugin
[ 4, 10, 4, 1, 1419926867 ]
def vcloud_service(self):
    return self.config['vcloud_service']
cloudify-cosmo/tosca-vcloud-plugin
[ 4, 10, 4, 1, 1419926867 ]
def vcloud_org(self):
    return self.config['vcloud_org']
cloudify-cosmo/tosca-vcloud-plugin
[ 4, 10, 4, 1, 1419926867 ]
def vcloud_vdc(self):
    return self.config['vcloud_vdc']
cloudify-cosmo/tosca-vcloud-plugin
[ 4, 10, 4, 1, 1419926867 ]
def manager_server_name(self):
    return self.config['server_name']
cloudify-cosmo/tosca-vcloud-plugin
[ 4, 10, 4, 1, 1419926867 ]
def manager_server_catalog(self):
    return self.config['catalog']
cloudify-cosmo/tosca-vcloud-plugin
[ 4, 10, 4, 1, 1419926867 ]
def manager_server_template(self):
    return self.config['template']
cloudify-cosmo/tosca-vcloud-plugin
[ 4, 10, 4, 1, 1419926867 ]
def management_network_use_existing(self):
    return self.config['management_network_use_existing']
cloudify-cosmo/tosca-vcloud-plugin
[ 4, 10, 4, 1, 1419926867 ]
def management_network_name(self):
    return self.config['management_network_name']
cloudify-cosmo/tosca-vcloud-plugin
[ 4, 10, 4, 1, 1419926867 ]
def edge_gateway(self):
    return self.config['edge_gateway']
cloudify-cosmo/tosca-vcloud-plugin
[ 4, 10, 4, 1, 1419926867 ]
def floating_ip_public_ip(self):
    return self.config['floating_ip_public_ip']
cloudify-cosmo/tosca-vcloud-plugin
[ 4, 10, 4, 1, 1419926867 ]
def ssh_key_filename(self):
    return self.config['ssh_key_filename']
cloudify-cosmo/tosca-vcloud-plugin
[ 4, 10, 4, 1, 1419926867 ]
def agent_private_key_path(self):
    return self.config['agent_private_key_path']
cloudify-cosmo/tosca-vcloud-plugin
[ 4, 10, 4, 1, 1419926867 ]
def user_public_key(self):
    return self.config['user_public_key']
cloudify-cosmo/tosca-vcloud-plugin
[ 4, 10, 4, 1, 1419926867 ]
def agent_public_key(self):
    return self.config['user_public_key']
cloudify-cosmo/tosca-vcloud-plugin
[ 4, 10, 4, 1, 1419926867 ]
def management_port_ip_allocation_mode(self):
    return self.config['management_port_ip_allocation_mode']
cloudify-cosmo/tosca-vcloud-plugin
[ 4, 10, 4, 1, 1419926867 ]
def vcloud_service_type(self):
    return self.config['vcloud_service_type']
cloudify-cosmo/tosca-vcloud-plugin
[ 4, 10, 4, 1, 1419926867 ]
def vcloud_region(self):
    return self.config['vcloud_region']
cloudify-cosmo/tosca-vcloud-plugin
[ 4, 10, 4, 1, 1419926867 ]
def public_catalog(self):
    return 'Public Catalog'
cloudify-cosmo/tosca-vcloud-plugin
[ 4, 10, 4, 1, 1419926867 ]
def ubuntu_precise_template(self):
    return 'Ubuntu Server 12.04 LTS (amd64 20150127)'
cloudify-cosmo/tosca-vcloud-plugin
[ 4, 10, 4, 1, 1419926867 ]
def before_bootstrap(self):
    super(VcloudHandler, self).before_bootstrap()
    vca = login(self.env.cloudify_config)
    if vca.get_vdc(TEST_VDC):
        status, task = vca.delete_vdc(TEST_VDC)
        if status:
            wait_for_task(vca, task)
        else:
            raise RuntimeError("Can't delete test VDC")
    if vca:
        task = vca.create_vdc(TEST_VDC)
        wait_for_task(vca, task)
    else:
        raise RuntimeError("Can't create test VDC")
cloudify-cosmo/tosca-vcloud-plugin
[ 4, 10, 4, 1, 1419926867 ]
def login(env):
    vca = vcloudair.VCA(
        host=env['vcloud_url'],
        username=env['vcloud_username'],
        service_type=env['vcloud_service_type'],
        version="5.7",
        verify=False)
    logined = (vca.login(env['vcloud_password'])
               and vca.login_to_instance(env['vcloud_instance'],
                                         env['vcloud_password'])
               and vca.login_to_instance(env['vcloud_instance'], None,
                                         vca.vcloud_session.token,
                                         vca.vcloud_session.org_url))
    if logined:
        return vca
    else:
        return None
cloudify-cosmo/tosca-vcloud-plugin
[ 4, 10, 4, 1, 1419926867 ]
def test_not_nan(self, num_centroids: int, tau: int, magnitude: float):
    """Check that we don't get Inf."""
    def compute_ll(key: chex.PRNGKey) -> float:
        num_obs = jax.random.poisson(key, 1, [num_centroids])
        nu = num_obs / jnp.sum(num_obs)
        q_hat = jnp.ones([num_centroids, tau]) * (10 ** magnitude)
        q_hat += jnp.expand_dims(nu == 0, 1).astype(jnp.float32)
        q_hat = jnp.clip(q_hat, 0, 1)
        return utils.log_sum_prod(nu, q_hat)

    keys = jax.random.split(jax.random.PRNGKey(0), 10)
    log_likelihoods = jax.jit(jax.vmap(compute_ll))(keys)
    assert jnp.all(jnp.isfinite(log_likelihoods))
deepmind/neural_testbed
[ 178, 11, 178, 1, 1633339646 ]
def api(request):
    """
    API Documentation view (via iframe).
    """
    context = collect_view_data(request, 'api_docs')
    return render(request, 'gui/docs/api.html', context)
erigones/esdc-ce
[ 106, 27, 106, 56, 1478554493 ]
def user_guide(request):
    """
    User Guide view (via iframe).
    """
    context = collect_view_data(request, 'user_guide')
    return render(request, 'gui/docs/user_guide.html', context)
erigones/esdc-ce
[ 106, 27, 106, 56, 1478554493 ]
def __init__(self,
             ads_uac_conn_id: str = 'google_ads_uac_default',
             ads_uac_dry_run: bool = False,
             **kwargs) -> None:
    """Initializes the generator of a specified BigQuery table.

    Args:
      ads_uac_conn_id: Connection id passed to airflow.
      ads_uac_dry_run: If true the hook will not send real hits to the endpoint.
      **kwargs: Other optional arguments.
    """
    super().__init__(http_conn_id=ads_uac_conn_id)
    self.dry_run = ads_uac_dry_run
google/TaglessCRM
[ 16, 7, 16, 4, 1611719174 ]
def _validate_app_conversion_payload(self, payload: Dict[str, Any]) -> None:
    """Validates payload sent to UAC.

    Args:
      payload: The payload to be validated before sending to Google Ads UAC.

    Raises:
      DataOutConnectorValueError: If some value is missing or in wrong format.
    """
    for key in _REQUIRED_FIELDS:
        if payload.get(key) is None:
            raise errors.DataOutConnectorValueError(
                """Missing {key} in payload.""".format(key=key),
                errors.ErrorNameIDMap.ADS_UAC_HOOK_ERROR_MISSING_MANDATORY_FIELDS)

    if payload.get('app_event_type') not in [item.value for item in AppEventType]:
        raise errors.DataOutConnectorValueError(
            """Unsupported app event type in payload. Example: 'first_open',
            'session_start', 'in_app_purchase', 'view_item_list', 'view_item',
            'view_search_results', 'add_to_cart', 'ecommerce_purchase',
            'custom'.""",
            errors.ErrorNameIDMap.ADS_UAC_HOOK_ERROR_UNSUPPORTED_APP_EVENT_TYPE)

    if (payload.get('app_event_name') and
            payload.get('app_event_type') != 'custom'):
        raise errors.DataOutConnectorValueError(
            """App event type must be 'custom' when app event name exists.""",
            errors.ErrorNameIDMap.ADS_UAC_HOOK_ERROR_WRONG_APP_EVENT_TYPE)

    match = _RDID_REGEX.match(payload.get('rdid'))
    if not match:
        raise errors.DataOutConnectorValueError(
            """Wrong raw device id format in payload. Should be compatible
            with RFC4122.""",
            errors.ErrorNameIDMap.ADS_UAC_HOOK_ERROR_WRONG_RAW_DEVICE_ID_FORMAT)

    if payload.get('id_type') not in [item.value for item in IdType]:
        raise errors.DataOutConnectorValueError(
            """Wrong raw device id type in payload. Example: 'advertisingid',
            'idfa'.""",
            errors.ErrorNameIDMap.ADS_UAC_HOOK_ERROR_WRONG_RAW_DEVICE_ID_TYPE)

    if payload.get('lat') != 0 and payload.get('lat') != 1:
        raise errors.DataOutConnectorValueError(
            """Wrong limit-ad-tracking status in payload. Example: 0, 1.""",
            errors.ErrorNameIDMap.ADS_UAC_HOOK_ERROR_WRONG_LAT_STATUS)
google/TaglessCRM
[ 16, 7, 16, 4, 1611719174 ]
def test_upcoming_future(self):
    event_future = generate_random_event(hours_ahead(1), hours_ahead(2))
    self.assertTrue(event_future in Event.objects.upcoming())
armstrong/armstrong.apps.events
[ 5, 2, 5, 2, 1303427651 ]
def test_upcoming_happened_today(self):
    """ don't run this at 12am! go to bed """
    event_happened_today = generate_random_event(hours_ago(2), hours_ago(1))
    self.assertTrue(event_happened_today in Event.objects.upcoming())
    self.assertTrue(event_happened_today in Event.objects.upcoming(days=0))
    self.assertTrue(event_happened_today in Event.objects.upcoming(days=1))
armstrong/armstrong.apps.events
[ 5, 2, 5, 2, 1303427651 ]
def test_upcoming_tmrw(self):
    event_tmrw = generate_random_event(hours_ahead(24), hours_ahead(25))
    self.assertFalse(event_tmrw in Event.objects.upcoming(days=0))
    self.assertTrue(event_tmrw in Event.objects.upcoming(days=1))
armstrong/armstrong.apps.events
[ 5, 2, 5, 2, 1303427651 ]
def test_upcoming_asc_order(self):
    events = [generate_random_event(hours_ago(i), hours_ago(i + 1))
              for i in random.sample(xrange(-48, 48), 10)]
    upcoming = list(Event.objects.upcoming())
    self.assertTrue(upcoming == sorted(upcoming, key=lambda e: e.start_date))
armstrong/armstrong.apps.events
[ 5, 2, 5, 2, 1303427651 ]
def __init__(self, *args, **kwargs):
    super(App, self).__init__(*args, **kwargs)
lagopus/lagopus
[ 308, 101, 308, 61, 1406265039 ]
def switch_features_handler(self, ev):
    datapath = ev.msg.datapath
    [self.install_sample(datapath, n) for n in [0]]
lagopus/lagopus
[ 308, 101, 308, 61, 1406265039 ]
def install_sample(self, datapath, table_id):
    parser = datapath.ofproto_parser
    ofproto = datapath.ofproto
    req = parser.OFPQueueStatsRequest(datapath, 0, ofproto.OFPP_ANY,
                                      ofproto.OFPQ_ALL)
    datapath.send_msg(req)
lagopus/lagopus
[ 308, 101, 308, 61, 1406265039 ]
def _EmptyFolderSize(self):
    """Returns the size of an empty folder.

    This should match the current filesystem blocksize.
    """
    size = int(subprocess.check_output(['stat', '-fc', '%s', '.']).strip())
    return size
google/GiftStick
[ 125, 24, 125, 13, 1540306194 ]
def testGenerateTarCopyCommand(self):
    with tempfile.TemporaryDirectory() as path:
        d = directory.DirectoryArtifact(path, method='tar', compress=False)
        command = d._TAR_COMMAND
        command.append(path)
        self.assertEqual(d._GenerateCopyCommand(), command)
google/GiftStick
[ 125, 24, 125, 13, 1540306194 ]
def model_creator(config):
    model = tf.keras.models.Sequential([
        tf.keras.layers.Dense(config["hidden_size"], input_shape=(1,)),
        tf.keras.layers.Dense(1)
    ])
    model.compile(loss="mse",
                  optimizer=tf.keras.optimizers.SGD(config["lr"]),
                  metrics=["mse"])
    return model
intel-analytics/analytics-zoo
[ 2553, 722, 2553, 534, 1493951250 ]
def get_x_y(size):
    x = np.random.rand(size)
    y = x / 2
    x = x.reshape((-1, 1))
    y = y.reshape((-1, 1))
    return x, y
intel-analytics/analytics-zoo
[ 2553, 722, 2553, 534, 1493951250 ]
def create_linear_search_space():
    from zoo.orca.automl import hp
    return {
        "hidden_size": hp.choice([5, 10]),
        "lr": hp.choice([0.001, 0.003, 0.01]),
        "batch_size": hp.choice([32, 64])
    }
intel-analytics/analytics-zoo
[ 2553, 722, 2553, 534, 1493951250 ]
def setUp(self) -> None:
    from zoo.orca import init_orca_context
    init_orca_context(cores=4, init_ray_on_spark=True)
intel-analytics/analytics-zoo
[ 2553, 722, 2553, 534, 1493951250 ]
def test_fit(self):
    auto_est = AutoEstimator.from_keras(model_creator=model_creator,
                                        logs_dir="/tmp/zoo_automl_logs",
                                        resources_per_trial={"cpu": 2},
                                        name="test_fit")
    data, validation_data = get_train_val_data()
    auto_est.fit(data=data,
                 validation_data=validation_data,
                 search_space=create_linear_search_space(),
                 n_sampling=2,
                 epochs=1,
                 metric="mse")
    assert auto_est.get_best_model()
    best_config = auto_est.get_best_config()
    assert "hidden_size" in best_config
    assert all(k in best_config.keys() for k in create_linear_search_space().keys())
intel-analytics/analytics-zoo
[ 2553, 722, 2553, 534, 1493951250 ]
def test_fit_metric_func(self):
    auto_est = AutoEstimator.from_keras(model_creator=model_creator,
                                        logs_dir="/tmp/zoo_automl_logs",
                                        resources_per_trial={"cpu": 2},
                                        name="test_fit")
    data, validation_data = get_train_val_data()

    def pyrmsle(y_true, y_pred):
        y_pred[y_pred < -1] = -1 + 1e-6
        elements = np.power(np.log1p(y_true) - np.log1p(y_pred), 2)
        return float(np.sqrt(np.sum(elements) / len(y_true)))

    with pytest.raises(ValueError) as exeinfo:
        auto_est.fit(data=data,
                     validation_data=validation_data,
                     search_space=create_linear_search_space(),
                     n_sampling=2,
                     epochs=1,
                     metric=pyrmsle)
    assert "metric_mode" in str(exeinfo)
    auto_est.fit(data=data,
                 validation_data=validation_data,
                 search_space=create_linear_search_space(),
                 n_sampling=2,
                 epochs=1,
                 metric=pyrmsle,
                 metric_mode="min")
intel-analytics/analytics-zoo
[ 2553, 722, 2553, 534, 1493951250 ]
def run_impl(self, dataset: DataSet, args: argparse.Namespace) -> None:
    create_submodels.run_dataset(dataset)
mapillary/OpenSfM
[ 2824, 787, 2824, 167, 1415789249 ]
def __init__(self, filename):
    super(BinMap, self).__init__(filename)

    fp = open(filename)
    for header, seq in read_block(fp, "group "):
        lg = header.split()[-1]
        self[lg] = []
        for s in seq:
            if s.strip() == "" or s[0] == ";":
                continue
            marker, pos = s.split()
            pos = int(float(pos) * 1000)
            self[lg].append((marker, pos))
tanghaibao/jcvi
[ 570, 166, 570, 35, 1291245482 ]
def __init__(self, row, startidx=3):
    args = row.split()
    self.id = args[0]
    self.seqid, pos = self.id.split(".")
    self.pos = int(pos)
    self.genotype = "".join(args[startidx:])
tanghaibao/jcvi
[ 570, 166, 570, 35, 1291245482 ]
def __str__(self):
    return "{0}: {1}".format(self.id, self.genotype)
tanghaibao/jcvi
[ 570, 166, 570, 35, 1291245482 ]
def bedline(self):
    return "\t".join(str(x) for x in (self.seqid, self.pos - 1, self.pos, self.id))
tanghaibao/jcvi
[ 570, 166, 570, 35, 1291245482 ]
def __init__(self, filename):
    super(MSTMap, self).__init__(filename)

    fp = open(filename)
    startidx = 1
    for row in fp:
        if row.startswith("locus_name"):
            if row.split()[1] == "seqid":
                startidx = 3
            self.header = row.split()
            break

    for row in fp:
        self.append(MSTMapLine(row, startidx=startidx))

    self.nmarkers = len(self)
    self.nind = len(self[0].genotype)
    logging.debug(
        "Map contains {0} markers in {1} individuals".format(
            self.nmarkers, self.nind
        )
    )
tanghaibao/jcvi
[ 570, 166, 570, 35, 1291245482 ]
def __init__(self, matrix, markerheader, population_type, missing_threshold):
    self.matrix = matrix
    self.markerheader = markerheader
    self.population_type = population_type
    self.missing_threshold = missing_threshold
    self.ngenotypes = len(matrix)
    self.nind = len(markerheader) - 1
    assert self.nind == len(matrix[0]) - 1
    logging.debug(
        "Imported {0} markers and {1} individuals.".format(
            self.ngenotypes, self.nind
        )
    )
tanghaibao/jcvi
[ 570, 166, 570, 35, 1291245482 ]
def main():
    actions = (
        ("breakpoint", "find scaffold breakpoints using genetic map"),
        ("ld", "calculate pairwise linkage disequilibrium"),
        ("bed", "convert MSTmap output to bed format"),
        ("fasta", "extract markers based on map"),
        ("anchor", "anchor scaffolds based on map"),
        ("rename", "rename markers according to the new mapping locations"),
        ("header", "rename lines in the map header"),
        # Plot genetic map
        ("blat", "make ALLMAPS input csv based on sequences"),
        ("dotplot", "make dotplot between chromosomes and linkage maps"),
    )
    p = ActionDispatcher(actions)
    p.dispatch(globals())
tanghaibao/jcvi
[ 570, 166, 570, 35, 1291245482 ]
def dotplot(args):
    """
    %prog dotplot map.csv ref.fasta

    Make dotplot between chromosomes and linkage maps.
    The input map is csv formatted, for example:

    ScaffoldID,ScaffoldPosition,LinkageGroup,GeneticPosition
    scaffold_2707,11508,1,0
    scaffold_2707,11525,1,1.2
    """
    from natsort import natsorted
    from jcvi.assembly.allmaps import CSVMapLine
    from jcvi.formats.sizes import Sizes
    from jcvi.graphics.base import shorten
    from jcvi.graphics.dotplot import (
        plt,
        savefig,
        markup,
        normalize_axes,
        downsample,
        plot_breaks_and_labels,
        thousands,
    )

    p = OptionParser(dotplot.__doc__)
    p.set_outfile(outfile=None)
    opts, args, iopts = p.set_image_options(
        args, figsize="8x8", style="dark", dpi=90, cmap="copper"
    )

    if len(args) != 2:
        sys.exit(not p.print_help())

    csvfile, fastafile = args
    sizes = natsorted(Sizes(fastafile).mapping.items())
    seen = set()
    raw_data = []

    fig = plt.figure(1, (iopts.w, iopts.h))
    root = fig.add_axes([0, 0, 1, 1])  # the whole canvas
    ax = fig.add_axes([0.1, 0.1, 0.8, 0.8])  # the dot plot

    fp = must_open(csvfile)
    for row in fp:
        m = CSVMapLine(row)
        seen.add(m.seqid)
        raw_data.append(m)

    # X-axis is the genome assembly
    ctgs, ctg_sizes = zip(*sizes)
    xsize = sum(ctg_sizes)
    qb = list(np.cumsum(ctg_sizes))
    qbreaks = list(zip(ctgs, [0] + qb, qb))
    qstarts = dict(zip(ctgs, [0] + qb))

    # Y-axis is the map
    key = lambda x: x.lg
    raw_data.sort(key=key)
    ssizes = {}
    for lg, d in groupby(raw_data, key=key):
        ssizes[lg] = max([x.cm for x in d])
    ssizes = natsorted(ssizes.items())
    lgs, lg_sizes = zip(*ssizes)
    ysize = sum(lg_sizes)
    sb = list(np.cumsum(lg_sizes))
    sbreaks = list(zip([("LG" + x) for x in lgs], [0] + sb, sb))
    sstarts = dict(zip(lgs, [0] + sb))

    # Re-code all the scatter dots
    data = [
        (qstarts[x.seqid] + x.pos, sstarts[x.lg] + x.cm, "g")
        for x in raw_data
        if (x.seqid in qstarts)
    ]
    npairs = len(data)
    data = downsample(data)

    x, y, c = zip(*data)
    ax.scatter(x, y, c=c, edgecolors="none", s=2, lw=0)

    # Flip X-Y label
    gy, gx = op.basename(csvfile).split(".")[:2]
    gx, gy = shorten(gx, maxchar=30), shorten(gy, maxchar=30)
    xlim, ylim = plot_breaks_and_labels(
        fig, root, ax, gx, gy, xsize, ysize, qbreaks, sbreaks
    )
    ax.set_xlim(xlim)
    ax.set_ylim(ylim)

    title = "Alignment: {} vs {}".format(gx, gy)
    title += " ({} markers)".format(thousands(npairs))
    root.set_title(markup(title), x=0.5, y=0.96, color="k")
    logging.debug(title)
    normalize_axes(root)

    image_name = opts.outfile or (csvfile.rsplit(".", 1)[0] + "." + iopts.format)
    savefig(image_name, dpi=iopts.dpi, iopts=iopts)
    fig.clear()
tanghaibao/jcvi
[ 570, 166, 570, 35, 1291245482 ]
def calc_ldscore(a, b):
    assert len(a) == len(b), "{0}\n{1}".format(a, b)
    # Assumes markers as A/B
    c = Counter(zip(a, b))
    c_aa = c[("A", "A")]
    c_ab = c[("A", "B")]
    c_ba = c[("B", "A")]
    c_bb = c[("B", "B")]
    n = c_aa + c_ab + c_ba + c_bb
    if n == 0:
        return 0

    f = 1.0 / n
    x_aa = c_aa * f
    x_ab = c_ab * f
    x_ba = c_ba * f
    x_bb = c_bb * f

    p_a = x_aa + x_ab
    p_b = x_ba + x_bb
    q_a = x_aa + x_ba
    q_b = x_ab + x_bb
    D = x_aa - p_a * q_a
    denominator = p_a * p_b * q_a * q_b
    if denominator == 0:
        return 0

    r2 = D * D / denominator
    return r2
tanghaibao/jcvi
[ 570, 166, 570, 35, 1291245482 ]
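A minimal usage sketch for calc_ldscore above; the toy genotype strings are hypothetical and assume markers coded as "A"/"B", as the function's comment states:

from collections import Counter  # calc_ldscore relies on Counter being imported

a = list("AABBAABB")
print(calc_ldscore(a, list("AABBAABB")))  # 1.0: identical markers are in perfect LD
print(calc_ldscore(a, list("ABABABAB")))  # 0.0: the two markers segregate independently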
def header(args):
    """
    %prog header map conversion_table

    Rename lines in the map header. The mapping of old names to new names are
    stored in two-column `conversion_table`.
    """
    from jcvi.formats.base import DictFile

    p = OptionParser(header.__doc__)
    p.add_option("--prefix", default="", help="Prepend text to line number")
    p.add_option("--ids", help="Write ids to file")
    opts, args = p.parse_args(args)

    if len(args) != 2:
        sys.exit(not p.print_help())

    mstmap, conversion_table = args
    data = MSTMap(mstmap)
    hd = data.header
    conversion = DictFile(conversion_table)
    newhd = [opts.prefix + conversion.get(x, x) for x in hd]

    print("\t".join(hd))
    print("--->")
    print("\t".join(newhd))

    ids = opts.ids
    if ids:
        fw = open(ids, "w")
        print("\n".join(newhd), file=fw)
        fw.close()
tanghaibao/jcvi
[ 570, 166, 570, 35, 1291245482 ]
def anchor(args):
    """
    %prog anchor map.bed markers.blast > anchored.bed

    Anchor scaffolds based on map.
    """
    from jcvi.formats.blast import bed

    p = OptionParser(anchor.__doc__)
    opts, args = p.parse_args(args)

    if len(args) != 2:
        sys.exit(not p.print_help())

    mapbed, blastfile = args
    bedfile = bed([blastfile])
    markersbed = Bed(bedfile)
    markers = markersbed.order

    mapbed = Bed(mapbed, sorted=False)
    for b in mapbed:
        m = b.accn
        if m not in markers:
            continue

        i, mb = markers[m]
        new_accn = "{0}:{1}-{2}".format(mb.seqid, mb.start, mb.end)
        b.accn = new_accn
        print(b)
tanghaibao/jcvi
[ 570, 166, 570, 35, 1291245482 ]
def fasta(args):
    """
    %prog fasta map.out scaffolds.fasta

    Extract marker sequences based on map.
    """
    from jcvi.formats.sizes import Sizes

    p = OptionParser(fasta.__doc__)
    p.add_option(
        "--extend",
        default=1000,
        type="int",
        help="Extend seq flanking the gaps",
    )
    opts, args = p.parse_args(args)

    if len(args) != 2:
        sys.exit(not p.print_help())

    mapout, sfasta = args
    Flank = opts.extend
    pf = mapout.split(".")[0]
    mapbed = pf + ".bed"
    bm = BinMap(mapout)
    bm.print_to_bed(mapbed)

    bed = Bed(mapbed, sorted=False)
    markersbed = pf + ".markers.bed"
    fw = open(markersbed, "w")
    sizes = Sizes(sfasta).mapping
    for b in bed:
        accn = b.accn
        scf, pos = accn.split(".")
        pos = int(pos)
        start = max(0, pos - Flank)
        end = min(pos + Flank, sizes[scf])
        print("\t".join(str(x) for x in (scf, start, end, accn)), file=fw)

    fw.close()

    fastaFromBed(markersbed, sfasta, name=True)
tanghaibao/jcvi
[ 570, 166, 570, 35, 1291245482 ]
def check_markers(a, b, maxdiff):
    if a.seqid != b.seqid:
        return END, None
    diff = hamming_distance(a.genotype, b.genotype, ignore="-")
    max_allowed = len(a) * maxdiff
    if diff <= max_allowed:
        return OK, None

    return BREAK, (a.seqid, a.pos, b.pos)
tanghaibao/jcvi
[ 570, 166, 570, 35, 1291245482 ]
def __init__(self, versions=None):
    self._versions = versions
kylef/swiftenv-api
[ 25, 9, 25, 10, 1464061407 ]
def versions(self):
    if self._versions is None:
        version_paths = Path('versions').glob('**/*.yaml')
        version_files = map(str, version_paths)
        versions = map(Version.fromfile, version_files)
        versions = sorted(versions, key=lambda v: v.version)
        self._versions = list(versions)

    return self._versions
kylef/swiftenv-api
[ 25, 9, 25, 10, 1464061407 ]
def filter(self, version=None, pre_release=None, snapshots=None, platform=None):
    versions = self.versions

    if version:
        versions = [v for v in versions if v.version == version]

    if pre_release is True:
        versions = [v for v in versions if v.is_pre_release]

    if pre_release is False:
        versions = [v for v in versions if not v.is_pre_release]

    if snapshots is True:
        versions = [v for v in versions if v.is_snapshot]

    if snapshots is False:
        versions = [v for v in versions if not v.is_snapshot]

    if platform:
        versions = [v for v in versions if v.supports_platform(platform)]

    return VersionManager(versions)
kylef/swiftenv-api
[ 25, 9, 25, 10, 1464061407 ]
def fromfile(cls, path):
    version = os.path.splitext(os.path.basename(path))[0]

    with open(path) as fp:
        content = yaml.safe_load(fp.read())

    binaries = {}
    for (key, value) in content['binaries'].items():
        # convert between old and new schema
        if isinstance(value, str):
            binaries[key] = {
                'x86_64': value,
            }
        else:
            binaries[key] = value

    if 'version' in content:
        version = content['version']

    return cls(version, binaries)
kylef/swiftenv-api
[ 25, 9, 25, 10, 1464061407 ]
def __str__(self):
    return self.version
kylef/swiftenv-api
[ 25, 9, 25, 10, 1464061407 ]
def is_pre_release(self):
    return '-' in self.version
kylef/swiftenv-api
[ 25, 9, 25, 10, 1464061407 ]
def is_snapshot(self):
    return 'SNAPSHOT' in self.version
kylef/swiftenv-api
[ 25, 9, 25, 10, 1464061407 ]
def path(self):
    if self.version.startswith('DEVELOPMENT-SNAPSHOT-'):
        version = self.version[len('DEVELOPMENT-SNAPSHOT-'):]
        (year, month, rest) = version.split('-', 2)
        return os.path.join('versions', 'DEVELOPMENT-SNAPSHOT', year, month,
                            '{}.yaml'.format(rest))

    if '-' in self.version:
        version, rest = self.version.split('-', 1)
    else:
        version = self.version
        rest = None

    major = version.split('.', 1)[0]

    if rest:
        if rest.startswith('DEVELOPMENT-SNAPSHOT-'):
            rest = rest[len('DEVELOPMENT-SNAPSHOT-'):]
            return os.path.join('versions', major,
                                '{}-DEVELOPMENT-SNAPSHOT'.format(version),
                                '{}.yaml'.format(rest))

    return os.path.join('versions', major, '{}.yaml'.format(self.version))
kylef/swiftenv-api
[ 25, 9, 25, 10, 1464061407 ]
def have_access(self):
    account = accounts.account()
    if not account or not account.DRAIN_ACCESS_RIGHT:
        return False
    return True
spiffcode/hostile-takeover
[ 168, 51, 168, 3, 1395611395 ]
def finish_post(self, selected, template_values):
    drain_command = None
    if self.request.get('drain'):
        drain_command = 'drain'
    if self.request.get('undrain'):
        drain_command = 'undrain'
    errors = []
    if len(selected) == 0:
        errors.append('Must select at least one server.')
    if drain_command == None:
        errors.append('Must select drain or undrain.')
    else:
        for info in selected:
            serverinfo.ServerInfo.send_command(info, '{"command": "%s"}' % drain_command)
            errors.append('Server %s sent %s command.' % (info['name'], drain_command))
    template_values['errors'] = errors
    self.response.headers['Content-Type'] = 'application/xhtml+xml'
    path = os.path.join(os.path.dirname(__file__), 'drain.xhtml')
    self.response.out.write(template.render(path, template_values))
spiffcode/hostile-takeover
[ 168, 51, 168, 3, 1395611395 ]
def is_dst(zonename, date):
    local_tz = pytz.timezone(zonename)
    localized_time = local_tz.localize(date)
    return localized_time.dst() != timedelta(0)
tidepool-org/dfaker
[ 2, 5, 2, 2, 1432772585 ]
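A brief, hedged usage example of is_dst above; it requires pytz, and the zone and dates below are purely illustrative:

from datetime import datetime, timedelta
import pytz

print(is_dst('US/Eastern', datetime(2021, 7, 1)))  # True: July is daylight saving time
print(is_dst('US/Eastern', datetime(2021, 1, 1)))  # False: January is standard time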
def convert_to_mmol(iterable):
    conversion_factor = 18.01559
    if isinstance(iterable, float) or isinstance(iterable, int):
        return iterable / conversion_factor
    return [reading / conversion_factor for reading in iterable]
tidepool-org/dfaker
[ 2, 5, 2, 2, 1432772585 ]
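convert_to_mmol divides by 18.01559, the conventional mg/dL-to-mmol/L factor for blood glucose; a hedged sketch of both call styles it accepts:

print(convert_to_mmol(180))        # single reading: ~9.99 mmol/L
print(convert_to_mmol([90, 120]))  # list of readings: [~5.00, ~6.66] mmol/L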
def make_timesteps(start_time, offset, timelist):
    """ Convert list of floats representing time into epoch time
        start_time -- a timezone naive datetime object
        offset -- offset in minutes
        timelist -- a list of incrementing floats representing time increments
    """
    timesteps = []
    epoch_ts = convert_ISO_to_epoch(str(start_time), '%Y-%m-%d %H:%M:%S')
    local_timestamp = epoch_ts - offset*60
    for time_item in timelist:
        new_time = int(local_timestamp) + int(time_item * 60)
        timesteps.append(new_time)
    return timesteps
tidepool-org/dfaker
[ 2, 5, 2, 2, 1432772585 ]
def fullfact(levels):
    """
    Create a general full-factorial design
tisimst/pyDOE
[ 218, 111, 218, 26, 1377625323 ]
def ff2n(n):
    """
    Create a 2-Level full-factorial design
tisimst/pyDOE
[ 218, 111, 218, 26, 1377625323 ]
def fracfact(gen):
    """
    Create a 2-level fractional-factorial design with a generator string.
tisimst/pyDOE
[ 218, 111, 218, 26, 1377625323 ]
def __init__(self, db, collection_name, has_stats=False, **kwargs):
    self.property_helper = None
    self.log_helper = None
    self.collection_name = collection_name
    self.db = db
    if 'logger' in kwargs:
        self.log_helper = kwargs['logger']
    if collection_name in self.db.collection_names():
        self.collection = self.db[collection_name]
    else:
        self.collection = self.db.create_collection(collection_name)
    if has_stats:
        self.add_stats_collection()
texttochange/vusion-backend
[ 5, 1, 5, 2, 1329910064 ]
def close_connection(self):
    pass
texttochange/vusion-backend
[ 5, 1, 5, 2, 1329910064 ]
def save_document(self, document):
    document.validate_fields()
    return self.collection.save(document.get_as_dict())
texttochange/vusion-backend
[ 5, 1, 5, 2, 1329910064 ]
def set_property_helper(self, property_helper):
    self.property_helper = property_helper
texttochange/vusion-backend
[ 5, 1, 5, 2, 1329910064 ]
def __getattr__(self, attr):
    orig_attr = self.collection.__getattribute__(attr)
    if callable(orig_attr):
        def hooked(*args, **kwargs):
            result = orig_attr(*args, **kwargs)
            # prevent wrapped_class from becoming unwrapped
            if result == self.collection:
                return self
            return result
        return hooked
    else:
        return orig_attr
texttochange/vusion-backend
[ 5, 1, 5, 2, 1329910064 ]
def log(self, msg, level='msg'):
    if self.log_helper is not None:
        self.log_helper.log(msg, level)
texttochange/vusion-backend
[ 5, 1, 5, 2, 1329910064 ]
def bonferroni(p, n):
    return np.minimum(1., p * n)
alexis-roche/nipy
[ 1, 1, 1, 1, 1272833701 ]
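A small sketch of the Bonferroni helper above: each p-value is multiplied by the number of tests n and capped at 1. The inputs are illustrative:

import numpy as np

p = np.array([0.001, 0.02, 0.2])
print(bonferroni(p, 10))  # 0.01, 0.2, 1.0 (values above 1 are clipped)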
def cluster_stats(zimg, mask, height_th, height_control='fpr',
                  cluster_th=0, nulls={}):
    """
    Return a list of clusters, each cluster being represented by a
    dictionary. Clusters are sorted by descending size order. Within
    each cluster, local maxima are sorted by descending depth order.

    Parameters
    ----------
    zimg: z-score image
    mask: mask image
    height_th: cluster forming threshold
    height_control: string
        false positive control meaning of cluster forming
        threshold: 'fpr'|'fdr'|'bonferroni'|'none'
    cluster_th: cluster size threshold
    null_s : cluster-level calibration method: None|'rft'|array

    Notes
    -----
    This works only with three dimensional data
    """
    # Masking
    if len(mask.shape) > 3:
        xyz = np.where((mask.get_data() > 0).squeeze())
        zmap = zimg.get_data().squeeze()[xyz]
    else:
        xyz = np.where(mask.get_data() > 0)
        zmap = zimg.get_data()[xyz]
    xyz = np.array(xyz).T
    nvoxels = np.size(xyz, 0)

    # Thresholding
    if height_control == 'fpr':
        zth = sp_stats.norm.isf(height_th)
    elif height_control == 'fdr':
        zth = empirical_pvalue.gaussian_fdr_threshold(zmap, height_th)
    elif height_control == 'bonferroni':
        zth = sp_stats.norm.isf(height_th / nvoxels)
    else:  ## Brute-force thresholding
        zth = height_th
    pth = sp_stats.norm.sf(zth)
    above_th = zmap > zth
    if len(np.where(above_th)[0]) == 0:
        return None, None  ## FIXME
    zmap_th = zmap[above_th]
    xyz_th = xyz[above_th]

    # Clustering
    ## Extract local maxima and connex components above some threshold
    ff = field_from_graph_and_data(wgraph_from_3d_grid(xyz_th, k=18), zmap_th)
    maxima, depth = ff.get_local_maxima(th=zth)
    labels = ff.cc()

    ## Make list of clusters, each cluster being a dictionary
    clusters = []
    for k in range(labels.max() + 1):
        s = np.sum(labels == k)
        if s >= cluster_th:
            in_cluster = labels[maxima] == k
            m = maxima[in_cluster]
            d = depth[in_cluster]
            sorted = d.argsort()[::-1]
            clusters.append({'size': s, 'maxima': m[sorted], 'depth': d[sorted]})

    ## Sort clusters by descending size order
    clusters.sort(key=lambda c: c['size'], reverse=True)

    # FDR-corrected p-values
    fdr_pvalue = empirical_pvalue.gaussian_fdr(zmap)[above_th]

    # Default "nulls"
    if not 'zmax' in nulls:
        nulls['zmax'] = 'bonferroni'
    if not 'smax' in nulls:
        nulls['smax'] = None
    if not 's' in nulls:
        nulls['s'] = None

    # Report significance levels in each cluster
    for c in clusters:
        maxima = c['maxima']
        zscore = zmap_th[maxima]
        pval = sp_stats.norm.sf(zscore)
        # Replace array indices with real coordinates
        c['maxima'] = apply_affine(get_affine(zimg), xyz_th[maxima])
        c['zscore'] = zscore
        c['pvalue'] = pval
        c['fdr_pvalue'] = fdr_pvalue[maxima]

        # Voxel-level corrected p-values
        p = None
        if nulls['zmax'] == 'bonferroni':
            p = bonferroni(pval, nvoxels)
        elif isinstance(nulls['zmax'], np.ndarray):
            p = simulated_pvalue(zscore, nulls['zmax'])
        c['fwer_pvalue'] = p

        # Cluster-level p-values (corrected)
        p = None
        if isinstance(nulls['smax'], np.ndarray):
            p = simulated_pvalue(c['size'], nulls['smax'])
        c['cluster_fwer_pvalue'] = p

        # Cluster-level p-values (uncorrected)
        p = None
        if isinstance(nulls['s'], np.ndarray):
            p = simulated_pvalue(c['size'], nulls['s'])
        c['cluster_pvalue'] = p

    # General info
    info = {'nvoxels': nvoxels,
            'threshold_z': zth,
            'threshold_p': pth,
            'threshold_pcorr': bonferroni(pth, nvoxels)}

    return clusters, info
alexis-roche/nipy
[ 1, 1, 1, 1, 1272833701 ]
def get_3d_peaks(image, mask=None, threshold=0., nn=18, order_th=0):
    """
    returns all the peaks of image that are with the mask
    and above the provided threshold

    Parameters
    ----------
    image, (3d) test image
    mask=None, (3d) mask image
        By default no masking is performed
    threshold=0., float, threshold value above which peaks are considered
    nn=18, int, number of neighbours of the topological spatial model
    order_th=0, int, threshold on topological order to validate the peaks

    Returns
    -------
    peaks, a list of dictionaries, where each dict has the fields:
    vals, map value at the peak
    order, topological order of the peak
    ijk, array of shape (1,3) grid coordinate of the peak
    pos, array of shape (n_maxima,3) mm coordinates (mapped by affine)
        of the peaks
    """
    # Masking
    if mask is not None:
        bmask = mask.get_data().ravel()
        data = image.get_data().ravel()[bmask > 0]
        xyz = np.array(np.where(bmask > 0)).T
    else:
        shape = image.shape
        data = image.get_data().ravel()
        xyz = np.reshape(np.indices(shape), (3, np.prod(shape))).T
    affine = get_affine(image)

    if not (data > threshold).any():
        return None

    # Extract local maxima and connex components above some threshold
    ff = field_from_graph_and_data(wgraph_from_3d_grid(xyz, k=18), data)
    maxima, order = ff.get_local_maxima(th=threshold)

    # retain only the maxima greater than the specified order
    maxima = maxima[order > order_th]
    order = order[order > order_th]

    n_maxima = len(maxima)
    if n_maxima == 0:
        # should not occur ?
        return None

    # reorder the maxima to have decreasing peak value
    vals = data[maxima]
    idx = np.argsort(- vals)
    maxima = maxima[idx]
    order = order[idx]

    vals = data[maxima]
    ijk = xyz[maxima]
    pos = np.dot(np.hstack((ijk, np.ones((n_maxima, 1)))), affine.T)[:, :3]
    peaks = [{'val': vals[k], 'order': order[k], 'ijk': ijk[k], 'pos': pos[k]}
             for k in range(n_maxima)]

    return peaks
alexis-roche/nipy
[ 1, 1, 1, 1, 1272833701 ]
def prepare_arrays(data_images, vardata_images, mask_images):
    from .mask import intersect_masks
    # Compute mask intersection
    mask = intersect_masks(mask_images, threshold=1.)
    # Compute xyz coordinates from mask
    xyz = np.array(np.where(mask > 0))
    # Prepare data & vardata arrays
    data = np.array([(d.get_data()[xyz[0], xyz[1], xyz[2]]).squeeze()
                     for d in data_images]).squeeze()
    if vardata_images is None:
        vardata = None
    else:
        vardata = np.array([(d.get_data()[xyz[0], xyz[1], xyz[2]]).squeeze()
                            for d in vardata_images]).squeeze()
    return data, vardata, xyz, mask
alexis-roche/nipy
[ 1, 1, 1, 1, 1272833701 ]
def twosample_test(data_images, vardata_images, mask_images, labels, stat_id,
                   permutations=0, cluster_forming_th=0.01):
    """
    Helper function for permutation-based mass univariate twosample
    group analysis. Labels is a binary vector (1-2). Regions more
    active for group 1 than group 2 are inferred.
    """
    # Prepare arrays
    data, vardata, xyz, mask = prepare_arrays(data_images, vardata_images,
                                              mask_images)

    # Create two-sample permutation test instance
    if vardata_images is None:
        ptest = permutation_test_twosample(
            data[labels == 1], data[labels == 2], xyz, stat_id=stat_id)
    else:
        ptest = permutation_test_twosample(
            data[labels == 1], data[labels == 2], xyz,
            vardata1=vardata[labels == 1], vardata2=vardata[labels == 2],
            stat_id=stat_id)

    # Compute z-map image
    zmap = np.zeros(data_images[0].shape).squeeze()
    zmap[list(xyz)] = ptest.zscore()
    zimg = Image(zmap, get_affine(data_images[0]))

    # Compute mask image
    maskimg = Image(mask, get_affine(data_images[0]))

    # Multiple comparisons
    if permutations <= 0:
        return zimg, maskimg
    else:
        # Cluster definition: (threshold, diameter)
        cluster_def = (ptest.height_threshold(cluster_forming_th), None)

        # Calibration
        voxel_res, cluster_res, region_res = \
            ptest.calibrate(nperms=permutations, clusters=[cluster_def])
        nulls = {}
        nulls['zmax'] = ptest.zscore(voxel_res['perm_maxT_values'])
        nulls['s'] = cluster_res[0]['perm_size_values']
        nulls['smax'] = cluster_res[0]['perm_maxsize_values']

        # Return z-map image, mask image and dictionary of null
        # distribution for cluster sizes (s), max cluster size (smax)
        # and max z-score (zmax)
        return zimg, maskimg, nulls
alexis-roche/nipy
[ 1, 1, 1, 1, 1272833701 ]
def linear_model_fit(data_images, mask_images, design_matrix, vector):
    """
    Helper function for group data analysis using arbitrary design matrix
    """
    # Prepare arrays
    data, vardata, xyz, mask = prepare_arrays(data_images, None, mask_images)

    # Create glm instance
    G = glm(data, design_matrix)

    # Compute requested contrast
    c = G.contrast(vector)

    # Compute z-map image
    zmap = np.zeros(data_images[0].shape).squeeze()
    zmap[list(xyz)] = c.zscore()
    zimg = Image(zmap, get_affine(data_images[0]))

    return zimg
alexis-roche/nipy
[ 1, 1, 1, 1, 1272833701 ]
def __init__(self, data, design_matrix, mask=None, formula=None,
             model=def_model, method=None, niter=def_niter):
    # Convert input data and design into sequences
    if not hasattr(data, '__iter__'):
        data = [data]
    if not hasattr(design_matrix, '__iter__'):
        design_matrix = [design_matrix]

    # configure spatial properties
    # the 'sampling' direction is assumed to be the last
    # TODO: check that all input images have the same shape and
    # that it's consistent with the mask
    nomask = mask is None
    if nomask:
        self.xyz = None
        self.axis = len(data[0].shape) - 1
    else:
        self.xyz = np.where(mask.get_data() > 0)
        self.axis = 1

    self.spatial_shape = data[0].shape[0: -1]
    self.affine = get_affine(data[0])

    self.glm = []
    for i in range(len(data)):
        if not isinstance(design_matrix[i], np.ndarray):
            raise ValueError('Invalid design matrix')
        if nomask:
            Y = data[i].get_data()
        else:
            Y = data[i].get_data()[self.xyz]
        X = design_matrix[i]
        self.glm.append(glm(Y, X, axis=self.axis, formula=formula,
                            model=model, method=method, niter=niter))
alexis-roche/nipy
[ 1, 1, 1, 1, 1272833701 ]
def contrast(self, vector):
    """Compute images of contrast and contrast variance.
    """
    # Compute the overall contrast across models
    c = self.glm[0].contrast(vector)
    for g in self.glm[1:]:
        c += g.contrast(vector)

    def affect_inmask(dest, src, xyz):
        if xyz is None:
            dest = src
        else:
            dest[xyz] = src
        return dest

    con = np.zeros(self.spatial_shape)
    con_img = Image(affect_inmask(con, c.effect, self.xyz), self.affine)
    vcon = np.zeros(self.spatial_shape)
    vcon_img = Image(affect_inmask(vcon, c.variance, self.xyz), self.affine)
    z = np.zeros(self.spatial_shape)
    z_img = Image(affect_inmask(z, c.zscore(), self.xyz), self.affine)
    dof = c.dof
    return con_img, vcon_img, z_img, dof
alexis-roche/nipy
[ 1, 1, 1, 1, 1272833701 ]
def extractItsjustadreamwebWordpressCom(item):
    '''
    Parser for 'itsjustadreamweb.wordpress.com'
    '''
    vol, chp, frag, postfix = extractVolChapterFragmentPostfix(item['title'])
    if not (chp or vol) or "preview" in item['title'].lower():
        return None

    if "WATTT" in item['tags']:
        return buildReleaseMessageWithType(item, "WATTT", vol, chp, frag=frag,
                                           postfix=postfix)

    return False
fake-name/ReadableWebProxy
[ 191, 16, 191, 3, 1437712243 ]
def test_pvconst_npts_setter():
    """Test pvconst property and setter methods"""
    pvconst = pvconstants.PVconstants()
    assert pvconst.npts == pvconstants.NPTS
    assert len(pvconst.pts) == pvconst.npts
    assert pvconst.pts[0] == 0
    assert pvconst.pts[-1] == 1
    assert len(pvconst.negpts) == pvconst.npts
    assert pvconst.negpts[0] == 1
    assert pvconst.negpts[-1] > 0
    pvconst.npts = 1001
    assert pvconst.npts == 1001
    assert len(pvconst.pts) == pvconst.npts
    assert pvconst.pts[0] == 0
    assert pvconst.pts[-1] == 1
    assert len(pvconst.negpts) == pvconst.npts
    assert pvconst.negpts[0] == 1
    assert pvconst.negpts[-1] > 0
SunPower/PVMismatch
[ 56, 26, 56, 48, 1358900803 ]
def emit(self, event_name, **kwargs):
    payload = self._format_outbound_data(event_name, **kwargs)
    self.sendMessage(payload)
CptLemming/django-socket-server
[ 7, 8, 7, 2, 1416363509 ]
def onMessage(self, payload, isBinary):
    self.factory.handle_message(self, payload)
CptLemming/django-socket-server
[ 7, 8, 7, 2, 1416363509 ]
def __init__(self, *args, **kwargs):
    WebSocketClientFactory.__init__(self, *args, **kwargs)
    self.callbacks = {}
    self.register_callbacks()
CptLemming/django-socket-server
[ 7, 8, 7, 2, 1416363509 ]
def on(self, event_name, callback):
    self.callbacks[event_name] = callback
CptLemming/django-socket-server
[ 7, 8, 7, 2, 1416363509 ]