content (stringlengths 35–762k) | sha1 (stringlengths 40–40) | id (int64 0–3.66M) |
---|---|---|
import argparse
def create_parser() -> argparse.ArgumentParser:
"""
Constructs the MFA argument parser
Returns
-------
ArgumentParser
MFA argument parser
"""
GLOBAL_CONFIG = load_global_config()
def add_global_options(subparser: argparse.ArgumentParser, textgrid_output: bool = False):
"""
Add a set of global options to a subparser
Parameters
----------
subparser: argparse.ArgumentParser
Subparser to augment
textgrid_output: bool
Flag for whether the subparser is used for a command that generates TextGrids
"""
subparser.add_argument(
"-t",
"--temp_directory",
type=str,
default=GLOBAL_CONFIG["temp_directory"],
help=f"Temporary directory root to store MFA created files, default is {GLOBAL_CONFIG['temp_directory']}",
)
subparser.add_argument(
"--disable_mp",
help=f"Disable any multiprocessing during alignment (not recommended), default is {not GLOBAL_CONFIG['use_mp']}",
action="store_true",
default=not GLOBAL_CONFIG["use_mp"],
)
subparser.add_argument(
"-j",
"--num_jobs",
type=int,
default=GLOBAL_CONFIG["num_jobs"],
help=f"Number of data splits (and cores to use if multiprocessing is enabled), defaults "
f"is {GLOBAL_CONFIG['num_jobs']}",
)
subparser.add_argument(
"-v",
"--verbose",
help=f"Output debug messages, default is {GLOBAL_CONFIG['verbose']}",
action="store_true",
default=GLOBAL_CONFIG["verbose"],
)
subparser.add_argument(
"--clean",
help=f"Remove files from previous runs, default is {GLOBAL_CONFIG['clean']}",
action="store_true",
default=GLOBAL_CONFIG["clean"],
)
subparser.add_argument(
"--overwrite",
help=f"Overwrite output files when they exist, default is {GLOBAL_CONFIG['overwrite']}",
action="store_true",
default=GLOBAL_CONFIG["overwrite"],
)
subparser.add_argument(
"--debug",
help=f"Run extra steps for debugging issues, default is {GLOBAL_CONFIG['debug']}",
action="store_true",
default=GLOBAL_CONFIG["debug"],
)
if textgrid_output:
subparser.add_argument(
"--disable_textgrid_cleanup",
help=f"Disable extra clean up steps on TextGrid output, default is {not GLOBAL_CONFIG['cleanup_textgrids']}",
action="store_true",
default=not GLOBAL_CONFIG["cleanup_textgrids"],
)
parser = argparse.ArgumentParser()
subparsers = parser.add_subparsers(dest="subcommand")
subparsers.required = True
_ = subparsers.add_parser("version")
align_parser = subparsers.add_parser("align")
align_parser.add_argument("corpus_directory", help="Full path to the directory to align")
align_parser.add_argument(
"dictionary_path", help="Full path to the pronunciation dictionary to use"
)
align_parser.add_argument(
"acoustic_model_path",
help=f"Full path to the archive containing pre-trained model or language ({', '.join(acoustic_models)})",
)
align_parser.add_argument(
"output_directory",
help="Full path to output directory, will be created if it doesn't exist",
)
align_parser.add_argument(
"--config_path", type=str, default="", help="Path to config file to use for alignment"
)
align_parser.add_argument(
"-s",
"--speaker_characters",
type=str,
default="0",
help="Number of characters of file names to use for determining speaker, "
"default is to use directory names",
)
align_parser.add_argument(
"-a",
"--audio_directory",
type=str,
default="",
help="Audio directory root to use for finding audio files",
)
add_global_options(align_parser, textgrid_output=True)
adapt_parser = subparsers.add_parser("adapt")
adapt_parser.add_argument("corpus_directory", help="Full path to the directory to align")
adapt_parser.add_argument(
"dictionary_path", help="Full path to the pronunciation dictionary to use"
)
adapt_parser.add_argument(
"acoustic_model_path",
help=f"Full path to the archive containing pre-trained model or language ({', '.join(acoustic_models)})",
)
adapt_parser.add_argument(
"output_paths",
nargs="+",
help="Path to directory for aligned TextGrids, zip path to export acoustic model, or both",
)
adapt_parser.add_argument(
"-o",
"--output_model_path",
type=str,
default="",
help="Full path to save adapted acoustic model",
)
adapt_parser.add_argument(
"--full_train",
action="store_true",
help="Specify whether to do a round of speaker-adapted training rather than the default "
"remapping approach to adaptation",
)
adapt_parser.add_argument(
"--config_path", type=str, default="", help="Path to config file to use for alignment"
)
adapt_parser.add_argument(
"-s",
"--speaker_characters",
type=str,
default="0",
help="Number of characters of file names to use for determining speaker, "
"default is to use directory names",
)
adapt_parser.add_argument(
"-a",
"--audio_directory",
type=str,
default="",
help="Audio directory root to use for finding audio files",
)
add_global_options(adapt_parser, textgrid_output=True)
train_parser = subparsers.add_parser("train")
train_parser.add_argument(
"corpus_directory", help="Full path to the source directory to align"
)
train_parser.add_argument(
"dictionary_path", help="Full path to the pronunciation dictionary to use", default=""
)
train_parser.add_argument(
"output_paths",
nargs="+",
help="Path to directory for aligned TextGrids, zip path to export acoustic model, or both",
)
train_parser.add_argument(
"--config_path",
type=str,
default="",
help="Path to config file to use for training and alignment",
)
train_parser.add_argument(
"-o",
"--output_model_path",
type=str,
default="",
help="Full path to save resulting acoustic model",
)
train_parser.add_argument(
"-s",
"--speaker_characters",
type=str,
default="0",
help="Number of characters of filenames to use for determining speaker, "
"default is to use directory names",
)
train_parser.add_argument(
"-a",
"--audio_directory",
type=str,
default="",
help="Audio directory root to use for finding audio files",
)
add_global_options(train_parser, textgrid_output=True)
validate_parser = subparsers.add_parser("validate")
validate_parser.add_argument(
"corpus_directory", help="Full path to the source directory to align"
)
validate_parser.add_argument(
"dictionary_path", help="Full path to the pronunciation dictionary to use", default=""
)
validate_parser.add_argument(
"acoustic_model_path",
nargs="?",
default="",
help=f"Full path to the archive containing pre-trained model or language ({', '.join(acoustic_models)})",
)
validate_parser.add_argument(
"-s",
"--speaker_characters",
type=str,
default="0",
help="Number of characters of file names to use for determining speaker, "
"default is to use directory names",
)
validate_parser.add_argument(
"--test_transcriptions", help="Test accuracy of transcriptions", action="store_true"
)
validate_parser.add_argument(
"--ignore_acoustics",
help="Skip acoustic feature generation and associated validation",
action="store_true",
)
add_global_options(validate_parser)
g2p_model_help_message = f"""Full path to the archive containing pre-trained model or language ({', '.join(g2p_models)})
If not specified, then orthographic transcription is split into pronunciations."""
g2p_parser = subparsers.add_parser("g2p")
g2p_parser.add_argument("g2p_model_path", help=g2p_model_help_message, nargs="?")
g2p_parser.add_argument(
"input_path",
help="Corpus to base word list on or a text file of words to generate pronunciations",
)
g2p_parser.add_argument("output_path", help="Path to save output dictionary")
g2p_parser.add_argument(
"--include_bracketed",
help="Included words enclosed by brackets, job_name.e. [...], (...), <...>",
action="store_true",
)
g2p_parser.add_argument(
"--config_path", type=str, default="", help="Path to config file to use for G2P"
)
add_global_options(g2p_parser)
train_g2p_parser = subparsers.add_parser("train_g2p")
train_g2p_parser.add_argument("dictionary_path", help="Location of existing dictionary")
train_g2p_parser.add_argument("output_model_path", help="Desired location of generated model")
train_g2p_parser.add_argument(
"--config_path", type=str, default="", help="Path to config file to use for G2P"
)
train_g2p_parser.add_argument(
"--validate",
action="store_true",
help="Perform an analysis of accuracy training on "
"most of the data and validating on an unseen subset",
)
add_global_options(train_g2p_parser)
model_parser = subparsers.add_parser("model")
model_subparsers = model_parser.add_subparsers(dest="action")
model_subparsers.required = True
model_download_parser = model_subparsers.add_parser("download")
model_download_parser.add_argument(
"model_type", help=f"Type of model to download, options: {', '.join(MODEL_TYPES)}"
)
model_download_parser.add_argument(
"name",
help="Name of language code to download, if not specified, "
"will list all available languages",
nargs="?",
)
model_list_parser = model_subparsers.add_parser("list")
model_list_parser.add_argument(
"model_type", nargs="?", help=f"Type of model to list, options: {', '.join(MODEL_TYPES)}"
)
model_inspect_parser = model_subparsers.add_parser("inspect")
model_inspect_parser.add_argument(
"model_type",
nargs="?",
help=f"Type of model to download, options: {', '.join(MODEL_TYPES)}",
)
model_inspect_parser.add_argument(
"name", help="Name of pretrained model or path to MFA model to inspect"
)
model_save_parser = model_subparsers.add_parser("save")
model_save_parser.add_argument("model_type", help="Type of MFA model")
model_save_parser.add_argument(
"path", help="Path to MFA model to save for invoking with just its name"
)
model_save_parser.add_argument(
"--name",
help="Name to use as reference (defaults to the name of the zip file",
type=str,
default="",
)
model_save_parser.add_argument(
"--overwrite",
help="Flag to overwrite existing pretrained models with the same name (and model type)",
action="store_true",
)
train_lm_parser = subparsers.add_parser("train_lm")
train_lm_parser.add_argument(
"source_path",
help="Full path to the source directory to train from, alternatively "
"an ARPA format language model to convert for MFA use",
)
train_lm_parser.add_argument(
"output_model_path", type=str, help="Full path to save resulting language model"
)
train_lm_parser.add_argument(
"-m",
"--model_path",
type=str,
help="Full path to existing language model to merge probabilities",
)
train_lm_parser.add_argument(
"-w",
"--model_weight",
type=float,
default=1.0,
help="Weight factor for supplemental language model, defaults to 1.0",
)
train_lm_parser.add_argument(
"--dictionary_path", help="Full path to the pronunciation dictionary to use", default=""
)
train_lm_parser.add_argument(
"--config_path",
type=str,
default="",
help="Path to config file to use for training and alignment",
)
add_global_options(train_lm_parser)
train_dictionary_parser = subparsers.add_parser("train_dictionary")
train_dictionary_parser.add_argument(
"corpus_directory", help="Full path to the directory to align"
)
train_dictionary_parser.add_argument(
"dictionary_path", help="Full path to the pronunciation dictionary to use"
)
train_dictionary_parser.add_argument(
"acoustic_model_path",
help=f"Full path to the archive containing pre-trained model or language ({', '.join(acoustic_models)})",
)
train_dictionary_parser.add_argument(
"output_directory",
help="Full path to output directory, will be created if it doesn't exist",
)
train_dictionary_parser.add_argument(
"--config_path", type=str, default="", help="Path to config file to use for alignment"
)
train_dictionary_parser.add_argument(
"-s",
"--speaker_characters",
type=str,
default="0",
help="Number of characters of file names to use for determining speaker, "
"default is to use directory names",
)
add_global_options(train_dictionary_parser)
train_ivector_parser = subparsers.add_parser("train_ivector")
train_ivector_parser.add_argument(
"corpus_directory",
help="Full path to the source directory to " "train the ivector extractor",
)
train_ivector_parser.add_argument(
"dictionary_path", help="Full path to the pronunciation dictionary to use"
)
train_ivector_parser.add_argument(
"acoustic_model_path",
type=str,
default="",
help="Full path to acoustic model for alignment",
)
train_ivector_parser.add_argument(
"output_model_path",
type=str,
default="",
help="Full path to save resulting ivector extractor",
)
train_ivector_parser.add_argument(
"-s",
"--speaker_characters",
type=str,
default="0",
help="Number of characters of filenames to use for determining speaker, "
"default is to use directory names",
)
train_ivector_parser.add_argument(
"--config_path", type=str, default="", help="Path to config file to use for training"
)
add_global_options(train_ivector_parser)
classify_speakers_parser = subparsers.add_parser("classify_speakers")
classify_speakers_parser.add_argument(
"corpus_directory",
help="Full path to the source directory to " "run speaker classification",
)
classify_speakers_parser.add_argument(
"ivector_extractor_path", type=str, default="", help="Full path to ivector extractor model"
)
classify_speakers_parser.add_argument(
"output_directory",
help="Full path to output directory, will be created if it doesn't exist",
)
classify_speakers_parser.add_argument(
"-s", "--num_speakers", type=int, default=0, help="Number of speakers if known"
)
classify_speakers_parser.add_argument(
"--cluster", help="Using clustering instead of classification", action="store_true"
)
classify_speakers_parser.add_argument(
"--config_path",
type=str,
default="",
help="Path to config file to use for ivector extraction",
)
add_global_options(classify_speakers_parser)
create_segments_parser = subparsers.add_parser("create_segments")
create_segments_parser.add_argument(
"corpus_directory", help="Full path to the source directory to " "run VAD segmentation"
)
create_segments_parser.add_argument(
"output_directory",
help="Full path to output directory, will be created if it doesn't exist",
)
create_segments_parser.add_argument(
"--config_path", type=str, default="", help="Path to config file to use for segmentation"
)
add_global_options(create_segments_parser)
transcribe_parser = subparsers.add_parser("transcribe")
transcribe_parser.add_argument(
"corpus_directory", help="Full path to the directory to transcribe"
)
transcribe_parser.add_argument(
"dictionary_path", help="Full path to the pronunciation dictionary to use"
)
transcribe_parser.add_argument(
"acoustic_model_path",
help=f"Full path to the archive containing pre-trained model or language ({', '.join(acoustic_models)})",
)
transcribe_parser.add_argument(
"language_model_path",
help=f"Full path to the archive containing pre-trained model or language ({', '.join(language_models)})",
)
transcribe_parser.add_argument(
"output_directory",
help="Full path to output directory, will be created if it doesn't exist",
)
transcribe_parser.add_argument(
"--config_path", type=str, default="", help="Path to config file to use for transcription"
)
transcribe_parser.add_argument(
"-s",
"--speaker_characters",
type=str,
default="0",
help="Number of characters of file names to use for determining speaker, "
"default is to use directory names",
)
transcribe_parser.add_argument(
"-a",
"--audio_directory",
type=str,
default="",
help="Audio directory root to use for finding audio files",
)
transcribe_parser.add_argument(
"-e",
"--evaluate",
help="Evaluate the transcription " "against golden texts",
action="store_true",
)
add_global_options(transcribe_parser)
config_parser = subparsers.add_parser(
"configure",
help="The configure command is used to set global defaults for MFA so "
"you don't have to set them every time you call an MFA command.",
)
config_parser.add_argument(
"-t",
"--temp_directory",
type=str,
default="",
help=f"Set the default temporary directory, default is {GLOBAL_CONFIG['temp_directory']}",
)
config_parser.add_argument(
"-j",
"--num_jobs",
type=int,
help=f"Set the number of processes to use by default, defaults to {GLOBAL_CONFIG['num_jobs']}",
)
config_parser.add_argument(
"--always_clean",
help="Always remove files from previous runs by default",
action="store_true",
)
config_parser.add_argument(
"--never_clean",
help="Don't remove files from previous runs by default",
action="store_true",
)
config_parser.add_argument(
"--always_verbose", help="Default to verbose output", action="store_true"
)
config_parser.add_argument(
"--never_verbose", help="Default to non-verbose output", action="store_true"
)
config_parser.add_argument(
"--always_debug", help="Default to running debugging steps", action="store_true"
)
config_parser.add_argument(
"--never_debug", help="Default to not running debugging steps", action="store_true"
)
config_parser.add_argument(
"--always_overwrite", help="Always overwrite output files", action="store_true"
)
config_parser.add_argument(
"--never_overwrite",
help="Never overwrite output files (if file already exists, "
"the output will be saved in the temp directory)",
action="store_true",
)
config_parser.add_argument(
"--disable_mp",
help="Disable all multiprocessing (not recommended as it will usually "
"increase processing times)",
action="store_true",
)
config_parser.add_argument(
"--enable_mp",
help="Enable multiprocessing (recommended and enabled by default)",
action="store_true",
)
config_parser.add_argument(
"--disable_textgrid_cleanup",
help="Disable postprocessing of TextGrids that cleans up "
"silences and recombines compound words and clitics",
action="store_true",
)
config_parser.add_argument(
"--enable_textgrid_cleanup",
help="Enable postprocessing of TextGrids that cleans up "
"silences and recombines compound words and clitics",
action="store_true",
)
config_parser.add_argument(
"--disable_terminal_colors", help="Turn off colored text in output", action="store_true"
)
config_parser.add_argument(
"--enable_terminal_colors", help="Turn on colored text in output", action="store_true"
)
config_parser.add_argument(
"--terminal_width",
help=f"Set width of terminal output, "
f"currently set to {GLOBAL_CONFIG['terminal_width']}",
default=GLOBAL_CONFIG["terminal_width"],
type=int,
)
config_parser.add_argument(
"--blas_num_threads",
help=f"Number of threads to use for BLAS libraries, 1 is recommended "
f"due to how much MFA relies on multiprocessing. "
f"Currently set to {GLOBAL_CONFIG['blas_num_threads']}",
default=GLOBAL_CONFIG["blas_num_threads"],
type=int,
)
history_parser = subparsers.add_parser("history")
history_parser.add_argument("depth", help="Number of commands to list", nargs="?", default=10)
history_parser.add_argument(
"--verbose", help="Flag for whether to output additional information", action="store_true"
)
_ = subparsers.add_parser("annotator")
_ = subparsers.add_parser("anchor")
return parser | cb91b45e8c958b6e50f7cc31ef9e5e16c8cb4888 | 3,656,500 |
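The pattern above, one top-level parser, a required subcommand, and a helper that attaches the same global flags to every subparser, is easiest to see in isolation. The sketch below is a minimal, hypothetical reduction of it (the DEFAULTS dict and the single "align" subcommand are illustrative, not MFA's real configuration).
import argparse
DEFAULTS = {"temp_directory": "/tmp/mfa_work", "num_jobs": 3}
def add_global_options(subparser: argparse.ArgumentParser) -> None:
    # shared flags every subcommand should accept
    subparser.add_argument("-t", "--temp_directory", default=DEFAULTS["temp_directory"])
    subparser.add_argument("-j", "--num_jobs", type=int, default=DEFAULTS["num_jobs"])
parser = argparse.ArgumentParser()
subparsers = parser.add_subparsers(dest="subcommand")
subparsers.required = True  # a subcommand must always be given
align_parser = subparsers.add_parser("align")
align_parser.add_argument("corpus_directory")
add_global_options(align_parser)
args = parser.parse_args(["align", "my_corpus", "-j", "8"])
print(args.subcommand, args.corpus_directory, args.num_jobs)  # align my_corpus 8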
def build_params_comments(python_code, keyword, info):
"""Builds comments for parameters"""
for arg, arg_info in zip(info.get('expected_url_params').keys(), info.get('expected_url_params').values()):
python_code += '\n' + 2*TAB_BASE*SPACE + ':param ' + score_to_underscore(arg) + ': '
python_code += str(arg_info.get('description')) + ' ' + str(arg_info.get('possible_values'))
return python_code | ce7446bb49ff25cbb2fb08ed8ca389dea16919bd | 3,656,501 |
async def async_setup(hass: HomeAssistant, config: dict):
"""Set up the Netatmo component."""
hass.data[DOMAIN] = {}
hass.data[DOMAIN][DATA_PERSONS] = {}
if DOMAIN not in config:
return True
config_flow.NetatmoFlowHandler.async_register_implementation(
hass,
config_entry_oauth2_flow.LocalOAuth2Implementation(
hass,
DOMAIN,
config[DOMAIN][CONF_CLIENT_ID],
config[DOMAIN][CONF_CLIENT_SECRET],
OAUTH2_AUTHORIZE,
OAUTH2_TOKEN,
),
)
return True | 7913dd1b7eaa60e7bedfba2a9da199cb4045e7ba | 3,656,502 |
import asyncio
import os
import aiohttp
async def upload_artifact(req):
"""
Upload artifact created during sample creation using the Jobs API.
"""
db = req.app["db"]
pg = req.app["pg"]
sample_id = req.match_info["sample_id"]
artifact_type = req.query.get("type")
if not await db.samples.find_one(sample_id):
raise NotFound()
errors = virtool.uploads.utils.naive_validator(req)
if errors:
raise InvalidQuery(errors)
name = req.query.get("name")
artifact_file_path = (
virtool.samples.utils.join_sample_path(req.app["config"], sample_id) / name
)
if artifact_type and artifact_type not in ArtifactType.to_list():
raise HTTPBadRequest(text="Unsupported sample artifact type")
try:
artifact = await create_artifact_file(pg, name, name, sample_id, artifact_type)
except exc.IntegrityError:
raise HTTPConflict(
text="Artifact file has already been uploaded for this sample"
)
upload_id = artifact["id"]
try:
size = await virtool.uploads.utils.naive_writer(req, artifact_file_path)
except asyncio.CancelledError:
logger.debug(f"Artifact file upload aborted for sample: {sample_id}")
await delete_row(pg, upload_id, SampleArtifact)
await req.app["run_in_thread"](os.remove, artifact_file_path)
return aiohttp.web.Response(status=499)
artifact = await virtool.uploads.db.finalize(pg, size, upload_id, SampleArtifact)
headers = {"Location": f"/samples/{sample_id}/artifact/{name}"}
return json_response(artifact, status=201, headers=headers) | 79a9e76fb75ba12b9118e8facbb23d2c007158cf | 3,656,503 |
def hotkey(x: int, y: int) -> bool:
"""Try to copy by dragging over the string, and then use hotkey."""
gui.moveTo(x + 15, y, 0)
gui.mouseDown()
gui.move(70, 0)
gui.hotkey("ctrl", "c")
gui.mouseUp()
return check_copied() | 5cd789fd8e1b3ecf9dd1585a6831f6db92d4b6b0 | 3,656,504 |
import requests
def get_tv_imdbid_by_id( tv_id, verify = True ):
"""
Returns the IMDb_ ID for a TV show.
:param int tv_id: the TMDB_ series ID for the TV show.
:param bool verify: optional argument, whether to verify SSL connections. Default is ``True``.
:returns: the IMDB_ ID for that TV show. Otherwise returns ``None`` if cannot be found.
:rtype: str
.. _IMDb: https://www.imdb.com
"""
response = requests.get(
'https://api.themoviedb.org/3/tv/%d/external_ids' % tv_id,
params = { 'api_key' : tmdb_apiKey }, verify = verify )
if response.status_code != 200:
print( 'problem here, %s.' % response.content )
return None
data = response.json( )
if 'imdb_id' not in data: return None
return data['imdb_id'] | 363a2284d65fe1cfa2f3d2d07e3205de77bf67ef | 3,656,505 |
def test_reading_cosmos_catalog():
"""Returns the cosmos catalog"""
cosmos_catalog = CosmosCatalog.from_file(COSMOS_CATALOG_PATHS)
return cosmos_catalog | 1fc6f32cfc86ee28e114878d5ce7c13891e79ae1 | 3,656,506 |
def is_terminal(p):
"""
Check if a given packet is a terminal element.
:param p: element to check
:type p: object
:return: If ``p`` is a terminal element
:rtype: bool
"""
return isinstance(p, _TerminalPacket) | 189da8342e61d112a7d56d778de7562f7b609b82 | 3,656,507 |
def vgg11_bn(pretrained=False, **kwargs):
"""VGG 11-layer model (configuration "A") with batch normalization
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
"""
if pretrained:
kwargs['init_weights'] = False
model = VGG(make_layers(cfg['A'], batch_norm=True), **kwargs)
if pretrained:
model.load_state_dict(model_zoo.load_url(model_urls['vgg11_bn']))
return model | 3a8a03bd4a337143d56ed99ae89f2bbc3e312e63 | 3,656,508 |
def trf_input_method(config, patient_id="", key_namespace="", **_):
"""Streamlit GUI method to facilitate TRF data provision.
Notes
-----
TRF files themselves have no innate patient alignment. An option
for TRF collection is to use the CLI tool
``pymedphys trf orchestrate``. This connects to the SAMBA server
hosted on the Elekta NSS and downloads the diagnostic backup zips.
It then takes these TRF files and queries the Mosaiq database using
time of delivery to identify these with a patient id (Ident.Pat_ID1)
and name.
As such, all references to patient ID and name within this
``trf_input_method`` are actually a reference to their Mosaiq
database counterparts.
"""
FILE_UPLOAD = "File upload"
INDEXED_TRF_SEARCH = "Search indexed TRF directory"
import_method = st.radio(
"TRF import method",
[FILE_UPLOAD, INDEXED_TRF_SEARCH],
key=f"{key_namespace}_trf_file_import_method",
)
if import_method == FILE_UPLOAD:
selected_files = st.file_uploader(
"Upload TRF files",
key=f"{key_namespace}_trf_file_uploader",
accept_multiple_files=True,
)
if not selected_files:
return {}
data_paths = []
individual_identifiers = ["Uploaded TRF file(s)"]
if import_method == INDEXED_TRF_SEARCH:
try:
indexed_trf_directory = _config.get_indexed_trf_directory(config)
except KeyError:
st.write(
_exceptions.ConfigMissing(
"No indexed TRF directory is configured. Please use "
f"'{FILE_UPLOAD}' instead."
)
)
return {}
patient_id = st.text_input(
"Patient ID", patient_id, key=f"{key_namespace}_patient_id"
)
st.write(patient_id)
filepaths = list(indexed_trf_directory.glob(f"*/{patient_id}_*/*/*/*/*.trf"))
raw_timestamps = [
"_".join(path.parent.name.split("_")[0:2]) for path in filepaths
]
timestamps = list(
pd.to_datetime(raw_timestamps, format="%Y-%m-%d_%H%M%S").astype(str)
)
timestamp_filepath_map = dict(zip(timestamps, filepaths))
timestamps = sorted(timestamps, reverse=True)
if len(timestamps) == 0:
if patient_id != "":
st.write(
_exceptions.NoRecordsFound(
f"No TRF log file found for patient ID {patient_id}"
)
)
return {"patient_id": patient_id}
if len(timestamps) == 1:
default_timestamp = timestamps[0]
else:
default_timestamp = []
selected_trf_deliveries = st.multiselect(
"Select TRF delivery timestamp(s)",
timestamps,
default=default_timestamp,
key=f"{key_namespace}_trf_deliveries",
)
if not selected_trf_deliveries:
return {}
st.write(
"""
#### TRF filepath(s)
"""
)
selected_files = [
timestamp_filepath_map[timestamp] for timestamp in selected_trf_deliveries
]
st.write([str(path.resolve()) for path in selected_files])
individual_identifiers = [
f"{path.parent.parent.parent.parent.name} {path.parent.name}"
for path in selected_files
]
data_paths = selected_files
st.write(
"""
#### Log file header(s)
"""
)
headers = []
tables = []
for path_or_binary in selected_files:
try:
path_or_binary.seek(0)
except AttributeError:
pass
header, table = read_trf(path_or_binary)
headers.append(header)
tables.append(table)
headers = pd.concat(headers)
headers.reset_index(inplace=True)
headers.drop("index", axis=1, inplace=True)
st.write(headers)
deliveries = _deliveries.cached_deliveries_loading(
tables, _deliveries.delivery_from_trf
)
identifier = f"TRF ({individual_identifiers[0]})"
patient_name = _attempt_patient_name_from_mosaiq(config, headers)
return {
"site": None,
"patient_id": patient_id,
"patient_name": patient_name,
"data_paths": data_paths,
"identifier": identifier,
"deliveries": deliveries,
} | 710a3f47e58ea5ed879cba6e51624072340308cf | 3,656,509 |
import datetime
def plotter(fdict):
""" Go """
ctx = get_autoplot_context(fdict, get_description())
station = ctx['station']
network = ctx['network']
year = ctx['year']
season = ctx['season']
nt = NetworkTable(network)
table = "alldata_%s" % (station[:2],)
pgconn = get_dbconn('coop')
# Have to do a redundant query to get the running values
obs = read_sql("""
WITH trail as (
SELECT day, year,
avg((high+low)/2.) OVER (ORDER by day ASC ROWS 91 PRECEDING) as avgt
from """ + table + """ WHERE station = %s)
SELECT day, avgt from trail WHERE year between %s and %s ORDER by day ASC
""", pgconn, params=(station, year, year + 2), index_col='day')
df = read_sql("""
WITH trail as (
SELECT day, year,
avg((high+low)/2.) OVER (ORDER by day ASC ROWS 91 PRECEDING) as avgt
from """ + table + """ WHERE station = %s),
extremes as (
SELECT day, year, avgt,
rank() OVER (PARTITION by year ORDER by avgt ASC) as minrank,
rank() OVER (PARTITION by year ORDER by avgt DESC) as maxrank
from trail),
yearmax as (
SELECT year, min(day) as summer_end, min(avgt) as summer
from extremes where maxrank = 1 GROUP by year),
yearmin as (
SELECT year, min(day) as winter_end, min(avgt) as winter
from extremes where minrank = 1 GROUP by year)
SELECT x.year, winter_end, winter, summer_end, summer,
extract(doy from winter_end)::int as winter_end_doy,
extract(doy from summer_end)::int as summer_end_doy
from yearmax x JOIN yearmin n on (x.year = n.year) ORDER by x.year ASC
""", pgconn, params=(station, ), index_col='year')
# Throw out spring of the first year
for col in ['winter', 'winter_end_doy', 'winter_end']:
df.at[df.index.min(), col] = None
# Need to cull current year
if datetime.date.today().month < 8:
for col in ['summer', 'summer_end_doy', 'summer_end']:
df.at[datetime.date.today().year, col] = None
if datetime.date.today().month < 2:
for col in ['winter', 'winter_end_doy', 'winter_end']:
df.at[datetime.date.today().year, col] = None
df['spring_length'] = df['summer_end_doy'] - 91 - df['winter_end_doy']
# fall is a bit tricker
df['fall_length'] = None
df['fall_length'].values[:-1] = ((df['winter_end_doy'].values[1:] + 365) -
91 - df['summer_end_doy'].values[:-1])
df['fall_length'] = pd.to_numeric(df['fall_length'])
(fig, ax) = plt.subplots(3, 1, figsize=(8, 9))
ax[0].plot(obs.index.values, obs['avgt'].values)
ax[0].set_ylim(obs['avgt'].min() - 8, obs['avgt'].max() + 8)
ax[0].set_title(("%s-%s [%s] %s\n91 Day Average Temperatures"
) % (nt.sts[station]['archive_begin'].year,
year + 3, station, nt.sts[station]['name']))
ax[0].set_ylabel(r"Trailing 91 Day Avg T $^{\circ}$F")
ax[0].xaxis.set_major_formatter(mdates.DateFormatter('%b\n%Y'))
ax[0].grid(True)
# Label the maxes and mins
for yr in range(year, year+3):
if yr not in df.index:
continue
date = df.at[yr, 'winter_end']
val = df.at[yr, 'winter']
if date is not None:
ax[0].text(
date, val - 1,
r"%s %.1f$^\circ$F" % (date.strftime("%-d %b"), val),
ha='center', va='top',
bbox=dict(color='white', boxstyle='square,pad=0')
)
date = df.at[yr, 'summer_end']
val = df.at[yr, 'summer']
if date is not None:
ax[0].text(
date, val + 1,
r"%s %.1f$^\circ$F" % (date.strftime("%-d %b"), val),
ha='center', va='bottom',
bbox=dict(color='white', boxstyle='square,pad=0')
)
df2 = df.dropna()
p2col = 'winter_end_doy' if season == 'spring' else 'summer_end_doy'
slp, intercept, r, _, _ = stats.linregress(df2.index.values,
df2[p2col].values)
ax[1].scatter(df.index.values, df[p2col].values)
ax[1].grid(True)
# Do labelling
yticks = []
yticklabels = []
for doy in range(int(df[p2col].min()),
int(df[p2col].max())):
date = datetime.date(2000, 1, 1) + datetime.timedelta(days=(doy - 1))
if date.day in [1, 15]:
yticks.append(doy)
yticklabels.append(date.strftime("%-d %b"))
ax[1].set_yticks(yticks)
ax[1].set_yticklabels(yticklabels)
lbl = ("Date of Minimum (Spring Start)" if season == 'spring'
else "Date of Maximum (Fall Start)")
ax[1].set_ylabel(lbl)
ax[1].set_xlim(df.index.min() - 1, df.index.max() + 1)
avgv = df[p2col].mean()
ax[1].axhline(avgv, color='r')
ax[1].plot(df.index.values, intercept + (df.index.values * slp))
d = (datetime.date(2000, 1, 1) +
datetime.timedelta(days=int(avgv))).strftime("%-d %b")
ax[1].text(0.02, 0.02,
r"$\frac{\Delta days}{decade} = %.2f,R^2=%.2f, avg = %s$" % (
slp * 10.0, r ** 2, d), va='bottom',
transform=ax[1].transAxes)
ax[1].set_ylim(bottom=(ax[1].get_ylim()[0] - 10))
p3col = 'spring_length' if season == 'spring' else 'fall_length'
slp, intercept, r, _, _ = stats.linregress(df2.index.values,
df2[p3col])
ax[2].scatter(df.index.values, df[p3col])
ax[2].set_xlim(df.index.min() - 1, df.index.max() + 1)
ax[2].set_ylabel("Length of '%s' [days]" % (season.capitalize(),))
ax[2].grid(True)
avgv = df[p3col].mean()
ax[2].axhline(avgv, color='r')
ax[2].plot(df.index.values, intercept + (df.index.values * slp))
ax[2].text(0.02, 0.02,
r"$\frac{\Delta days}{decade} = %.2f,R^2=%.2f, avg = %.1fd$" % (
slp * 10.0, r ** 2, avgv),
va='bottom', transform=ax[2].transAxes)
ax[2].set_ylim(bottom=(ax[2].get_ylim()[0] - 15))
return fig, df | ff07233d7c716715f1b4a414f0e2066222439925 | 3,656,510 |
from .. import sim
def connectCells(self):
"""
Function for/to <short description of `netpyne.network.conn.connectCells`>
Parameters
----------
self : <type>
<Short description of self>
**Default:** *required*
"""
# Instantiate network connections based on the connectivity rules defined in params
sim.timing('start', 'connectTime')
if sim.rank==0:
print('Making connections...')
if sim.nhosts > 1: # Gather tags from all cells
allCellTags = sim._gatherAllCellTags()
else:
allCellTags = {cell.gid: cell.tags for cell in self.cells}
allPopTags = {-i: pop.tags for i,pop in enumerate(self.pops.values())} # gather tags from pops so can connect NetStim pops
if self.params.subConnParams: # do not create NEURON objs until synapses are distributed based on subConnParams
origCreateNEURONObj = bool(sim.cfg.createNEURONObj)
origAddSynMechs = bool(sim.cfg.addSynMechs)
sim.cfg.createNEURONObj = False
sim.cfg.addSynMechs = False
gapJunctions = False # assume no gap junctions by default
for connParamLabel,connParamTemp in self.params.connParams.items(): # for each conn rule or parameter set
connParam = connParamTemp.copy()
connParam['label'] = connParamLabel
# find pre and post cells that match conditions
preCellsTags, postCellsTags = self._findPrePostCellsCondition(allCellTags, connParam['preConds'], connParam['postConds'])
# if conn function not specified, select based on params
if 'connFunc' not in connParam:
if 'probability' in connParam: connParam['connFunc'] = 'probConn' # probability based func
elif 'convergence' in connParam: connParam['connFunc'] = 'convConn' # convergence function
elif 'divergence' in connParam: connParam['connFunc'] = 'divConn' # divergence function
elif 'connList' in connParam: connParam['connFunc'] = 'fromListConn' # from list function
else: connParam['connFunc'] = 'fullConn' # convergence function
connFunc = getattr(self, connParam['connFunc']) # get function name from params
# process string-based funcs and call conn function
if preCellsTags and postCellsTags:
# initialize randomizer in case used in string-based function (see issue #89 for more details)
self.rand.Random123(sim.hashStr('conn_'+connParam['connFunc']),
sim.hashList(sorted(preCellsTags)+sorted(postCellsTags)),
sim.cfg.seeds['conn'])
self._connStrToFunc(preCellsTags, postCellsTags, connParam) # convert strings to functions (for the delay, and probability params)
connFunc(preCellsTags, postCellsTags, connParam) # call specific conn function
# check if gap junctions in any of the conn rules
if not gapJunctions and 'gapJunction' in connParam: gapJunctions = True
if sim.cfg.printSynsAfterRule:
nodeSynapses = sum([len(cell.conns) for cell in sim.net.cells])
print((' Number of synaptic contacts on node %i after conn rule %s: %i ' % (sim.rank, connParamLabel, nodeSynapses)))
    # add presynaptic gap junctions
if gapJunctions:
# distribute info on presyn gap junctions across nodes
if not getattr(sim.net, 'preGapJunctions', False):
sim.net.preGapJunctions = [] # if doesn't exist, create list to store presynaptic cell gap junctions
data = [sim.net.preGapJunctions]*sim.nhosts # send cells data to other nodes
data[sim.rank] = None
gather = sim.pc.py_alltoall(data) # collect cells data from other nodes (required to generate connections)
sim.pc.barrier()
for dataNode in gather:
if dataNode: sim.net.preGapJunctions.extend(dataNode)
# add gap junctions of presynaptic cells (need to do separately because could be in different ranks)
for preGapParams in getattr(sim.net, 'preGapJunctions', []):
if preGapParams['gid'] in self.gid2lid: # only cells in this rank
cell = self.cells[self.gid2lid[preGapParams['gid']]]
cell.addConn(preGapParams)
    # apply subcellular connectivity params (distribution of synapses)
if self.params.subConnParams:
self.subcellularConn(allCellTags, allPopTags)
sim.cfg.createNEURONObj = origCreateNEURONObj # set to original value
sim.cfg.addSynMechs = origAddSynMechs # set to original value
cellsUpdate = [c for c in sim.net.cells if c.tags['cellModel'] not in ['NetStim', 'VecStim']]
if sim.cfg.createNEURONObj:
for cell in cellsUpdate:
# Add synMechs, stim and conn NEURON objects
cell.addStimsNEURONObj()
#cell.addSynMechsNEURONObj()
cell.addConnsNEURONObj()
nodeSynapses = sum([len(cell.conns) for cell in sim.net.cells])
if sim.cfg.createPyStruct:
nodeConnections = sum([len(set([conn['preGid'] for conn in cell.conns])) for cell in sim.net.cells])
else:
nodeConnections = nodeSynapses
print((' Number of connections on node %i: %i ' % (sim.rank, nodeConnections)))
if nodeSynapses != nodeConnections:
print((' Number of synaptic contacts on node %i: %i ' % (sim.rank, nodeSynapses)))
sim.pc.barrier()
sim.timing('stop', 'connectTime')
if sim.rank == 0 and sim.cfg.timing: print((' Done; cell connection time = %0.2f s.' % sim.timingData['connectTime']))
return [cell.conns for cell in self.cells] | 8f037f2ae6dbf8aab68c12fbbedcf71dd3ca6b31 | 3,656,511 |
import sys
import numpy as np
def convert_numpy_str_to_uint16(data):
""" Converts a numpy.unicode\_ to UTF-16 in numpy.uint16 form.
Convert a ``numpy.unicode_`` or an array of them (they are UTF-32
strings) to UTF-16 in the equivalent array of ``numpy.uint16``. The
conversion will throw an exception if any characters cannot be
converted to UTF-16. Strings are expanded along rows (across columns)
so a 2x3x4 array of 10 element strings will get turned into a 2x30x4
array of uint16's if every UTF-32 character converts easily to a
UTF-16 singlet, as opposed to a UTF-16 doublet.
Parameters
----------
data : numpy.unicode\_ or numpy.ndarray of numpy.unicode\_
The string or array of them to convert.
Returns
-------
array : numpy.ndarray of numpy.uint16
The result of the conversion.
Raises
------
UnicodeEncodeError
If a UTF-32 character has no UTF-16 representation.
See Also
--------
convert_numpy_str_to_uint32
convert_to_numpy_str
"""
# An empty string should be an empty uint16
if data.nbytes == 0:
return np.uint16([])
# We need to use the UTF-16 codec for our endianness. Using the
# right one means we don't have to worry about removing the BOM.
if sys.byteorder == 'little':
codec = 'UTF-16LE'
else:
codec = 'UTF-16BE'
# numpy.char.encode can do the conversion element wise. Then, we
# just have convert to uin16 with the appropriate dimensions. The
# dimensions are gotten from the shape of the converted data with
# the number of column increased by the number of words (pair of
# bytes) in the strings.
cdata = np.char.encode(np.atleast_1d(data), codec)
shape = list(cdata.shape)
shape[-1] *= (cdata.dtype.itemsize // 2)
return np.ndarray(shape=shape, dtype='uint16',
buffer=cdata.tostring()) | 7a16934c7c90ab373b88b4945641ea784bd5a144 | 3,656,512 |
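A minimal sketch of the core trick in convert_numpy_str_to_uint16, encode with an endianness-matched UTF-16 codec via numpy.char.encode and then reinterpret the raw bytes as uint16, using only numpy; the input strings are illustrative.
import sys
import numpy as np
codec = 'UTF-16LE' if sys.byteorder == 'little' else 'UTF-16BE'
data = np.array(['abc', 'xyz'])                     # UTF-32 strings internally
encoded = np.char.encode(data, codec)               # per-element UTF-16 bytes, no BOM
as_uint16 = np.frombuffer(encoded.tobytes(), dtype='uint16').reshape(len(data), -1)
print(as_uint16)                                    # [[ 97  98  99] [120 121 122]]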
import math
import jax
def _get_process_num_examples(builder, split, process_batch_size, process_index,
process_count, drop_remainder):
"""Returns the number of examples in a given process's split."""
process_split = _get_process_split(
split,
process_index=process_index,
process_count=process_count,
drop_remainder=drop_remainder)
num_examples = builder.info.splits[process_split].num_examples
if drop_remainder:
device_batch_size = process_batch_size // jax.local_device_count()
num_examples = (
math.floor(num_examples / device_batch_size) * device_batch_size)
return num_examples | a0621a6146e919db78b0ff5e7a5ae6d3c1bb68a6 | 3,656,513 |
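A worked example of the drop_remainder arithmetic above, with hypothetical numbers (8 local devices, a per-process batch of 128, and 10,007 examples in the process's split):
import math
process_batch_size = 128
local_device_count = 8        # stand-in for jax.local_device_count()
num_examples = 10_007
device_batch_size = process_batch_size // local_device_count            # 16
kept = math.floor(num_examples / device_batch_size) * device_batch_size
print(kept)  # 10000 -- the 7 examples that don't fill a device batch are dropped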
def export_python_function(earth_model):
"""
Exports model as a pure python function, with no numpy/scipy/sklearn dependencies.
:param earth_model: Trained pyearth model
:return: A function that accepts an iterator over examples, and returns an iterator over transformed examples
"""
i = 0
accessors = []
for bf in earth_model.basis_:
if not bf.is_pruned():
accessors.append(bf.func_factory(earth_model.coef_[0, i]))
i += 1
def func(example_iterator):
return [sum(accessor(row) for accessor in accessors) for row in example_iterator]
return func | 593d8cf9f1156359f2276f0481e02a2d00d8ffde | 3,656,514 |
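The exported function is just a sum of per-basis-function closures, which is what makes it dependency-free. A toy version of the same pattern (no pyearth involved; the two hand-written accessors stand in for the pruned model's basis functions):
def make_accessor(index, coef):
    # closure standing in for one basis function scaled by its coefficient
    return lambda row: coef * row[index]
accessors = [make_accessor(0, 2.0), make_accessor(1, -1.0)]
def func(example_iterator):
    return [sum(accessor(row) for accessor in accessors) for row in example_iterator]
print(func([(1.0, 3.0), (4.0, 0.5)]))  # [-1.0, 7.5]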
def ehi(data, thr_95, axis=0, keepdims=False):
"""
Calculate Excessive Heat Index (EHI).
Parameters
----------
data: list/array
1D/2D array of daily temperature timeseries
thr_95: float
95th percentile daily mean value from climatology
axis: int
The axis along which the calculation is applied (default 0).
keepdims: boolean
If data is 2d (time in third dimesion) and keepdims is set to True,
calculation is applied to the zeroth axis (time) and returns a 2d array
of freq-int dists. If set to False (default) all values are
collectively assembled before calculation.
Returns
-------
EHI: float
Excessive heat index
"""
def ehi_calc(pdata, thr_95):
if all(np.isnan(pdata)):
print("All data missing/masked!")
ehi = np.nan
else:
# run_mean = moving_average(pdata, 3)
rmean = run_mean(pdata, 3)
ehi = ((rmean > thr_95)).sum()
return ehi
if keepdims:
EHI = np.apply_along_axis(ehi_calc, axis, data, thr_95)
else:
EHI = ehi_calc(data, thr_95)
return EHI | b56166dc070c9f44ce0d8197526c09ba2f95995c | 3,656,515 |
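The EHI computation reduces to counting days whose 3-day running mean exceeds the 95th-percentile threshold. A numpy-only sketch of that core (the snippet's run_mean helper is not shown, so a plain valid-mode moving average stands in for it here):
import numpy as np
temps = np.array([20.0, 22.0, 31.0, 33.0, 34.0, 25.0, 21.0])   # daily mean temps
thr_95 = 28.0
rmean = np.convolve(temps, np.ones(3) / 3, mode='valid')        # 3-day running mean
ehi = int((rmean > thr_95).sum())
print(rmean.round(2), ehi)   # [24.33 28.67 32.67 30.67 26.67] 3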
def make_transpose_tests(options):
"""Make a set of tests to do transpose."""
# TODO(nupurgarg): Add test for uint8.
test_parameters = [{
"dtype": [tf.int32, tf.int64, tf.float32],
"input_shape": [[2, 2, 3]],
"perm": [[0, 1, 2], [0, 2, 1]],
"constant_perm": [True, False],
}, {
"dtype": [tf.float32],
"input_shape": [[1, 2, 3, 4]],
"perm": [[0, 1, 2, 3], [3, 0, 1, 2]],
"constant_perm": [True, False],
}, {
"dtype": [tf.float32],
"input_shape": [[1, 2, 3, 4, 5]],
"perm": [[4, 3, 2, 1, 0]],
"constant_perm": [True, False],
}]
def build_graph(parameters):
"""Build a transpose graph given `parameters`."""
input_tensor = tf.placeholder(
dtype=parameters["dtype"],
name="input",
shape=parameters["input_shape"])
if parameters["constant_perm"]:
perm = parameters["perm"]
input_tensors = [input_tensor]
else:
shape = [len(parameters["perm"]), 2]
perm = tf.placeholder(dtype=tf.int32, name="perm", shape=shape)
input_tensors = [input_tensor, perm]
out = tf.transpose(input_tensor, perm=perm)
return input_tensors, [out]
def build_inputs(parameters, sess, inputs, outputs):
values = [
create_tensor_data(parameters["dtype"], parameters["input_shape"])
]
if not parameters["constant_perm"]:
values.append(np.array(parameters["perm"]))
return values, sess.run(outputs, feed_dict=dict(zip(inputs, values)))
make_zip_of_tests(
options,
test_parameters,
build_graph,
build_inputs,
expected_tf_failures=9) | c6d28307d756b6475258f43893ca7811c69ff12d | 3,656,516 |
def get_disable_migration_module():
""" get disable migration """
class DisableMigration:
def __contains__(self, item):
return True
def __getitem__(self, item):
return None
return DisableMigration() | d44a26c5e597f23dbc2434488baf54ebccc5010c | 3,656,517 |
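An object like this is typically assigned to Django's MIGRATION_MODULES setting in a test configuration: because it claims to contain every app label and returns None for each, Django skips migrations and builds test tables directly from the current models. A sketch of that assumed usage:
# in a test settings module (assumed usage, not part of the snippet above)
MIGRATION_MODULES = get_disable_migration_module()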
import requests
def weather():
"""The weather route of My Weather API."""
# Load URL and KEY args of Current Weather API of OpenWeatherMap
api_url = app.config.get("API_URL")
api_key = app.config.get("API_KEY")
validators.check_emptiness('API_URL', api_url)
validators.check_emptiness('API_KEY', api_key)
# Obtain and verify city and country args entered to route
city = request.args.get('city')
country = request.args.get('country')
validators.check_emptiness('city', city)
validators.check_emptiness('country', country)
validators.check_regex('city', city, "[A-Za-z ]+")
validators.check_regex('country', country, "[a-z]{2}")
# Construct URL request of Current Weather API of OpenWeatherMap
url = "{0}{1},{2}&units=metric&appid={3}".format(api_url, city, country,
api_key)
# Obtain response from Current Weather API of OpenWeatherMap
input_json = requests.get(url).json()
# Debugging: print the 'input_json' data in good style
# webfunctions.beautiful_json(input_json)
# If 'input_json' hasn't HTTP:200 status,
# then the response will be same that it was obtained from OpenWeatherMap
webfunctions.reply_bad_response(input_json)
# Create and return the final API response from My Weather API
output_json = webfunctions.create_response_body(input_json)
return jsonify(output_json) | 927335cb4d2ca41f0fc81a43a4f5dec7ec92b264 | 3,656,518 |
def __sbox_bytes(data, sbox):
"""S-Box substitution of a list of bytes"""
return [__sbox_single_byte(byte, sbox) for byte in data] | db4999ada745c07127d9eff66841877a157839ec | 3,656,519 |
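A toy illustration of the same substitution, assuming the per-byte helper is a plain table lookup (sbox[byte]); a real cipher would use its full 256-entry S-box.
def sbox_single_byte(byte, sbox):
    return sbox[byte]                      # assumed lookup behaviour
def sbox_bytes(data, sbox):
    return [sbox_single_byte(b, sbox) for b in data]
toy_sbox = list(range(255, -1, -1))        # maps b -> 255 - b
print(sbox_bytes([0x00, 0x10, 0xFF], toy_sbox))   # [255, 239, 0]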
def load_config_with_kwargs(cls, kwargs):
"""Takes a marshmallow class and dict of parameter values and appropriately instantiantes the schema."""
assert_is_a_marshmallow_class(cls)
schema = cls.Schema()
fields = schema.fields.keys()
return load_config(cls, **{k: v for k, v in kwargs.items() if k in fields}), {
k: v for k, v in kwargs.items() if k not in fields
} | 9058becb8ae387ad012554ff0afe7ac5fcbf62f7 | 3,656,520 |
def split_rows(sentences, column_names):
"""
Creates a list of sentence where each sentence is a list of lines
Each line is a dictionary of columns
:param sentences:
:param column_names:
:return:
"""
new_sentences = []
root_values = ['0', 'ROOT', 'ROOT', 'ROOT', 'ROOT', 'ROOT', '0', 'ROOT', '0', 'ROOT']
start = [dict(zip(column_names, root_values))]
for sentence in sentences:
rows = sentence.split('\n')
sentence = [dict(zip(column_names, row.split())) for row in rows if row[0] != '#']
sentence = start + sentence
new_sentences.append(sentence)
return new_sentences | 444733a9c169bedae8dc0045cd696cafed7085e2 | 3,656,521 |
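Usage sketch on a tiny CoNLL-style input: each sentence string becomes a list of per-token dicts, with a synthetic ROOT row prepended.
column_names = ['id', 'form', 'lemma', 'cpostag', 'postag',
                'feats', 'head', 'deprel', 'phead', 'pdeprel']
sentences = ["1 The the DT DT _ 2 det _ _\n2 cat cat NN NN _ 0 root _ _"]
parsed = split_rows(sentences, column_names)
print(parsed[0][0]['form'])   # ROOT  (the prepended root token)
print(parsed[0][2]['form'])   # cat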
import secrets
import os
import shutil
def compute_pw_sparse_out_of_memory2(tr,
row_size = 500,
pm_processes = 2,
pm_pbar = True,
max_distance = 50,
reassemble = True,
cleanup = True,
assign = True):
"""
Instead of calling TCRrep.compute_distances(), this
function permits a parallelizable approach that does
not require holding a large matrix in memory.
Default behavior is to reassemble a scipy
sparse matrix from a set of sub matrices written to disk fragment.
With <reassemble = True> function returns a scipy sparse matrix.
Space savings are achieved because any value above <max_distance> is set to zero.
True zero distances are set to -1.
Can be used to form a network of TCRs with tcrdistances < max_distance,
Parameters
----------
tr : TCRrep
TCRrep instance with clone_df
row_size : int
How many rows to process in memory at once
    pm_processes : int
        Number of concurrent parallel processes to run at once
    pm_pbar : bool
        If True, show progress bar.
max_distance : int
Max distance
matrix_name : str
Name of matrix to return (i.e, 'rw_beta' or 'rw_alpha')
reassemble: True
If true, makes one matrix from all the sparse sub matrices.
cleanup: bool,
if True, deletes temporary files.
assign : bool
if True, assigns pw sparse matrices to TCRrep object.
That is TCRrep.pw_beta, TCRrep.pw_alpha will be assigned
the reassembled spare matrces.
Returns
-------
csr_full : sparse scipy matrix
dest : str
name of the folder that holds fragments
Examples
--------
import numpy as np
import pandas as pd
from tcrdist.repertoire import TCRrep
from tcrdist.rep_funcs import compute_pw_sparse_out_of_memory
df = pd.read_csv("dash.csv")
#(1)
tr = TCRrep(cell_df = df, #(2)
organism = 'mouse',
chains = ['beta'],
db_file = 'alphabeta_gammadelta_db.tsv',
compute_distances = True,
store_all_cdr = False)
S = compute_pw_sparse_out_of_memory(tr, matrix_name = "rw_beta", max_distance = 1000)
# S is a <1920x1920 sparse matrix of type '<class 'numpy.int16'>'
M = S.todense()
M[M==1] = 0
np.all(M == tr.pw_beta)
S, chunks = compute_pw_sparse_out_of_memory(tr, matrix_name = "rw_beta", max_distance = 50)
print(S)
# S is a <1920x1920 sparse matrix of type '<class 'numpy.int16'>'
"""
# Early warning to save heartache
if assign is True and reassemble is False:
raise ValueError("If you want to assign results to a TCRrep instance, you must set reassemble to True")
dest = secrets.token_hex(6)
os.mkdir(dest)
print(f"CREATED /{dest}/ FOR HOLDING DISTANCE OUT OF MEMORY")
row_chunks = memory._partition(range(tr.clone_df.shape[0]), row_size)
smatrix_chunks = [(tr, ind, f"{dest}/{i}") for i,ind in enumerate(row_chunks)]
csrfragments = parmap.starmap(memory.gen_sparse_rw_on_fragment2,
smatrix_chunks,
max_distance=max_distance,
pm_pbar=pm_pbar,
pm_processes = pm_processes)
if reassemble:
csr_full_dict = dict()
for chain in tr.chains:
chain_str = f"rw_{chain}"
csr_full = memory.collapse_csrs([f"{x[2]}.{chain_str}.npz" for x in smatrix_chunks])
print(f"RETURNING scipy.sparse csr_matrix w/dims {csr_full.shape}")
csr_full_dict[chain] = csr_full
else:
csr_full_dict= None
if assign:
for chain in tr.chains:
setattr(tr, f"pw_{chain}", csr_full_dict[chain])
if cleanup:
assert os.path.isdir(dest)
print(f"CLEANING UP {dest}")
shutil.rmtree(dest)
return csr_full_dict, smatrix_chunks | be155db9bb8990a84e8005c5b58903f3e5a600fe | 3,656,522 |
def _rollup_date(dts, interval=None):
"""format date/time string based on interval spec'd for summation
For Daily, it returns just the date. No time or timezeone.
For Hourly, it returns an ISO-8061 datetime range. This provides previously
missing clarity around whether the rainfall amount shown was for the
period starting at the returned datetime or the period preceeding it (the
latter being the correct but approach for datetimes but not dates.)
"""
if interval == INTERVAL_DAILY:
# strip the time entirely from the datetime string. Timezone is lost.
return parse(dts).strftime("%Y-%m-%d")
elif interval == INTERVAL_HOURLY:
# set the minutes, seconds, and microsecond to zeros. Timezone is preserved.
# This method returns the total for the hour, e.g a
# rainfall total of 1 inch with a timestamp of "2020-04-07T10:00:00-04:00"
# is actually 1 inch for intervals within the 10 o'clock hour.
# return parse(dts).replace(minute=0, second=0, microsecond=0).isoformat()
# NOTE: It may be more appropriate to use a timedelta+1 hour here,
# if the rainfall is to be interpreted as the total *up to* a point in time.
# Because we're looking at accumulation, we want timestamps that
# represent rainfall accumulated during the previous fifteen minutes
# within the hour represented. So in a list of [1:00, 1:15, 1:30, 1:45,
# 2:00], we scratch the 1:00 since it represents accumulation from
# 12:45 to 1:00, outside our hour of interest. Everything else rep's
# rain recorded between >1 and <=2 o'clock. We can get that by
# bumping everything back 15 minutes, then generating the hourly.
# start_dt = parse(dts).replace(minute=0, second=0, microsecond=0)
start_dt = parse(dts)
start_dt = start_dt - timedelta(minutes=MIN_INTERVAL)
start_dt = start_dt.replace(minute=0, second=0, microsecond=0)
end_dt = start_dt + timedelta(hours=1)
end_dt.replace(minute=0, second=0, microsecond=0)
return "{0}/{1}".format(start_dt.isoformat(), end_dt.isoformat())
else:
# return it as-is
return dts | 12f74d9becfa52c626d33174cb628dc9e0112c07 | 3,656,523 |
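A worked example of the hourly bucketing using only the standard library: a reading stamped 10:00 is credited to the 09:00–10:00 interval because it accumulates the preceding 15 minutes.
from datetime import datetime, timedelta
MIN_INTERVAL = 15
dt = datetime.fromisoformat("2020-04-07T10:00:00-04:00")
start = (dt - timedelta(minutes=MIN_INTERVAL)).replace(minute=0, second=0, microsecond=0)
end = start + timedelta(hours=1)
print(f"{start.isoformat()}/{end.isoformat()}")
# 2020-04-07T09:00:00-04:00/2020-04-07T10:00:00-04:00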
from scipy.signal import lfilter
def offset_compensation(time_signal):
""" Offset compensation filter.
"""
return lfilter([1., -1], [1., -0.999], time_signal) | 0fc423646071dc07bf88f88698f3248fa302a41e | 3,656,524 |
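A quick check of what this filter does, assuming only scipy.signal.lfilter and numpy: fed a constant offset, the first-order DC-blocking filter passes the initial step and then bleeds it away with a time constant of roughly 1/(1 - 0.999) = 1000 samples.
import numpy as np
from scipy.signal import lfilter
x = np.full(20_000, 5.0)                        # pure DC offset
y = lfilter([1.0, -1.0], [1.0, -0.999], x)
print(round(float(y[0]), 3), round(float(y[-1]), 6))   # 5.0 0.0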
import subprocess
import sys
import os
def process_dir(thisdir):
"""Process /thisdir/ recursively"""
res = []
shellparams = {'stdin':subprocess.PIPE,'stdout':sys.stdout,'shell':True}
command = [utils.assimp_bin_path,"testbatchload"]
for f in os.listdir(thisdir):
if os.path.splitext(f)[-1] in settings.exclude_extensions:
continue
fullpath = os.path.join(thisdir, f)
if os.path.isdir(fullpath):
if f != ".svn":
res += process_dir(fullpath)
continue
# import twice, importing the same file again introduces extra risk
# to crash due to garbage data lying around in the importer.
command.append(fullpath)
command.append(fullpath)
if len(command)>2:
# testbatchload returns always 0 if more than one file in the list worked.
# however, if it should segfault, the OS will return something not 0.
command += reversed(command[2:])
if subprocess.call(command, **shellparams):
res.append(thisdir)
return res | 6442bc6f0e77b56dc7f03ea457b59fd3ce14316b | 3,656,525 |
from typing import Callable, TypeVar, cast
T = TypeVar("T")
def _alias(default: Callable) -> Callable[[T], T]:
"""
Decorator which re-assigns a function `_f` to point to `default` instead.
Since global function calls in Python are somewhat expensive, this is
mainly done to reduce a bit of overhead involved in the functions calls.
For example, consider the below example::
def f2(o):
return o
def f1(o):
return f2(o)
Calling function `f1` will incur some additional overhead, as opposed to
simply calling `f2`.
Now assume we wrap `f1` with the `_alias` decorator::
def f2(o):
return o
@_alias(f2)
def f1(o):
...
This will essentially perform the assignment of `f1 = f2`, so calling
`f1()` in this case has no additional function overhead, as opposed to
just calling `f2()`.
"""
def new_func(_f: T) -> T:
return cast(T, default)
return new_func | f286472a7f14428ea5243d54a671b9d3d743c9ef | 3,656,526 |
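Usage sketch: after decoration the name is literally rebound to the default function, so calling it adds no extra stack frame (f1/f2 follow the docstring's own example).
def f2(o):
    return o
@_alias(f2)
def f1(o):
    ...
print(f1 is f2)        # True
print(f1("payload"))   # payload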
def test_image(filename):
"""
Return the absolute path to image file having *filename* in test_files
directory.
"""
return absjoin(thisdir, 'test_files', filename) | bda20e51a495e56f8ebf373819e60ebdea3da535 | 3,656,527 |
import difflib
def menu(
ticker: str,
start: str,
interval: str,
stock: pd.DataFrame,
):
"""Sector and Industry Analysis Menu"""
sia_controller = SectorIndustryAnalysisController(ticker, start, interval, stock)
sia_controller.call_help(None)
while True:
# Get input command from user
if session and gtff.USE_PROMPT_TOOLKIT:
completer = NestedCompleter.from_nested_dict(
{c: None for c in sia_controller.CHOICES}
)
an_input = session.prompt(
f"{get_flair()} (stocks)>(sia)> ",
completer=completer,
)
else:
an_input = input(f"{get_flair()} (stocks)>(sia)> ")
try:
process_input = sia_controller.switch(an_input)
if process_input is not None:
return process_input
except SystemExit:
print("The command selected doesn't exist\n")
similar_cmd = difflib.get_close_matches(
an_input, sia_controller.CHOICES, n=1, cutoff=0.7
)
if similar_cmd:
print(f"Did you mean '{similar_cmd[0]}'?\n")
continue | 5c3d13d292525abdb5c7f98a2467274c2172cf8f | 3,656,528 |
def fname_template(orun, detname, ofname, nevts, tsec=None, tnsec=None):
"""Replaces parts of the file name specified as
#src, #exp, #run, #evts, #type, #date, #time, #fid, #sec, #nsec
with actual values
"""
template = replace(ofname, '#src', detname)
template = replace(template, '#exp', orun.expt)
template = replace(template, '#run', 'r%04d'%orun.runnum)
template = replace(template, '#type', '%s')
t_sec = tsec if tsec is not None else int(orun.timestamp>>32 & 0xFFFFFFFF)
t_nsec = tnsec if tnsec is not None else int(orun.timestamp & 0xFFFFFFFF)
template = replace(template, '#date', str_tstamp('%Y-%m-%d', t_sec))
template = replace(template, '#time', str_tstamp('%H%M%S', t_sec))
template = replace(template, '#sec', '%d' % t_sec)
template = replace(template, '#nsec', '%09d' % t_nsec)
template = replace(template, '#evts', 'e%06d' % nevts)
if not '%s' in template: template += '-%s'
return template | 7f38b638d89a7f99ab36b4e08369cfc7f22bb575 | 3,656,529 |
def opt_checked(method):
"""Like `@checked`, but it is legal to not specify the value. In this case,
the special `Unset` value is passed to the validation function. Storing
`Unset` causes the key to not be emitted during serialization."""
return Checked(method.__name__, method.__doc__, method, True) | 5d34db8fcc602dc51d69c128a1855eef44c81453 | 3,656,530 |
from datetime import datetime
def _metadata(case_study):
"""Collect metadata in a dictionnary."""
return {
'creation_date': datetime.strftime(datetime.now(), '%c'),
'imagery': case_study.imagery,
'latitude': case_study.lat,
'longitude': case_study.lon,
'area_of_interest': case_study.aoi_latlon.wkt,
'crs': str(case_study.crs),
'country': case_study.country
} | eb16892135326662029fe568922f2871f016090e | 3,656,531 |
def CoP_constraints_ds(
m,
foot_angles,
next_support_foot_pos,
stateX,
stateY,
N=16,
dt=0.1,
h=1.0,
g=9.81,
tPf=8,
):
"""
INPUTS
m (int): remaining time steps in current foot step;
foot_angles ([N, 1] vector): containing the orientations in radians
of the foot steps at each time step;
next_support_foot_pos ([2, 1] vec): next support foot position;
stateX ([3, 1] matrix): position, velocity, acceleration of CoM along x-axis;
stateY ([3, 1] matrix): position, velocity, acceleration of CoM along y-axis;
N (int): is the length of the preview horizon;
dt (float): time step size;
h (float): CoM height;
g (float): gravitational acceleration;
tPf (int): time steps per foot step;
Also calls a function that load the data for the foot edge
normal vectors and edge to center distances;
OUTPUTS
leftHandSide: size [ef*N, 2N+2l] Matrix, where l is the number
            of remaining foot steps contained in the preview horizon and
ef is the number of edges in the robot foot, e being the
number of the edges of the foot, using a rectangular foot, ef=4;
rightHandSide: size [ef*N, 1] Matrix;
"""
Uz = get_Uz(N=N)
FutureStepsMat = stepsInFutureStepsMat(m, N=N)
middleMat_diag = np.hstack((Uz, -FutureStepsMat[:, 1:]))
middleMat = block_diag(middleMat_diag, middleMat_diag)
Sz = get_Sz(N=N)
rightVecX = FutureStepsMat[:, :1] * next_support_foot_pos[0] - Sz @ stateX
rightVecY = FutureStepsMat[:, :1] * next_support_foot_pos[1] - Sz @ stateY
rightVex = np.vstack((rightVecX, rightVecY))
# set_trace()
for i in range(N):
RotMat = angle2RotMat(foot_angles[i])
if i < m:
d, b = init_double_support_CoP()
else:
d, b = rectangular_foot_CoP()
# (Rd^T)^T = dR^T
dRot = d @ RotMat.T
if i == 0:
DMatX = block_diag(dRot[:, :1])
DMatY = block_diag(dRot[:, 1:])
bVec = b
else:
DMatX = block_diag(DMatX, dRot[:, :1])
DMatY = block_diag(DMatY, dRot[:, 1:])
bVec = np.vstack((bVec, b))
DMat = np.hstack((DMatX, DMatY))
leftHandSide = DMat @ middleMat
rightHandSide = bVec + DMat @ rightVex
return leftHandSide, rightHandSide | 647e9313b79523ae41ab47a61501c1b356d43785 | 3,656,532 |
import numpy as np
import skimage.color
from skimage import io
from skimage.feature import corner_harris
def HARRIS(img_path):
"""
    Extract Harris corner features
:param img_path:
:return:
:Version:1.0
"""
img = io.imread(img_path)
img = skimage.color.rgb2gray(img)
img = (img - np.mean(img)) / np.std(img)
feature = corner_harris(img, method='k', k=0.05, eps=1e-06, sigma=1)
return feature.reshape(feature.shape[0] * feature.shape[1]) | 5c11c9e5b2947b0ddeb2e1780d11be4020fe53a4 | 3,656,533 |
import os
import pickle
def load_object(f_name, directory=None):
"""Load a custom object, from a pickle file.
Parameters
----------
f_name : str
File name of the object to be loaded.
directory : str or SCDB, optional
Folder or database object specifying the save location.
Returns
-------
object
Custom object loaded from pickle file.
"""
load_path = None
if isinstance(directory, SCDB):
if check_ext(f_name, '.p') in directory.get_files('counts'):
load_path = os.path.join(directory.get_folder_path('counts'), f_name)
elif check_ext(f_name, '.p') in directory.get_files('words'):
load_path = os.path.join(directory.get_folder_path('words'), f_name)
elif isinstance(directory, str) or directory is None:
if f_name in os.listdir(directory):
load_path = os.path.join(directory, f_name)
if not load_path:
raise ValueError('Can not find requested file name.')
return pickle.load(open(check_ext(load_path, '.p'), 'rb')) | d8809fd6b95bb894ec3aed4a1d0499fbc1ad77ea | 3,656,534 |
def http_req(blink, url='http://example.com', data=None, headers=None,
reqtype='get', stream=False, json_resp=True, is_retry=False):
"""
    Perform server requests and check if reauthorization is necessary.
:param blink: Blink instance
:param url: URL to perform request
:param data: Data to send (default: None)
:param headers: Headers to send (default: None)
:param reqtype: Can be 'get' or 'post' (default: 'get')
:param stream: Stream response? True/FALSE
:param json_resp: Return JSON response? TRUE/False
:param is_retry: Is this a retry attempt? True/FALSE
"""
if reqtype == 'post':
req = Request('POST', url, headers=headers, data=data)
elif reqtype == 'get':
req = Request('GET', url, headers=headers)
else:
raise BlinkException(ERROR.REQUEST)
prepped = req.prepare()
response = blink.session.send(prepped, stream=stream)
if json_resp and 'code' in response.json():
if is_retry:
raise BlinkAuthenticationException(
(response.json()['code'], response.json()['message']))
else:
headers = attempt_reauthorization(blink)
return http_req(blink, url=url, data=data, headers=headers,
reqtype=reqtype, stream=stream,
json_resp=json_resp, is_retry=True)
if json_resp:
return response.json()
return response | 0596f82752292216235e9d9f3b14bb01f053d0d7 | 3,656,535 |
def make_dataset(path, seq_length, mem_length, local_rank, lazy=False, xl_style=False,
shuffle=True, split=None, tokenizer=None, tokenizer_type='CharacterLevelTokenizer',
tokenizer_model_path=None, vocab_size=None, model_type='bpe', pad_token=0, character_converage=1.0,
non_binary_cols=None, sample_one_document=False, pre_tokenize=False, **kwargs):
"""function to create datasets+tokenizers for common options"""
if split is None:
split = [1.]
if non_binary_cols is not None:
# multilabel dataset support (only for csvs)
label_key = non_binary_cols
# make tokenizer for dataset
if tokenizer is None:
tokenizer = make_tokenizer(tokenizer_type, None, tokenizer_model_path, vocab_size, model_type,
pad_token, character_converage, **kwargs)
# get one or multiple datasets and concatenate
if isinstance(path, str):
ds = get_dataset(path, tokenizer=tokenizer, pre_tokenize=pre_tokenize, local_rank=local_rank)
else:
ds = [get_dataset(p, tokenizer=tokenizer, pre_tokenize=pre_tokenize, local_rank=local_rank) for p in path]
ds = ConcatDataset(ds)
ds_type = ''
if 'ds_type' in kwargs:
ds_type = kwargs['ds_type']
# Split dataset into train/val/test (and wrap bert dataset)
if should_split(split):
ds = split_ds(ds, split, shuffle=shuffle)
if ds_type.lower() == 'bert':
presplit_sentences = kwargs['presplit_sentences'] if 'presplit_sentences' in kwargs else False
ds = [bert_sentencepair_dataset(d, max_seq_len=seq_length,
presplit_sentences=presplit_sentences) if d is not None else None for d in
ds]
elif ds_type.lower() == 'gpt2':
if xl_style:
ds = [XLDataset(d, tokenizer, max_seq_len=seq_length, mem_len=mem_length,
sample_across_doc=not sample_one_document) if d is not None else None for d in ds]
else:
ds = [GPT2Dataset(d, tokenizer, max_seq_len=seq_length,
sample_across_doc=not sample_one_document) if d is not None else None for d in ds]
else:
if ds_type.lower() == 'bert':
presplit_sentences = kwargs['presplit_sentences'] if 'presplit_sentences' in kwargs else False
ds = bert_sentencepair_dataset(ds, max_seq_len=seq_length, presplit_sentences=presplit_sentences)
elif ds_type.lower() == 'gpt2':
if xl_style:
ds = XLDataset(ds, tokenizer, max_seq_len=seq_length, mem_len=mem_length,
sample_across_doc=not sample_one_document)
else:
ds = GPT2Dataset(ds, tokenizer, max_seq_len=seq_length, sample_across_doc=not sample_one_document)
return ds, tokenizer | 419e50d3dab13d9aa1f096b99a598c52441bb2ae | 3,656,536 |
import re
def fix_reference_name(name, blacklist=None):
"""Return a syntax-valid Python reference name from an arbitrary name"""
name = "".join(re.split(r'[^0-9a-zA-Z_]', name))
while name and not re.match(r'([a-zA-Z]+[0-9a-zA-Z_]*)$', name):
if not re.match(r'[a-zA-Z]', name[0]):
name = name[1:]
continue
name = str(name)
if not name:
name = "data"
if blacklist is not None and name in blacklist:
get_new_name = lambda index: name+('_%03d' % index)
index = 0
while get_new_name(index) in blacklist:
index += 1
name = get_new_name(index)
return name | 2f1a291fc7ac9816bc2620fceeeaf90a1bb3fd4a | 3,656,537 |
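# Hedged usage sketch for fix_reference_name (the sample strings below are
# illustrative only): non-alphanumeric characters are stripped, leading
# non-letters are dropped, empty results fall back to "data", and a blacklist
# forces a numeric suffix.
print(fix_reference_name("2nd value!"))           # -> "ndvalue"
print(fix_reference_name(""))                     # -> "data"
print(fix_reference_name("x", blacklist=["x"]))   # -> "x_000"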
import sys
def handle_args():
"""Handles arguments both in the command line and in IDLE.
Output:
Tuple, consisting of:
- string (input filename or stdin)
- string (output filename or stdout)
- integer (number of CPUs)
"""
version_num = "0.0.2"
# Tries to execute the script with command line arguments.
try:
# Creates an instance of argparse.
argparser = ThrowingArgumentParser(prog=sys.argv[0],
description='samConcat2Tag, processes bwa mem sam format where \
the read comment has been appended to the mapping line following process_10\
xReads.py', epilog='For questions or comments, please contact Matt Settles \
<[email protected]>\n%(prog)s version: ' + version_num, add_help=True)
except ArgumentParserError:
print("Please run this script on the command line, with the \
correct arguments. Type -h for help.\n")
sys.exit()
else:
# Adds the positional arguments.
argparser.add_argument('inputfile', metavar='inputsam', type=str,
nargs='?', help='Sam file to process [default: %(default)s]',
default="stdin")
# Adds the optional arguments.
argparser.add_argument('--version', action='version',
version="%(prog)s version: " + version_num)
# TODO: ADD parameter for sample ID
argparser.add_argument('-o', '--output_base',
help="Directory + prefix to output, [default: %(default)s]",
action="store", type=str, dest="output_base", default="stdout")
argparser.add_argument("-@", "--cpus",
help="The number of CPUs to use.", type=int, default=1)
# Parses the arguments given in the shell.
args = argparser.parse_args()
inp = args.inputfile
outb = args.output_base
cpus = args.cpus
return inp, outb, cpus | d93457881e81dde1a11412be418f54181ff15c2b | 3,656,538 |
import time
import traceback
import imp
import gc
def load_scripts(reload_scripts=False, refresh_scripts=False):
"""
Load scripts and run each modules register function.
:arg reload_scripts: Causes all scripts to have their unregister method
called before loading.
:type reload_scripts: bool
:arg refresh_scripts: only load scripts which are not already loaded
as modules.
:type refresh_scripts: bool
"""
use_time = _bpy.app.debug
prefs = _bpy.context.user_preferences
if use_time:
t_main = time.time()
loaded_modules = set()
if refresh_scripts:
original_modules = _sys.modules.values()
if reload_scripts:
_bpy_types.TypeMap.clear()
        # just unload, don't change user defaults, this means we can sync
        # to reload. note that they will only actually reload if the
        # modification time changes. This `won't` work for packages so...
        # it's not perfect.
for module_name in [ext.module for ext in prefs.addons]:
_addon_utils.disable(module_name, default_set=False)
def register_module_call(mod):
register = getattr(mod, "register", None)
if register:
try:
register()
except:
traceback.print_exc()
else:
print("\nWarning! '%s' has no register function, "
"this is now a requirement for registerable scripts" %
mod.__file__)
def unregister_module_call(mod):
unregister = getattr(mod, "unregister", None)
if unregister:
try:
unregister()
except:
traceback.print_exc()
def test_reload(mod):
# reloading this causes internal errors
# because the classes from this module are stored internally
# possibly to refresh internal references too but for now, best not to.
if mod == _bpy_types:
return mod
try:
return imp.reload(mod)
except:
traceback.print_exc()
def test_register(mod):
if refresh_scripts and mod in original_modules:
return
if reload_scripts and mod:
print("Reloading:", mod)
mod = test_reload(mod)
if mod:
register_module_call(mod)
_global_loaded_modules.append(mod.__name__)
if reload_scripts:
# module names -> modules
_global_loaded_modules[:] = [_sys.modules[mod_name]
for mod_name in _global_loaded_modules]
# loop over and unload all scripts
_global_loaded_modules.reverse()
for mod in _global_loaded_modules:
unregister_module_call(mod)
for mod in _global_loaded_modules:
test_reload(mod)
_global_loaded_modules[:] = []
for base_path in script_paths():
for path_subdir in _script_module_dirs:
path = _os.path.join(base_path, path_subdir)
if _os.path.isdir(path):
_sys_path_ensure(path)
# only add this to sys.modules, don't run
if path_subdir == "modules":
continue
for mod in modules_from_path(path, loaded_modules):
test_register(mod)
# deal with addons separately
_addon_utils.reset_all(reload_scripts)
# run the active integration preset
filepath = preset_find(prefs.inputs.active_keyconfig, "keyconfig")
if filepath:
keyconfig_set(filepath)
if reload_scripts:
print("gc.collect() -> %d" % gc.collect())
if use_time:
print("Python Script Load Time %.4f" % (time.time() - t_main)) | a6e8186575eb7cb04d64ee650a77035d9adfb16c | 3,656,539 |
from hybridq.gate.gate import _available_gates
def get_available_gates() -> tuple[str, ...]:
"""
Return available gates.
"""
return tuple(_available_gates) | f4d9e8d617675174f97d7d1cc3d6ea8bdadab725 | 3,656,540 |
def main():
"""
Entry point
    Collect all reviews from the file system (FS) and
    dump them as a JSON representation back to the FS
Returns:
int: The status code
"""
collector = Collector()
return collector.collect() | d6d15227fe37522357a3f1706cf446026e277a32 | 3,656,541 |
def __parse_tokens(sentence: spacy.tokens.Doc) -> ParsedUniversalDependencies:
"""Parses parts of speech from the provided tokens."""
#tokenize
# remove the stopwards, convert to lowercase
#bi/n-grams
adj = __get_word_by_ud_pos(sentence, "ADJ")
adp = __get_word_by_ud_pos(sentence, "ADP")
adv = __get_word_by_ud_pos(sentence, "ADV")
aux = __get_word_by_ud_pos(sentence, "AUX")
cconj = __get_word_by_ud_pos(sentence, "CCONJ")
det = __get_word_by_ud_pos(sentence, "DET")
intj = __get_word_by_ud_pos(sentence, "INTJ")
noun = __get_word_by_ud_pos(sentence, "NOUN")
num = __get_word_by_ud_pos(sentence, "NUM")
part = __get_word_by_ud_pos(sentence, "PART")
pron = __get_word_by_ud_pos(sentence, "PRON")
propn = __get_word_by_ud_pos(sentence, "PROPN")
punct = __get_word_by_ud_pos(sentence, "PUNCT")
sconj = __get_word_by_ud_pos(sentence, "SCONJ")
sym = __get_word_by_ud_pos(sentence, "SYM")
verb = __get_word_by_ud_pos(sentence, "VERB")
x = __get_word_by_ud_pos(sentence, "X")
return ParsedUniversalDependencies(
adj = adj,
adp = adp,
adv = adv,
aux = aux,
cconj = cconj,
det = det,
intj = intj,
noun = noun,
num = num,
part = part,
pron = pron,
propn = propn,
punct = punct,
sconj = sconj,
sym = sym,
verb = verb,
x = x) | 86553239aaac9d89203722f3853989ba0f95b8e3 | 3,656,542 |
from datetime import datetime
def main():
"""
In this main function, we connect to the database, and we create position table and intern table
and after that we create new position and new interns and insert the data into the position/intern
table
"""
database = r"interns.db"
sql_drop_positions_table="""
DROP TABLE positions
"""
sql_drop_interns_table="""
DROP TABLE interns
"""
sql_create_positions_table = """ CREATE TABLE IF NOT EXISTS positions (
name text PRIMARY KEY,
description text
); """
sql_create_interns_table = """CREATE TABLE IF NOT EXISTS interns (
id integer PRIMARY KEY,
last_name text NOT NULL,
first_name text NOT NULL,
position_applied text NOT NULL,
school text NOT NULL,
program text NOT NULL,
date_of_entry text NOT NULL,
FOREIGN KEY (position_applied) REFERENCES positions (name)
ON UPDATE NO ACTION
);"""
# create a database connection
conn = create_connection(database)
# create tables
if conn is not None:
#drop interns table before everything else
drop_table(conn, sql_drop_interns_table)
#drop positions table before everything else
drop_table(conn, sql_drop_positions_table)
# create projects table
create_table(conn, sql_create_positions_table)
# create tasks table
create_table(conn, sql_create_interns_table)
else:
print("Error! cannot create the database connection.")
with conn:
#create position-later on change the check condition
position=("Software Development Intern", "This position is for software development intern");
create_position(conn, position)
#create interns:
        intern_1=("A","B","Software Development Intern","GWU","Data Analytics",datetime.now())
        intern_2=("C","D","Software Development Intern","GWU","Data Analytics",datetime.now())
create_intern(conn,intern_1)
create_intern(conn,intern_2)
conn.commit()
conn.close()
return database | 89b88d681b4f4eaeada0a8e8de5a3dadad1ddd15 | 3,656,543 |
from typing import Tuple
def parse_date(month: int, day: int) -> Tuple[int, int, int]:
"""Parse a date given month and day only and convert to
a tuple.
Args:
        month (int): 1-indexed month value (e.g. 1 for January)
day (int): a day of the month
Returns:
Tuple[int, int, int]: (year, month, day)
"""
if month < config.TODAY.month:
# Note that if you have not yet recorded/cached the current
# records, you should comment out the +1. The +1 is only
# meant to increment for future events that happen in
# the new year.
year = config.TODAY.year + 1
elif month - config.TODAY.month > 1:
# I realized that on June 10th, 2020, the schedule for UQs was
# posted June 10th but included June 9th (which had passed).
# There is a distinct possibility that this will happen again,
# when the schedule is posted on New Year's Day (around there)
# and includes a day for December. Because events are only
# at most a month away in the future, we should check whether
# the difference in months is greater than 1.
# e.g. 12 - 1 > 1 to represent December of previous year and
# January of the current year
year = config.TODAY.year - 1
else:
year = config.TODAY.year
return year, month, day | d9ebb40061c14c9a2b1336465921cea0d5c756a8 | 3,656,544 |
def usgs_perlite_parse(*, df_list, source, year, **_):
"""
Combine, parse, and format the provided dataframes
:param df_list: list of dataframes to concat and format
:param source: source
:param year: year
:return: df, parsed and partially formatted to flowbyactivity
specifications
"""
data = {}
row_to_use = ["Quantity", "Mine production2"]
prod = ""
name = usgs_myb_name(source)
des = name
dataframe = pd.DataFrame()
col_name = usgs_myb_year(YEARS_COVERED['perlite'], year)
for df in df_list:
for index, row in df.iterrows():
if df.iloc[index]["Production"].strip() == "Mine production2":
prod = "production"
elif df.iloc[index]["Production"].strip() == \
"Imports for consumption:3":
prod = "import"
elif df.iloc[index]["Production"].strip() == "Exports:3":
prod = "export"
if df.iloc[index]["Production"].strip() in row_to_use:
product = df.iloc[index]["Production"].strip()
data = usgs_myb_static_variables()
data["SourceName"] = source
data["Year"] = str(year)
data["Unit"] = "Thousand Metric Tons"
data["FlowAmount"] = str(df.iloc[index][col_name])
if str(df.iloc[index][col_name]) == "W":
data["FlowAmount"] = WITHDRAWN_KEYWORD
data["Description"] = des
data["ActivityProducedBy"] = name
data['FlowName'] = name + " " + prod
dataframe = dataframe.append(data, ignore_index=True)
dataframe = assign_fips_location_system(
dataframe, str(year))
return dataframe | 8b9b1dcf3312cb59f5a27873e791c4bc744599bc | 3,656,545 |
import warnings
import numpy as np
from scipy import linalg
def calculate_frechet_distance(mu1, sigma1, mu2, sigma2, eps=1e-6):
    # taken from https://github.com/bioinf-jku/TTUR/blob/master/fid.py
"""Numpy implementation of the Frechet Distance.
The Frechet distance between two multivariate Gaussians X_1 ~ N(mu_1, C_1)
and X_2 ~ N(mu_2, C_2) is
d^2 = ||mu_1 - mu_2||^2 + Tr(C_1 + C_2 - 2*sqrt(C_1*C_2)).
Stable version by Dougal J. Sutherland.
Params:
-- mu1 : Numpy array containing the activations of the pool_3 layer of the
inception net ( like returned by the function 'get_predictions')
for generated samples.
    -- mu2   : The sample mean over activations of the pool_3 layer, precalculated
               on a representative data set.
-- sigma1: The covariance matrix over activations of the pool_3 layer for
generated samples.
-- sigma2: The covariance matrix over activations of the pool_3 layer,
               precalculated on a representative data set.
Returns:
-- : The Frechet Distance.
"""
mu1 = np.atleast_1d(mu1)
mu2 = np.atleast_1d(mu2)
sigma1 = np.atleast_2d(sigma1)
sigma2 = np.atleast_2d(sigma2)
assert mu1.shape == mu2.shape, "Training and test mean vectors have different lengths"
assert sigma1.shape == sigma2.shape, "Training and test covariances have different dimensions"
diff = mu1 - mu2
# product might be almost singular
covmean, _ = linalg.sqrtm(sigma1.dot(sigma2), disp=False)
if not np.isfinite(covmean).all():
msg = "fid calculation produces singular product; adding %s to diagonal of cov estimates" % eps
warnings.warn(msg)
offset = np.eye(sigma1.shape[0]) * eps
covmean = linalg.sqrtm((sigma1 + offset).dot(sigma2 + offset))
# numerical error might give slight imaginary component
if np.iscomplexobj(covmean):
if not np.allclose(np.diagonal(covmean).imag, 0, atol=1e-3):
m = np.max(np.abs(covmean.imag))
#raise ValueError("Imaginary component {}".format(m))
            print('FID calculation produced a non-negligible imaginary component')
covmean = covmean.real
tr_covmean = np.trace(covmean)
return diff.dot(diff) + np.trace(sigma1) + np.trace(sigma2) - 2 * tr_covmean | 0f22ce0a99e9b8f2ffca7af4a190c020f376ce8c | 3,656,546 |
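# Minimal usage sketch with synthetic activations; the small random matrices
# below stand in for the 2048-dimensional pool_3 features mentioned in the
# docstring and are purely illustrative.
import numpy as np

rng = np.random.default_rng(0)
act1 = rng.normal(size=(64, 8))
act2 = rng.normal(size=(64, 8)) + 0.5
mu1, sigma1 = act1.mean(axis=0), np.cov(act1, rowvar=False)
mu2, sigma2 = act2.mean(axis=0), np.cov(act2, rowvar=False)
print(calculate_frechet_distance(mu1, sigma1, mu2, sigma2))  # small positive float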
def _svdvals_eig(x): # pragma: no cover
"""SVD-decomposition via eigen, but return singular values only.
"""
if x.shape[0] > x.shape[1]:
s2 = np.linalg.eigvalsh(dag(x) @ x)
else:
s2 = np.linalg.eigvalsh(x @ dag(x))
return s2**0.5 | af47405994cf8fa1504fcb898b7621483eb1e346 | 3,656,547 |
def get_3d_object_section(target_object):
"""Returns 3D section includes given object like stl.
"""
target_object = target_object.flatten()
x_min = min(target_object[0::3])
x_max = max(target_object[0::3])
y_min = min(target_object[1::3])
y_max = max(target_object[1::3])
z_min = min(target_object[2::3])
z_max = max(target_object[2::3])
return [x_min, x_max, y_min, y_max, z_min, z_max] | e11d62ad06ada005d16803b2f440ac700e272599 | 3,656,548 |
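# Usage sketch: one triangle given as flattened x, y, z vertex coordinates
# (the values are illustrative, not from any real mesh).
import numpy as np

tri = np.array([[0.0, 0.0, 0.0, 1.0, 2.0, 3.0, -1.0, 0.5, 2.0]])
print(get_3d_object_section(tri))  # [-1.0, 1.0, 0.0, 2.0, 0.0, 3.0]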
import os
def load_spectr_folder(path, result_format="xy"):
"""
Load a folder containing demod scope files.
    Return a list of 6 elements (one per demod), which are either ``None``, if there's no data for this demod, or contain that demod's trace.
"""
data=[]
for demod in range(1,7):
file_path=os.path.join(path,"Freq{}.csv".format(demod))
if os.path.exists(file_path):
data.append(load_spectr_file(file_path,result_format=result_format))
else:
data.append(None)
return data | 3a84d180d9320ddd2722e2738dd710fb492bbb4d | 3,656,549 |
def make_row(filename, num_cols, col_names):
"""
Given a genome file, create and return a row of kmer counts
    to be inserted into the kmer matrix.
"""
# Filepath
thefile = str(filename[0])
# Get the genome id from the filepath
genomeid = filename[0].split('/')[-1]
genomeid = genomeid.split('.')[-2]
# Create a temp row to fill and return (later placed in the kmer_matrix)
temp_row = [0]*num_cols
# Walk through the file
for record in SeqIO.parse(thefile, "fasta"):
# Retrieve the sequence as a string
kmerseq = record.seq
#kmerseq = kmerseq._get_seq_str_and_check_alphabet(kmerseq)
kmerseq = str(kmerseq)
# Retrieve the kmer count as an int
kmercount = record.id
kmercount = int(kmercount)
if kmercount>255:
kmercount = 255
# Lookup the seq in the column list for the index
col_index = col_names[kmerseq]
# Put the kmercount in the right spot in the row
temp_row[col_index] = kmercount
return genomeid,temp_row | 59ed16c4a19da95145ed56164bc35ef24bc7f6bc | 3,656,550 |
def analytic_overlap_NM(
DQ: float,
w1: float,
w2: float,
n1: int,
n2: int
) -> float:
"""Compute the overlap between two displaced harmonic oscillators.
This function computes the overlap integral between two harmonic
oscillators with frequencies w1, w2 that are displaced by DQ for the
quantum numbers n1, n2. The integral is computed using an analytic formula
for the overlap of two displaced harmonic oscillators. The method comes
from B.P. Zapol, Chem. Phys. Lett. 93, 549 (1982).
Parameters
----------
DQ : float
displacement between harmonic oscillators in amu^{1/2} Angstrom
w1, w2 : float
frequencies of the harmonic oscillators in eV
n1, n2 : integer
quantum number of the overlap integral to calculate
Returns
-------
np.longdouble
overlap of the two harmonic oscillator wavefunctions
"""
w = np.double(w1 * w2 / (w1 + w2))
rho = np.sqrt(factor) * np.sqrt(w / 2) * DQ
sinfi = np.sqrt(w1) / np.sqrt(w1 + w2)
cosfi = np.sqrt(w2) / np.sqrt(w1 + w2)
Pr1 = (-1)**n1 * np.sqrt(2 * cosfi * sinfi) * np.exp(-rho**2)
Ix = 0.
k1 = n2 // 2
k2 = n2 % 2
l1 = n1 // 2
l2 = n1 % 2
for kx in range(k1+1):
for lx in range(l1+1):
k = 2 * kx + k2
l = 2 * lx + l2 # noqa: E741
Pr2 = (fact(n1) * fact(n2))**0.5 / \
(fact(k)*fact(l)*fact(k1-kx)*fact(l1-lx)) * \
2**((k + l - n2 - n1) / 2)
Pr3 = (sinfi**k)*(cosfi**l)
# f = hermval(rho, [0.]*(k+l) + [1.])
f = herm(np.float64(rho), k+l)
Ix = Ix + Pr1*Pr2*Pr3*f
return Ix | f0eba159f1bfb3fd05b1a825170e03e02587ef32 | 3,656,551 |
def init_manager(mocker):
"""Fixture to initialize a style constant."""
mocker.patch.object(manager.StyleManager, "__init__", lambda x: None)
def _create():
return manager.StyleManager()
return _create | da7838352c0a8c13acfcd0d345f78e329978409c | 3,656,552 |
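# Hedged usage sketch: inside a pytest test, the fixture returns a factory
# that builds a StyleManager whose __init__ has been patched out (the test
# name below is illustrative).
def test_init_manager_returns_instance(init_manager):
    style_manager = init_manager()
    assert isinstance(style_manager, manager.StyleManager)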
def GaussLegendre(f, n):
"""Gauss-Legendre integration on [-1, 1] with n points."""
x, w = numint.GaussLegendre(n)
I = np.dot(f(x), w)
return I | 73fcd257e92852b56fcec7d0f21cbbcf87afdb51 | 3,656,553 |
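# Hedged usage sketch: numint.GaussLegendre(n) is assumed to return the n
# Gauss-Legendre nodes and weights on [-1, 1] (the same values NumPy exposes
# as np.polynomial.legendre.leggauss), so GaussLegendre(f, n) reduces to the
# weighted sum computed below.
import numpy as np

f = lambda x: x**2
x, w = np.polynomial.legendre.leggauss(5)
print(np.dot(f(x), w))  # ~0.6667, the exact integral of x**2 over [-1, 1]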
from typing import List
from typing import Dict
from typing import OrderedDict
def directory_item_groups(
items: List[Item], level: int
) -> Dict[str, List[Item]]:
"""Split items into groups per directory at the given level.
The level is relative to the root directory, which is at level 0.
"""
module_items = OrderedDict()
for item in items:
module_items.setdefault(item.parent_path(level), []).append(item)
return module_items | 2a8e8138097ad48417f9988059a0ed19d63e4877 | 3,656,554 |
def mergeSort(x):
""" Function to sort an array using merge sort algorithm """
if len(x) == 0 or len(x) == 1:
return x
else:
middle = len(x)//2
a = mergeSort(x[:middle])
b = mergeSort(x[middle:])
return merge(a,b) | 9187209cd9e679c790d0cddc18d58e6edc3e6d3a | 3,656,555 |
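# mergeSort above relies on a merge() helper that is not shown; the function
# below is a minimal sketch of what it is assumed to do (merge two already
# sorted lists into one sorted list).
def merge(a, b):
    result = []
    i = j = 0
    while i < len(a) and j < len(b):
        if a[i] <= b[j]:
            result.append(a[i])
            i += 1
        else:
            result.append(b[j])
            j += 1
    # Append whatever remains of either input.
    result.extend(a[i:])
    result.extend(b[j:])
    return result

# Example: mergeSort([5, 2, 4, 1]) -> [1, 2, 4, 5]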
from typing import Union
from typing import Optional
from typing import Dict
from typing import Any
async def join(
db,
query: Union[dict, str],
document: Optional[Dict[str, Any]] = None,
session: Optional[AsyncIOMotorClientSession] = None,
) -> Optional[Dict[str, Any]]:
"""
Join the otu associated with the supplied ``otu_id`` with its sequences.
If an OTU is passed, the document will not be pulled from the database.
:param db: the application database client
:param query: the id of the otu to join or a Mongo query.
:param document: use this otu document as a basis for the join
:param session: a Motor session to use for database operations
:return: the joined otu document
"""
# Get the otu entry if a ``document`` parameter was not passed.
document = document or await db.otus.find_one(query, session=session)
if document is None:
return None
cursor = db.sequences.find({"otu_id": document["_id"]}, session=session)
# Merge the sequence entries into the otu entry.
return virtool.otus.utils.merge_otu(document, [d async for d in cursor]) | d01dc90855692a149a279fbad9b8777d4a850a7d | 3,656,556 |
import time
import math
import networkx
from ortools.sat.python import cp_model
def cp_solve(V, E, lb, ub, col_cov, cuts=[], tl=999999):
"""Solves a partial problem with a CP model.
Args:
V: List of vertices (columns).
E: List of edges (if a transition between two columns is allowed).
col_cov: Matrix of the zone coverages of the columns (c[i][j] == 1 if
zone i is covered by column j).
Returns:
- Objective value of the best Hamiltonian path, -1 if there is no
Hamiltonian path within the LB/UB limits, -2 if the graph is not
connected (this latter case has been removed).
- A feasible solution for this objective value.
"""
cp_start_time = time.time()
num_cols = len(V)
num_zones = len(col_cov)
# First, check if the graph is disconnected (in which case no
# Hamiltonian path exists).
G = networkx.Graph()
G.add_nodes_from(V)
G.add_edges_from(E)
# # If the graph is not connected, no Hamiltonian path can exist.
# if not networkx.is_connected(G):
# return -2, []
# Variables.
model = cp_model.CpModel()
x = [model.NewIntVar(0, num_cols-1, 'x'+str(i)) for i in range(num_rounds)]
# Alternative for GCC, since the constraint is not available in OR-Tools.
x_occs = []
for i in range(num_cols):
occs = []
for j in range(num_rounds):
boolvar = model.NewBoolVar('')
model.Add(x[j] == i).OnlyEnforceIf(boolvar)
model.Add(x[j] != i).OnlyEnforceIf(boolvar.Not())
occs.append(boolvar)
x_occs.append(sum(occs))
# if mp_integer:
# model.AddLinearConstraint(x_occs[i], 1, num_rounds-num_cols+1)
# Add the CP cuts.
for cut in cuts:
model.Add(sum(x_occs[i] for i in range(num_cols) if i in cut) <= num_rounds-1)
# Objective.
if ub == 9999:
ub = num_rounds+1
phi = model.NewIntVar(int(lb), math.floor(ub)-1, 'phi')
coverages = [model.NewIntVar(0, num_rounds, 'c'+str(i))
for i in range(num_zones)]
for i in range(num_zones):
model.Add(cp_model.LinearExpr.ScalProd(x_occs, col_cov[i]) == coverages[i])
phi_low = model.NewIntVar(0, num_rounds, 'phi_low')
phi_high = model.NewIntVar(0, num_rounds, 'phi_high')
model.AddMinEquality(phi_low, coverages)
model.AddMaxEquality(phi_high, coverages)
model.Add(phi == phi_high-phi_low)
model.Minimize(phi)
# Regular constraint (Hamiltonian path).
# For the initial state, we use a dummy node which is connected to
# all other nodes.
dummy = max(V)+1
start = dummy
end = V
arcs = [(dummy, i, i) for i in V]
for e in E:
arcs.append((e[0], e[1], e[1]))
# Node self-loops
for v in V:
arcs.append((v, v, v))
# If there is only one vertex then a Hamiltonian path exists.
if len(V) > 1:
model.AddAutomaton(x, start, end, arcs)
# Solve the model.
solver = cp_model.CpSolver()
solver.parameters.max_time_in_seconds = tl
status = solver.Solve(model)
#assert status == cp_model.OPTIMAL or status == cp_model.INFEASIBLE or status == cp_model.FEASIBLE
if status == cp_model.OPTIMAL:
solution = [solver.Value(x[i]) for i in range(num_rounds)]
return solver.ObjectiveValue(), solution, time.time()-cp_start_time
elif status == cp_model.INFEASIBLE or status == cp_model.UNKNOWN:
return -1, [], time.time()-cp_start_time
elif status == cp_model.FEASIBLE:
return solver.ObjectiveValue(), [], time.time()-cp_start_time | 6ad8ca02fcf119192e3aad4881a4eb9e0adf30d0 | 3,656,557 |
def file_exists(path: Text):
"""
Returns true if file exists at path.
Args:
path (str): Local path in filesystem.
"""
return file_io.file_exists_v2(path) | 9d9acf36ad0276a4fa440a54ed859b24e6bfee4e | 3,656,558 |
import requests
import json
def _get_page_num_detail():
"""
    Eastmoney - Data Center - Featured Data - Institutional Research - Institutional Research Details
    http://data.eastmoney.com/jgdy/xx.html
    :return: int, the total number of pages of institutional research details
"""
url = "http://data.eastmoney.com/DataCenter_V3/jgdy/xx.ashx"
params = {
"pagesize": "5000",
"page": "1",
"js": "var SZGpIhFb",
"param": "",
"sortRule": "-1",
"sortType": "0",
"rt": "52581407",
}
res = requests.get(url, params=params)
data_json = json.loads(res.text[res.text.find("={")+1:])
return data_json["pages"] | 84c32485637cb481f1ebe6fe05609e5b545daece | 3,656,559 |
import tensorflow as tf
from tensorflow.python.framework.graph_util import convert_variables_to_constants
def freeze_session(
session,
keep_var_names=None,
output_names=None,
clear_devices=True):
"""
Freezes the state of a session into a pruned computation graph.
"""
graph = session.graph
with graph.as_default():
freeze_var_names = list(set(v.op.name for v in tf.global_variables())
.difference(keep_var_names or []))
output_names = output_names or []
output_names += [v.op.name for v in tf.global_variables()]
# Graph -> GraphDef ProtoBuf
input_graph_def = graph.as_graph_def()
if clear_devices:
for node in input_graph_def.node:
node.device = ""
frozen_graph = convert_variables_to_constants(
session, input_graph_def, output_names, freeze_var_names)
frozen_graph = tf.graph_util.remove_training_nodes(frozen_graph)
return frozen_graph | ad8335110c139b73fb0c5cebb56dbdeea702a751 | 3,656,560 |
def send_mail(subject, body, recipient_list, bcc_list=None, from_email=None, connection=None, attachments=None,
fail_silently=False, headers=None, cc_list=None, dc1_settings=None, content_subtype=None):
"""
Like https://docs.djangoproject.com/en/dev/topics/email/#send-mail
Attachment is a list of tuples (filename, content, mime_type), where mime_type can be None.
"""
if not dc1_settings:
dc1_settings = DefaultDc().settings
shadow_email = dc1_settings.SHADOW_EMAIL
# Global bcc
if shadow_email:
if bcc_list:
bcc_list = list(bcc_list)
bcc_list.append(shadow_email)
else:
bcc_list = [shadow_email]
bcc_list = set(bcc_list)
# Default "From:" header
if not from_email:
from_email = dc1_settings.DEFAULT_FROM_EMAIL
# Compose message
msg = EmailMessage(subject, body, from_email, recipient_list, bcc_list, connection=connection,
attachments=attachments, headers=headers, cc=cc_list)
if content_subtype:
msg.content_subtype = content_subtype
# Send mail
if attachments:
logger.info('Sending mail to "%s" with subject "%s" and attachments "%s"',
recipient_list, subject, [i[0] for i in attachments])
else:
logger.info('Sending mail to "%s" with subject "%s"', recipient_list, subject)
return msg.send(fail_silently=fail_silently) | 36389b7f7e0906aa92ce06c66c4f51faa2643e31 | 3,656,561 |
def distinct_by_t(func):
"""
Transformation for Sequence.distinct_by
:param func: distinct_by function
:return: transformation
"""
def distinct_by(sequence):
distinct_lookup = {}
for element in sequence:
key = func(element)
if key not in distinct_lookup:
distinct_lookup[key] = element
return distinct_lookup.values()
return Transformation("distinct_by({0})".format(name(func)), distinct_by, None) | 3e2811b9f1b69b5c45f65a561b7f67ae477c8825 | 3,656,562 |
import os
def are_datasets_created(path, number_of_datasets, suffix='parts'):
"""Checks existence and reads the dataset ids from the datasets file in
the path directory
"""
dataset_ids = []
try:
with open("%s%sdataset_%s" % (path, os.sep, suffix)) as datasets_file:
for line in datasets_file:
dataset = line.strip()
try:
dataset_id = bigml.api.get_dataset_id(dataset)
dataset_ids.append(dataset_id)
except ValueError:
return False, dataset_ids
if len(dataset_ids) == number_of_datasets:
return True, dataset_ids
else:
return False, dataset_ids
except IOError:
return False, dataset_ids | 602d41071e01ade333b52524907d8a30ac8c25ac | 3,656,563 |
def _get_partition_info(freq_unit):
"""
    Get the TDW cycle unit and partition format for a platform frequency unit
    :param freq_unit: frequency unit
    :return: TDW cycle unit, partition format
    """
    if freq_unit == "m":
        # minute-level task
        cycle_unit = "I"
        partition_value = ""
    elif freq_unit == "H":
        # hourly task
        cycle_unit = "H"
        partition_value = "YYYYMMDDHH"
    elif freq_unit == "d":
        # daily task
        cycle_unit = "D"
        partition_value = "YYYYMMDD"
    elif freq_unit == "w":
        # weekly task
        cycle_unit = "W"
        partition_value = "YYYYMMDD"
    elif freq_unit == "M":
        # monthly task
        cycle_unit = "M"
        partition_value = "YYYYMM"
    elif freq_unit == "O":
        # one-off task
        cycle_unit = "O"
        partition_value = ""
    else:
        # other task types
cycle_unit = "R"
partition_value = ""
return cycle_unit, partition_value | 1f7df3364a21018daa8d3a61507ee59c467c8ffc | 3,656,564 |
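# Usage sketch: map platform frequency units to TDW cycle units and partition
# formats.
print(_get_partition_info("H"))  # ('H', 'YYYYMMDDHH')
print(_get_partition_info("d"))  # ('D', 'YYYYMMDD')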
from typing import Any
def metadata_property(k: str) -> property:
"""
Make metadata fields available directly on a base class.
"""
def getter(self: MetadataClass) -> Any:
return getattr(self.metadata, k)
def setter(self: MetadataClass, v: Any) -> None:
return setattr(self.metadata, k, v)
return property(getter, setter) | 22d3ab3c8a7029564083a6ba544acd69f2ee5491 | 3,656,565 |
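# Hedged usage sketch: metadata_property assumes instances carry a `metadata`
# attribute; the two small classes below are illustrative only.
class ExampleMetadata:
    def __init__(self):
        self.version = "1.0"

class Example:
    version = metadata_property("version")  # exposes metadata.version directly

    def __init__(self):
        self.metadata = ExampleMetadata()

e = Example()
print(e.version)   # "1.0"
e.version = "2.0"  # writes through to e.metadata.version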
import torch
def adjust_contrast(img, contrast_factor):
"""Adjust contrast of an RGB image.
Args:
img (Tensor): Image to be adjusted.
contrast_factor (float): How much to adjust the contrast. Can be any
non negative number. 0 gives a solid gray image, 1 gives the
original image while 2 increases the contrast by a factor of 2.
Returns:
Tensor: Contrast adjusted image.
"""
if not F._is_tensor_image(img):
raise TypeError('tensor is not a torch image.')
mean = torch.mean(rgb_to_grayscale(img).to(torch.float))
return _blend(img, mean, contrast_factor) | 740c68fe269229329cd37d25424178a74f5ac7fc | 3,656,566 |
def license_wtfpl():
"""
Create a license object called WTF License.
"""
return mixer.blend(cc.License, license_name="WTF License") | d202d605fe84556c553fdc7cf70c5815eb1dbee4 | 3,656,567 |
import copy
def _add_embedding_column_map_fn(
k_v,
original_example_key,
delete_audio_from_output,
audio_key,
label_key,
speaker_id_key):
"""Combine a dictionary of named embeddings with a tf.train.Example."""
k, v_dict = k_v
if original_example_key not in v_dict:
raise ValueError(
f'Original key not found: {original_example_key} vs {v_dict.keys()}')
ex_l = v_dict[original_example_key]
assert len(ex_l) == 1, (len(ex_l), k_v[0], ex_l)
ex = copy.deepcopy(ex_l[0]) # Beam does not allow modifying the input.
assert isinstance(ex, tf.train.Example), type(ex)
for name, embedding_l in v_dict.items():
if name == original_example_key:
continue
assert len(embedding_l) == 1, embedding_l
embedding = embedding_l[0]
assert isinstance(embedding, np.ndarray)
assert embedding.ndim == 2, embedding.ndim
# Store the embedding 2D shape and store the 1D embedding. The original
# embedding can be recovered with `emb.reshape(feature['shape'])`.
ex = _add_embedding_to_tfexample(ex, embedding, f'embedding/{name}')
if delete_audio_from_output:
ex.features.feature.pop(audio_key, None)
  # Assert that the label is present. If it's an integer, convert it to bytes.
if label_key:
if label_key not in ex.features.feature:
raise ValueError(f'Label not found: {label_key} vs {ex.features.feature}')
lbl_feat = ex.features.feature[label_key]
if lbl_feat.int64_list.value:
lbl_val_as_bytes = str(lbl_feat.int64_list.value[0]).encode('utf-8')
ex.features.feature.pop(label_key, None)
ex.features.feature[label_key].bytes_list.value.append(lbl_val_as_bytes)
# If provided, assert that the speaker_id field is present, and of type
# `bytes`.
if speaker_id_key:
feats = ex.features.feature
assert speaker_id_key in feats, (speaker_id_key, feats.keys())
assert feats[speaker_id_key].bytes_list.value, feats[speaker_id_key]
return k, ex | 710fd658b0f1d830c8e4e97d473b02f54a0d4414 | 3,656,568 |
def modelf(input_shape):
"""
Function creating the model's graph in Keras.
Argument:
input_shape -- shape of the model's input data (using Keras conventions)
Returns:
model -- Keras model instance
"""
X_input = Input(shape = input_shape)
### START CODE HERE ###
# Step 1: CONV layer (≈4 lines)
X = Conv1D(196, kernel_size = 15, strides = 4)(X_input) # CONV1D
X = BatchNormalization()(X) # Batch normalization
X = Activation("relu")(X) # ReLu activation
X = Dropout(0.8)(X) # dropout (use 0.8)
# Step 2: First GRU Layer (≈4 lines)
X = GRU(units = 128, return_sequences = True)(X) # GRU (use 128 units and return the sequences)
X = Dropout(0.8)(X) # dropout (use 0.8)
X = BatchNormalization()(X) # Batch normalization
# Step 3: Second GRU Layer (≈4 lines)
X = GRU(units = 128, return_sequences = True)(X) # GRU (use 128 units and return the sequences)
X = Dropout(0.8)(X) # dropout (use 0.8)
X = BatchNormalization()(X) # Batch normalization
X = Dropout(0.8)(X) # dropout (use 0.8)
# Step 4: Time-distributed dense layer (≈1 line)
X = TimeDistributed(Dense(1, activation = "sigmoid"))(X) # time distributed (sigmoid)
### END CODE HERE ###
model = Model(inputs = X_input, outputs = X)
return model | d8beaf7335e19c66ea3913ed019647d9e42f92d1 | 3,656,569 |
def get_mbed_official_psa_release(target=None):
"""
Creates a list of PSA targets with default toolchain and
artifact delivery directory.
:param target: Ask for specific target, None for all targets.
:return: List of tuples (target, toolchain, delivery directory).
"""
psa_targets_release_list = []
psa_secure_targets = [t for t in TARGET_NAMES if
Target.get_target(t).is_PSA_secure_target]
if target is not None:
if target not in psa_secure_targets:
raise Exception("{} is not a PSA secure target".format(target))
psa_targets_release_list.append(_get_target_info(target))
else:
for t in psa_secure_targets:
            psa_targets_release_list.append(_get_target_info(t))
return psa_targets_release_list | 0f260c1d57b0d21d911fcd6998fadee0791600de | 3,656,570 |
def match_l2(X, Y, match_rows=False, normalize=True):
"""Return the minimum Frobenius distance between X and Y over permutations of columns (or rows)."""
res = _match_factors(X, Y, l2_similarity, match_rows)
res['score'] = np.sqrt(-res['score'])
if normalize:
res['score'] = res['score'] / np.linalg.norm(X, 'fro')
return res | 181ecde4c0837b69f7a37287bcf9e768fdaa3e58 | 3,656,571 |
import time
import numpy as np
import scipy.ndimage
import matplotlib.pyplot as plt
def doNMFDriedger(V, W, L, r = 7, p = 10, c = 3, plotfn = None, plotfnw = None):
"""
Implement the technique from "Let It Bee-Towards NMF-Inspired
Audio Mosaicing"
:param V: M x N target matrix
:param W: An M x K matrix of template sounds in some time order\
along the second axis
:param L: Number of iterations
:param r: Width of the repeated activation filter
:param p: Degree of polyphony; i.e. number of values in each column\
of H which should be un-shrunken
:param c: Half length of time-continuous activation filter
"""
N = V.shape[1]
K = W.shape[1]
tic = time.time()
H = np.random.rand(K, N)
print("H.shape = ", H.shape)
print("Time elapsed H initializing: %.3g"%(time.time() - tic))
errs = np.zeros(L+1)
errs[0] = getKLError(V, W.dot(H))
if plotfnw:
plt.figure(figsize=(12, 3))
plotfnw(W)
plt.savefig("Driedger_W.svg", bbox_inches='tight')
if plotfn:
res=4
plt.figure(figsize=(res*2, res*2))
for l in range(L):
print("NMF Driedger iteration %i of %i"%(l+1, L))
iterfac = 1-float(l+1)/L
tic = time.time()
#Step 1: Avoid repeated activations
print("Doing Repeated Activations...")
        MuH = scipy.ndimage.maximum_filter(H, size=(1, r))
H[H<MuH] = H[H<MuH]*iterfac
#Step 2: Restrict number of simultaneous activations
print("Restricting simultaneous activations...")
#Use partitions instead of sorting for speed
colCutoff = -np.partition(-H, p, 0)[p, :]
H[H < colCutoff[None, :]] = H[H < colCutoff[None, :]]*iterfac
#Step 3: Supporting time-continuous activations
if c > 0:
print("Supporting time-continuous activations...")
di = K-1
dj = 0
for k in range(-H.shape[0]+1, H.shape[1]):
z = np.cumsum(np.concatenate((np.zeros(c), np.diag(H, k), np.zeros(c))))
x2 = z[2*c::] - z[0:-2*c]
H[di+np.arange(len(x2)), dj+np.arange(len(x2))] = x2
if di == 0:
dj += 1
else:
di -= 1
#KL Divergence Version
WH = W.dot(H)
WH[WH == 0] = 1
VLam = V/WH
WDenom = np.sum(W, 0)
WDenom[WDenom == 0] = 1
H = H*((W.T).dot(VLam)/WDenom[:, None])
print("Elapsed Time H Update %.3g"%(time.time() - tic))
errs[l+1] = getKLError(V, W.dot(H))
#Output plots every 20 iterations
if plotfn and ((l+1)==L or (l+1)%20 == 0):
plt.clf()
plotfn(V, W, H, l+1, errs)
plt.savefig("NMFDriedger_%i.png"%(l+1), bbox_inches = 'tight')
return H | 3b3b0fe9388992bdd87cfa6b4cb0748f4502adc7 | 3,656,572 |
import numpy as np
def extract_red(image):
""" Returns the red channel of the input image. It is highly recommended to make a copy of the
input image in order to avoid modifying the original array. You can do this by calling:
temp_image = np.copy(image)
Args:
image (numpy.array): Input RGB (BGR in OpenCV) image.
Returns:
numpy.array: Output 2D array containing the red channel.
"""
# Since Red is last index, we want all rows, columns, and the last channel.
return np.copy(image[:, :, 2]) | 0f591099e439a038ef8e75d65e4eb26c200018d0 | 3,656,573 |
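# Usage sketch with a synthetic BGR image (no file I/O); red is the last
# channel index, as the function's comment notes.
import numpy as np

bgr = np.zeros((4, 4, 3), dtype=np.uint8)
bgr[:, :, 2] = 255  # pure red in BGR ordering
red = extract_red(bgr)
print(red.shape, red.max())  # (4, 4) 255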
def _cleaned_data_to_key(cleaned_data):
"""
Return a tuple representing a unique key for the cleaned data of an InteractionCSVRowForm.
"""
# As an optimisation we could just track the pk for model instances,
# but that is omitted for simplicity
key = tuple(cleaned_data.get(field) for field in DUPLICATE_FIELD_MAPPING)
if all(key):
return key
# Some of the fields are missing (this happens if they did not pass validation)
return None | aa08e0cafd0ac4ba3749db65208655dc51671997 | 3,656,574 |
def schedule_for_cleanup(request, syn):
"""Returns a closure that takes an item that should be scheduled for cleanup.
The cleanup will occur after the module tests finish to limit the residue left behind
if a test session should be prematurely aborted for any reason."""
items = []
def _append_cleanup(item):
items.append(item)
def cleanup_scheduled_items():
_cleanup(syn, items)
request.addfinalizer(cleanup_scheduled_items)
return _append_cleanup | ccbdba1a1f8dea0f13e5717d0743739d599e22e6 | 3,656,575 |
import base64
def unpickle_context(content, pattern=None):
"""
Unpickle the context from the given content string or return None.
"""
pickle = get_pickle()
if pattern is None:
pattern = pickled_context_re
match = pattern.search(content)
if match:
return pickle.loads(base64.standard_b64decode(match.group(1)))
return None | 87fa831b038329313364d512107129f69db136ad | 3,656,576 |
def ask_openid(request, openid_url, redirect_to, on_failure=None,
sreg_request=None):
""" basic function to ask openid and return response """
on_failure = on_failure or signin_failure
trust_root = getattr(
settings, 'OPENID_TRUST_ROOT', get_url_host(request) + '/'
)
if xri.identifierScheme(openid_url) == 'XRI' and getattr(
settings, 'OPENID_DISALLOW_INAMES', False
):
msg = _("i-names are not supported")
return on_failure(request, msg)
consumer = Consumer(request.session, DjangoOpenIDStore())
try:
auth_request = consumer.begin(openid_url)
except DiscoveryFailure:
msg = _("The password or OpenID was invalid")
return on_failure(request, msg)
if sreg_request:
auth_request.addExtension(sreg_request)
redirect_url = auth_request.redirectURL(trust_root, redirect_to)
return HttpResponseRedirect(redirect_url) | bb5deefc32d1c4253d518eeead34b290e028a051 | 3,656,577 |
import torch
def get_accuracy_ANIL(logits, targets):
"""Compute the accuracy (after adaptation) of MAML on the test/query points
Parameters
----------
logits : `torch.FloatTensor` instance
Outputs/logits of the model on the query points. This tensor has shape
`(num_examples, num_classes)`.
targets : `torch.LongTensor` instance
A tensor containing the targets of the query points. This tensor has
shape `(num_examples,)`.
Returns
-------
accuracy : `torch.FloatTensor` instance
Mean accuracy on the query points
"""
_, predictions = torch.max(logits, dim=-1)
return torch.mean(predictions.eq(targets).float()) | 2ab61284da6d9cd96c066061823570d64567e9f3 | 3,656,578 |
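# Usage sketch with random logits: the accuracy is simply the fraction of rows
# whose argmax matches the target label.
import torch

logits = torch.randn(8, 5)
targets = torch.randint(0, 5, (8,))
print(get_accuracy_ANIL(logits, targets))  # tensor between 0. and 1.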
import logging
def stream_logger():
""" sets up the logger for the Simpyl object to log to the output
"""
logger = logging.Logger('stream_handler')
handler = logging.StreamHandler()
handler.setFormatter(logging.Formatter('%(asctime)s %(message)s'))
logger.addHandler(handler)
return logger | 45f5af00a0006cc8155bb4a134cce531e51e646a | 3,656,579 |
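# Usage sketch: messages are written to stderr with an asctime prefix.
log = stream_logger()
log.info("starting run")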
def sql_coordinate_frame_lookup_key(bosslet_config, coordinate_frame):
"""
    Get the lookup key that identifies the coordinate frame specified.
Args:
bosslet_config (BossConfiguration): Bosslet configuration object
coordinate_frame: Identifies coordinate frame.
Returns:
coordinate_set(str): Coordinate Frame lookup key.
"""
query = "SELECT id FROM coordinate_frame WHERE name = %s"
with bosslet_config.call.connect_rds() as cursor:
cursor.execute(query, (coordinate_frame,))
coordinate_set = cursor.fetchall()
if len(coordinate_set) != 1:
raise Exception(
"Can't find coordinate frame: {}".format(coordinate_frame))
else:
LOGGER.info("{} coordinate frame id: {}".format(coordinate_frame, coordinate_set[0][0]))
return coordinate_set[0][0] | 8bf7db01b171e13b0066a806eb097dec4a59c04e | 3,656,580 |
def entry_from_resource(resource, client, loggers):
"""Detect correct entry type from resource and instantiate.
:type resource: dict
:param resource: One entry resource from API response.
:type client: :class:`~google.cloud.logging.client.Client`
:param client: Client that owns the log entry.
:type loggers: dict
:param loggers:
A mapping of logger fullnames -> loggers. If the logger
that owns the entry is not in ``loggers``, the entry
will have a newly-created logger.
:rtype: :class:`~google.cloud.logging.entries._BaseEntry`
:returns: The entry instance, constructed via the resource
"""
if 'textPayload' in resource:
return TextEntry.from_api_repr(resource, client, loggers)
if 'jsonPayload' in resource:
return StructEntry.from_api_repr(resource, client, loggers)
if 'protoPayload' in resource:
return ProtobufEntry.from_api_repr(resource, client, loggers)
return EmptyEntry.from_api_repr(resource, client, loggers) | 0519ad63c11e04ca890288953440272de224b9db | 3,656,581 |
def make_preprocesser(training_data):
"""
Constructs a preprocessing function ready to apply to new dataframes.
Crucially, the interpolating that is done based on the training data set
is remembered so it can be applied to test datasets (e.g the mean age that
is used to fill in missing values for 'Age' will be fixed based on the mean
age within the training data set).
Summary by column:
['PassengerId',
'Survived', # this is our target, not a feature
'Pclass', # keep as is: ordinal value should work, even though it's inverted (higher number is lower class cabin)
'Name', # omit (could try some fancy stuff like inferring ethnicity, but skip for now)
'Sex', # code to 0 / 1
'Age', # replace missing with median
'SibSp',
'Parch',
'Ticket', # omit (doesn't seem like low hanging fruit, could look more closely for pattern later)
'Fare', # keep, as fare could be finer grained proxy for socio economic status, sense of entitlement / power in getting on boat
'Cabin', # one hot encode using first letter as cabin as the cabin sector
'Embarked'] # one hot encode
Params:
df: pandas.DataFrame containing the training data
Returns:
fn: a function to preprocess a dataframe (either before training or fitting a new dataset)
"""
def pick_features(df):
return df[['PassengerId', 'Pclass', 'Sex', 'Age', 'SibSp', 'Parch', 'Fare', 'Cabin', 'Embarked']]
# save median Age so we can use it to fill in missing data consistently
# on any dataset
median_age_series = training_data[['Age', 'Fare']].median()
def fix_missing(df):
return df.fillna(median_age_series)
def map_sex(df):
df['Sex'] = df['Sex'].map({'male': 0, 'female': 1})
return df
def one_hot_cabin(df):
def cabin_sector(cabin):
if isinstance(cabin, str):
return cabin[0].lower()
else:
return cabin
df[['cabin_sector']] = df[['Cabin']].applymap(cabin_sector)
one_hot = pd.get_dummies(df['cabin_sector'], prefix="cabin_sector")
interesting_cabin_sectors = ["cabin_sector_{}".format(l) for l in 'bcde']
for column, _ in one_hot.iteritems():
if column.startswith('cabin_sector_') and column not in interesting_cabin_sectors:
one_hot = one_hot.drop(column, axis=1)
df = df.join(one_hot)
df = df.drop('Cabin', axis=1)
df = df.drop('cabin_sector', axis=1)
return df
def one_hot_embarked(df):
one_hot = pd.get_dummies(df['Embarked'], prefix="embarked")
df = df.join(one_hot)
df = df.drop('Embarked', axis=1)
return df
# We want standard scaling fit on the training data, so we get a scaler ready
# for application now. It needs to be applied to data that already has the other
# pre-processing applied.
training_data_all_but_scaled = map_sex(fix_missing(pick_features(training_data)))
stdsc = StandardScaler()
stdsc.fit(training_data_all_but_scaled[['Pclass', 'Age', 'SibSp', 'Parch', 'Fare']])
def scale_df(df):
df[['Pclass', 'Age', 'SibSp', 'Parch', 'Fare']] = \
stdsc.transform(df[['Pclass', 'Age', 'SibSp', 'Parch', 'Fare']])
df[['Sex']] = df[['Sex']].applymap(lambda x: 1 if x == 1 else -1)
for column, _ in df.iteritems():
if column.startswith('cabin_sector_') or column.startswith('embarked_'):
df[[column]] = df[[column]].applymap(lambda x: 1 if x == 1 else -1)
return df
def preprocess(df, scale=True):
"""
Preprocesses a dataframe so it is ready for use with a model (either for training or prediction).
Params:
scale: whether to apply feature scaling. E.g with random forests feature scaling isn't necessary.
"""
all_but_scaled = one_hot_embarked(one_hot_cabin(map_sex(fix_missing(pick_features(df)))))
if scale:
return scale_df(all_but_scaled)
else:
return all_but_scaled
return preprocess | 480ba5b02e5347e768bd5b2cdbc8b19af1ddee8c | 3,656,582 |
def get_breakeven_prob(predicted, threshold = 0):
"""
    This function calculates the probability of a stock being above a certain threshold, which can be defined as a value (final stock price) or return rate (percentage change)
"""
predicted0 = predicted.iloc[0,0]
predicted = predicted.iloc[-1]
predList = list(predicted)
over = [(i*100)/predicted0 for i in predList if ((i-predicted0)*100)/predicted0 >= threshold]
less = [(i*100)/predicted0 for i in predList if ((i-predicted0)*100)/predicted0 < threshold]
return (len(over)/(len(over) + len(less))) | a1cededbe7a0fbe7ffe19e9b873f55c8ce369590 | 3,656,583 |
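# Hedged usage sketch: `predicted` is assumed to be a DataFrame of simulated
# price paths with time steps as rows and simulations as columns, as implied
# by iloc[0, 0] (starting price) and iloc[-1] (final prices).
import pandas as pd

paths = pd.DataFrame({"sim1": [100, 104, 110], "sim2": [100, 99, 95]})
print(get_breakeven_prob(paths, threshold=5))  # 0.5: one of two paths gains >= 5%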
import pandas as pd
def trim_whitespace(sub_map, df, source_col, op_col):
"""Trims whitespace on all values in the column"""
df[op_col] = df[op_col].transform(
lambda x: x.strip() if not pd.isnull(x) else x)
return df | 649a48cbb9246d4842555b5a21bc4d638a00ca00 | 3,656,584 |
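# Usage sketch: sub_map and source_col are unused by this helper, so
# placeholder values are passed for them here.
import pandas as pd

df = pd.DataFrame({"name": ["  alice ", "bob", None]})
print(trim_whitespace({}, df, "name", "name"))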
from re import A
from re import T
def beneficiary():
""" RESTful CRUD controller """
# Normally only used in Report
# - make changes as component of Project
s3db.configure("project_beneficiary",
deletable = False,
editable = False,
insertable = False,
)
list_btn = A(T("Beneficiary Report"),
_href=URL(c="project", f="beneficiary",
args="report", vars=get_vars),
_class="action-btn")
#def prep(r):
# if r.method in ("create", "create.popup", "update", "update.popup"):
# # Coming from Profile page?
# location_id = r.get_vars.get("~.(location)", None)
# if location_id:
# field = r.table.location_id
# field.default = location_id
# field.readable = field.writable = False
# if r.record:
# field = r.table.location_id
# field.comment = None
# field.writable = False
# return True
#s3.prep = prep
return s3_rest_controller(hide_filter=False) | ec34dd0989154bcfe2ace8506fe1cbe9c1ba9c49 | 3,656,585 |
def setup(args):
"""
Create configs and perform basic setups.
"""
cfg = get_cfg()
#cfg.merge_from_file(args.config_file)
#cfg.merge_from_file(model_zoo.get_config_file("/data/mostertrij/tridentnet/detectron2/configs/COCO-Detection/my_script_faster_rcnn_X_101_32x8d_FPN_3x.yaml"))
cfg.merge_from_file("/data/mostertrij/tridentnet/detectron2/configs/COCO-Detection/my_script_faster_rcnn_X_101_32x8d_FPN_3x.yaml")
DATASET_NAME= "LGZ_v5_more_rotations"
cfg.DATASETS.TRAIN = (f"{DATASET_NAME}_train",)
cfg.DATASETS.VAL = (f"{DATASET_NAME}_val",)
cfg.DATASETS.TEST = (f"{DATASET_NAME}_test",)
cfg.merge_from_list(args.opts)
cfg.freeze()
default_setup(cfg, args)
return cfg | a3053945cd6680c220fe8ea87189943c44558d8d | 3,656,586 |
def get_distinct_quotation_uid(*args, **kwargs):
"""
获取用户
:param args:
:param kwargs:
:return: List
"""
field = 'uid'
return map(lambda x: getattr(x, field), db_instance.get_distinct_field(Quotation, field, *args, **kwargs)) | 5a8fe7252f6ac233b69e57c0baac0f1f2d3f51ff | 3,656,587 |
import pathlib
def present_from(ref: pathlib.Path, obs: pathlib.Path) -> pathlib.Path:
"""Build a somehow least surprising difference folder from ref and obs."""
ref_code = ref.parts[-1]
if obs.is_file():
return pathlib.Path(*obs.parts[:-1], f'diff-of-{obs.parts[-1]}')
present = pathlib.Path(*obs.parts[:-1], f'diff-of-{ref_code}_{obs.parts[-1]}')
present.mkdir(parents=True, exist_ok=True)
return present | 59ae1eefaeacc9ddfac773c0c88974b98757d4a2 | 3,656,588 |
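# Usage sketch with throw-away temporary paths (purely illustrative): when
# `obs` is a directory, a sibling diff folder tagged with the reference code
# is created.
import pathlib
import tempfile

tmp = pathlib.Path(tempfile.mkdtemp())
ref = tmp / "ref-001"
obs = tmp / "observed"
obs.mkdir()
print(present_from(ref, obs))  # .../diff-of-ref-001_observed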
def dataQ_feeding(filename_queue, feat_dim, seq_len):
""" Reads and parse the examples from alignment dataset
Args:
filename_queue: A queue of strings with the filenames to read from.
Returns:
An object representing a single example, with the following fields:
MFCC sequence: 200 * 39 dimensions
"""
class MFCCRECORD(object):
pass
result = MFCCRECORD()
### use the line reader ###
reader = tf.TextLineReader()
#values = []
#for i in range(NUM_UP_TO):
# key, value = reader.read(filename_queue)
# values.append(value)
key, value = reader.read(filename_queue)
### try to read NUM_UP_TO lines in one time ###
### read the csv file into features ###
# seq = []
record_defaults = [[1.] for i in range(feat_dim*seq_len)]
# for value in values:
# seq.append(tf.decode_csv(value, record_defaults=record_defaults))
tmp_result = tf.decode_csv(value, record_defaults=record_defaults)
### so we have (NUM_UP_TO, seq_len *feat_dim ) ###
### reshape it into (NUM_UP_TO, seq_len, feat_dim) ###
### result.mfcc: sequence ###
mfcc = tf.cast(tf.reshape(tmp_result, shape=(seq_len , \
feat_dim)),tf.float32)
### result.rev_mfcc: reverse of sequence ###
# result.rev_mfcc = tf.reverse(result.mfcc, [False, True])
return mfcc, mfcc | 23d3e81bdd266f6cebe9bdff2160c4b7294e648c | 3,656,589 |
def dummy_backend(_, **kwargs):
"""
Dummy backend always returning stats with 0
"""
return _default_statement() | 875adb50540029022b28de6388738d1e5ba01e30 | 3,656,590 |
def comp_mass(self):
"""Compute the mass of the Frame
Parameters
----------
self : Frame
A Frame object
Returns
-------
Mfra: float
Mass of the Frame [kg]
"""
Vfra = self.comp_volume()
# Mass computation
return Vfra * self.mat_type.struct.rho | b78ef02f045c1f624b3277ec3e358921b3ea5c02 | 3,656,591 |
import numpy as np
def write_DS9reg(x, y, filename=None, coord='IMAGE', ptype='x', size=20,
c='green', tag='all', width=1, text=None):
"""Write a region file for ds9 for a list of coordinates.
Taken from Neil Crighton's barak.io
Parameters
----------
x, y : arrays of floats, shape (N,)
The coordinates. These may be image or WCS.
Please make sure to update the coord keyword accordingly.
filename : str, optional
A filename to write to.
coord : str (`IMAGE` or `J2000`)
The coordinate type: `IMAGE` (pixel coordinates) or
`J2000` (celestial coordinates).
ptype : str or np.array of shape (N,)
DS9 point type (e.g. `circle`, `box`, `diamond`, `cross`, `x`, `arrow`,
`boxcircle`)
size : int or np.array of shape (N,)
DS9 point size.
c : str or np.array of shape (N,)
point colour: `cyan` `blue` `magenta` `red` `green` `yellow` `white`
`black`}.
tag : str or np.array of shape (N,)
DS9 tag. e.g. 'all'
width : int or np.array of shape (N,)
DS9 width
text : str or np.array of shape (N,)
Text
"""
header = ['global font="helvetica 10 normal" select=1 highlite=1 '
'edit=0 move=1 delete=1 include=1 fixed=0 source\n']
header.append(coord + '\n')
x = np.array(x)
y = np.array(y)
    if isinstance(ptype, str):
ptype = [ptype] * len(x)
if isinstance(size, int):
size = [size] * len(x)
if isinstance(width, int):
width = [width] * len(x)
    if isinstance(text, str):
text = [text] * len(x)
elif text is None:
text = list(range(len(x)))
    if isinstance(tag, str):
tag = [tag] * len(x)
    if isinstance(c, str):
c = [c] * len(x)
regions = []
# fmt = ('point(%12.8f,%12.8f) # \
# point=%s %s width=%s text={%s} color=%s tag={%s}\n')
    for i in range(len(x)):
s = 'point({:.8f},{:.8f}) # point={} {} width={} text={{{}}} color={} tag={}\n'\
.format(x[i], y[i], ptype[i], size[i], width[i], text[i], c[i], tag[i])
regions.append(s)
if filename is not None:
fh = open(filename,'w')
fh.writelines(header + regions)
fh.close()
return header, regions | 9e2c67c8a681ba7abdd55e7f456079b32ed50688 | 3,656,592 |
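# Usage sketch: write two image-coordinate points to a DS9 region file
# ('example.reg' is an illustrative filename).
header, regions = write_DS9reg([100.0, 150.0], [200.0, 250.0],
                               filename="example.reg", c="red", text=["a", "b"])
print(regions[0])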
def checkInputDataValid(lstX:list=None,lstY:list=None,f:object=None)->(int,tuple):
"""
:param lstX:
:param lstY:
:param f:
:return: int, (int,list, int,int)
"""
ret=-1
rettuple=(-1,[],-1,-1)
if lstX is None or lstY is None:
msg = "No input lists of arrays"
msg2log(None, msg, f)
return ret,rettuple
if not lstX or not lstY:
msg = "Empty input lists of arrays"
msg2log(None, msg, f)
return ret,rettuple
k=len(lstX)
k1=len(lstY)
if (k1 != k):
msg = "The input lists have a different naumber items: {} vs {}".format(k,k1)
msg2log(None, msg, f)
return ret,rettuple
lstP=[]
lstN=[]
lstNy=[]
for item in lstX:
X:np.array=item
(n,p)=X.shape
lstP.append(p)
lstN.append(n)
for item in lstY:
y:np.array=item
(n,)=y.shape
lstNy.append(n)
p=lstP[0]
for i in range(len(lstP)):
if p!=lstP[i]:
msg="The feature nimbers are different: {} vs {}".format(p,lstP[i])
msg2log(None,msg,f)
return ret,rettuple
if lstN!=lstNy:
msg="Different sample sizes:\n{}\n{}".format(lstN,lstNy)
msg2log(None, msg, f)
return ret,rettuple
rettuple=(k,lstN,p,sum(lstN))
ret=0
return ret,rettuple | daacee0ee3803c02c04fe2b7213c6f8d408b39f6 | 3,656,593 |
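# Usage sketch for checkInputDataValid above (the arrays are synthetic):
# two datasets with matching feature counts pass every consistency check.
X1, y1 = np.random.rand(10, 3), np.random.rand(10)
X2, y2 = np.random.rand(8, 3), np.random.rand(8)
ret, (k, sizes, p, n_total) = checkInputDataValid([X1, X2], [y1, y2], None)
# ret == 0, k == 2, sizes == [10, 8], p == 3, n_total == 18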
from xml.dom import Node

def parseManualTree(node):
"""Parses a tree of the manual Main_Page and returns it through a list containing tuples:
[(title, href, [(title, href, [...]), ...]), ...]"""
if node.nodeType != Node.ELEMENT_NODE: return []
result = []
lastadded = None
for e in node.childNodes:
if e.nodeType == Node.ELEMENT_NODE:
if e.localName == "ol":
                assert lastadded is not None
                for i in range(len(result)):
if result[i][:2] == lastadded:
result[i] = lastadded + (parseManualTree(e),)
elif e.localName == "a":
href, title = parseAnchor(e)
lastadded = title, href
result.append((title, href, None))
return result | 6b62e9ad3b3ef4f3a0c6c60a931f1f2e940fe0f9 | 3,656,594 |
from collections import defaultdict
from typing import Dict, List, Optional, Tuple, Union

import networkx as nx
from tqdm import tqdm
def validation_by_method(mapping_input: Union[List, Dict[str, List]],
graph: nx.Graph,
kernel: Matrix,
k: Optional[int] = 100
) -> Tuple[Dict[str, list], Dict[str, list]]:
"""Repeated holdout validation by diffustion method.
:param mapping_input: List or value dictionary of labels {'label':value}.
:param graph: Network as a graph object.
:param kernel: Network as a kernel.
:param k: Iterations for the repeated_holdout validation.
"""
auroc_metrics = defaultdict(list)
auprc_metrics = defaultdict(list)
for _ in tqdm(range(k)):
input_diff, validation_diff = _get_random_cv_split_input_and_validation(
mapping_input, kernel
)
scores_z = diffuse_raw(graph=None, scores=input_diff, k=kernel, z=True)
scores_raw = diffuse_raw(graph=None, scores=input_diff, k=kernel, z=False)
scores_page_rank = generate_pagerank_baseline(graph, kernel)
method_validation_scores = {
'raw': (validation_diff,
scores_raw
),
'z': (validation_diff,
scores_z
),
'random': (
validation_diff,
_generate_random_score_ranking(kernel)
),
'page_rank': (
validation_diff,
scores_page_rank
),
}
for method, validation_set in method_validation_scores.items():
try:
auroc, auprc = _get_metrics(*validation_set)
except ValueError:
auroc, auprc = (0, 0)
print(f'ROC AUC unable to calculate for {validation_set}')
auroc_metrics[method].append(auroc)
auprc_metrics[method].append(auprc)
return auroc_metrics, auprc_metrics | b7ce9e72af55dc6d111948cb393f5e07b7fedd68 | 3,656,595 |
def get_about_agent():
"""
    This method returns general information about the agent, such as its name and its "about" description.
    Args:
        token: Authentication token taken from the request JSON body.
"""
data = request.get_json()
if "token" in data:
channel = get_channel_id(data["token"])
if channel is not None:
agent = channel.agent
return {"about": agent.about, "name": agent.name}
else:
return {"message": "token is no correct", "status": False}
else:
return {"message": "token is no correct", "status": False} | ca4301a9de5d4cb711892a221d4c984489c1e329 | 3,656,596 |
from io import StringIO

from lxml import etree

import iati.utilities
def add_namespace(tree, new_ns_name, new_ns_uri):
"""Add a namespace to a Schema.
Args:
tree (etree._ElementTree): The ElementTree to add a namespace to.
new_ns_name (str): The name of the new namespace. Must be valid against https://www.w3.org/TR/REC-xml-names/#NT-NSAttName
new_ns_uri (str): The URI for the new namespace. Must be non-empty and valid against https://www.ietf.org/rfc/rfc2396.txt
Returns:
etree.ElementTree: A copy of the provided `tree`, modified to include the specified namespace.
Raises:
TypeError: If an attempt is made to add a namespace to something other than a ElementTree.
ValueError: If the namespace name or URI are invalid values.
ValueError: If the namespace name already exists.
Note:
lxml does not allow modification of namespaces within a tree that already exists. As such, string manipulation is used. https://bugs.launchpad.net/lxml/+bug/555602
Todo:
Also add new namespaces to Datasets.
Add checks for the format of new_ns_name - for syntax, see: https://www.w3.org/TR/REC-xml-names/#NT-NSAttName
Add checks for the format of new_ns_uri - for syntax, see: https://www.ietf.org/rfc/rfc2396.txt
Tidy this up.
"""
if not isinstance(tree, etree._ElementTree): # pylint: disable=protected-access
msg = "The `tree` parameter must be of type `etree._ElementTree` - it was of type {0}".format(type(tree))
iati.utilities.log_error(msg)
raise TypeError(msg)
if not isinstance(new_ns_name, str) or not new_ns_name:
msg = "The `new_ns_name` parameter must be a non-empty string."
iati.utilities.log_error(msg)
raise ValueError(msg)
if not isinstance(new_ns_uri, str) or not new_ns_uri:
msg = "The `new_ns_uri` parameter must be a valid URI."
iati.utilities.log_error(msg)
raise ValueError(msg)
initial_nsmap = tree.getroot().nsmap
# prevent modification of existing namespaces
if new_ns_name in initial_nsmap:
if new_ns_uri == initial_nsmap[new_ns_name]:
return tree
else:
msg = "There is already a namespace called {0}.".format(new_ns_name)
iati.utilities.log_error(msg)
raise ValueError(msg)
# to add new namespace, use algorithm from http://stackoverflow.com/a/11350061
schema_str = etree.tostring(tree.getroot(), pretty_print=True).decode('unicode_escape')
interim_tree = etree.ElementTree(element=None, file=StringIO(schema_str))
root = interim_tree.getroot()
nsmap = root.nsmap
nsmap[new_ns_name] = new_ns_uri
new_root = etree.Element(root.tag, nsmap=nsmap)
new_root[:] = root[:]
new_tree = etree.ElementTree(new_root)
return new_tree | acd187389ef6de08aec2c4760212af40d4083fd1 | 3,656,597 |
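# Usage sketch for add_namespace above (namespace name and URI are made up
# for illustration; assumes lxml and the surrounding iati.utilities helpers
# are importable): add an extra namespace to a minimal XSD tree.
schema = etree.ElementTree(etree.fromstring(
    '<xsd:schema xmlns:xsd="http://www.w3.org/2001/XMLSchema"/>'))
extended = add_namespace(schema, 'ex', 'http://example.com/extension')
# extended.getroot().nsmap now contains both 'xsd' and 'ex'.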
import numpy as np

def RZ(angle, invert):
"""Return numpy array with rotation gate around Z axis."""
gate = np.zeros(4, dtype=complex).reshape(2, 2)
if not invert:
gate[0, 0] = np.cos(-angle/2) + np.sin(-angle/2) * 1j
gate[1, 1] = np.cos(angle/2) + np.sin(angle/2) * 1j
else:
gate[0, 0] = np.cos(-angle/2) - np.sin(-angle/2) * 1j
gate[1, 1] = np.cos(angle/2) - np.sin(angle/2) * 1j
return gate | d99839fa49d92edea8d98653fd7a38861e6f49d8 | 3,656,598 |
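# Usage sketch for RZ above: a pi/2 rotation about Z applied to the equal
# superposition (|0> + |1>)/sqrt(2) only changes the relative phase.
state = np.array([1, 1], dtype=complex) / np.sqrt(2)
rotated = RZ(np.pi / 2, invert=False) @ state
# rotated == [exp(-1j*pi/4), exp(1j*pi/4)] / sqrt(2) up to float error;
# the measurement probabilities |amplitude|**2 are unchanged.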
def _create_unicode(code: str) -> str:
"""
    Prepend the escaping unicode sequence to a colour code.
    :param code: The code, preferably an ASCII escape colour code.
    :return: The ANSI escape sequence for the given code.
"""
return u'\u001b[{}m'.format(code) | 523973766d4f18daca8870e641ac77967b715532 | 3,656,599 |
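# Usage sketch for _create_unicode above: '31' is the ANSI code for a red
# foreground and '0' resets the styling, so the word prints in red.
print(_create_unicode('31') + 'error' + _create_unicode('0'))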