def random_value(t):
"""
Construct a stream of values in type t
"""
if isinstance(t, TBag):
yield Bag()
for v in random_value(t.elem_type):
yield Bag((v,))
for v1 in random_value(t.elem_type):
for v2 in random_value(t.elem_type):
yield Bag((v1, v2))
elif isinstance(t, TInt):
yield random.randint(0, 100)
yield 0
elif isinstance(t, TNative):
yield (t.name, 0)
elif isinstance(t, TFloat):
yield random.randint(0, 100) / 100.0
yield 0.0
elif isinstance(t, TBool):
yield True
yield False
elif isinstance(t, TString):
yield ''.join(random.choice(string.ascii_letters) for _ in range(8))
elif isinstance(t, TRecord):
iterables = [random_value(ty) for _, ty in t.fields]
for vs in product(*iterables):
yield FrozenDict({field[0]: v for v, field in zip(vs, t.fields)})
else:
        raise Exception("Unknown type for random value construction: {}".format(t))
def get_pool_name(pool_id):
"""Returns AS3 object name for TLS profiles related to pools
:param pool_id: octavia pool id
:return: AS3 object name
"""
    return "{}{}".format(constants.PREFIX_TLS_POOL, pool_id)
def simulate_processing():
"""Simulate daily processing and capture provenance."""
options.simulate = True
options.test = True
sequence_list = build_sequences(options.date)
# simulate data calibration and reduction
for sequence in sequence_list:
processed = False
for sub_list in sequence.subrun_list:
if sub_list.runobj.type == "PEDCALIB":
args_cal = parse_template(calibration_sequence_job_template(sequence), 0)
simulate_calibration(args_cal)
elif sub_list.runobj.type == "DATA":
with mp.Pool() as poolproc:
args_proc = [
parse_template(data_sequence_job_template(sequence), subrun_idx)
for subrun_idx in range(sub_list.subrun)
]
processed = poolproc.map(simulate_subrun_processing, args_proc)
drs4_pedestal_run_id = str(sequence.pedestal).split(".")[1].replace("Run", "")
pedcal_run_id = str(sequence.calibration).split(".")[1].replace("Run", "")
# produce prov if overwrite prov arg
if processed and options.provenance:
command = "provprocess"
args_pp = [
command,
"-c",
options.configfile,
drs4_pedestal_run_id,
pedcal_run_id,
sequence.run_str,
options.directory,
options.prod_id,
]
log.info(f"Processing provenance for run {sequence.run_str}")
            subprocess.run(args_pp, check=True)
def search_sliceable_by_yielded_chunks_for_str(sliceable, search_string, starting_index, down, case_insensitive):
"""This is the main entry point for everything in this module."""
for chunk, chunk_start_idx in search_chunk_yielder(sliceable, starting_index, down):
found_at_chunk_idx = search_list_for_str(chunk, search_string, 0 if down else len(chunk) - 1, down, case_insensitive)
if found_at_chunk_idx is not None:
return found_at_chunk_idx + chunk_start_idx
    return None
def split_page(array, limit, index):
"""
    Split the array into pages of `limit` items and return the page that `index`
    points to (1-based).
    :param array: the array to split
    :param limit: the number of items per page
    :param index: the 1-based index of the page to return
    :return: the slice of the array for that page
"""
end = index * limit
start = end - limit
    return array[start:end]
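
# A hedged usage sketch for split_page, assuming 1-based page indices (implied by
# end = index * limit): page 2 of a 10-element list at 3 items per page.
assert split_page(list(range(10)), 3, 2) == [3, 4, 5]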
def update_metadata(desc, key, value):
"""
Update the metadata of a package descriptor.
If the key doesn't exist in the metadata yet the key-value pair is added.
If the key exists and the existing value as well as the passed value are
dictionaries the existing value is updated with the passed value.
If the key exists and the existing value as well as the passed value are
lists the existing value is extended with the passed value.
If the key exists and the existing value as well as the passed value are
sets the existing value is union updated with the passed value.
Otherwise the existing value is overwritten with the passed value.
If the types were different a warning message is logged.
:param desc: the package descriptor
:param key: The key
:param value: The value
"""
if key not in desc.metadata:
# add value to the metadata
# copy value to avoid changes to either of them to affect each other
desc.metadata[key] = copy.deepcopy(value)
return
old_value = desc.metadata[key]
if isinstance(old_value, dict) and isinstance(value, dict):
# update dictionary
old_value.update(value)
return
if isinstance(old_value, list) and isinstance(value, list):
# extend list
old_value += value
return
if isinstance(old_value, set) and isinstance(value, set):
# union update set
old_value |= value
return
if type(old_value) != type(value):
logger.warning(
"update package '{desc.name}' metadata '{key}' from value "
"'{old_value}' to '{value}'".format_map(locals()))
# overwrite existing value
# copy value to avoid changes to either of them to affect each other
    desc.metadata[key] = copy.deepcopy(value)
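
# A hedged usage sketch for update_metadata with a minimal stand-in descriptor
# (SimpleNamespace with `name` and a `metadata` dict; the real package descriptor
# class is not shown in this snippet). Lists are extended and dicts merged in place.
import copy  # update_metadata relies on a module-level `copy` import
from types import SimpleNamespace

demo_desc = SimpleNamespace(name='demo_pkg', metadata={})
update_metadata(demo_desc, 'maintainers', ['alice'])
update_metadata(demo_desc, 'maintainers', ['bob'])            # list -> extended
update_metadata(demo_desc, 'export', {'build_type': 'cmake'})
update_metadata(demo_desc, 'export', {'jobs': 4})             # dict -> merged
assert demo_desc.metadata == {'maintainers': ['alice', 'bob'],
                              'export': {'build_type': 'cmake', 'jobs': 4}}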
def creatKdpCols(mcTable, wls):
"""
Create the KDP column
Parameters
----------
mcTable: output from getMcSnowTable()
    wls: wavelengths (iterable) [mm]
Returns
-------
mcTable with an empty column 'sKDP_*' for
storing the calculated KDP of a given wavelength.
"""
for wl in wls:
wlStr = '{:.2e}'.format(wl)
mcTable['sKDP_{0}'.format(wlStr)] = np.ones_like(mcTable['time'])*np.nan
    return mcTable
def create_db():
"""Creates the db tables."""
    db.create_all()
def test_range(init, final):
"""Test a range of numbers, using range with initial and final values."""
init = validate_num(init)
final = validate_num(final)
if init and final and final > init:
final += 1
for i in range(init, final):
steps = 0
new_value = i
while new_value != 1:
if steps > 10000:
print('10000 steps exceeded! Investigate ' + str(i) +
                      ' further!')
break
steps += 1
if new_value % 2 == 0:
new_value = new_value / 2
else:
new_value = new_value * 3 + 1
print(str(i) + ' converged to 1 in ' + str(steps) + ' steps')
else:
print('Exiting function.')
        print('Example usage: test_range(1,1000)')
def get_products_by_user(user_openid, allowed_keys=None, filters=None):
"""Get all products that user can manage."""
return IMPL.get_products_by_user(user_openid, allowed_keys=allowed_keys,
                                     filters=filters)
def sample_from_ensemble(models, params, weights=None, fallback=False, default=None):
"""Sample models in proportion to weights and execute with
model_params. If fallback is true then call different model from
ensemble if the selected model throws an error. If Default is not
None then return default if all models fail
"""
if len(models) > 1:
model = ergo.random_choice(models, weights)
else:
model = models[0]
try:
result = model(**params)
if np.isnan(result):
raise KeyError
return result
except (KeyError, IndexError):
if fallback and len(models) > 1:
models_copy = models.copy()
            # weights may be None (uniform weighting); guard before copying/deleting
            weights_copy = list(weights) if weights is not None else None
            i = models.index(model)
            del models_copy[i]
            if weights_copy is not None:
                del weights_copy[i]
return sample_from_ensemble(
models_copy, params, weights_copy, fallback, default
)
        return default
def Flatten(matrix):
"""Flattens a 2d array 'matrix' to an array."""
array = []
for a in matrix:
array += a
    return array
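
# Quick check of Flatten on a ragged 2-D list (rows of different lengths).
assert Flatten([[1, 2], [3], [4, 5, 6]]) == [1, 2, 3, 4, 5, 6]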
def create_verification_token(
data: dict
) -> VerificationTokenModel:
"""
Save a Verification Token instance to database.
Args:
data (dictionary):
Returns:
VerificationToken:
Verification Token entity of VerificationTokenModel object
Raises:
None
"""
orm_verification_token = VerificationTokenModel(
user_id=data.get('user_id'),
token_type=data.get('token_type', 'SMS'),
token=True
)
orm_verification_token.save()
    return orm_verification_token
def upload_file(file_name, bucket, s3_client, object_name=None):
"""Upload a file to an S3 bucket
:param file_name: File to upload
:param bucket: Bucket to upload to
:param s3_client: boto3 s3 client
:param object_name: S3 object name. If not specified then file_name is used
    :return: None; raises ClientError if the upload fails
"""
# If S3 object_name was not specified, use file_name
if object_name is None:
object_name = file_name
# Upload the file
try:
print(file_name)
print(bucket)
print(object_name)
s3_client.upload_file(file_name, bucket, object_name)
except ClientError as e:
print(e)
        raise e
def _find_highest_cardinality(arrays: Union[int, Sequence, np.ndarray, Tuple]) -> int:
"""Find the highest cardinality of the given array.
Args:
arrays: a list of arrays or a single array
Returns:
The highest cardinality of the given array.
"""
    return max([len(array) for array in arrays if hasattr(array, "__len__")] + [1])
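
# A hedged usage sketch: items without __len__ (plain scalars) are skipped, and the
# appended [1] guarantees a minimum result of 1.
import numpy as np
assert _find_highest_cardinality([3, [1, 2], np.array([4, 5, 6])]) == 3
assert _find_highest_cardinality([7]) == 1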
def get_yesterday() -> tuple:
"""Get yesterday`s date and split it to year,month and day strings"""
logging.debug("Starting get_yesterday function.")
today = datetime.now(pytz.timezone("America/New_York"))
yesterday = (today - timedelta(days=1)).strftime("%Y-%m-%d")
yesterday_split = yesterday.split("-")
year = yesterday_split[0]
month = yesterday_split[1]
day = yesterday_split[2]
    return year, month, day
def rowmap(table, rowmapper, header, failonerror=False):
"""
Transform rows via an arbitrary function. E.g.::
>>> import petl as etl
>>> table1 = [['id', 'sex', 'age', 'height', 'weight'],
... [1, 'male', 16, 1.45, 62.0],
... [2, 'female', 19, 1.34, 55.4],
... [3, 'female', 17, 1.78, 74.4],
... [4, 'male', 21, 1.33, 45.2],
... [5, '-', 25, 1.65, 51.9]]
>>> def rowmapper(row):
... transmf = {'male': 'M', 'female': 'F'}
... return [row[0],
... transmf[row['sex']] if row['sex'] in transmf else None,
... row.age * 12,
... row.height / row.weight ** 2]
...
>>> table2 = etl.rowmap(table1, rowmapper,
... header=['subject_id', 'gender', 'age_months',
... 'bmi'])
>>> table2
+------------+--------+------------+-----------------------+
| subject_id | gender | age_months | bmi |
+============+========+============+=======================+
| 1 | 'M' | 192 | 0.0003772112382934443 |
+------------+--------+------------+-----------------------+
| 2 | 'F' | 228 | 0.0004366015456998006 |
+------------+--------+------------+-----------------------+
| 3 | 'F' | 204 | 0.0003215689675106949 |
+------------+--------+------------+-----------------------+
| 4 | 'M' | 252 | 0.0006509906805544679 |
+------------+--------+------------+-----------------------+
| 5 | None | 300 | 0.0006125608384287258 |
+------------+--------+------------+-----------------------+
The `rowmapper` function should accept a single row and return a single
row (list or tuple).
"""
    return RowMapView(table, rowmapper, header, failonerror=failonerror)
def squeeze(dataset, how: str = 'day'):
"""
Squeezes the data in dataset by close timestamps
Args:
dataset (DataFrame) - the data to squeeze
how (str) - one of 'second', 'minute', 'hour', 'day', 'month' (default day)
Returns:
dataset (DataFrame) - a dataframe where the indexes are squeezed together by closely related timestamps
determined by parameter how
"""
    return dataset.groupby(by = lambda ts: timestamp_floor(ts, how = how))
def callback():
"""
Process response for "Login" try from Dropbox API.
If all OK - redirects to ``DROPBOX_LOGIN_REDIRECT`` url.
Could render template with error message on:
* oAuth token is not provided
* oAuth token is not equal to request token
* Error response from Dropbox API
Default template to render is ``'dropbox/callback.html'``, you could
overwrite it with ``DROPBOX_CALLBACK_TEMPLATE`` config var.
"""
# Initial vars
dropbox = current_app.extensions['dropbox']
template = dropbox.DROPBOX_CALLBACK_TEMPLATE or 'dropbox/callback.html'
# Get oAuth token from Dropbox
oauth_token = request.args.get('oauth_token')
if not oauth_token:
return render_template(template, error_oauth_token=True)
# oAuth token **should** be equal to stored request token
try:
key, secret = session.get(DROPBOX_REQUEST_TOKEN_KEY) or (None, None)
except ValueError:
return render_template(template, error_request_token=True)
if oauth_token != key:
return render_template(template, error_not_equal_tokens=True)
# Do login with current request token
try:
dropbox.login(OAuthToken(key, secret))
except ErrorResponse as e:
return render_template(template, error_response=True, error=e)
# Redirect to resulted page
redirect_to = safe_url_for(dropbox.DROPBOX_LOGIN_REDIRECT or '/')
    return redirect(redirect_to)
def get_ws_param(args, attr):
    """Get the corresponding warm-start parameter; if it does not exist, use the value of the general parameter."""
assert hasattr(args, attr), 'Invalid warm start parameter!'
val = getattr(args, attr)
if hasattr(args, 'ws_' + attr):
ws_val = getattr(args, 'ws_' + attr)
if isinstance(ws_val, str):
ws_val = ws_val.strip()
if ws_val or isinstance(ws_val, list) or isinstance(ws_val, int) or isinstance(ws_val, float):
val = ws_val
    return val
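
# A hedged usage sketch for get_ws_param, using argparse.Namespace as a stand-in
# for the real args object: 'ws_lr' overrides 'lr' when set and non-empty, while an
# empty 'ws_batch_size' falls back to the general value.
import argparse
demo_args = argparse.Namespace(lr=0.1, ws_lr=0.01, batch_size=32, ws_batch_size='')
assert get_ws_param(demo_args, 'lr') == 0.01
assert get_ws_param(demo_args, 'batch_size') == 32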
def _redundant_relation(lex: lmf.Lexicon, ids: _Ids) -> _Result:
"""redundant relation between source and target"""
redundant = _multiples(chain(
((s['id'], r['relType'], r['target']) for s, r in _sense_relations(lex)),
((ss['id'], r['relType'], r['target']) for ss, r in _synset_relations(lex)),
))
    return {src: {'type': typ, 'target': tgt} for src, typ, tgt in redundant}
def get_ph_bs_symm_line(bands_path, has_nac=False, labels_dict=None):
"""
Creates a pymatgen PhononBandStructure from a band.yaml file.
The labels will be extracted from the dictionary, if present.
If the 'eigenvector' key is found the eigendisplacements will be
calculated according to the formula:
    \\exp(2*pi*i*(frac_coords \\dot q)) / sqrt(mass) * v
and added to the object.
Args:
bands_path: path to the band.yaml file
        has_nac: True if the data have been obtained with the --nac option.
            Default False.
labels_dict: dict that links a qpoint in frac coords to a label.
"""
    return get_ph_bs_symm_line_from_dict(loadfn(bands_path), has_nac, labels_dict)
def get_peer_ip(result_host_dic: dict):
"""
find peer multi address based on peerID
:param result_host_dic: [provider_peerID : who provides (peerID)]
:return: dic {provider_peerID : Address[]}
"""
provider_ip = {}
for peer in result_host_dic.keys():
process = subprocess.Popen(['/root/ipfs_bin/ipfs', 'dht', 'findpeer', peer], stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
try:
r_code = process.wait(timeout=300)
if r_code != 0:
logging.info(f"Error on IPFS findpeer with Peer {peer} and exit code {r_code}")
provider_ip[peer] = []
return provider_ip
# case of no route find
for line in process.stderr.readlines():
if 'Error' in str(line):
logging.info(f"Error on IPFS findpeer with Peer {peer} output {str(line)}")
provider_ip[peer] = []
return provider_ip
provider_ip[peer] = []
with open(f'{peer}_ip.txt', 'w+') as stdout:
for line in process.stdout.readlines():
line = line.decode('utf-8')
# store all peer ip
stdout.write(line)
line = line.replace("\n", "")
line = line.split("/")
ip_type = line[1]
ip_value = line[2]
protocol = line[3]
port = line[4]
if ip_type == 'ip6' and ip_value == '::1':
# local v6 ignore
continue
elif ip_type == 'ip4':
# exclude private ip address
if ipaddress.ip_address(ip_value) in ipaddress.IPv4Network('10.0.0.0/8') or \
ipaddress.ip_address(ip_value) in ipaddress.IPv4Network('172.16.0.0/12') or \
ipaddress.ip_address(ip_value) in ipaddress.IPv4Network('127.0.0.0/8') or \
ipaddress.ip_address(ip_value) in ipaddress.IPv4Network('192.168.0.0/16'):
continue
# add valid ip address info
logging.info(f'Peer {peer} has external IP {ip_value}:{port}, {ip_type}, {protocol}')
if peer not in provider_ip.keys():
provider_ip[peer] = []
address = Address(ip_value, ip_type, port, protocol)
provider_ip[peer].append(address)
except subprocess.TimeoutExpired as e:
logging.info(f"Timeout for {peer}")
    return provider_ip
def LinterPath():
"""Ascertain the dxl.exe path from this .py files path because sublime.packages_path is unavailable at startup."""
ThisPath = abspath(dirname(__file__))
if isfile(ThisPath):
# We are in a .sublime-package file in the 'Installed Package' folder
return abspath(join(ThisPath, '..', '..', 'Packages', 'DXL', 'Lint', 'dxl.exe'))
else:
# We are in a subfolder of the 'Packages' folder
        return abspath(join(ThisPath, '..', 'DXL', 'Lint', 'dxl.exe'))
def encrypt_and_encode(data, key):
    """ Encrypts and encodes `data` using `key` """
    return base64.urlsafe_b64encode(aes_encrypt(data, key))
def yum(packages):
"""Install yum packages.
Args:
packages (list): Yum packages to install.
"""
try:
if not shutil.which("yum"):
return
logging.info('Installing prerequisites using "yum".')
devtools = "sudo -E yum groupinstall -y --skip-broken " '"Development Tools"'
logging.debug("Yum install: %s", devtools)
output = execute.output(devtools)
logging.debug("Yum output: %s", output)
for package in packages:
cmd = "sudo -E yum install -y --skip-broken " + package
logging.debug("Yum install: %s", cmd)
output = execute.output(cmd)
logging.debug("Yum output: %s", output)
except IOError as err:
logging.debug(err)
except ValueError as err:
logging.debug(err)
except TypeError as err:
        logging.debug(err)
def _get_undelimited_identifier(identifier):
"""
Removes delimiters from the identifier if it is delimited.
"""
if pd.notna(identifier):
identifier = str(identifier)
if _is_delimited_identifier(identifier):
return identifier[1:-1]
    return identifier
def yield_rxn_syst(output_dir, pars, file=None, verbose=False):
"""
Iterate over reaction systems for analysis.
"""
if file is None:
react_syst_files = sorted(glob.glob(join(
output_dir,
'sRS-*.gpkl'
)))
else:
react_syst_files = []
with open(file, 'r') as f:
for line in f.readlines():
if exists(line.strip()):
react_syst_files.append(line.strip())
for rsf in react_syst_files:
rs = get_RS(
filename=rsf,
output_dir=output_dir,
pars=pars,
verbose=verbose
)
        yield len(react_syst_files), rs
def build_movie_json(mongodb_result, hug_timer):
"""
For reducing the duplicate lines in the 'get_goat_movies' function.
TODO: Modify nodejs code if integrating this info!
"""
combined_json_list = []
movie_vote_quantities = []
for result in mongodb_result:
#print(result)
total_votes = int(result['goat_upvotes'] + result['goat_downvotes'])
movie_vote_quantities.append(total_votes)
#median_vote_quantity = np.median(movie_vote_quantities)
mean_vote_quantity = np.mean(movie_vote_quantities)
std_deviation = np.std(movie_vote_quantities)
for result in mongodb_result:
total_result_votes = int(result['goat_upvotes'] + result['goat_downvotes'])
goat_score = int((result['goat_upvotes'] / total_result_votes)*100) # % of votes that are upvotes
#absolute_diff = abs(total_result_votes - median_vote_quantity) # Median vs Mean for identifying outliers?
absolute_diff = abs(total_result_votes - mean_vote_quantity) # Median vs Mean for identifying outliers?
if (absolute_diff <= 2*std_deviation):
# If within 2 std deviations, don't punish goat_score!
adjustment = 1
else:
# If they have greater than 2*std_deviation then we punish their score
adjustment = 1 - (((absolute_diff/std_deviation) - 2) * 0.1) # 10% per 1 std dev past 2nd
adjusted_goat_score = int(goat_score * adjustment)
combined_json_list.append({'imdbID': result['imdbID'],
'year': result['year'],
'title': result['title'],
'imdb_rating': result['imdbRating'],
'runtime': result['runtime'],
'upvotes': result['goat_upvotes'],
'downvotes': result['goat_downvotes'],
                                   'adjustment': adjustment,
'goat_score': adjusted_goat_score})
    return combined_json_list
def _pick_keywords(db):
"""
    Go through the downloaded data stored in `db` and filter keywords, which are
parsed and then yielded.
Shows nice progress bar.
Args:
db (obj): Opened database connection.
Yields:
        obj: :class:`KeywordInfo` instances for each keyword.
"""
for key, val in tqdm(db.iteritems(), total=len(db)):
# skip counter of the last downloaded document
if key == "last_id":
continue
# this is optimization to speed up skipping of the unwanted elements
# by the factor of ~20
piece = val[:500] if len(val) > 500 else val
if '<fixfield id="001">ph' not in piece.lower():
continue
parsed = MARCXMLRecord(val)
code = parsed.get("001")
if not code:
continue
# record was disabled
if parsed["682i"]:
continue
if code.lower().startswith("ph"):
yield KeywordInfo.from_marc(
sysno=int(key.split("_")[-1]), # item_xxx -> int(xxx)
marc=parsed,
            )
def has_product_been_used(uuid):
"""Check if this product has been used previously."""
existing = existing_processed_products()
if not isinstance(existing, pd.DataFrame):
return False
has_uuid = not existing.query("uuid == @uuid").empty
    return has_uuid
def _FilterMemberData(
mr, owner_ids, committer_ids, contributor_ids, indirect_member_ids,
project):
"""Return a filtered list of members that the user can view.
In most projects, everyone can view the entire member list. But,
some projects are configured to only allow project owners to see
all members. In those projects, committers and contributors do not
see any contributors. Regardless of how the project is configured
or the role that the user plays in the current project, we include
any indirect members through user groups that the user has access
to view.
Args:
mr: Commonly used info parsed from the HTTP request.
    owner_ids: list of user IDs for project owners.
    committer_ids: list of user IDs for project committers.
    contributor_ids: list of user IDs for project contributors.
    indirect_member_ids: list of user IDs for users who have
an indirect role in the project via a user group, and that the
logged in user is allowed to see.
project: the Project we're interested in.
Returns:
A list of owners, committer and visible indirect members if the user is not
signed in. If the project is set to display contributors to non-owners or
the signed in user has necessary permissions then additionally a list of
contributors.
"""
visible_members_ids = set()
# Everyone can view owners and committers
visible_members_ids.update(owner_ids)
visible_members_ids.update(committer_ids)
# The list of indirect members is already limited to ones that the user
# is allowed to see according to user group settings.
visible_members_ids.update(indirect_member_ids)
# If the user is allowed to view the list of contributors, add those too.
if permissions.CanViewContributorList(mr, project):
visible_members_ids.update(contributor_ids)
  return sorted(visible_members_ids)
def escape_name(name):
"""Escape sensor and request names to be valid Python identifiers."""
    return name.replace('.', '_').replace('-', '_')
def show_user_following(user_id):
"""Show list of people this user is following."""
user = User.query.get_or_404(user_id)
    return render_template('users/following.html', user=user)
def sqlsplit(sql, delimiter=";"):
"""A generator function for splitting out SQL statements according to the
specified delimiter. Ignores delimiter when in strings or comments."""
tokens = re.split("(--|'|\n|" + re.escape(delimiter) + "|\"|/\*|\*/)",
sql if isString(sql) else delimiter.join(sql))
statement = []
inComment = False
inLineComment = False
inString = False
inQuote = False
for t in tokens:
if not t:
continue
if inComment:
if t == "*/":
inComment = False
elif inLineComment:
if t == "\n":
inLineComment = False
elif inString:
if t == '"':
inString = False
elif inQuote:
if t == "'":
inQuote = False
elif t == delimiter:
sql = "".join(statement).strip()
if sql:
yield sql
statement = []
continue
elif t == "'":
inQuote = True
elif t == '"':
inString = True
elif t == "/*":
inComment = True
elif t == "--":
inLineComment = True
statement.append(t)
sql = "".join(statement).strip()
if sql:
        yield sql
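
# A hedged usage sketch for sqlsplit (assumes the module-level `re` import and the
# `isString` helper used above): the ';' inside the quoted literal does not split
# the statement, only the top-level delimiters do.
demo_statements = list(sqlsplit("SELECT 'a;b' AS v; UPDATE t SET c = 1;"))
assert demo_statements == ["SELECT 'a;b' AS v", "UPDATE t SET c = 1"]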
def logout():
"""
This API revokes all the tokens including access and refresh tokens that belong to the user.
"""
current_user = get_jwt_identity()
logout_user(current_user.get('id'))
    return jsonify(message="Token revoked."), 200
def multiset_counter(mset):
"""
    Return the sum of occurrences of elements present in a token ids multiset,
aka. the multiset cardinality.
"""
    return sum(mset.values())
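
# A hedged usage sketch: collections.Counter is one concrete token-id multiset.
from collections import Counter
assert multiset_counter(Counter([101, 101, 2003, 2017])) == 4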
def get_v6_subnet(address):
"""derive subnet number for provided ipv6 address
Args:
address (str): ipv6 address in string with mask
Returns:
str: subnet zero == network address
"""
    return IPv6(address).subnet_zero()
def get_ros_package_path(env=None):
"""
Get the current ROS_PACKAGE_PATH.
:param env: (optional) environment override, ``dict``
"""
if env is None:
env = os.environ
    return env.get(ROS_PACKAGE_PATH, None)
def load_scenario(file_name: str) -> Waypoint:
"""
Create an object Waypoint from a Scenario file
    :param file_name: name of the scenario YAML file inside waypoint_directory_path
    :return: a Waypoint instance built from the file contents
"""
# read file
with open(f"{waypoint_directory_path}/{file_name}", "r") as scenario_file:
scenario_data = yaml.load(scenario_file, Loader=yaml.FullLoader)
waypoint = Waypoint()
waypoint.build_from_json(scenario_data)
    return waypoint
def parseArguments(argv=None): # pragma: no cover
"""
I parse arguments in sys.argv and return the args object. The parser
itself is available as args.parser.
Adds the following members to args:
parser = the parser object
store_opt = the StoreOpt object
"""
store_opt = StoreOpt()
parser = argparse.ArgumentParser(
prog="green",
usage="%(prog)s [options] [target [target2 ...]]",
add_help=False,
description=dedent(
"""
Green is a clean, colorful, fast test runner for Python unit tests.
""".rstrip()
),
epilog=dedent(
"""
ENABLING SHELL COMPLETION
To enable bash- or zsh-completion, add the line below to the end of your
.bashrc or .zshrc file (or equivalent config file):
which green >& /dev/null && source "$( green --completion-file )"
Warning! Generating a completion list actually discovers and loads tests
-- this can be very slow if you run it in huge directories!
SETUP.PY RUNNER
To run green as a setup.py command, simply add green to the 'setup_requires'
section in the setup.py file, and specify a target as the 'test_suite'
parameter if you do not want green to load all the tests:
setup(
setup_requires = ['green'],
install_requires = 'myproject.tests'
)
Then simply run green as any other setup.py command (it accepts the same
parameters as the 'green' executable):
python setup.py green
python setup.py green -r # to run with coverage, etc.
CONFIG FILES
For documentation on config files, please see
https://github.com/CleanCut/green#config-files
""".rstrip()
),
formatter_class=argparse.RawDescriptionHelpFormatter,
)
target_args = parser.add_argument_group("Target Specification")
target_args.add_argument(
"targets",
action="store",
nargs="*",
metavar="target",
help=(
"""Targets to test. Any number of targets may be specified. If
blank, then discover all testcases in the current directory tree. Can
be a directory (or package), file (or module), or fully-qualified
'dotted name' like proj.tests.test_things.TestStuff. If a directory
(or package) is specified, then we will attempt to discover all tests
under the directory (even if the directory is a package and the tests
would not be accessible through the package's scope). In all other
cases, only tests accessible from introspection of the object will
be loaded."""
),
default=argparse.SUPPRESS,
)
concurrency_args = parser.add_argument_group("Concurrency Options")
store_opt(
concurrency_args.add_argument(
"-s",
"--processes",
action="store",
type=int,
metavar="NUM",
help="Number of processes to use to run tests. Note that your "
"tests need to be written to avoid using the same resources (temp "
"files, sockets, ports, etc.) for the multi-process mode to work "
"well (--initializer and --finalizer can help provision "
"per-process resources). Default is to run the same number of "
"processes as your machine has logical CPUs. Note that for a "
"small number of trivial tests, running everything in a single "
"process may be faster than the overhead of initializing all the "
"processes.",
default=argparse.SUPPRESS,
)
)
store_opt(
concurrency_args.add_argument(
"-i",
"--initializer",
action="store",
metavar="DOTTED_FUNCTION",
help="Python function to run inside of a single worker process "
"before it starts running tests. This is the way to provision "
"external resources that each concurrent worker process needs to "
"have exclusive access to. Specify the function in dotted "
"notation in a way that will be importable from the location you "
"are running green from.",
default=argparse.SUPPRESS,
)
)
store_opt(
concurrency_args.add_argument(
"-z",
"--finalizer",
action="store",
metavar="DOTTED_FUNCTION",
help="Same as --initializer, only run at the end of a worker "
"process's lifetime. Used to unprovision resources provisioned by "
"the initializer.",
default=argparse.SUPPRESS,
)
)
format_args = parser.add_argument_group("Format Options")
store_opt(
format_args.add_argument(
"-t",
"--termcolor",
action="store_true",
help="Force terminal colors on. Default is to autodetect.",
default=argparse.SUPPRESS,
)
)
store_opt(
format_args.add_argument(
"-T",
"--notermcolor",
action="store_true",
help="Force terminal colors off. Default is to autodetect.",
default=argparse.SUPPRESS,
)
)
store_opt(
format_args.add_argument(
"-W",
"--disable-windows",
action="store_true",
help="Disable Windows support by turning off Colorama",
default=argparse.SUPPRESS,
)
)
out_args = parser.add_argument_group("Output Options")
store_opt(
out_args.add_argument(
"-a",
"--allow-stdout",
action="store_true",
help=(
"Instead of capturing the stdout and stderr and presenting it "
"in the summary of results, let it come through. Note that "
"output from sources other than tests (like module/class setup "
"or teardown) is never captured."
),
default=argparse.SUPPRESS,
)
)
store_opt(
out_args.add_argument(
"-q",
"--quiet-stdout",
action="store_true",
            help=(
                "Instead of capturing the stdout and stderr and presenting it "
                "in the summary of results, discard it completely for successful "
"tests. --allow-stdout option overrides it."
),
default=argparse.SUPPRESS,
)
)
store_opt(
out_args.add_argument(
"-k",
"--no-skip-report",
action="store_true",
help=(
"Don't print the report of skipped tests "
"after testing is done. Skips will still show up in the progress "
"report and summary count."
),
default=argparse.SUPPRESS,
)
)
store_opt(
out_args.add_argument(
"-e",
"--no-tracebacks",
action="store_true",
help=("Don't print tracebacks for failures and " "errors."),
default=argparse.SUPPRESS,
)
)
store_opt(
out_args.add_argument(
"-h",
"--help",
action="store_true",
help="Show this help message and exit.",
default=argparse.SUPPRESS,
)
)
store_opt(
out_args.add_argument(
"-V",
"--version",
action="store_true",
help="Print the version of Green and Python and exit.",
default=argparse.SUPPRESS,
)
)
store_opt(
out_args.add_argument(
"-l",
"--logging",
action="store_true",
help="Don't configure the root logger to redirect to /dev/null, "
"enabling internal debugging output, as well as any output test (or "
"tested) code may be sending via the root logger.",
default=argparse.SUPPRESS,
)
)
store_opt(
out_args.add_argument(
"-d",
"--debug",
action="count",
help=(
"Enable internal debugging statements. Implies --logging. Can "
"be specified up to three times for more debug output."
),
default=argparse.SUPPRESS,
)
)
store_opt(
out_args.add_argument(
"-v",
"--verbose",
action="count",
help=(
"Verbose. Can be specified up to three times for more "
"verbosity. Recommended levels are -v and -vv."
),
default=argparse.SUPPRESS,
)
)
store_opt(
out_args.add_argument(
"-U",
"--disable-unidecode",
action="store_true",
            help=(
                "Disable unidecode which converts test output from unicode to "
"ascii by default on Windows to avoid hard-to-debug crashes."
),
default=argparse.SUPPRESS,
)
)
other_args = parser.add_argument_group("Other Options")
store_opt(
other_args.add_argument(
"-f",
"--failfast",
action="store_true",
help=("Stop execution at the first test that fails or errors."),
default=argparse.SUPPRESS,
)
)
store_opt(
other_args.add_argument(
"-c",
"--config",
action="store",
metavar="FILE",
help="Use this config file to override any values from "
"the config file specified by environment variable GREEN_CONFIG, "
"~/.green, and .green in the current working directory.",
default=argparse.SUPPRESS,
)
)
store_opt(
other_args.add_argument(
"-p",
"--file-pattern",
action="store",
metavar="PATTERN",
help="Pattern to match test files. Default is test*.py",
default=argparse.SUPPRESS,
)
)
store_opt(
other_args.add_argument(
"-n",
"--test-pattern",
action="store",
metavar="PATTERN",
help="Pattern to match test method names after "
"'test'. Default is '*', meaning match methods named 'test*'.",
default=argparse.SUPPRESS,
)
)
store_opt(
other_args.add_argument(
"-j",
"--junit-report",
action="store",
metavar="FILENAME",
help=("Generate a JUnit XML report."),
default=argparse.SUPPRESS,
)
)
cov_args = parser.add_argument_group(
"Coverage Options ({})".format(coverage_version)
)
store_opt(
cov_args.add_argument(
"-r",
"--run-coverage",
action="store_true",
help=("Produce coverage output."),
default=argparse.SUPPRESS,
)
)
store_opt(
cov_args.add_argument(
"-g",
"--cov-config-file",
action="store",
metavar="FILE",
            help=(
                "Specify a coverage config file. "
                "Implies --run-coverage. See the coverage documentation "
"at https://coverage.readthedocs.io/en/v4.5.x/config.html "
"for coverage config file syntax. The [run] and [report] sections "
"are most relevant."
),
default=argparse.SUPPRESS,
)
    )
store_opt(
cov_args.add_argument(
"-R",
"--quiet-coverage",
action="store_true",
help=(
"Do not print coverage report to stdout (coverage files will "
"still be created). Implies --run-coverage"
),
default=argparse.SUPPRESS,
)
)
store_opt(
cov_args.add_argument(
"-O",
"--clear-omit",
action="store_true",
help=(
"Green tries really hard to set up a good list of patterns of "
"files to omit from coverage reports. If the default list "
"catches files that you DO want to cover you can specify this "
"flag to leave the default list empty to start with. You can "
"then add patterns back in with --omit-patterns. The default "
"list is something like '*/test*,*/mock*,*(temp dir)*,*(python "
"system packages)*' -- only longer."
),
default=argparse.SUPPRESS,
)
)
store_opt(
cov_args.add_argument(
"-u",
"--include-patterns",
action="store",
metavar="PATTERN",
help=(
"Comma-separated file-patterns to include in coverage. This "
"implies that anything that does not match the include pattern is "
"omitted from coverage reporting."
),
default=argparse.SUPPRESS,
)
)
store_opt(
cov_args.add_argument(
"-o",
"--omit-patterns",
action="store",
metavar="PATTERN",
help=(
"Comma-separated file-patterns to omit from coverage. For "
"example, if coverage reported a file mypackage/foo/bar you could "
"omit it from coverage with 'mypackage*', '*/foo/*', or '*bar'"
),
default=argparse.SUPPRESS,
)
)
store_opt(
cov_args.add_argument(
"-m",
"--minimum-coverage",
action="store",
metavar="X",
type=int,
help=(
"Integer. A minimum coverage value. If "
"not met, then we will print a message and exit with a nonzero "
"status. Implies --run-coverage"
),
default=argparse.SUPPRESS,
)
)
integration_args = parser.add_argument_group("Integration Options")
store_opt(
integration_args.add_argument(
"--completion-file",
action="store_true",
help=(
"Location of the bash- and zsh-completion "
"file. To enable bash- or zsh-completion, see ENABLING SHELL "
"COMPLETION below."
),
default=argparse.SUPPRESS,
)
)
store_opt(
integration_args.add_argument(
"--completions",
action="store_true",
help=(
"Output possible completions of the given target. Used by "
"bash- and zsh-completion."
),
default=argparse.SUPPRESS,
)
)
store_opt(
integration_args.add_argument(
"--options",
action="store_true",
help="Output all options. Used by bash- and zsh-completion.",
default=argparse.SUPPRESS,
)
)
args = parser.parse_args(argv)
# Add additional members
args.parser = parser
args.store_opt = store_opt
    return args
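
# A hedged usage sketch: parse an explicit argv list instead of sys.argv (assumes
# the module-level StoreOpt and coverage_version used above are available). Because
# every default is argparse.SUPPRESS, unspecified options are simply absent.
demo_args = parseArguments(["-vv", "--processes", "2", "proj.tests"])
print(demo_args.targets)                 # ['proj.tests']
print(demo_args.processes)               # 2
print(hasattr(demo_args, "failfast"))    # False -- option not given, so suppressed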
def parse_example(serialized_example):
"""Parse a serialized example proto."""
features = tf.io.parse_single_example(
serialized_example,
dict(
beam_id=tf.io.FixedLenFeature(shape=[], dtype=tf.int64),
image_id=tf.io.FixedLenFeature(shape=[], dtype=tf.int64),
question_id=tf.io.FixedLenFeature(shape=[], dtype=tf.int64),
context=tf.io.FixedLenFeature(shape=[], dtype=tf.string),
question=tf.io.FixedLenFeature(shape=[], dtype=tf.string)))
    return features
def save_data(df):
"""
    Save data to a CSV file (the SQLAlchemy/SQLite path below is left commented out)
"""
#engine = create_engine('sqlite:///' + database_filepath)
    df.to_csv('data/messages_response.csv', index=False)
def _compute_bootstrap_quantiles_point_estimate_custom_bias_corrected_method(
metric_values: np.ndarray,
false_positive_rate: np.float64,
n_resamples: int,
random_seed: Optional[int] = None,
) -> Tuple[Number, Number]:
"""
An internal implementation of the "bootstrap" estimator method, returning a point estimate for a population
parameter of interest (lower and upper quantiles in this case). See
https://en.wikipedia.org/wiki/Bootstrapping_(statistics) for an introduction to "bootstrapping" in statistics.
The methods implemented here can be found in:
Efron, B., & Tibshirani, R. J. (1993). Estimates of bias. An Introduction to the Bootstrap (pp. 124-130).
Springer Science and Business Media Dordrecht. DOI 10.1007/978-1-4899-4541-9
This implementation is sub-par compared to the one available from the "SciPy" standard library
("https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.bootstrap.html"), in that it does not handle
multi-dimensional statistics. "scipy.stats.bootstrap" is vectorized, thus having the ability to accept a
multi-dimensional statistic function and process all dimensions.
Unfortunately, as of March 4th, 2022, the SciPy implementation has two issues: 1) it only returns a confidence
interval and not a point estimate for the population parameter of interest, which is what we require for our use
cases. 2) It can not handle multi-dimensional statistics and correct for bias simultaneously. You must either use
one feature or the other.
This implementation could only be replaced by "scipy.stats.bootstrap" if Great Expectations drops support for
Python 3.6, thereby enabling us to use a more up-to-date version of the "scipy" Python package (the currently used
version does not have "bootstrap"). Also, as discussed above, two contributions would need to be made to the SciPy
package to enable 1) bias correction for multi-dimensional statistics and 2) a return value of a point estimate for
the population parameter of interest (lower and upper quantiles in this case).
Additional future direction could include developing enhancements to bootstrapped estimator based on theory
presented in "http://dido.econ.yale.edu/~dwka/pub/p1001.pdf":
@article{Andrews2000a,
added-at = {2008-04-25T10:38:44.000+0200},
author = {Andrews, Donald W. K. and Buchinsky, Moshe},
biburl = {https://www.bibsonomy.org/bibtex/28e2f0a58cdb95e39659921f989a17bdd/smicha},
day = 01,
interhash = {778746398daa9ba63bdd95391f1efd37},
intrahash = {8e2f0a58cdb95e39659921f989a17bdd},
journal = {Econometrica},
keywords = {imported},
month = Jan,
note = {doi: 10.1111/1468-0262.00092},
number = 1,
pages = {23--51},
timestamp = {2008-04-25T10:38:52.000+0200},
title = {A Three-step Method for Choosing the Number of Bootstrap Repetitions},
url = {http://www.blackwell-synergy.com/doi/abs/10.1111/1468-0262.00092},
volume = 68,
year = 2000
}
The article outlines a three-step minimax procedure that relies on the Central Limit Theorem (C.L.T.) along with the
bootstrap sampling technique (see https://en.wikipedia.org/wiki/Bootstrapping_(statistics) for background) for
computing the stopping criterion, expressed as the optimal number of bootstrap samples, needed to achieve a maximum
probability that the value of the statistic of interest will be minimally deviating from its actual (ideal) value.
"""
lower_quantile_pct: float = false_positive_rate / 2
upper_quantile_pct: float = 1.0 - false_positive_rate / 2
sample_lower_quantile: np.ndarray = np.quantile(metric_values, q=lower_quantile_pct)
sample_upper_quantile: np.ndarray = np.quantile(metric_values, q=upper_quantile_pct)
if random_seed:
random_state: np.random.Generator = np.random.Generator(
np.random.PCG64(random_seed)
)
bootstraps: np.ndarray = random_state.choice(
metric_values, size=(n_resamples, metric_values.size)
)
else:
bootstraps: np.ndarray = np.random.choice(
metric_values, size=(n_resamples, metric_values.size)
)
bootstrap_lower_quantiles: Union[np.ndarray, Number] = np.quantile(
bootstraps,
q=lower_quantile_pct,
axis=1,
)
bootstrap_lower_quantile_point_estimate: float = np.mean(bootstrap_lower_quantiles)
bootstrap_lower_quantile_standard_error: float = np.std(bootstrap_lower_quantiles)
bootstrap_lower_quantile_bias: float = (
bootstrap_lower_quantile_point_estimate - sample_lower_quantile
)
# Bias / Standard Error > 0.25 is a rule of thumb for when to apply bias correction.
# See:
# Efron, B., & Tibshirani, R. J. (1993). Estimates of bias. An Introduction to the Bootstrap (pp. 128).
# Springer Science and Business Media Dordrecht. DOI 10.1007/978-1-4899-4541-9
lower_quantile_bias_corrected_point_estimate: Number
if bootstrap_lower_quantile_bias / bootstrap_lower_quantile_standard_error <= 0.25:
lower_quantile_bias_corrected_point_estimate = (
bootstrap_lower_quantile_point_estimate
)
else:
lower_quantile_bias_corrected_point_estimate = (
bootstrap_lower_quantile_point_estimate - bootstrap_lower_quantile_bias
)
bootstrap_upper_quantiles: Union[np.ndarray, Number] = np.quantile(
bootstraps,
q=upper_quantile_pct,
axis=1,
)
bootstrap_upper_quantile_point_estimate: np.ndarray = np.mean(
bootstrap_upper_quantiles
)
bootstrap_upper_quantile_standard_error: np.ndarray = np.std(
bootstrap_upper_quantiles
)
bootstrap_upper_quantile_bias: float = (
bootstrap_upper_quantile_point_estimate - sample_upper_quantile
)
# Bias / Standard Error > 0.25 is a rule of thumb for when to apply bias correction.
# See:
# Efron, B., & Tibshirani, R. J. (1993). Estimates of bias. An Introduction to the Bootstrap (pp. 128).
# Springer Science and Business Media Dordrecht. DOI 10.1007/978-1-4899-4541-9
upper_quantile_bias_corrected_point_estimate: Number
if bootstrap_upper_quantile_bias / bootstrap_upper_quantile_standard_error <= 0.25:
upper_quantile_bias_corrected_point_estimate = (
bootstrap_upper_quantile_point_estimate
)
else:
upper_quantile_bias_corrected_point_estimate = (
bootstrap_upper_quantile_point_estimate - bootstrap_upper_quantile_bias
)
return (
lower_quantile_bias_corrected_point_estimate,
upper_quantile_bias_corrected_point_estimate,
    )
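
# A hedged usage sketch: bias-corrected bootstrap point estimates of the 2.5th and
# 97.5th percentiles (false_positive_rate = 0.05) for a synthetic normal sample.
import numpy as np
demo_values = np.random.default_rng(0).normal(loc=100.0, scale=10.0, size=500)
lower, upper = _compute_bootstrap_quantiles_point_estimate_custom_bias_corrected_method(
    metric_values=demo_values,
    false_positive_rate=np.float64(0.05),
    n_resamples=2000,
    random_seed=20220304,
)
print(lower, upper)  # roughly 80 and 120 for N(100, 10)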
def dem_plot(workdir, project_name, **kwargs):
"""
DEM3D creates a 3D representation from a Grass DEM file
"""
length = 1
width = 1
# Read the Header
# str_hd_dem = {'north':0,'south':0,'east':0,'west':0,'rows':0,'cols':0}
str_hd_dem = {}
with open(
os.path.join(workdir, project_name, "prepro/dem"), "r"
) as f: # open the file for reading
count = 0
for line in f: # iterate over each line
if count < 6:
str_hd, value_hd = line.split() # split it by whitespace
str_hd_dem[str_hd.replace(":", "")] = value_hd
count += 1
dem_file = open(os.path.join(workdir, project_name, "prepro/dem"), "r")
dem_mat = np.loadtxt(dem_file, skiprows=6)
dem_file.close()
x = np.zeros(int(str_hd_dem["rows"]))
y = np.zeros(int(str_hd_dem["cols"]))
for a in range(int(str_hd_dem["rows"])):
x[a] = float(str_hd_dem["west"]) + length * a
for a in range(int(str_hd_dem["cols"])):
y[a] = float(str_hd_dem["south"]) + width * a
# x=x-width/2
# y=y-length/2
fig = plt.figure()
ax = plt.axes(projection="3d")
# Make data.
X, Y = np.meshgrid(x, y)
# Plot the surface.
surf = ax.plot_surface(X, Y, dem_mat.T, cmap="viridis")
# Add a color bar which maps values to colors.
cbar = fig.colorbar(surf, shrink=0.25, orientation='horizontal',
label='Elevation (m)')
ax.set(xlabel="Easting (m)", ylabel="Northing (m)", zlabel="Elevation (m)")
    plt.show()
def test_snail_attributes():
"""Test snail attributes."""
s = Snail()
assert s.nlate == 0
assert s.deaths == 0
assert s.nlate_max == -1
assert not s.active
assert s.delay_max == 0.0
    assert s.delay == 0.0
def bytes_to_b64(data: bytes, remove_padding=True) -> str:
"""
byte string to URL safe Base64 string, with option to remove B64 LSB padding
:param data: byte string
:param remove_padding: remove b64 padding (``=`` char). True by default
:return: base64 unicode string
"""
text = urlsafe_b64encode(data).decode()
if remove_padding:
return text.replace('=', '')
else:
        return text
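
# Quick check (assumes the module-level urlsafe_b64encode import used above):
# URL-safe alphabet ('-' and '_'), with the trailing '=' padding stripped by default.
assert bytes_to_b64(b'\xfb\xff') == '-_8'
assert bytes_to_b64(b'\xfb\xff', remove_padding=False) == '-_8='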
def _case_sensitive_replace(string, old, new):
"""
Replace text, retaining exact case.
Args:
string (str): String in which to perform replacement.
old (str): Word or substring to replace.
new (str): What to replace `old` with.
Returns:
repl_string (str): Version of string where instances of
`old` has been replaced with `new`, retaining case.
"""
def repl(match):
current = match.group()
# treat multi-word sentences word-by-word
old_words = current.split(" ")
new_words = new.split(" ")
out = []
for old_word, new_word in zip(old_words, new_words):
result = []
all_upper = True
for ind, chr in enumerate(old_word):
if ind >= len(new):
break
if chr.isupper():
result.append(new_word[ind].upper())
else:
result.append(new_word[ind].lower())
all_upper = False
            # special cases - keep remaining case
if new_word.lower() in CASE_WORD_EXCEPTIONS:
result.append(new_word[ind + 1 :])
# append any remaining characters from new
elif all_upper:
result.append(new_word[ind + 1 :].upper())
else:
result.append(new_word[ind + 1 :].lower())
out.append("".join(result))
# if we have more new words than old ones, just add them verbatim
out.extend([new_word for ind, new_word in enumerate(new_words) if ind >= len(old_words)])
return " ".join(out)
if string is None:
return None
regex = re.compile(re.escape(old), re.I)
    return regex.sub(repl, string)
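
# A hedged usage sketch (assumes the module-level `re` import and a
# CASE_WORD_EXCEPTIONS constant that does not contain "goodbye"): each matched word
# passes its case pattern on to the replacement.
assert _case_sensitive_replace("Hello there, HELLO there", "hello", "goodbye") == \
    "Goodbye there, GOODBYE there"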
def test_read_message_with_parent_process_dead_and_should_not_exit(os):
"""
Test simple worker processes exit when parent is dead and shutdown is not
set when reading messages
"""
# Setup SQS Queue
conn = boto3.client('sqs', region_name='us-east-1')
queue_url = conn.create_queue(QueueName="tester")['QueueUrl']
# Setup PPID
os.getppid.return_value = 123
# When I have no parent process, and shutdown is not set
worker = SimpleProcessWorker(queue_url, INTERVAL, BATCHSIZE, parent_id=1)
worker.read_message = Mock()
# Then I return from run()
    worker.run().should.be.none
def testauth():
"""Tests auth-related functions"""
# check the hashpass() base function
p = 'dofij'
pw = hashpass(p)
    print(pw)
x = hashpass(p, pw)
    print(x)
assert x == pw, 'The two passes should be identical'
# check the auth() wrapper
u = 'user 1'
p = 'user password'
hashpw = hashpass(p)
userfunc = lambda uname: dict(username=uname, passwd=hashpw, id=1)
x = auth(u, p, userfunc)
    print('Should be Valid: ', x)
try:
x = auth(u, 'wrong password', userfunc)
        print('Should never get here: ', x)
    except Exception as e:
        print('Should get InvalidPasswordException: got %s: %s' % (type(e), e))
try:
x = auth(u, 'user password', lambda u: None)
        print('Should never get here: ', x)
    except Exception as e:
        print('Should get InvalidUserException: got %s: %s' % (type(e), e))
def deserialize_transaction_from_etherscan(
data: Dict[str, Any],
internal: bool,
) -> EthereumTransaction:
"""Reads dict data of a transaction from etherscan and deserializes it
Can throw DeserializationError if something is wrong
"""
try:
# internal tx list contains no gasprice
gas_price = FVal(-1) if internal else FVal(data['gasPrice'])
tx_hash = read_hash(data, 'hash')
input_data = read_hash(data, 'input')
timestamp = deserialize_timestamp(data['timeStamp'])
block_number = read_integer(data, 'blockNumber')
nonce = -1 if internal else read_integer(data, 'nonce')
return EthereumTransaction(
timestamp=timestamp,
block_number=block_number,
tx_hash=tx_hash,
from_address=data['from'],
to_address=data['to'],
value=deserialize_fval(data['value']),
gas=deserialize_fval(data['gas']),
gas_price=gas_price,
gas_used=deserialize_fval(data['gasUsed']),
input_data=input_data,
nonce=nonce,
)
except KeyError as e:
        raise DeserializationError(f'Etherscan ethereum transaction missing expected key {str(e)}')
def ar(p):
"""
Given a quaternion p, return the 4x4 matrix A_R(p)
which when multiplied with a column vector q gives
the quaternion product qp.
Parameters
----------
p : numpy.ndarray
4 elements, represents quaternion
Returns
-------
numpy.ndarray
4x4 matrix describing action of quaternion multiplication
"""
return np.array([[p[0], -p[1], -p[2], -p[3]],
[p[1], p[0], p[3], -p[2]],
[p[2], -p[3], p[0], p[1]],
                     [p[3], p[2], -p[1], p[0]]])
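
# Worked check of ar(): with q = i = (0, 1, 0, 0) and p = j = (0, 0, 1, 0), the
# quaternion product q*p = i*j = k, and A_R(j) @ i indeed gives (0, 0, 0, 1).
import numpy as np
i_quat = np.array([0.0, 1.0, 0.0, 0.0])
j_quat = np.array([0.0, 0.0, 1.0, 0.0])
assert np.allclose(ar(j_quat) @ i_quat, [0.0, 0.0, 0.0, 1.0])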
def preprocess_list(lst,tokenizer,max_len=None):
"""
function preprocesses a list of values returning tokenized sequences
Args:
lst: list of strings to be processed
tokenizer: a tokenizer object
max_len: if we need to ensure the same length of strings, we can provide an integer here
Returns:
a numpy array with tokenized sequences. Each sequence in a separate row
"""
return_seq = tokenizer.texts_to_sequences(lst)
seq = np.array(
pad_sequences(return_seq, maxlen=max_len,padding="post"),
dtype="float32"
)
    return seq
def get_GEOS5_as_ds_via_OPeNDAP(collection='inst3_3d_aer_Nv',
fcast_start_hour=12,
mode='seamless', dt=None):
"""
Get the GEOS-5 model product (GEOS-5) as a xr.Dataset (using OPeNDAP)
Parameters
----------
    mode (str): retrieve the forecast (fcast), the assimilated fields (assim), or
        both (seamless)
    dt (datetime.datetime): date to retrieve forecast from or assimilation for
    collection (str): data collection to access (e.g. chm_inst_1hr_g1440x721_p23)
    fcast_start_hour (int): hour the forecast started on a given day
Returns
-------
(xr.dataset)
NOTES
---
- default is to get the latest forecast for chemistry (via seamless route)
- See documentation for details: https://geos5.org/wiki/index.php?title=GEOS-5_Earth_System_Modeling_and_Data_Assimilation
- Collections include:
    - Forecasts started at different hours run for different lengths:
00 - ~10 days
06 - ~1.5 days
12 - ~5 days
18 - ~1.5 days
- the 5 day forecast for a given day is selected as default (fcast_start_hour)
"""
# Root OPeNDAP directory
root_url = 'https://opendap.nccs.nasa.gov/dods/GEOS-5/fp/0.25_deg/{}/'
root_url = root_url.format(mode)
# Make up the complete URL for a forecast or assimilation field
if (mode == 'fcast') or (mode == 'seamless'):
# Which date to use?
if isinstance(dt, type(None)):
            # Use the latest file (default)
URL = '{}/{}.latest'.format(root_url, collection)
else:
# Use a file specified in arguments
correct_type = type(dt) == datetime.datetime
assert correct_type, "'date' variable must be a datetime.datetime object"
            # Use the 'latest' file (default)
            # NOTE: the latest 12 forecast file is used to match up with GEOS-CF
# TODO: update this. this will not give enough data
dstr = dt.strftime(format='%Y%m%d')
URL = '{}/{}/{}.{}_{:0>2}'
URL = URL.format(root_url, collection, collection, dstr, fcast_start_hour)
elif mode == 'assim':
# Just retrieve an OPeNDAP pointer to the entire dataset for now
URL = '{}/{}'.format(root_url, collection)
else:
print("WARNING: GEOS-5 mode provided ('{}') not known".format(mode))
sys.exit()
# Open the dataset via OPeNDAP and return
ds = xr.open_dataset(URL)
    return ds
def to_dataframe(ticks: list) -> pd.DataFrame:
"""Convert list to Series compatible with the library."""
df = pd.DataFrame(ticks)
df['time'] = pd.to_datetime(df['time'], unit='s')
df.set_index("time", inplace=True)
    return df
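
# A hedged usage sketch: ticks carrying Unix-epoch 'time' values become a DataFrame
# indexed by datetime (assumes the module-level `pd` import used above).
demo_ticks = [{'time': 1_600_000_000, 'price': 1.10},
              {'time': 1_600_000_060, 'price': 1.11}]
demo_df = to_dataframe(demo_ticks)
print(demo_df.index[0])   # 2020-09-13 12:26:40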
def initialize_conditions(segment):
"""Sets the specified conditions which are given for the segment type.
    Assumptions:
    Constant altitude held for a fixed period of time.
Source:
N/A
Inputs:
segment.altitude [meters]
    segment.time                                 [seconds]
state.numerics.dimensionless.control_points [Unitless]
state.conditions.frames.inertial.time [seconds]
Outputs:
conditions.frames.inertial.velocity_vector [meters/second]
conditions.frames.inertial.position_vector [meters]
conditions.freestream.altitude [meters]
conditions.frames.inertial.time [seconds]
Properties Used:
N/A
"""
# unpack
alt = segment.altitude
duration = segment.time
conditions = segment.state.conditions
# check for initial altitude
if alt is None:
if not segment.state.initials: raise AttributeError('altitude not set')
alt = -1.0 *segment.state.initials.conditions.frames.inertial.position_vector[-1,2]
# dimensionalize time
t_initial = conditions.frames.inertial.time[0,0]
t_nondim = segment.state.numerics.dimensionless.control_points
time = t_nondim * (duration) + t_initial
# pack
segment.state.conditions.freestream.altitude[:,0] = alt
segment.state.conditions.frames.inertial.position_vector[:,2] = -alt # z points down
segment.state.conditions.frames.inertial.velocity_vector[:,0] = 0.
    segment.state.conditions.frames.inertial.time[:,0] = time[:,0]
def keysCode(code):
"""
Download user's keys from an email link
GET: If the code is valid, download user keys
Else abort with a 404
"""
#Check if code exists and for the correct purpose. Else abort
if (hl.checkCode(code,"Keys")):
user = hl.getUserFromCode(code)
else:
abort(404)
#Mark code as used
hl.flagCode(code)
#return
    return getKeys(user["Name"])
def lengthenFEN(fen):
"""Lengthen FEN to 71-character form (ex. '3p2Q' becomes '111p11Q')"""
return fen.replace('8','11111111').replace('7','1111111') \
.replace('6','111111').replace('5','11111') \
              .replace('4','1111').replace('3','111').replace('2','11')
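
# Quick check using the docstring's own example: digit run-lengths expand to '1's.
assert lengthenFEN('3p2Q') == '111p11Q'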
def keyboard_mapping(display):
"""Generates a mapping from *keysyms* to *key codes* and required
modifier shift states.
:param Xlib.display.Display display: The display for which to retrieve the
keyboard mapping.
:return: the keyboard mapping
"""
mapping = {}
shift_mask = 1 << 0
group_mask = alt_gr_mask(display)
# Iterate over all keysym lists in the keyboard mapping
min_keycode = display.display.info.min_keycode
keycode_count = display.display.info.max_keycode - min_keycode + 1
for index, keysyms in enumerate(display.get_keyboard_mapping(
min_keycode, keycode_count)):
key_code = index + min_keycode
# Normalise the keysym list to yield a tuple containing the two groups
normalized = keysym_normalize(keysyms)
if not normalized:
continue
# Iterate over the groups to extract the shift and modifier state
for groups, group in zip(normalized, (False, True)):
for keysym, shift in zip(groups, (False, True)):
if not keysym:
continue
shift_state = 0 \
| (shift_mask if shift else 0) \
| (group_mask if group else 0)
# Prefer already known lesser shift states
if keysym in mapping and mapping[keysym][1] < shift_state:
continue
mapping[keysym] = (key_code, shift_state)
    return mapping
def convertPeaks(peaksfile, bedfile):
"""Convert a MACS output file `peaksfile' to a BED file. Also works if the input is already in BED format."""
regnum = 1
with open(bedfile, "w") as out:
with open(peaksfile, "r") as f:
tot = 0
chrom = ""
start = 0
end = 0
c = CSVreader(f)
for line in c:
if len(line) == 0 or line[0][0] == '#' or line[0] == 'chr':
continue
bchrom = line[0]
if "_" in bchrom: # get rid of weird chromosomes
continue
# New chromosome?
if bchrom != chrom:
if end > 0:
out.write("{}\t{}\t{}\treg{}\t{}\t+\n".format(chrom, start, end, regnum, regnum))
regnum += 1
chrom = bchrom
start = 0
end = 0
# Unwanted chromosome?
if bchrom == 'chrM' or "random" in bchrom:
start = 0
end = 0
continue
# Good line
bstart = int(line[1])
bend = int(line[2])
if start <= bstart <= end:
# Extend current region
end = bend
else:
# Start new region
tot += (end - start)
if end > 0:
out.write("{}\t{}\t{}\treg{}\t{}\t+\n".format(chrom, start, end, regnum, regnum))
regnum += 1
start = bstart
end = bend
out.write("{}\t{}\t{}\treg{}\t{}\t+\n".format(chrom, start, end, regnum, regnum))
tot += (end - start)
    return (tot, regnum)
def run():
"""Starts the node
Runs to start the node and initialize everthing. Runs forever via Spin()
:returns: Nothing
:rtype: None
"""
rospy.init_node('yumi_trials', anonymous=True)
# Positions measured
# x positions
x_inizio = 0.150 # [m]
# Length of desk in x direction
x_desk = 0.360 # [m]
x_pos = 0.010 # [m]
x = x_inizio + x_desk + x_pos # [m]
# y positions
y = -0.200
# z positions
# Length of the Gripper from datasheet
z_gripper = 0.136 # [m]
# Height of the desk from JOGGING
z_desk = 0.041 # [m]
z_pos = 0.100 # [m]
# Contact position measured from JOGGING
z_contact_desk = 0.171 # [m]
z = z_desk + z_gripper + z_pos # [m]
p1_L = [0.30000, 0.30000, 0.38000, 0, pi, 0]
# Target of the rubber
p_target = [0.31500, -0.20200, 0.172, 0, pi, pi]
# Start by connecting to ROS and MoveIt!
yumi.init_Moveit()
# Show the current pose
# Print current joint angles
yumi.print_current_joint_states(yumi.RIGHT)
yumi.print_current_joint_states(yumi.LEFT)
# Opening the grippers
yumi.open_grippers(yumi.LEFT)
yumi.open_grippers(yumi.RIGHT)
# Reset pose
yumi.Start_State_To_Current_State(yumi.BOTH)
rospy.loginfo('Reset pose')
yumi.reset_pose()
# rospy.sleep(2.0)
    rospy.loginfo('The current position is:')
rospy.loginfo(yumi.get_current_pose(yumi.LEFT))
    # Move the left arm out of the way so it does not block the workspace
    rospy.loginfo('Move the left arm out of the way so it does not block the workspace')
yumi.go_to_simple(p1_L[0], p1_L[1], p1_L[2], p1_L[3], p1_L[4], p1_L[5], yumi.LEFT)
# Picking task
rospy.loginfo('Picking task')
# Going to a "close position"
rospy.loginfo('Going to a close position')
p_target[2] += 0.300
print(p_target[2])
rospy.sleep(0.1)
# Go to the position
yumi.move_and_grasp(yumi.RIGHT, p_target)
rospy.sleep(1.0)
# Opening the gripper
rospy.loginfo('Opening the gripper')
yumi.open_grippers(yumi.RIGHT)
rospy.sleep(0.5)
# Printing the pose of the right arm
rospy.loginfo(yumi.get_current_pose(yumi.RIGHT))
    # Rendezvous
# Changing the speed
fraction = 0.25
rospy.loginfo('Changing the speed to {}'.format(fraction))
yumi.change_speed(yumi.RIGHT, fraction)
# Going to the Target position with z height of 10 cm
rospy.loginfo('Going to the target position with z at the height of 10cm')
p_target[1] -= 0.01000
p_target[2] -= 0.20000
rospy.sleep(0.1)
# Go to the position
yumi.move_and_grasp(yumi.RIGHT, p_target)
# rospy.sleep(0.5)
# Going closer
p_target[2] = z_contact_desk + 0.005
fraction = 0.50
rospy.loginfo('Changing the speed to {}'.format(fraction))
yumi.change_speed(yumi.RIGHT, fraction)
# Go to the position and close the gripper
yumi.move_and_grasp(yumi.RIGHT, p_target, 15.0)
rospy.sleep(0.5)
    rospy.loginfo('The pose at grasping is:')
    actual_pose = yumi.get_current_pose(yumi.RIGHT)
    rospy.loginfo(actual_pose)
    rospy.loginfo('Desired position: {}'.format(p_target))
    errore = [actual_pose.pose.position.x - p_target[0], actual_pose.pose.position.y - p_target[1],
              actual_pose.pose.position.z - p_target[2]]
    rospy.loginfo('Position error: {}'.format(errore))
rospy.sleep(0.1)
# Raising in z
rospy.loginfo('Raising in z with gripper closed')
p_target[2] += 0.20000
    # Increase the speed to full
fraction = 1.0
rospy.loginfo('Changing the speed to {}'.format(fraction))
yumi.change_speed(yumi.RIGHT, fraction)
# Go to the position
yumi.move_and_grasp(yumi.RIGHT, p_target)
rospy.sleep(1.0)
    # Increase the height and orient the right arm horizontally
p1_R = p_target
p1_R[0] += 0.150
p1_R[1] = -z_gripper
p1_R[2] = 0.300
p1_R[3] = - pi/2
p1_R[4] = 0.000
p1_R[5] = 0.000
yumi.change_speed(yumi.RIGHT, 0.25)
yumi.move_and_grasp(yumi.RIGHT, p_target)
# rospy.sleep(1.0)
# Printing of the pose
rospy.loginfo('print the current pose')
rospy.loginfo(yumi.get_current_pose(yumi.RIGHT))
rospy.loginfo('print the RIGHT joints values:')
rospy.loginfo(yumi.get_current_joint_values(yumi.RIGHT))
rospy.loginfo('print the current pose')
rospy.loginfo(yumi.get_current_pose(yumi.RIGHT))
rospy.loginfo('print the LEFT joints values:')
rospy.loginfo(yumi.get_current_joint_values(yumi.LEFT))
    # Rotate the left arm to a horizontal orientation
    rospy.loginfo('Rotate the left arm to a horizontal orientation')
p1_L[3] = - pi/2
p1_L[4] = pi
p1_L[5] = pi
yumi.go_to_simple(p1_L[0], p1_L[1], p1_L[2], p1_L[3], p1_L[4], p1_L[5], yumi.LEFT)
rospy.sleep(0.5)
# Open the gripper of the Left Arm
yumi.open_grippers(yumi.LEFT)
    # Align the left arm with the right arm
    rospy.loginfo('Align the left arm with the right arm')
p1_L[:3] = p1_R[:3]
p1_L[1] += 0.200 + 2*z_gripper
p1_L[2] += 0.020
yumi.change_speed(yumi.LEFT, 0.75)
yumi.go_to_simple(p1_L[0], p1_L[1], p1_L[2], p1_L[3], p1_L[4], p1_L[5], yumi.LEFT)
    # Move the left arm closer to the right arm
    rospy.loginfo('Move the left arm closer to the right arm')
p1_L[:3] = p1_R[:3]
p1_L[1] += 2*z_gripper
p1_L[2] += 0.025000
yumi.change_speed(yumi.LEFT, 0.10)
yumi.go_to_simple(p1_L[0], p1_L[1], p1_L[2], p1_L[3], p1_L[4], p1_L[5], yumi.LEFT)
# Printing of the pose
rospy.loginfo('print the current pose: RIGHT')
rospy.loginfo(yumi.get_current_pose(yumi.RIGHT))
rospy.loginfo('print the RIGHT joints values:')
rospy.loginfo(yumi.get_current_joint_values(yumi.RIGHT))
rospy.loginfo('print the current pose: LEFT')
rospy.loginfo(yumi.get_current_pose(yumi.LEFT))
rospy.loginfo('print the LEFT joints values:')
rospy.loginfo(yumi.get_current_joint_values(yumi.LEFT))
# Exchange of the rubber
rospy.sleep(0.2)
yumi.gripper_effort(yumi.LEFT, 15.0)
rospy.sleep(0.5)
yumi.open_grippers(yumi.RIGHT)
# rospy.sleep(1.0)
p1_L[1] += 0.10000
yumi.change_speed(yumi.LEFT, 0.10)
yumi.go_to_simple(p1_L[0], p1_L[1], p1_L[2], p1_L[3], p1_L[4], p1_L[5], yumi.LEFT)
# Readings
rospy.loginfo('reading of the left arm:')
rospy.loginfo(yumi.get_current_pose(yumi.LEFT))
rospy.loginfo('reading of the right arm:')
rospy.loginfo(yumi.get_current_pose(yumi.RIGHT))
# Prepare a motion with 2 arms together
rospy.loginfo('Prepare a motion with 2 arms together')
p2_L = [0.30000, 0.30000, 0.30000, 0, pi, 0]
p2_R = [0.31500, -0.20200, 0.30000, 0, pi, pi]
yumi.move_both(p2_L, p2_R)
yumi.open_grippers(yumi.LEFT) | 5,357,460 |
def set_workspace(path=None):
""" Set a custom workspace for use """
if not Settings.InstaPy_is_running:
if path:
path = verify_workspace_name(path)
workspace_is_new = differ_paths(WORKSPACE["path"], path)
if workspace_is_new:
update_workspace(path)
update_locations()
message = "Custom workspace set: \"{}\" :]".format(path)
highlight_print(Settings.profile["name"],
message,
"workspace",
"info",
Settings.logger)
else:
message = "Given workspace path is identical as current :/"
highlight_print(Settings.profile["name"],
message,
"workspace",
"info",
Settings.logger)
else:
message = "No any custom workspace provided.\t~using existing.."
highlight_print(Settings.profile["name"],
message,
"workspace",
"info",
Settings.logger)
else:
message = ("Sorry! You can't change workspace after"
" InstaPy has started :>\t~using existing..")
highlight_print(Settings.profile["name"],
message,
"workspace",
"info",
Settings.logger) | 5,357,461 |
def create_freud_box(box: np.ndarray, is_2D=True) -> Box:
"""Convert an array of box values to a box for use with freud functions
The freud package has a special type for the description of the simulation cell, the
Box class. This is a function to take an array of lengths and tilts to simplify the
creation of the Box class for use with freud.
"""
# pylint: disable=invalid-name
Lx, Ly, Lz = box[:3]
xy = xz = yz = 0
if len(box) == 6:
xy, xz, yz = box[3:6]
if is_2D:
return Box(Lx=Lx, Ly=Ly, xy=xy, is2D=is_2D)
return Box(Lx=Lx, Ly=Ly, Lz=Lz, xy=xy, xz=xz, yz=yz)
# pylint: enable=invalid-name | 5,357,462 |
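# Hedged usage sketch (assumes the create_freud_box function above is in scope and the
# freud-analysis package is installed); the box arrays are illustrative values only.
import numpy as np

square_box = create_freud_box(np.array([10.0, 10.0, 0.0]), is_2D=True)
tilted_box = create_freud_box(np.array([10.0, 10.0, 10.0, 0.1, 0.0, 0.0]), is_2D=False)
print(square_box, tilted_box)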
def hrrr_snotel_pixel(file, x_pixel_index, y_pixel_index):
"""
    Read GRIB file surface values, remove unused dimensions, and
    set the time dimension.
    Required to be able to concatenate all GRIB files into a time series.
"""
hrrr_file = xr.open_dataset(
file.as_posix(),
engine='cfgrib',
backend_kwargs={
'errors': 'ignore',
'indexpath': '',
'filter_by_keys': {
'level': 0,
'typeOfLevel': 'surface',
}
},
).isel(x=[x_pixel_index], y=[y_pixel_index])
del hrrr_file.coords['valid_time']
del hrrr_file.coords['surface']
del hrrr_file.coords['step']
return hrrr_file.expand_dims(time=[hrrr_file.time.values]) | 5,357,463 |
def write_model_inputs(
scenario_directory, scenario_id, subscenarios, subproblem, stage, conn
):
"""
Get inputs from database and write out the model input
transmission_lines.tab file.
    :param scenario_directory: string, the scenario directory
    :param scenario_id: the scenario ID
    :param subscenarios: SubScenarios object with all subscenario info
:param subproblem:
:param stage:
:param conn: database connection
:return:
"""
transmission_lines = get_inputs_from_database(
scenario_id, subscenarios, subproblem, stage, conn
)
with open(
os.path.join(
scenario_directory,
str(subproblem),
str(stage),
"inputs",
"transmission_lines.tab",
),
"w",
newline="",
) as transmission_lines_tab_file:
writer = csv.writer(
transmission_lines_tab_file, delimiter="\t", lineterminator="\n"
)
# Write header
writer.writerow(
[
"transmission_line",
"tx_capacity_type",
"tx_availability_type",
"tx_operational_type",
"load_zone_from",
"load_zone_to",
"tx_simple_loss_factor",
"reactance_ohms",
]
)
for row in transmission_lines:
replace_nulls = ["." if i is None else i for i in row]
writer.writerow(replace_nulls) | 5,357,464 |
def convert_to_diact_uttseg_interactive_tag(previous, tag):
"""Returns the dialogue act but with the fact it is keeping or
taking the turn.
"""
if not previous:
previous = ""
trp_tag = uttseg_pattern(tag)
return trp_tag.format(convert_to_diact_interactive_tag(previous, tag)) | 5,357,465 |
def refresh(app):
"""Refresh all migrations.
Arguments:
app (App): Main Service Container
"""
app.make('migrator').refresh() | 5,357,466 |
def allocate_gpio_pins(project_json, ip_json) -> None:
"""
Goes through the project, finds unassigned "GPIO" pinmaps, assigns them to
unclaimed GPIO pins in ascending order of pins and order of appearance of
modules.
:param project_json:
:param ip_json:
:return: Edits project_json in place
"""
gpio_free_list = list(GPIO_PINS_LIST)
gpio_reserved_set = set() # set for just a little bit of O(1)
gpio_unmapped_list = []
# reserve explicitly mapped gpio pins
for m in project_json: # for each module in the project
# for each entry in the module's pinmaps array (which always exists even if empty)
for i in range(len(m['pinmaps'])):
for pinmap in m['pinmaps'][i]:
if_inst_name = f"{m['parameters']['Verilog Instance Name']}_if{i}"
if pinmap[PINMAP_EXTNAME_INDEX] == "GPIO":
# add to the unmapped list for later processing if it's an unassigned GPIO
gpio_unmapped_list.append((pinmap, if_inst_name))
elif pinmap[PINMAP_EXTNAME_INDEX].startswith("GPIO_"):
# register in the reserved list if it's an explicitly assigned GPIO
if pinmap[PINMAP_EXTNAME_INDEX] in gpio_reserved_set:
print(f"ERROR[{Errors.DUPLICATE_GPIO_RESERVATION}]: "
f"DUPLICATE GPIO PIN RESERVATION SPECIFIED: "
f"{pinmap} in {if_inst_name}")
exit(Errors.DUPLICATE_GPIO_RESERVATION)
if pinmap[PINMAP_EXTNAME_INDEX] not in gpio_free_list:
print(f"ERROR[{Errors.NONEXISTENT_GPIO_RESERVATION}]: "
f"NON_EXISTENT GPIO PIN RESERVATION SPECIFIED: "
f"{pinmap} in {if_inst_name}")
exit(Errors.NONEXISTENT_GPIO_RESERVATION)
gpio_free_list.remove(pinmap[PINMAP_EXTNAME_INDEX])
gpio_reserved_set.add(pinmap[PINMAP_EXTNAME_INDEX])
# assign unmapped gpio pins
for pinmap in gpio_unmapped_list: # (pinmap:list, if_inst_name:str)
if not gpio_free_list:
print(f"ERROR[{Errors.OUT_OF_FREE_GPIOS}]: "
f"OUT OF FREE GPIOS FOR NEXT ASSIGNMENT: {pinmap[0]} in {pinmap[1]}")
exit(Errors.OUT_OF_FREE_GPIOS)
pinmap[0][PINMAP_EXTNAME_INDEX] = gpio_free_list[0]
gpio_reserved_set.add(gpio_free_list[0])
gpio_free_list.remove(gpio_free_list[0])
pass | 5,357,467 |
def R12(FMzul, FAmax, FZ, alphaA, Phi_en, qF = 1,
muTmin = 1, qM = 1, ra = 1, Atau = 1, Rm = 1,
tauB = 1, deltaFVth = 1, FQmax = 1, MYmax = 1):
"""
R12 Determining the safety margin against slipping SG and
the shearing stress tauQmax
(Sec 5.5.6)
---
FMzul
FAmax
Fz
alphaA
Phi_en
---
deltaFVth
---
"""
# The minimum residual clamp load
if deltaFVth < 0 : deltaFVth = 0
FKRmin = (FMzul / alphaA - (1.0 - Phi_en) * FAmax
- FZ - deltaFVth) # (R12/1)
# The clamp load required for transmiting tranverse loads
FKQerf = ((FQmax / (qF * muTmin))
+ (MYmax / (qM * ra * muTmin))) # (R12/2)
# the following must apply:
# FKRmin > FKQerf # (R12/3)
# Alternative safety verification against slipping:
SG = FKRmin / FKQerf # (R12/4)
# For the shearing stress in the bolts cross section
# Atau at the interface:
tauQmax = FQmax / Atau # (R12/5)
    # The aim is to avoid shearing of the bolt:
    # tauQmax < tauB         # (R12/6-1)
    # or, equivalently, the permissible transverse load:
    # FQzul = Atau * Rm * (tauB / Rm)   # (R12/6-2)
    # Alternative safety verification against shearing:
    SA = (tauB * Atau / FQmax)  # > 1.1    (R12/7)
    return SG, SA, tauQmax | 5,357,468
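# Hedged numeric sketch with purely illustrative (non-physical) values, showing the
# calling convention and the (SG, SA, tauQmax) tuple returned above.
SG, SA, tauQmax = R12(FMzul=50e3, FAmax=10e3, FZ=2e3, alphaA=1.6, Phi_en=0.2,
                      qF=1.0, muTmin=0.12, qM=1.0, ra=0.03, Atau=84.3e-6,
                      Rm=1000e6, tauB=0.62 * 1000e6, deltaFVth=0.0,
                      FQmax=5e3, MYmax=20.0)
print(SG, SA, tauQmax)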
def compute_good_coils(raw, t_step=0.01, t_window=0.2, dist_limit=0.005,
prefix='', gof_limit=0.98, verbose=None):
"""Comute time-varying coil distances."""
try:
from mne.chpi import compute_chpi_amplitudes, compute_chpi_locs
except ImportError:
chpi_locs = _old_chpi_locs(raw, t_step, t_window, prefix)
else:
chpi_amps = compute_chpi_amplitudes(
raw, t_step_min=t_step, t_window=t_window)
chpi_locs = compute_chpi_locs(raw.info, chpi_amps)
from mne.chpi import _get_hpi_initial_fit
hpi_dig_head_rrs = _get_hpi_initial_fit(raw.info, verbose=False)
hpi_coil_dists = cdist(hpi_dig_head_rrs, hpi_dig_head_rrs)
counts = np.empty(len(chpi_locs['times']), int)
for ii, (t, coil_dev_rrs, gof) in enumerate(zip(
chpi_locs['times'], chpi_locs['rrs'], chpi_locs['gofs'])):
these_dists = cdist(coil_dev_rrs, coil_dev_rrs)
these_dists = np.abs(hpi_coil_dists - these_dists)
# there is probably a better algorithm for finding the bad ones...
use_mask = gof >= gof_limit
good = False
while not good:
d = these_dists[use_mask][:, use_mask]
d_bad = d > dist_limit
good = not d_bad.any()
if not good:
if use_mask.sum() == 2:
use_mask[:] = False
break # failure
# exclude next worst point
badness = (d * d_bad).sum(axis=0)
exclude_coils = np.where(use_mask)[0][np.argmax(badness)]
use_mask[exclude_coils] = False
counts[ii] = use_mask.sum()
t = chpi_locs['times'] - raw.first_samp / raw.info['sfreq']
return t, counts, len(hpi_dig_head_rrs), chpi_locs | 5,357,469 |
def prep_request(items, local_id="id"):
"""
Process the incoming items into an AMR request.
<map name="cite_1">
<val name="{id_type}">{value}</val>
</map>
"""
map_items = ET.Element("map")
for idx, pub in enumerate(items):
if pub is None:
continue
local_id_value = pub.get(local_id) or pub.get(local_id.upper())
if local_id_value is None:
local_id_value = str(idx)
this_item = ET.Element("map", name=local_id_value)
for k, v in pub.items():
if v is None:
continue
de = ET.Element("val", name=k.lower())
de.text = v.strip()
this_item.append(de)
map_items.append(this_item)
request_items = ET.tostring(map_items)
xml = id_request_template.format(user=client.USER, password=client.PASSWORD, items=request_items)
return xml | 5,357,470 |
def calc_variables ( ):
"""Calculates all variables of interest.
They are collected and returned as a list, for use in the main program.
"""
# In this example we simulate using the cut (but not shifted) potential
# but we only report results which have had the long-range corrections applied
# The value of the cut-and-shifted potential is not used, in this example
import numpy as np
import math
from averages_module import VariableType
from lrc_module import potential_lrc, pressure_lrc
# Preliminary calculations (n,r,total are taken from the calling program)
vol = box**3 # Volume
rho = n / vol # Density
kin = 1.5 * n * p * temperature # Average kinetic energy for NP-atom system
kin_q = kin - total_spr # Quantum estimator for kinetic energy
rad_g = rad_gyr ( r )
# Variables of interest, of class VariableType, containing three attributes:
# .val: the instantaneous value
# .nam: used for headings
# .method: indicating averaging method
# If not set below, .method adopts its default value of avg
# The .nam and some other attributes need only be defined once, at the start of the program,
# but for clarity and readability we assign all the values together below
# Acceptance ratio of atomic moves
r_r = VariableType ( nam = 'Atomic move ratio', val = r_ratio, instant = False )
# Acceptance ratio of centre-of-mass moves
c_r = VariableType ( nam = 'COM move ratio', val = c_ratio, instant = False )
# Internal energy per atom for full potential with LRC
# LRC plus cut (but not shifted) PE already divided by factor P
# plus KE estimator: total classical KE for NP-atom system MINUS total spring potential
# all divided by N
e_f = VariableType ( nam = 'E/N full', val = potential_lrc(rho,r_cut) + (kin_q+total.pot)/n )
# Kinetic energy per atom, just for interest's sake
k_q = VariableType ( nam = 'KE/N', val = kin_q/n )
# Pressure for full potential with LRC
# LRC plus ideal gas contribution plus total virial divided by V
kin_q = kin_q / 1.5 # Convert KE estimator to kinetic energy part of pressure
p_f = VariableType ( nam = 'P full', val = pressure_lrc(rho,r_cut) + (kin_q+total.vir)/vol )
# Quantum spring energy per atom, just for interest's sake
e_q = VariableType ( nam = 'Espring/N', val = total_spr/n )
# Quantum polymer radius of gyration, just for interest's sake
r_g = VariableType ( nam = 'Radius of gyration', val = rad_g )
# Collect together into a list for averaging
return [ r_r, c_r, e_f, p_f, e_q, k_q, r_g ] | 5,357,471 |
def analyse_results_ds_one_station(dss, field='WetZ', verbose=None,
plot=False):
"""analyse and find an overlapping signal to fields 'WetZ' or 'WetZ_error'
in dss"""
# algorithm for zwd stitching of 30hrs gipsyx runs:
# just take the mean of the two overlapping signals
# and then smooth is with savgol_filter using 3 hours more data in each
# direction...
import matplotlib.pyplot as plt
import pandas as pd
import logging
def select_two_ds_from_gipsyx_results(ds, names=['WetZ_0', 'WetZ_1'],
hours_offset=None):
"""selects two dataarrays from the raw gipsyx results dataset"""
import pandas as pd
import xarray as xr
time0 = list(set(ds[names[0]].dims))[0]
time1 = list(set(ds[names[1]].dims))[0]
time = list(set(ds[names[0]][time0].values).intersection(set(ds[names[1]][time1].values)))
# time = dim_intersection([ds[names[0]], ds[names[1]]], dim='time')
if not time:
return None
time = sorted(pd.to_datetime(time))
if hours_offset is not None:
# freq = pd.infer_freq(time)
start = time[0] - pd.DateOffset(hours=hours_offset)
end = time[-1] + pd.DateOffset(hours=hours_offset)
# time = pd.date_range(start, end, freq=freq)
first = ds[names[0]].sel({time0: slice(start, end)})
second = ds[names[1]].sel({time1: slice(start, end)})
else:
first = ds[names[0]].sel({time0: time})
second = ds[names[1]].sel({time1: time})
first = first.rename({time0: 'time'})
second = second.rename({time1: 'time'})
two = xr.Dataset()
two[first.name] = first
two[second.name] = second
df = two.to_dataframe()
return df
logger = logging.getLogger('gipsyx_post_proccesser')
if verbose == 0:
logger.info('analysing {} field.'.format(field))
# first, group different vars for different stitching schemes:
to_smooth = ['GradEast', 'GradNorth', 'WetZ']
to_simple_mean = ['X', 'Y', 'Z']
to_error_mean = [x + '_error' for x in to_smooth] + [x + '_error' for x in
to_simple_mean]
# second, select the field to work on:
nums = sorted(list(set([int(x.split('-')[1])
for x in dss if x.split('-')[0] == field])))
ds = dss[['{}-{}'.format(field, i) for i in nums]]
df_list = []
for i, _ in enumerate(ds):
if i == len(ds) - 1:
break
first = ds['{}-{}'.format(field, i)]
second = ds['{}-{}'.format(field, i + 1)]
if verbose == 1:
            print('processing {} and {}'.format(first.name, second.name))
# 3 hours addition to each side:
df = select_two_ds_from_gipsyx_results(ds, [first.name, second.name],
3)
if df is not None:
if field in to_smooth:
wn = 25
order = 3
stitched = stitch_two_cols(df, wn, order, method='smooth_mean')
action = 'stitched and replaced daily discontinuities '\
'with smooth(savgol filter, window:{}, order:{}) mean'.format(wn, order)
elif field in to_simple_mean:
stitched = stitch_two_cols(df, method='simple_mean')
action = 'stitched and replaced daily discontinuities '\
'with simple mean'
elif field in to_error_mean:
stitched = stitch_two_cols(df, method='error_mean')
action = 'stitched and replaced daily discontinuities '\
'with error mean (sqrt(errorA^2 + errorB^2))'
df_list.append(stitched)
# df_list.append(find_cross_points(df, None))
elif df is None:
if verbose:
logger.warning('skipping {} and {}...'.format(first.name, second.name))
da = pd.concat([x['stitched_signal'] for x in df_list]).to_xarray()
attrs_list = [(x, y)
for x, y in dss.attrs.items() if field == x.split('>')[0]]
attrs_list.append(('{}>action'.format(field), action))
for items in attrs_list:
da.attrs[items[0]] = items[1]
da.attrs['station'] = dss.attrs['station']
if plot:
fig, ax = plt.subplots(figsize=(16, 5))
da.plot.line(marker='.', linewidth=0., ax=ax, color='k')
for i, ppp in enumerate(ds):
ds['{}-{}'.format(field, i)].plot(ax=ax)
units = dss.attrs['{}>units'.format(field)]
sta = da.attrs['station']
desc = da.attrs['{}>desc'.format(field)]
ax.set_ylabel('{} [{}]'.format(field, units))
ax.set_xlabel('')
fig.suptitle('30 hours stitched {} for GNSS station {}'.format(desc, sta), fontweight='bold')
fig.tight_layout()
fig.subplots_adjust(top=0.95)
ax.grid()
# dfs = []
# for df in df_list:
# # check if there is an offset:
# A = df.columns.values[0]
# B = df.columns.values[1]
# if all([x is None for x in df.Cross]):
# offset = df.Diff.median()
# df['{}_new'.format(B)] = df[B] + offset
# dfs.append(df)
return da | 5,357,472 |
def check_gradients(m: torch.nn.Module, nonzero: bool) -> None:
""" Helper function to test whether gradients are nonzero. """
for param in m.parameters():
if nonzero:
assert (param.grad != 0).any()
else:
assert param.grad is None or (param.grad == 0).all() | 5,357,473 |
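# Hedged usage sketch (assumes the check_gradients helper above is in scope and PyTorch
# is installed): after a backward pass, every parameter of this tiny model should carry
# a non-zero gradient.
import torch

model = torch.nn.Linear(4, 2)
model(torch.randn(8, 4)).sum().backward()
check_gradients(model, nonzero=True)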
def findDocument_MergeFields(document):
"""this function creates a new docx document based on
a template with Merge fields and a JSON content"""
the_document = MailMerge(document)
all_fields = the_document.get_merge_fields()
res = {element:'' for element in all_fields}
return res | 5,357,474 |
def load_mushroom(data_home=None, return_dataset=False):
"""
Loads the mushroom multivariate dataset that is well suited to binary
classification tasks. The dataset contains 8123 instances with 3
categorical attributes and a discrete target.
The Yellowbrick datasets are hosted online and when requested, the dataset
is downloaded to your local computer for use. Note that if the dataset
hasn't been downloaded before, an Internet connection is required. However,
if the data is cached locally, no data will be downloaded. Yellowbrick
checks the known signature of the dataset with the data downloaded to
ensure the download completes successfully.
Datasets are stored alongside the code, but the location can be specified
with the ``data_home`` parameter or the $YELLOWBRICK_DATA envvar.
Parameters
----------
data_home : str, optional
The path on disk where data is stored. If not passed in, it is looked
up from YELLOWBRICK_DATA or the default returned by ``get_data_home``.
return_dataset : bool, default=False
Return the raw dataset object instead of X and y numpy arrays to
get access to alternative targets, extra features, content and meta.
Returns
-------
X : array-like with shape (n_instances, n_features) if return_dataset=False
A pandas DataFrame or numpy array describing the instance features.
y : array-like with shape (n_instances,) if return_dataset=False
A pandas Series or numpy array describing the target vector.
dataset : Dataset instance if return_dataset=True
The Yellowbrick Dataset object provides an interface to accessing the
data in a variety of formats as well as associated metadata and content.
"""
return _load_dataset('mushroom', data_home, return_dataset) | 5,357,475 |
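# Hedged usage sketch (assumes the load_mushroom loader above is importable from
# yellowbrick; downloads the data on first use, so an Internet connection is needed).
X, y = load_mushroom()
print(X.shape, y.shape)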
def get_current_project(user_id):
"""Return from database user current project"""
try:
current = CurrentProject.objects.get(user_id=user_id)
except CurrentProject.DoesNotExist:
return None
keystone = KeystoneNoRequest()
return keystone.project_get(current.project) | 5,357,476 |
def main() -> None:
"""
Loads the configration files and then attempts to start the
WDS and flask processes accordingly.
"""
config: Union[config_loader.ConfigTemplate, None] = None
# Allow setting config file from cli
if "--config-file" in sys.argv:
config = config_loader.main(sys.argv[sys.argv.index("--config-file") + 1])
else:
config = config_loader.main()
if config.MODE == "development" and config.RUN_WDS_IN_DEVELOPMENT:
pkg_manager = find_package_manager()
logger.info(f"Using \x1b[32m{ pkg_manager }\x1b[m package manager...")
if platform.system().lower() == "linux":
open_wds_in_linux(config, pkg_manager)
elif platform.system().lower() == "windows":
logger.info("Attempting to start webpack dev server...")
env = finalize_wds_config(config)
env.update(os.environ)
subprocess.Popen(
["cmd.exe", "/C", f"start {pkg_manager} start"],
cwd=os.path.join(PROJECT_ROOT, "frontend"),
env=env,
).wait()
else:
logger.warn("Unable to start webpack dev server. Please start it manually.")
start_flask(config) | 5,357,477 |
def docstring(app, what, name, obj, options, lines):
"""Converts doc-strings from (Commonmark) Markdown to reStructuredText."""
md = '\n'.join(lines)
ast = commonmark.Parser().parse(md)
rst = commonmark.ReStructuredTextRenderer().render(ast)
lines.clear()
lines += rst.splitlines() | 5,357,478 |
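# Hedged sketch of wiring the docstring handler above into a Sphinx build (typically in
# conf.py), so Markdown doc-strings are converted before autodoc renders them.
def setup(app):
    app.connect("autodoc-process-docstring", docstring)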
def cmd_busca_fundo(args):
"""Busca informacoes cadastral sobre os fundos."""
inf_cadastral = Cadastral()
inf_cadastral.cria_df_cadastral()
if args.cnpj:
inf_cadastral.mostra_detalhes_fundo(args.cnpj)
else:
fundo = inf_cadastral.busca_fundos(args.name, args.type, args.all)
if fundo.empty:
msg("red", "Erro: Fundo com nome {} nao encontrado".format(args.name), 1)
pd.set_option("max_colwidth", None)
pd.set_option("max_rows", None)
pd.set_option("display.width", None)
print(
fundo[["DENOM_SOCIAL", "SIT", "CLASSE"]].rename(
columns=Cadastral.csv_columns
)
) | 5,357,479 |
def get_random_successful_answer(intent: str) -> str:
"""
Get a random successful answer for this intent
* `intent`: name-parameter of the yml-section with which the successful answers were imported
**Returns:** None if no successful answers are known for this intent,
otherwise a random element of the successful answers for this intent
"""
return random_or_none(get_successful_answer_list(intent)) | 5,357,480 |
def formatter(
source: str,
language: str,
css_class: str,
options: dict[str, Any],
md: Markdown,
classes: list[str] | None = None,
id_value: str = "",
attrs: dict[str, Any] | None = None,
**kwargs: Any,
) -> str:
"""Execute code and return HTML.
Parameters:
source: The code to execute.
language: The code language, like python or bash.
css_class: The CSS class to add to the HTML element.
options: The container for options.
        md: The Markdown instance.
        classes: Additional CSS classes.
        id_value: An optional HTML id.
        attrs: Additional attributes.
**kwargs: Additional arguments passed to SuperFences default formatters.
Returns:
HTML contents.
"""
fmt = _formatters.get(language, lambda source, *args, **kwargs: source)
return fmt(source, md, **options) | 5,357,481 |
def generate():
"""
Command to generate faces with a trained model.
"""
parser = argparse.ArgumentParser(
description = "Generate faces using a trained model.",
usage = "faces [<args>]",
)
parser.add_argument('-m', '--model', type=str, required=True, help=
"Model definition file to use.")
parser.add_argument('-o', '--output', type=str, required=True, help=
"Directory to output results to.")
parser.add_argument('-f', '--gen-file', type=str, required=True, help=
"YAML file that specifies the parameters to generate.")
parser.add_argument('-b', '--batch_size', type=int, default=64, help=
"Batch size to use while generating images.")
parser.add_argument('-ext', '--extension', type=str, default='jpg', help=
"Image file extension to use when saving images.")
args = parser.parse_args(sys.argv[2:])
import faces.generate
faces.generate.generate_from_yaml(args.gen_file, args.model, args.output,
batch_size=args.batch_size, extension=args.extension) | 5,357,482 |
def test_custom_class_no_backends(UseBackend: HasBackends):
"""Ensures custom defined test class does not have registered backends."""
assert len(UseBackend.get_backends()) == 0 | 5,357,483 |
def find_visible(vertex_candidates, edges_to_check):
"""
# IMPORTANT: self.translate(new_origin=query_vertex) always has to be called before!
(for computing the angle representations wrt. the query vertex)
query_vertex: a vertex for which the visibility to the vertices should be checked.
also non extremity vertices, polygon vertices and vertices with the same coordinates are allowed.
query point also might lie directly on an edge! (angle = 180deg)
:param vertex_candidates: the set of all vertices which should be checked for visibility.
IMPORTANT: is being manipulated, so has to be a copy!
IMPORTANT: must not contain the query vertex!
:param edges_to_check: the set of edges which determine visibility
:return: a set of tuples of all vertices visible from the query vertex and the corresponding distance
"""
visible_vertices = set()
if len(vertex_candidates) == 0:
return visible_vertices
priority_edges = set()
# goal: eliminating all vertices lying 'behind' any edge
# TODO improvement in combination with priority: process edges roughly in sequence, but still allow jumps
# would follow closer edges more often which have a bigger chance to eliminate candidates -> speed up
while len(vertex_candidates) > 0 and len(edges_to_check) > 0:
# check prioritized items first
try:
edge = priority_edges.pop()
edges_to_check.remove(edge)
except KeyError:
edge = edges_to_check.pop()
lies_on_edge = False
v1, v2 = edge.vertex1, edge.vertex2
if v1.get_distance_to_origin() == 0.0:
# vertex1 has the same coordinates as the query vertex -> on the edge
lies_on_edge = True
# (but does not belong to the same polygon, not identical!)
# mark this vertex as not visible (would otherwise add 0 distance edge in the graph)
vertex_candidates.discard(v1)
# its angle representation is not defined (no line segment from vertex1 to query vertex!)
range_less_180 = v1.is_extremity
# do not check the other neighbouring edge of vertex1 in the future
e1 = v1.edge1
edges_to_check.discard(e1)
priority_edges.discard(e1)
# everything between its two neighbouring edges is not visible for sure
v1, v2 = v1.get_neighbours()
elif v2.get_distance_to_origin() == 0.0:
lies_on_edge = True
vertex_candidates.discard(v2)
range_less_180 = v2.is_extremity
e1 = v2.edge2
edges_to_check.discard(e1)
priority_edges.discard(e1)
v1, v2 = v2.get_neighbours()
repr1 = v1.get_angle_representation()
repr2 = v2.get_angle_representation()
repr_diff = abs(repr1 - repr2)
if repr_diff == 2.0:
# angle == 180deg -> on the edge
lies_on_edge = True
range_less_180 = False # does actually not matter here
if lies_on_edge:
# when the query vertex lies on an edge (or vertex) no behind/in front checks must be performed!
# the neighbouring edges are visible for sure
try:
vertex_candidates.remove(v1)
visible_vertices.add(v1)
except KeyError:
pass
try:
vertex_candidates.remove(v2)
visible_vertices.add(v2)
except KeyError:
pass
# all the candidates between the two vertices v1 v2 are not visible for sure
# candidates with the same representation should not be deleted, because they can be visible!
vertex_candidates.difference_update(
find_within_range(repr1, repr2, repr_diff, vertex_candidates, angle_range_less_180=range_less_180,
equal_repr_allowed=False))
continue
# case: a 'regular' edge
# eliminate all candidates which are blocked by the edge
# that means inside the angle range spanned by the edge and actually behind it
vertices_to_check = vertex_candidates.copy()
# the vertices belonging to the edge itself (its vertices) must not be checked.
# use discard() instead of remove() to not raise an error (they might not be candidates)
vertices_to_check.discard(v1)
vertices_to_check.discard(v2)
if len(vertices_to_check) == 0:
continue
# assert repr1 is not None
# assert repr2 is not None
# for all candidate edges check if there are any candidate vertices (besides the ones belonging to the edge)
# within this angle range
# the "view range" of an edge from a query point (spanned by the two vertices of the edge)
# is always < 180deg when the edge is not running through the query point (=180 deg)
# candidates with the same representation as v1 or v2 should be considered.
# they can be visible, but should be ruled out if they lie behind any edge!
vertices_to_check = find_within_range(repr1, repr2, repr_diff, vertices_to_check, angle_range_less_180=True,
equal_repr_allowed=True)
if len(vertices_to_check) == 0:
continue
# if a candidate is farther away from the query point than both vertices of the edge,
# it surely lies behind the edge
max_distance = max(v1.get_distance_to_origin(), v2.get_distance_to_origin())
vertices_behind = set(filter(lambda extr: extr.get_distance_to_origin() > max_distance, vertices_to_check))
# they do not have to be checked, no intersection computation necessary
# TODO improvement: increase the neighbouring edges' priorities when there were extremities behind
vertices_to_check.difference_update(vertices_behind)
if len(vertices_to_check) == 0:
# also done later, only needed if skipping this edge
vertex_candidates.difference_update(vertices_behind)
continue
        # if the candidate is closer than both edge vertices, it surely lies in front
min_distance = min(v1.get_distance_to_origin(), v2.get_distance_to_origin())
vertices_in_front = set(
filter(lambda extr: extr.get_distance_to_origin() < min_distance, vertices_to_check))
        # they do not have to be checked (saves computation)
vertices_to_check.difference_update(vertices_in_front)
# for all remaining vertices v it has to be tested if the line segment from query point (=origin) to v
# has an intersection with the current edge p1---p2
# vertices directly on the edge are allowed (not eliminated)!
p1 = v1.get_coordinates_translated()
p2 = v2.get_coordinates_translated()
for vertex in vertices_to_check:
if lies_behind(p1, p2, vertex.get_coordinates_translated()):
vertices_behind.add(vertex)
else:
vertices_in_front.add(vertex)
# vertices behind any edge are not visible
vertex_candidates.difference_update(vertices_behind)
# if there are no more candidates left. immediately quit checking edges
if len(vertex_candidates) == 0:
break
# check the neighbouring edges of all vertices which lie in front of the edge next first
# (prioritize them)
# they lie in front and hence will eliminate other vertices faster
# the fewer vertex candidates remain, the faster the procedure
# TODO improvement: increase priority every time and draw highest priority items
# but this involves sorting (expensive for large polygons!)
# idea: work with a list of sets, add new set for higher priority, no real sorting, but still managing!
# TODO test speed impact
for e in vertices_in_front:
# only add the neighbour edges to the priority set if they still have to be checked!
if type(e) == PolygonVertex:
# only vertices belonging to polygons have neighbours
priority_edges.update(edges_to_check.intersection({e.edge1, e.edge2}))
# all edges have been checked
# all remaining vertices were not concealed behind any edge and hence are visible
visible_vertices.update(vertex_candidates)
# return a set of tuples: (vertex, distance)
return {(e, e.get_distance_to_origin()) for e in visible_vertices} | 5,357,484 |
def gather_data(
network_stats: Iterable, start_time: int, end_time: int, step: int
) -> Dict:
"""This function takes Prometheus data and reshapes it into a multi-level
dictionary of network name to link name to link dir to list of values."""
label_val_map: defaultdict = defaultdict(
lambda: defaultdict(lambda: defaultdict(list))
)
for network, prom_results in network_stats:
if prom_results is None:
continue
for query, values in prom_results.items():
logging.info(f"Processing data for network {network} and metric {query}")
if not values:
logging.debug(f"Found no {query} results for {network}")
continue
for result in values:
link_name = result["metric"][consts.link_name]
link_dir = result["metric"][consts.link_direction]
val_array = label_val_map[network][link_name][link_dir]
if len(val_array) == 0:
# Create empty array of length equal to duration_s sampled at step_s
val_array = [np.nan] * int((end_time - start_time) / step)
label_val_map[network][link_name][link_dir] = val_array
for timestamp, metric_value in result["values"]:
                # Put values at the appropriate index of array based on timestamp
val_array[int((int(timestamp) - start_time) / step - 1)] = int(
metric_value
)
return label_val_map | 5,357,485 |
def delete_protection(ProtectionId=None):
"""
Deletes an AWS Shield Advanced Protection .
See also: AWS API Documentation
:example: response = client.delete_protection(
ProtectionId='string'
)
:type ProtectionId: string
:param ProtectionId: [REQUIRED]
The unique identifier (ID) for the Protection object to be deleted.
:rtype: dict
:return: {}
"""
pass | 5,357,486 |
def get_balance_sheet(ticker, limit, key, period):
"""Get the Balance sheet."""
URL = 'https://financialmodelingprep.com/api/v3/balance-sheet-statement/'
try:
r = requests.get(
'{}{}?period={}&?limit={}&apikey={}'.format(URL,
ticker,
period,
limit,
key))
balanceSheet = pd.DataFrame.from_dict(r.json()).transpose()
balanceSheet.columns = balanceSheet.iloc[0]
return balanceSheet[1:]
except requests.exceptions.HTTPError as e:
# We want a 200 value
print('Requesting Balance sheet statement ERROR: ', str(e)) | 5,357,487 |
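# Hedged call sketch: requires a Financial Modeling Prep API key and network access;
# the ticker "AAPL", the limit, and the "annual" period value are illustrative choices.
# balance = get_balance_sheet("AAPL", limit=4, key="YOUR_FMP_API_KEY", period="annual")
# print(balance.head())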
def softmax_loss(scores, y):
"""
Computes the loss and gradient for softmax classification.
Inputs:
- scores: Input data, of shape (N, C) where x[i, j] is the score for the jth
class for the ith input.
- y: Vector of labels, of shape (N,) where y[i] is the label for x[i] and
0 <= y[i] < C
Returns a tuple of:
- loss: Scalar giving the loss
- dscores: Gradient of the loss with respect to x
"""
N, C = scores.shape
scores = scores - np.max(scores, 1, keepdims=True)
loss = np.sum(-1 * scores[np.arange(N), y]) + np.sum(np.log(np.sum(np.exp(scores), 1)))
loss /= N
scores_e = np.exp(scores)
dscores = scores_e / np.sum(scores_e, 1).reshape(N, 1)
dscores[np.arange(N), y] = dscores[np.arange(N), y] - 1
dscores /= N
return loss, dscores | 5,357,488 |
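# Hedged usage sketch (assumes softmax_loss above is in scope): three samples over four
# classes; the analytic gradient is compared against a one-sided numerical estimate for
# a single entry.
import numpy as np

scores = np.array([[1.0, 2.0, 0.5, -1.0],
                   [0.2, 0.1, 0.3, 0.4],
                   [2.0, -0.5, 0.0, 1.0]])
y = np.array([1, 3, 0])
loss, dscores = softmax_loss(scores, y)
print(loss, dscores.shape)

h = 1e-5
bumped = scores.copy()
bumped[0, 2] += h
loss_h, _ = softmax_loss(bumped, y)
print((loss_h - loss) / h, dscores[0, 2])  # the two numbers should nearly agree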
def load_flags(save_dir, save_file="flags.obj"):
"""
    This function inflates the pickled object back into a flags object for reuse, typically during evaluation (after training).
:param save_dir: The place where the obj is located
:param save_file: The file name of the file, usually flags.obj
:return: flags
"""
with open(os.path.join(save_dir, save_file), 'rb') as f: # Open the file
flags = pickle.load(f) # Use pickle to inflate the obj back to RAM
return flags | 5,357,489 |
def text_to_lines(path):
"""
Parse a text file into lines.
Parameters
----------
path : str
Fully specified path to text file
Returns
-------
list
Non-empty lines in the text file
"""
delimiter = None
with open(path, encoding='utf-8-sig', mode='r') as f:
text = f.read()
if delimiter is not None and delimiter not in text:
e = DelimiterError(
'The delimiter specified does not create multiple words. Please specify another delimiter.')
raise (e)
lines = [x.strip().split(delimiter) for x in text.splitlines() if x.strip() != '']
return lines | 5,357,490 |
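# Hedged usage sketch (assumes text_to_lines above is in scope): parse a small
# whitespace-delimited word list from a temporary file. Because the delimiter is
# hard-coded to None, the DelimiterError branch is never reached, so that helper class
# does not need to be importable for this example.
import os
import tempfile

with tempfile.NamedTemporaryFile("w", suffix=".txt", delete=False, encoding="utf-8") as f:
    f.write("cat dog\n\nbird fish\n")
    tmp_path = f.name
print(text_to_lines(tmp_path))  # [['cat', 'dog'], ['bird', 'fish']]
os.remove(tmp_path)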
def measure_hemijunctions_timelapse(ims_labels, ims_labels_hjs):
"""
Measure the hemijunction traits from a timelapse of a live-imaged epithelium.
Parameters
----------
ims_labels : 3D ndarray (t,y,x)
Each timepoint is a 2D array with labeled regions.
ims_labels_hjs : 3D ndarray (t,y,x)
Each timepoint is a 2D array with hemijunctions labeled such that each one
has the same label as its "sending cell". Each "interface" spans a cell-cell
junction and is composed of two hemijunctions.
Returns
-------
df_hjs : pandas DataFrame
Each row is a single hemijunction from a single time step.
"""
# Total number of frames
total_t = np.shape(ims_labels)[0]
dfs = []
for t in range(total_t):
print(f"Measuring hemijunctions for timepoint {t} out of {total_t - 1}")
df_tmp = measure_hemijunctions(ims_labels[t], ims_labels_hjs[t])
# Add a column for t_step
df_tmp["t_step"] = [t] * len(df_tmp.index)
dfs.append(df_tmp)
df_hjs = pd.concat(dfs, ignore_index=True)
return df_hjs | 5,357,491 |
def get_compare_tables_checks_tasks():
"""Get list of tasks that will compare tables checks between databases.
Args:
Returns:
list: list of tasks to be executed in a process pool. Each item is a dict instance with following strucutre:
{
'function' (function): the function to be executed.
'kwds': keyworded args to be passed to the function.
}
"""
return [{
'function': compare_tables_checks,
'kwds': {}
}] | 5,357,492 |
def label_pr_failures(pull: Union[PullRequest, ShortPullRequest]) -> Set[str]:
"""
Labels the given pull request to indicate which checks are failing.
:param pull:
:return: The new labels set for the pull request.
"""
pr_checks = get_checks_for_pr(pull)
failure_labels: Set[str] = set()
success_labels: Set[str] = set()
def determine_labels(from_, to):
for check in from_:
if _python_dev_re.match(check):
continue
if check in {"Flake8", "docs"}:
to.add(f"failure: {check.lower()}")
elif check.startswith("mypy"):
to.add("failure: mypy")
elif check.startswith("ubuntu"):
to.add("failure: Linux")
elif check.startswith("windows"):
to.add("failure: Windows")
determine_labels(pr_checks.failing, failure_labels)
determine_labels(pr_checks.successful, success_labels)
issue: Issue = pull.issue()
current_labels = {label.name for label in issue.labels()}
for label in success_labels:
if label in current_labels and label not in failure_labels:
issue.remove_label(label)
new_labels = current_labels - success_labels
new_labels.update(failure_labels)
if new_labels != current_labels:
issue.add_labels(*new_labels)
return new_labels | 5,357,493 |
def test_index_is_target():
"""Assert that an error is raised when the index is the target column."""
with pytest.raises(ValueError, match=r".*same as the target column.*"):
ATOMClassifier(X_bin, index="worst fractal dimension", random_state=1) | 5,357,494 |
def _replace_folder_path(path: str, from_folder: str, to_folder: str) -> Optional[str]:
"""Changes the path from the source ('from') folder to the destination ('to') folder
Arguments:
path: the path to adjust
from_folder: the folder to change from
to_folder: the folder to change the path to
Return:
        A copy of the path with the folder changed when 'path' starts with 'from_folder'; otherwise
        None is returned
    Notes:
        Only fully qualified partial paths are considered valid. Thus, '/a/b/c' is NOT considered the start of path '/a/b/concord', but
        is considered the start of '/a/b/c' and '/a/b/c/dogs.csv'
"""
# Make sure we have a legitimate 'from' path
if not path.startswith(from_folder):
logging.debug('Replace folder path: original path "%s" doesn\'t start with expected folder "%s"', path, from_folder)
return None
check_idx = len(from_folder)
if from_folder[-1:] == '/' or from_folder[-1:] == '\\':
check_idx -= 1
    if check_idx >= len(path) or path[check_idx] not in ('/', '\\'):
        return None
# Return the new path
rem = path[len(from_folder):]
if rem[0] == '/' or rem[0] == '\\':
rem = rem[1:]
return os.path.join(to_folder, rem) | 5,357,495 |
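# Hedged usage sketch (assumes _replace_folder_path above is in scope; POSIX-style
# example paths are made up):
print(_replace_folder_path("/data/raw/plots/img.tif", "/data/raw", "/data/clean"))
# -> /data/clean/plots/img.tif
print(_replace_folder_path("/data/rawest/img.tif", "/data/raw", "/data/clean"))
# -> None, because '/data/raw' is not a complete folder component of that path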
def arrange_images(total_width, total_height, *images_positions):
"""Return a composited image based on the (image, pos) arguments."""
result = mel.lib.common.new_image(total_height, total_width)
for image, pos in images_positions:
mel.lib.common.copy_image_into_image(image, result, pos[1], pos[0])
return result | 5,357,496 |
def import_core_utilities() -> Tuple[ModuleType, ModuleType, ModuleType]:
"""Dynamically imports and return Tracing, Logging, and Metrics modules"""
return (
importlib.import_module(TRACING_PACKAGE),
importlib.import_module(LOGGING_PACKAGE),
importlib.import_module(METRICS_PACKAGE),
) | 5,357,497 |
def create_folders(*folders):
"""
Utility for creating directories
:param folders: directory names
:return:
"""
for folder in folders:
os.makedirs(folder, exist_ok=True) | 5,357,498 |
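# Hedged usage sketch: create a small output tree, silently skipping folders that
# already exist.
create_folders("out/logs", "out/models", "out/plots")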
def pancakeSort(self, A):
    # ! This approach finds the largest remaining value on each pass and flips it into its correct position
"""
:type A: List[int]
:rtype: List[int]
"""
bucket = sorted(A)
ans = []
for k in range(len(A),0,-1):
i = A.index(bucket.pop())+1
ans += [i, k]
A = A[i:k][::-1] + A[:i] + A[k:]
print(A)
return ans | 5,357,499 |
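# Hedged usage sketch: the function above is written as an unbound method, so a
# placeholder is passed for `self`; it also prints each intermediate list as a side
# effect.
A = [3, 2, 4, 1]
flips = pancakeSort(None, A)
print(flips)  # [3, 4, 2, 3, 1, 2, 1, 1] -- each pair (i, k) means flip the first i, then the first k elements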