content (string) | id (int64) |
---|---|
def test_validation_happy(name, mode, parent, tmp_trestle_dir: pathlib.Path) -> None:
"""Test successful validation runs."""
(tmp_trestle_dir / test_utils.TARGET_DEFS_DIR / 'my_test_model').mkdir(exist_ok=True, parents=True)
(tmp_trestle_dir / test_utils.TARGET_DEFS_DIR / 'my_test_model2').mkdir(exist_ok=True, parents=True)
shutil.copyfile(
test_data_dir / 'yaml/good_target.yaml',
tmp_trestle_dir / test_utils.TARGET_DEFS_DIR / 'my_test_model/target-definition.yaml'
)
shutil.copyfile(
test_data_dir / 'yaml/good_target.yaml',
tmp_trestle_dir / test_utils.TARGET_DEFS_DIR / 'my_test_model2/target-definition.yaml'
)
model_def_file = tmp_trestle_dir / test_utils.TARGET_DEFS_DIR / name / ('target-definition.yaml')
if mode == '-f':
if not parent:
testcmd = f'trestle validate {mode} {model_def_file} -m duplicates'
else:
testcmd = f'trestle validate {mode} {model_def_file.parent} -m duplicates'
elif mode == '-n':
testcmd = f'trestle validate -t target-definition -n {name} -m duplicates'
elif mode == '-x':
testcmd = f'trestle validate -t target-definition -n {name}'
else:
testcmd = 'trestle validate -a -m duplicates'
with patch.object(sys, 'argv', testcmd.split()):
with pytest.raises(SystemExit) as pytest_wrapped_e:
cli.run()
assert pytest_wrapped_e.type == SystemExit
assert pytest_wrapped_e.value.code == 0 | 5,355,200 |
def edit(project: Any, params: Dict[str, str]) -> Dict[str, str]:
"""
Add a new method to a Python class in its given module.
TODO: See why an <EOF> char is added along with the new method
"""
eng = project.context().pathExpressionEngine()
res = eng.evaluate(project, "/Directory()/File()[@name='"+params['mod_name']+"']/PythonFile()//classdef()[/NAME[@value='"+params['class_name']+"']]")
for match in res.matches():
match.append("\n def "+params['method_name']+"(self):\n print('hey')\n")
return {"status":"OK", "message": "Method added to class"} | 5,355,201 |
def write_version_py(filename: str = 'python/esroofit/version.py') -> None:
"""Write package version to version.py.
This ensures that the version recorded in version.py stays in sync with the package setup.
:param filename: The version.py file to write to.
:type filename: str
:return:
:rtype: None
"""
# Do not modify the indentation of version_str!
version_str = """\"\"\"THIS FILE IS AUTO-GENERATED BY ESKAPADE SETUP.PY.\"\"\"
name = '{name!s}'
version = '{version!s}'
full_version = '{full_version!s}'
release = {is_release!s}
"""
version_file = open(filename, 'w')
try:
version_file.write(version_str.format(name=NAME.lower(),
version=VERSION,
full_version=FULL_VERSION,
is_release=not DEV))
finally:
version_file.close() | 5,355,202 |
def get_country_code(country_name):
""" Return the Pygal 2-digit country code for the given country."""
for code, name in COUNTRIES.items():
if name == country_name:
return code
# If the country wasn't found, return None.
return None | 5,355,203 |
def get_divmod(up, down, minute=False, limit=2):
"""
Compute the quotient of two numbers.
:param up: dividend
:param down: divisor
:param minute: if True, convert the result to minutes (additionally divide by 60)
:param limit: number of decimal places to keep
:return: the quotient (0 if either operand is 0)
"""
if up == 0:
return 0
if down == 0:
return 0
if minute:
return round(up/down/60.0, limit)
return round(float(up)/down, limit) | 5,355,204 |
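A brief usage sketch for get_divmod above; the sample values are illustrative only:
assert get_divmod(0, 5) == 0                     # zero dividend short-circuits to 0
assert get_divmod(7, 2) == 3.5                   # plain division rounded to 2 decimals
assert get_divmod(7, 3, limit=3) == 2.333
assert get_divmod(120, 2, minute=True) == 1.0    # 120 / 2 seconds -> 1.0 minute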
def find_kernel_base():
"""Find the kernel base."""
return idaapi.get_fileregion_ea(0) | 5,355,205 |
def main():
"""
Instantiate the checker and chess boards and print the results.
"""
checkers = CheckerBoard()
print(f'checkers: {checkers}')
chess = ChessBoard()
print(f'chess: {chess}') | 5,355,206 |
def get_padding(x, padding_value=0, dtype=tf.float32):
"""Return float tensor representing the padding values in x.
Args:
x: int tensor with any shape
padding_value: int value that marks padding positions (default 0)
dtype: type of the output
Returns:
float tensor with same shape as x containing values 0 or 1.
0 -> non-padding, 1 -> padding
"""
# print("get_padding", dtype)
with tf.name_scope("padding"):
return tf.cast(tf.equal(x, padding_value), dtype=dtype) | 5,355,207 |
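A short usage sketch for get_padding, assuming TensorFlow 2.x eager execution so the result can be inspected directly:
import tensorflow as tf

x = tf.constant([[7, 0, 3, 0]])   # 0 is the default padding_value
pad = get_padding(x)
assert pad.dtype == tf.float32
assert pad.numpy().tolist() == [[0.0, 1.0, 0.0, 1.0]]   # 1 marks padding positions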
def log_loss_and_acc(model_name: str, loss: torch.Tensor, acc: torch.Tensor, experiment_logger: LightningLoggerBase,
global_step: int):
"""
Logs the loss and accuracy both as a histogram and as a scalar (mean)
:param model_name: name for logging
:param loss: loss tensor
:param acc: acc tensor
:param experiment_logger: lightning logger
:param global_step: global step
:return:
"""
experiment_logger.experiment.add_histogram(f'{model_name}/acc/test', acc, global_step=global_step)
experiment_logger.experiment.add_scalar(f'{model_name}/acc/test/mean', torch.mean(acc),
global_step=global_step)
if loss.dim() == 0:
loss = torch.tensor([loss])
for x in loss:
if torch.isnan(x) or torch.isinf(x):
return
experiment_logger.experiment.add_histogram(f'{model_name}/loss/test/', loss, global_step=global_step)
experiment_logger.experiment.add_scalar(f'{model_name}/loss/test/mean', torch.mean(loss),
global_step=global_step) | 5,355,208 |
def canonical_symplectic_form_inverse (darboux_coordinates_shape:typing.Tuple[int,...], *, dtype:typing.Any) -> np.ndarray:
"""
Returns the inverse of canonical_symplectic_form(dtype=dtype). See documentation for that function for more.
In particular, the inverse of the canonical symplectic form is
[ 0 I ]
[ -I 0 ]
The inverse of the canonical symplectic form is a section of
TM \wedge TM
or can be thought of (as it is used here) as an alternating section of
TM \otimes TM
and therefore "naturally converts" a covector field on M (i.e. a section of T^{*}M) into a vector field on M
(i.e. a section of TM).
This form is what's used in the definition of the symplectic gradient of a function.
"""
validate_darboux_coordinates_shape_or_raise(darboux_coordinates_shape)
assert vorpy.tensor.dimension_of_shape(darboux_coordinates_shape) % 2 == 0
configuration_space_dimension = vorpy.tensor.dimension_of_shape(darboux_coordinates_shape) // 2
omega_inv = vorpy.tensor.contract(
'ik,jl',
canonical_symplectic_form_abstract_inverse(dtype=dtype),
np.eye(configuration_space_dimension, dtype=dtype),
dtype=dtype,
)
assert omega_inv.shape == (2,configuration_space_dimension,2,configuration_space_dimension)
return omega_inv.reshape(darboux_coordinates_shape+darboux_coordinates_shape) | 5,355,209 |
def update_with_error(a, b, path=None):
"""Merges `b` into `a` like dict.update; however, raises KeyError if values of a
key shared by `a` and `b` conflict.
Adapted from: https://stackoverflow.com/a/7205107
"""
if path is None:
path = []
for key in b:
if key in a:
if isinstance(a[key], dict) and isinstance(b[key], dict):
update_with_error(a[key], b[key], path + [str(key)])
elif a[key] == b[key]:
pass # same leaf value
elif a[key] is None:
a[key] = b[key]
elif (isinstance(a[key], (list, tuple)) and
not isinstance(a[key], str) and
isinstance(b[key], (list, tuple)) and
not isinstance(b[key], str) and
len(a[key]) == len(b[key]) and
all((av is None or av == bv) for av, bv in zip(a[key], b[key]))): # yapf: disable
a[key] = b[key]
else:
raise KeyError('Conflict at {}: {} vs. {}'.format('.'.join(path + [str(key)]), a[key], b[key]))
else:
a[key] = b[key]
return a | 5,355,210 |
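A small usage sketch for update_with_error, showing both a clean merge and a conflicting leaf:
a = {"x": 1, "nested": {"y": 2}}
b = {"nested": {"z": 3}, "w": 4}
assert update_with_error(a, b) == {"x": 1, "nested": {"y": 2, "z": 3}, "w": 4}

try:
    update_with_error({"x": 1}, {"x": 2})   # conflicting values for the same key
except KeyError as err:
    print(err)                              # 'Conflict at x: 1 vs. 2'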
def tokenize(sent):
"""Return the tokens of a sentence including punctuation.
>>> tokenize("Bob dropped the apple. Where is the apple?")
["Bob", "dropped", "the", "apple", ".", "Where", "is", "the", "apple", "?"]
"""
# NOTE: "(\W+)?" can match the empty string, which on Python 3.7+ makes
# re.split break between every character; "(\W+)" keeps whole tokens.
return [x.strip() for x in re.split(r"(\W+)", sent) if x and x.strip()]
def tree_unflatten(flat, tree, copy_from_tree=None):
"""Unflatten a list into a tree given the tree shape as second argument.
Args:
flat: a flat list of elements to be assembled into a tree.
tree: a tree with the structure we want to have in the new tree.
copy_from_tree: optional list of elements that we just copy from tree.
This argument is used when the flat version does not contain all elements
of the expected tree but just a subset, while the rest are filled from
the tree itself. It allows to omit "unnecessary" elements. For example,
consider trees (A, (B, X), X) and (X, (A, X), B) where X is some element
we do not care about. Flattening the first tree and removing X will yield
a flat list [A, B] and the second tree can then be reconstructed from this
list and the tree (X, (E, X), E) with copy_from_tree=[X]. One example
where this is used is the weights-tree of a model, where layers with no
weights have () in the tree and we use copy_from_tree=[()] to restore
a model from a file that only has a list of trainable weights.
Returns:
A pair (new_tree, rest_of_flat) where the new tree that has the structure
of tree but with leaves from flat, and the remaining elements of flat if
more were provided than the number of leaves of tree (useful for recursion).
"""
if copy_from_tree is not None and tree in copy_from_tree:
return tree, flat
if isinstance(tree, (list, tuple)):
new_tree, rest = [], flat
for t in tree:
new_t, rest = tree_unflatten(rest, t, copy_from_tree=copy_from_tree)
new_tree.append(new_t)
new_tree = tuple(new_tree) if isinstance(tree, tuple) else new_tree
return new_tree, rest
if isinstance(tree, dict):
new_tree, rest = {}, flat
for k in tree:
new_v, rest = tree_unflatten(rest, tree[k], copy_from_tree=copy_from_tree)
new_tree[k] = new_v
return new_tree, rest
return flat[0], flat[1:] | 5,355,212 |
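A quick usage sketch for tree_unflatten; the structures below are toy values chosen for illustration:
flat = [1, 2, 3]
structure = ('a', ('b', 'c'))        # only the shape matters, not the leaf values
new_tree, rest = tree_unflatten(flat, structure)
assert new_tree == (1, (2, 3))
assert rest == []

# copy_from_tree lets matching subtrees pass through unchanged.
new_tree, _ = tree_unflatten([5], ((), 'a'), copy_from_tree=[()])
assert new_tree == ((), 5)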
def ByteOffsetToCodepointOffset( line_value, byte_offset ):
"""The API calls for byte offsets into the UTF-8 encoded version of the
buffer. However, ycmd internally uses unicode strings. This means that
when we need to walk 'characters' within the buffer, such as when checking
for semantic triggers and similar, we must use codepoint offsets, rather than
byte offsets.
This method converts the |byte_offset|, which is a utf-8 byte offset, into
a codepoint offset in the unicode string |line_value|."""
byte_line_value = ToBytes( line_value )
return len( ToUnicode( byte_line_value[ : byte_offset - 1 ] ) ) + 1 | 5,355,213 |
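A worked illustration of the byte-to-codepoint conversion, using minimal stand-ins for ycmd's ToBytes/ToUnicode helpers (an assumption made so the sketch is self-contained; the real helpers handle more cases):
def ToBytes(value):     # simplified stand-in
    return value.encode('utf-8')

def ToUnicode(value):   # simplified stand-in
    return value.decode('utf-8', errors='ignore')

line = 'é x'            # 'é' occupies two bytes in UTF-8
# 1-based byte offset 3 points at the space; its codepoint offset is 2.
assert ByteOffsetToCodepointOffset(line, 3) == 2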
def get_authed_tweepy(access_token, token_secret):
"""Returns an authed instance of the twitter api wrapper tweepy for a given user."""
social_app_twitter = get_object_or_404(SocialApp, provider='twitter')
auth = tweepy.OAuthHandler(social_app_twitter.client_id, social_app_twitter.secret)
auth.set_access_token(access_token, token_secret)
return tweepy.API(auth) | 5,355,214 |
def suffix_for_status(status):
"""Return ``title`` suffix for given status"""
suffix = STATUS_SUFFIXES.get(status)
if not suffix:
return ''
return ' {}'.format(suffix) | 5,355,215 |
def test_codesystem_1(base_settings):
"""No. 1 tests collection for CodeSystem.
Test File: codesystem-contract-security-classification.json
"""
filename = (
base_settings["unittest_data_dir"]
/ "codesystem-contract-security-classification.json"
)
inst = codesystem.CodeSystem.parse_file(
filename, content_type="application/json", encoding="utf-8"
)
assert "CodeSystem" == inst.resource_type
impl_codesystem_1(inst)
# testing reverse by generating data from itself and create again.
data = inst.dict()
assert "CodeSystem" == data["resourceType"]
inst2 = codesystem.CodeSystem(**data)
impl_codesystem_1(inst2) | 5,355,216 |
def login():
"""
login an existing user
"""
try:
username = json.loads(request.data.decode())['username'].replace(" ", "")
password = json.loads(request.data.decode())['password'].replace(" ", "")
user = User(username, "", "")
user = user.exists()
if check_password_hash(user.password_hash, password):
"""token if password is correct"""
token = auth_encode(user.user_id)
if token:
response = {'response': 'login successful', 'token': token.decode()}
return jsonify(response), 200
else:
return jsonify({'response': 'invalid username/password'}), 422
except (KeyError, ValueError) as ex:
print('error in login', ex)
return jsonify({'response': 'json body must contain username and password'}), 400
except (psycopg2.DatabaseError, psycopg2.IntegrityError, Exception) as ex:
print('error in login', ex)
return jsonify({'response': 'user not found'}), 404 | 5,355,217 |
def soma_radius(morph):
"""Get the radius of a morphology's soma."""
return morph.soma.radius | 5,355,218 |
async def async_get_authorization_server(hass: HomeAssistant) -> AuthorizationServer:
"""Return authorization server."""
return AuthorizationServer(
authorize_url=AUTHORIZATION_ENDPOINT,
token_url=TOKEN_ENDPOINT,
) | 5,355,219 |
def process_video_list(filename):
"""
submit multiple videos from a json file
"""
import django,json
sys.path.append(os.path.dirname(__file__))
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "dva.settings")
django.setup()
from dvaapp.views import handle_youtube_video
vlist = json.load(open(filename))
for video in vlist:
handle_youtube_video(video['name'],video['url']) | 5,355,220 |
def check_proposal_functions(
model: Model, state: Optional[flow.SamplingState] = None, observed: Optional[dict] = None,
) -> bool:
"""
Check for the non-default proposal generation functions
Parameters
----------
model : pymc4.Model
Model to sample posterior for
state : Optional[flow.SamplingState]
Current state
observed : Optional[Dict[str, Any]]
Observed values (optional)
"""
(_, state, _, _, continuous_distrs, discrete_distrs) = initialize_state(
model, observed=observed, state=state
)
init = state.all_unobserved_values
init_state = list(init.values())
init_keys = list(init.keys())
for i, state_part in enumerate(init_state):
untrs_var, unscoped_tr_var = scope_remove_transformed_part_if_required(
init_keys[i], state.transformed_values
)
# get the distribution for the random variable name
distr = continuous_distrs.get(untrs_var, None)
if distr is None:
distr = discrete_distrs[untrs_var]
func = distr._default_new_state_part
if callable(func):
return True
return False | 5,355,221 |
def test_random_crop_01_py():
"""
Test RandomCrop op with py_transforms: size is a single integer, expected to pass
"""
logger.info("test_random_crop_01_py")
original_seed = config_get_set_seed(0)
original_num_parallel_workers = config_get_set_num_parallel_workers(1)
# Generate dataset
data = ds.TFRecordDataset(DATA_DIR, SCHEMA_DIR, columns_list=["image"], shuffle=False)
# Note: If size is an int, a square crop of size (size, size) is returned.
transforms = [
py_vision.Decode(),
py_vision.RandomCrop(512),
py_vision.ToTensor()
]
transform = mindspore.dataset.transforms.py_transforms.Compose(transforms)
data = data.map(operations=transform, input_columns=["image"])
filename = "random_crop_01_py_result.npz"
save_and_check_md5(data, filename, generate_golden=GENERATE_GOLDEN)
# Restore config setting
ds.config.set_seed(original_seed)
ds.config.set_num_parallel_workers(original_num_parallel_workers) | 5,355,222 |
def smaller_n(n1, n2):
""" Compare two N_Numbers and returns smaller one. """
p1, s1 = n1
p2, s2 = n2
p1l = len(str(p1)) + s1
p2l = len(str(p2)) + s2
if p1l < p2l:
return n1
elif p1l > p2l:
return n2
p1 = p1.ljust(36, '9')
p2 = p2.ljust(36, '9')
if p1 <= p2:
return n1
else:
return n2 | 5,355,223 |
def deploy_gcs_audit_logs(config):
"""Deploys the GCS logs bucket to the remote audit logs project, if used."""
if FLAGS.enable_new_style_resources:
logging.info('GCS audit logs will be deployed through CFT.')
return
# The GCS logs bucket must be created before the data buckets.
if not config.audit_logs_project:
logging.info('Using local GCS audit logs.')
return
logs_gcs_bucket = config.project['audit_logs'].get('logs_gcs_bucket')
if not logs_gcs_bucket:
logging.info('No remote GCS logs bucket required.')
return
logging.info('Creating remote GCS logs bucket.')
data_project_id = config.project['project_id']
logs_project = config.audit_logs_project
audit_project_id = logs_project['project_id']
deployment_name = 'audit-logs-{}-gcs'.format(
data_project_id.replace('_', '-'))
path = os.path.join(
os.path.dirname(__file__), 'templates/remote_audit_logs.py')
dm_template_dict = {
'imports': [{
'path': path
}],
'resources': [{
'type': path,
'name': deployment_name,
'properties': {
'owners_group': logs_project['owners_group'],
'auditors_group': config.project['auditors_group'],
'logs_gcs_bucket': logs_gcs_bucket,
},
}]
}
utils.run_deployment(dm_template_dict, deployment_name, audit_project_id) | 5,355,224 |
def sortDict(dictionary: dict):
"""Lambdas made some cringe and stupid thing some times, so this dirty thing was developed"""
sortedDictionary = {}
keys = list(dictionary.keys())
keys.sort()
for key in keys:
sortedDictionary[key] = dictionary[key]
return sortedDictionary | 5,355,225 |
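A tiny usage sketch for sortDict; the second assertion checks key order, which is what the function is actually for:
assert sortDict({"b": 2, "a": 1, "c": 3}) == {"a": 1, "b": 2, "c": 3}
assert list(sortDict({"z": 0, "y": 1})) == ["y", "z"]   # keys come out sorted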
def elina_linexpr0_alloc(lin_discr, size):
"""
Allocate a linear expressions with coefficients by default of type ElinaScalar and c_double.
If sparse representation, corresponding new dimensions are initialized with ELINA_DIM_MAX.
Parameters
----------
lin_discr : c_uint
Enum of type ElinaLinexprDiscr that defines the representation (sparse or dense).
size : c_size_t
Size of the internal array.
Returns
-------
linexpr : ElinaLinexpr0Ptr
Pointer to the newly allocated ElinaLinexpr0
"""
linexpr = None
try:
elina_linexpr0_alloc_c = elina_auxiliary_api.elina_linexpr0_alloc
elina_linexpr0_alloc_c.restype = ElinaLinexpr0Ptr
elina_linexpr0_alloc_c.argtypes = [c_uint, c_size_t]
linexpr = elina_linexpr0_alloc_c(lin_discr, size)
except:
print('Problem with loading/calling "elina_linexpr0_alloc" from "libelinaux.so"')
print('Make sure you are passing c_uint, c_size_t to the function')
return linexpr | 5,355,226 |
def FrameTag_get_tag():
"""FrameTag_get_tag() -> std::string"""
return _RMF.FrameTag_get_tag() | 5,355,227 |
def noise4(x: float, y: float, z: float, w: float) -> float:
"""
Generate 4D OpenSimplex noise from X,Y,Z,W coordinates.
"""
return _default.noise4(x, y, z, w) | 5,355,228 |
def damerau_levenshtein_distance(word1: str, word2: str) -> int:
"""Calculates the distance between two words."""
inf = len(word1) + len(word2)
table = [[inf for _ in range(len(word1) + 2)] for _ in range(len(word2) + 2)]
for i in range(1, len(word1) + 2):
table[1][i] = i - 1
for i in range(1, len(word2) + 2):
table[i][1] = i - 1
da = {}
for col, c1 in enumerate(word1, 2):
last_row = 0
for row, c2 in enumerate(word2, 2):
last_col = da.get(c2, 0)
addition = table[row - 1][col] + 1
deletion = table[row][col - 1] + 1
substitution = table[row - 1][col - 1] + (0 if c1 == c2 else 1)
transposition = (
table[last_row - 1][last_col - 1]
+ (col - last_col - 1)
+ (row - last_row - 1)
+ 1
)
table[row][col] = min(addition, deletion, substitution, transposition)
if c1 == c2:
last_row = row
da[c1] = col
return table[len(word2) + 1][len(word1) + 1] | 5,355,229 |
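A few sanity checks for damerau_levenshtein_distance; the expected values follow the standard definition, where substitution, insertion, deletion and adjacent transposition each cost 1:
assert damerau_levenshtein_distance("kitten", "sitting") == 3   # 2 substitutions + 1 insertion
assert damerau_levenshtein_distance("ab", "ba") == 1            # single transposition
assert damerau_levenshtein_distance("", "abc") == 3             # insertions only
assert damerau_levenshtein_distance("same", "same") == 0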
def closeSession(log_file, seen_tweets):
"""Write final files."""
with open(log, 'w') as outfile:
outfile.write(log_file)
with open(seentweets, 'w') as outfile:
outfile.write(seen_tweets) | 5,355,230 |
def ga_validator(value: Any) -> str | int:
"""Validate that value is parsable as GroupAddress or InternalGroupAddress."""
if isinstance(value, (str, int)):
try:
parse_device_group_address(value)
return value
except CouldNotParseAddress:
pass
raise vol.Invalid(
f"value '{value}' is not a valid KNX group address '<main>/<middle>/<sub>', '<main>/<sub>' "
"or '<free>' (eg.'1/2/3', '9/234', '123'), nor xknx internal address 'i-<string>'."
) | 5,355,231 |
def getTime(sim):
"""
Get the network time
@param sim: the SIM serial handle
"""
sim.write(b'AT+CCLK?\n')
line = sim.readline()
res = None
while not line.endswith(b'OK\r\n'):
time.sleep(0.5)
matcher = re.match(br'^\+CCLK: "([^+]+)\+[0-9]+"\r\n', line)
if matcher:
ts = matcher.group(1).decode('ascii')
res = datetime.datetime.strptime(ts[:ts.find('+')], "%y/%m/%d,%H:%M:%S")
line = sim.readline()
return res | 5,355,232 |
def start_buffer_thread(buffer_thread_config):
""" 开启一个buffer队列线程,监视所有的buffer队列,
根据buffer队列对应的job队列拥塞情况, 将buffer队列的任务合适的推送到相应的job队列
"""
if not buffer_thread_config: return
global buffer_thread_instance
if buffer_thread_instance is not None:
buffer_thread_instance.stop()
buffer_thread = BufferThread(buffer_thread_config)
buffer_thread.setDaemon(True)
buffer_thread.start()
buffer_thread_instance = buffer_thread
sys.stdout.write('start a buffer thread. \n') | 5,355,233 |
def defaultTargetLicense():
"""
Default license for targets, shared for all targets that do not specify
their own custom license, which is useful for saving storage space as this
license is globally referenced by and applies to the majority of targets.
"""
import makehuman
return makehuman.getAssetLicense( {"license": "AGPL3",
"author": "MakeHuman",
"copyright": "2016 Data Collection AB, Joel Palmius, Jonas Hauquier"} ) | 5,355,234 |
def generate_app(path, template=None, create=False):
""" Generates a CouchApp in app_dir
:attr verbose: boolean, default False
:return: boolean, dict. { 'ok': True } if ok,
{ 'ok': False, 'error': message }
if something was wrong.
"""
TEMPLATES = ['app']
prefix = ''
if template is not None:
prefix = os.path.join(*template.split('/'))
try:
os.makedirs(path)
except OSError as e:
errno, message = e.args
raise AppError("Can't create a CouchApp in %s: %s" % (path, message))
for n in DEFAULT_APP_TREE:
tp = os.path.join(path, n)
os.makedirs(tp)
for t in TEMPLATES:
appdir = path
if prefix:
# we do the job twice for now to make sure an app or vendor
# template exist in user template location
# fast on linux since there is only one user dir location
# but could be a little slower on windows
for user_location in user_path():
location = os.path.join(user_location, 'templates', prefix, t)
if os.path.exists(location):
t = os.path.join(prefix, t)
break
copy_helper(appdir, t)
# add vendor
vendor_dir = os.path.join(appdir, 'vendor')
os.makedirs(vendor_dir)
copy_helper(vendor_dir, '', tname="vendor")
fid = os.path.join(appdir, '_id')
if not os.path.isfile(fid):
with open(fid, 'w') as f:
f.write('_design/%s' % os.path.split(appdir)[1])
if create:
localdoc.document(path, create=True)
logger.info("%s generated." % path) | 5,355,235 |
def RunTestsOnNaCl(targets, build_args):
"""Run a test suite for the NaCl version."""
# Currently we can only run the limited test set which is defined as
# nacl_test_targets in nacl_extension.gyp.
if targets:
PrintErrorAndExit('Targets [%s] are not supported.' % ', '.join(targets))
nacl_gyp = os.path.join(SRC_DIR, 'chrome', 'nacl', 'nacl_extension.gyp')
(build_options, build_targets) = ParseBuildOptions(
build_args + [nacl_gyp + ':run_nacl_test'])
# Run the test suite in NaCl.
BuildMain(build_options, build_targets) | 5,355,236 |
def get_repo_info(main_path):
""" Get the info of repo.
Args:
main_path: the file store location.
Return:
A json object.
"""
with open(main_path + '/repo_info.json') as read_file:
repo_info = json.load(read_file)
return repo_info | 5,355,237 |
def parse_settings(settings_file: str) -> dict:
"""
The function parses settings file into dict
Parameters
----------
settings_file : str
File with the model settings, must be in yaml.
Returns
-------
ydict : dict
Parsed settings used for modeling.
"""
with open(settings_file, 'r') as fstream:
ydict = yaml.safe_load(fstream)
return ydict | 5,355,238 |
def get_mac_address(path):
"""
input: path to the file with the location of the mac address
output: A string containing a mac address
Possible exceptions:
FileNotFoundError - when the file is not found
PermissionError - in the absence of access rights to the file
TypeError - If the function argument is not a string.
"""
if type(path) is not str:
raise TypeError("The path must be a string value")
# FileNotFoundError and PermissionError propagate naturally from open();
# using a context manager also guarantees the file handle is closed.
with open(path) as file:
return file.readline().strip().upper()
def jwt_get_username_from_payload_handler(payload):
"""
Override this function if username is formatted differently in payload
"""
return payload.get('name') | 5,355,240 |
def grr_uname(line):
"""Returns certain system infornamtion.
Args:
line: A string representing arguments passed to the magic command.
Returns:
String representing some system information.
Raises:
NoClientSelectedError: Client is not selected to perform this operation.
"""
args = grr_uname.parser.parse_args(shlex.split(line))
return magics_impl.grr_uname_impl(args.machine, args.kernel_release) | 5,355,241 |
def stock_szse_summary(date: str = "20200619") -> pd.DataFrame:
"""
Shenzhen Stock Exchange - market overview - statistics by security category
http://www.szse.cn/market/overview/index.html
:param date: the most recent completed trading day
:type date: str
:return: statistics by security category
:rtype: pandas.DataFrame
"""
url = "http://www.szse.cn/api/report/ShowReport"
params = {
"SHOWTYPE": "xlsx",
"CATALOGID": "1803_sczm",
"TABKEY": "tab1",
"txtQueryDate": "-".join([date[:4], date[4:6], date[6:]]),
"random": "0.39339437497296137",
}
r = requests.get(url, params=params)
with warnings.catch_warnings(record=True):
warnings.simplefilter("always")
temp_df = pd.read_excel(BytesIO(r.content), engine="openpyxl")
temp_df["证券类别"] = temp_df["证券类别"].str.strip()
temp_df.iloc[:, 2:] = temp_df.iloc[:, 2:].applymap(lambda x: x.replace(",", ""))
temp_df.columns = ["证券类别", "数量", "成交金额", "总市值", "流通市值"]
temp_df["数量"] = pd.to_numeric(temp_df["数量"])
temp_df["成交金额"] = pd.to_numeric(temp_df["成交金额"])
temp_df["总市值"] = pd.to_numeric(temp_df["总市值"], errors="coerce")
temp_df["流通市值"] = pd.to_numeric(temp_df["流通市值"], errors="coerce")
return temp_df | 5,355,242 |
def find_all_movies_shows(pms): # pragma: no cover
""" Helper of get all the shows on a server.
Args:
func (callable): Run this function in a threadpool.
Returns: List
"""
all_shows = []
for section in pms.library.sections():
if section.TYPE in ('movie', 'show'):
all_shows += section.all()
return all_shows | 5,355,243 |
def dropout_gradient_descent(Y, weights, cache, alpha, keep_prob, L):
"""
Updates the weights of a neural network with Dropout regularization using
gradient descent
Y is a one-hot numpy.ndarray of shape (classes, m) that contains the
correct labels for the data
classes is the number of classes
m is the number of data points
weights is a dictionary of the weights and biases of the neural network
cache is a dictionary of the outputs and dropout masks of each layer of
the neural network
alpha is the learning rate
keep_prob is the probability that a node will be kept
L is the number of layers of the network
All layers use the tanh activation function except the last, which uses
the softmax activation function
The weights of the network should be updated in place
"""
m = Y.shape[1]
dz = [cache['A{}'.format(L)] - Y]
for layer in range(L, 0, -1):
A = cache['A{}'.format(layer - 1)]
W = weights['W{}'.format(layer)]
dw = np.matmul(dz[L - layer], A.T) / m
db = np.sum(dz[L - layer], axis=1, keepdims=True) / m
if layer > 1:
rglz = (1 - (A ** 2)) * (cache['D' + str(layer - 1)] / keep_prob)
dz.append(np.matmul(W.T, dz[L - layer]) * rglz)
weights['W{}'.format(layer)] -= alpha * dw
weights['b{}'.format(layer)] -= alpha * db | 5,355,244 |
def parse_vars(vars):
"""
Transform a list of NAME=value environment variables into a dict
"""
retval = {}
for var in vars:
key, value = var.split("=", 1)
retval[key] = value
return retval | 5,355,245 |
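A short usage sketch for parse_vars; only the first '=' splits, so values may themselves contain '=':
env = parse_vars(["HOME=/home/user", "OPTS=a=b,c=d"])
assert env == {"HOME": "/home/user", "OPTS": "a=b,c=d"}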
def find_dup_items(values: List) -> List:
"""Find duplicate items in a list
Arguments:
values {List} -- A list of items
Returns:
List -- A list of duplicated items
"""
dup = [t for t, c in collections.Counter(values).items() if c > 1]
return dup | 5,355,246 |
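A minimal usage sketch for find_dup_items (it relies on `collections` being imported at module level):
assert find_dup_items([1, 2, 2, 3, 3, 3]) == [2, 3]
assert find_dup_items(["a", "b", "c"]) == []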
def circle_location_Pass(circle_, image_, margin=0.15):
"""
Function for check if the circle_ is overlapping
with the margin of the image_.
"""
cy, cx, rad, accum = circle_
image_sizeY_, image_sizeX_ = image_.shape[0], image_.shape[1]
margin_min_x = int(image_sizeX_ * margin)
margin_max_x = int(image_sizeX_ * (1 - margin))
margin_min_y = int(image_sizeY_ * margin)
margin_max_y = int(image_sizeY_ * (1 - margin))
margin_min_xh = int(image_sizeX_ * margin/2.)
margin_max_xh = int(image_sizeX_ * (1 - margin/2.))
margin_min_yh = int(image_sizeY_ * margin/2.)
margin_max_yh = int(image_sizeY_ * (1 - margin/2.))
if cy<margin_min_y or cy>margin_max_y:
return False
if cx<margin_min_x or cx>margin_max_x:
return False
if cy-rad<margin_min_yh or cy+rad>margin_max_yh:
return False
if cx-rad<margin_min_xh or cx+rad>margin_max_xh:
return False
return True | 5,355,247 |
def test_deck_size():
"""Tests the len of the deck"""
assert len(Deck()) == len(cards) | 5,355,248 |
def session_factory(
base_class=ftplib.FTP,
port=21,
use_passive_mode=None,
*,
encrypt_data_channel=True,
debug_level=None,
):
"""
Create and return a session factory according to the keyword
arguments.
base_class: Base class to use for the session class (e. g.
`ftplib.FTP_TLS` or `M2Crypto.ftpslib.FTP_TLS`, default is
`ftplib.FTP`).
port: Port number (integer) for the command channel (default 21).
If you don't know what "command channel" means, use the default or
use what the provider gave you as "the FTP port".
use_passive_mode: If `True`, explicitly use passive mode. If
`False`, explicitly don't use passive mode. If `None` (default),
let the `base_class` decide whether it wants to use active or
passive mode.
encrypt_data_channel: If `True` (the default), call the `prot_p`
method of the base class if it has the method. If `False` or
`None`, don't call the method.
debug_level: Debug level (integer) to be set on a session
instance. The default is `None`, meaning no debugging output.
This function should work for the base classes `ftplib.FTP`,
`ftplib.FTP_TLS`. Other base classes should work if they use the
same API as `ftplib.FTP`.
Usage example:
my_session_factory = session_factory(
base_class=ftplib.FTP_TLS,
use_passive_mode=True,
encrypt_data_channel=True)
with ftputil.FTPHost(host, user, password,
session_factory=my_session_factory) as host:
...
"""
class Session(base_class):
"""Session factory class created by `session_factory`."""
def __init__(self, host, user, password):
super().__init__()
self.connect(host, port)
if debug_level is not None:
self.set_debuglevel(debug_level)
self.login(user, password)
# `set_pasv` can be called with `True` (causing passive
# mode) or `False` (causing active mode).
if use_passive_mode is not None:
self.set_pasv(use_passive_mode)
if encrypt_data_channel and hasattr(base_class, "prot_p"):
self.prot_p()
return Session | 5,355,249 |
def calculate_molecular_mass(symbols):
"""
Calculate the mass of a molecule.
Parameters
----------
symbols : list
A list of elements.
Returns
-------
mass : float
The mass of the molecule
"""
mass = 0
for i in range(len(symbols)):
mass = mass + atomic_weights[symbols[i]]
return mass | 5,355,250 |
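A usage sketch for calculate_molecular_mass, assuming `atomic_weights` is a module-level dict mapping element symbols to atomic masses; the values below are illustrative stand-ins:
atomic_weights = {"H": 1.008, "O": 15.999}   # assumed lookup table, not part of the original
assert abs(calculate_molecular_mass(["H", "H", "O"]) - 18.015) < 1e-6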
def _color_str(string, color):
"""Simple color formatter for logging formatter"""
# For bold add 1; after "["
start_seq = '\033[{:d}m'.format(COLOR_DICT[color])
return start_seq + string + '\033[0m' | 5,355,251 |
def scrape_data(urls: list):
"""Use Multithreading for scraping."""
with concurrent.futures.ThreadPoolExecutor(max_workers=5) as executor:
executor.map(parse_lob_data, urls) | 5,355,252 |
def roi():
"""
calculate return on investment (ROI) and display the values with a title,
sorted in descending order;
the temporary ROI column is dropped after display
"""
df['ROI'] = (df["Gross Earnings"] / df["Budget"] * 100).round(2)
sort_highest_roi = df[['Title', 'ROI']].sort_values(
by='ROI', ascending=False).dropna()
print(f"""{Fore.YELLOW + Style.BRIGHT}
The most profitable films of the decade:
{Style.RESET_ALL}{sort_highest_roi.head(10).to_string(index=False)}\n""")
df.drop(['ROI'], axis=1, inplace=True)
welcome() | 5,355,253 |
def init_db(config, verbose=False):
"""
Initialize db if necessary: create the sole non-admin user
"""
client = pymongo.MongoClient(
host=config["sentinel"]["database"]["host"],
port=config["sentinel"]["database"]["port"],
username=config["sentinel"]["database"]["admin_username"],
password=config["sentinel"]["database"]["admin_password"],
)
# _id: db_name.user_name
user_ids = []
for _u in client.admin.system.users.find({}, {"_id": 1}):
user_ids.append(_u["_id"])
db_name = config["sentinel"]["database"]["db"]
username = config["sentinel"]["database"]["username"]
_mongo = client[db_name]
if f"{db_name}.{username}" not in user_ids:
_mongo.command(
"createUser",
config["sentinel"]["database"]["username"],
pwd=config["sentinel"]["database"]["password"],
roles=["readWrite"],
)
if verbose:
log("Successfully initialized db")
_mongo.client.close() | 5,355,254 |
def parser_tool_main(args):
"""Main function for the **parser** tool.
This method will parse a JSON formatted Facebook conversation,
reports informations and retrieve data from it, depending on the
arguments passed.
Parameters
----------
args : Namespace (dict-like)
Arguments passed by the `ArgumentParser`.
See Also
--------
FBParser: Class used for the **parser** tool.
main : method used for parsing arguments
"""
with args.cookie as f:
user_raw_data = f.read()
print("[+] - Parsing JSON for {} files".format(len(args.infile)))
data_formatted = build_fmt_str_from_enum(args.data)
print("[+] - Parsing JSON to retrieve {}".format(data_formatted))
fb_parser = FBParser(user_raw_data,
infile_json=args.infile, mode=args.mode,
data=args.data, output=args.output,
threads=args.threads)
fb_parser.parse(to_stdout=True, verbose=args.verbose)
print("[+] - JSON parsed succesfully, saving results "
"inside folder '" + str(args.output) + "'")
return 0 | 5,355,255 |
def merge_all_channel_images(all_patient_paths, output_dir, image_resize):
"""
Function used to merge all channel images into one
:param all_patient_paths: list of all paths, one for each patient
:param output_dir: output dir for new concatenated images
:return:
"""
# clean and make output directory
if os.path.isdir(output_dir):
shutil.rmtree(output_dir, ignore_errors=True)
os.makedirs(output_dir)
# need to iterate through all patients to combine image files
for patient in all_patient_paths:
patient_basename = os.path.basename(patient)
channels_search = os.path.join(patient, '*')
# make output dir for patient's new concatenated data
patient_output_dir = os.path.join(output_dir, patient_basename)
os.mkdir(patient_output_dir)
# get image for each channel iterate chronologically i.e 0 ... 45
for i in range(LAST_IMAGE_COUNT):
image_list = []
# get image for each channel
image_name = '{0}.png'.format(i)
images_search = os.path.join(channels_search, image_name)
image_list = glob.glob(images_search)
image_list.sort()
# need to load all images and combine
loaded_images = []
for image in image_list:
img = Image.open(image).convert('RGB').resize((image_resize, image_resize))
loaded_images.append(img)
# combine all loaded images
# image_size = [width, height]
image_size = loaded_images[0].size
# total count of images
images_count = len(loaded_images)
new_image = Image.new('RGB',(image_size[0], images_count * image_size[1]), (250,250,250))
for j in range(len(loaded_images)):
new_image.paste(loaded_images[j],(0,j * image_size[1]))
new_image.paste(loaded_images[1],(0, image_size[1]))
image_output = os.path.join(patient_output_dir, '{0}.jpg'.format(i))
new_image.save(image_output,"JPEG") | 5,355,256 |
def merge(from_args):
"""Merge a sequence of operations into a cross-product tree.
from_args: A dictionary mapping a unique string id to a
raco.algebra.Operation instance.
Returns: a single raco.algebra.Operation instance and an opaque
data structure suitable for passing to the rewrite_refs function.
"""
assert len(from_args) > 0
def cross(x, y):
return algebra.CrossProduct(x, y)
from_ops = from_args.values()
op = reduce(cross, from_ops)
return (op, __calculate_offsets(from_args)) | 5,355,257 |
def get_content_type(file_resource):
"""Gets a file's MIME type.
Favors returning the result of `file -b --mime ...` if the command is
available and users have enabled it. Otherwise, it returns a type based on the
file's extension.
Args:
file_resource (resource_reference.FileObjectResource): The file to return a
type for.
Returns:
A MIME type (str).
If a type cannot be guessed, request_config_factory.DEFAULT_CONTENT_TYPE is
returned.
"""
if file_resource.storage_url.is_pipe:
return request_config_factory.DEFAULT_CONTENT_TYPE
path = file_resource.storage_url.object_name
# Some common extensions are not recognized by the mimetypes library and
# "file" command, so we'll hard-code support for them.
for extension, content_type in COMMON_EXTENSION_RULES.items():
if path.endswith(extension):
return content_type
if (not platforms.OperatingSystem.IsWindows() and
properties.VALUES.storage.use_magicfile.GetBool()):
output = subprocess.run(['file', '-b', '--mime', path],
check=True,
stdout=subprocess.PIPE,
universal_newlines=True)
content_type = output.stdout.strip()
else:
content_type, _ = mimetypes.guess_type(path)
if content_type:
return content_type
return request_config_factory.DEFAULT_CONTENT_TYPE | 5,355,258 |
def graph(task_id):
"""Return the graph.json results"""
return get_file(task_id, "graph.json") | 5,355,259 |
def com_google_fonts_check_varfont_bold_wght_coord(ttFont, bold_wght_coord):
"""The variable font 'wght' (Weight) axis coordinate must be 700 on the 'Bold' instance."""
if bold_wght_coord == 700:
yield PASS, "Bold:wght is 700."
else:
yield FAIL,\
Message("not-700",
f'The "wght" axis coordinate of'
f' the "Bold" instance must be 700.'
f' Got {bold_wght_coord} instead.') | 5,355,260 |
def shoulders(agents, mask):
"""Positions of the center of mass, left- and right shoulders.
Args:
agents (ndarray):
Numpy array of datatype ``dtype=agent_type_three_circle``.
"""
for agent, m in zip(agents, mask):
if not m:
continue
tangent = rotate270(unit_vector(agent['orientation']))
offset = tangent * agent['r_ts']
agent['position_ls'][:] = agent['position'] - offset
agent['position_rs'][:] = agent['position'] + offset | 5,355,261 |
def parse_components_from_aminochange(aminochange):
""" Returns a dictionary containing (if possible) 'ref', 'pos', and 'alt'
characteristics of the supplied aminochange string.
If aminochange does not parse, returns None.
:param aminochange: (str) describing amino acid change
:return: dict or None
"""
match = re_aminochange_comp_long.match(aminochange)
if match:
# reverse long-form amino strings to short-form.
stuff = match.groupdict()
return {'ref': amino_acid_map[stuff['ref']],
'pos': stuff['pos'],
'alt': amino_acid_map[stuff['alt']],
}
else:
match = re_aminochange_comp_short.match(aminochange)
if match:
return match.groupdict()
return None
def get_logs_csv():
"""
get target's logs through the API in JSON type
Returns:
an array with JSON events
"""
api_key_is_valid(app, flask_request)
target = get_value(flask_request, "target")
data = logs_to_report_json(target)
keys = data[0].keys()
filename = "report-" + now(
model="%Y_%m_%d_%H_%M_%S"
) + "".join(
random.choice(
string.ascii_lowercase
) for _ in range(10)
)
with open(filename, "w") as report_path_filename:
dict_writer = csv.DictWriter(
report_path_filename,
fieldnames=keys,
quoting=csv.QUOTE_ALL
)
dict_writer.writeheader()
for event in data:
dict_writer.writerow(
{
key: value for key, value in event.items() if key in keys
}
)
with open(filename, 'r') as report_path_filename:
reader = report_path_filename.read()
return Response(
reader, mimetype='text/csv',
headers={
'Content-Disposition': 'attachment;filename=' + filename + '.csv'
}
) | 5,355,263 |
def _cache_key_format(lang_code, request_path, qs_hash=None):
"""
Return the string that will be used as the cache key.
The key is formatted from the function parameters:
- lang_code: language code: [pt_BR|es|en]
- request_path: the path of the request
- qs_hash: the hash generated from the querystring parameters (if not None)
"""
cache_key = "/LANG=%s/PATH=%s" % (lang_code, request_path)
if qs_hash is not None:
cache_key = "%s?QS=%s" % (cache_key, qs_hash)
return cache_key | 5,355,264 |
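Two example keys produced by _cache_key_format:
assert _cache_key_format("pt_BR", "/article/x") == "/LANG=pt_BR/PATH=/article/x"
assert _cache_key_format("en", "/search", qs_hash="ab12") == "/LANG=en/PATH=/search?QS=ab12"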
def select_from(paths: Iterable[Path],
filter_func: Callable[[Any], bool] = default_filter,
transform: Callable[[Path], Any] = None,
order_func: Callable[[Any], Any] = None,
order_asc: bool = True,
fn_base: int = 10,
limit: int = None) -> (List[Any], List[Path]):
"""Filter, order, and truncate the given paths based on the filter and
other parameters.
:param paths: A list of paths to filter, order, and limit.
:param transform: Function to apply to each path before applying filters
or ordering. The filter and order functions should expect the type
returned by this.
:param filter_func: A function that takes a directory, and returns whether
to include that directory. True -> include, False -> exclude
:param order_func: A function that returns a comparable value for sorting,
as per the list.sort keys argument. Items for which this returns
None are removed.
:param order_asc: Whether to sort in ascending or descending order.
:param fn_base: Number base for file names. 10 by default, ensure dir name
is a valid integer.
:param limit: The max items to return. None denotes return all.
:returns: A filtered, ordered list of transformed objects, and the list
of untransformed paths.
"""
if transform is None:
transform = lambda v: v
selected = []
for path in paths:
if not path.is_dir():
continue
try:
int(path.name, fn_base)
except ValueError:
continue
try:
item = transform(path)
except ValueError:
continue
if not filter_func(item):
continue
if order_func is not None and order_func(item) is None:
continue
selected.append((item, path))
if order_func is not None:
selected.sort(key=lambda d: order_func(d[0]), reverse=not order_asc)
return SelectItems(
[item[0] for item in selected][:limit],
[item[1] for item in selected][:limit]) | 5,355,265 |
def prd(o, all = False):
"""
Pretty dump.
@param (object) o
@param (bool) all
@return (None)
"""
name = o.__module__
for attrName in dir(o):
# show only attrs
attrValue = getattr(o, attrName)
if all == False and not hasattr(attrValue, '__call__'):
print("<%s>.%s = %s" % (name, attrName, attrValue))
elif all == True:
print("<%s>.%s = %s" % (name, attrName, attrValue)) | 5,355,266 |
def dbscan(data:torch.Tensor, epsilon:float, **kwargs) -> torch.Tensor:
"""
Generate mask using DBSCAN.
Note, data in the largest cluster have True values.
Parameters
----------
data: torch.Tensor
input data with shape (n_samples, n_features)
epsilon: float
DBSCAN epsilon
**kwargs:
passed to DBSCAN()
Returns
-------
mask (torch.Tensor)
"""
group = DBSCAN(eps=epsilon, **kwargs).fit(data.cpu().numpy())
label = Counter(group.labels_)
label = max(label, key=label.get)
return torch.tensor(group.labels_ == label).to(data.device) | 5,355,267 |
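A small usage sketch for dbscan, assuming scikit-learn's DBSCAN and collections.Counter are imported at module level as in the original; min_samples is lowered so the toy points form clusters:
import torch

points = torch.tensor([[0.0, 0.0], [0.1, 0.0], [0.2, 0.1], [5.0, 5.0]])
mask = dbscan(points, epsilon=0.5, min_samples=2)
assert mask.tolist() == [True, True, True, False]   # largest cluster sits near the origin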
def data_splitter(data_path: str = "../../data/", split_perc: tuple = (0.7, 0.2, 0.1)):
"""
Input:
data_path: string, default "../../data/"
Path to the data folder.
split_perc: tuple, default (0.7, 0.2, 0.1)
Fractions of the data assigned to the train, validation and test sets.
Function splits the images into train, validation and test sets and copies them
into train_dir, val_dir and test_dir respectively. It also creates new csv files named
'{old_csv_file_name}_train.csv', '{old_csv_file_name}_validation.csv' and
'{old_csv_file_name}_test.csv', which are saved into the corresponding directories and
contain only the coordinates relevant to their sets.
Note: This function DOES NOT unfold the images or the csv files!
"""
img_root_dir = os.path.join(data_path, 'raw/data')
csv_file = os.path.join(data_path, 'raw/data.csv')
interim_dir = os.path.join(data_path, "interim")
for dir in ['', 'train', 'validation', 'test']:
path = os.path.join(interim_dir, dir)
if not os.path.isdir(path):
os.mkdir(os.path.join(interim_dir, path))
train_dir = os.path.join(data_path, 'interim/train')
val_dir = os.path.join(data_path, 'interim/validation')
test_dir = os.path.join(data_path, 'interim/test')
# Splitting the dataframe
google_frame = pd.read_csv(csv_file)
train, val = train_test_split(google_frame, train_size=split_perc[0], random_state=8)
val, test = train_test_split(val, train_size=split_perc[1] / (split_perc[1] + split_perc[2]), random_state=12)
old_file_name = os.path.splitext(os.path.basename(csv_file))
train_file_name = old_file_name[0] + '_train' + old_file_name[1]
val_file_name = old_file_name[0] + '_validation' + old_file_name[1]
test_file_name = old_file_name[0] + '_test' + old_file_name[1]
train.to_csv(os.path.join(train_dir, train_file_name), index=False)
val.to_csv(os.path.join(val_dir, val_file_name), index=False)
test.to_csv(os.path.join(test_dir, test_file_name), index=False)
# Splitting images
folders = os.listdir(img_root_dir)
for folder in folders:
folder_path = os.path.join(img_root_dir, folder)
target_dir = None
if folder in train["uuid"].values:
target_dir = os.path.join(train_dir, folder)
elif folder in val["uuid"].values:
target_dir = os.path.join(val_dir, folder)
elif folder in test["uuid"].values:
target_dir = os.path.join(test_dir, folder)
if target_dir:
shutil.copytree(folder_path, target_dir) | 5,355,268 |
def simulate_multivariate_ts(mu, alpha, beta, num_of_nodes=-1,\
Thorizon = 60, seed=None, output_rejected_data=False):
"""
Inputs:
mu: baseline intensities, M x 1 array
alpha: excitation rates of the multivariate HP kernel, M x M array
beta: decay rates of the multivariate HP kernel
num_of_nodes: number of nodes M (inferred from the shape of mu if negative)
"""
#################
# Initialisation
#################
if num_of_nodes < 0:
num_of_nodes = np.shape(mu)[0]
rng = default_rng(seed) # get instance of random generator
ts = [np.array([]) for _ in range(num_of_nodes)] # create M empty arrays to store the ordered timestamps of each node
t = 0 # initialise current time to be 0
num_of_events = np.zeros(num_of_nodes) # set event counter to be 0 for all nodes
epsilon = 10**(-10) # This was used in many HP code
M_star = copy.copy(mu) # upper bound at current time t = 0
accepted_event_intensity = []
rejected_points = []; rpy = [] # containers for rejected time points and their corresponding intensities
M_x = []; M_y = [] # M_y stores the intensity upper bound over each interval while M_x stores the interval endpoints
#################
# Begin loop
#################
while(t < Thorizon):
previous_M_star = M_star; previous_t = t
M_star = np.sum(multiv_cif(t+epsilon, ts, mu, alpha, beta)) # compute upper bound of intensity using conditional intensity function
u = rng.uniform(0,1) # draw a uniform random number between interval (0,1)
tau = -np.log(u)/M_star # sample inter-arrival time
t = t + tau # update current time by adding tau to current time (hence t is the candidate point)
M_x += [previous_t,t]
M_y += [previous_M_star]
s = rng.uniform(0,1) # draw another standard uniform random number
M_t = np.sum(multiv_cif(t, ts, mu, alpha, beta)) # compute intensity function at current time t
if t <= Thorizon:
##########################
## Rejection Sampling test where probability of acceptance: M_t/M_star
if s <= M_t/M_star:
k = 0 # initialise k to be the first node '0'
# Search for node k such that the 'while condition' below is satisfied
while s*M_star <= np.sum(multiv_cif(t, ts, mu, alpha, beta)[0:k+1]):
k += 1
num_of_events[k] += 1 # update number of points in node k
ts[k] = np.append(ts[k], float(t)) # accept candidate point t in node k
accepted_event_intensity.append(M_t)
else:
rejected_points += [t]
rpy += [M_t]
else:
break
if output_rejected_data:
return ts, num_of_events, accepted_event_intensity, rejected_points, rpy
else:
return ts, num_of_events | 5,355,269 |
def token_urlsafe(nbytes):
"""Return a random URL-safe text string, in Base64 encoding.
The string has *nbytes* random bytes. If *nbytes* is ``None``
or not supplied, a reasonable default is used.
>>> token_urlsafe(16) #doctest:+SKIP
'Drmhze6EPcv0fN_81Bj-nA'
"""
tok = token_bytes(nbytes)
return base64.urlsafe_b64encode(tok).rstrip(b'=').decode('ascii') | 5,355,270 |
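A usage note for token_urlsafe: each random byte becomes roughly 1.3 Base64 characters, so 16 bytes yield a 22-character token drawn from the URL-safe alphabet:
tok = token_urlsafe(16)
assert len(tok) == 22
assert all(c.isalnum() or c in "-_" for c in tok)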
def sha206a_get_pk_useflag_count(pk_avail_count):
"""
calculates available Parent Key use counts
Args:
pk_avail_count counts available bit's as 1 (int)
Returns:
Status Code
"""
if not isinstance(pk_avail_count, AtcaReference):
status = Status.ATCA_BAD_PARAM
else:
c_pk_avail_count = c_uint8(pk_avail_count.value)
status = get_cryptoauthlib().sha206a_get_pk_useflag_count(byref(c_pk_avail_count))
pk_avail_count.value = c_pk_avail_count.value
return status | 5,355,271 |
def test_board_group_update():
"""Test that we can create a board group of testing boards."""
board_group = BoardGroup(MockBoard, NoBoardMockBackend())
board_group.update_boards() | 5,355,272 |
def test_tile_valid_default():
"""Should return a 3 bands array and a full valid mask."""
tile_z = 21
tile_x = 438217
tile_y = 801835
data, mask = main.tile(ADDRESS, tile_x, tile_y, tile_z)
assert data.shape == (3, 256, 256)
assert mask.all() | 5,355,273 |
def rotate_points_around_origin(
x: tf.Tensor,
y: tf.Tensor,
angle: tf.Tensor,
) -> Tuple[tf.Tensor, tf.Tensor]:
"""Rotates points around the origin.
Args:
x: Tensor of shape [batch_size, ...].
y: Tensor of shape [batch_size, ...].
angle: Tensor of shape [batch_size, ...].
Returns:
Rotated x, y, each with shape [batch_size, ...].
"""
tx = tf.cos(angle) * x - tf.sin(angle) * y
ty = tf.sin(angle) * x + tf.cos(angle) * y
return tx, ty | 5,355,274 |
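A quick numerical check for rotate_points_around_origin, assuming TensorFlow eager mode: rotating the point (1, 0) by pi/2 about the origin lands on (0, 1):
import numpy as np
import tensorflow as tf

tx, ty = rotate_points_around_origin(tf.constant([1.0]), tf.constant([0.0]), tf.constant([np.pi / 2]))
np.testing.assert_allclose(tx.numpy(), [0.0], atol=1e-6)
np.testing.assert_allclose(ty.numpy(), [1.0], atol=1e-6)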
def get_text_blocks(path):
"""
Used to extract text from images
"""
for df in get_ocr_data(path):
groups = df.groupby(by="block_num").groups
# print(groups.groups)
keys = sorted(list(groups.keys()))
text_blocks = []
for k in keys:
word_idxs = groups[k]
tmp = df.iloc[word_idxs]
w = tmp.to_dict("records")[0]
x1 = min(tmp["left"])
y1 = min(tmp["top"])
x2 = max(tmp["left"] + tmp["width"])
y2 = max(tmp["top"] + tmp["height"])
coords = pil_2_rel([x1, y1, x2, y2], w["page_width"], w["page_height"])
text = " ".join(tmp["text"])
text_blocks.append(
{
"value": text,
"page": w["page"],
"rel_coords": coords,
"pil_coords": [x1, y1, x2, y2],
}
)
yield text_blocks | 5,355,275 |
async def test_conformance_008_autocorrect(caplog):
"""
oadrDistributeEvent eventSignal interval durations for a given event MUST
add up to eiEvent eiActivePeriod duration.
"""
event_id = generate_id()
event = {'event_descriptor':
{'event_id': event_id,
'modification_number': 0,
'modification_date': datetime.now(),
'priority': 0,
'market_context': 'MarketContext001',
'created_date_time': datetime.now(),
'event_status': enums.EVENT_STATUS.FAR,
'test_event': False,
'vtn_comment': 'No Comment'},
'active_period':
{'dtstart': datetime.now(),
'duration': timedelta(minutes=5)},
'event_signals':
[{'intervals': [{'duration': timedelta(minutes=10),
'signal_payload': 1},
{'duration': timedelta(minutes=10),
'signal_payload': 2},
{'duration': timedelta(minutes=10),
'signal_payload': 3}],
'signal_name': enums.SIGNAL_NAME.SIMPLE,
'signal_type': enums.SIGNAL_TYPE.DELTA,
'signal_id': generate_id()
}],
'targets': [{'ven_id': '123'}]
}
# Create a message with this event
msg = create_message('oadrDistributeEvent',
response={'response_code': 200,
'response_description': 'OK',
'request_id': generate_id()},
request_id=generate_id(),
vtn_id=generate_id(),
events=[event])
assert caplog.record_tuples == [("openleadr", logging.WARNING, f"The active_period duration for event {event_id} (0:05:00) differs from the sum of the interval's durations (0:30:00). The active_period duration has been adjusted to (0:30:00).")]
parsed_type, parsed_msg = parse_message(msg)
assert parsed_type == 'oadrDistributeEvent'
total_time = sum([i['duration'] for i in parsed_msg['events'][0]['event_signals'][0]['intervals']],
timedelta(seconds=0))
assert parsed_msg['events'][0]['active_period']['duration'] == total_time | 5,355,276 |
def form_of(state):
"""Return the form of the given state."""
if hasattr(state, "__form__"):
if callable(state.__form__) and not inspect.isclass(state.__form__):
return state.__form__()
else:
return state.__form__
else:
raise ValueError(f"{state} has no form") | 5,355,277 |
def poly_to_geopandas(polys, columns):
"""
Converts a GeoViews Paths or Polygons type to a geopandas dataframe.
Parameters
----------
polys : gv.Path or gv.Polygons
GeoViews element
columns: list(str)
List of columns
Returns
-------
gdf : Geopandas dataframe
"""
rows = []
for g in polys.geom():
rows.append(dict({c: '' for c in columns}, geometry=g))
return gpd.GeoDataFrame(rows, columns=columns+['geometry']) | 5,355,278 |
def test_set_style():
"""
Tests that setting the matplotlib style works.
"""
set_style() | 5,355,279 |
def create_parent_dirs(path: str):
"""
:param path: the file path to try to create the parent directories for
"""
parent = os.path.dirname(path)
create_dirs(parent) | 5,355,280 |
def test_rdb_aggregation_context():
"""
Check that the aggregation context of the rules is saved in rdb. Write data with not a full bucket,
then save it and restore, add more data to the bucket and check the rules results considered the previous data
that was in that bucket in their calculation. Check on avg and min, since all the other rules use the same
context as min.
"""
start_ts = 3
samples_count = 4 # 1 full bucket and another one with 1 value
with Env().getConnection() as r:
assert r.execute_command('TS.CREATE', 'tester')
assert r.execute_command('TS.CREATE', 'tester_agg_avg_3')
assert r.execute_command('TS.CREATE', 'tester_agg_min_3')
assert r.execute_command('TS.CREATE', 'tester_agg_sum_3')
assert r.execute_command('TS.CREATE', 'tester_agg_std_3')
assert r.execute_command('TS.CREATERULE', 'tester', 'tester_agg_avg_3', 'AGGREGATION', 'AVG', 3)
assert r.execute_command('TS.CREATERULE', 'tester', 'tester_agg_min_3', 'AGGREGATION', 'MIN', 3)
assert r.execute_command('TS.CREATERULE', 'tester', 'tester_agg_sum_3', 'AGGREGATION', 'SUM', 3)
assert r.execute_command('TS.CREATERULE', 'tester', 'tester_agg_std_3', 'AGGREGATION', 'STD.S', 3)
_insert_data(r, 'tester', start_ts, samples_count, list(range(samples_count)))
data_tester = r.execute_command('dump', 'tester')
data_avg_tester = r.execute_command('dump', 'tester_agg_avg_3')
data_min_tester = r.execute_command('dump', 'tester_agg_min_3')
data_sum_tester = r.execute_command('dump', 'tester_agg_sum_3')
data_std_tester = r.execute_command('dump', 'tester_agg_std_3')
r.execute_command('DEL', 'tester', 'tester_agg_avg_3', 'tester_agg_min_3', 'tester_agg_sum_3',
'tester_agg_std_3')
r.execute_command('RESTORE', 'tester', 0, data_tester)
r.execute_command('RESTORE', 'tester_agg_avg_3', 0, data_avg_tester)
r.execute_command('RESTORE', 'tester_agg_min_3', 0, data_min_tester)
r.execute_command('RESTORE', 'tester_agg_sum_3', 0, data_sum_tester)
r.execute_command('RESTORE', 'tester_agg_std_3', 0, data_std_tester)
assert r.execute_command('TS.ADD', 'tester', start_ts + samples_count, samples_count)
assert r.execute_command('TS.ADD', 'tester', start_ts + samples_count + 10, 0) # closes the last time_bucket
# if the aggregation context wasn't saved, the results were considering only the new value added
expected_result_avg = [[start_ts, b'1'], [start_ts + 3, b'3.5']]
expected_result_min = [[start_ts, b'0'], [start_ts + 3, b'3']]
expected_result_sum = [[start_ts, b'3'], [start_ts + 3, b'7']]
expected_result_std = [[start_ts, b'1'], [start_ts + 3, b'0.7071']]
actual_result_avg = r.execute_command('TS.range', 'tester_agg_avg_3', start_ts, start_ts + samples_count)
assert actual_result_avg == expected_result_avg
actual_result_min = r.execute_command('TS.range', 'tester_agg_min_3', start_ts, start_ts + samples_count)
assert actual_result_min == expected_result_min
actual_result_sum = r.execute_command('TS.range', 'tester_agg_sum_3', start_ts, start_ts + samples_count)
assert actual_result_sum == expected_result_sum
actual_result_std = r.execute_command('TS.range', 'tester_agg_std_3', start_ts, start_ts + samples_count)
assert actual_result_std[0] == expected_result_std[0]
assert abs(float(actual_result_std[1][1]) - float(expected_result_std[1][1])) < ALLOWED_ERROR | 5,355,281 |
def get_sequences(query_file=None, query_ids=None):
"""Convenience function to get dictionary of query sequences from file or IDs.
Parameters:
query_file (str): Path to FASTA file containing query protein sequences.
query_ids (list): NCBI sequence accessions.
Raises:
ValueError: Did not receive values for query_file or query_ids.
Returns:
sequences (dict): Dictionary of query sequences keyed on accession.
"""
if query_file and not query_ids:
with open(query_file) as query:
sequences = parse_fasta(query)
elif query_ids:
sequences = efetch_sequences(query_ids)
else:
raise ValueError("Expected 'query_file' or 'query_ids'")
return sequences | 5,355,282 |
def _get_tickets(manifest, container_dir):
"""Get tickets."""
principals = set(manifest.get('tickets', []))
if not principals:
return False
tkts_spool_dir = os.path.join(
container_dir, 'root', 'var', 'spool', 'tickets')
try:
tickets.request_tickets(
context.GLOBAL.zk.conn,
manifest['name'],
tkts_spool_dir,
principals
)
except Exception:
_LOGGER.exception('Exception processing tickets.')
raise exc.ContainerSetupError('Get tickets error',
app_abort.AbortedReason.TICKETS)
# Check that all requested tickets are valid.
for princ in principals:
krbcc_file = os.path.join(tkts_spool_dir, princ)
if not tickets.krbcc_ok(krbcc_file):
_LOGGER.error('Missing or expired tickets: %s, %s',
princ, krbcc_file)
raise exc.ContainerSetupError(princ,
app_abort.AbortedReason.TICKETS)
else:
_LOGGER.info('Ticket ok: %s, %s', princ, krbcc_file)
return True | 5,355,283 |
def make_plots_stratified_by_category_type(results,
category_type,
emotion_pairs=None,
cycle_types_to_plot = ['near_period',
'middle_of_night',
'weekend',
'summer',
'winter'],
data_to_use='binary_analysis_no_individual_mean',
top_margin=.8,
axis_fontsize=11):
"""
    Checked.
    category_type is the category to substratify by.
    data_to_use selects which analysis to plot (binary_analysis_no_individual_mean
    by default, i.e. with each individual's mean removed).
    Plots results broken down by category.
    """
    assert data_to_use in ['binary_analysis_no_individual_mean', 'binary_analysis']
    print('USING DATA TYPE %s' % data_to_use)
if emotion_pairs is None:
emotion_pairs = results.keys()
for emotion_pair in emotion_pairs:
good_symptom, bad_symptom = emotion_pair.split('_versus_')
results_by_cat = results[emotion_pair][category_type]
if category_type == 'no_substratification':
plt.figure(figsize = [5, 2])
elif category_type == 'by_largest_timezones': # need extra room because so many categories.
plt.figure(figsize = [15, 5])
else:
plt.figure(figsize = [len(cycle_types_to_plot) * 5, 2 + .5 * len(results_by_cat.keys())])
if category_type != 'no_substratification':
# create one subplot for each cycle type.
subplot_idx = 1
for cycle_type in cycle_types_to_plot:
plt.subplot(1, len(cycle_types_to_plot), subplot_idx)
# we want to plot the differences by subcategory for each cycle.
diffs = []
cat_levels = []
for level in order_subcategories(results_by_cat.keys(), category_type):
cat_levels.append(level)
if cycle_type in ['summer', 'winter'] and HEART_SUBSTRING in emotion_pair:
# no reliable data
diffs.append(0)
else:
diffs.append(results_by_cat[level][data_to_use]['%s_mean' % cycle_type] -
results_by_cat[level][data_to_use]['not_%s_mean' % cycle_type])
if subplot_idx == 1:
# make sure category levels are in same order across subplots, if not something is very weird
original_cat_levels = cat_levels
else:
assert cat_levels == original_cat_levels
assert sum(np.isnan(diffs)) == 0
assert len(diffs) == len(results_by_cat.keys())
plt.barh(range(len(diffs)),
diffs,
color = ['blue' if x < 0 else 'red' for x in diffs])
if subplot_idx == 1:
plt.yticks(range(len(diffs)),
[str(a).replace('_', ' ').replace('America/', '').replace('Europe/', '') for a in cat_levels],
fontsize=axis_fontsize)
plt.ylabel(category_type.replace('by_', '').replace('_', ' '), fontsize=axis_fontsize)
else:
plt.yticks([])
if HEART_SUBSTRING in emotion_pair:
plt.xticks([-3, 3],
['-3\nBPM', '+3\nBPM'],
fontweight = 'bold',
fontsize=axis_fontsize)
plt.xlim([-3, 3])
elif BBT_SUBSTRING in emotion_pair:
plt.xticks([-.5, .5],
['-0.5\ndeg F', '+0.5\ndeg F'],
fontweight = 'bold',
fontsize=axis_fontsize)
plt.xlim([-.2, .2])
elif WEIGHT_SUBSTRING in emotion_pair:
plt.xticks([-.5, .5],
['-.5 LBS', '+.5 LBS'],
fontweight = 'bold',
fontsize=axis_fontsize)
else:
plt.xticks([-.1, -.05, 0, .05, .1],
['10%', '5%', '0%', '5%', '10%'],
fontweight = 'bold',
fontsize=axis_fontsize)
plt.xlim([-.1, .1])
plt.title('%s effect' % cycle_type.replace('_' , ' '), fontweight='bold', fontsize=16)
pretty_bad_symptom_name = bad_symptom.split('*')[1].replace('_', ' ').replace('emotion', '').replace('6 hours or less', '<6 hrs')
pretty_good_symptom_name = good_symptom.split('*')[1].replace('_', ' ').replace('emotion', '').replace('6 hours or more', '>6 hrs')
# put bad symptom first because it's on the left in the plot.
plt.suptitle('<-%s vs. %s->' % (pretty_bad_symptom_name,
pretty_good_symptom_name),
fontweight='bold',
fontsize=16)
subplot_idx += 1
else:
# if we're not substratifying,
# we just want to make a simple plot with one bar for each type of cycle.
diffs_by_cycle_type = []
for cycle_type in cycle_types_to_plot:
diffs_by_cycle_type.append(results_by_cat[data_to_use]['%s_mean' % cycle_type] -
results_by_cat[data_to_use]['not_%s_mean' % cycle_type])
assert sum(np.isnan(diffs_by_cycle_type)) == 0
barwidth = .8
plt.barh(range(len(diffs_by_cycle_type)),
diffs_by_cycle_type,
color = ['blue' if x < 0 else 'red' for x in diffs_by_cycle_type], height = barwidth)
plt.yticks(range(len(diffs_by_cycle_type)),
[str(a).replace('_', ' ') for a in cycle_types_to_plot],
fontweight = 'bold')
plt.xlim([-.18, .18]) # we put the positive negative emotion labels as xticks,
plt.xticks([-.18, -.1, 0, .1, .18],
[bad_symptom.split('*')[1].replace('_', ' '),
'10%', '0%', '10%',
good_symptom.split('*')[1].replace('_', ' ')], fontweight = 'bold')
ylimits = [-.2 - barwidth / 2, 4 + barwidth / 2 + .2]
plt.plot([0, 0], ylimits, color = 'black')
plt.ylim(ylimits)
plt.subplots_adjust(left = .2)
plt.title(data_to_use)
plt.subplots_adjust(wspace = .15, hspace = .4, top = top_margin)
plt.show() | 5,355,284 |
def test_fb_forward_multichannel(fb_class, fb_config, ndim):
""" Test encoder/decoder in multichannel setting"""
# Definition
enc = Encoder(fb_class(**fb_config))
dec = Decoder(fb_class(**fb_config))
# 3D Forward with several channels
tensor_shape = tuple([random.randint(2, 4) for _ in range(ndim)]) + (4000,)
inp = torch.randn(tensor_shape)
tf_out = enc(inp)
assert tf_out.shape[: ndim + 1] == (tensor_shape[:-1] + (enc.filterbank.n_feats_out,))
out = dec(tf_out)
assert out.shape[:-1] == inp.shape[:-1] | 5,355,285 |
def random_show_date(database_connection: mysql.connector.connect) -> str:
"""Return a random show date from the ww_shows table"""
database_connection.reconnect()
cursor = database_connection.cursor(dictionary=True)
query = ("SELECT s.showdate FROM ww_shows s "
"WHERE s.showdate <= NOW() "
"ORDER BY RAND() "
"LIMIT 1;")
cursor.execute(query)
result = cursor.fetchone()
cursor.close()
if not result:
return None
return result["showdate"].isoformat() | 5,355,286 |
def get_output_tensor(interpreter, index):
"""Returns the output tensor at the given index."""
output_details = interpreter.get_output_details()[index]
tensor = np.squeeze(interpreter.get_tensor(output_details["index"]))
return tensor | 5,355,287 |
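A sketch of how get_output_tensor might be used with a TensorFlow Lite interpreter; the model path is a placeholder and the output index depends on the model.

import tensorflow as tf

interpreter = tf.lite.Interpreter(model_path="detect.tflite")  # placeholder path
interpreter.allocate_tensors()
# ... fill the input tensor(s) here ...
interpreter.invoke()
boxes = get_output_tensor(interpreter, 0)  # detection models often expose boxes at index 0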
def make_d_mappings(n_dir, chain_opts):
"""Generate direction to solution interval mapping."""
# Get direction dependence for all terms.
dd_terms = [dd for _, dd in yield_from(chain_opts, "direction_dependent")]
# Generate a mapping between model directions gain directions.
d_map_arr = (np.arange(n_dir, dtype=np.int32)[:, None] * dd_terms).T
return d_map_arr | 5,355,288 |
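A worked sketch of the mapping that make_d_mappings builds, computed directly since chain_opts and yield_from are internal to the package: with n_dir=3 and two gain terms where only the second is direction dependent, the broadcast yields one row per term.

import numpy as np

n_dir = 3
dd_terms = [0, 1]  # term 1: direction independent, term 2: direction dependent
d_map_arr = (np.arange(n_dir, dtype=np.int32)[:, None] * dd_terms).T
print(d_map_arr)
# [[0 0 0]
#  [0 1 2]]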
def main():
"""Loop to test the postgres generation with REPL"""
envs = cast(Dict[str, str], os.environ)
if "HAYSTACK_DB" not in envs:
envs["HAYSTACK_DB"] = "sqlite3:///:memory:"
provider = get_provider("shaystack.providers.sql", envs)
conn = cast(SQLProvider, provider).get_connect()
scheme = urlparse(envs["HAYSTACK_DB"]).scheme
# noinspection PyMethodMayBeStatic
class HaystackRequest(cmd.Cmd):
""" Haystack REPL interface """
__slots__ = ("conn",)
# noinspection PyShadowingNames
def __init__(self, conn):
super().__init__()
self.conn = conn
def do_python(self, arg: str) -> None: # pylint: disable=no-self-use
# noinspection PyBroadException
try:
_, python_code = _filter_to_python(arg)
print(python_code)
print()
except Exception: # pylint: disable=broad-except
traceback.print_exc()
def do_pg(self, arg: str) -> None:
# noinspection PyBroadException
try:
sql_request = pg_sql_filter("haystack", arg, FAKE_NOW, 1, "customer")
print(sql_request)
print()
if scheme.startswith("postgres"):
cursor = self.conn.cursor()
cursor.execute(sql_request)
cursor.close()
except Exception: # pylint: disable=broad-except
traceback.print_exc()
finally:
conn.rollback()
def do_mysql(self, arg: str) -> None:
# noinspection PyBroadException
try:
sql_request = mysql_sql_filter("haystack", arg, FAKE_NOW, 1, "customer")
print(sql_request)
print()
if scheme.startswith("mysql"):
cursor = self.conn.cursor()
cursor.execute(sql_request)
cursor.close()
except Exception: # pylint: disable=broad-except
traceback.print_exc()
finally:
conn.rollback()
def do_sqlite(self, arg: str) -> None:
# noinspection PyBroadException
try:
sql_request = sqlite_sql_filter("haystack", arg, FAKE_NOW, 1, "customer")
print(sql_request)
print()
if scheme.startswith("sqlite"):
cursor = self.conn.cursor()
cursor.execute(sql_request)
cursor.close()
except Exception: # pylint: disable=broad-except
traceback.print_exc()
finally:
conn.rollback()
def do_mongo(self, arg: str) -> None: # pylint: disable=no-self-use
# noinspection PyBroadException
try:
mongo_request = _mongo_filter(arg, FAKE_NOW, 1, "customer")
pprint.PrettyPrinter(indent=4).pprint(mongo_request)
print()
except Exception: # pylint: disable=broad-except
traceback.print_exc()
finally:
conn.rollback()
def do_bye(self, _: str) -> bool: # pylint: disable=unused-argument,no-self-use
return True
try:
HaystackRequest(conn).cmdloop()
except KeyboardInterrupt:
return 0
return 0 | 5,355,289 |
def _calculateVolumeByBoolean(vtkDataSet1,vtkDataSet2,iV):
"""
Function to calculate the volumes of a cell intersecting a mesh.
Uses a boolean polydata filter to calculate the intersection,
a general implementation but slow.
"""
# Triangulate polygon and calc normals
baseC = vtkTools.dataset.getCell2vtp(vtkDataSet2,iV)
baseVol = vtkTools.polydata.calculateVolume(baseC)
# print iV, baseVol
# Extract cells from the first mesh that intersect the base cell
extractCells = vtkTools.extraction.extractDataSetWithPolygon(vtkDataSet1,baseC,extInside=True,extBoundaryCells=True,extractBounds=True)
extInd = npsup.vtk_to_numpy(extractCells.GetCellData().GetArray('id'))
# print extInd
    # Assert that the clip actually cut some cells
assert extractCells.GetNumberOfCells() > 0, 'No cells in the clip, cell id {:d}'.format(iV)
# Calculate the volumes of the clipped cells and insert to the matrix
volL = []
for nrCC,iR in enumerate(extInd):
tempCell = vtkTools.dataset.thresholdCellId2vtp(extractCells,iR)
# Find the intersection of the 2 cells
boolFilt = vtk.vtkBooleanOperationPolyDataFilter()
boolFilt.SetInputData(0,tempCell)
boolFilt.SetInputData(1,baseC)
        boolFilt.SetOperationToIntersection()
        boolFilt.Update()
        # If they intersect, calculate the volumes
        if boolFilt.GetOutput().GetNumberOfPoints() > 0:
cleanInt = vtkTools.polydata.cleanPolyData(boolFilt.GetOutputPort())
del3dFilt = vtk.vtkDelaunay3D()
del3dFilt.SetInputData(cleanInt)
del3dFilt.Update()
            # Get the intersection as polydata and measure its volume
            intC = vtkTools.extraction.vtu2vtp(del3dFilt.GetOutput())
            intVol = vtkTools.polydata.calculateVolume(intC)
# Calculate the volume
volVal = intVol/baseVol
# print iR, intVol, volVal
# Insert the value
if volVal > 0.0:
volL.append(volVal)
return extInd,np.array(volL) | 5,355,290 |
def WebChecks(input_api, output_api):
"""Run checks on the web/ directory."""
if input_api.is_committing:
error_type = output_api.PresubmitError
else:
error_type = output_api.PresubmitPromptWarning
output = []
output += input_api.RunTests([input_api.Command(
name='web presubmit',
cmd=[
input_api.python_executable,
input_api.os_path.join('web', 'web.py'),
'presubmit',
],
kwargs={},
message=error_type,
)])
return output | 5,355,291 |
def numpy2stl(A, fn, scale=0.1, mask_val=None, ascii=False,
max_width=235.,
max_depth=140.,
max_height=150.,
solid=False,
rotate=True,
min_thickness_percent=0.1,
force_python=False):
"""
Reads a numpy array, and outputs an STL file
Inputs:
A (ndarray) - an 'm' by 'n' 2D numpy array
fn (string) - filename to use for STL file
Optional input:
scale (float) - scales the height (surface) of the
resulting STL mesh. Tune to match needs
mask_val (float) - any element of the inputted array that is less
than this value will not be included in the mesh.
default renders all vertices (x > -inf for all float x)
ascii (bool) - sets the STL format to ascii or binary (default)
max_width, max_depth, max_height (floats) - maximum size of the stl
object (in mm). Match this to
the dimensions of a 3D printer
platform
solid (bool): sets whether to create a solid geometry (with sides and
a bottom) or not.
min_thickness_percent (float) : when creating the solid bottom face, this
multiplier sets the minimum thickness in
the final geometry (shallowest interior
point to bottom face), as a percentage of
the thickness of the model computed up to
that point.
        rotate (bool) - if True (default), rotate the array so that its longer
                        side lies along the printer platform width.
        force_python (bool) - if True, skip the C tessellation library even if
                              it is available and use the pure numpy path.
    Returns: (None)
"""
m, n = A.shape
if n >= m and rotate:
# rotate to best fit a printing platform
A = np.rot90(A, k=3)
m, n = n, m
A = scale * (A - A.min())
    if mask_val is None:
        mask_val = A.min() - 1.
if c_lib and not force_python: # try to use c library
# needed for memoryviews
A = np.ascontiguousarray(A, dtype=float)
facets = np.asarray(tessellate(A, mask_val, min_thickness_percent,
solid))
# center on platform
facets[:, 3::3] += -m / 2
facets[:, 4::3] += -n / 2
else: # use python + numpy
facets = []
mask = np.zeros((m, n))
print("Creating top mesh...")
for i, k in product(range(m - 1), range(n - 1)):
this_pt = np.array([i - m / 2., k - n / 2., A[i, k]])
top_right = np.array([i - m / 2., k + 1 - n / 2., A[i, k + 1]])
bottom_left = np.array([i + 1. - m / 2., k - n / 2., A[i + 1, k]])
bottom_right = np.array(
[i + 1. - m / 2., k + 1 - n / 2., A[i + 1, k + 1]])
n1, n2 = np.zeros(3), np.zeros(3)
if (this_pt[-1] > mask_val and top_right[-1] > mask_val and
bottom_left[-1] > mask_val):
facet = np.concatenate([n1, top_right, this_pt, bottom_right])
mask[i, k] = 1
mask[i, k + 1] = 1
mask[i + 1, k] = 1
facets.append(facet)
if (this_pt[-1] > mask_val and bottom_right[-1] > mask_val and
bottom_left[-1] > mask_val):
facet = np.concatenate(
[n2, bottom_right, this_pt, bottom_left])
facets.append(facet)
mask[i, k] = 1
mask[i + 1, k + 1] = 1
mask[i + 1, k] = 1
facets = np.array(facets)
if solid:
print("Computed edges...")
edge_mask = np.sum([roll2d(mask, (i, k))
for i, k in product([-1, 0, 1], repeat=2)],
axis=0)
edge_mask[np.where(edge_mask == 9.)] = 0.
edge_mask[np.where(edge_mask != 0.)] = 1.
edge_mask[0::m - 1, :] = 1.
edge_mask[:, 0::n - 1] = 1.
X, Y = np.where(edge_mask == 1.)
            locs = list(zip(X - m / 2., Y - n / 2.))  # list, not iterator: membership is tested repeatedly below
zvals = facets[:, 5::3]
zmin, zthickness = zvals.min(), zvals.ptp()
minval = zmin - min_thickness_percent * zthickness
bottom = []
print("Extending edges, creating bottom...")
for i, facet in enumerate(facets):
if (facet[3], facet[4]) in locs:
facets[i][5] = minval
if (facet[6], facet[7]) in locs:
facets[i][8] = minval
if (facet[9], facet[10]) in locs:
facets[i][11] = minval
this_bottom = np.concatenate(
[facet[:3], facet[6:8], [minval], facet[3:5], [minval],
facet[9:11], [minval]])
bottom.append(this_bottom)
facets = np.concatenate([facets, bottom])
xsize = facets[:, 3::3].ptp()
if xsize > max_width:
facets = facets * float(max_width) / xsize
ysize = facets[:, 4::3].ptp()
if ysize > max_depth:
facets = facets * float(max_depth) / ysize
zsize = facets[:, 5::3].ptp()
if zsize > max_height:
facets = facets * float(max_height) / zsize
writeSTL(facets, fn, ascii=ascii) | 5,355,292 |
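A minimal usage sketch, assuming the module's helpers (writeSTL, optional C tessellation) are importable alongside numpy2stl; the height map and output name are arbitrary.

import numpy as np

# A Gaussian bump on a 128x128 grid as the height map.
x, y = np.meshgrid(np.linspace(-3, 3, 128), np.linspace(-3, 3, 128))
A = 100 * np.exp(-(x ** 2 + y ** 2))
numpy2stl(A, "bump.stl", scale=0.15, solid=True)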
def synonyms(species: str) -> str:
"""
Check to see if there are other names that we should be using for
a particular input. E.g. If CFC-11 or CFC11 was input, go on to use cfc-11,
as this is used in species_info.json
Args:
species (str): Input string that you're trying to match
Returns:
str: Matched species string
"""
from HUGS.Util import load_hugs_json
# Load in the species data
species_data = load_hugs_json(filename="acrg_species_info.json")
# First test whether site matches keys (case insensitive)
matched_strings = [k for k in species_data if k.upper() == species.upper()]
# Used to access the alternative names in species_data
alt_label = "alt"
# If not found, search synonyms
if not matched_strings:
for key in species_data:
# Iterate over the alternative labels and check for a match
matched_strings = [s for s in species_data[key][alt_label] if s.upper() == species.upper()]
if matched_strings:
matched_strings = [key]
break
if matched_strings:
updated_species = matched_strings[0]
return updated_species
else:
raise ValueError(f"Unable to find synonym for species {species}") | 5,355,293 |
def test_parcel_profile_reference_equals_first_height():
"""Test Parcel.profile when reference height equals first final height."""
height = [3000, 2000, 1000]*units.meter
z_init = 3000*units.meter
t_initial = -2*units.celsius
q_initial = 1e-4*units.dimensionless
l_initial = 0*units.dimensionless
rate = 0.5/units.km
truth_t = [-2., 7.59522877, 14.32321658]*units.celsius
truth_q = [0.0001, 0.00045977, 0.00213945]*units.dimensionless
truth_l = [0., 0., 0.]*units.dimensionless
actual_t, actual_q, actual_l = sydney.profile(
height, t_initial, q_initial, l_initial, rate, reference_height=z_init)
assert_almost_equal(actual_t, truth_t, 3)
assert_almost_equal(actual_q, truth_q, 6)
assert_array_equal(actual_l, truth_l) | 5,355,294 |
def test_time():
""" Tests if te algorithm is cabla to finish the solution for N=12 in less than 10 minutes
"""
for N in range(8,20):
_ ,time = solveN(N)
if(time>600):
print("Test don't passed at N={N} should be less than 10 min Taken:{time}")
break
print(f"Time Test passed for N = {N} {time:.2f}s taken") | 5,355,295 |
def _blkid_output(out):
"""
Parse blkid output.
"""
flt = lambda data: [el for el in data if el.strip()]
data = {}
for dev_meta in flt(out.split("\n\n")):
dev = {}
for items in flt(dev_meta.strip().split("\n")):
key, val = items.split("=", 1)
dev[key.lower()] = val
if dev.pop("type", None) == "xfs":
dev["label"] = dev.get("label")
data[dev.pop("devname")] = dev
mounts = _get_mounts()
for device in mounts:
if data.get(device):
data[device].update(mounts[device])
return data | 5,355,296 |
def test_generate_file_with_subclass(input_mock, getpass_mock):
"""Test inheritance."""
class CustomCommand(generate_settings.Command):
"""Custom test class to specify settings."""
settings_template_file = TEMPLATE_FILE_PATH
settings_file_path = CREATED_FILE_PATH
force_secret_key = True
input_mock.side_effect = ["user"]
getpass_mock.return_value = "pass"
init_and_launch_command([], CustomCommand)
assert os.path.exists(CREATED_FILE_PATH)
os.remove(CREATED_FILE_PATH) | 5,355,297 |
def as_iso_datetime(qdatetime):
""" Convert a QDateTime object into an iso datetime string.
"""
return qdatetime.toString(Qt.ISODate) | 5,355,298 |
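A usage sketch assuming a Qt binding such as PyQt5 (the module containing as_iso_datetime presumably already imports Qt and QDateTime from whichever binding it targets).

from PyQt5.QtCore import QDateTime

stamp = as_iso_datetime(QDateTime.currentDateTime())
print(stamp)  # e.g. '2021-06-01T14:30:05'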
def spec_means_and_magnitudes(action_spec):
"""Get the center and magnitude of the ranges in action spec."""
action_means = tf.nest.map_structure(
lambda spec: (spec.maximum + spec.minimum) / 2.0, action_spec)
action_magnitudes = tf.nest.map_structure(
lambda spec: (spec.maximum - spec.minimum) / 2.0, action_spec)
return tf.cast(
action_means, dtype=tf.float32), tf.cast(
action_magnitudes, dtype=tf.float32) | 5,355,299 |
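A minimal sketch using a stand-in spec object with minimum/maximum attributes; real callers would typically pass tf_agents bounded specs, but any leaf object carrying those attributes works with tf.nest.map_structure.

import numpy as np
import tensorflow as tf

class FakeBoundedSpec:
    """Stand-in for a bounded action spec."""
    def __init__(self, minimum, maximum):
        self.minimum = np.asarray(minimum, dtype=np.float32)
        self.maximum = np.asarray(maximum, dtype=np.float32)

action_spec = FakeBoundedSpec(minimum=[-1.0, 0.0], maximum=[1.0, 10.0])
means, magnitudes = spec_means_and_magnitudes(action_spec)
print(means.numpy())       # [0. 5.]
print(magnitudes.numpy())  # [1. 5.]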