content (stringlengths 35 to 762k) | sha1 (stringlengths 40) | id (int64, 0 to 3.66M) |
---|---|---|
def profile(request):
"""
Update a user profile using the built-in Django User model if the user is logged in;
otherwise redirect them to the registration page.
"""
if request.user.is_authenticated():
obj = get_object_or_404(TolaUser, user=request.user)
form = RegistrationForm(request.POST or None, instance=obj,initial={'username': request.user})
if request.method == 'POST':
if form.is_valid():
form.save()
messages.success(request, 'Your profile has been updated.', fail_silently=False)
return render(request, "registration/profile.html", {
'form': form, 'helper': RegistrationForm.helper
})
else:
return HttpResponseRedirect("/accounts/register") | 225ac41ec6565e30f54ece3cad76b2a0770a319d | 3,653,006 |
def conj(Q):
"""Returns the conjugate of a dual quaternion.
"""
res = cs.SX.zeros(8)
res[0] = -Q[0]
res[1] = -Q[1]
res[2] = -Q[2]
res[3] = Q[3]
res[4] = -Q[4]
res[5] = -Q[5]
res[6] = -Q[6]
res[7] = Q[7]
return res | e0a6d67d322f2c939e2d8249983789222c96363d | 3,653,007 |
def benchmark_summary(benchmark_snapshot_df):
"""Creates summary table for a benchmark snapshot with columns:
|fuzzer|time|count|mean|std|min|25%|median|75%|max|
"""
groups = benchmark_snapshot_df.groupby(['fuzzer', 'time'])
summary = groups['edges_covered'].describe()
summary.rename(columns={'50%': 'median'}, inplace=True)
return summary.sort_values('median', ascending=False) | 5cdaa888adb47906659a249076c8a4acb27c6d1d | 3,653,008 |
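A minimal, self-contained sketch of the same describe-based summary on synthetic data (the fuzzer names and numbers below are illustrative, not from any real benchmark):
import pandas as pd
# Synthetic snapshot: two fuzzers measured at one time point.
df = pd.DataFrame({
    'fuzzer': ['afl', 'afl', 'libfuzzer', 'libfuzzer'],
    'time': [100, 100, 100, 100],
    'edges_covered': [1200, 1250, 1100, 1150],
})
summary = df.groupby(['fuzzer', 'time'])['edges_covered'].describe()
summary = summary.rename(columns={'50%': 'median'})
print(summary.sort_values('median', ascending=False))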
def is_pio_job_running(*target_jobs: str) -> bool:
"""
pass in jobs to check if they are running
ex:
> result = is_pio_job_running("od_reading")
> result = is_pio_job_running("od_reading", "stirring")
"""
with local_intermittent_storage("pio_jobs_running") as cache:
for job in target_jobs:
if cache.get(job, b"0") == b"1":
return True
return False | 0ed9daf39372ead913ad52d5c93426eeb06f74ed | 3,653,009 |
def encode(text, encoding='utf-8'):
"""
Returns a unicode representation of the string
"""
if isinstance(text, basestring):
if not isinstance(text, unicode):
text = unicode(text, encoding, 'ignore')
return text | 81d9d2d5cf920c0f15ffc5e50fb670b079ae1f90 | 3,653,010 |
def calculate_sparsity(df: pd.DataFrame) -> tuple:
"""Calculate the data sparsity based on ratings and reviews.
Args:
df ([pd.DataFrame]): DataFrame with counts of `overall` and `reviewText`
measured against total `reviewerID` * `asin`.
Returns:
[tuple]: Tuple of data sparsity wrt. ratings (`overall`) and reviews (`reviewText`).
"""
# no. of ratings
rating_numerator = df["overall"].count()
review_numerator = df["reviewText"].count()
# number of users and items
num_users = df["reviewerID"].nunique()
num_items = df["asin"].nunique()
denominator = num_users * num_items
rating_sparsity = (1.0 - (rating_numerator * 1.0) / denominator) * 100
review_sparsity = (1.0 - (review_numerator * 1.0) / denominator) * 100
return rating_sparsity, review_sparsity | 53e6b2682b67ceb8bbb4f5a6857cdbd565321421 | 3,653,011 |
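A hedged usage sketch for the function above, on a tiny made-up review table (the values are illustrative only):
import pandas as pd
reviews = pd.DataFrame({
    'reviewerID': ['u1', 'u1', 'u2'],
    'asin': ['a1', 'a2', 'a1'],
    'overall': [5.0, 3.0, 4.0],
    'reviewText': ['great', None, 'ok'],  # one missing review text
})
# 3 ratings over 2 users * 2 items -> 25.0; 2 non-null reviews -> 50.0
rating_sparsity, review_sparsity = calculate_sparsity(reviews)
print(rating_sparsity, review_sparsity)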
def char_fun_est(
train_data,
paras=[3, 20], n_trees = 200, uv = 0, J = 1, include_reward = 0, fixed_state_comp = None):
"""
For each cross-fitting task, use quantile regression forests (QRF) for prediction.
paras == "CV_once": run cross-validation once and return only the selected parameters
paras == "CV": run cross-validation, then fit on the whole dataset with the best parameters
Returns
-------
a list with the two fitted QRF estimators (forward and backward)
"""
char_funs = []
X1, y1 = get_pairs(train_data, is_forward = 1, J = J,
include_reward = include_reward, fixed_state_comp = fixed_state_comp)
X2, y2 = get_pairs(train_data, is_forward = 0, J = J,
include_reward = include_reward, fixed_state_comp = fixed_state_comp)
X, y = [X1, X2], [y1, y2]
if paras in ["CV", "CV_once"]:
for i in range(2):
rfqr = RandomForestQuantileRegressor(random_state=0, n_estimators = n_trees)
gd = GridSearchCV(estimator = rfqr, param_grid = param_grid,
cv = 5, n_jobs = n_jobs, verbose=0)
gd.fit(X[i], y[i])
best_paras = gd.best_params_
if paras == "CV_once": # only return forward
return [best_paras['max_depth'], best_paras['min_samples_leaf']]
elif paras == "CV":
print("best_paras:", best_paras)
# use the optimal paras and the whole dataset
rfqr1 = RandomForestQuantileRegressor(
random_state=0,
n_estimators = n_trees,
max_depth=best_paras['max_depth'],
min_samples_leaf=best_paras['min_samples_leaf'],
n_jobs = n_jobs)
char_funs.append(rfqr1.fit(X[i], y[i]))
else: # pre-specified paras
max_depth, min_samples_leaf = paras
for i in range(2):
char_funs.append(
RandomForestQuantileRegressor(
random_state=0, n_estimators = n_trees,
max_depth = max_depth, min_samples_leaf = min_samples_leaf,
n_jobs = n_jobs).fit( X[i], y[i]))
return char_funs | 51012cc870d9bcd1f86fe69534e26d7d365ad271 | 3,653,012 |
def create_tables_for_import(volume_id, namespace):
"""Create the import or permanent obs_ tables and all the mult tables they
reference. This does NOT create the target-specific obs_surface_geometry
tables because we don't yet know what target names we have."""
volume_id_prefix = volume_id[:volume_id.find('_')]
instrument_name = VOLUME_ID_PREFIX_TO_INSTRUMENT_NAME[volume_id_prefix]
if instrument_name is None:
instrument_name = 'GB'
mission_abbrev = VOLUME_ID_PREFIX_TO_MISSION_ABBREV[volume_id_prefix]
mission_name = MISSION_ABBREV_TO_MISSION_TABLE_SFX[mission_abbrev]
mult_table_schema = import_util.read_schema_for_table('mult_template')
# This is an awful hack because this one mult table has an extra field
# in it. Yuck! XXX
mult_target_name_table_schema = (
import_util.read_schema_for_table(
'mult_target_name_template'))
table_schemas = {}
table_names_in_order = []
for table_name in TABLES_TO_POPULATE:
table_name = table_name.replace('<INST>', instrument_name.lower())
table_name = table_name.replace('<MISSION>', mission_name.lower())
if table_name.startswith('obs_surface_geometry__'):
# Note that we aren't replacing <TARGET> here because we don't know
# the target name! We're only using this schema to get field names,
# data source, source order, etc. The real use of the schema will be
# later when we finally create and insert into the correct table for
# each target.
table_schema = import_util.read_schema_for_table(
'obs_surface_geometry_target')
else:
table_schema = import_util.read_schema_for_table(table_name)
if table_schema is None:
continue
table_schemas[table_name] = table_schema
table_names_in_order.append(table_name)
if table_name.startswith('obs_surface_geometry__'):
# Skip surface geo tables until they are needed
continue
# Create the referenced mult_ tables
for table_column in table_schema:
if table_column.get('put_mults_here', False):
continue
field_name = table_column['field_name']
pi_form_type = table_column.get('pi_form_type', None)
if pi_form_type is not None and pi_form_type.find(':') != -1:
pi_form_type = pi_form_type[:pi_form_type.find(':')]
if pi_form_type in GROUP_FORM_TYPES:
mult_name = import_util.table_name_mult(table_name, field_name)
if mult_name in MULT_TABLES_WITH_TARGET_GROUPING:
schema = mult_target_name_table_schema
else:
schema = mult_table_schema
if (impglobals.DATABASE.create_table(namespace, mult_name,
schema) and
namespace == 'import'):
_CREATED_IMP_MULT_TABLES.add(mult_name)
impglobals.DATABASE.create_table(namespace, table_name,
table_schema)
return table_schemas, table_names_in_order | 8e02e98031e4242e2e0d559750258d74180593db | 3,653,013 |
def org_repos(info):
"""
Process an organization's repositories.
:param info: dict of repository data
:return: two lists: the first contains dicts (id, full name, url), the second contains the languages used
"""
repo_info = []
languages = []
if info:
for repo in info:
temp = {"id": repo["id"], "full_name": repo["full_name"], "url": repo["url"], "language": repo["language"]}
repo_info.append(temp)
languages.append(repo["language"])
return repo_info, languages | 9d5633bf834845e1301e0fd383a57c42f2bd530c | 3,653,014 |
from typing import Union
import datetime
def year(yyyy_mm_dd: Union[str, datetime.date]) -> int:
"""
Extracts the year of a given date, similar to yyyy function but returns an int
>>> year('2020-05-14')
2020
"""
date, _ = _parse(yyyy_mm_dd, at_least="%Y")
return date.year | eb34fb578d5ec7130d5670332aa4bbb9aca186ac | 3,653,015 |
def getTypeLevel(Type):
"""Checks whether a spectral data type is available in the endmember library.
Args:
Type: the type of spectra to select.
Returns:
level: the metadata "level" of the group for subsetting. returns 0 if not found.
"""
for i in range(4):
level = i + 1
available_types = listTypes(level=level)
if Type in available_types:
return level
return 0 | 6c26f7dc570b5a7f0cacdc1171ae733b005e7992 | 3,653,016 |
from typing import Union
def construct_creator(creator: Union[dict, str], ignore_email):
"""Parse input and return an instance of Person."""
if not creator:
return None, None
if isinstance(creator, str):
person = Person.from_string(creator)
elif isinstance(creator, dict):
person = Person.from_dict(creator)
else:
raise errors.ParameterError("Invalid creator type")
message = 'A valid format is "Name <email> [affiliation]"'
if not person.name: # pragma: no cover
raise errors.ParameterError(f'Name is invalid: "{creator}".\n{message}')
if not person.email:
if not ignore_email: # pragma: no cover
raise errors.ParameterError(f'Email is invalid: "{creator}".\n{message}')
else:
no_email_warning = creator
else:
no_email_warning = None
return person, no_email_warning | 5306f288874f4d15d5823c34268321121909a3ad | 3,653,017 |
def _encode_query(items: dict, *, mask=False) -> str:
"""Encode a dict to query string per CLI specifications."""
pairs = []
for key in sorted(items.keys()):
value = _MASK if mask and key in _MASKED_PARAMS else items[key]
item = "{}={}".format(key, _quote(value))
# Ensure 'url' goes last per CLI spec
if key == "url":
pairs.append(item)
else:
pairs.insert(0, item)
return "&".join(pairs) | 918f0aa4198367fb3889eb67bba622d272082af7 | 3,653,018 |
def spatial_shape_after_conv(input_spatial_shape, kernel_size, strides, dilation, padding):
""" This function calculates the spatial shape after conv layer.
The formula is obtained from: https://www.tensorflow.org/api_docs/python/tf/nn/convolution
It should be note that current function assumes PS is done before conv
:param input_spatial_shape:
:param kernel_size:
:param strides:
:param dilation:
:param padding:
:return:
"""
if isinstance(input_spatial_shape, (list, tuple)):
return [spatial_shape_after_conv(
one_shape, kernel_size, strides, dilation, padding) for one_shape in input_spatial_shape]
else:
if padding in ['same', 'SAME']:
return int(np.ceil(input_spatial_shape / strides))
else:
return int(np.ceil((input_spatial_shape - (kernel_size - 1) * dilation) / strides)) | a7d924260feb478e44a9ec166fe4248b51632270 | 3,653,019 |
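A small worked example of the two formulas the function above encodes (from the TensorFlow convolution docs; plain Python, no framework needed):
import math
in_size, kernel, stride, dilation = 32, 3, 2, 1
# 'SAME' padding: output = ceil(input / stride)
same_out = math.ceil(in_size / stride)  # 16
# 'VALID' padding: output = ceil((input - (kernel - 1) * dilation) / stride)
valid_out = math.ceil((in_size - (kernel - 1) * dilation) / stride)  # 15
print(same_out, valid_out)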
def sample_partition(dependency_tensor, null_distribution,
updates=100,
initial_partition=None
):
"""
Sample partition for a multilayer network with specified interlayer dependencies
:param dependency_tensor: dependency tensor
:param null_distribution: null distribution (function that takes a state-node as input and returns a random
meso-set assignment)
:param updates: expected number of (pseudo-)Gibbs-sampling updates per state-node (has no effect for a fully
ordered dependency tensor). (optional, default=100)
:param initial_partition: mapping of state-nodes to initial meso-set assignment.
(optional, default=sampled from null distribution)
:return: sampled partition as a mapping (dict) from state-nodes to meso-set assignments.
"""
if initial_partition is None:
partition = {node: null_distribution(node) for node in dependency_tensor.state_nodes()}
else:
partition = {node: initial_partition[node] for node in dependency_tensor.state_nodes()}
random_layers = list(dependency_tensor.random_aspect_layers())
if len(random_layers) <= 1:
n_updates = 1
else:
n_updates = updates * len(random_layers)
for ordered_layer in dependency_tensor.ordered_aspect_layers():
for it in range(n_updates):
random_layer = _rand.choice(random_layers)
layer = tuple(o+r for o, r in zip(ordered_layer, random_layer))
for node in dependency_tensor.state_nodes(layer):
update_node = dependency_tensor.getrandneighbour(node)
if update_node == node:
partition[node] = null_distribution(node)
else:
partition[node] = partition[update_node]
return partition | d6c469054057f18ad1e7ab3abd96103e81931649 | 3,653,020 |
import re
def normalize_spaces(s: str) -> str:
"""
Return a new string with consecutive whitespace collapsed into a single space and leading/trailing whitespace removed.
"""
return re.sub(r'\s+', ' ', s).strip() | aac95ed5b77b5c65f9ce16cfa685d80c56f0e66f | 3,653,021 |
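A quick usage example for the function above:
print(normalize_spaces('  hello \t  world \n'))  # -> 'hello world'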
def power_iter(mat_g, error_tolerance=1e-6, num_iters=100):
"""Power iteration.
Args:
mat_g: the symmetric PSD matrix.
error_tolerance: Iterative exit condition.
num_iters: Number of iterations.
Returns:
eigen vector, eigen value, num_iters
"""
mat_g_size = mat_g.shape[-1]
def _iter_condition(state):
i, unused_v, unused_s, unused_s_v, run_step = state
return jnp.logical_and(i < num_iters, run_step)
def _iter_body(state):
"""One step of power iteration."""
i, new_v, s, s_v, unused_run_step = state
new_v = new_v / jnp.linalg.norm(new_v)
s_v = jnp.einsum(
'ij,j->i', mat_g, new_v, precision=_INVERSE_PTH_ROOT_PRECISION)
s_new = jnp.einsum(
'i,i->', new_v, s_v, precision=_INVERSE_PTH_ROOT_PRECISION)
return (i + 1, s_v, s_new, s_v,
jnp.greater(jnp.abs(s_new - s), error_tolerance))
# Figure out how to use step as seed for random.
v_0 = onp.random.uniform(-1.0, 1.0, mat_g_size).astype(mat_g.dtype)
init_state = tuple([0, v_0, jnp.zeros([], dtype=mat_g.dtype), v_0, True])
num_iters, v_out, s_out, _, _ = lax.while_loop(
_iter_condition, _iter_body, init_state)
v_out = v_out / jnp.linalg.norm(v_out)
return v_out, s_out, num_iters | 11717fda8b3dedce94e9be3157a78d8d95e0e989 | 3,653,022 |
from typing import Union
from typing import Any
def _get_values_target_representation(
val: Union[str, Any],
target_representation: str,
conversion_type: str,
conversion_rate: float,
n_round: int,
split: bool,
input_symbol: str,
target_symbol: str,
) -> Any:
"""
Returns the value of the converted currency in the specified format.
The two formats specified are "abbr", "decimal".
"""
val_new = 0.0
val = float(val)
# 1. for fiat-to-fiat and crypto-to-fiat we multiply
# 2. for fiat-to-crypto we divide
if conversion_type in ("fiat_to_fiat", "crypto_to_fiat"):
val_new = val * conversion_rate
else:
val_new = val / conversion_rate
if target_representation == "abbr":
val = "{:,.{a}f}".format(val, a=n_round)
target_val = "{:,.{a}f}".format(val_new, a=n_round)
if split:
return val, target_val
else:
return input_symbol.upper() + str(val), target_symbol.upper() + str(target_val)
else:
return np.round(val, n_round), np.round(val_new, n_round) | 188fe2da51a177fc743ee30c67807b46730a3a34 | 3,653,023 |
from typing import OrderedDict
def GetResidues(mol, atom_list=None):
"""Create dictrionary that maps residues to atom IDs:
(res number, res name, chain id) --> [atom1 idx, atom2 idx, ...]
"""
residues = OrderedDict()
if atom_list is None:
atom_list = range(mol.GetNumAtoms())
for aid in atom_list:
res_id = GetAtomResidueId(mol.GetAtomWithIdx(aid))
if res_id not in residues:
residues[res_id] = []
residues[res_id].append(aid)
return residues | 51f66cf9c3203573df5660205581fd0571826876 | 3,653,024 |
def BIC(y_pred, y, k, llf = None):
"""Bayesian Information Criterion
Args:
y_pred (array-like)
y (array-like)
k (int): number of features
llf (float): result of log-likelihood function
"""
n = len(y)
if llf is None:
llf = np.log(SSE(y_pred, y))
return np.log(n) * k - 2 * llf | f070400f045b1e8f98b453b8d5f8661271b1969e | 3,653,025 |
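A self-contained numeric illustration of the formula above, BIC = k*ln(n) - 2*llf, using log(SSE) as the log-likelihood stand-in exactly as in the fallback branch (the SSE helper is not shown in the snippet, so it is computed inline here):
import numpy as np
y = np.array([1.0, 2.0, 3.0, 4.0])
y_pred = np.array([1.1, 1.9, 3.2, 3.8])
k, n = 2, len(y)
sse = np.sum((y - y_pred) ** 2)  # stand-in for the SSE() helper
llf = np.log(sse)
print(np.log(n) * k - 2 * llf)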
def create_abstract_insert(table_name, row_json, return_field=None):
"""Create an abstracted raw insert psql statement for inserting a single
row of data
:param table_name: String of a table_name
:param row_json: dictionary of ingestion data
:param return_field: String of the column name to RETURNING in statement
:return: String of an insert statement
"""
columns = []
for key, value in row_json.items():
if key in columns:
continue
else:
columns.append(key)
values = [':' + item for item in columns]
values = ', '.join(map(str, values))
list_columns = ', '.join(map(str, columns))
if return_field is not None:
statement = 'INSERT INTO ' + str(table_name) + '(' + list_columns + ')' \
+ ' VALUES (' + values + ') RETURNING ' + str(return_field)
else:
statement = 'INSERT INTO ' + str(table_name) + '(' + list_columns + ')' \
+ ' VALUES (' + values + ')'
return statement | 8b0a960178a0162b7a0c339682541f0f13520d85 | 3,653,026 |
def descsum_create(s):
"""Add a checksum to a descriptor without"""
symbols = descsum_expand(s) + [0, 0, 0, 0, 0, 0, 0, 0]
checksum = descsum_polymod(symbols) ^ 1
return s + '#' + ''.join(CHECKSUM_CHARSET[(checksum >> (5 * (7 - i))) & 31] for i in range(8)) | 52ce47b470dada282318cd23c61665adfb7554c3 | 3,653,028 |
def _get_header(key):
"""Return message header"""
try:
return request.headers[key]
except KeyError:
abort(400, "Missing header: " + key) | cbdf9928f6ce4c41145529c68039761eab65c3d0 | 3,653,029 |
def compute_solution(primes_list, triangle_sequence):
""" Auxiliary function to compute the solution to the problem.
"""
factorise_w_primes = partial(factorise, primes=primes_list)
all_factors = vmap(factorise_w_primes)(triangle_sequence)
# number of divisors = number of possible combinations of prime factors
# = inner product(number of states for each prime in a number)
# e.g. 1024 has 11 states for prime=2, and 1 state for the others
# 3072 has 11 states for prime=2 and 2 states for prime=3 -> 22 divisors
all_factors = all_factors + 1
n_combinations = jnp.prod(all_factors, axis=1).astype(jnp.int32)
return n_combinations | 5df3444b10a4ae316fab1c21c87e3187d4792f14 | 3,653,030 |
import platform
def key_description(character):
"""
Return the readable description for a key.
:param character: An ASCII character.
:return: Readable description for key.
"""
if "Windows" in platform.system():
for key, value in hex_keycodes.items():
if value == character:
return key
else:
return ""
else:
ascii_code = ord(chr(character))
if ascii_code < 32:
return 'Ctrl+{:c}'.format(ord('@') + ascii_code)
else:
return repr(character) | 9ed5bd198898c2f5cf234cb0c46924286fa18e51 | 3,653,031 |
def combine_index(df, n1, n2):
"""將dataframe df中的股票代號與股票名稱合併
Keyword arguments:
Args:
df (pandas.DataFrame): 此dataframe含有column n1, n2
n1 (str): 股票代號
n2 (str): 股票名稱
Returns:
df (pandas.DataFrame): 此dataframe的index為「股票代號+股票名稱」
"""
return df.set_index(df[n1].str.replace(' ', '') + \
' ' + df[n2].str.replace(' ', '')).drop([n1, n2], axis=1) | 645c62fdc7d8e541c9b55b5f1621d6c442ca683a | 3,653,033 |
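A hedged usage sketch with made-up ticker data (column names and values are illustrative only):
import pandas as pd
df = pd.DataFrame({
    'code': ['2330 ', ' 0050'],
    'name': ['台積電', '元大台灣50'],
    'close': [600.0, 130.0],
})
# Index becomes "code name", e.g. "2330 台積電"; the code/name columns are dropped.
print(combine_index(df, 'code', 'name'))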
def safe_get_stopwords(stopwords_language):
"""
:type stopwords_language: basestring
:rtype: list
"""
try:
return get_stopwords(stopwords_language)
except LanguageNotAvailable:
return [] | f6a32da469e59341aa9a928cac362b0075e5d792 | 3,653,034 |
def setup_mock_device(mock_device):
"""Prepare mock ONVIFDevice."""
mock_device.async_setup = AsyncMock(return_value=True)
mock_device.available = True
mock_device.name = NAME
mock_device.info = DeviceInfo(
MANUFACTURER,
MODEL,
FIRMWARE_VERSION,
SERIAL_NUMBER,
MAC,
)
mock_device.capabilities = Capabilities()
mock_device.profiles = []
def mock_constructor(hass, config):
"""Fake the controller constructor."""
return mock_device
mock_device.side_effect = mock_constructor | f39951f9109e5646d1e4cdd4782907cce5ee3a1c | 3,653,036 |
def convert(digits, base1, base2):
"""Convert given digits in base1 to digits in base2.
digits: str -- string representation of number (in base1)
base1: int -- base of given number
base2: int -- base to convert to
return: str -- string representation of number (in base2)"""
# Handle up to base 36 [0-9a-z]
assert 2 <= base1 <= 36, 'base1 is out of range: {}'.format(base1)
assert 2 <= base2 <= 36, 'base2 is out of range: {}'.format(base2)
return encode(decode(digits, base1), base2) | 482e7207a27c6acfdaf088ef8195792f29d14452 | 3,653,039 |
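The encode/decode helpers used above are not included in the snippet; a minimal sketch of what they might look like (assumed behavior: decode parses a base-`base` digit string into an int, encode does repeated division), e.g. convert('ff', 16, 2) -> '11111111':
import string
DIGITS = string.digits + string.ascii_lowercase  # '0'..'9' then 'a'..'z' for bases up to 36
def decode(digits, base):
    """Parse a string of digits in the given base into an integer."""
    return sum(DIGITS.index(ch) * base ** i for i, ch in enumerate(reversed(digits.lower())))
def encode(number, base):
    """Render a non-negative integer as a digit string in the given base."""
    if number == 0:
        return '0'
    out = []
    while number > 0:
        number, rem = divmod(number, base)
        out.append(DIGITS[rem])
    return ''.join(reversed(out))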
def h(b, W ,X):
"""
This function implements the softmax regression hypothesis function
Argument:
b -- bias
W -- predictive weight matrix
X -- data matrix of size (numbers_examples, number_predictors)
Returns:
softmax(XW + b)
"""
return softmax( (X @ W) + b) | abca116d09993b310a70ddf80fcf0eea73b6d542 | 3,653,040 |
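A self-contained numpy sketch of the hypothesis above, with a minimal softmax stand-in (the real softmax helper is not shown in the snippet):
import numpy as np
def softmax(z):
    # Row-wise softmax with max-subtraction for numerical stability.
    z = z - z.max(axis=1, keepdims=True)
    e = np.exp(z)
    return e / e.sum(axis=1, keepdims=True)
X = np.array([[1.0, 2.0], [0.5, -1.0]])  # (examples, predictors)
W = np.zeros((2, 3))                     # (predictors, classes)
b = np.zeros(3)
print(softmax(X @ W + b))                # each row sums to 1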
def get_random_points(N):
"""
- Takes the number of points N
- Returns tuple (x1,x2), where x1 and x2 are vectors
"""
x1 = np.random.uniform(-1,1,N)
x2 = np.random.uniform(-1,1,N)
return (x1,x2) | e118d2dbbc472bfa31fa30ffe1fabbf625a9c924 | 3,653,041 |
def n_bit_color(clk, din, vga_signals, vga_ports):
"""
Maps n bit input, din, to n bit vga color ports
Ex: din=10010101, r=100, g=101, b=01
"""
blu = len(vga_ports.blu)
grn = len(vga_ports.grn) + blu
red = len(vga_ports.red) + grn
assert len(din) == red
@always(clk.posedge)
def colors():
vga_ports.h_sync.next = vga_signals.h_sync
vga_ports.v_sync.next = vga_signals.v_sync
vga_ports.red.next = 0
vga_ports.grn.next = 0
vga_ports.blu.next = 0
if vga_signals.video_on == 1:
vga_ports.red.next = din[red:grn]
vga_ports.grn.next = din[grn:blu]
vga_ports.blu.next = din[blu:0]
return colors | e760c21c0b87c54d5977e6ba29bed0f5dc20b6ab | 3,653,042 |
from typing import Tuple
from typing import Dict
def point_cloud_transform_net(point_cloud: nn.Variable, train: bool) -> Tuple[nn.Variable, Dict[str, nn.Variable]]:
"""T net, create transformation matrix for point cloud
Args:
point_cloud (nn.Variable): point cloud, shape(batch, number of points, 3)
train (bool): training flag
Returns:
Tuple[nn.Variable, Dict[str, nn.Variable]]: transformation matrix and internal variables
"""
batch_size, num_points, _ = point_cloud.shape
# expand dim to B*C(=K)*H(=num_points)*W(=dim)
point_cloud = F.reshape(point_cloud, shape=(batch_size, 1, num_points, 3))
with nn.parameter_scope("conv1"):
conv_h1 = PF.convolution(
point_cloud, 64, (1, 3), stride=(1, 1), with_bias=False)
conv_h1 = PF.batch_normalization(conv_h1, batch_stat=train)
conv_h1 = F.relu(conv_h1)
with nn.parameter_scope("conv2"):
conv_h2 = PF.convolution(conv_h1, 128, (1, 1),
stride=(1, 1), with_bias=False)
conv_h2 = PF.batch_normalization(conv_h2, batch_stat=train)
conv_h2 = F.relu(conv_h2)
with nn.parameter_scope("conv3"):
conv_h3 = PF.convolution(
conv_h2, 1024, (1, 1), stride=(1, 1), with_bias=False)
conv_h3 = PF.batch_normalization(conv_h3, batch_stat=train)
conv_h3 = F.relu(conv_h3)
pool_h = F.max_pooling(conv_h3, (num_points, 1))
pool_h = F.reshape(pool_h, (batch_size, -1))
with nn.parameter_scope("affine1"):
affine_h1 = PF.affine(pool_h, 512, with_bias=False)
affine_h1 = PF.batch_normalization(affine_h1, batch_stat=train)
affine_h1 = F.relu(affine_h1)
with nn.parameter_scope("affine2"):
affine_h2 = PF.affine(affine_h1, 256, with_bias=False)
affine_h2 = PF.batch_normalization(affine_h2, batch_stat=train)
affine_h2 = F.relu(affine_h2)
with nn.parameter_scope("affine3"):
# transform points (3 dim) so the matrix size is (3*3)
transform_h = PF.affine(affine_h2, 3 * 3)
eye_mat = nn.Variable.from_numpy_array(
np.array([1, 0, 0, 0, 1, 0, 0, 0, 1], dtype=np.float32))
eye_mat = F.reshape(eye_mat, (1, 9))
transform_h = transform_h + eye_mat
transform_h = F.reshape(transform_h, (batch_size, 3, 3))
return transform_h, {
"conv_h1": conv_h1,
"conv_h2": conv_h2,
"conv_h3": conv_h3,
"pool_h": pool_h,
"affine_h1": affine_h1,
"affine_h2": affine_h2,
"transform_h": transform_h,
} | 59a3a30ef874dd1ce47a0d9a369c9170b30ac4ea | 3,653,043 |
def posix_getpgid(space, pid):
""" posix_getpgid - Get process group id for job control """
try:
return space.newint(os.getpgid(pid))
except OSError, e:
space.set_errno(e.errno)
return space.newbool(False)
except OverflowError:
return space.newbool(False) | b01f0d363ce7f0937a52c824aefc0a262f739757 | 3,653,045 |
def on_display_disconnected():
"""Shortcut for registering handlers for ACTION_DISPLAY_DISCONNECTED events.
Functions decorated with this decorator will be called when push2-python loses connection with the Push2
display. It will have the following positional arguments:
* Push2 object instance
Examples:
@push2_python.on_display_disconnected()
def function(push):
print('Connection with Push2 display was just lost!')
"""
return action_handler(ACTION_DISPLAY_DISCONNECTED) | bfb4314e6da432193a0b1e9691ad60d0eb7de039 | 3,653,046 |
from typing import Union
from typing import Dict
from typing import Any
def parse_received_data(blockpage_matcher: BlockpageMatcher,
received: Union[str, Dict[str,
Any]], anomaly: bool) -> Row:
"""Parse a received field into a section of a row to write to bigquery.
Args:
blockpage_matcher: Matcher object
received: a dict parsed from json data, or a str
anomaly: whether data may indicate blocking
Returns:
a dict containing the 'received_' keys/values in SCAN_BIGQUERY_SCHEMA
"""
if isinstance(received, str):
row: Row = {'received_status': received}
_add_blockpage_match(blockpage_matcher, received, anomaly, row)
return row
row = {
'received_status': received['status_line'],
'received_body': received['body'],
'received_headers': parse_received_headers(received.get('headers', {})),
}
full_http_response = _reconstruct_http_response(row)
_add_blockpage_match(blockpage_matcher, full_http_response, anomaly, row)
# hyperquack v1 TLS format
tls = received.get('tls', None)
if tls:
tls_row = {
'received_tls_version': tls['version'],
'received_tls_cipher_suite': tls['cipher_suite'],
'received_tls_cert': tls['cert']
}
row.update(tls_row)
# hyperquack v2 TLS format
if 'TlsVersion' in received:
tls_row = {
'received_tls_version': received['TlsVersion'],
'received_tls_cipher_suite': received['CipherSuite'],
'received_tls_cert': received['Certificate']
}
row.update(tls_row)
return row | 30a01d1b045d0f67e279cca34ade90aaf46b9c62 | 3,653,047 |
def get_filters():
""" Returns sidebar filters """
filters = {
'organisations': Organisation.objects.all(),
'topics': Topic.objects.all(),
'licenses': License.objects.all(),
'formats': Format.objects.all()
}
return filters | da137bbcd37d6504358e9e94a6722495bbc81d65 | 3,653,048 |
from xdsl.dialects.builtin import DenseIntOrFPElementsAttr, i32
import typing
from typing import List
from typing import Tuple
def irdl_op_builder(cls: typing.Type[OpT], operands: List,
operand_defs: List[Tuple[str, OperandDef]],
res_types: List, res_defs: List[Tuple[str, ResultDef]],
attributes: typing.Dict[str, typing.Any],
attr_defs: typing.Dict[str, AttributeDef], successors,
regions, options) -> OpT:
"""Builder for an irdl operation."""
# The builtin dialect needs irdl to define DenseIntOrFPElementsAttr, but here we also need
# DenseIntOrFPElementsAttr from builtin.
# So we have a circular dependency that we solve by importing inside this function.
# Build operands by forwarding the values to SSAValue.get
if len(operand_defs) != len(operands):
raise ValueError(
f"Expected {len(operand_defs)} operands, got {len(operands)}")
built_operands = []
for ((_, operand_def), operand) in zip(operand_defs, operands):
if isinstance(operand_def, VarOperandDef):
if not isinstance(operand, list):
raise ValueError(
f"Expected list for variadic operand builder, got {operand}"
)
built_operands.extend([SSAValue.get(arg) for arg in operand])
else:
built_operands.append(SSAValue.get(operand))
# Build results by forwarding the values to the attribute builders
if len(res_defs) != len(res_types):
raise ValueError(
f"Expected {len(res_defs)} results, got {len(res_types)}")
built_res_types = []
for ((_, res_def), res_type) in zip(res_defs, res_types):
if isinstance(res_def, VarResultDef):
if not isinstance(res_type, list):
raise ValueError(
f"Expected list for variadic result builder, got {res_type}"
)
built_res_types.extend([
irdl_build_attribute(res_def.constr, res) for res in res_type
])
else:
built_res_types.append(
irdl_build_attribute(res_def.constr, res_type))
# Build attributes by forwarding the values to the attribute builders
attr_defs = {name: def_ for (name, def_) in attr_defs}
built_attributes = dict()
for attr_name, attr in attributes.items():
if attr_name not in attr_defs:
if isinstance(attr, Attribute):
built_attributes[attr_name] = attr
continue
raise ValueError(
f"Unexpected attribute name {attr_name} for operation {cls.name}"
)
built_attributes[attr_name] = irdl_build_attribute(
attr_defs[attr_name].constr, attr)
# Take care of variadic operand and result segment sizes.
if AttrSizedOperandSegments() in options:
sizes = [
(len(operand) if isinstance(operand_def, VarOperandDef) else 1)
for operand, (_, operand_def) in zip(operands, operand_defs)
]
built_attributes[AttrSizedOperandSegments.attribute_name] =\
DenseIntOrFPElementsAttr.vector_from_list(sizes, i32)
if AttrSizedResultSegments() in options:
sizes = [(len(result) if isinstance(result_def, VarResultDef) else 1)
for result, (_, result_def) in zip(res_types, res_defs)]
built_attributes[AttrSizedResultSegments.attribute_name] =\
DenseIntOrFPElementsAttr.vector_from_list(sizes, i32)
# Build regions using `Region.get`.
regions = [Region.get(region) for region in regions]
return cls.create(operands=built_operands,
result_types=built_res_types,
attributes=built_attributes,
successors=successors,
regions=regions) | 24c103897b040f2b4959b3d3c1642bc6eca6fda2 | 3,653,049 |
def _listify(single: st.SearchStrategy) -> st.SearchStrategy:
"""
Put the result of `single` strategy into a list
(all strategies should return lists)
"""
@st.composite
def listify_(draw):
return [draw(single)]
strategy = listify_()
strategy.function.__name__ = f"listified<{repr(single)}>"
return strategy | eb4efb742e2c465754e79ba979b69b412f6c066e | 3,653,050 |
def get_text(selector):
"""
Return the text of the element matched by the given selector.
"""
if not get_instance():
raise Exception("You need to start a browser first with open_browser()")
return get_text_g(get_instance(), selector) | b2866c93b80dcf3c61b8330fb09e4af054937e0b | 3,653,051 |
from functools import partial
import time
import textwrap
def _eps(code, version, file_or_path, scale=1, module_color=(0, 0, 0),
background=None, quiet_zone=4):
"""This function writes the QR code out as an EPS document. The
code is drawn by drawing only the modules corresponding to a 1. They
are drawn using a line, such that contiguous modules in a row
are drawn with a single line. The file parameter is used to
specify where to write the document to. It can either be a writable (text)
stream or a file path. The scale parameter sets how large to draw
a single module. By default one point (1/72 inch) is used to draw a single
module. This may make the code too small to be read efficiently.
Increasing the scale will make the code larger. This function will accept
fractional scales (e.g. 2.5).
:param module_color: Color of the QR code (default: ``(0, 0, 0)`` (black))
The color can be specified as triple of floats (range: 0 .. 1) or
triple of integers (range: 0 .. 255) or as hexadecimal value (i.e.
``#36c`` or ``#33B200``).
:param background: Optional background color.
(default: ``None`` (no background)). See `module_color` for the
supported values.
:param quiet_zone: Border around the QR code (also known as quiet zone)
(default: ``4``). Set to zero (``0``) if the code shouldn't
have a border.
"""
def write_line(writemeth, content):
"""\
Writes `content` and ``LF``.
"""
# Postscript: Max. 255 characters per line
for line in textwrap.wrap(content, 255):
writemeth(line)
writemeth('\n')
def line(offset, length):
"""\
Returns coordinates to draw a line with the provided length.
"""
res = ''
if offset > 0:
res = ' {0} 0 m'.format(offset)
res += ' {0} 0 l'.format(length)
return res
def rgb_to_floats(color):
"""\
Converts the provided color into an acceptable format for Postscript's
``setrgbcolor``
"""
def to_float(clr):
if isinstance(clr, float):
if not 0.0 <= clr <= 1.0:
raise ValueError('Invalid color "{0}". Not in range 0 .. 1'
.format(clr))
return clr
if not 0 <= clr <= 255:
raise ValueError('Invalid color "{0}". Not in range 0 .. 255'
.format(clr))
return 1/255.0 * clr if clr != 1 else clr
if not isinstance(color, (tuple, list)):
color = _hex_to_rgb(color)
return tuple([to_float(i) for i in color])
f, autoclose = _get_writable(file_or_path, 'w')
writeline = partial(write_line, f.write)
size = tables.version_size[version] * scale + (2 * quiet_zone * scale)
# Write common header
writeline('%!PS-Adobe-3.0 EPSF-3.0')
writeline('%%Creator: PyQRCode <https://pypi.python.org/pypi/PyQRCode/>')
writeline('%%CreationDate: {0}'.format(time.strftime("%Y-%m-%d %H:%M:%S")))
writeline('%%DocumentData: Clean7Bit')
writeline('%%BoundingBox: 0 0 {0} {0}'.format(size))
# Write the shortcuts
writeline('/M { moveto } bind def')
writeline('/m { rmoveto } bind def')
writeline('/l { rlineto } bind def')
mod_color = module_color if module_color == (0, 0, 0) else rgb_to_floats(module_color)
if background is not None:
writeline('{0:f} {1:f} {2:f} setrgbcolor clippath fill'
.format(*rgb_to_floats(background)))
if mod_color == (0, 0, 0):
# Reset RGB color back to black iff module color is black
# In case module color != black set the module RGB color later
writeline('0 0 0 setrgbcolor')
if mod_color != (0, 0, 0):
writeline('{0:f} {1:f} {2:f} setrgbcolor'.format(*mod_color))
if scale != 1:
writeline('{0} {0} scale'.format(scale))
writeline('newpath')
# Current pen position y-axis
# Note: 0, 0 = lower left corner in PS coordinate system
y = tables.version_size[version] + quiet_zone + .5 # .5 = linewidth / 2
last_bit = 1
# Loop through each row of the code
for row in code:
offset = 0 # Set x-offset of the pen
length = 0
y -= 1 # Move pen along y-axis
coord = '{0} {1} M'.format(quiet_zone, y) # Move pen to initial pos
for bit in row:
if bit != last_bit:
if length:
coord += line(offset, length)
offset = 0
length = 0
last_bit = bit
if bit == 1:
length += 1
else:
offset += 1
if length:
coord += line(offset, length)
writeline(coord)
writeline('stroke')
writeline('%%EOF')
if autoclose:
f.close() | 65e3f4d69eea5aa0385c1b53693d164aa0f5db6d | 3,653,052 |
from typing import List
from typing import Dict
import click
def get_packager_targets(
targets: List[Target], connections: Dict[str, Connection], remote_api: ConnectionClient
) -> List[PackagerTarget]:
"""
Build targets for calling packager. Fetch and base64 decode connections by names using local manifest and
ODAHU connections API
:param targets: Targets from packaging manifest
:param connections: Connections found in local manifest files
:param remote_api: ConnectionClient to fetch missing Connections
"""
packager_targets: List[PackagerTarget] = []
for t in targets:
conn = connections.get(t.connection_name)
if not conn:
click.echo(
f'The "{t.connection_name}" connection of "{t.name}" target is not found in the manifest files. '
f'Trying to retrieve it from API server'
)
conn = remote_api.get_decrypted(t.connection_name)
_decode_connection(conn)
packager_targets.append(
PackagerTarget(conn, t.name)
)
return packager_targets | 1aac3748b2176f5f11ed8ed137f78d64bf01c112 | 3,653,054 |
def elina_texpr0_permute_dimensions(texpr2, dimperm):
"""
Permute dimensions of an ElinaTexpr0 following the semantics of an ElinaDimperm.
Parameters
----------
texpr2 : ElinaTexpr0Ptr
Pointer to the ElinaTexpr0 which dimensions we want to permute.
dimperm : ElinaDimpermPtr
Pointer to the ElinaDimperm which semantics we want to follow.
Returns
-------
texpr1 : ElinaTexpr0Ptr
Pointer to the newly created ElinaTexpr0 with permuted dimensions.
"""
texpr1 = None
try:
elina_texpr0_permute_dimensions_c = elina_auxiliary_api.elina_texpr0_permute_dimensions
elina_texpr0_permute_dimensions_c.restype = ElinaTexpr0Ptr
elina_texpr0_permute_dimensions_c.argtypes = [ElinaTexpr0Ptr, ElinaDimpermPtr]
texpr1 = elina_texpr0_permute_dimensions_c(texpr2, dimperm)
except:
print('Problem with loading/calling "elina_texpr0_permute_dimensions" from "libelinaux.so"')
print('Make sure you are passing ElinaTexpr0Ptr, ElinaDimpermPtr to the function')
return texpr1 | f9c60e6285bc4e934eddf0a0be0511f08a57f45d | 3,653,055 |
def rgb(red: int, green: int, blue: int, background: bool = False) -> Chalk:
"""Generate a new truecolor chalk from an RGB tuple.
Args:
red (int):
The intensity of red (0-255).
green (int):
The intensity of green (0-255).
blue (int):
The intensity of blue (0-255).
background (bool, optional):
If ``True`` will generate the new chalk to be applied as a background color.
Defaults to False.
Returns:
:class:`~.chalk.Chalk`:
The new chalk instance.
"""
color = TrueColor(red, green, blue)
return Chalk(background=color) if background else Chalk(foreground=color) | d5c1e79cc1bf7ee37f1e1df17e9518ac0e11f02b | 3,653,056 |
def first(x: pd.Series) -> pd.Series:
"""
First value of series
:param x: time series
:return: time series of first value
**Usage**
Return series with first value of `X` for all dates:
:math:`R_t = X_0`
where :math:`X_0` is the first value in the series
**Examples**
Last value of series:
>>> series = generate_series(100)
>>> returns = first(series)
**See also**
:func:`last`
"""
return pd.Series(x[0], x.index) | 1a2c856bdff7158ecd7512e43158427530cbc8e4 | 3,653,057 |
def simulate(iterations, graph_generator, graph_params, n_nodes, beta, rho, steps, n_infected_init, vacc=None):
"""Perform `iterations` simulations and compute averages. If vacc is not
None, run the simulation using the SIRV model, otherwise use SIR."""
# Initialize arrays for computing averages over simulations
s = np.zeros((iterations, steps + 1), dtype=int)
i = np.zeros((iterations, steps + 1), dtype=int)
r = np.zeros((iterations, steps + 1), dtype=int)
ni = np.zeros((iterations, steps + 1), dtype=int)
if vacc is not None:
v = np.zeros((iterations, steps + 1), dtype=int)
nv = np.zeros((iterations, steps + 1), dtype=int)
for sim_id in range(iterations):
graph = graph_generator(**{'n': n_nodes, **graph_params})
if vacc is not None:
epidemic = Epidemic('sirv', graph, steps,
beta=beta, rho=rho, n_infected_init=n_infected_init, vacc=vacc)
else:
epidemic = Epidemic('sir', graph, steps,
beta=beta, rho=rho, n_infected_init=n_infected_init)
sim = epidemic.simulate()
# Compute four (steps, ) array containing the total number, at each
# step, of susceptible (S), infected (I), recovered (R) and vaccinated
# (V) respectively.
s[sim_id] = np.ma.masked_not_equal(sim, 0).count(axis=1)
i[sim_id] = np.ma.masked_not_equal(sim, 1).count(axis=1)
r[sim_id] = np.ma.masked_not_equal(sim, 2).count(axis=1)
if vacc is not None:
v[sim_id] = np.ma.masked_not_equal(sim, 3).count(axis=1)
# Compute a (steps, ) array containing the number of newly infected
# individuals at each step. The number of newly infected at time t is
# defined as the sum of nodes that went from state 0 (S) at time t-1
# to state 1 (I) at time t.
ni[sim_id] = np.array(
[n_infected_init] + [((sim[t - 1] == 0) & (sim[t] == 1)).sum() for t in range(1, steps + 1)],
dtype=int)
# Compute the same kind of array for newly vaccinated individuals.
if vacc is not None:
nv[sim_id] = np.array(
[v[sim_id, 0]] + [((sim[t - 1] != 3) & (sim[t] == 3)).sum() for t in range(1, steps + 1)],
dtype=int)
# Compute the average total number of susceptible, infected, recovered and
# vaccinated nodes at each week.
s = s.mean(axis=0)
i = i.mean(axis=0)
r = r.mean(axis=0)
if vacc is not None:
v = v.mean(axis=0)
# Compute the average number of newly infected and vaccinated individuals
# each week.
ni = ni.mean(axis=0)
if vacc is not None:
nv = nv.mean(axis=0)
if vacc is not None:
return s, i, r, v, ni, nv
else:
return s, i, r, ni | 96eeb8be72ceb336d62f858337d983cc8f8d5a9d | 3,653,058 |
import urllib
import json
def lookup_location():
"""
Geolocation lookup of current position.
Determines latitude and longitude coordinates of the system's position
using the ipinfo.io service.
Returns:
Tuple (lat, lon) containing the latitude and longitude coordinates
associated with the IP from which the request is performed.
"""
response = urllib.request.urlopen("https://ipinfo.io/json")
data = json.loads(response.read())
coordinates = data["loc"]
lat, lon = coordinates.split(",")
return float(lat), float(lon) | 5d654314aa8d53cbca2b488bb7c9eb3f1f9cf81a | 3,653,059 |
def _str_or_none(value):
"""Helper: serialize value to JSON string."""
if value is not None:
return str(value) | 7aa1550f71accaa4111386153b2c331e2ff076bc | 3,653,060 |
def create_song_graph_from_songs(songs: list[Song],
parent_graph: song_graph.SongGraph = None,
year_separation: int = 10) -> song_graph.SongGraph:
"""Create and return a song graph from a list of songs.
(Optional) Add a parent graph from a larger dataset to the new song graph.
(Optional) year_separation defines the way year attribute vertices are to be
created. I.e. the intervals in year attribute vertices. For example,
a year_separation of 10 will create year attribute vertices
for each decade spanned by the playlist.
Preconditions:
- parent_graph is None or parent_graph.are_attributes_created()
# parent_graph is not None implies parent_graph.are_attributes_created()
"""
graph = song_graph.SongGraph(parent_graph)
for song in songs:
graph.add_song(song)
if parent_graph is None:
graph.generate_attribute_vertices(year_separation)
else:
graph.generate_attribute_vertices(use_parent=True, year_separation=year_separation)
return graph | 647eb1ce77cf0c596c2fabd41aa32062636ca8a4 | 3,653,061 |
def convert(secs):
"""Takes a time in seconds and converts to min:sec:msec"""
mins = int(secs // 60)
secs %= 60
msecs = int(round(((secs - int(secs)) * 1000)))
secs = int(secs)
return f'{mins} mins, {secs} secs, {msecs} msecs' | 70752f190f94d3bdb4cb3b562b6bf9d1c7d28479 | 3,653,062 |
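Usage example for the function above:
print(convert(125.5))  # -> '2 mins, 5 secs, 500 msecs'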
def from_data(symbols, key_matrix, name_matrix, one_indexed=False):
""" z-matrix constructor
:param symbols: atomic symbols
:type symbols: tuple[str]
:param key_matrix: key/index columns of the z-matrix, zero-indexed
:type key_matrix: tuple[tuple[float, float or None, float or None]]
:param name_matrix: coordinate name columns of the z-matrix
:type name_matrix: tuple[tuple[str, str or None, str or None]]
"""
syms = list(map(pt.to_E, symbols))
natms = len(syms)
key_mat = _key_matrix(key_matrix, natms, one_indexed)
name_mat = _name_matrix(name_matrix, natms)
vma = tuple(zip(syms, key_mat, name_mat))
return vma | 5b3f98b98dca797223a95af967e9aaff311d24f8 | 3,653,063 |
def should_drop_from_right_deck(n_left: int, n_right:int, seed: int=None) -> bool:
"""
Determine whether we drop a card from the right or left sub-deck.
Either `n_left` or `n_right` (or both) must be greater than zero.
:param n_left: the number of cards in the left sub-deck.
:param n_right: the number of cards in the right sub-deck.
:param seed: optional seed for the random number generator to
enable deterministic behavior.
:return: True if we should drop a card from the right sub-deck,
False otherwise.
Examples:
>>> should_drop_from_right_deck(n_left=32, n_right=5, seed=0, )
True
>>> should_drop_from_right_deck(n_left=0, n_right=5, )
True
>>> should_drop_from_right_deck(n_left=7, n_right=0, )
False
>>> should_drop_from_right_deck(n_left=0, n_right=0, )
Traceback (most recent call last):
...
ValueError: Either `n_left` or `n_right` (or both) must be greater than zero.
"""
if n_left > 0 and n_right > 0:
# There are cards left in both sub-decks, so pick a
# sub-deck at random.
random = sklearn.utils.check_random_state(seed=seed)
num = random.randint(low=0, high=2)
boolean = (num == 0)
return boolean
elif n_left == 0 and n_right > 0:
# There are no more cards in the left sub-deck, only
# the right sub-deck, so we drop from the right sub-deck.
return True
elif n_left > 0 and n_right == 0:
# There are no more cards in the right sub-deck, only
# the left sub-deck, so we drop from the left sub-deck.
return False
else:
# There are no more cards in either sub-deck.
raise ValueError ('Either `n_left` or `n_right` ' '(or both) must be greater than zero.') | 42bbfc3c8a129f090b50c0979d95e53fd6d6a13f | 3,653,064 |
def EXPOSED(mu=1.0):
"""
matrix of exposed sites
Parameters
----------
mu: rate
"""
pis = np.array([0.088367,0.078147,0.047163,0.087976,0.004517,0.058526,0.128039,0.056993,0.024856,0.025277,0.045202,0.094639,0.012338,0.016158,0.060124,0.055346,0.051290,0.006771,0.021554,0.036718])
W = np.array([
[0.0,0.526738,0.48315,0.658902,2.051872,1.280002,1.306565,1.370782,0.540809,0.171986,0.430511,0.697731,1.043937,0.265209,1.270693,4.826665,2.131819,0.143081,0.208643,2.544463],
[0.526738,0.0,0.505837,0.051052,2.214326,2.039552,0.137928,0.363365,2.288922,0.237023,0.670514,3.881079,0.656943,0.097443,0.166534,0.751947,0.584329,0.47559,0.196271,0.313443],
[0.48315,0.505837,0.0,3.902456,0.961103,1.301786,0.285806,1.8201,4.949307,0.337226,0.158937,1.677194,0.539827,0.182522,0.068692,4.412265,2.133604,0.061094,0.599369,0.172264],
[0.658902,0.051052,3.902456,0.0,0.129989,0.399061,3.100403,0.885317,0.70089,0.018315,0.021949,0.10545,0.066925,0.026918,0.228829,0.975564,0.368887,0.042618,0.121313,0.073705],
[2.051872,2.214326,0.961103,0.129989,0.0,0.456521,0.033946,0.886564,2.172284,1.037046,1.702066,0.146263,1.846562,3.002586,0.156216,5.294149,2.067387,1.603125,3.842632,4.207648],
[1.280002,2.039552,1.301786,0.399061,0.456521,0.0,2.514377,0.320746,3.755421,0.212032,1.261113,2.570254,1.973592,0.080193,0.362501,1.033459,1.013613,0.210329,0.15847,0.497398],
[1.306565,0.137928,0.285806,3.100403,0.033946,2.514377,0.0,0.303966,0.270957,0.084442,0.110508,0.730337,0.18816,0.023999,0.214847,0.382235,0.51139,0.048276,0.064648,0.48462],
[1.370782,0.363365,1.8201,0.885317,0.886564,0.320746,0.303966,0.0,0.401311,0.012279,0.052946,0.279865,0.158136,0.084663,0.1489,1.970857,0.174527,0.186382,0.03928,0.132496],
[0.540809,2.288922,4.949307,0.70089,2.172284,3.755421,0.270957,0.401311,0.0,0.317239,0.869247,0.598289,0.519993,2.047163,0.323141,0.99331,0.58096,0.961546,8.230282,0.329895],
[0.171986,0.237023,0.337226,0.018315,1.037046,0.212032,0.084442,0.012279,0.317239,0.0,8.675343,0.338782,9.483497,2.193062,0.071992,0.190509,2.56363,0.208313,0.517123,23.711178],
[0.430511,0.670514,0.158937,0.021949,1.702066,1.261113,0.110508,0.052946,0.869247,8.675343,0.0,0.313102,14.176858,4.802817,0.343919,0.389101,0.522334,1.130724,0.713426,3.466991],
[0.697731,3.881079,1.677194,0.10545,0.146263,2.570254,0.730337,0.279865,0.598289,0.338782,0.313102,0.0,1.013268,0.044792,0.19547,0.592156,1.147459,0.052858,0.084962,0.348362],
[1.043937,0.656943,0.539827,0.066925,1.846562,1.973592,0.18816,0.158136,0.519993,9.483497,14.176858,1.013268,0.0,3.261401,0.099252,0.557254,2.960091,1.328785,0.812142,4.136445],
[0.265209,0.097443,0.182522,0.026918,3.002586,0.080193,0.023999,0.084663,2.047163,2.193062,4.802817,0.044792,3.261401,0.0,0.08702,0.668834,0.24442,5.210001,23.228875,1.199764],
[1.270693,0.166534,0.068692,0.228829,0.156216,0.362501,0.214847,0.1489,0.323141,0.071992,0.343919,0.19547,0.099252,0.08702,0.0,1.223981,0.413148,0.045945,0.043249,0.368231],
[4.826665,0.751947,4.412265,0.975564,5.294149,1.033459,0.382235,1.970857,0.99331,0.190509,0.389101,0.592156,0.557254,0.668834,1.223981,0.0,7.384701,0.316078,0.40531,0.266531],
[2.131819,0.584329,2.133604,0.368887,2.067387,1.013613,0.51139,0.174527,0.58096,2.56363,0.522334,1.147459,2.960091,0.24442,0.413148,7.384701,0.0,0.144393,0.234217,3.184874],
[0.143081,0.47559,0.061094,0.042618,1.603125,0.210329,0.048276,0.186382,0.961546,0.208313,1.130724,0.052858,1.328785,5.210001,0.045945,0.316078,0.144393,0.0,4.903887,0.252132],
[0.208643,0.196271,0.599369,0.121313,3.842632,0.15847,0.064648,0.03928,8.230282,0.517123,0.713426,0.084962,0.812142,23.228875,0.043249,0.40531,0.234217,4.903887,0.0,0.459187],
[2.544463,0.313443,0.172264,0.073705,4.207648,0.497398,0.48462,0.132496,0.329895,23.711178,3.466991,0.348362,4.136445,1.199764,0.368231,0.266531,3.184874,0.252132,0.459187,0.0]
])
gtr = GTR(alphabet=alphabets['aa_nogap'])
gtr.assign_rates(mu=mu, pi=pis, W=W)
return gtr | c203eb8affeddd4b2620836759acb35ae6f9114c | 3,653,066 |
async def rename_conflicting_targets(
ptgts: PutativeTargets, all_existing_tgts: AllUnexpandedTargets
) -> UniquelyNamedPutativeTargets:
"""Ensure that no target addresses collide."""
existing_addrs: set[str] = {tgt.address.spec for tgt in all_existing_tgts}
uniquely_named_putative_targets: list[PutativeTarget] = []
for ptgt in ptgts:
if not ptgt.addressable:
# Non-addressable PutativeTargets never have collision issues.
uniquely_named_putative_targets.append(ptgt)
continue
idx = 0
possibly_renamed_ptgt = ptgt
# Targets in root-level BUILD files must be named explicitly.
if possibly_renamed_ptgt.path == "" and possibly_renamed_ptgt.kwargs.get("name") is None:
possibly_renamed_ptgt = possibly_renamed_ptgt.rename("root")
# Eliminate any address collisions.
while possibly_renamed_ptgt.address.spec in existing_addrs:
possibly_renamed_ptgt = ptgt.rename(f"{ptgt.name}{idx}")
idx += 1
uniquely_named_putative_targets.append(possibly_renamed_ptgt)
existing_addrs.add(possibly_renamed_ptgt.address.spec)
return UniquelyNamedPutativeTargets(PutativeTargets(uniquely_named_putative_targets)) | 9ac25549de1fca5912104135b87a1a17f1cd43fe | 3,653,067 |
def outer2D(v1, v2):
"""Calculates the magnitude of the outer product of two 2D vectors, v1 and v2"""
return v1[0]*v2[1] - v1[1]*v2[0] | b1f80afa3b8537eb11d79b17d0f12903bec9387c | 3,653,068 |
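Quick sanity check of the function above: the 2D cross-product magnitude of the unit axes is 1, and it is antisymmetric:
print(outer2D((1, 0), (0, 1)))  # 1
print(outer2D((0, 1), (1, 0)))  # -1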
from typing import Union
def get_item_indent(item: Union[int, str]) -> Union[int, None]:
"""Gets the item's indent.
Returns:
indent as a int or None
"""
return internal_dpg.get_item_configuration(item)["indent"] | a80997e8c2cfa76a76ff8d09c7308196f0572f86 | 3,653,069 |
def V_RSJ_asym(i, ic_pos, ic_neg, rn, io, vo):
"""Return voltage with asymmetric Ic's in RSJ model"""
if ic_pos < 0 or ic_neg > 0 or rn < 0:
#or abs(ic_neg/ic_pos) > 1.2 or abs(ic_pos/ic_neg) > 1.2 :
# set boundaries for fitting
#pass
v = 1e10
else:
v = np.zeros(len(i))
n = i>io+ic_pos; v[n] = rn*np.sqrt((i[n]-io)**2-ic_pos**2)+vo
n = i<io+ic_neg; v[n] = -rn*np.sqrt((i[n]-io)**2-ic_neg**2)+vo
n = np.logical_and(i>=io+ic_neg, i<=io+ic_pos); v[n]=vo
return v | 5005beec6a90bf1a5054836f6f22dbe42dcda6f1 | 3,653,070 |
def h2_gas_costs(pipe_dist=-102.75, truck_dist=-106.0, pipeline=True, max_pipeline_dist=2000):
"""Calculates the transport cost of H2 gas. Requires as input the distance that H2 will be piped and
trucked."""
if max_pipeline_dist > pipe_dist > 400:
pipe = 0.0004 * pipe_dist + 0.0424
elif pipe_dist < 400:
pipe = 0.0004 * 400 + 0.0424
else:
pipe = np.nan
if pipeline == False:
pipe = np.nan
truck = 0.003 * truck_dist + 0.3319
return pipe + truck | 5b3623d33862038a9629349e8052e8214ddba51c | 3,653,071 |
from typing import Union
def number_to_words(input_: Union[int, str], capitalize: bool = False) -> str:
"""Converts integer version of a number into words.
Args:
input_: Takes the integer version of a number as an argument.
capitalize: Boolean flag to capitalize the first letter.
Returns:
str:
String version of the number.
"""
result = inflect.engine().number_to_words(num=input_)
return result[0].upper() + result[1:] if capitalize else result | 22c5f7c64354a76404150cdf888e3bc3582659f1 | 3,653,072 |
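A hedged usage example, assuming the inflect package is installed:
print(number_to_words(42))                   # 'forty-two'
print(number_to_words(42, capitalize=True))  # 'Forty-two'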
from typing import Dict
from typing import OrderedDict
def get_size_reduction_by_cropping(analyzer: DatasetAnalyzer) -> Dict[str, Dict]:
"""
Compute all size reductions of each case
Args:
analyzer: analzer which calls this property
Returns:
Dict: computed size reductions
`size_reductions`: dictionary with each case id and reduction
"""
size_reduction = OrderedDict()
for case_id in analyzer.case_ids:
props = load_properties_of_cropped(analyzer.cropped_data_dir / case_id)
shape_before_crop = props["original_size_of_raw_data"]
shape_after_crop = props['size_after_cropping']
size_red = np.prod(shape_after_crop) / np.prod(shape_before_crop)
size_reduction[case_id] = size_red
return {"size_reductions": size_reduction} | ce1aad85d8f971cccc8cb0547b14be8074228261 | 3,653,073 |
import re
def getProxyFile(path):
"""
Opens a text file and returns the contents with any setting of a certificate file
replaced with the mitmproxy certificate.
"""
with open(path, "r") as fd:
contents = fd.read()
certReferences = re.findall("setcertificatesfile\(.*\)", contents, re.IGNORECASE)
for certReference in certReferences:
msg = "using mitmproxy certificate: %s (%s)" % (certReference, path)
print(bcolors.OKBLUE + msg + bcolors.ENDC)
contents = contents.replace(certReference, 'setCertificatesFile("pkg:/source/mitmproxy.crt")')
return contents | c5c0d562e2b430b79b91b2a9ffd23f6d18320b6f | 3,653,074 |
def bytes_filesize_to_readable_str(bytes_filesize: int) -> str:
"""Convert bytes integer to kilobyte/megabyte/gigabyte/terabyte equivalent string"""
if bytes_filesize < 1024:
return "{} B"
num = float(bytes_filesize)
for unit in ["B", "KB", "MB", "GB"]:
if abs(num) < 1024.0:
return "{:.1f} {}".format(num, unit)
num /= 1024.0
return "{:.1f} {}".format(num, "TB") | cdeb228de80422f541c5fa682422d77a44d19ca2 | 3,653,075 |
def braf_mane_data():
"""Create test fixture for BRAF MANE data."""
return {
"#NCBI_GeneID": "GeneID:673",
"Ensembl_Gene": "ENSG00000157764.14",
"HGNC_ID": "HGNC:1097",
"symbol": "BRAF",
"name": "B-Raf proto-oncogene, serine/threonine kinase",
"RefSeq_nuc": "NM_004333.6",
"RefSeq_prot": "NP_004324.2",
"Ensembl_nuc": "ENST00000646891.2",
"Ensembl_prot": "ENSP00000493543.1",
"MANE_status": "MANE Select",
"GRCh38_chr": "7",
"chr_start": 140730665,
"chr_end": 140924929,
"chr_strand": "-"
} | 7e62545147ef1a6f81c75e56d85f5ab8df3895e8 | 3,653,076 |
def import_class(path):
"""
Import a class from a dot-delimited module path. Accepts both dot and
colon seperators for the class portion of the path.
ex::
import_class('package.module.ClassName')
or
import_class('package.module:ClassName')
"""
if ':' in path:
module_path, class_name = path.split(':')
else:
module_path, class_name = path.rsplit('.', 1)
module = __import__(module_path, fromlist=[class_name], level=0)
return getattr(module, class_name) | dcdf71a3bb665dae1fe5913e19be3a4c0aa3c5d3 | 3,653,078 |
import torch
def elastic(X, kernel, padding, alpha=34.0):
# type: (Tensor, Tensor, int, float) -> Tensor
"""
X: [(N,) C, H, W]
"""
H, W = X.shape[-2:]
dx = torch.rand(X.shape[-2:], device=kernel.device) * 2 - 1
dy = torch.rand(X.shape[-2:], device=kernel.device) * 2 - 1
xgrid = torch.arange(W, device=dx.device).repeat(H, 1)
ygrid = torch.arange(H, device=dy.device).repeat(W, 1).T
dx = alpha * F.conv2d(unsqueeze_as(dx, X, 0), kernel, bias=None, padding=padding)
dy = alpha * F.conv2d(unsqueeze_as(dy, X, 0), kernel, bias=None, padding=padding)
H /= 2
W /= 2
dx = (dx + xgrid - W) / W
dy = (dy + ygrid - H) / H
grid = torch.stack((dx.squeeze(1), dy.squeeze(1)), dim=-1)
return F.grid_sample(X, grid, padding_mode="reflection", align_corners=False) | 580c9600cb4ddd77d114ae94303fc2c2a416cf17 | 3,653,079 |
def fixture_make_bucket(request):
"""
Return a factory function that can be used to make a bucket for testing.
:param request: The Pytest request object that contains configuration data.
:return: The factory function to make a test bucket.
"""
def _make_bucket(s3_stub, wrapper, bucket_name, region_name=None):
"""
Make a bucket that can be used for testing. When stubbing is used, a stubbed
bucket is created. When AWS services are used, the bucket is deleted after
the test completes.
:param s3_stub: The S3Stubber object, configured for stubbing or AWS.
:param wrapper: The bucket wrapper object, used to create the bucket.
:param bucket_name: The unique name for the bucket.
:param region_name: The Region in which to create the bucket.
:return: The test bucket.
"""
if not region_name:
region_name = s3_stub.region_name
s3_stub.stub_create_bucket(bucket_name, region_name)
# Bucket.wait_until_exists calls head_bucket on a timer until it returns 200.
s3_stub.stub_head_bucket(bucket_name)
bucket = wrapper.create_bucket(bucket_name, region_name)
def fin():
if not s3_stub.use_stubs and wrapper.bucket_exists(bucket_name):
bucket.delete()
request.addfinalizer(fin)
return bucket
return _make_bucket | bdfbbad1b80f43a1b81f5bf8f69db350128e3304 | 3,653,080 |
def get_member_struc(*args):
"""
get_member_struc(fullname) -> struc_t
Get containing structure of member by its full name "struct.field".
@param fullname (C++: const char *)
"""
return _ida_struct.get_member_struc(*args) | ac2c226725af8bde1510a6f7fd2fdb64a8c52d01 | 3,653,081 |
from datetime import datetime
def pop():
"""Check the first task in redis(which is the task with the smallest score)
if the score(timestamp) is smaller or equal to current timestamp, the task
should be take out and done.
:return: True if task is take out, and False if it is not the time.
"""
task = connection.zrange(QUEUE_KEY, 0, 0)
if not task:
return False, 'No emails now!'
msg_id = task[0]
timestamp = connection.zscore(QUEUE_KEY, msg_id)
now = datetime.datetime.now().timestamp()
if timestamp < now or abs(timestamp - now) <= 1e-6:
message = connection.get(msg_id)
pipeline = connection.pipeline()
pipeline.zrem(QUEUE_KEY, msg_id)
pipeline.delete(msg_id)
pipeline.execute()
return True, message
return False, "It's too early now!" | 0472d0bcffee84547d7ee400d547ecbb86e50e87 | 3,653,082 |
def xml_to_dictform(node):
""" Converts a minidom node to "dict" form. See parse_xml_to_dictform. """
if node.nodeType != node.ELEMENT_NODE:
raise Exception("Expected element node")
result = (node.nodeName, {}, []) # name, attrs, items
if node.attributes != None:
attrs = node.attributes # hard to imagine a more contrived way of accessing attributes...
        for key, value in ((attrs.item(i).name, attrs.item(i).value) for i in range(attrs.length)):
result[1][key] = value
for child in node.childNodes:
if child.nodeType == child.ELEMENT_NODE:
result[2].append(xml_to_dictform(child))
return result | 8fdc07070a32eb34c38e46cb12d23d367d71c606 | 3,653,083 |
def TranslateCoord(data, res, mode):
"""
Translates position of point to unified coordinate system
Max value in each direction is 1.0 and the min is 0.0
:param data: (tuple(float, float)) Position to be translated
:param res: (tuple(float, float)) Target resolution
:param mode: (TranslationMode) Work mode. Available modes are: Encode, Decode.
    :returns: tuple(float, float) when encoding, tuple(int, int) when decoding
"""
x, y = data
resX, resY = res
#encode
if mode == TranslationMode.Encode:
uX = x / resX
uY = y / resY
return (uX, uY)
#decode
elif mode == TranslationMode.Decode:
x = Clamp(x, 0, 1)
y = Clamp(y, 0, 1)
tX = x * resX
tY = y * resY
return (int(tX), int(tY)) | c89515692330ce02c0f6f371c16a9028c51e9bbe | 3,653,084 |
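# Hedged usage sketch for TranslateCoord (TranslationMode and Clamp come from the
# surrounding module, so this is shown doctest-style rather than executed):
#     >>> TranslateCoord((960, 540), (1920, 1080), TranslationMode.Encode)
#     (0.5, 0.5)
#     >>> TranslateCoord((0.5, 0.5), (1280, 720), TranslationMode.Decode)
#     (640, 360)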
def _get_mutator_plugins_bucket_url():
"""Returns the url of the mutator plugin's cloud storage bucket."""
mutator_plugins_bucket = environment.get_value('MUTATOR_PLUGINS_BUCKET')
if not mutator_plugins_bucket:
logs.log_warn('MUTATOR_PLUGINS_BUCKET is not set in project config, '
'skipping custom mutator strategy.')
return None
return 'gs://%s' % mutator_plugins_bucket | 31073e1fbaf817d63d02de93a2fc224bd2904dec | 3,653,085 |
from io import StringIO
from json import JSONDecodeError, loads
def objectify_json_lines(path_buf_stream,
from_string=False,
fatal_errors=True,
encoding=_DEFAULT_ENCODING,
ensure_ascii=False,
encode_html_chars=False,
avoid_memory_pressure=True):
"""Generator return an object for each line of JSON in a file, stream or string
in: path_buf_stream:
(str) A string file path containing JSON
(stream) An open readable stream from a file containing JSON
        (str) A string of JSON content (also requires `from_string=True`)
This function intentionally operates as a generator, to avoid using huge
amounts of memory when loading a very large file- afterall, this is the
primary benefit of the JSON lines format. It is meant to be called many
times in succession, sometimes up to millions of times, so it is important
that it is relatively quick/simple.
There are three ways to invoke this function
Each of them returns a native Python object
for obj in objectify_json_lines('file.json'):
print(obj.items())
json_fd = open('file.json', 'r', encoding='utf-8')
for obj in objectify_json_lines(json_fd):
print(obj.items())
json_str = '{"A": "B"}\n{"C": "D"}'
for obj in objectify_json_lines(json_str, from_string=True):
print(obj.items())
"""
if from_string is True:
# If caller specifies path_buf_stream is a string, turn it into
# a stream to avoid an extra set of logic below
assert isinstance(path_buf_stream, str)
path_buf_stream = StringIO(path_buf_stream)
# If path_buf_stream has a read method, it is effectively stream
reader = getattr(path_buf_stream, 'read', None)
with (path_buf_stream if reader else open(path_buf_stream, 'r', encoding=encoding)) as infd:
# If the user doesn't care about memory pressure, don't bother with a generator, just
# give them a regular list of objects from the JSON lines file. I guess most of the time
        # nobody cares, and having to work with a generator in Python 3 can be annoying for the caller
if avoid_memory_pressure is False:
if fatal_errors is True:
try:
                    return [loads(line) for line in infd.read().splitlines() if line]
except JSONDecodeError:
return None
obj_list = list()
            for line in infd.read().splitlines():
try:
obj = loads(line)
obj_list.append(obj)
except JSONDecodeError:
# Silently ignore bad lines ..
continue
return obj_list
for line in infd.readlines():
line = line.strip()
# Exception handlers are expensive to set up and even more expensive
# when they fire. If errors should be fatal, don't bother setting one
# up at all
if fatal_errors is True:
yield loads(line)
else:
# The more expensive path, preparing to catch an exception and
# continue gracefully if fatal_errors is False
try:
yield loads(line)
except Exception as err:
error('bad JSON-line line: {}'.format(repr(err)))
continue | a3de3cd8f13c7a245573bd34944e67908dfd4786 | 3,653,086 |
def gll_int(f, a, b):
"""Integrate f from a to b using its values at gll points."""
n = f.size
x, w = gll(n)
return 0.5*(b-a)*np.sum(f*w) | d405e4c3951f9764077508fcdb73e000c107e4d4 | 3,653,087 |
def _get_remote_user():
"""
Get the remote username.
Returns
-------
str: the username.
"""
return input('\nRemote User Name: ') | 5f2bb67b5f55ec053a755c015755f488ab6d8c71 | 3,653,089 |
def nf_regnet_b4(pretrained=False, **kwargs):
""" Normalization-Free RegNet-B4
`Characterizing signal propagation to close the performance gap in unnormalized ResNets`
- https://arxiv.org/abs/2101.08692
"""
return _create_normfreenet('nf_regnet_b4', pretrained=pretrained, **kwargs) | 642ba43a132128a16273bb6cc76178b71be6beaf | 3,653,092 |
from typing import Tuple
def load_data_binary_labels(path: str) -> Tuple[pd.DataFrame, pd.DataFrame]:
"""Loads data from CSV file and returns features (X) and
only binary labels meaning (any kind of) toxic or not"""
df = pd.read_csv(path)
X = df.comment_text.to_frame()
y = df[config.LIST_CLASSES].max(axis=1).to_frame(name="toxic")
return X, y | e73b99b2d00d388298f9f6e2cdfca15f121a0238 | 3,653,093 |
from bs4 import BeautifulSoup
def parse(html_url):
"""Parse."""
html = www.read(html_url)
soup = BeautifulSoup(html, 'html.parser')
data = {'paragraphs': []}
content = soup.find('div', class_=CLASS_NAME_CONTENT)
for child in content.find_all():
text = _clean(child.text)
if child.name == 'h3':
data['title'] = text
elif child.name == 'h4':
data['subtitle'] = text
elif child.name == 'p':
data['paragraphs'].append(text)
return data | 226245618d220db00eb2f298aaf462c1c861c32b | 3,653,094 |
import logging
def get_tp_algorithm(name: str) -> GenericTopologyProgramming:
""" returns the requested topology programming instance """
name = name.lower()
if name == "uniform_tp":
return UniformTP()
if name == "joint_tp":
return JointTP()
if name == "ssp_oblivious_tp":
return SSPObliviousTP()
err_msg = f"wan tp name not found: {name}"
logging.error(err_msg)
raise Exception(err_msg) | 6f98613c13becf1ed85cb8a667fc35cfac86973f | 3,653,095 |
def get_first_job_queue_with_capacity():
"""Returns the first job queue that has capacity for more jobs.
If there are no job queues with capacity, returns None.
"""
job_queue_depths = get_job_queue_depths()["all_jobs"]
for job_queue in settings.AWS_BATCH_QUEUE_WORKERS_NAMES:
if job_queue_depths[job_queue] <= settings.MAX_JOBS_PER_NODE:
return job_queue
return None | a23bf9dcef39d1377a1d7cb2a37abbe1186fac0a | 3,653,096 |
def rotations(images, n_rot, ccw_limit, cw_limit):
"""
Rotates every image in the list "images" n_rot times, between 0 and cw_limit
(clockwise limit) n_rot times and between 0 and ccw_limit (counterclockwise
limit) n_rot times more. The limits are there to make sense of the data
augmentation. E.g: Rotating an mnist digit 180 degrees turns a 6 into a 9,
which makes no sense at all.
cw_limit and ccw_limit are in degrees!
Returns a list with all the rotated samples. Size will be 2*n_rot+1, because
we also want the original sample to be included
Example: images=[img],n_rot=3,ccw_limit=90,cw_limit=90
Returns: [img1: original,
img2: 90 degrees rot ccw,
img3: 60 degrees rot ccw,
img4: 30 degrees rot ccw,
               img5: 30 degrees rot cw,
               img6: 60 degrees rot cw,
               img7: 90 degrees rot cw]
"""
# if we only have 1 image, transform into a list to work with same script
if type(images) is not list:
images = [images]
# calculate the initial angle and the step
cw_step_angle = float(cw_limit) / float(n_rot)
ccw_step_angle = float(ccw_limit) / float(n_rot)
# container for rotated images
rotated_images = []
# get every image and apply the number of desired rotations
for img in images:
# get rows and cols to rotate
rows, cols, depth = img.shape
# append the original one too
rotated_images.append(img)
# rotate the amount of times we want them rotated
for i in range(1, n_rot + 1):
# create rotation matrix with center in the center of the image,
# scale 1, and the desired angle (we travel counter clockwise first, and
# then clockwise
M_ccw = cv2.getRotationMatrix2D(
(cols / 2, rows / 2), i * ccw_step_angle, 1)
# rotate using the matrix (using bicubic interpolation)
rot_img = cv2.warpAffine(img, M_ccw, (cols, rows), flags=cv2.INTER_CUBIC)
# append to rotated images container
rotated_images.append(rot_img)
M_cw = cv2.getRotationMatrix2D(
(cols / 2, rows / 2), -i * cw_step_angle, 1)
# rotate using the matrix (using bicubic interpolation)
rot_img = cv2.warpAffine(img, M_cw, (cols, rows), flags=cv2.INTER_CUBIC)
# append to rotated images container
rotated_images.append(rot_img)
return rotated_images | b1ca7a609faa6ed8903424976b94b477a4798096 | 3,653,097 |
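# Hedged usage sketch: `img` is assumed to be an H x W x C image array; three
# rotation steps up to 90 degrees in each direction yield 2 * 3 + 1 = 7 images.
#     augmented = rotations([img], n_rot=3, ccw_limit=90, cw_limit=90)
#     assert len(augmented) == 7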
def num_range(num):
"""
    Use in template language to loop through a numeric range
"""
return range(num) | 7b66e4ffd264ea7b49850a9300c3a6c80282fce1 | 3,653,098 |
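# Hedged sketch of how num_range would typically be registered as a Django
# template filter (the Library/registration below is an assumption, not shown above):
#     from django import template
#     register = template.Library()
#     register.filter('num_range', num_range)
# and in a template: {% for i in 5|num_range %} ... {% endfor %}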
def datamodel_flights_column_names():
"""
Get FLIGHTS_CSV_SCHEMA column names (keys)
:return: list
"""
return list(FLIGHTS_CSV_SCHEMA.keys()) | 6e5edfa181e02955976602a289576eca307a13bc | 3,653,099 |
def create_tomography_circuits(circuit, qreg, creg, tomoset):
"""
Add tomography measurement circuits to a QuantumProgram.
The quantum program must contain a circuit 'name', which is treated as a
    state preparation circuit for state tomography, or as the circuit being
measured for process tomography. This function then appends the circuit
with a set of measurements specified by the input `tomography_set`,
optionally it also prepends the circuit with state preparation circuits if
they are specified in the `tomography_set`.
For n-qubit tomography with a tomographically complete set of preparations
and measurements this results in $4^n 3^n$ circuits being added to the
quantum program.
Args:
circuit (QuantumCircuit): The circuit to be appended with tomography
state preparation and/or measurements.
qreg (QuantumRegister): the quantum register containing qubits to be
measured.
creg (ClassicalRegister): the classical register containing bits to
store measurement outcomes.
tomoset (tomography_set): the dict of tomography configurations.
Returns:
list: A list of quantum tomography circuits for the input circuit.
Raises:
QISKitError: if circuit is not a valid QuantumCircuit
Example:
        For a tomography set specifying state tomography of qubit-0 prepared
by a circuit 'circ' this would return:
```
['circ_meas_X(0)', 'circ_meas_Y(0)', 'circ_meas_Z(0)']
```
For process tomography of the same circuit with preparation in the
SIC-POVM basis it would return:
```
[
'circ_prep_S0(0)_meas_X(0)', 'circ_prep_S0(0)_meas_Y(0)',
'circ_prep_S0(0)_meas_Z(0)', 'circ_prep_S1(0)_meas_X(0)',
'circ_prep_S1(0)_meas_Y(0)', 'circ_prep_S1(0)_meas_Z(0)',
'circ_prep_S2(0)_meas_X(0)', 'circ_prep_S2(0)_meas_Y(0)',
'circ_prep_S2(0)_meas_Z(0)', 'circ_prep_S3(0)_meas_X(0)',
'circ_prep_S3(0)_meas_Y(0)', 'circ_prep_S3(0)_meas_Z(0)'
]
```
"""
if not isinstance(circuit, QuantumCircuit):
raise QISKitError('Input circuit must be a QuantumCircuit object')
dics = tomoset['circuits']
labels = tomography_circuit_names(tomoset, circuit.name)
tomography_circuits = []
for label, conf in zip(labels, dics):
tmp = circuit
# Add prep circuits
if 'prep' in conf:
prep = QuantumCircuit(qreg, creg, name='tmp_prep')
for qubit, op in conf['prep'].items():
tomoset['prep_basis'].prep_gate(prep, qreg[qubit], op)
prep.barrier(qreg[qubit])
tmp = prep + tmp
# Add measurement circuits
meas = QuantumCircuit(qreg, creg, name='tmp_meas')
for qubit, op in conf['meas'].items():
meas.barrier(qreg[qubit])
tomoset['meas_basis'].meas_gate(meas, qreg[qubit], op)
meas.measure(qreg[qubit], creg[qubit])
tmp = tmp + meas
# Add label to the circuit
tmp.name = label
tomography_circuits.append(tmp)
logger.info('>> created tomography circuits for "%s"', circuit.name)
return tomography_circuits | ab42a0b57ccd94f6ffbb64425473c3a90dd10888 | 3,653,100 |
def filter_background(bbox, bg_data):
"""
Takes bounding box and background geojson file assumed to be the US states, and outputs a geojson-like dictionary
containing only those features with at least one point within the bounding box, or any state that completely
contains the bounding box.
This tests if a feature contains the bounding box by drawing the box that contains the feature and checking if that
box also contains the bounding box. Because features are odd shapes, this may find that more than one feature
completely contains the bounding box. E.g., if you draw a box around Maryland it will also contain a chunk of West
Virginia. To deal with this, we are allowed to find that multiple states contain the bounding box.
:param bbox: The coordinates of the bounding box as [lon, lat, lon, lat]
:param bg_data: a geojson-like dict describing the background
    :return: the features from bg_data whose borders intersect bbox OR the feature which completely contains bbox
"""
box_lon = [bbox[0], bbox[2]]
box_lat = [bbox[1], bbox[3]]
features = bg_data['features']
in_box = []
for f in features:
starting_len = len(in_box)
# Define points for bounding box around the feature.
feature_max_lat = -90
feature_max_lon = -180
feature_min_lat = 90
feature_min_lon = 180
coordinates = f['geometry']['coordinates']
for group in coordinates:
if len(in_box) > starting_len:
# This feature has already been added
break
# actual points for MultiPolygons are nested one layer deeper than those for polygons
if f['geometry']['type'] == 'MultiPolygon':
geom = group[0]
else:
geom = group
for lon, lat in geom:
# check if any point along the state's borders falls within the bounding box.
if min(box_lon) <= lon <= max(box_lon) and min(box_lat) <= lat <= max(box_lat):
in_box.append(f)
break
# If any point of a feature falls within the bounding box, then the feature cannot contain the box,
# so this only needs to be run if the above if statement is not executed
feature_min_lon = min(feature_min_lon, lon)
feature_min_lat = min(feature_min_lat, lat)
feature_max_lon = max(feature_max_lon, lon)
feature_max_lat = max(feature_max_lat, lat)
# If the box containing a feature also contains the bounding box, keep this feature
# Allow adding more than one because otherwise MD contains boxes in WV, and CA would contain most of NV.
if feature_min_lat < min(box_lat) and feature_max_lat > max(box_lat) and \
feature_min_lon < min(box_lon) and feature_max_lon > max(box_lon):
in_box.append(f)
keepers = {
'type': 'FeatureCollection',
'features': in_box
}
return keepers | f06fe5efe1e3920d8b1092601a121e313da4eec4 | 3,653,101 |
def rename_columns(table, mapper):
""" Renames the table headings to conform with the ketos naming convention.
Args:
table: pandas DataFrame
Annotation table.
mapper: dict
Dictionary mapping the headings of the input table to the
standard ketos headings.
Returns:
: pandas DataFrame
Table with new headings
"""
return table.rename(columns=mapper) | c9c9228f4f477b8d5ade234964c2540fd20ddd09 | 3,653,102 |
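# Hedged usage sketch; the source headings and the target ketos-style names below
# are illustrative assumptions, not taken from the original table:
#     annot = rename_columns(annot, {'fname': 'filename', 'call_begin': 'start', 'call_end': 'end'})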
from typing import Union
import json
def parse_tuple(s: Union[str, tuple]) -> tuple:
"""Helper for load_detections_csv, to parse string column into column of Tuples."""
if isinstance(s, str):
result = s.replace("(", "[").replace(")", "]")
result = result.replace("'", '"').strip()
result = result.replace(",]", "]")
if result:
# print(result)
return tuple(sorted((json.loads(result))))
else:
return tuple()
else:
return s | ad568bfc8ccdf8440378e852daccaf2f24a7e2d0 | 3,653,104 |
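# Usage sketch for parse_tuple (values worked through the code above):
#     parse_tuple("('b', 'a',)")  # -> ('a', 'b')   stringified tuple is parsed and sorted
#     parse_tuple("")             # -> ()           empty cell becomes an empty tuple
#     parse_tuple(("x", "y"))     # -> ('x', 'y')   non-strings pass through unchanged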
import re
def clean(tweet):
"""
clean tweet text by removing links, special characters
using simple regex statements
Parameters
----------
tweet : String
Single Twitter message
Returns
-------
tokenized_tweet : List
List of cleaned tokens derived from the input Twitter message
"""
# convert to lower
tweet = tweet.lower()
# get the stop-words available from the nltk.corpus lib
    # as the corpus would also delete a lot of negations from the tweets
# it is considered to use just a subset
stop_words = ['i', 'me', 'my', 'myself', 'we', 'our', 'ours',
'ourselves', 'you', "you're", "you've", "you'll",
"you'd", 'your', 'yours', 'yourself', 'yourselves',
'he', 'him', 'his', 'himself', 'she', "she's", 'her',
'hers', 'herself', 'it', "it's", 'its', 'itself',
'they', 'them', 'their', 'theirs', 'themselves', 'what',
'which', 'who', 'whom', 'this', 'that', "that'll",
'these', 'those', 'am', 'is', 'are', 'was', 'were',
'be', 'been', 'being', 'have', 'has', 'had', 'having',
'do', 'does', 'did', 'doing', 'a', 'an', 'the', 'and',
'if', 'or', 'because', 'as', 'until', 'while', 'of',
'at', 'by', 'for', 'with', 'about', 'against', 'between',
'into', 'through', 'during', 'before', 'after', 'above',
'below', 'to', 'from', 'up', 'down', 'in', 'out', 'on',
'off', 'over', 'under', 'again', 'further', 'then', 'once',
'here', 'there', 'when', 'where', 'why', 'how', 'all', 'any',
'both', 'each', 'few', 'more', 'most', 'other', 'some', 'such',
'only', 'own', 'same', 'so', 'than', 'too', 'very', 's', 't',
'will', 'just', 'should', "should've", 'now', 'd', 'll',
'm', 'o', 're', 've', 'y', 'ain', 'ma', '.', ',', ';', '!', '?',
'@...', '@', '@…']
# convert to string again as re expects a string-like object (and not a list)
# remove all the stopwords as well as the numbers and words shorter than
# two letters also check the spelling
tmp = ""
tmp_c = [tmp +
item.replace(",","").replace(";","").replace("?","").replace("!","").replace("#","")
for item in tweet.split() if item not in stop_words
and not item.isdigit()]
tmp_c = " ".join(item for item in tmp_c)
# remove other special characters including @, URLs, Usernames and other
# special characters
    return ' '.join(re.sub(r"(@[A-Za-z0-9]+)|([^0-9A-Za-z \t])|(\w+:\/\/\S+)",
" ",
tmp_c).split()) | fbfacb49f88638610fb071cb6b14d02dadf665e1 | 3,653,105 |
def predict(x, u):
"""
:param x: Particle state (x,y,theta) [size 3 array]
:param u: Robot inputs (u1,u2) [size 2 array]
:return: Particle's updated state sampled from the motion model
"""
x = x + motionModel(x, u) + np.random.multivariate_normal(np.zeros(3), Q)
return x | 7fe3e9fa42e1e74ac448a0139ca6dae8ff5388ad | 3,653,106 |
def plot_multiple(datasets, method='scatter', pen=True, labels=None, **kwargs):
"""
Plot a series of 1D datasets as a scatter plot
with optional lines between markers.
Parameters
----------
datasets : a list of ndatasets
method : str among [scatter, pen]
pen : bool, optional, default: True
if method is scatter, this flag tells to draw also the lines
between the marks.
labels : a list of str, optional
labels used for the legend.
**kwargs : other parameters that will be passed to the plot1D function
"""
if not is_sequence(datasets):
# we need a sequence. Else it is a single plot.
return datasets.plot(**kwargs)
if not is_sequence(labels) or len(labels) != len(datasets):
        # we need a sequence of labels of same length as datasets
raise ValueError('the list of labels must be of same length '
'as the datasets list')
for dataset in datasets:
if dataset._squeeze_ndim > 1:
raise NotImplementedError('plot multiple is designed to work on '
'1D dataset only. you may achieved '
'several plots with '
'the `clear=False` parameter as a work '
'around '
'solution')
# do not save during this plots, nor apply any commands
# we will make this when all plots will be done
output = kwargs.get('output', None)
kwargs['output'] = None
commands = kwargs.get('commands', [])
kwargs['commands'] = []
clear = kwargs.pop('clear', True)
legend = kwargs.pop('legend', None) # remove 'legend' from kwargs before calling plot
# else it will generate a conflict
for s in datasets: # , colors, markers):
ax = s.plot(method=method, pen=pen, marker='AUTO', color='AUTO', ls='AUTO', clear=clear, **kwargs)
clear = False # clear=False is necessary for the next plot to say # that we will plot on the same figure
# scale all plots
if legend is not None:
_ = ax.legend(ax.lines, labels, shadow=True, loc=legend, frameon=True, facecolor='lightyellow')
# now we can output the final figure
kw = {'output': output, 'commands': commands}
datasets[0]._plot_resume(datasets[-1], **kw)
return ax | 85b41b719cb8d33884dd7364b3e0937100167c6a | 3,653,107 |
def _scale_enum(anchor, scales):
""" 列举关于一个anchor的三种尺度 128*128,256*256,512*512
Enumerate a set of anchors for each scale wrt an anchor.
"""
    w, h, x_ctr, y_ctr = _whctrs(anchor)  # return width, height and center coords: w=16, h=16, x_ctr=7.5, y_ctr=7.5
ws = w * scales #[128 256 512]
hs = h * scales #[128 256 512]
anchors = _mkanchors(ws, hs, x_ctr, y_ctr) #[[-56 -56 71 71] [-120 -120 135 135] [-248 -248 263 263]]
return anchors | b0b8e9418b935daf2961fbd690a4ccf2b6bd6d7b | 3,653,108 |
import codecs
def metamodel_from_file(file_name, **kwargs):
"""
Creates new metamodel from the given file.
Args:
file_name(str): The name of the file with textX language description.
other params: See metamodel_from_str.
"""
with codecs.open(file_name, 'r', 'utf-8') as f:
lang_desc = f.read()
metamodel = metamodel_from_str(lang_desc=lang_desc,
file_name=file_name,
**kwargs)
return metamodel | 526a1876ed11aff2341133d061573ae9f3bfb1fe | 3,653,109 |
import sirepo.template
import copy
def _python(data):
"""Generate python in current directory
Args:
data (dict): simulation
Returns:
py.path.Local: file to append
"""
template = sirepo.template.import_module(data)
res = pkio.py_path('run.py')
res.write(template.python_source_for_model(copy.deepcopy(data), None))
return res | df0d2eae8155f1093dde02db73fd5185983d6847 | 3,653,110 |
def load_hosts_conf(path='/etc/hosts'):
"""parse hosts file"""
hosts = {}
try:
with open(path, 'r') as f:
for line in f.readlines():
parts = line.strip().split()
if len(parts) < 2:
continue
addr = ip_address(parts[0])
if addr:
for hostname in parts[1:]:
if hostname:
hosts[hostname] = addr
except IOError as e:
hosts['localhost'] = '127.0.0.1'
return hosts | c6e2d1f34f5aa140a3bccfbd4d9791641cc75fff | 3,653,111 |
import random
def _waveform_distortion(waveform, distortion_methods_conf):
""" Apply distortion on waveform
This distortion will not change the length of the waveform.
Args:
waveform: numpy float tensor, (length,)
        distortion_methods_conf: a list of configs for distortion methods.
            a method will be randomly selected by 'method_rate' and
            applied to the waveform.
Returns:
distorted waveform.
"""
r = random.uniform(0, 1)
acc = 0.0
for distortion_method in distortion_methods_conf:
method_rate = distortion_method['method_rate']
acc += method_rate
if r < acc:
distortion_type = distortion_method['name']
distortion_conf = distortion_method['params']
point_rate = distortion_method['point_rate']
return distort_wav_conf(waveform, distortion_type,
distortion_conf , point_rate)
return waveform | cca32854f3d72f381d40a5ca8802c29996413149 | 3,653,114 |
def _pad_returns(returns):
"""
Pads a returns Series or DataFrame with business days, in case the
existing Date index is sparse (as with PNL csvs). Sparse indexes if not
padded will affect the Sharpe ratio because the 0 return days will not be
included in the mean and std.
"""
bdays = pd.date_range(start=returns.index.min(), end=returns.index.max(),freq="B")
idx = returns.index.union(bdays)
return returns.reindex(index=idx).fillna(0) | fde27dd928d3f0510f98fa4eba8f89f7d6b81922 | 3,653,115 |
def add_header(r):
"""
Add headers to both force latest IE rendering engine or Chrome Frame,
    and also to disable caching of the rendered page.
"""
r.headers["Cache-Control"] = "no-cache, no-store, must-revalidate"
r.headers["Pragma"] = "no-cache"
r.headers["Expires"] = "0"
r.headers['Cache-Control'] = 'public, max-age=0'
#if r.headers["Content-Type"] !="application/json" and r.status_code!=304:
# print(str(r.status_code)+" -",end="")
return r | 3a758340d1c13cc29f0013f3d2fec77c47099c02 | 3,653,116 |
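# Hedged sketch: add_header is a typical Flask after_request hook; `app` is the
# flask.Flask instance assumed by the surrounding module:
#     app.after_request(add_header)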
def pair_sorter(aln):
"""Get the alignment name and attributes for sorting."""
return (
aln.name,
not aln.first_in_pair,
aln.unmapped,
aln.supplementary_alignment,
aln.secondary_alignment) | 217eac7c89a12f68f4c9fe324c4feb6c2a955d58 | 3,653,117 |
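# Usage sketch: sort alignment records so mate pairs sit together, ordered
# first-in-pair, mapped, primary alignments first (`alignments` is an assumed
# list of such records):
#     alignments.sort(key=pair_sorter)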
def project_to2d(
pts: np.ndarray, K: np.ndarray, R: np.ndarray, t: np.ndarray
) -> np.ndarray:
"""Project 3d points to 2d.
Projects a set of 3-D points, pts, into 2-D using the camera intrinsic
    matrix (K), the extrinsic rotation matrix (R), and the extrinsic
translation vector (t). Note that this uses the matlab
convention, such that
M = [R;t] * K, and pts2d = pts3d * M
"""
M = np.concatenate((R, t), axis=0) @ K
projPts = np.concatenate((pts, np.ones((pts.shape[0], 1))), axis=1) @ M
projPts[:, :2] = projPts[:, :2] / projPts[:, 2:]
return projPts | 5f9bc03ae0086649746651da4e5e8e1d870db6bd | 3,653,119 |
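# Minimal numeric sanity check for project_to2d: with K = I, R = I and t = 0 the
# MATLAB row-vector convention above reduces to plain perspective division by z.
#     pts = np.array([[2.0, 4.0, 2.0]])
#     project_to2d(pts, K=np.eye(3), R=np.eye(3), t=np.zeros((1, 3)))[:, :2]  # -> [[1.0, 2.0]]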
import copy
def uncontract_general(basis, use_copy=True):
"""
Removes the general contractions from a basis set
The input basis set is not modified. The returned basis
may have functions with coefficients of zero and may have duplicate
shells.
If use_copy is True, the input basis set is not modified.
"""
if use_copy:
basis = copy.deepcopy(basis)
for k, el in basis['elements'].items():
if not 'electron_shells' in el:
continue
newshells = []
for sh in el['electron_shells']:
# See if we actually have to uncontract
# Also, don't uncontract sp, spd,.... orbitals
# (leave that to uncontract_spdf)
if len(sh['coefficients']) == 1 or len(sh['angular_momentum']) > 1:
newshells.append(sh)
else:
if len(sh['angular_momentum']) == 1:
for c in sh['coefficients']:
                        # copy, then replace 'coefficients'
newsh = sh.copy()
newsh['coefficients'] = [c]
newshells.append(newsh)
el['electron_shells'] = newshells
    # If use_copy is True, we already made our deep copy
return prune_basis(basis, False) | de6638ae34b4d6f0b3a467048683ac363f71f9c1 | 3,653,120 |
def read_sd15ch1_images(root_dir,
image_relative_path_seq,
resize=None,
color=False):
"""
WARNING
-------
- All images must have the same shape (this is the case for the frames, and all models but the
ones of the "01-original" category).
- Loading many images at one can quickly fill up your RAM.
Returns
-------
- np.array((number_of_images, images_height, images_width)) if `color` is `False`
- np.array((number_of_images, images_height, images_width, image_channels)) otherwise.
"""
# Read first image, if any, to get image shape
# Note: all images must have the same shape
if len(image_relative_path_seq) == 0:
return np.array([])
# We have a least 1 element
img0 = read_sd15ch1_image(root_dir, image_relative_path_seq[0], resize, color)
# allocate some contiguous memory to host the decoded images
dim_axis0 = (len(image_relative_path_seq), ) # make it a tuple
dim_axis_others = img0.shape
imgs_shape = dim_axis0 + dim_axis_others
__info("About to allocate %d bytes for an array of shape %s." % (np.prod(imgs_shape) * 4, imgs_shape))
imgs = np.zeros(imgs_shape, dtype=np.float32)
# Handle first image
imgs[0, ...] = img0
# Loop over other images
for ii, rel_path in enumerate(image_relative_path_seq[1:], start=1):
imgi = read_sd15ch1_image(root_dir, rel_path, resize, color)
if imgi.shape != dim_axis_others:
__err("All images must have the same shape. Inconsistent dataset. Aborting loading.", RuntimeError)
imgs[ii, ...] = imgi
return imgs | 0d6efd2eac2762440ae532564e4f680a1f056d30 | 3,653,121 |