content (string, lengths 35 to 762k) | sha1 (string, length 40) | id (int64, 0 to 3.66M) |
---|---|---|
def painel(request):
""" Exibe o painel do usuário. """
return render(request, "lancamentos/painel.html") | ff40db732402077eb6678f8586582877d96e3ede | 4,526 |
def q_statistic(y, c1, c2):
""" Q-Statistic.
Parameters
----------
y : numpy.array
Target sample.
c1 : numpy.array
Output of the first classifier.
c2 : numpy.array
Output of the second classifier.
Returns
-------
float
Return the Q-Statistic measure between the classifiers 'c1' and 'c2'.
Q-Statistic takes value in the range of [-1, 1]:
- is zero if 'c1' and 'c2' are independent.
- is positive if 'c1' and 'c2' make similar predictions.
- is negative if 'c1' and 'c2' make different predictions.
References
----------
.. [1] Zhi-Hua Zhou. (2012), pp 105:
Ensemble Methods Foundations and Algorithms
Chapman & Hall/CRC Machine Learning & Pattern Recognition Series.
"""
a, b, c, d = contingency_table(y, c1, c2)
return (a * d - b * c) / (a * d + b * c) | 83f83bffcb469ff45c22a1f35efc6e60ccdd0d2d | 4,528 |
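A minimal, self-contained sketch of how q_statistic can be exercised. contingency_table is not shown above, so the demo below inlines the counting it is assumed to perform (a: both classifiers correct, b: only c1 correct, c: only c2 correct, d: both wrong); the real helper's signature may differ.
import numpy as np

def q_statistic_demo(y, c1, c2):
    # Assumed contingency counts: a = both correct, b = only c1 correct,
    # c = only c2 correct, d = both wrong.
    a = np.sum((c1 == y) & (c2 == y))
    b = np.sum((c1 == y) & (c2 != y))
    c = np.sum((c1 != y) & (c2 == y))
    d = np.sum((c1 != y) & (c2 != y))
    return (a * d - b * c) / (a * d + b * c)

y = np.array([1, 1, 0, 0, 1, 0])
c1 = np.array([1, 0, 0, 0, 1, 1])
c2 = np.array([1, 1, 0, 1, 0, 0])
print(q_statistic_demo(y, c1, c2))  # -1.0: the two classifiers never fail on the same sample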
def nan_helper(y):
"""Helper to handle indices and logical indices of NaNs.
Input:
- y, 1d numpy array with possible NaNs
Output:
- nans, logical indices of NaNs
- index, a function, with signature indices= index(logical_indices),
to convert logical indices of NaNs to 'equivalent' indices
Example:
>>> # linear interpolation of NaNs
>>> nans, x= nan_helper(y)
>>> y[nans]= np.interp(x(nans), x(~nans), y[~nans])
Taken from: https://stackoverflow.com/questions/6518811/interpolate-nan-values-in-a-numpy-array
"""
return np.isnan(y), lambda z: z.nonzero()[0] | b6bd981369403a5542f8bcefb3e8a68315fb697f | 4,529 |
import re
def convert_parameters(child, text=False, tail=False, **kwargs):
"""
Get child text or tail
:param child:
:param text:
:param tail:
:return:
"""
p = re.compile(r'\S')
# Remove empty info
child_text = child.text if child.text else ''
child_tail = child.tail if child.tail else ''
child_text = child_text if p.search(child_text) else ''
child_tail = child_tail if p.search(child_tail) else ''
# all
if text and tail:
convert_string = child_text + child_tail
# only_text
elif text:
convert_string = child_text
# only_tail
elif tail:
convert_string = child_tail
else:
convert_string = ''
# replace params
mybatis_param_list = get_params(child)
for mybatis_param in mybatis_param_list:
convert_value = ''
if mybatis_param.sql_param.is_function:
# eval function
convert_value = __eval_function(mybatis_param, **kwargs)
else:
# Type conversion
param_value = __get_param(mybatis_param.param_name, **kwargs)
print(mybatis_param.param_name+ ' value:'+str(param_value))
convert_value = PY_MYBATIS_TYPE_HANDLER.convert(mybatis_param.python_type, mybatis_param.sql_type,
param_value,
PyMybatisTypeHandler.PYTHON2SQL_TYPE_HANDLER_CONVERT_MODE)
#longjb modify 2021.10.29:
if convert_value!='null' and len(convert_value)>0 and( mybatis_param.sql_type=='raw' or mybatis_param.python_type=='raw'):
convert_value= convert_value.replace("'","`")
#convert_value= convert_value[1:len(convert_value)-1]
# print('name:'+str(mybatis_param.name))
# print('value:'+convert_value)
# print('sql_type:'+str(mybatis_param.sql_type))
# print('python_type:'+str(mybatis_param.python_type))
convert_string = convert_string.replace(mybatis_param.full_name, convert_value, 1)
# convert CDATA string
convert_string = convert_cdata(convert_string)  # keep the return value; Python strings are immutable, so the call has no effect otherwise
return convert_string | 2421e515491f1256c56eb9ac6935a3c0c1de64be | 4,532 |
def Get_Country_Name_From_ISO3_Extended(countryISO):
"""
Creates a subset of the quick chart data for a specific country. The subset includes all those rows containing
the given country either as the origin or as the country of asylum.
"""
countryName = ""
# June-22 - This function has been updated to include a to upper without a check on if the data is null or not
# So we need to wrap it in a try catch
try:
countryName = Country.get_country_name_from_iso3(countryISO)
except:
print("Failed to get the country from get_country_name_from_iso3.")
# Now lets try to find it for the three typical non-standard codes
if countryName is None or countryName == "":
print("Non-standard ISO code:", countryISO)
if countryISO == "UKN":
countryName = "Various / unknown"
elif countryISO == "STA":
countryName = "Stateless"
elif countryISO == "TIB":
countryName = "Tibetan"
else:
print("!!SERIOUS!! Unknown ISO code identified:", countryISO)
# Lets add a sensible default here...
countryName = "Various / unknown"
return countryName | d6e5b34223582f3a5a5ca20fd798ef5cfb1b1e8d | 4,533 |
def thetaG(t,t1,t2):
"""
Return a Gaussian pulse.
Arguments:
t -- time of the pulse
t1 -- initial time
t2 -- final time
Return:
theta -- Scalar or vector with the same dimensions as t.
"""
tau = (t2-t1)/5
to = t1 + (t2-t1)/2
theta = (np.sqrt(np.pi)/(2*tau))*np.exp(-((t-to)/tau)**2)
return theta | 9e05358bfbf5f11b30f2a6b44504214ab4db4ea5 | 4,536 |
def choose_string(g1, g2):
"""Function used by merge_similar_guesses to choose between 2 possible
properties when they are strings.
If the 2 strings are similar, or one is contained in the other, the latter is returned
with an increased confidence.
If the 2 strings are dissimilar, the one with the higher confidence is returned, with
a weaker confidence.
Note that here, 'similar' means that 2 strings are either equal, or that they
differ very little, such as one string being the other one with the 'the' word
prepended to it.
>>> s(choose_string(('Hello', 0.75), ('World', 0.5)))
('Hello', 0.25)
>>> s(choose_string(('Hello', 0.5), ('hello', 0.5)))
('Hello', 0.75)
>>> s(choose_string(('Hello', 0.4), ('Hello World', 0.4)))
('Hello', 0.64)
>>> s(choose_string(('simpsons', 0.5), ('The Simpsons', 0.5)))
('The Simpsons', 0.75)
"""
v1, c1 = g1 # value, confidence
v2, c2 = g2
if not v1:
return g2
elif not v2:
return g1
v1, v2 = v1.strip(), v2.strip()
v1l, v2l = v1.lower(), v2.lower()
combined_prob = 1 - (1 - c1) * (1 - c2)
if v1l == v2l:
return (v1, combined_prob)
# check for common patterns
elif v1l == 'the ' + v2l:
return (v1, combined_prob)
elif v2l == 'the ' + v1l:
return (v2, combined_prob)
# if one string is contained in the other, return the shortest one
elif v2l in v1l:
return (v2, combined_prob)
elif v1l in v2l:
return (v1, combined_prob)
# in case of conflict, return the one with highest confidence
else:
if c1 > c2:
return (v1, c1 - c2)
else:
return (v2, c2 - c1) | e39a66c9f3f941b12225dde879bc92956694d2d0 | 4,537 |
def update_alert_command(client: MsClient, args: dict):
"""Updates properties of existing Alert.
Returns:
(str, dict, dict). Human readable, context, raw response
"""
alert_id = args.get('alert_id')
assigned_to = args.get('assigned_to')
status = args.get('status')
classification = args.get('classification')
determination = args.get('determination')
comment = args.get('comment')
args_list = [assigned_to, status, classification, determination, comment]
check_given_args_update_alert(args_list)
json_data, context = add_args_to_json_and_context(alert_id, assigned_to, status, classification, determination,
comment)
alert_response = client.update_alert(alert_id, json_data)
entry_context = {
'MicrosoftATP.Alert(val.ID === obj.ID)': context
}
human_readable = f'The alert {alert_id} has been updated successfully'
return human_readable, entry_context, alert_response | 237aa63f449dc6395390a26007b15123d5763874 | 4,538 |
def create_payment(context: SagaContext) -> SagaContext:
"""For testing purposes."""
context["payment"] = "payment"
return context | e96db6e57996d8f704e453bf14b8e4a3c63da1a6 | 4,539 |
import asyncio
async def TwitterAuthURLAPI(
request: Request,
current_user: User = Depends(User.getCurrentUser),
):
"""
Gets the authorization URL for linking a Twitter account.<br>
Opening the authorization URL in a browser asks the user to approve the app connection; once approved, the user is redirected back to /api/twitter/callback.
This endpoint cannot be accessed unless a JWT-encoded access token is set in the request's Authorization: Bearer header.<br>
"""
# Set the callback URL
## Twitter's OAuth integration requires the callback URL to be registered in advance on the developer dashboard
## KonomiTV server URLs vary, so callbacks are first routed through https://app.konomi.tv/api/redirect/twitter
## That API redirects the request to the TwitterAuthCallbackAPI of the KonomiTV server specified by the "server" parameter
## Finally the KonomiTV server receives the redirect, so OAuth linking works even though the callback URL is not fixed in advance
## Unlike the other services, Twitter uses OAuth 1.0a, so the flow is quite different
## ref: https://github.com/tsukumijima/KonomiTV-API
callback_url = f'https://app.konomi.tv/api/redirect/twitter?server={request.url.scheme}://{request.url.netloc}/'
# Initialize OAuth1UserHandler and get the authorization URL
## Setting signin_with_twitter to True generates an oauth/authenticate authorization URL
## Unlike oauth/authorize, if the app is already linked the user is redirected to the callback URL without re-approval
## ref: https://developer.twitter.com/ja/docs/authentication/api-reference/authenticate
try:
oauth_handler = tweepy.OAuth1UserHandler(Interlaced(1), Interlaced(2), callback=callback_url)
authorization_url = await asyncio.to_thread(oauth_handler.get_authorization_url, signin_with_twitter=True)  # synchronous function, so run it in a thread
except tweepy.TweepyException:
raise HTTPException(
status_code = status.HTTP_422_UNPROCESSABLE_ENTITY,
detail = 'Failed to get Twitter authorization URL',
)
# Create a provisional TwitterAccount record
## Needed to determine which user the oauth_token belongs to when the callback comes back
## TwitterAuthCallbackAPI cannot require authentication by design, so this is necessary to prevent arbitrary accounts from being linked
twitter_account = TwitterAccount()
twitter_account.user = current_user
twitter_account.name = 'Temporary'
twitter_account.screen_name = 'Temporary'
twitter_account.icon_url = 'Temporary'
twitter_account.access_token = oauth_handler.request_token['oauth_token'] # provisionally store oauth_token (same value as ?oauth_token= in the authorization URL)
twitter_account.access_token_secret = oauth_handler.request_token['oauth_token_secret'] # provisionally store oauth_token_secret
await twitter_account.save()
return {'authorization_url': authorization_url} | 2245c3b2d842c455fa9cb36390c84c8470c3b8e1 | 4,540 |
import random
def post_sunday(request):
"""Post Sunday Details, due on the date from the form"""
date_form = SelectDate(request.POST or None)
if request.method == 'POST':
if date_form.is_valid():
groups = DetailGroup.objects.filter(semester=get_semester())
details = settings.SUNDAY_DETAILS
g = [e for e in groups]
groups = g
random.shuffle(groups)
random.shuffle(details)
emails = []
for group in groups:
if len(details) <= 0:
break
group_detail = SundayGroupDetail(
group=group, due_date=date_form.cleaned_data['due_date']
)
group_detail.save()
for _ in range(group.size()):
if len(details) <= 0:
break
d = details.pop()
det = SundayDetail(
short_description=d['name'],
long_description="\n".join(d['tasks']),
due_date=date_form.cleaned_data['due_date']
)
det.save()
group_detail.details.add(det)
group_detail.save()
emails.append(
build_sunday_detail_email(
group_detail,
request.scheme + "://" + request.get_host()
)
)
det_manager_email = Position.objects.get(
title=Position.PositionChoices.DETAIL_MANAGER
).brothers.first().user.email
for (subject, message, to) in emails:
send_mail(subject, message, det_manager_email, to)
context = {
'form': date_form,
'date': 'sunday',
}
return render(request, 'detail-manager/post-details.html', context) | 84787109d0981920bbced7a734d0b67c84d4a9a7 | 4,541 |
from typing import Dict
from typing import List
def reconstruct(lvl: Level, flow_dict: Dict[int, Dict[int, int]], info: Dict[int, NodeInfo]) -> List[List[int]]:
"""Reconstruct agent paths from the given flow and node information"""
paths: List[List[int]] = [[]] * len(lvl.scenario.agents)
start_flows = flow_dict[0]
agent_starts = {agent.origin: i for i, agent in enumerate(lvl.scenario.agents)}
for n in start_flows:
if start_flows[n] > 0:
agent = agent_starts[info[n].id]
paths[agent] = follow_path(n, flow_dict, info)
return paths | d792ed6b937f49177ac85609ada3edb2089e2642 | 4,542 |
import traceback
def arch_explain_instruction(bv, instruction, lifted_il_instrs):
""" Returns the explanation string from explanations_en.json, formatted with the preprocessed instruction token list """
if instruction is None:
return False, []
parsed = parse_instruction(bv, instruction, lifted_il_instrs)
if len(parsed) == 0:
return False, []
out = []
out_bool = False
for name in parsed:
name = find_proper_name(name).lower()
if name in explanations:
try:
# Get the string from the JSON and format it
out_bool = out_bool or name not in dont_supersede_llil
out.append(explanations[name].format(instr=preprocess(bv, parsed, lifted_il_instrs, name)))
except (AttributeError, KeyError):
# Usually a bad format string. Shouldn't show up unless something truly weird happens.
log_error("Bad Format String in binja_explain_instruction")
traceback.print_exc()
out.append(name)
return out_bool, out | 57c6146ac06317df8a9e9b846a279fa950a970bc | 4,543 |
def get_subnet_mask(subnet: int, v6: bool) -> int:
"""Get the subnet mask given a CIDR prefix 'subnet'."""
if v6:
return bit_not((1 << (128 - subnet)) - 1, 128)
else:
return bit_not((1 << (32 - subnet)) - 1, 32) | 57c8de0bff70b0939dd8c646da0840be7c2839e1 | 4,545 |
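A rough usage sketch for get_subnet_mask; bit_not is not shown above, so the fixed-width bitwise NOT below is an assumed stand-in for whatever helper the original module provides.
def bit_not(n, numbits):
    # Assumed helper: bitwise NOT constrained to a fixed bit width
    return ((1 << numbits) - 1) ^ n

def get_subnet_mask_demo(subnet, v6):
    if v6:
        return bit_not((1 << (128 - subnet)) - 1, 128)
    return bit_not((1 << (32 - subnet)) - 1, 32)

print(hex(get_subnet_mask_demo(24, False)))  # 0xffffff00
print(hex(get_subnet_mask_demo(64, True)))   # 0xffffffffffffffff0000000000000000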
def home(request):
"""return HttpResponse('<h1>Hello, Welcome to this test</h1>')"""
"""Le chemin des templates est renseigne dans "DIRS" de "TEMPLATES" dans settings.py
DONC PAS BESOIN DE RENSEIGNER LE CHEMIN ABSOLU"""
return render(request, "index.html") | 04a671daa9425ea76841b491f8eefd133b6e2c67 | 4,547 |
from re import search
def extract_commands(data, *commands):
"""Input function to find commands output in the "data" text"""
ret = ""
hostname = _ttp_["variable"]["gethostname"](data, "input find_command function")
if hostname:
for command in commands:
regex = r"{}[#>] *{} *\n([\S\s]+?)(?={}[#>]|$)".format(
hostname, command, hostname
)
match = search(regex, data)
if match:
ret += "\n{}\n".format(match.group())
if ret:
return ret, None
return data, None | 6fcbf9584f5a2f799839c9964a5ae6235f4e8b50 | 4,549 |
def get_version() -> str:
"""
Returns the version string for the ufotest project. The version scheme of ufotest loosely follows the
technique of `Semantic Versioning <https://semver.org/>`_. Where a minor version change may introduce backward
incompatible changes, due to the project still being in active development with many features being subject to
change.
The return value of this function is subject to the "get_version" filter hook, which is able to modify the version
string *after* it has been loaded from the file and sanitized.
*EXAMPLE*
.. code-block:: python
version = get_version() # "1.2.1"
:returns: The version string without any additional characters or whitespaces.
"""
with open(VERSION_PATH) as version_file:
version = version_file.read()
version = version.replace(' ', '').replace('\n', '')
# Here we actually need to check if the plugin management system is actually initialized (this is what the boolean
# return of is_prepared indicates) because the version function needs to be functional even when the ufotest
# installation folder and thus the config file does not yet exist.
if CONFIG.is_prepared():
version = CONFIG.pm.apply_filter('get_version', value=version)
return version | b34eac3aef7661b65408c60ce606cd24a06ae0ee | 4,550 |
async def clear_pending_revocations(request: web.BaseRequest):
"""
Request handler for clearing pending revocations.
Args:
request: aiohttp request object
Returns:
Credential revocation ids still pending revocation by revocation registry id.
"""
context: AdminRequestContext = request["context"]
body = await request.json()
purge = body.get("purge")
rev_manager = RevocationManager(context.profile)
try:
results = await rev_manager.clear_pending_revocations(purge)
except StorageError as err:
raise web.HTTPBadRequest(reason=err.roll_up) from err
return web.json_response({"rrid2crid": results}) | 98db34266f3afbe9ecfeddcf802c1441ae7ea58b | 4,551 |
from datetime import datetime
def add_filter(field, bind, criteria):
"""Generate a filter."""
if 'values' in criteria:
return '{0}=any(:{1})'.format(field, bind), criteria['values']
if 'date' in criteria:
return '{0}::date=:{1}'.format(field, bind), datetime.strptime(criteria['date'], '%Y-%m-%d').date()
if 'gte' in criteria:
return '{0}>=:{1}'.format(field, bind), criteria['gte']
if 'lte' in criteria:
return '{0}<=:{1}'.format(field, bind), criteria['lte']
raise ValueError('criteria not supported') | 2358cab297b2a2cbc42af02b3b6d14ac134c8b71 | 4,552 |
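A quick illustration of the clauses add_filter produces; the field and bind names are arbitrary examples.
print(add_filter('created_at', 'start', {'gte': '2021-01-01'}))
# ('created_at>=:start', '2021-01-01')
print(add_filter('status', 'status', {'values': ['open', 'closed']}))
# ('status=any(:status)', ['open', 'closed'])
print(add_filter('day', 'day', {'date': '2021-05-01'}))
# ('day::date=:day', datetime.date(2021, 5, 1))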
def ireject(predicate, iterable):
"""Reject all items from the sequence for which the predicate is true.
ireject(function or None, sequence) --> iterator
:param predicate:
Predicate function. If ``None``, reject all truthy items.
:param iterable:
Iterable to filter through.
:yields:
A sequence of all items for which the predicate is false.
"""
return _ifilterfalse(predicate, iterable) | 98f9416ac1db1f2909d1d895ee0c0bc70c8b2249 | 4,553 |
def construct_config_error_msg(config, errors):
"""Construct an error message for an invalid configuration setup
Parameters
----------
config: Dict[str, Any]
Merged dictionary of configuration options from CLI, user configfile and
default configfile
errors: Dict[str, Any]
Dictionary of schema validation errors passed by Marshmallow
Returns
-------
str
"""
error_msg = "Failed to parse config\n"
for error_param, exception_msg in errors.items():
error_msg += parse_config_error(error_param, exception_msg)
return error_msg | 02954620115308d7d50ca28b23b98a2ba410489f | 4,554 |
def _haversine_GC_distance(φ1, φ2, λ1, λ2):
"""
Haversine formula for great circle distance. Suffers from rounding errors for
antipodal points.
Parameters
----------
φ1, φ2 : :class:`numpy.ndarray`
Numpy arrays with latitudes.
λ1, λ2 : :class:`numpy.ndarray`
Numpy arrays with longitudes.
"""
Δλ = np.abs(λ1 - λ2)
Δφ = np.abs(φ1 - φ2)
return 2 * np.arcsin(
np.sqrt(np.sin(Δφ / 2) ** 2 + np.cos(φ1) * np.cos(φ2) * np.sin(Δλ / 2) ** 2)
) | bb57ddeacd761abead5ee499610ead8c9ba38a9f | 4,556 |
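A small worked example, assuming the latitudes and longitudes are already in radians (the formula applies trigonometric functions directly); the return value is the central angle, so multiply by an Earth radius to get a distance.
import numpy as np

# Paris and London, converted to radians; 6371 km is a common mean Earth radius.
phi1, lam1 = np.radians([48.8566, 2.3522])
phi2, lam2 = np.radians([51.5074, -0.1278])
angle = _haversine_GC_distance(np.array([phi1]), np.array([phi2]),
                               np.array([lam1]), np.array([lam2]))
print(angle * 6371.0)  # roughly [343.5], i.e. about 344 km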
def differentiate_branch(branch, suffix="deriv"):
"""calculates difference between each entry and the previous
first entry in the new branch is difference between first and last entries in the input"""
def bud(manager):
return {add_suffix(branch,suffix):manager[branch]-np.roll(manager[branch],1)}
return bud | 298b19b1e151e04df9c040f0c48e4799bcc3f3d2 | 4,557 |
import typing
def etf_holders(apikey: str, symbol: str) -> typing.Optional[typing.List[typing.Dict]]:
"""
Query FMP /etf-holder/ API.
:param apikey: Your API key.
:param symbol: Company ticker.
:return: A list of dictionaries.
"""
path = f"etf-holder/{symbol}"
query_vars = {"apikey": apikey}
return __return_json_v3(path=path, query_vars=query_vars) | f405fa92296c28a8ba8ca87b6edac27392ec1f85 | 4,558 |
def clean_visibility_flags(horizon_dataframe: pd.DataFrame) -> pd.DataFrame:
"""
assign names to unlabeled 'visibility flag' columns -- solar presence,
lunar/interfering body presence, is-target-on-near-side-of-parent-body,
is-target-illuminated; drop them if they are empty
"""
flag_mapping = {
unlabeled_flag: flag_name
for unlabeled_flag, flag_name in zip(
[c for c in horizon_dataframe.columns if 'Unnamed' in c],
VISIBILITY_FLAG_NAMES
)
}
horizon_dataframe = horizon_dataframe.rename(mapper=flag_mapping, axis=1)
empty_flags = []
for flag_column in flag_mapping.values():
if horizon_dataframe[flag_column].isin([' ', '']).all():
empty_flags.append(flag_column)
return horizon_dataframe.drop(empty_flags, axis=1) | 906432120babffacb709b1d45e7c4dd86c60775d | 4,559 |
def calib(phase, k, axis=1):
"""Phase calibration
Args:
phase (ndarray): Unwrapped phase of CSI.
k (ndarray): Subcarriers index
axis (int): Axis along which is subcarrier. Default: 1
Returns:
ndarray: Phase calibrated
ref:
[Enabling Contactless Detection of Moving Humans with Dynamic Speeds Using CSI]
(http://tns.thss.tsinghua.edu.cn/wifiradar/papers/QianKun-TECS2017.pdf)
"""
p = np.asarray(phase)
k = np.asarray(k)
slice1 = [slice(None, None)] * p.ndim
slice1[axis] = slice(-1, None)
slice1 = tuple(slice1)
slice2 = [slice(None, None)] * p.ndim
slice2[axis] = slice(None, 1)
slice2 = tuple(slice2)
shape1 = [1] * p.ndim
shape1[axis] = k.shape[0]
shape1 = tuple(shape1)
k_n, k_1 = k[-1], k[0]  # first and last subcarrier indices, matching slice2 which selects the first element
a = (p[slice1] - p[slice2]) / (k_n - k_1)
b = p.mean(axis=axis, keepdims=True)
k = k.reshape(shape1)
phase_calib = p - a * k - b
return phase_calib | 5e1f59c0a13440ad8e1304523976c2fbe6562d5a | 4,560 |
def rescale_as_int(
s: pd.Series, min_value: float = None, max_value: float = None, dtype=np.int16
) -> pd.Series:
"""Cannot be converted to njit because np.clip is unsupported."""
valid_dtypes = {np.int8, np.int16, np.int32}
if dtype not in valid_dtypes:
raise ValueError(f"dtype: expecting [{valid_dtypes}] but found [{dtype}]")
if min_value is None:
min_value = min(s)
if max_value is None:
max_value = max(s)
if min_value == 0 and max_value == 0:
raise ValueError("Both min_value and max_value must not be zero")
limit = max(abs(min_value), abs(max_value))
res = np.clip(s / limit, 0, 1) * np.iinfo(dtype).max
return res.astype(dtype) | 31772759c67d33f20b89fd87aa91c9249ae2bb9a | 4,561 |
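A short usage sketch; note that np.clip(s / limit, 0, 1) maps negative values to zero, so only the non-negative range is spread over the integer type.
import numpy as np
import pandas as pd

s = pd.Series([-1.0, 0.0, 0.5, 1.0])
print(rescale_as_int(s).tolist())  # [0, 0, 16383, 32767] with the default int16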
def format_headers(headers):
"""Formats the headers of a :class:`Request`.
:param headers: the headers to be formatted.
:type headers: :class:`dict`.
:return: the headers in lower case format.
:rtype: :class:`dict`.
"""
dictionary = {}
for k, v in headers.items():
if isinstance(k, unicode):
k = k.encode('utf-8')
if isinstance(v, unicode):
v = v.encode('utf-8')
dictionary[k.lower()] = v.lower()
return dictionary | 0a0890c10378d9f8e20f353b1b9383e728f0a4f7 | 4,562 |
def decode_field(value):
"""Decodes a field as defined in the 'Field Specification' of the actions
man page: http://www.openvswitch.org/support/dist-docs/ovs-actions.7.txt
"""
parts = value.strip("]\n\r").split("[")
result = {
"field": parts[0],
}
if len(parts) > 1 and parts[1]:
field_range = parts[1].split("..")
start = field_range[0]
end = field_range[1] if len(field_range) > 1 else start
if start:
result["start"] = int(start)
if end:
result["end"] = int(end)
return result | 1a1659e69127ddd3c63eb7d4118ceb4e53a28ca0 | 4,563 |
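A quick check of the field formats handled above; the field names follow the ovs-actions notation but are otherwise arbitrary.
print(decode_field("NXM_OF_ETH_SRC[0..15]"))
# {'field': 'NXM_OF_ETH_SRC', 'start': 0, 'end': 15}
print(decode_field("NXM_NX_REG0[]"))
# {'field': 'NXM_NX_REG0'}
print(decode_field("reg1"))
# {'field': 'reg1'}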
from tqdm import tqdm
def compute_norm(x_train, in_ch):
"""Returns image-wise mean and standard deviation per channel."""
mean = np.zeros((1, 1, 1, in_ch))
std = np.zeros((1, 1, 1, in_ch))
n = np.zeros((1, 1, 1, in_ch))
# Compute mean.
for x in tqdm(x_train, desc='Compute mean'):
mean += np.sum(x, axis=(0, 1, 2), keepdims=True)
n += np.sum(x > 0, axis=(0, 1, 2), keepdims=True)
mean /= n
# Compute std.
for x in tqdm(x_train, desc='Compute std'):
std += np.sum((x - mean) ** 2, axis=(0, 1, 2), keepdims=True)
std = (std / n) ** 0.5
return mean, std | e49012075adfa03b33bb6308d1d50f4c22c1cc2c | 4,564 |
def _nonempty_line_count(src: str) -> int:
"""Count the number of non-empty lines present in the provided source string."""
return sum(1 for line in src.splitlines() if line.strip()) | ad2ac0723f9b3e1f36b331175dc32a8591c67893 | 4,565 |
import json
def geom_to_xml_element(geom):
"""Transform a GEOS or OGR geometry object into an lxml Element
for the GML geometry."""
if geom.srs.srid != 4326:
raise NotImplementedError("Only WGS 84 lat/long geometries (SRID 4326) are supported.")
# GeoJSON output is far more standard than GML, so go through that
return geojson_to_gml(json.loads(geom.geojson)) | a2702e8ac4e3cb24f787513f820df60ad973e305 | 4,566 |
def precision(y_true, y_pred):
"""Precision metric.
Only computes a batch-wise average of precision.
Computes the precision, a metric for multi-label classification of
how many selected items are relevant.
Parameters
----------
y_true : numpy array
an array of true labels
y_pred : numpy array
an array of predicted labels
Returns
-------
precision : float
the batch-wise average of precision value
"""
true_positives = K.sum(K.round(K.clip(y_true * y_pred, 0, 1)))
predicted_positives = K.sum(K.round(K.clip(y_pred, 0, 1)))
precision = true_positives / (predicted_positives + K.epsilon())
return precision | d57f1d782628e312b2e52098658be81e32351f3d | 4,568 |
def get_validators(setting):
"""
:type setting: dict
"""
if 'validate' not in setting:
return []
validators = []
for validator_name in setting['validate'].keys():
loader_module = load_module(
'spreadsheetconverter.loader.validator.{}',
validator_name)
validators.append(loader_module.Validator(setting))
return validators | db3b5594122685f3190cdae053ab7a385065d17e | 4,569 |
def worker(remote, parent_remote, env_fn_wrappers):
""" worker func to execute vec_env commands
"""
def step_env(env, action):
ob, reward, done, info = env.step(action)
if done:
ob = env.reset()
return ob, reward, done, info
parent_remote.close()
envs = [env_fn_wrapper() for env_fn_wrapper in env_fn_wrappers.x]
try:
while True:
cmd, data = remote.recv()
# branch out for requests
if cmd == 'step':
res = [step_env(env, action) for env, action in zip(envs, data)]
remote.send(res)
elif cmd == 'reset':
remote.send([env.reset() for env in envs])
elif cmd == 'render':
remote.send([env.render(mode='rgb_array') for env in envs])
elif cmd == 'close':
remote.close()
break
elif cmd == 'get_spaces':
remote.send(CloudpickleWrapper(
(envs[0].observation_space, envs[0].action_space)
))
elif cmd == 'get_agent_types':
if all([hasattr(a, 'adversary') for a in envs[0].agents]):
res = [
'adversary' if a.adversary else 'agent'
for a in envs[0].agents
]
else: # fully cooperative
res = ['agent' for _ in envs[0].agents]
remote.send(res)
else:
raise NotImplementedError
except KeyboardInterrupt:
print('SubprocVecEnv worker: got KeyboardInterrupt')
except:
print('Environment runner process failed...')
finally:
for env in envs:
env.close() | aaf5a16a72e97ec46e3a1ae4676c4591bc7f0183 | 4,571 |
from functools import reduce
def greedysplit_general(n, k, sigma, combine=lambda a,
b: a + b, key=lambda a: a):
""" Do a greedy split """
splits = [n]
s = sigma(0, n)
def score(splits, sigma):
splits = sorted(splits)
return key(reduce(combine, (sigma(a, b)
for (a, b) in tools.seg_iter(splits))))
while k > 0:
usedinds = set(splits)
new = min((score(splits + [i], sigma), splits + [i])
for i in range(1, n) if i not in usedinds)
splits = new[1]
s = new[0]
k -= 1
return sorted(splits), s | 6480db8f613f37704e7bf6552407e5b0f851ab47 | 4,572 |
def public_assignment_get(assignment_id: str):
"""
Get a specific assignment spec
:param assignment_id:
:return:
"""
return success_response({
'assignment': get_assignment_data(current_user.id, assignment_id)
}) | 2f3d828975c0d7db663556da5f0dc590124075b2 | 4,573 |
def recursion_detected(frame, keys):
"""Detect if we have a recursion by finding if we have already seen a
call to this function with the same locals. Comparison is done
only for the provided set of keys.
"""
current = frame
current_filename = current.f_code.co_filename
current_function = current.f_code.co_name
current_locals = {k: v
for k, v in current.f_locals.items()
if k in keys}
while frame.f_back:
frame = frame.f_back
fname = frame.f_code.co_filename
if not(fname.endswith(".py") or
fname == "<template>"):
return False
if fname != current_filename or \
frame.f_code.co_name != current_function:
continue
if ({k: v
for k, v in frame.f_locals.items()
if k in keys} == current_locals):
return True
return False | ebf30e715d2901169095bc920e8af6c715f2a1de | 4,574 |
def pars_to_blocks(pars):
""" this simulates one of the phases the markdown library goes through when parsing text and returns the paragraphs grouped as blocks, as markdown handles them
"""
pars = list(pars)
m = markdown.Markdown()
bp = markdown.blockprocessors.build_block_parser(m)
root = markdown.util.etree.Element('div')
blocks = []
while pars:
parsbefore = list(pars)
for processor in bp.blockprocessors.values():
if processor.test(root, pars[0]):
processor.run(root, pars)
while len(parsbefore) > len(pars):
blocks.append(parsbefore[0])
parsbefore = parsbefore[1:]
if pars and pars[0].strip('\n') != parsbefore[0].strip('\n'):
strippedbefore = parsbefore[0].strip('\n')
strippedcurrent = pars[0].strip('\n')
if strippedbefore.endswith(strippedcurrent):
beforelength = len(strippedbefore)
currentlength = len(strippedcurrent)
block = strippedbefore[0:beforelength - currentlength]
blocks.append(block)
else:
raise Exception('unsupported change by blockprocessor. abort! abort!')
break
return blocks | f71d4460847ec4b69ad53470aba26c145d296388 | 4,576 |
from bs4 import BeautifulSoup
def extract_intersections_from_osm_xml(osm_xml):
"""
Extract the GPS coordinates of the roads intersections
Return a list of gps tuples
"""
soup = BeautifulSoup(osm_xml)
retval = []
segments_by_extremities = {}
Roads = []
RoadRefs = []
Coordinates = {}
for point in soup.osm.findAll('node'):
Coordinates[point['id']] = (float(point['lat']), float(point['lon']))
for way in soup.osm.findAll(lambda node : node.name=="way" and node.findAll(k='highway')):
name = ""
roadPoints = []
nodes = way.findAll('nd')
for node in nodes:
roadPoints.append(node['ref'])
RoadRefs.append(roadPoints)
# iterate over the list of street and over each segment of a street.
# for each segment extremity, build a list of segment leading to it
for roadIdx, roadRef in enumerate(RoadRefs):
for segIdx, seg in enumerate(roadRef):
coords = Coordinates[seg]
if coords not in segments_by_extremities:
segments_by_extremities[coords] = []
segments_by_extremities[coords].append([roadIdx, segIdx])
# Iterate over the extremity lists, only keep nodes that appear in at least two roads
# Otherwise, they are not an intersection, just a regular point along a single road
for k in segments_by_extremities.keys():
if len(segments_by_extremities[k]) <2:
del(segments_by_extremities[k])
#finally return just the keys
return segments_by_extremities.keys() | 6cff1fe39891eb4a6c595196eabfd4569af2fd8e | 4,577 |
def spark_session(request):
"""Fixture for creating a spark context."""
spark = (SparkSession
.builder
.master('local[2]')
.config('spark.jars.packages', 'com.databricks:spark-avro_2.11:3.0.1')
.appName('pytest-pyspark-local-testing')
.enableHiveSupport()
.getOrCreate())
request.addfinalizer(lambda: spark.stop())
quiet_py4j()
return spark | e7a95ad7ebea876976923c6dd16c7a761116427d | 4,578 |
import yaml
def _load_model_from_config(config_path, hparam_overrides, vocab_file, mode):
"""Loads model from a configuration file"""
with gfile.GFile(config_path) as config_file:
config = yaml.load(config_file)
model_cls = locate(config["model"]) or getattr(models, config["model"])
model_params = config["model_params"]
if hparam_overrides:
model_params.update(hparam_overrides)
# Change the max decode length to make the test run faster
model_params["decoder.params"]["max_decode_length"] = 5
model_params["vocab_source"] = vocab_file
model_params["vocab_target"] = vocab_file
return model_cls(params=model_params, mode=mode) | 97af7dc919de5af96332c8445e162990006079f4 | 4,579 |
import ast
def _get_assignment_node_from_call_frame(frame):
"""
Helper to get the Assign or AnnAssign AST node for a call frame.
The call frame will point to a specific file and line number, and we use the
source index to retrieve the AST nodes for that line.
"""
filename = frame.f_code.co_filename
# Go up the AST from a node in the call frame line until we find an Assign or
# AnnAssign, since the (Ann)Assign may be over multiple lines.
nodes_in_line = _get_source_index(filename).get(frame.f_lineno, [])
cur_node = nodes_in_line[0]
while cur_node:
if isinstance(cur_node, (ast.Assign, ast.AnnAssign)):
return cur_node
cur_node = cur_node.parent
raise Exception("Could not find AST assignment node in the line"
f" {filename}:{frame.f_lineno}") | edb7f2425d170721e12dc4c1e2427e9584aeed8c | 4,580 |
def check_existing_user(username):
"""
a function used to check whether the given username already exists
"""
return User.user_exist(username) | 573e9a8a6c0e504812d3b90eb4a27b15edec35ab | 4,581 |
def createevent():
""" An event is a (immediate) change of the world. It has no
duration, contrary to a StaticSituation that has a non-null duration.
This function creates and returns such a instantaneous situation.
:sees: situations.py for a set of standard events types
"""
sit = Situation(type = GENERIC, pattern = None)
return sit | 998f0a473c47828435d7e5310de29ade1fbd7810 | 4,582 |
def _dump_multipoint(obj, fmt):
"""
Dump a GeoJSON-like MultiPoint object to WKT.
Input parameters and return value are the MULTIPOINT equivalent to
:func:`_dump_point`.
"""
coords = obj['coordinates']
mp = 'MULTIPOINT (%s)'
points = (' '.join(fmt % c for c in pt) for pt in coords)
# Add parens around each point.
points = ('(%s)' % pt for pt in points)
mp %= ', '.join(points)
return mp | cdea05b91c251b655e08650807e3f74d3bb5e77b | 4,583 |
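A small usage check with made-up coordinates and a '%.1f' format string.
print(_dump_multipoint({'coordinates': [(10.0, 40.0), (40.0, 30.0)]}, '%.1f'))
# MULTIPOINT ((10.0 40.0), (40.0 30.0))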
def do_inference(engine, pics_1, h_input_1, d_input_1, h_output, d_output, stream, batch_size, height, width):
"""
This is the function to run the inference
Args:
engine : Path to the TensorRT engine
pics_1 : Input images to the model.
h_input_1: Input in the host
d_input_1: Input in the device
h_output: Output in the host
d_output: Output in the device
stream: CUDA stream
batch_size : Batch size for execution time
height: Height of the output image
width: Width of the output image
Output:
The list of output images
"""
load_images_to_buffer(pics_1, h_input_1)
with engine.create_execution_context() as context:
# Transfer input data to the GPU.
cuda.memcpy_htod_async(d_input_1, h_input_1, stream)
# Run inference.
context.profiler = trt.Profiler()
context.execute(batch_size=1, bindings=[int(d_input_1), int(d_output)])
# Transfer predictions back from the GPU.
cuda.memcpy_dtoh_async(h_output, d_output, stream)
# Synchronize the stream
stream.synchronize()
# Return the host output.
out = h_output.reshape((batch_size,-1, height, width))
return out | e9e452e96d42167bf17bc6bef8dc014fa31dbe8f | 4,584 |
import ast
def make_import():
"""Import(alias* names)"""
return ast.Import(names=[make_alias()]) | e9085ee9b4b0438857b50b891fbee0b88d256f8b | 4,585 |
from typing import Union
from typing import List
def preprocess(
image: Union[np.ndarray, Image.Image],
threshold: int = None,
resize: int = 64,
quantiles: List[float] = [.01, .05, 0.1,
0.25, 0.5, 0.75, 0.9, 0.95, 0.99],
reduction: Union[str, List[str]] = ['max', 'median', 'mean', 'min']
) -> dict:
"""
Basic preprocessing metrics for a histological image.
Args:
image (Union[np.ndarray, Image.Image]): Input image.
threshold (int, optional): Threshold for tissue detection. If not
defined, Otsu's binarization will be used, which may fail for images
with data loss or only background. Defaults to None.
resize (int, optional): For artifact() function. Defaults to 64.
quantiles (List[float], optional): For HSV_quantiles() and RGB_quantiles
functions. Defaults to
[.01, .05, 0.1, 0.25, 0.5, 0.75, 0.9, 0.95, 0.99].
reduction (Union[str, List[str]], optional): Reduction methods for
sharpness() function. Defaults to ['max', 'median', 'mean', 'min'].
Raises:
TypeError: Invalid type for ``image``.
Returns:
dict: Dictionary of basic preprocessing metrics.
"""
if isinstance(image, Image.Image):
if image.mode != 'RGB':
image = image.convert('RGB')
image = np.array(image, dtype=np.uint8)
elif isinstance(image, np.ndarray):
image = image.astype(np.uint8)
else:
raise TypeError('Expected {} or {}, not {}.'.format(
np.ndarray, Image.Image, type(image)
))
# Initialize results and helper variables.
results = {}
gray = cv2.cvtColor(image, cv2.COLOR_RGB2GRAY)
mask = tissue_mask(image, threshold=threshold)
# Background percentage.
results['background'] = (mask == 0).sum()/mask.size
# Sharpness.
results.update(sharpness(gray, reduction=reduction))
# Data loss.
results.update(data_loss(gray))
# Artifacts.
small_img = cv2.resize(image, (resize, resize), cv2.INTER_LANCZOS4)
small_mask = cv2.resize(mask, (resize, resize), cv2.INTER_LANCZOS4)
results.update(HSV_quantiles(
small_img, mask=small_mask, quantiles=quantiles))
results.update(RGB_quantiles(
small_img, mask=small_mask, quantiles=quantiles))
return results | afa36739309ada2e97e18e63ae65362546b1b52c | 4,586 |
def binary_distance(label1, label2):
"""Simple equality test.
0.0 if the labels are identical, 1.0 if they are different.
>>> from nltk.metrics import binary_distance
>>> binary_distance(1,1)
0.0
>>> binary_distance(1,3)
1.0
"""
return 0.0 if label1 == label2 else 1.0 | 2c4eaebda2d6955a5012cc513857aed66df60194 | 4,587 |
def calc_commission_futures_global(trade_cnt, price):
"""
International futures: rates vary widely between brokers, so it is best to plug in your
own calculation externally; here we simply use a 0.002 rate.
:param trade_cnt: number of contracts/shares traded (int)
:param price: price per unit (USD)
:return: the calculated commission
"""
cost = trade_cnt * price
# Broker and agency rates for international futures differ a lot; ideally define your own calculation externally. Here we simply use 0.002.
commission = cost * 0.002
return commission | ddd2c4571abfcdf7021a28b6cc78fe6441da2bd3 | 4,589 |
def is_section_command(row):
"""CSV rows are cosidered new section commands if they start with
<SECTION> and consist of at least two columns column.
>>> is_section_command('<SECTION>\tSection name'.split('\t'))
True
>>> is_section_command('<other>\tSection name'.split('\t'))
False
>>> is_section_command(['<SECTION>', 'Section name', 'some more'])
True
"""
return len(row) >= 2 and row[0] == __CSV_SECTION_PREFIX | 7942625e119c4a0d3707fd5884ade6e48b2dfb1a | 4,590 |
def to_int(matrix):
"""
Function to convert each element of the matrix to int
"""
for row in range(rows(matrix)):
for col in range(cols(matrix)):
for j in range(3):
matrix[row][col][j] = int(matrix[row][col][j])
return matrix | 9f277ab0c0fe7df145e8a4c0da36fba25a523756 | 4,592 |
def create_tastypie_resource(class_inst):
"""
Usage: url(r'^api/', include(create_tastypie_resource(UfsObjFileMapping).urls)),
Access url: api/ufs_obj_file_mapping/?format=json
:param class_inst:
:return:
"""
return create_tastypie_resource_class(class_inst)() | cba76e51073612124c5cd968c9360e9c4748d604 | 4,593 |
def make_collector(entries):
""" Creates a function that collects the location data from openLCA. """
def fn(loc):
entry = [loc.getCode(), loc.getName(), loc.getRefId()]
entries.append(entry)
return fn | 83fb167c38626fde79262a32f500b33a72ab8308 | 4,594 |
def apiname(funcname):
""" Define what name the API uses, the short or the gl version.
"""
if funcname.startswith('gl'):
return funcname
else:
if funcname.startswith('_'):
return '_gl' + funcname[1].upper() + funcname[2:]
else:
return 'gl' + funcname[0].upper() + funcname[1:] | 06575fce76ac02990c973a6dd17ff177ae5e3ddc | 4,595 |
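A brief check of the three branches above; the function names are arbitrary examples.
print(apiname('glClear'))  # 'glClear'  (already prefixed)
print(apiname('clear'))    # 'glClear'
print(apiname('_clear'))   # '_glClear'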
def add_numeric_gene_pos(gene_info):
"""
Add numeric gene (start) genomic position to a gene_info dataframe
"""
gene_chr_numeric = gene_info['chr']
gene_chr_numeric = ['23' if x == 'X' else x for x in gene_chr_numeric]
gene_chr_numeric = ['24' if x == 'Y' else x for x in gene_chr_numeric]
gene_start_vec = gene_info['start']
gene_start_vec = [str(x).zfill(10) for x in gene_start_vec]
gene_pos_numeric = [x + '.' + y for x, y in zip(gene_chr_numeric, gene_start_vec)]
gene_pos_numeric = np.array([float(x) for x in gene_pos_numeric])
gene_info['genome_pos_numeric'] = gene_pos_numeric
return gene_info | ab77e6c3a1f6e8d780f5b83a3beb4d94eaf8198b | 4,596 |
import pathlib
def read_list_from_file(filename: str) -> set:
"""Build a set from a simple multiline text file.
Args:
filename: name of the text file
Returns:
a set of the unique lines from the file
"""
filepath = pathlib.Path(__file__).parent.joinpath(filename)
lines = filepath.read_text().splitlines()
return set(lines) | c6fd5f80e05cc74bad600a7af21e36b5bd672b63 | 4,597 |
def qlist(q):
"""Convenience function that converts asyncio.Queues into lists.
This is inefficient and should not be used in real code.
"""
l = []
# get the messages out
while not q.empty():
l.append(q.get_nowait())
# now put the messages back (since we popped them out)
for i in l[::-1]:
q.put_nowait(i)
return l | 0ce6fb0d543646fb036c35c800d75bbadf670b0d | 4,601 |
def is_stdin(name):
"""Tell whether or not the given name represents stdin."""
return name in STDINS | 535ce3fee9e4a9a42ef24e4b35f84420a61cc529 | 4,602 |
def filter_marker_y_padding(markers_y_indexes, padding_y_top, padding_y_bottom):
"""
Filter the markers indexes for padding space in the top and bottom of answer sheet
:param markers_y_indexes:
:param padding_y_top:
:param padding_y_bottom:
:return:
"""
return markers_y_indexes[(markers_y_indexes > padding_y_top)
& (markers_y_indexes < padding_y_bottom)] | b1eed0ac24bd6a6354072427be4375ad188572a5 | 4,603 |
def hr_admin(request):
""" Views for HR2 Admin page """
template = 'hr2Module/hradmin.html'
# searched employee
query = request.GET.get('search')
if(request.method == "GET"):
if(query != None):
emp = ExtraInfo.objects.filter(
Q(user__first_name__icontains=query) |
Q(user__last_name__icontains=query)
).distinct()
emp = emp.filter(user_type="faculty")
else:
emp = ExtraInfo.objects.all()
emp = emp.filter(user_type="faculty")
else:
emp = ExtraInfo.objects.all()
emp = emp.filter(user_type="faculty")
context = {'emps': emp}
return render(request, template, context) | b78f78c57282b60b527bbaa03eab9064d881aea1 | 4,605 |
def create_aws_clients(region='us-east-1'):
"""Creates an S3, IAM, and Redshift client to interact with.
Parameters
----------
region : str
The aws region to create each client (default 'us-east-1').
Returns
-------
ec2
A boto3 ec2 resource.
s3
A boto3 s3 resource.
iam
A boto3 iam client.
redshift
A boto3 redshift client.
"""
ec2 = boto3.resource(
'ec2',
region_name=region,
aws_access_key_id=KEY,
aws_secret_access_key=SECRET
)
s3 = boto3.resource(
's3',
region_name=region,
aws_access_key_id=KEY,
aws_secret_access_key=SECRET
)
iam = boto3.client(
'iam',
region_name=region,
aws_access_key_id=KEY,
aws_secret_access_key=SECRET
)
redshift = boto3.client(
'redshift',
region_name=region,
aws_access_key_id=KEY,
aws_secret_access_key=SECRET
)
return ec2, s3, iam, redshift | 3a422ac88791e404d67127bc85bab12b6a8aa4d9 | 4,606 |
def apply_function(f, *args, **kwargs):
""" Apply a function or staticmethod/classmethod to the given arguments.
"""
if callable(f):
return f(*args, **kwargs)
elif len(args) and hasattr(f, '__get__'):
# support staticmethod/classmethod
return f.__get__(None, args[0])(*args, **kwargs)
else:
assert False, "expected a function or staticmethod/classmethod" | 374be0283a234d4121435dbd3fa873640f2b9ad1 | 4,607 |
def join_data(ycom_county, census, land_area_data):
"""
Getting one dataframe from the three datasets
"""
census['LogPopDensity'] = np.log10(census['TotalPop']/land_area_data['LND110200D'])
data = pd.concat(([ycom_county, census]), axis=1)
return data | 171c08d0c5dac721c3100df9be747c90b299a6c1 | 4,608 |
def path_graph():
"""Return a path graph of length three."""
G = nx.path_graph(3, create_using=nx.DiGraph)
G.graph["name"] = "path"
nx.freeze(G)
return G | c5fd4ea322b512bd26755d94581d56ddfb4d52bf | 4,610 |
def dropStudentsWithEvents(df, events,
saveDroppedAs=None,
studentId='BookletNumber',
eventId='Label',
verbose=True):
"""
Drop students with certain events.
It finds students with the events, and use dropStudents() to drop them.
:param df: input data frame with data from multiple students
:param events: a list of events. Each event is a string of event name
:param saveDroppedAs: optionally saving the dropped data to a csv or pickle file. Remember to specify .csv or .pickle
:param studentId: name of the column containing the student ID info; default ot "BookletNumber"
:param eventId: name of the column containing the event name; default to "Label"
:param verbose: default to True
:return: a data frame with students having any of these events dropped.
"""
# error checks
assert (isinstance(df, pd.DataFrame))
for v in [studentId, eventId]:
assert (v in df.columns)
studentsToDrop = df.loc[df[eventId].isin(events), studentId].unique()
if verbose:
print("\ndropStudentsWithEvents:")
print(events)
return dropStudents(df, studentsToDrop, saveDroppedAs, studentId, verbose) | 5308ec96c8d5d3c9704f4a42202656bc4126e645 | 4,611 |
def create_slides(user, node, slideshow_data):
""" Generate SlideshowSlides from data """
""" Returns a collection of SlideshowSlide objects """
slides = []
with transaction.atomic():
for slide in slideshow_data:
slide_obj = SlideshowSlide(
contentnode=node,
sort_order=slide.get("sort_order"),
metadata={
"caption": slide.get('caption'),
"descriptive_text": slide.get('descriptive_text'),
"checksum": slide.get('checksum'),
"extension": slide.get('extension')
}
)
slide_obj.save()
slides.append(slide_obj)
return slides | 6fc31c11f0dc24d17fd82eacd366a0026fb95157 | 4,613 |
def is_valid(sequence):
"""
A string is not valid if the knight moves onto a blank square
and the string cannot contain more than two vowels.
"""
if any(letter == "_" for letter in sequence):
return False
# Check for vowels
# Strings shorter than 3 letters are always ok, as they
# can't contain more than two vowels
if len(sequence) < 3:
return True
# Check longer sequences for number of vowels
vowels="AEIUO"
num_vowels = len([v for v in sequence if v in vowels])
if num_vowels > 2:
return False
# Check for duplicate characters.
# The original question did not say anything about
# repeated characters, but ignoring them would lead to infinite
# sequences, such as AMAMAMA..., where the knight makes the same sequence
# of moves over and over again
if duplicate_characters(sequence):
return False
return True | 0c3a72d05155eaf69ffeb7a734e9ceeabe0c44c2 | 4,614 |
import urllib
def is_dataproc_VM():
"""Check if this installation is being executed on a Google Compute Engine dataproc VM"""
try:
dataproc_metadata = urllib.request.urlopen("http://metadata.google.internal/0.1/meta-data/attributes/dataproc-bucket").read()
if dataproc_metadata.decode("UTF-8").startswith("dataproc"):
return True
except:
pass
return False | 21044a482b534ce3625b49080d1c472d587039ad | 4,618 |
def lookup_all(base):
"""Looks up a subclass of a base class from the registry.
Looks up a subclass of a base class with name provided from the
registry. Returns a list of registered subclass if found, None otherwise.
Args:
base: The base class of the subclass to be found.
Returns:
A list of subclass of the name if found, None otherwise.
"""
basename = base.__name__
if basename not in _registries:
return None
registry = _registries[basename]
output = []
for name in registry.keys():
init_args = registry[name][_INIT_ARGS]
if init_args is not None:
output.append(registry[name][_TYPE_TAG](**init_args))
else:
output.append(registry[name][_TYPE_TAG])
return output | de6a8504d0c6cf6f149b597e4d8b41f7b5fc1eff | 4,619 |
from functools import partial
def makepyfile(testdir):
"""Fixture for making python files with single function and docstring."""
def make(*args, **kwargs):
func_name = kwargs.pop('func_name', 'f')
# content in args and kwargs is treated as docstring
wrap = partial(_wrap_docstring_in_func, func_name)
args = map(wrap, args)
kwargs = dict(zip(kwargs.keys(), map(wrap, kwargs.values())))
return testdir.makepyfile(*args, **kwargs)
return make | 420733f4ee299514dba4172cfcc93b7429c635ca | 4,620 |
from PIL import Image, ImageDraw, ImageFont
def createTextWatermark(msg, size, loc, fontcolor='white', fontpath='arial.ttf', fontsize=18):
"""Creates a watermark image of the given text.
Puts it at the given location in an RGBA image of the given size.
Location should be a 2-tuple denoting the center location of the text."""
im = Image.new('RGBA', size, (0,0,0,0))
draw = ImageDraw.Draw(im)
font = ImageFont.truetype(fontpath, fontsize)
tw, th = draw.textsize(msg, font=font)
loc = (loc[0] - tw//2, loc[1] - th//2)
draw.text(loc, msg, font=font, fill=fontcolor)
return im | 6a1ae202a92b351f7d7301735dc825e826898522 | 4,621 |
def get_server_pull_config(config:dict):
"""
takes a config dictionary and returns the variables related to server deployment (pull from intersections).
If there is any error in the configuration, returns a quadruple of -1 with a console output of the exception
"""
try:
server = config["DataTransfer"]["server"]
intersection = config["DataTransfer"]["intersection"]
startHour = config["DataTransfer"]["StartTime_PullFromIntersections"]["hour"]
startMinute = config["DataTransfer"]["StartTime_PullFromIntersections"]["minute"]
return server, intersection, startHour, startMinute
except Exception as e:
print(e)
return -1, -1, -1, -1 | 3a5a882bf91cb65462cdbf4fe202bbbc9d52ae2c | 4,622 |
def buff_push(item: BufferItem):
"""
Add BufferItem to the buffer and execute if the buffer is full
"""
q.put(item)
make_dependencies(item)
if q.full():
return buff_empty_partial(q.maxsize - 1)
return None | d45c0f67fa21cade7a0c2462e1cd8167f4939e0b | 4,623 |
from rx.core.operators.take import _take
from typing import Callable
def take(count: int) -> Callable[[Observable], Observable]:
"""Returns a specified number of contiguous elements from the start
of an observable sequence.
.. marble::
:alt: take
-----1--2--3--4----|
[ take(2) ]
-----1--2-|
Example:
>>> op = take(5)
Args:
count: The number of elements to return.
Returns:
An operator function that takes an observable source and
returns an observable sequence that contains the specified
number of elements from the start of the input sequence.
"""
return _take(count) | 636cc982c6c8c9b13a2cecb675bb0ca7aadbcd91 | 4,625 |
from typing import List
from typing import Union
def format_fields_for_join(
fields: List[Union[Field, DrivingKeyField]],
table_1_alias: str,
table_2_alias: str,
) -> List[str]:
"""Get formatted list of field names for SQL JOIN condition.
Args:
fields: Fields to be formatted.
table_1_alias: Alias that should be used in the field on the left side of the
equality sign.
table_2_alias: alias that should be used in the field on the right side of the
equality sign.
Returns:
Fields list formatted for an SQL JOIN condition.
"""
return [
JOIN_CONDITION_SQL_TEMPLATE.format(
field_name=field.name,
table_1_alias=table_1_alias,
table_2_alias=table_2_alias,
)
for field in fields
] | 691a154f8b984b11ed177a7948fe74398c693b25 | 4,626 |
def get_payment_balance(currency):
"""
Returns available balance for selected currency
This method requires authorization.
"""
result = get_data("/payment/balances", ("currency", currency))
payment_balance = namedtuple("Payment_balance", get_namedtuple(result[0]))
return [payment_balance(**element) for element in result] | 354abbf4e9bc1b22a32e31555106ce68a21e9cd1 | 4,627 |
import torch
def build_scheduler(optimizer, config):
"""
"""
scheduler = None
config = config.__dict__
sch_type = config.pop('type')
if sch_type == 'LambdaLR':
burn_in, steps = config['burn_in'], config['steps']
# Learning rate setup
def burnin_schedule(i):
if i < burn_in:
factor = pow(i / burn_in, 4)
elif i < steps[0]:
factor = 1.0
elif i < steps[1]:
factor = 0.1
else:
factor = 0.01
return factor
scheduler = optim.lr_scheduler.LambdaLR(optimizer, burnin_schedule)
elif sch_type == 'StepLR':
# Decay the learning rate by a factor of gamma every step_size steps (a step here usually means an epoch).
step_size, gamma = config['step_size'], config['gamma']
scheduler = optim.lr_scheduler.StepLR(optimizer, step_size=step_size, gamma=gamma)
elif sch_type == 'ReduceLROnPlateau':
# Adjust the learning rate when a monitored metric stops improving. Very practical: e.g. reduce the LR when the validation loss stops decreasing, or when the validation accuracy stops increasing.
scheduler = torch.optim.lr_scheduler.ReduceLROnPlateau(optimizer, mode='max', factor=0.1,
patience=3, verbose=True, threshold=1e-4)
return scheduler | b205b323db322336426f3c13195cb49735d7284d | 4,628 |
def rpca_alm(X, lmbda=None, tol=1e-7, max_iters=1000, verbose=True,
inexact=True):
"""
Augmented Lagrange Multiplier
"""
if lmbda is None:
lmbda = 1.0 / np.sqrt(X.shape[0])
Y = np.sign(X)
norm_two = svd(Y, 1)[1]
norm_inf = np.abs(Y).max() / lmbda
dual_norm = np.max([norm_two, norm_inf])
Y = Y / dual_norm
A = np.zeros(Y.shape)
E = np.zeros(Y.shape)
dnorm = la.norm(X, ord='fro')
tol_primal = 1e-6 * dnorm
total_svd = 0
mu = 0.5 / norm_two
rho = 6
sv = 5
n = Y.shape[0]
for iter1 in range(max_iters):
primal_converged = False
sv = sv + np.round(n * 0.1)
primal_iter = 0
while not primal_converged:
Eraw = X - A + (1/mu) * Y
Eupdate = np.maximum(
Eraw - lmbda/mu, 0) + np.minimum(Eraw + lmbda / mu, 0)
U, S, V = svd(X - Eupdate + (1 / mu) * Y, sv)
svp = (S > 1/mu).sum()
if svp < sv:
sv = np.min([svp + 1, n])
else:
sv = np.min([svp + round(.05 * n), n])
Aupdate = np.dot(
np.dot(U[:, :svp], np.diag(S[:svp] - 1/mu)), V[:svp, :])
if primal_iter % 10 == 0 and verbose >= 2:
print(la.norm(A - Aupdate, ord='fro'))
if ((la.norm(A - Aupdate, ord='fro') < tol_primal and
la.norm(E - Eupdate, ord='fro') < tol_primal) or
(inexact and primal_iter > 5)):
primal_converged = True
A = Aupdate
E = Eupdate
primal_iter += 1
total_svd += 1
Z = X - A - E
Y = Y + mu * Z
mu *= rho
if la.norm(Z, ord='fro') / dnorm < tol:
if verbose:
print('\nConverged at iteration {}'.format(iter1))
break
if verbose:
_verbose(A, E, X)
return A, E | 8c09f8f4b004b9a00655402e5466636aa9fc4390 | 4,629 |
def dwt_embed(wmImage, hostImage, alpha, beta):
"""Embeds a watermark image into a host image, using the First Level
Discrete Wavelet Transform and Alpha Blending.\n
The formula used for the alpha blending is:
resultLL = alpha * hostLL + beta * watermarkLL
Arguments:
wmImage (NumPy array) -- the image to be embedded
hostImage (NumPy array) -- the image to be watermarked
alpha (float) -- the first embedding strength factor
beta (float) -- the second embedding strength factor
Returns:
NumPy array type -- the watermarked image, in float64 format
"""
# Take the dimensions of the host and watermark images
wmHeight, wmWidth = wmImage.shape[:2]
hostHeight, hostWidth = hostImage.shape[:2]
# Resize the watermark image so that it is the same size as the host image
if wmHeight > hostHeight or wmWidth > hostWidth:
# Scale down the watermark image
wmImage = cv2.resize(wmImage, (hostWidth, hostHeight), interpolation = cv2.INTER_AREA)
elif wmHeight < hostHeight or wmWidth < hostWidth:
# Scale up the watermark image
wmImage = cv2.resize(wmImage, (hostWidth, hostHeight), interpolation = cv2.INTER_LINEAR)
# Take the new dimensions of the watermark image
wmHeight, wmWidth = wmImage.shape[:2]
# Split both images into channels
hostB, hostG, hostR = cv2.split(hostImage)
wmB, wmG, wmR = cv2.split(wmImage)
# Compute the first level bidimensional DWT for each channel of both images
# (LL, (HL, LH, HH))
cAhostB, (cHhostB, cVhostB, cDhostB) = pywt.dwt2(hostB, 'db2')
cAhostG, (cHhostG, cVhostG, cDhostG) = pywt.dwt2(hostG, 'db2')
cAhostR, (cHhostR, cVhostR, cDhostR) = pywt.dwt2(hostR, 'db2')
cAhostHeight, cAhostWidth = cAhostB.shape
cAwmB, (cHwmB, cVwmB, cDwmB) = pywt.dwt2(wmB, 'db2')
cAwmG, (cHwmG, cVwmG, cDwmG) = pywt.dwt2(wmG, 'db2')
cAwmR, (cHwmR, cVwmR, cDwmR) = pywt.dwt2(wmR, 'db2')
cAwmHeight, cAwmWidth = cAwmB.shape
# Generate image matrix for containing all four host coefficients images
coeffsHost = np.zeros((cAhostHeight * 2, cAhostWidth * 2, 3), dtype = 'float64')
# Merge channels for each of A, H, V and D and build the host coefficients image
cAhost = cv2.merge([cAhostB, cAhostG, cAhostR])
coeffsHost[0:cAhostHeight, 0:cAhostWidth] = cAhost
cHhost = cv2.merge([cHhostB, cHhostG, cHhostR])
coeffsHost[0:cAhostHeight, cAhostWidth:cAhostWidth * 2] = cHhost
cVhost = cv2.merge([cVhostB, cVhostG, cVhostR])
coeffsHost[cAhostHeight:cAhostHeight * 2, 0:cAhostWidth] = cVhost
cDhost = cv2.merge([cDhostB, cDhostG, cDhostR])
coeffsHost[cAhostHeight:cAhostHeight * 2, cAhostWidth:cAhostWidth * 2] = cDhost
# Display the host coefficients image
temp = np.uint8(np.rint(coeffsHost))
cv2.imshow('Host DWT', temp)
# Generate image matrix for containing all four watermark coefficients images
coeffsWm = np.zeros((cAwmHeight * 2, cAwmWidth * 2, 3), dtype = 'float64')
# Merge channels for each of A, H, V and D and build the wm coefficients image
cAwm = cv2.merge([cAwmB, cAwmG, cAwmR])
coeffsWm[0:cAwmHeight, 0:cAwmWidth] = cAwm
cHwm = cv2.merge([cHwmB, cHwmG, cHwmR])
coeffsWm[0:cAwmHeight, cAwmWidth:cAwmWidth * 2] = cHwm
cVwm = cv2.merge([cVwmB, cVwmG, cVwmR])
coeffsWm[cAwmHeight:cAwmHeight * 2, 0:cAwmWidth] = cVwm
cDwm = cv2.merge([cDwmB, cDwmG, cDwmR])
coeffsWm[cAwmHeight:cAwmHeight * 2, cAwmWidth:cAwmWidth * 2] = cDwm
# Display the watermark coefficients image
temp = np.uint8(np.rint(coeffsWm))
cv2.imshow('Watermark DWT', temp)
# Apply the Alpha Blending Technique
# wmImageLL = alpha * hostLL + beta * wmLL
cAresult = alpha * cAhost + beta * cAwm
cAresultB, cAresultG, cAresultR = cv2.split(cAresult)
# Compute the channels of the watermarked image by applying the inverse DWT
resultB = pywt.idwt2((cAresultB, (cHhostB, cVhostB, cDhostB)), 'db2')
resultG = pywt.idwt2((cAresultG, (cHhostG, cVhostG, cDhostG)), 'db2')
resultR = pywt.idwt2((cAresultR, (cHhostR, cVhostR, cDhostR)), 'db2')
# Merge the channels and obtain the final watermarked image
resultImage = cv2.merge([resultB, resultG, resultR])
return resultImage | 939e8d14ceb9452dc873f7b2d9472630211c0432 | 4,630 |
def make_file_iterator(filename):
"""Return an iterator over the contents of the given file name."""
# pylint: disable=C0103
with open(filename) as f:
contents = f.read()
return iter(contents.splitlines()) | e7b612465717dafc3155d9df9fd007f7aa9af509 | 4,631 |
def little_endian_bytes_to_int(little_endian_byte_seq):
"""Converts a pair of bytes into an integer.
The `little_endian_byte_seq` input must be a 2 bytes sequence defined
according to the little-endian notation (i.e. the less significant byte
first).
For instance, if the `little_endian_byte_seq` input is equals to
``(0xbc, 0x02)`` this function returns the decimal value ``700`` (0x02bc in
hexadecimal notation).
:param bytes little_endian_byte_seq: the 2 bytes sequence to be converted.
It must be compatible with the "bytes" type and defined according to the
little-endian notation.
"""
    # Check the argument and convert it to "bytes" if necessary.
    # Assert "little_endian_byte_seq" items are in range (0, 0xff).
    # "TypeError" and "ValueError" are raised by the "bytes" constructor if
    # necessary.
    # The statement "tuple(little_endian_byte_seq)" implicitly rejects
    # integers (and all non-iterable objects) to compensate for the fact that
    # the bytes constructor doesn't reject them: bytes(2) is valid and returns
    # b'\x00\x00'
little_endian_byte_seq = bytes(tuple(little_endian_byte_seq))
# Check that the argument is a sequence of two items
if len(little_endian_byte_seq) != 2:
raise ValueError("A sequence of two bytes is required.")
integer = little_endian_byte_seq[1] * 0x100 + little_endian_byte_seq[0]
return integer | d8d0c6d4ebb70ea541e479b21deb913053886748 | 4,633 |
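# A minimal usage sketch: the little-endian pair (0xbc, 0x02) encodes 0x02bc == 700.
assert little_endian_bytes_to_int((0xbc, 0x02)) == 700
assert little_endian_bytes_to_int(b'\x00\x01') == 256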
def higher_follower_count(A, B):
""" Compares follower count key between two dictionaries"""
if A['follower_count'] >= B['follower_count']: return "A"
return "B" | d4d182ca5a3c5bff2bc7229802603a82d44a4d67 | 4,634 |
def _element_or_none(germanium, selector, point):
"""
    Check whether the given selector is a plain element reference
    without offset clicking. If that is the case, we enable double
    hovering in the mouse actions to solve a host of issues with
    hovering and scrolling, such as elements appearing on mouse-in
    or Edge not hovering correctly.
:param germanium:
:param selector:
:param point:
:return:
"""
if isinstance(selector, Point):
return None
if point:
return None
return _element(germanium, selector) | b3de13ecefc7b8593d4b61e7caf63eee41d1521a | 4,635 |
def ENDLEMuEpP_TransferMatrix( style, tempInfo, crossSection, productFrame, angularData, EMuEpPData, multiplicity, comment = None ) :
"""This is LLNL I = 1, 3 type data."""
logFile = tempInfo['logFile']
workDir = tempInfo['workDir']
s = versionStr + '\n'
s += "Process: 'Double differential EMuEpP data transfer matrix'\n"
s += commonDataToString( comment, style, tempInfo, crossSection, productFrame, multiplicity = multiplicity )
s += angularToString( angularData, crossSection )
s += EMuEpPDataToString( EMuEpPData )
return( executeCommand( logFile, transferMatrixExecute, s, workDir, tempInfo['workFile'], tempInfo['restart'] ) ) | 224e72f52ad6b143e51a50962d548084a8e7c283 | 4,636 |
def createfourierdesignmatrix_chromatic(toas, freqs, nmodes=30, Tspan=None,
logf=False, fmin=None, fmax=None,
idx=4):
"""
Construct Scattering-variation fourier design matrix.
:param toas: vector of time series in seconds
:param freqs: radio frequencies of observations [MHz]
:param nmodes: number of fourier coefficients to use
    :param Tspan: option to use a different Tspan
:param logf: use log frequency spacing
:param fmin: lower sampling frequency
:param fmax: upper sampling frequency
:param idx: Index of chromatic effects
:return: F: Chromatic-variation fourier design matrix
:return: f: Sampling frequencies
"""
# get base fourier design matrix and frequencies
F, Ffreqs = utils.createfourierdesignmatrix_red(
toas, nmodes=nmodes, Tspan=Tspan, logf=logf,
fmin=fmin, fmax=fmax)
# compute the DM-variation vectors
Dm = (1400/freqs) ** idx
return F * Dm[:, None], Ffreqs | 59420ea9bde77f965f4571bdec5112d026c63478 | 4,638 |
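# A minimal usage sketch (assumptions: `utils` above is enterprise.signals.utils, so the
# enterprise package must be importable). TOAs are in seconds, radio frequencies in MHz.
import numpy as np
toas = np.sort(np.random.uniform(0, 5 * 365.25 * 86400, 200))   # ~5 yr of observations
radio_freqs = np.random.uniform(700, 3000, 200)                  # MHz
F, Ffreqs = createfourierdesignmatrix_chromatic(toas, radio_freqs, nmodes=10)
# F has shape (200, 20): sine/cosine columns per mode, scaled per TOA by (1400/freq)**4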
def get_word_data(char_data):
"""
    Get the word segmentation result
    :param char_data: list of character lists; each inner list is joined into a string before segmenting
    :return: list of token lists, one per input sequence
"""
seq_data = [''.join(l) for l in char_data]
word_data = []
# stop_words = [line.strip() for line in open(stop_word_file, 'r', encoding='utf-8')]
for seq in seq_data:
seq_cut = jieba.cut(seq, cut_all=False)
        word_data.append([w for w in seq_cut])
return word_data | 8ca306d0f3f4c94f6d67cdc7b865ddef4f639291 | 4,639 |
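# A minimal usage sketch (assumes jieba is installed). char_data is a list of character
# lists; each inner list is joined back into a string before segmentation.
words = get_word_data([['今', '天', '天', '气', '不', '错']])
# e.g. [['今天', '天气', '不错']], depending on the jieba dictionary in use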
from typing import List
from typing import Dict
from typing import Any
def get_output_stream(items: List[Dict[str, Any]]) -> List[OutputObject]:
"""Convert a list of items in an output stream into a list of output
objects. The element in list items are expected to be in default
serialization format for output objects.
Paramaters
----------
items: list(dict)
Items in the output stream in default serialization format
Returns
-------
list(vizier.viztrail.module.OutputObject)
"""
result = list()
for item in items:
result.append(
OutputObject(
type=item[KEY_OUTPUT_TYPE],
value=item[KEY_OUTPUT_VALUE]
)
)
return result | 841bffba3f0e4aeab19ca31b62807a5a30e818f1 | 4,641 |
def lvnf_stats(**kwargs):
"""Create a new module."""
return RUNTIME.components[LVNFStatsWorker.__module__].add_module(**kwargs) | 1bdf94687101b8ab90684b67227acec35205e320 | 4,642 |
import re
def parse_float(string):
"""
Finds the first float in a string without casting it.
:param string:
:return:
"""
matches = re.findall(r'(\d+\.\d+)', string)
if matches:
return matches[0]
else:
return None | 4adea9226d0f67cd4d2dfe6a2b65bfd24f3a7ecb | 4,643 |
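# A minimal usage sketch: the first float is returned as a string, not cast.
assert parse_float("finished in 3.75 seconds") == "3.75"
assert parse_float("no decimals here") is None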
def objectproxy_realaddress(obj):
"""
Obtain a real address as an integer from an objectproxy.
"""
voidp = QROOT.TPython.ObjectProxy_AsVoidPtr(obj)
return C.addressof(C.c_char.from_buffer(voidp)) | 6c2f1a2b0893ef2fd90315a2cd3a7c5c5524707f | 4,644 |
def delta_shear(observed_gal, psf_deconvolve, psf_reconvolve, delta_g1, delta_g2):
"""
Takes in an observed galaxy object, two PSFs for metacal (deconvolving
and re-convolving), and the amount by which to shift g1 and g2, and returns
a tuple of tuples of modified galaxy objects.
((g1plus, g1minus), (g2plus, g2minus))
"""
# Deconvolving by psf_deconvolve
inv_psf = galsim.Deconvolve(psf_deconvolve)
deconvolved = galsim.Convolve(observed_gal, inv_psf)
# Applying second shear in g1
sheared_plus_g1 = deconvolved.shear(g1=delta_g1, g2=0)
sheared_minus_g1 = deconvolved.shear(g1=-delta_g1, g2=0)
# Applying second shear in g2
sheared_plus_g2 = deconvolved.shear(g1=0, g2=delta_g2)
sheared_minus_g2 = deconvolved.shear(g1=0, g2=-delta_g2)
# Reconvolving by psf_reconvolve for g1
reconvolved_plus_g1 = galsim.Convolve(sheared_plus_g1, psf_reconvolve)
reconvolved_minus_g1 = galsim.Convolve(sheared_minus_g1, psf_reconvolve)
g1_plus_minus = (reconvolved_plus_g1, reconvolved_minus_g1)
# Reconvolving by psf_reconvolve for g2
reconvolved_plus_g2 = galsim.Convolve(sheared_plus_g2, psf_reconvolve)
reconvolved_minus_g2 = galsim.Convolve(sheared_minus_g2, psf_reconvolve)
g2_plus_minus = (reconvolved_plus_g2, reconvolved_minus_g2)
# g1_plus_minus = (sheared_plus_g1, sheared_minus_g1)
# g2_plus_minus = (sheared_plus_g2, sheared_minus_g2)
# adding noshear reconvolved for testing
reconvolved_noshear = galsim.Convolve(deconvolved, psf_reconvolve)
return g1_plus_minus, g2_plus_minus, reconvolved_noshear | 13ab29088a1a88305e9f74ab1b43351f2d19b3c6 | 4,646 |
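# A minimal usage sketch (assumes galsim is installed; the profiles and the 0.01 shear
# step are hypothetical). The observed galaxy is a sheared Gaussian convolved with a
# Gaussian PSF, and the same PSF is used for deconvolution and reconvolution.
import galsim
psf = galsim.Gaussian(fwhm=0.9)
gal = galsim.Gaussian(sigma=1.2, flux=1.0).shear(g1=0.02, g2=0.0)
observed = galsim.Convolve(gal, psf)
(g1p, g1m), (g2p, g2m), noshear = delta_shear(observed, psf, psf, 0.01, 0.01)
# Each returned object can then be rendered with .drawImage() to measure the shear response.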
def estimateModifiedPiSquared(n):
"""
Estimates that value of Pi^2 through a formula involving partial sums.
n is the number of terms to be summed; the larger the more accurate the
estimation of Pi^2 tends to be (but not always).
The modification relative to estimatePiSquared() is that the n terms are
added in reverse order (i.e. the smallest values are added first).
"""
partialSum = 0 # Initializing
# Implementation of the mathematical formula involving summing
for k in range(n, 0, -1): # Order reversed
partialSum += 1 / (k ** 2)
estimate = 6*partialSum
return estimate | 652376bf0964990905bf25b12ad8ab5156975dea | 4,647 |
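# A minimal usage sketch: the partial sums approach pi^2 ~ 9.8696 from below.
import math
print(estimateModifiedPiSquared(10000))   # ~9.8690
print(math.pi ** 2)                       # 9.8696...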
def pattern_match(template, image, upsampling=16, metric=cv2.TM_CCOEFF_NORMED, error_check=False):
"""
Call an arbitrary pattern matcher using a subpixel approach where the template and image
are upsampled using a third order polynomial.
Parameters
----------
template : ndarray
The input search template used to 'query' the destination
image
image : ndarray
The image or sub-image to be searched
upsampling : int
The multiplier to upsample the template and image.
    metric : int
        The OpenCV comparison method used to perform the template matching.
Options: {cv2.TM_CCORR_NORMED, cv2.TM_CCOEFF_NORMED, cv2.TM_SQDIFF_NORMED}
In testing the first two options perform significantly better with Apollo data.
error_check : bool
If True, also apply a different matcher and test that the values
are not too divergent. Default, False.
Returns
-------
x : float
The x offset
y : float
The y offset
    max_corr : float
        The strength of the correlation in the range [-1, 1].
    result : ndarray
        The raw correlation surface returned by cv2.matchTemplate.
"""
if upsampling < 1:
raise ValueError
# Fit a 3rd order polynomial to upsample the images
if upsampling != 1:
u_template = zoom(template, upsampling, order=3)
u_image = zoom(image, upsampling, order=3)
else:
u_template = template
u_image = image
result = cv2.matchTemplate(u_image, u_template, method=metric)
_, max_corr, min_loc, max_loc = cv2.minMaxLoc(result)
if metric == cv2.TM_SQDIFF or metric == cv2.TM_SQDIFF_NORMED:
x, y = (min_loc[0], min_loc[1])
else:
x, y = (max_loc[0], max_loc[1])
# Compute the idealized shift (image center)
ideal_y = u_image.shape[0] / 2
ideal_x = u_image.shape[1] / 2
# Compute the shift from template upper left to template center
y += (u_template.shape[0] / 2)
x += (u_template.shape[1] / 2)
x = (x - ideal_x) / upsampling
y = (y - ideal_y) / upsampling
return x, y, max_corr, result | adb98b96d9ca778a909868c0c0851bf52b1f0a1b | 4,648 |
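# A minimal usage sketch with a synthetic scene (assumes cv2 and scipy.ndimage.zoom are
# importable in the snippet's module). The template is cut out of the image, so the
# recovered offsets should be close to the known shift from the image centre.
import numpy as np
rng = np.random.default_rng(0)
image = rng.random((60, 60)).astype('float32')
template = image[20:36, 24:40].copy()            # 16x16 patch away from the centre
x, y, corr, surface = pattern_match(template, image, upsampling=4)
# x, y are the offsets of the template centre from the image centre, in original pixels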
def main(argv=[__name__]):
"""Raspi_x10 command line interface.
"""
try:
try:
devices_file, rules_file, special_days_file = argv[1:]
except ValueError:
raise Usage('Wrong number of arguments')
sched = Schedule()
try:
sched.load_conf(devices_file, 'x10_devices', 'devices')
sched.load_conf(rules_file, 'x10_rules', 'rules')
sched.load_conf(special_days_file, 'special_days', 'special_days')
except IOError:
raise Usage
except KeyError as err:
raise Usage('KeyError: {0}'.format(err))
sched.build()
sched.write()
return 0
except Usage as err:
log.error('{0.msg}\n{0.usage}'.format(err))
return 2 | 583df25dc3fb3059d6ed5b87d61a547fc1a11935 | 4,649 |
def HexaMeshIndexCoord2VoxelValue(nodes, elements, dim, elementValues):
"""
    Convert a hexahedral mesh (bricks) in index coordinates to a voxel volume, with voxel values assigned according to elementValues.
dim: dimension of volume in x, y and z in voxels (tuple)
elementValues: len(elements) == len(elementValues)
Example: to retrieve nodes corresponding to element 217:
nodesSortedUnique[elements[217],:]
Given the default voxelSize and origin, coordinates range from (-0.5 to dimXYZ+0.5)
nodesSortedUnique.shape = (nodes,3)
"""
    volume = np.zeros(dim, dtype=elementValues.dtype) # initialize volume with zeros
xyz = nodes[elements,:][:,0,:] + 0.5 # voxel coordinates of bone
xyz = xyz.astype(int)
volume[tuple(xyz.T)] = elementValues
return volume | 8dcab059dd137173e780b7dd9941c80c89d7929c | 4,650 |
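# A minimal usage sketch: a single brick whose first (reference) node sits at index
# coordinates (-0.5, -0.5, -0.5), so it maps to voxel (0, 0, 0) of a 2x2x2 volume.
# The node ordering inside the element is hypothetical.
import numpy as np
nodes = np.array([[x - 0.5, y - 0.5, z - 0.5]
                  for z in (0, 1) for y in (0, 1) for x in (0, 1)], dtype=float)
elements = np.array([[0, 1, 3, 2, 4, 5, 7, 6]])   # one hexahedron, 8 node indices
volume = HexaMeshIndexCoord2VoxelValue(nodes, elements, (2, 2, 2), np.array([7]))
assert volume[0, 0, 0] == 7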
def hamiltonian(latt: Lattice, eps: (float, np.ndarray) = 0.,
t: (float, np.ndarray) = 1.0,
dense: bool = True) -> (csr_matrix, np.ndarray):
"""Computes the Hamiltonian-matrix of a tight-binding model.
Parameters
----------
latt : Lattice
The lattice the tight-binding model is defined on.
eps : array_like, optional
The on-site energies of the model.
t : array_like, optional
The hopping energies of the model.
dense : bool, optional
If ``True`` the hamiltonian matrix is returned as a ``np.ndarray``
Returns
-------
ham : csr_matrix or np.ndarray
The Hamiltonian-matrix as a sparse or dense matrix.
"""
dmap = latt.data.map()
data = np.zeros(dmap.size)
data[dmap.onsite()] = eps
data[dmap.hopping()] = t
ham = csr_matrix((data, dmap.indices))
if dense:
ham = ham.toarray()
return ham | 63df0f8557ba13fe3501506974c402faca1811f5 | 4,651 |
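# A minimal usage sketch (assumption: `Lattice` above comes from the lattpy package,
# so lattpy must be installed). A finite chain with nearest-neighbour hopping:
import lattpy as lp
chain = lp.simple_chain(a=1.0)
chain.build(10)
ham = hamiltonian(chain, eps=0.0, t=1.0, dense=True)
# ham is a real symmetric matrix: zeros on the diagonal, 1.0 on nearest-neighbour bonds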
def pad_in(string: str, space: int) -> str:
"""
>>> pad_in('abc', 0)
'abc'
>>> pad_in('abc', 2)
' abc'
"""
return "".join([" "] * space) + string | 325c0751da34982e33e8fae580af6f439a2dcac0 | 4,652 |
def get_existing_rule(text):
"""
    Return the matched rule if the text matches an existing rule exactly,
    None otherwise.
"""
matches = get_license_matches(query_string=text)
if len(matches) == 1:
match = matches[0]
if match.matcher == MATCH_HASH:
return match.rule | 9c41241532977b0a30485c7b7609da3c6e75b59c | 4,654 |