content (string, 35-762k chars) | sha1 (string, 40 chars) | id (int64, 0-3.66M) |
---|---|---|
def get_all_migrations(ctxt, inactive=0):
"""Get all non-deleted source hypervisors.
Pass true as argument if you want deleted sources returned also.
"""
return db.migration_get_all(ctxt, inactive) | c8e8ae084ca42d560e79412e4ff56d79059055a6 | 3,651,154 |
def extract(input_data: str) -> tuple:
"""take input data and return the appropriate data structure"""
rules = input_data.split('\n')
graph = dict()
reverse_graph = dict()
for rule in rules:
container, contents = rule.split('contain')
container = ' '.join(container.split()[:2])
content_graph = dict()
for content in contents.split(','):
if content == " no other bags.":
break
parts = content.split()
amount = int(parts[0])
color = ' '.join(parts[1:3])
content_graph[color] = amount
if color in reverse_graph.keys():
reverse_graph[color].append(container)
else:
reverse_graph[color] = [container]
graph[container] = content_graph
return (graph, reverse_graph) | f71cdc23fdfaf6ef0d054c0c68e513db66289c12 | 3,651,155 |
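A minimal usage sketch for extract() above; the two rule strings are illustrative assumptions written in the same format the parser expects (Advent of Code 2020, day 7 style).
sample_rules = ("light red bags contain 1 bright white bag, 2 muted yellow bags.\n"
                "bright white bags contain no other bags.")
graph, reverse_graph = extract(sample_rules)
# graph maps each container colour to {content colour: amount}
assert graph["light red"] == {"bright white": 1, "muted yellow": 2}
# reverse_graph maps each content colour back to its possible containers
assert reverse_graph["bright white"] == ["light red"]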
from dateutil.relativedelta import relativedelta
def get_total_indemnity(date_of_joining, to_date):
"""To Calculate the total Indemnity of an employee based on employee's Joining date.
Args:
date_of_joining ([date]): Employee's Joining Date
        to_date ([date]): up until date
Returns:
total_allocation: Total Indemnity Allocation calculated from joining date till 'to_date'.
"""
#get no. of year and days employee has worked.
total_working_year = relativedelta(to_date, date_of_joining ).years
total_working_days = (to_date - date_of_joining).days
    #reason: any number of days after completing 5 years has a different calculation.
    five_year_in_days = 5*365
    # up until 5 years of working, the monthly calculation takes 15 days' salary into consideration.
    if total_working_year < 5 or (total_working_year == 5 and total_working_days == five_year_in_days):
#15 days salary is divided over a year and that becomes each day's allocation.
return 15 / 365 * total_working_days
    elif total_working_year >= 5 and total_working_days > five_year_in_days:
#calculation takes 15 days salary for 5 years and 30 days salary after 5 years
return (15 / 365 * five_year_in_days) + (30 / 365 * (total_working_days-five_year_in_days)) | 1b09d0dc7971ab4c3d63c303a93f64da924dcfa4 | 3,651,156 |
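A usage sketch for get_total_indemnity(), assuming datetime.date inputs and python-dateutil installed; the joining and end dates are illustrative.
from datetime import date
joined = date(2018, 7, 1)
allocation = get_total_indemnity(joined, date(2021, 7, 1))
# ~3 years of service, 1096 calendar days: 15 / 365 * 1096 ≈ 45.04 "days of salary" accrued
print(round(allocation, 2))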
def api_2_gamma_oil(value):
"""
converts density in API(American Petroleum Institute gravity) to gamma_oil (oil relative density by water)
:param value: density in API(American Petroleum Institute gravity)
:return: oil relative density by water
"""
    # standard conversion: relative density = 141.5 / (131.5 + API gravity)
    return 141.5 / (value + 131.5) | 20e625f22092461fcf4bc2e2361525abf8051f97 | 3,651,158 |
import numpy as np
def compute_metrics(pred, label):
    """Compute metrics like True/False Positive, True/False Negative.
MUST HAVE ONLY 2 CLASSES: BACKGROUND, OBJECT.
Args:
pred (numpy.ndarray): Prediction, one-hot encoded. Shape: [2, H, W], dtype: uint8
label (numpy.ndarray): Ground Truth, one-hot encoded. Shape: [H, W], dtype: uint8
    Returns:
        tuple of float: (iou, tp_rate, tn_rate, fp_rate, fn_rate)
"""
if len(pred.shape) > 3:
raise ValueError("pred should have shape [2, H, W], got: {}".format(pred.shape))
if len(label.shape) > 2:
raise ValueError("label should have shape [H, W], got: {}".format(label.shape))
total_pixels = pred.shape[0] * pred.shape[1]
tp = np.sum(np.logical_and(pred == 1, label > 0))
tn = np.sum(np.logical_and(pred == 0, label == 0))
fp = np.sum(np.logical_and(pred == 1, label == 0))
fn = np.sum(np.logical_and(pred == 0, label > 0))
if (tp + tn + fp + fn) != total_pixels:
raise ValueError('The number of total pixels ({}) and sum of tp,fp,tn,fn ({}) is not equal'.format(
total_pixels, (tp + tn + fp + fn)))
iou = tp / (tp + fp + fn)
_tp = tp / np.sum(label == 1)
tp_rate = (tp / (tp + fn)) * 100
fp_rate = (fp / (fp + tn)) * 100
tn_rate = (tn / (tn + fp)) * 100
fn_rate = (fn / (fn + tp)) * 100
return iou, tp_rate, tn_rate, fp_rate, fn_rate | be8415c997197c06a5998671ffe09e70c6d3719c | 3,651,159 |
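A small sanity-check sketch for compute_metrics(), assuming 2-D binary masks, which is what the arithmetic in the body operates on; the tiny arrays are illustrative.
import numpy as np
pred  = np.array([[1, 0], [0, 1]], dtype=np.uint8)
label = np.array([[1, 0], [1, 1]], dtype=np.uint8)
iou, tp_rate, tn_rate, fp_rate, fn_rate = compute_metrics(pred, label)
# 2 true positives, 1 true negative, 0 false positives, 1 false negative
# iou = 2 / (2 + 0 + 1) ≈ 0.67; tp_rate ≈ 66.7; fn_rate ≈ 33.3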
import jinja2
def expand_template(template, variables, imports, raw_imports=None):
"""Expand a template."""
if raw_imports is None:
raw_imports = imports
env = jinja2.Environment(loader=OneFileLoader(template))
template = env.get_template(template)
return template.render(imports=imports, variables=variables, raw_imports=raw_imports) | c5ebe1610a6e2fa9e0b18afa7d23652c1f7c25ba | 3,651,160 |
from typing import Any
from operator import truth
def __contains__(container: Any, item: Any, /) -> bool:
"""Check if the first item contains the second item: `b in a`."""
container_type = type(container)
try:
contains_method = debuiltins._mro_getattr(container_type, "__contains__")
except AttributeError:
# Cheating until `for` is unravelled (and thus iterators).
return debuiltins.any(x is item or x == item for x in container)
else:
if contains_method is None:
raise TypeError(f"{container_type.__name__!r} object is not a container")
is_contained = contains_method(container, item)
return truth(is_contained) | b58a5f400895df472f83a5e2410dff9cd112fc91 | 3,651,161 |
def generate_search_url(request_type):
"""Given a request type, generate a query URL for kitsu.io."""
url = BASE_URL_KITSUIO.format(request_type)
return url | 9508d909fb8eb018770b2191f7d62ccb3881f285 | 3,651,162 |
from typing import Callable
from functools import wraps
from IPython.core.magic import register_line_cell_magic, needs_local_scope
def register_magic(func: Callable[[Expr], Expr]):
"""
Make a magic command more like Julia's macro system.
Instead of using string, you can register a magic that uses Expr as the
input and return a modified Expr. It is usually easier and safer to
execute metaprogramming this way.
Parameters
----------
func : Callable[[Expr], Expr]
        Function that will be used as a magic command.
Returns
-------
Callable
Registered function itself.
Examples
--------
.. code-block:: python
@register_magic
def print_code(expr):
print(expr)
return expr
The ``print_code`` magic is registered as an ipython magic.
.. code-block:: python
%print_code a = 1
.. code-block:: python
%%print_code
def func(a):
return a + 1
"""
@register_line_cell_magic
@needs_local_scope
@wraps(func)
def _ipy_magic(line: str, cell: str = None, local_ns=None):
if cell is None:
cell = line
block = parse(cell)
block_out = func(block)
return block_out.eval(local_ns, local_ns)
return func | 06d93f8a48758dc39679af396c10a54927e3696e | 3,651,163 |
def ValidatePregnum(resp):
"""Validate pregnum in the respondent file.
resp: respondent DataFrame
"""
# read the pregnancy frame
preg = nsfg.ReadFemPreg()
# make the map from caseid to list of pregnancy indices
preg_map = nsfg.MakePregMap(preg)
# iterate through the respondent pregnum series
for index, pregnum in resp.pregnum.items():
caseid = resp.caseid[index]
indices = preg_map[caseid]
# check that pregnum from the respondent file equals
# the number of records in the pregnancy file
if len(indices) != pregnum:
print(caseid, len(indices), pregnum)
return False
return True | a51f3af130cbad4a5cd3d3c9707788f783302000 | 3,651,165 |
def is_super_admin(view, view_args, view_kwargs, *args, **kwargs):
"""
Permission function for things allowed exclusively to super admin.
Do not use this if the resource is also accessible by a normal admin, use the is_admin decorator instead.
:return:
"""
user = current_user
if not user.is_super_admin:
return ForbiddenError({'source': ''}, 'Super admin access is required').respond()
return view(*view_args, **view_kwargs) | 503550fcd52e62053d42a3059aba298009d3eb01 | 3,651,166 |
import numpy as np
def normalize_depth(val, min_v, max_v):
    """
    Normalize depth values to 0-255; closer distances get higher values
    (similar to a stereo vision disparity map).
    """
return (((max_v - val) / (max_v - min_v)) * 255).astype(np.uint8) | 431cda7af30ef1127c60069b6958ef4d8234eaae | 3,651,167 |
import numpy as np
from scipy.spatial.transform import Rotation as R
def parse_iori_block(block):
"""Turn IORI data blocks into `IoriData` objects.
Convert rotation from Quaternion format to Euler angles.
Parameters
----------
block: list of KVLItem
A list of KVLItem corresponding to a IORI data block.
Returns
-------
iori_data: IoriData
A IoriData object holding the IORI information of a block.
"""
block_dict = {
s.key: s for s in block
}
data = block_dict['IORI'].value * 1.0 / block_dict["SCAL"].value
rotation = np.array([R.from_quat(q).as_euler('zyx', degrees=True) for q in data])
z, y, x = rotation.T
return IoriData(
cts = block_dict['STMP'].value,
z = z,
y = y,
x = x,
) | b9ad59677e51c30b2bec51a0503fc2718cde0f7d | 3,651,168 |
def ungap_all(align):
"""
Removes all gaps (``-`` symbols) from all sequences of the :class:`~data.Align`
    instance *align* and returns the resulting :class:`~data.Container` instance.
"""
result = data.Container()
for n,s,g in align:
        result.append(n, s.replace('-', ''), g)
return result | 511b6aeb7fc262b733a97b5180a23c7f044fea06 | 3,651,169 |
def expandBcv(bcv):
"""If the bcv is an interval, expand if.
"""
if len(bcv) == 6:
return bcv
else:
return "-".join(splitBcv(bcv)) | abfb1bf31acca579fecb526d571b32cefa7ecd61 | 3,651,170 |
def cluster_profile_platform(cluster_profile):
"""Translate from steps.cluster_profile to workflow.as slugs."""
if cluster_profile == 'azure4':
return 'azure'
if cluster_profile == 'packet':
return 'metal'
return cluster_profile | 0a01f566562002fe43c3acbb00d5efcc09d25314 | 3,651,172 |
import cvxpy as cvx
import numpy as np
def get_price_lambda_star_lp_1_cvxpy(w: np.ndarray, c_plus: np.ndarray, psi_plus: np.ndarray) \
        -> float:
"""
Computes lambda_star based on dual program of the projection of w_star.
:param w: current state in workload space.
:param c_plus: vector normal to the level set in the monotone region 'right above' the face.
:param psi_plus: vector normal to the closest face.
:return: lambda_star: price of random oscillations along the closest face.
"""
assert not StrategicIdlingHedging._is_w_inside_artificial_monotone_region(w, psi_plus)
num_wl = w.shape[0]
lambda_var = cvx.Variable(1)
v_dagger_var = cvx.Variable((num_wl, 1))
objective = cvx.Maximize(v_dagger_var.T @ w)
constraints = [c_plus - v_dagger_var - lambda_var * psi_plus == 0,
v_dagger_var >= 0]
prob = cvx.Problem(objective, constraints)
_ = prob.solve(solver=cvx.SCS, eps=1e-8)
    if prob.status != 'optimal':
        lambda_star = None
    else:
        lambda_star = lambda_var.value[0]
return lambda_star | 0a1a658cd86a0253fe3caf8a5e162393926b351a | 3,651,173 |
import typing
import random
def _get_nodes(
network: typing.Union[NetworkIdentifier, Network],
sample_size: typing.Optional[int],
predicate: typing.Callable,
) -> typing.List[Node]:
"""Decaches domain objects: Node.
"""
nodeset = [i for i in get_nodes(network) if predicate(i)]
if sample_size is not None:
sample_size = min(sample_size, len(nodeset))
return nodeset if sample_size is None else random.sample(nodeset, sample_size) | f3be401c2fd0adf58f10b679d254ff2075f4546b | 3,651,174 |
def cdl_key():
"""Four-class system (grain, forage, vegetable, orchard. Plus 5: non-ag/undefined"""
key = {1: ('Corn', 1),
2: ('Cotton', 1),
3: ('Rice', 1),
4: ('Sorghum', 1),
5: ('Soybeans', 1),
6: ('Sunflower', 1),
7: ('', 5),
8: ('', 5),
9: ('', 5),
10: ('Peanuts', 1),
11: ('Tobacco', 2),
12: ('Sweet Corn', 1),
13: ('Pop or Orn Corn', 1),
14: ('Mint', 2),
15: ('', 5),
16: ('', 5),
17: ('', 5),
18: ('', 5),
19: ('', 5),
20: ('', 5),
21: ('Barley', 1),
22: ('Durum Wheat', 1),
23: ('Spring Wheat', 1),
24: ('Winter Wheat', 1),
25: ('Other Small Grains', 1),
26: ('Dbl Crop WinWht/Soybeans', 1),
27: ('Rye', 1),
28: ('Oats', 1),
29: ('Millet', 1),
30: ('Speltz', 1),
31: ('Canola', 1),
32: ('Flaxseed', 1),
33: ('Safflower', 1),
34: ('Rape Seed', 1),
35: ('Mustard', 1),
36: ('Alfalfa', 3),
37: ('Other Hay/Non Alfalfa', 3),
38: ('Camelina', 1),
39: ('Buckwheat', 1),
40: ('', 5),
41: ('Sugarbeets', 2),
42: ('Dry Beans', 2),
43: ('Potatoes', 2),
44: ('Other Crops', 2),
45: ('Sugarcane', 2),
46: ('Sweet Potatoes', 2),
47: ('Misc Vegs & Fruits', 2),
48: ('Watermelons', 2),
49: ('Onions', 2),
50: ('Cucumbers', 2),
51: ('Chick Peas', 2),
52: ('Lentils', 2),
53: ('Peas', 2),
54: ('Tomatoes', 2),
55: ('Caneberries', 2),
56: ('Hops', 2),
57: ('Herbs', 2),
58: ('Clover/Wildflowers', 3),
59: ('Sod/Grass Seed', 3),
60: ('Switchgrass', 3),
61: ('Fallow/Idle Cropland', 3),
62: ('Pasture/Grass', 3),
63: ('Forest', 5),
64: ('Shrubland', 5),
65: ('Barren', 5),
66: ('Cherries', 4),
67: ('Peaches', 4),
68: ('Apples', 4),
69: ('Grapes', 4),
70: ('Christmas Trees', 4),
71: ('Other Tree Crops', 4),
72: ('Citrus', 4),
73: ('', 5),
74: ('Pecans', 4),
75: ('Almonds', 4),
76: ('Walnuts', 4),
77: ('Pears', 4),
78: ('', 5),
79: ('', 5),
80: ('', 5),
81: ('Clouds/No Data', 5),
82: ('Developed', 5),
83: ('Water', 5),
84: ('', 5),
85: ('', 5),
86: ('', 5),
87: ('Wetlands', 5),
88: ('Nonag/Undefined', 5),
89: ('', 5),
90: ('', 5),
91: ('', 5),
92: ('Aquaculture', 5),
93: ('', 5),
94: ('', 5),
95: ('', 5),
96: ('', 5),
97: ('', 5),
98: ('', 5),
99: ('', 5),
100: ('', 5),
101: ('', 5),
102: ('', 5),
103: ('', 5),
104: ('', 5),
105: ('', 5),
106: ('', 5),
107: ('', 5),
108: ('', 5),
109: ('', 5),
110: ('', 5),
111: ('Open Water', 5),
112: ('Perennial Ice/Snow', 5),
113: ('', 5),
114: ('', 5),
115: ('', 5),
116: ('', 5),
117: ('', 5),
118: ('', 5),
119: ('', 5),
120: ('', 5),
121: ('Developed/Open Space', 5),
122: ('Developed/Low Intensity', 5),
123: ('Developed/Med Intensity', 5),
124: ('Developed/High Intensity', 5),
125: ('', 5),
126: ('', 5),
127: ('', 5),
128: ('', 5),
129: ('', 5),
130: ('', 5),
131: ('Barren', 5),
132: ('', 5),
133: ('', 5),
134: ('', 5),
135: ('', 5),
136: ('', 5),
137: ('', 5),
138: ('', 5),
139: ('', 5),
140: ('', 5),
141: ('Deciduous Forest', 5),
142: ('Evergreen Forest', 5),
143: ('Mixed Forest', 5),
144: ('', 5),
145: ('', 5),
146: ('', 5),
147: ('', 5),
148: ('', 5),
149: ('', 5),
150: ('', 5),
151: ('', 5),
152: ('Shrubland', 5),
153: ('', 5),
154: ('', 5),
155: ('', 5),
156: ('', 5),
157: ('', 5),
158: ('', 5),
159: ('', 5),
160: ('', 5),
161: ('', 5),
162: ('', 5),
163: ('', 5),
164: ('', 5),
165: ('', 5),
166: ('', 5),
167: ('', 5),
168: ('', 5),
169: ('', 5),
170: ('', 5),
171: ('', 5),
172: ('', 5),
173: ('', 5),
174: ('', 5),
175: ('', 5),
176: ('Grassland/Pasture', 5),
177: ('', 5),
178: ('', 5),
179: ('', 5),
180: ('', 5),
181: ('', 5),
182: ('', 5),
183: ('', 5),
184: ('', 5),
185: ('', 5),
186: ('', 5),
187: ('', 5),
188: ('', 5),
189: ('', 5),
190: ('Woody Wetlands', 5),
191: ('', 5),
192: ('', 5),
193: ('', 5),
194: ('', 5),
195: ('Herbaceous Wetlands', 5),
196: ('', 5),
197: ('', 5),
198: ('', 5),
199: ('', 5),
200: ('', 5),
201: ('', 5),
202: ('', 5),
203: ('', 5),
204: ('Pistachios', 4),
205: ('Triticale', 1),
206: ('Carrots', 2),
207: ('Asparagus', 2),
208: ('Garlic', 2),
209: ('Cantaloupes', 2),
210: ('Prunes', 2),
211: ('Olives', 2),
212: ('Oranges', 3),
213: ('Honeydew Melons', 2),
214: ('Broccoli', 2),
215: ('Avocados', 2),
216: ('Peppers', 2),
217: ('Pomegranates', 4),
218: ('Nectarines', 4),
219: ('Greens', 2),
220: ('Plums', 4),
221: ('Strawberries', 2),
222: ('Squash', 2),
223: ('Apricots', 4),
224: ('Vetch', 3),
225: ('Dbl Crop WinWht/Corn', 1),
226: ('Dbl Crop Oats/Corn', 1),
227: ('Lettuce', 2),
228: ('', 1),
229: ('Pumpkins', 2),
230: ('Dbl Crop Lettuce/Durum Wht', 2),
231: ('Dbl Crop Lettuce/Cantaloupe', 2),
232: ('Dbl Crop Lettuce/Cotton', 2),
233: ('Dbl Crop Lettuce/Barley', 2),
234: ('Dbl Crop Durum Wht/Sorghum', 1),
235: ('Dbl Crop Barley/Sorghum', 1),
236: ('Dbl Crop WinWht/Sorghum', 1),
237: ('Dbl Crop Barley/Corn', 1),
238: ('Dbl Crop WinWht/Cotton', 1),
239: ('Dbl Crop Soybeans/Cotton', 1),
240: ('Dbl Crop Soybeans/Oats', 1),
241: ('Dbl Crop Corn/Soybeans', 1),
242: ('Blueberries', 2),
243: ('Cabbage', 2),
244: ('Cauliflower', 2),
245: ('Celery', 2),
246: ('Radishes', 2),
247: ('Turnips', 2),
248: ('Eggplants', 2),
249: ('Gourds', 2),
250: ('Cranberries', 2),
251: ('', 5),
252: ('', 5),
253: ('', 5),
254: ('Dbl Crop Barley/Soybeans', 1),
255: ('', 5)}
return key | 634a35d2962695dd0ef1b38a0c353498ca3dea89 | 3,651,175 |
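A sketch of how the lookup might be used to collapse raw CDL codes into the five groups; the small code array is an illustrative assumption.
import numpy as np
key = cdl_key()
cdl_codes = np.array([[1, 36, 68],
                      [121, 24, 152]])
# map each CDL code to its group (1 Corn, 36 Alfalfa, 68 Apples, 121 Developed, 24 Winter Wheat, 152 Shrubland)
groups = np.vectorize(lambda code: key[code][1])(cdl_codes)
# groups -> [[1 3 4]
#            [5 1 5]]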
def colmeta(colname, infile=None, name=None, units=None, ucd=None, desc=None,
outfile=None):
"""
Modifies the metadata of one or more columns. Some or all of the name,
    units, ucd and description of the column(s),
identified by "colname" can be set by using some or all of the listed flags.
Typically, "colname" will simply be the name of a single column.
:param colname: string, name of the column to change meta data for
:param infile: string, the location and file name for the input file, if
not defined will return the STILTS command string
:param outfile: string, the location and file name for the output file,
if not defined will default to infile
:param name: string, new name for the column
:param units: string, new unit for the column
:param ucd: string, new UCD for the column
:param desc: string, new description for the column
    :return: the constructed STILTS command string if "infile" is None, otherwise None
"""
cmdstr = "colmeta "
if name is None and units is None and ucd is None and desc is None:
return 0
if name is not None:
cmdstr += '-name {0} '.format(__checkq__(str(name)))
if units is not None:
cmdstr += '-units {0} '.format(__checkq__(str(units)))
if ucd is not None:
cmdstr += '-ucd {0} '.format(__checkq__(str(ucd)))
if desc is not None:
cmdstr += '-desc {0} '.format(__checkq__(str(desc)))
cmdstr += '{0}'.format(colname)
if infile is None:
return cmdstr
if outfile is not None:
tpipe(cmdstr, infile=infile, outfile=outfile)
else:
tpipe(cmdstr, infile=infile, outfile=infile) | 15fc5b53e4ebd3563b00ef771a707d2ad2473ad7 | 3,651,176 |
import altair as alt
import pandas as pd
def get_confusion_matrix_chart(cm, title):
"""Plot custom confusion matrix chart."""
source = pd.DataFrame([[0, 0, cm['TN']],
[0, 1, cm['FP']],
[1, 0, cm['FN']],
[1, 1, cm['TP']],
], columns=["actual values", "predicted values", "count"])
base = alt.Chart(source).encode(
y='actual values:O',
x='predicted values:O',
).properties(
width=200,
height=200,
title=title,
)
rects = base.mark_rect().encode(
color='count:Q',
)
text = base.mark_text(
align='center',
baseline='middle',
color='black',
size=12,
dx=0,
).encode(
text='count:Q',
)
return rects + text | 28884c46a51f3baf51dc5a6f3c0396a5c8f24e10 | 3,651,177 |
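A usage sketch, assuming altair and pandas are available; the confusion-matrix counts below are illustrative.
cm = {'TP': 42, 'TN': 35, 'FP': 8, 'FN': 15}
chart = get_confusion_matrix_chart(cm, title="Validation confusion matrix")
# chart.save("confusion_matrix.html")  # or render inline in a notebook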
def get_ppo_plus_eco_params(scenario):
"""Returns the param for the 'ppo_plus_eco' method."""
assert scenario in DMLAB_SCENARIOS, (
'Non-DMLab scenarios not supported as of today by PPO+ECO method')
if scenario == 'noreward' or scenario == 'norewardnofire':
return md(get_common_params(scenario), {
'action_set': '' if scenario == 'noreward' else 'nofire',
'_gin.create_single_env.run_oracle_before_monitor': True,
'_gin.CuriosityEnvWrapper.scale_task_reward': 0.0,
'_gin.create_environments.scale_task_reward_for_eval': 0,
'_gin.create_environments.scale_surrogate_reward_for_eval': 1,
'_gin.OracleExplorationReward.reward_grid_size': 30,
'r_checkpoint': '',
'_gin.CuriosityEnvWrapper.scale_surrogate_reward':
0.03017241379310345,
'_gin.train.ent_coef': 0.002053525026457146,
'_gin.create_environments.online_r_training': True,
'_gin.RNetworkTrainer.observation_history_size': 60000,
'_gin.RNetworkTrainer.training_interval': -1,
'_gin.CuriosityEnvWrapper.exploration_reward_min_step': 60000,
'_gin.RNetworkTrainer.num_epochs': 10,
})
else:
return md(get_common_params(scenario), {
'action_set': '',
'r_checkpoint': '',
'_gin.EpisodicMemory.capacity': 200,
'_gin.similarity_to_memory.similarity_aggregation': 'percentile',
'_gin.EpisodicMemory.replacement': 'random',
'_gin.CuriosityEnvWrapper.scale_task_reward': 1.0,
'_gin.CuriosityEnvWrapper.scale_surrogate_reward':
0.03017241379310345,
'_gin.train.ent_coef': 0.002053525026457146,
'_gin.create_environments.online_r_training': True,
'_gin.RNetworkTrainer.observation_history_size': 60000,
'_gin.RNetworkTrainer.training_interval': -1,
'_gin.CuriosityEnvWrapper.exploration_reward_min_step': 60000,
'_gin.RNetworkTrainer.num_epochs': 10,
}) | 26bb3db0cf14eceea86cd659332c9bbc0195ab9b | 3,651,178 |
def field_display(name):
"""
Works with Django's get_FOO_display mechanism for fields with choices set. Given
the name of a field, returns a producer that calls get_<name>_display.
"""
return qs.include_fields(name), producers.method(f"get_{name}_display") | 7fbc17dddfa398934496099f605f6cee97a802ad | 3,651,179 |
from typing import Dict
from typing import List
def extract_attachments(payload: Dict) -> List[Image]:
"""
Extract images from attachments.
There could be other attachments, but currently we only extract images.
"""
attachments = []
for item in payload.get('attachment', []):
# noinspection PyProtectedMember
if item.get("type") in ("Document", "Image") and item.get("mediaType") in Image._valid_media_types:
if item.get('pyfed:inlineImage', False):
# Skip this image as it's indicated to be inline in content and source already
continue
attachments.append(
ActivitypubImage(
url=item.get('url'),
name=item.get('name') or "",
media_type=item.get("mediaType"),
)
)
return attachments | afb9d959e680c51fc327d6c7e5f5e74fdc5db5e6 | 3,651,181 |
from ...model_zoo import get_model
def yolo3_mobilenet1_0_custom(
classes,
transfer=None,
pretrained_base=True,
pretrained=False,
norm_layer=BatchNorm, norm_kwargs=None,
**kwargs):
"""YOLO3 multi-scale with mobilenet base network on custom dataset.
Parameters
----------
classes : iterable of str
Names of custom foreground classes. `len(classes)` is the number of foreground classes.
transfer : str or None
If not `None`, will try to reuse pre-trained weights from yolo networks trained on other
datasets.
pretrained_base : boolean
Whether fetch and load pretrained weights for base network.
norm_layer : object
Normalization layer used (default: :class:`mxnet.gluon.nn.BatchNorm`)
Can be :class:`mxnet.gluon.nn.BatchNorm` or :class:`mxnet.gluon.contrib.nn.SyncBatchNorm`.
norm_kwargs : dict
Additional `norm_layer` arguments, for example `num_devices=4`
for :class:`mxnet.gluon.contrib.nn.SyncBatchNorm`.
Returns
-------
mxnet.gluon.HybridBlock
Fully hybrid yolo3 network.
"""
if transfer is None:
base_net = get_mobilenet(multiplier=1,
pretrained=pretrained_base,
norm_layer=norm_layer, norm_kwargs=norm_kwargs,
**kwargs)
stages = [base_net.features[:33],
base_net.features[33:69],
base_net.features[69:-2]]
anchors = [
[10, 13, 16, 30, 33, 23],
[30, 61, 62, 45, 59, 119],
[116, 90, 156, 198, 373, 326]]
strides = [8, 16, 32]
net = get_yolov3(
'mobilenet1.0', stages, [512, 256, 128], anchors, strides, classes, 'voc',
pretrained=pretrained, norm_layer=norm_layer, norm_kwargs=norm_kwargs, **kwargs)
else:
net = get_model(
'yolo3_mobilenet1.0_' +
str(transfer),
pretrained=True,
**kwargs)
net.reset_class(classes)
return net | 2da86fe66538e3cd9a21c456c00312a217ab5ca0 | 3,651,182 |
from difflib import ndiff
def calculate_levenshtein_distance(str_1, str_2):
"""
The Levenshtein distance is a string metric for measuring the difference between two sequences.
It is calculated as the minimum number of single-character edits necessary to transform one string into another
"""
distance = 0
buffer_removed = buffer_added = 0
for x in ndiff(str_1, str_2):
code = x[0]
# Code ? is ignored as it does not translate to any modification
if code == ' ':
distance += max(buffer_removed, buffer_added)
buffer_removed = buffer_added = 0
elif code == '-':
buffer_removed += 1
elif code == '+':
buffer_added += 1
distance += max(buffer_removed, buffer_added)
return distance | 949d54fbcbd2169aa06cedc7341e98c12412d03c | 3,651,183 |
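A quick check of the diff-based distance above, using classic examples; both expected values follow from difflib's default character alignment.
assert calculate_levenshtein_distance("kitten", "sitting") == 3
assert calculate_levenshtein_distance("flaw", "lawn") == 2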
import datetime
def make_datetime(value, *, format_=DATETIME_FORMAT):
"""
>>> make_datetime('2001-12-31T23:59:59')
datetime.datetime(2001, 12, 31, 23, 59, 59)
"""
return datetime.datetime.strptime(value, format_) | 5c6d79ae0ddc9f4c47592a90ed3232f556df0a49 | 3,651,184 |
import inspect
import sys as _sys
from keyword import iskeyword as _iskeyword
def named_struct_dict(typename, field_names=None, default=None, fixed=False, *, structdict_module=__name__,
base_dict=None, sorted_repr=None, verbose=False, rename=False, module=None, qualname_prefix=None,
frame_depth=1):
"""Returns a new subclass of StructDict with all fields as properties."""
# Validate the field names. At the user's option, either generate an error
# message or automatically replace the field name with a valid name.
if fixed:
mixin_type = NamedFixedStructDictMixin.__name__
else:
mixin_type = NamedStructDictMixin.__name__
if inspect.isclass(base_dict):
base_dict = base_dict.__name__
if base_dict is None:
base_dict = 'dict'
elif base_dict not in ('dict', 'OrderedDict', 'SortedDict'):
raise NotImplementedError(f"base_dict: {base_dict} is not supported.")
if sorted_repr is None:
sorted_repr = True if base_dict in ('dict',) else False
if isinstance(field_names, str):
field_names = field_names.replace(',', ' ').split()
field_names = list(map(str, field_names)) if field_names else []
typename = str(typename)
if rename:
seen = set()
for index, name in enumerate(field_names):
if (not name.isidentifier()
or _iskeyword(name)
or name.startswith('_')
or name in seen):
field_names[index] = f"_{index}"
seen.add(name)
for name in [typename, structdict_module] + field_names:
if type(name) is not str:
raise TypeError('Type names, field names and structdict_module must be strings')
if name is not structdict_module and not name.isidentifier():
raise ValueError(f"Type names and field names must be valid identifiers: {name!r}")
if _iskeyword(name):
raise ValueError(f"Type names and field names cannot be a keyword: {name!r}")
seen = set()
for name in field_names:
if name.startswith('_') and not rename:
raise ValueError(f"Field names cannot start with an underscore: {name!r}")
if name in seen:
raise ValueError(f"Encountered duplicate field name: {name!r}")
seen.add(name)
default_val = "None" if default is None else 'default_val'
# Fill-in the class template
class_definition = _struct_prop_dict_class_template.format(
structdict_module=structdict_module,
mixin_type=mixin_type,
base_dict=base_dict,
typename=typename,
field_names=tuple(field_names),
kwargs_map=(", ".join([f"{field_name}={default_val}" for field_name in field_names]).replace("'", "")) + (
"," if field_names else ""),
kwargs_eq_map=(", ".join([f"{field_name}={field_name}" for field_name in field_names]).replace("'", "")) + (
"," if field_names else ""),
sorted_repr=sorted_repr
)
# Execute the template string in a temporary namespace and support
# tracing utilities by setting a value for frame.f_globals['__name__']
namespace = dict(__name__=f"struct_prop_dict_{typename}")
namespace.update(default_val=default)
exec(class_definition, namespace)
result = namespace[typename]
result._source = class_definition
if verbose:
print(result._source)
# For pickling to work, the __module__ variable needs to be set to the frame
# where the named structdict is created. Bypass this step in environments where
# _sys._getframe is not defined (Jython for example) or _sys._getframe is not
# defined for arguments greater than 0 (IronPython), or where the user has
# specified a particular module.
try:
frame = _sys._getframe(frame_depth)
except (AttributeError, ValueError):
pass
else:
if module is None:
module = frame.f_globals.get('__name__', '__main__')
if qualname_prefix is None:
qualname_prefix = frame.f_locals.get('__qualname__', '')
if module is not None:
result.__module__ = module
if qualname_prefix:
result.__qualname__ = f'{qualname_prefix}.' + result.__qualname__
return result | 465ac4783697b749c092d96fa8af498e67f15d51 | 3,651,185 |
from .pytorch.pytorch_onnxruntime_model import PytorchONNXRuntimeModel as _PytorchONNXRuntimeModel
def PytorchONNXRuntimeModel(model, input_sample=None, onnxruntime_session_options=None):
"""
Create a ONNX Runtime model from pytorch.
:param model: 1. Pytorch model to be converted to ONNXRuntime for inference
2. Path to ONNXRuntime saved model.
:param input_sample: A set of inputs for trace, defaults to None if you have trace before or
model is a LightningModule with any dataloader attached,
defaults to None.
:param onnxruntime_session_options: A session option for onnxruntime accelerator.
:return: A PytorchONNXRuntimeModel instance
"""
    return _PytorchONNXRuntimeModel(model, input_sample,
onnxruntime_session_options=onnxruntime_session_options) | d925b67c3628995d75d1ea6c687e5beb022fdbd8 | 3,651,186 |
import numpy as np
def intensity_variance(mask: np.ndarray, image: np.ndarray) -> float:
"""Returns variance of all intensity values in region of interest."""
return np.var(image[mask]) | e967b4cd3c3a896fba785d8c9e5f8bf07daa620d | 3,651,188 |
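A tiny usage sketch, assuming a boolean mask with the same shape as the image; the values are illustrative.
import numpy as np
image = np.array([[10., 20.], [30., 40.]])
mask = np.array([[True, True], [False, True]])   # region of interest
print(intensity_variance(mask, image))           # variance of [10, 20, 40] ≈ 155.56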
import numpy as np
def permute_array(arr, axis=0):
"""Permute array along a certain axis
Args:
arr: numpy array
axis: axis along which to permute the array
"""
if axis == 0:
return np.random.permutation(arr)
else:
return np.random.permutation(arr.swapaxes(0, axis)).swapaxes(0, axis) | ce5f6d571062f36888d22836579332034f4fe924 | 3,651,189 |
def dsmatch(name, dataset, fn):
"""
Fuzzy search best matching object for string name in dataset.
Args:
name (str): String to look for
dataset (list): List of objects to search for
fn (function): Function to obtain a string from a element of the dataset
Returns:
First element with the maximun fuzzy ratio.
"""
max_ratio = 0
matching = None
for e in dataset:
if fuzz and name:
ratio = fuzz.token_sort_ratio(normalize(name), normalize(fn(e)))
if ratio > max_ratio:
max_ratio = ratio
matching = e
elif normalize(name) == normalize(fn(e)):
matching = e
break
return matching | 0835c0da3773eedab95c78e1b4f7f28abde0d8fd | 3,651,191 |
import numpy as np
def f(q):
"""Constraint map for the origami."""
return 0.5 * (np.array([
q[0] ** 2,
(q[1] - q[0]) ** 2 + q[2] ** 2 + q[3] ** 2,
(q[4] - q[1]) ** 2 + (q[5] - q[2]) ** 2 + (q[6] - q[3]) ** 2,
q[4] ** 2 + q[5] ** 2 + q[6] ** 2,
q[7] ** 2 + q[8] ** 2 + q[9] ** 2,
(q[7] - q[1]) ** 2 + (q[8] - q[2]) ** 2 + (q[9] - q[3]) ** 2,
(q[7] - q[4]) ** 2 + (q[8] - q[5]) ** 2 + (q[9] - q[6]) ** 2,
q[10] ** 2 + q[11] ** 2,
(q[10] - q[0]) ** 2 + q[11] ** 2,
(q[10] - q[1]) ** 2 + (q[11] - q[2]) ** 2 + q[3] ** 2,
(q[10] - q[7]) ** 2 + (q[11] - q[8]) ** 2 + q[9] ** 2,
]) - lengths2) / (lengths) | 77c3617a76cb2e184b1f22404f1db8be8212a4c9 | 3,651,193 |
def resize(clip, newsize=None, height=None, width=None):
"""
Returns a video clip that is a resized version of the clip.
Parameters
------------
newsize:
      Can be either
        - ``(height, width)`` in pixels
        - A float representing a scaling factor, like 0.5
        - A function of time returning one of these.
width:
width of the new clip in pixel. The height is then computed so
that the width/height ratio is conserved.
height:
height of the new clip in pixel. The width is then computed so
that the width/height ratio is conserved.
Examples
----------
>>> myClip.resize( (460,720) ) # New resolution: (460,720)
    >>> myClip.resize(0.6) # width and height multiplied by 0.6
>>> myClip.resize(width=800) # height computed automatically.
>>> myClip.resize(lambda t : 1+0.02*t) # slow swelling of the clip
"""
w, h = clip.size
if newsize != None:
def trans_newsize(ns):
if isinstance(ns, (int, float)):
return [ns * w, ns * h]
else:
return ns
if hasattr(newsize, "__call__"):
newsize2 = lambda t : trans_newsize(newsize(t))
if clip.ismask:
fun = lambda gf,t: (1.0*resizer((255 * gf(t))
.astype('uint8'),
newsize2(t))/255)
else:
fun = lambda gf,t: resizer(gf(t).astype('uint8'),
newsize2(t))
return clip.fl(fun, keep_duration=True, apply_to='mask')
else:
newsize = trans_newsize(newsize)
elif height != None:
newsize = [w * height / h, height]
elif width != None:
newsize = [width, h * width / w]
if clip.ismask:
fl = lambda pic: 1.0*resizer((255 * pic).astype('uint8'),
newsize)/255
else:
fl = lambda pic: resizer(pic.astype('uint8'), newsize)
return clip.fl_image(fl, apply_to='mask') | 5a8541e1320d37bd47aa35978794d849af358cb6 | 3,651,194 |
import numpy as np
import tensorflow as tf
from tmm import coh_tmm
def calc_rt_pytmm(pol, omega, kx, n, d):
"""API-compatible wrapper around pytmm
"""
vec_omega = omega.numpy()
vec_lambda = C0/vec_omega*2*np.pi
vec_n = n.numpy()
vec_d = d.numpy()
vec_d = np.append(np.inf, vec_d)
vec_d = np.append(vec_d, np.inf)
vec_kx = kx.numpy().reshape([-1,1])
vec_k0 = 2 * np.pi / vec_lambda.reshape([1,-1])
vec_theta = np.arcsin(vec_kx / vec_k0)
r = np.zeros((len(kx), len(omega)), dtype=np.complex64)
t = np.zeros((len(kx), len(omega)), dtype=np.complex64)
for i, theta in enumerate(vec_theta):
for j, lam in enumerate(vec_lambda):
out = coh_tmm(pol, vec_n, vec_d, theta[j], lam)
r[i, j] = out['r']
t[i, j] = out['t']
t = tf.constant(t)
r = tf.constant(r)
return tf.constant(t), tf.constant(r) | def2fb22d2e72a873794838601bc74a7c65cb9c3 | 3,651,195 |
def statistic_bbox(dic, dic_im):
""" Statistic number of bbox of seed and image-level data for each class
Parameters
----------
dic: seed roidb dictionary
dic_im: image-level roidb dictionary
Returns
-------
num_bbox: list for number of 20 class's bbox
num_bbox_im: list for number of 20 class's bbox
"""
num_bbox = [0] * 20
num_bbox_im = [0] * 20
for d in dic:
for c in d['gt_classes']:
num_bbox[c-1] += 1
for d in dic_im:
for c in d['gt_classes']:
num_bbox_im[c-1] += 1
print("Statistic for seed data bbox: ", num_bbox)
print("Statistic for image-level data bbox: ", num_bbox_im)
return num_bbox, num_bbox_im | 782314baeab7fbec36c9ea56bcec57d5a508a918 | 3,651,196 |
def github_youtube_config_files():
"""
Function that returns a list of pyGithub files with youtube config channel data
Returns:
A list of pyGithub contentFile objects
"""
if settings.GITHUB_ACCESS_TOKEN:
github_client = github.Github(settings.GITHUB_ACCESS_TOKEN)
else:
github_client = github.Github()
repo = github_client.get_repo(CONFIG_FILE_REPO)
return repo.get_contents(CONFIG_FILE_FOLDER, ref=settings.OPEN_VIDEO_DATA_BRANCH) | 166ca3653173feee7513097c9313ebb5ab3b4d17 | 3,651,197 |
def reverse_uint(uint,num_bits=None):
"""
This function takes an unsigned integer and reverses all of its bits.
num_bits is number of bits to assume are present in the unsigned integer.
If num_bits is not specified, the minimum number of bits needed to represent the unsigned integer is assumed.
If num_bits is specified, it must be greater than the minimum number of bits needed to represent the unsigned integer.
>>> reverse_uint(3,8)
192
>>> bin(192)
'0b11000000'
"""
if not isinstance(uint,int):
raise Exception('input must be an integer, not %s' % repr(type(uint)))
if uint < 0:
raise Exception('input must be non-negative: %s' % repr(uint))
if min_bits_uint(uint) > num_bits:
raise Exception('Input uint must be storable in at most num_bits (%d) number of bits, but requires %d bits' % (num_bits,min_bits_uint(uint)))
result = 0
extracted_bits = 0
while (num_bits is not None and extracted_bits < num_bits) or uint != 0:
uint,rem = divmod(uint,2)
result = (result<<1) | rem
extracted_bits += 1
return result | a3197aa3f199a5677a15e053c0455c0216d07827 | 3,651,198 |
def min_by_tail(lhs, ctx):
"""Element ↓
(any) -> min(a, key=lambda x: x[-1])
"""
lhs = iterable(lhs, ctx=ctx)
if len(lhs) == 0:
return []
else:
return min_by(lhs, key=tail, cmp=less_than, ctx=ctx) | 88fce303e6ff95f89e57ebd05c575810238497ea | 3,651,199 |
def SecureBytesEqual( a, b ):
"""Returns the equivalent of 'a == b', but avoids content based short
circuiting to reduce the vulnerability to timing attacks."""
# Consistent timing matters more here than data type flexibility
# We do NOT want to support py2's str type because iterating over them
# (below) produces different results.
if type( a ) != bytes or type( b ) != bytes:
raise TypeError( "inputs must be bytes instances" )
# We assume the length of the expected digest is public knowledge,
# thus this early return isn't leaking anything an attacker wouldn't
# already know
if len( a ) != len( b ):
return False
# We assume that integers in the bytes range are all cached,
# thus timing shouldn't vary much due to integer object creation
result = 0
for x, y in zip( a, b ):
result |= x ^ y
return result == 0 | 1ba46089d94f544b53a47e09dbbcf95dd5b594a0 | 3,651,200 |
import base64
import pickle
def encode(something):
"""
We encode all messages as base64-encoded pickle objects in case
later on, we want to persist them or send them to another system.
This is extraneous for now.
"""
return base64.b64encode(pickle.dumps(something)) | 89c9c855b8b66aadc55c1602e133906d3220691a | 3,651,201 |
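The matching decoder is not part of the row above; a minimal companion sketch, assuming the same base64 + pickle scheme (and trusted input, since unpickling untrusted data is unsafe).
import base64
import pickle
def decode(encoded):
    """Reverse of encode(): base64-decode, then unpickle."""
    return pickle.loads(base64.b64decode(encoded))

assert decode(encode({"hello": "world"})) == {"hello": "world"}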
def installRecommendation(install, uninstall, working_set=working_set, tuples=False):
"""Human Readable advice on which modules have to be installed on
current Working Set.
"""
installList = []
for i in install:
is_in = False
for p in working_set:
if i[0] == p.key and i[1] == p.version:
is_in = True
break
if not is_in:
if not tuples:
print('~~ Install: '+i[0]+' version '+i[1])
else:
installList.append((i[0], i[1]))
for u in uninstall:
is_in = False
for p in working_set:
if u[0] == p.key and u[1] == p.version:
is_in = True
break
if is_in:
if not tuples:
print('~~ Uninstall: '+u[0]+' version '+u[1])
return installList | bf3083d4bcb50bdc27c382ccd9ea1dfc7b8cdb71 | 3,651,203 |
from numpy import sin, cos
def obsangle(thetas, phis, alpha_obs):
"""
Return the cosine of the observer angle for the different shockwave segments and and
and observer at and angle alpha_obs with respect to the jet axis
(contained in yz plane)
"""
#u_obs_x, u_obs_y, u_obs_z = 0., sin(alpha_obs), cos(alpha_obs)
u_obs_y, u_obs_z = sin(alpha_obs), cos(alpha_obs)
#seg_x =
seg_y = sin(thetas)*sin(phis)
seg_z = cos(thetas)
#return arccos(u_obs_x*seg_x + u_obs_y*seg_y + u_obs_z*seg_z)
return u_obs_y*seg_y + u_obs_z*seg_z | 6fc03a386a97f63d3ad10d291d1528bf7fb45720 | 3,651,204 |
import tensorflow as tf
def _image_tensor_input_placeholder(input_shape=None):
"""Returns input placeholder and a 4-D uint8 image tensor."""
if input_shape is None:
input_shape = (None, None, None, 3)
input_tensor = tf.placeholder(
dtype=tf.uint8, shape=input_shape, name='image_tensor')
return input_tensor, input_tensor | bd3c339da4b8f0eea482687cecf28a4625d3f84c | 3,651,205 |
def _load_top_bonds(f, topology, **kwargs):
"""Take a mol2 file section with the heading '@<TRIPOS>BOND' and save to the topology.bonds attribute."""
while True:
line = f.readline()
if _is_end_of_rti(line):
line = line.split()
bond = Bond(
connection_members=(
topology.sites[int(line[1]) - 1],
topology.sites[int(line[2]) - 1],
)
)
topology.add_connection(bond)
else:
break
return line | e6605428a99720ff0d773a1db2a8363d61e38ca3 | 3,651,206 |
def timelength_to_phrase(
timelength: spec.Timelength,
from_representation: spec.TimelengthRepresentation = None,
) -> spec.TimelengthPhrase:
"""convert Timelength to TimelengthPhrase
## Inputs
- timelength: Timelength
- from_representation: str representation name of input timelength
## Returns
- TimelengthPhrase timelength
"""
return convert_timelength(
timelength=timelength,
to_representation='TimelengthPhrase',
from_representation=from_representation,
) | a840b69625c968cda4a1e686a61298e2809ffde0 | 3,651,207 |
def order_columns(self: DataFrame, order: str = "asc", by_dtypes: bool = False):
"""
Rearrange the columns in alphabetical order.
An option of rearrangement by dtypes is possible.
:param self:
:param by_dtypes: boolean to rearrange by dtypes first
"""
if order not in ['asc', 'desc']:
raise Exception("'{}' is not an acceptable ordering value, you can only use {'asc','desc'}".format(order))
if by_dtypes:
dtypes_dict = dict()
for col, dtype in self.dtypes:
dtypes_dict.setdefault(dtype, list())
dtypes_dict[dtype].append(col)
dtypes_dict = dict(sorted(dtypes_dict.items()))
columns = [col for values in dtypes_dict.values()
for col in sorted(values)]
return self.select(columns)
else:
return self.select(sorted(self.columns, reverse=False if order == "asc" else True)) | c05f4b13b26b041c86816c15a375943713a6dcdb | 3,651,208 |
import random
from itertools import chain
def reorderWithinGroup(players_by_wins):
"""Shuffle players with the same score.
Args:
players_by_wins: a dictionary returned by splitByScore().
Returns a list of the re-ordered player ids.
"""
for score in players_by_wins.keys():
random.shuffle(players_by_wins[score])
# players_by_wins is a dictionary with scores as keys. When
# converting to a list, need to make sure it is sorted by score,
# from highest to lowest.
players_ordered = []
    score_keys = sorted(players_by_wins.keys(), reverse=True)
for score in score_keys:
players_ordered.append(players_by_wins[score])
# Convert back to a list.
players_ordered = list(chain.from_iterable(players_ordered))
# Return the ordered ids.
ordered_ids = [x[0] for x in players_ordered]
return(ordered_ids) | bd0afe4db36bf815ab7861e53cd674bd49e81775 | 3,651,210 |
def selection(population, method):
"""Apply selection method of a given population.
Args:
population: (list of) plans to apply the selection on.
method: (str) selection method:
- rws (Roulette Wheel Selection)
- sus (Stochastic Universal Selection)
- ts (Tournament Selection)
Returns:
(list of) plans representing the new pool
"""
if method == "rws":
return roulette_wheel_selection(population)
elif method == "sus":
return stochastic_universal_sampling(population)
elif method == "ts":
return tournament_selection(population) | e5b05c62530babfd48b5061152b9f88e4a463456 | 3,651,211 |
def PSL_prefix(row, cols):
"""Returns the prefix a domain (www.images for www.images.example.com)"""
psl_data = psl.search_tree(row[cols[0]])
if psl_data:
return(psl_data[1], psl_data[0])
return (None, None) | e5e7809acae3be60eca9f0cd65aec7a93ac087de | 3,651,212 |
import numpy as np
import tensorflow as tf
def build_model(sess,t,Y,model='sde',sf0=1.0,ell0=[2,2],sfg0=1.0,ellg0=[1e5],
W=6,ktype="id",whiten=True,
fix_ell=False,fix_sf=False,fix_Z=False,fix_U=False,fix_sn=False,
fix_ellg=False,fix_sfg=False,fix_Zg=True,fix_Ug=False):
"""
Args:
sess: TensowFlow session needed for initialization and optimization
t: Python array of numpy vectors storing observation times
Y: Python array of numpy matrices storing observations. Observations
are stored in rows.
model: 'sde' or 'ode'
sf0: Integer initial value of the signal variance of drift GP
ell0: Python/numpy array of floats for the initial value of the
lengthscale of drift GP
sfg0: Integer initial value of the signal variance of diffusion GP
ellg0: Python/numpy array of a single float for the initial value of the
lengthscale of diffusion GP
W: Integer denoting the width of the inducing point grid. If the problem
dimension is D, total number of inducing points is W**D
ktype: Kernel type. We have made experiments only with Kronecker kernel,
denoted by 'id'. The other kernels are not supported.
whiten: Boolean. Currently we perform the optimization only in the
white domain
fix_ell: Boolean - whether drift GP lengthscale is fixed or optimized
fix_sf: Boolean - whether drift GP signal variance is fixed or optimized
fix_Z: Boolean - whether drift GP inducing locations are fixed or optimized
fix_U: Boolean - whether drift GP inducing vectors are fixed or optimized
fix_sn: Boolean - whether noise variance is fixed or optimized
fix_ellg: Boolean - whether diffusion GP lengthscale is fixed or optimized
fix_sfg: Boolean - whether diffusion GP signal variance is fixed or optimized
fix_Zg: Boolean - whether diffusion GP inducing locations are fixed or optimized
fix_Ug: Boolean - whether diffusion GP inducing vectors are fixed or optimized
Returns:
npde: A new NPDE model
"""
print('Model being initialized...')
def init_U0(Y=None,t=None,kern=None,Z0=None,whiten=None):
Ug = (Y[1:,:] - Y[:-1,:]) / np.reshape(t[1:]-t[:-1],(-1,1))
with tf.name_scope("init_U0"):
tmp = NPODE(Z0=Y[:-1,:],U0=Ug,sn0=0,kern=kern,jitter=0.25,whiten=False,
fix_Z=True,fix_U=True,fix_sn=True)
U0 = tmp.f(X=Z0)
if whiten:
Lz = tf.cholesky(kern.K(Z0))
U0 = tf.matrix_triangular_solve(Lz, U0, lower=True)
U0 = sess.run(U0)
return U0
D = len(ell0)
Nt = len(Y)
x0 = np.zeros((Nt,D))
Ys = np.zeros((0,D))
for i in range(Nt):
x0[i,:] = Y[i][0,:]
Ys = np.vstack((Ys,Y[i]))
maxs = np.max(Ys,0)
mins = np.min(Ys,0)
grids = []
for i in range(D):
grids.append(np.linspace(mins[i],maxs[i],W))
vecs = np.meshgrid(*grids)
Z0 = np.zeros((0,W**D))
for i in range(D):
Z0 = np.vstack((Z0,vecs[i].T.flatten()))
Z0 = Z0.T
tmp_kern = OperatorKernel(sf0,ell0,ktype="id",fix_ell=True,fix_sf=True)
U0 = np.zeros(Z0.shape,dtype=np.float64)
for i in range(len(Y)):
U0 += init_U0(Y[i],t[i],tmp_kern,Z0,whiten)
U0 /= len(Y)
sn0 = 0.5*np.ones(D)
Ug0 = np.ones([Z0.shape[0],1])*0.01
ell0 = np.asarray(ell0,dtype=np.float64)
ellg0 = np.asarray(ellg0,dtype=np.float64)
kern = OperatorKernel(sf0=sf0, ell0=ell0, ktype=ktype, fix_ell=fix_ell, fix_sf=fix_sf)
    if model == 'ode':
npde = NPODE(Z0=Z0, U0=U0, sn0=sn0, kern=kern, whiten=whiten, fix_Z=fix_Z, fix_U=fix_U, fix_sn=fix_sn)
sess.run(tf.global_variables_initializer())
return npde
    elif model == 'sde':
diffus = BrownianMotion(sf0=sfg0, ell0=ellg0, U0=Ug0, Z0=Z0, whiten=whiten,\
fix_sf=fix_sfg, fix_ell=fix_ellg, fix_Z=fix_Zg, fix_U=fix_Ug)
npde = NPSDE(Z0=Z0, U0=U0, sn0=sn0, kern=kern, diffus=diffus, whiten=whiten,\
fix_Z=fix_Z, fix_U=fix_U, fix_sn=fix_sn)
sess.run(tf.global_variables_initializer())
return npde
else:
raise NotImplementedError("model parameter should be either 'ode' or 'sde', not {:s}\n".format(model)) | f5d691aca815df25d1c34ae9c7fa9810c3aae1ab | 3,651,213 |
import re
def paginatedUrls(pattern, view, kwargs=None, name=None):
"""
Takes a group of url tuples and adds paginated urls.
Extends a url tuple to include paginated urls.
Currently doesn't handle url() compiled patterns.
"""
results = [(pattern, view, kwargs, name)]
tail = ''
mtail = re.search('(/+\+?\\*?\??\$?)$', pattern)
if mtail:
tail = mtail.group(1)
pattern = pattern[:len(pattern) - len(tail)]
results += [(pattern + "/(?P<page_number>\d+)" + tail, view, kwargs)]
results += [(pattern + "/(?P<page_number>\d+)\|(?P<page_limit>\d+)" +
tail, view, kwargs)]
if not kwargs:
kwargs = dict()
kwargs['page_limit'] = 0
results += [(pattern + "/?\|(?P<page_limit>all)" + tail, view, kwargs)]
return results | 2102309434e02e0df49888978d41ffce2de0e2dc | 3,651,214 |
from typing import Tuple
def _to_intraday_trix(date: pd.Timestamp, provider: providers.DataProvider,
period: int)-> Tuple[nd.NDArray, nd.NDArray]:
"""
Returns an ndarray containing the TRIX for a given +data+ and +provider+,
averaged across a given +period+.
"""
# First, get the triple-smoothed 15 period exponential moving average
data = _get_intraday_data(date, provider)
ewm1 = pd.Series.ewm(data['close'], span=period).mean()
ewm2 = pd.Series.ewm(ewm1, span=period).mean()
ewm3 = pd.Series.ewm(ewm2, span=period).mean()
# Return the percentage change from last period
ewm3_yesterday = ewm3.shift(periods=1, fill_value=ewm3[0])
trix = (ewm3 / ewm3_yesterday) - 1
return nd.array(trix.values, utils.try_gpu(0)) | 14c621afa6128fb3b33058b357e2ca79723a42f9 | 3,651,215 |
def _decode(integer):
"""
Decode the given 32-bit integer into a MAX_LENGTH character string according
to the scheme in the specification. Returns a string.
"""
if integer.bit_length() > 32:
raise ValueError("Can only decode 32-bit integers.")
decoded_int = 0
# Since each byte has its bits distributed along the given integer at
# BIT_SHIFT intervals, we'll get the bits from one byte at a time.
for input_start in range(4):
# Move to the beginning of the correct output byte.
output_pos = input_start * 8
# Read the bits from the input at BIT_SHIFT intervals, lowest-order
# bits first.
for input_bit in range(input_start, integer.bit_length(), BIT_SHIFT):
current_bit = getBit(integer, input_bit)
# If the current bit is 1, set the corresponding bit in the result.
# Otherwise, we can leave the result bit as 0.
if current_bit:
decoded_int = setBit(decoded_int, output_pos)
# Move to the next position in the output byte.
output_pos += 1
# Get a byte array from the decoded integer. We're reversing the byte order
# because we read the input integer from lowest-order bit to highest-order.
decoded_bytes = decoded_int.to_bytes(4, byteorder="little")
# Get the characters represented by each byte, ignoring empty bytes.
chars = []
for byte in decoded_bytes:
if byte:
chars.append(chr(byte))
return "".join(chars) | 156b75e8907bbcf6ae69f0a3429fb29777651f8e | 3,651,216 |
def register(name):
"""Registers a new data loader function under the given name."""
def add_to_dict(func):
_LOADERS[name] = func
return func
return add_to_dict | ea672cdf3c8d34f090d98e2498b77ea929aee6e6 | 3,651,217 |
def get_api_host(staging):
"""If 'staging' is truthy, return staging API host instead of prod."""
return STAGING_API_HOST if staging else PROD_API_HOST | d2b0003669422ef4481ffe4db76497de1485d0f7 | 3,651,218 |
def delete_user(auth, client):
"""
Delete a user
:auth: dict
:client: users_client object
"""
log("What user you want to delete?")
user_to_delete = find_user_by_username(auth, client)
if user_to_delete is False:
log("Could not find user.", serv="ERROR")
return False
confirmation = yes_or_no("You really want to delete %s?" % user_to_delete["username"])
if confirmation is False:
log("Aborted...")
return False
try:
client.users_user_id_delete(int(user_to_delete["id"]))
log("Successfully deleted user %s" % user_to_delete["username"], serv="SUCCESS")
return True
except:
log("Could not delete user %s. Error by backend" % user_to_delete["username"], serv="ERROR")
return False | db51c7d7d9f5fbd164bde010f3887f43e998fbef | 3,651,220 |
def remove_empty(s):
"""\
Remove empty strings from a list.
>>> a = ['a', 2, '', 'b', '']
>>> remove_empty(a)
[{u}'a', 2, {u}'b']
"""
while True:
try:
s.remove('')
except ValueError:
break
return s | 98778e4cc90f11b9b74ac6d26b203cbfc958fd7b | 3,651,222 |
import math
from typing import Tuple
from typing import Any
import itertools
def quantum_ia(nb_stick: int, past: list, backend_sim: Aer) -> list:
"""Quantum IA.
Args:
nb_stick: nb of stick left
past: past turn
backend_sim: backend for quantum
Return: Prediction to use
"""
def quadratibot(nb_stick: int, past: list, backend_sim: Aer) -> list:
"""Quadratic + QAOA function
Args:
nb_stick: nb of stick left
past: past turn
backend_sim: backend for quantum
Return: Gates to use
"""
def get_quantum_solution_for(
quadprog: QuadraticProgram, quantumInstance: QuantumInstance, optimizer=None
):
_eval_count = 0
def callback(eval_count, parameters, mean, std):
nonlocal _eval_count
_eval_count = eval_count
# Create solver and optimizer
solver = QAOA(
optimizer=optimizer,
quantum_instance=quantumInstance,
callback=callback,
max_evals_grouped=3,
)
# Create optimizer for solver
optimizer = MinimumEigenOptimizer(solver)
# Get result from optimizer
result = optimizer.solve(quadprog)
return result, _eval_count
# Check number of stick max
if nb_stick >= 3:
max_stick = 3
else:
max_stick = nb_stick
# Check the past
poten_stick = nb_stick
for i in range(len(past)):
if past[i] == "/":
poten_stick += 0.5
if past[i] == "¬":
u = 1
if len(past) - 1 >= i + u:
while past[i + u] == "¬":
u += 1
if past[i + u] == "/":
poten_stick += 0.5
# Check last turn
last_st = 0
if past[0] == "¬":
u = 1
while past[0 + u] == "¬":
u += 1
if past[0 + u] == "/":
last_st = 0.5
if past[0] == "/":
last_st = 0.5
quadprog = QuadraticProgram(name="qnim")
quadprog.integer_var(name="x", lowerbound=0, upperbound=max_stick)
quadprog.integer_var(name="sup", lowerbound=0, upperbound=max_stick)
quadprog.integer_var(name="intric", lowerbound=0, upperbound=max_stick)
quadprog.maximize(
linear={"x": 1, "sup": 0.5, "intric": last_st},
quadratic={("sup", "intric"): 0.5},
)
# General constraints
quadprog.linear_constraint(
linear={"x": 1, "sup": 1, "intric": 1}, sense=">", rhs=0, name="gen_min"
)
quadprog.linear_constraint(
linear={"x": 1, "sup": 1, "intric": 1},
sense="<=",
rhs=max_stick,
name="gen_max",
)
# Mod4 constraints
if math.ceil(poten_stick % 4) - 0.5 > 0:
quadprog.linear_constraint(
linear={"x": 1, "sup": 1},
sense="<=",
rhs=math.ceil(poten_stick % 4),
name="qua_mod4",
)
if nb_stick % 4 - 1 > 0:
quadprog.linear_constraint(
linear={"x": 1, "sup": 1, "intric": 1},
sense="<=",
rhs=nb_stick % 4 - 1,
name="cla_mod4",
)
# Get QAOA result
final_result = []
simulator_instance = QuantumInstance(backend=backend_sim)
qaoa_result, qaoa_eval_count = get_quantum_solution_for(
quadprog, simulator_instance
)
# Format and print result
for cropHectares, cropName in zip(qaoa_result.x, qaoa_result.variable_names):
for i in range(int(cropHectares)):
final_result.append(cropName)
return final_result
def gronim(output: list, backend_sim: Aer) -> Tuple[Any, ...]:
"""Grover for best predict.
Args:
output: every possible prediction
backend_sim: backend for quantum
Return: best predict
"""
def diffuser(nqubits):
qc = QuantumCircuit(nqubits)
for qubit in range(nqubits):
qc.h(qubit)
for qubit in range(nqubits):
qc.x(qubit)
qc.h(nqubits - 1)
qc.mct(list(range(nqubits - 1)), nqubits - 1)
qc.h(nqubits - 1)
for qubit in range(nqubits):
qc.x(qubit)
for qubit in range(nqubits):
qc.h(qubit)
U_s = qc.to_gate()
U_s.name = "$Diff$"
return U_s
def ram(nqubits, lists_final):
list_qram = [i for i in range(nqubits)]
qram = QuantumRegister(nqubits, "qram")
qalgo = QuantumRegister(nqubits, "algo")
qc = QuantumCircuit(qram, qalgo)
control_h = MCMT("h", nqubits, 1).to_gate()
map_ram_2 = [["x", "x"], ["o", "x"], ["x", "o"], ["o", "o"]]
map_ram_3 = [
["x", "x", "x"],
["o", "x", "x"],
["x", "o", "x"],
["o", "o", "x"],
["x", "x", "o"],
["o", "x", "o"],
["x", "o", "o"],
["o", "o", "o"],
]
if len(bin(len(lists_final))[2:]) == 3:
map_ram = map_ram_3
if len(bin(len(lists_final))[2:]) == 2:
map_ram = map_ram_2
for i, m_ram in zip(range(len(lists_final)), map_ram):
# qc.barrier()
for index, gate in enumerate(m_ram):
if gate == "x":
qc.x(qram[index])
if lists_final[i][0] == "x" or lists_final[i][0] == "sup":
qc.mcx(qram, qalgo[0])
else:
qc.append(control_h, [*list_qram, qalgo[0]])
if len(lists_final[i]) == 3:
if lists_final[i][1] == "x":
qc.mcx(qram, qalgo[1])
elif lists_final[i][1] == "intric":
qc.mcx([qram[0], qram[1], qram[2], qalgo[0]], qalgo[1])
else:
qc.append(control_h, [*list_qram, qalgo[1]])
if lists_final[i][-1] == "x":
qc.mcx(qram, qalgo[-1])
elif lists_final[i][-1] == "intric":
if len(lists_final[i]) == 3:
qc.mcx([qram[0], qram[1], qram[2], qalgo[1]], qalgo[-1])
else:
qc.mcx([qram[0], qram[1], qalgo[0]], qalgo[-1])
else:
qc.append(control_h, [*list_qram, qalgo[-1]])
for index, gate in enumerate(m_ram):
if gate == "x":
qc.x(qram[index])
# print(qc.draw())
U_s = qc.to_gate()
U_s.name = "$Qram$"
return U_s
def algo(nqubits):
qc = QuantumCircuit(nqubits)
qc.h(0)
qc.x(0)
U_s = qc.to_gate()
U_s.name = "$Algo$"
return U_s
lists_final = []
lists_full = list(itertools.permutations(output, len(output)))
for u in lists_full:
if u not in lists_final:
lists_final.append(u)
len_qram = len(bin(len(lists_final))[2:])
qram = QuantumRegister(len_qram, "qram")
qalgo = QuantumRegister(len_qram, "algo")
oracle = QuantumRegister(1, "oracle")
c = ClassicalRegister(len_qram, "measurement")
qc = QuantumCircuit(qram, qalgo, oracle, c)
# Init
qc.h(qram)
qc.x(oracle)
qc.h(oracle)
qc.barrier()
# Qram
qc.append(ram(len_qram, lists_final), [*[i for i in range(len_qram * 2)]])
qc.barrier()
# Algorithm
qc.append(algo(len_qram), [*[i for i in range(len_qram, len_qram * 2)]])
qc.barrier()
# Oracle
qc.mcx([qalgo[0], qalgo[-1]], oracle)
qc.barrier()
# Revert Algo + Qram
qc.append(
algo(len_qram).inverse(), [*[i for i in range(len_qram, len_qram * 2)]]
)
qc.append(
ram(len_qram, lists_final).inverse(), [*[i for i in range(len_qram * 2)]]
)
qc.barrier()
# Diffuser
qc.append(diffuser(len_qram), [*[i for i in range(len_qram)]])
# Measure of the outputs
qc.barrier()
qc.measure(qram, c)
job = execute(qc, backend_sim, shots=512, memory=True)
result_job = job.result()
result_count = result_job.get_counts()
result_memory = job.result().get_memory()
if len(result_count) == 1:
final_result = int(result_memory[0], 2)
else:
final_result = max(result_count, key=result_count.get)
final_result = int(final_result, 2)
to_return = lists_final[final_result]
return to_return
gates = quadratibot(nb_stick, past, backend_sim)
if len(gates) < 2:
predict = gates
elif len(set(gates)) != len(gates):
predict = gates
else:
predict = gronim(gates, backend_sim)
return predict | 7e13f554e6eb901ec43ec80bf346005f17ec55d5 | 3,651,223 |
from keras.models import Sequential
from keras.layers import Conv2D, MaxPool2D, Flatten, Dense
def construct_model_cnn_gram(num_classes, input_shape):
"""
construct model architecture
    :param num_classes: number of output classes of the model [int]
    :param input_shape: shape of the input tensor, e.g. (height, width, channels) [tuple]
    :return: model - Keras model object
"""
model = Sequential()
model.add(Conv2D(64, (3, 3), activation='relu', kernel_initializer='he_uniform', padding='same', input_shape=input_shape))
model.add(Conv2D(64, (3, 3), activation='relu', kernel_initializer='he_uniform', padding='same'))
model.add(MaxPool2D((2, 2)))
model.add(Conv2D(64, (3, 3), activation='relu', kernel_initializer='he_uniform', padding='same'))
model.add(Conv2D(64, (3, 3), activation='relu', kernel_initializer='he_uniform', padding='same'))
model.add(MaxPool2D((2, 2)))
model.add(Flatten())
model.add(Dense(128, activation='relu', kernel_initializer='he_uniform'))
model.add(Dense(num_classes, activation='softmax'))
return model | ddffb66efe6b4f94a9c2a0ccc976206eebb503a6 | 3,651,224 |
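A usage sketch, assuming Keras is installed; the 10-class, 64x64 single-channel input shape is an illustrative assumption.
model = construct_model_cnn_gram(num_classes=10, input_shape=(64, 64, 1))
model.compile(optimizer='adam', loss='categorical_crossentropy', metrics=['accuracy'])
model.summary()   # two conv blocks, a 128-unit dense layer, then a softmax output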
def get_basic_data(match_info, event):
"""input: dictionary | output: dictionary updated"""
match_info['status'] = event['status']['type']['name']
match_info['start_time'] = event['date']
match_info['match_id'] = event['id']
match_info['time'] = event['status']['displayClock']
match_info['period'] = event['status']['period']
match_info['display_period'] = give_display_period(match_info['period'])
match_info['detail'] = event['status']['type']['detail']
match_info['match_type_id'] = event['status']['type']['id']
return match_info | fae755e195b5bbf12c9fccf20b9ba2c2f9e700c6 | 3,651,225 |
def convert_blockgrad(node, **kwargs):
""" Skip operator """
return create_basic_op_node('Identity', node, kwargs) | 12803d387a30884da08779b878e9cdf3e06226a7 | 3,651,227 |
def is_complete(node):
"""
all children of a sum node have same scope as the parent
"""
assert node is not None
for sum_node in reversed(get_nodes_by_type(node, Sum)):
nscope = set(sum_node.scope)
if len(sum_node.children) == 0:
return False, "Sum node %s has no children" % sum_node.id
for child in sum_node.children:
if nscope != set(child.scope):
return False, "children of (sum) node %s do not have the same scope as parent" % sum_node.id
return True, None | a92741f4770757518e91a44e757e4d8037958066 | 3,651,228 |
import torch
def step_inplace(Ts, ae, target, weight, depth, intrinsics, lm=.0001, ep=10.0):
""" dense gauss newton update with computing similiarity matrix """
pts = pops.inv_project(depth, intrinsics)
pts = pts.permute(0,3,1,2).contiguous()
# tensor representation of SE3
se3 = Ts.data.permute(0,3,1,2).contiguous()
ae = ae / 8.0
# build the linear system
H, b = SE3BuilderInplace.apply(se3, ae, pts, target, weight, intrinsics)
I = torch.eye(6, device=H.device)[...,None,None]
H = H + (lm*H + ep) * I # damping
dx = SE3Solver.apply(H, b)
dx = dx.permute(0,3,4,1,2).squeeze(-1).contiguous()
Ts = SE3.exp(dx) * Ts
return Ts | 9f1fc6911d1fb11bc6956d63dda8f59b8f6654cd | 3,651,229 |
import numpy as np
def _compute_extent_axis(axis_range, grid_steps):
"""Compute extent for matplotlib.pyplot.imshow() along one axis.
:param axis_range: 1D numpy float array with 2 elements; axis range for plotting
:param grid_steps: positive integer, number of grid steps in each dimension
:return: 1D numpy float array with 2 elements
"""
delta = (axis_range[1] - axis_range[0]) / (2.0 * (grid_steps - 1))
# the range is covered by grid_steps - 1 pixels with one half of a pixel overlapping on each side; delta is half the
# pixel width
return np.array([axis_range[0] - delta, axis_range[1] + delta]) | e83a251b4055639435342d19960d1e75a6d33ba8 | 3,651,230 |
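# Hedged worked example (added): for an axis range [0, 1] sampled at 5 grid steps,
# delta = 1 / (2 * (5 - 1)) = 0.125, so imshow() gets the extent [-0.125, 1.125],
# which centres each pixel on its grid point.
extent = _compute_extent_axis(np.array([0.0, 1.0]), grid_steps=5)
# extent -> array([-0.125,  1.125])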
import hashlib
def md5_hash_file(fh):
"""Return the md5 hash of the given file-object"""
md5 = hashlib.md5()
while True:
data = fh.read(8192)
if not data:
break
md5.update(data)
return md5.hexdigest() | f572ec27add8024e5fa8b9a82b5d694905e4d0f8 | 3,651,233 |
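# Hedged usage sketch (added): the file must be opened in binary mode; the path is
# illustrative.
with open("example.bin", "rb") as fh:
    print(md5_hash_file(fh))  # 'd41d8cd98f00b204e9800998ecf8427e' for an empty file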
def rule(request):
"""Administration rule content"""
if request.user.is_authenticated():
return helpers.render_page('rule.html', show_descriptions=True)
else:
return redirect('ahmia.views_admin.login') | 514f767235660b812126c5cfc2dabeec40a7b22a | 3,651,234 |
def get_tenant_info(schema_name):
"""
    Return the first Pharmacy tenant object matching schema_name.
"""
with schema_context(schema_name):
return Pharmacy.objects.filter(schema_name=schema_name).first() | 388d51c50fec60a331822bd157da1d53c88cc170 | 3,651,235 |
def get_lr(curr_epoch, hparams, iteration=None):
"""Returns the learning rate during training based on the current epoch."""
assert iteration is not None
batches_per_epoch = int(hparams.train_size / hparams.batch_size)
if 'svhn' in hparams.dataset and 'wrn' in hparams.model_name:
lr = step_lr(hparams.lr, curr_epoch)
elif 'cifar' in hparams.dataset or ('svhn' in hparams.dataset and
'shake_shake' in hparams.model_name):
lr = cosine_lr(hparams.lr, curr_epoch, iteration, batches_per_epoch,
hparams.num_epochs)
else:
lr = hparams.lr
tf.logging.log_first_n(tf.logging.WARN, 'Default not changing learning rate.', 1)
return lr | 42df4a185dd41d0eb02845d78dbd061f4658fba8 | 3,651,236 |
def delete_page(shortname):
"""Delete page from the database."""
    # Check that the page exists
if get_page(shortname) is None:
abort(404)
if shortname is None:
flash("No parameters for page deletion!")
return redirect(url_for("admin"))
else:
query_db("DELETE FROM pages WHERE shortname = ?", (shortname,))
commit_db()
flash("Page '" + shortname + "' deleted!")
return redirect(url_for("admin")) | b1bb9526832209e1cddf1c86851aeef7c6701d3d | 3,651,238 |
def has_property(name, match=None):
"""Matches if object has a property with a given name whose value satisfies
a given matcher.
:param name: The name of the property.
:param match: Optional matcher to satisfy.
This matcher determines if the evaluated object has a property with a given
name. If no such property is found, ``has_property`` is not satisfied.
If the property is found, its value is passed to a given matcher for
evaluation. If the ``match`` argument is not a matcher, it is implicitly
wrapped in an :py:func:`~hamcrest.core.core.isequal.equal_to` matcher to
check for equality.
If the ``match`` argument is not provided, the
:py:func:`~hamcrest.core.core.isanything.anything` matcher is used so that
``has_property`` is satisfied if a matching property is found.
Examples::
has_property('name', starts_with('J'))
has_property('name', 'Jon')
has_property('name')
"""
if match is None:
match = anything()
return IsObjectWithProperty(name, wrap_shortcut(match)) | a5c562b1f5a36fc2c591d700d84b5ee9ca54ccde | 3,651,239 |
import glob
def gain_stability_task(run, det_name, fe55_files):
"""
This task fits the Fe55 clusters to the cluster data from each frame
sequence and writes a pickle file with the gains as a function of
sequence number and MJD-OBS.
Parameters
----------
run: str
Run number.
det_name: str
Sensor name in the focal plane, e.g., 'R22_S11'.
fe55_files: list
Raw Fe55 for the sensor being consider. The MJD-OBS values
will be extracted from these files.
Returns:
(pandas.DataFrame, str), i.e., a tuple of the data frame containing
the gain sequence and the file name of the output pickle file.
"""
file_prefix = make_file_prefix(run, det_name)
# Extract MJD-OBS values into a dict to provide look up table in
# case there are missing sequence frames in the psf results table.
mjd_obs = dict()
for item in fe55_files:
with fits.open(item) as hdus:
mjd_obs[hdus[0].header['SEQNUM']] = hdus[0].header['MJD-OBS']
psf_results_file = sorted(glob.glob(f'{file_prefix}_psf_results*.fits'))[0]
try:
df = sensorTest.gain_sequence(det_name, psf_results_file)
except ValueError as eobj:
print("ValueError in gain_stability_task:", eobj)
return None
df['mjd'] = [mjd_obs[seqnum] for seqnum in df['seqnum']]
outfile = f'{file_prefix}_gain_sequence.pickle'
df.to_pickle(outfile)
return df, outfile | a285879fc963342ed51b61ec9fae8ac08c089bc6 | 3,651,240 |
import datetime
import iso8601
from six import string_types  # assumed source of string_types
def get_datetime(timestamp):
"""Parse several representations of time into a datetime object"""
if isinstance(timestamp, datetime.datetime):
# Timestamp is already a datetime object.
return timestamp
elif isinstance(timestamp, (int, float)):
try:
# Handle Unix timestamps.
return datetime.datetime.fromtimestamp(timestamp)
except ValueError:
pass
try:
# Handle Unix timestamps in milliseconds.
return datetime.datetime.fromtimestamp(timestamp / 1000)
except ValueError:
pass
elif isinstance(timestamp, string_types):
try:
timestamp = float(timestamp)
except (ValueError, TypeError):
pass
else:
# Timestamp is probably Unix timestamp given as string.
return get_datetime(timestamp)
try:
# Try to parse as string date in common formats.
return iso8601.parse_date(timestamp)
except:
pass
    # Nothing matched; give up and report the failure.
raise ValueError("Couldn't extract date object from %r" % timestamp) | d8277a1de3876106de02b9d75e02369061261996 | 3,651,241 |
def conda_installed_files(prefix, exclude_self_build=False):
"""
Return the set of files which have been installed (using conda) into
a given prefix.
"""
res = set()
for dist in install.linked(prefix):
meta = install.is_linked(prefix, dist)
if exclude_self_build and 'file_hash' in meta:
continue
res.update(set(meta['files']))
return res | e81051e19f7829bf149c71f0533bd51c80c7e2a1 | 3,651,242 |
def computeStarsItembased(corated, target_bid, model):
"""
corated - {bid: star, ...}
"""
if corated == None:
return None
corated.pop(target_bid, None)
bid_cor = list(corated.keys())
collect = []
for b in bid_cor:
pair = None
if b < target_bid:
pair = (b, target_bid)
else:
pair = (target_bid, b)
# if b == target_bid:
# print('same:', pair)
w = model.get(pair)
if w != None:
# pair may not have a value in the model
# when b == target_bid, pair have no value, too
collect.append((pair, w, b))
# else:
# collect.append((pair, 0, b))
# print(collect)
collect.sort(key=lambda x: x[1], reverse=True)
neighbors = collect[:N_NEIGHBORS_ITEMBASED]
sum_w = 0
n = 0
for p, w, b in neighbors:
star = corated[b]
n += star * w
sum_w += w
if sum_w == 0:
return None
else:
return n / sum_w | 7b3cd5bd103d35fe09477be96b5cbcc378927c65 | 3,651,243 |
import psutil
import re
def beacon(config):
"""
Monitor the memory usage of the minion
Specify thresholds for percent used and only emit a beacon
if it is exceeded.
.. code-block:: yaml
beacons:
memusage:
- percent: 63%
"""
ret = []
_config = {}
list(map(_config.update, config))
_current_usage = psutil.virtual_memory()
current_usage = _current_usage.percent
monitor_usage = _config["percent"]
if isinstance(monitor_usage, str) and "%" in monitor_usage:
monitor_usage = re.sub("%", "", monitor_usage)
monitor_usage = float(monitor_usage)
if current_usage >= monitor_usage:
ret.append({"memusage": current_usage})
return ret | ac10e85d47fef403148ad2e00c4e10ced2cc226c | 3,651,244 |
import numpy as np
def get_surrounding_points(search_values, point_set):
    """
    For each value p[i] in search_values, returns a pair of surrounding points from point_set.
    The surrounding points are a tuple of the form (lb[i], ub[i]) where
    - lb[i] < p[i] < ub[i] if p[i] is not in point_set, and p[i] is within range
    - lb[i] == p[i] == ub[i] if p[i] in point_set, p[i] < min(point_set), p[i] > max(point_set)
    :param search_values: set of points that need neighbors
    :param point_set: candidate neighbor points; must be sorted in ascending order
:return: list of points in point_set that surround search_values
"""
# http://stackoverflow.com/questions/2566412/find-nearest-value-in-numpy-array
upper_indices = np.searchsorted(point_set, search_values, side="left")
n_points = len(point_set)
n_search = len(search_values)
neighbors = []
for i in range(n_search):
idx = upper_indices[i]
val = search_values[i]
if idx == 0:
n = (point_set[0], point_set[0])
elif idx == n_points:
n = (point_set[-1], point_set[-1])
else:
n = (point_set[idx-1], point_set[idx])
neighbors.append(n)
return neighbors | d4ec055946c19b999ed9523aa260d7bd28ffd269 | 3,651,245 |
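# Hedged example (added): with a sorted grid, interior values get their bracketing grid
# points and out-of-range values are clamped to the nearest endpoint.
grid = np.array([0.0, 1.0, 2.0, 3.0])
get_surrounding_points(np.array([0.5, 2.0, 5.0]), grid)
# -> [(0.0, 1.0), (1.0, 2.0), (3.0, 3.0)]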
def _scriptable_get(obj, name):
""" The getter for a scriptable trait. """
global _outermost_call
saved_outermost = _outermost_call
_outermost_call = False
try:
result = getattr(obj, '_' + name, None)
if result is None:
result = obj.trait(name).default
finally:
_outermost_call = saved_outermost
if saved_outermost:
get_script_manager().record_trait_get(obj, name, result)
return result | 53da00cb49d73065281306c90a51a95e1670e14e | 3,651,246 |
def Iq(q, intercept, slope):
"""
:param q: Input q-value
    :param intercept: Intercept in linear model
:param slope: Slope in linear model
:return: Calculated Intensity
"""
inten = intercept + slope*q
return inten | af3e580e6061089b431ef25f1f08def6f29c8ef6 | 3,651,247 |
def get_optimizer(library, solver):
"""Constructs Optimizer given and optimization library and optimization
solver specification"""
options = {
'maxiter': 100
}
if library == 'scipy':
optimizer = optimize.ScipyOptimizer(method=solver, options=options)
elif library == 'ipopt':
optimizer = optimize.IpoptOptimizer()
elif library == 'dlib':
optimizer = optimize.DlibOptimizer(options=options)
elif library == 'pyswarm':
optimizer = optimize.PyswarmOptimizer(options=options)
elif library == 'cmaes':
optimizer = optimize.CmaesOptimizer(options=options)
elif library == 'scipydiffevolopt':
optimizer = optimize.ScipyDifferentialEvolutionOptimizer(
options=options)
elif library == 'pyswarms':
optimizer = optimize.PyswarmsOptimizer(options=options)
elif library == 'nlopt':
optimizer = optimize.NLoptOptimizer(method=solver, options=options)
elif library == 'fides':
options[fides.Options.SUBSPACE_DIM] = solver[1]
optimizer = optimize.FidesOptimizer(options=options,
hessian_update=solver[0])
else:
raise ValueError(f"Optimizer not recognized: {library}")
return optimizer | be72fc9115abf0d087049debe470139b248ef47f | 3,651,251 |
def _cast_query(query, col):
"""
    Allow different query types (e.g. numerical, list, str).
"""
query = query.strip()
if col in {"t", "d"}:
return query
if query.startswith("[") and query.endswith("]"):
if "," in query:
query = ",".split(query[1:-1])
return [i.strip() for i in query]
if query.isdigit():
return int(query)
try:
return float(query)
except Exception:
return query | 4b6cfc823f8b2e78f343e73683b418112e66f43d | 3,651,252 |
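# Hedged examples (added): illustrative inputs and the values the cast returns.
_cast_query("42", "score")        # -> 42 (int)
_cast_query("3.14", "score")      # -> 3.14 (float)
_cast_query("[a, b, c]", "tags")  # -> ['a', 'b', 'c']
_cast_query("2021-06-01", "d")    # -> '2021-06-01' (columns 't' and 'd' stay strings)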
import torch
def binary_loss(pred_raw,
label_raw,
loss_func,
weight=None,
class_weight=None,
class_weight_norm=False,
reduction='mean',
avg_factor=None,
smooth=1.0):
"""
:param pred: [N, C, *] scores without softmax
:param label: [N, *] in [0, C], 0 stands for background, 1~C stands for pred in 0~C-1
:return: reduction([N])
"""
pred = pred_raw.clone()
label = label_raw.clone()
num_classes = pred.shape[1]
if class_weight is not None:
class_weight = class_weight.float()
if pred.shape != label.shape:
label = _make_one_hot(label, num_classes)
pred = torch.sigmoid(pred)
loss = 0.
for i in range(num_classes):
if isinstance(loss_func, tuple):
loss_function = loss_func[i]
else:
loss_function = loss_func
class_loss = loss_function(pred[:, i], label[:, i], smooth=smooth)
if class_weight is not None:
class_loss *= class_weight[i]
loss += class_loss
if class_weight is not None and class_weight_norm:
loss = loss / torch.sum(class_weight)
else:
loss = loss / num_classes
loss = weight_reduce_loss(loss, weight=weight, reduction=reduction, avg_factor=avg_factor)
return loss | 9154c8e46a48485e643de496554d302f9db294ac | 3,651,253 |
import numpy as np
def showerActivityModel(sol, flux_max, b, sol_max):
""" Activity model taken from: Jenniskens, P. (1994). Meteor stream activity I. The annual streams.
Astronomy and Astrophysics, 287., equation 8.
Arguments:
sol: [float] Solar longitude for which the activity is computed (radians).
flux_max: [float] Peak relative flux.
b: [float] Slope of the shower.
sol_max: [float] Solar longitude of the peak of the shower (radians).
"""
# Compute the flux at given solar longitude
flux = flux_max*10**(-b*np.degrees(np.abs(sol - sol_max)))
return flux | 0a4cc6d8c490b36412140cfeeca0c30464c11577 | 3,651,255 |
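# Hedged worked example (added): with flux_max = 1.0, slope b = 0.05 and the peak at
# sol_max = 140 deg, the relative flux two degrees from the peak is
# 10**(-0.05 * 2) ~= 0.79. The numbers are purely illustrative.
flux = showerActivityModel(np.radians(142.0), flux_max=1.0, b=0.05, sol_max=np.radians(140.0))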
import json
def request_slow_log(db_cluster_id, start_datetime, end_datetime, page_number, page_size):
"""
    Request the slow SQL log records for a DB cluster over a given time window.
    :param db_cluster_id: ID of the DB cluster to query
    :param start_datetime: start of the query window
    :param end_datetime: end of the query window
    :param page_number: page number of the paginated results
    :param page_size: number of records per page
    :return: parsed JSON response as a dict
"""
request = DescribeSlowLogRecordsRequest()
request.set_accept_format('json')
request.set_DBClusterId(db_cluster_id)
    # Format the previous day's date
request.set_StartTime(start_datetime)
request.set_EndTime(end_datetime)
request.set_PageNumber(page_number)
request.set_PageSize(page_size)
response = client.do_action_with_exception(request)
response = str(response, encoding='utf-8')
resp_result = json.loads(response)
return resp_result | e49a006e59f04067eb43f72dfcd29fd71def4fb1 | 3,651,257 |
def pad_omni_image(image, pad_size, image_dims=None):
"""Pad an omni-directional image with the correct image wrapping at the edges.
Parameters
----------
image
Image to perform the padding on *[batch_shape,h,w,d]*
pad_size
Number of pixels to pad.
image_dims
Image dimensions. Inferred from Inputs if None. (Default value = None)
Returns
-------
ret
New padded omni-directional image *[batch_shape,h+ps,w+ps,d]*
"""
if image_dims is None:
image_dims = image.shape[-3:-1]
# BS x PS x W/2 x D
top_left = image[..., 0:pad_size, int(image_dims[1] / 2):, :]
top_right = image[..., 0:pad_size, 0:int(image_dims[1] / 2), :]
# BS x PS x W x D
top_border = _ivy.flip(_ivy.concatenate((top_left, top_right), -2), -3)
# BS x PS x W/2 x D
bottom_left = image[..., -pad_size:, int(image_dims[1] / 2):, :]
bottom_right = image[..., -pad_size:, 0:int(image_dims[1] / 2), :]
# BS x PS x W x D
bottom_border = _ivy.flip(_ivy.concatenate((bottom_left, bottom_right), -2), -3)
# BS x H+2PS x W x D
image_expanded = _ivy.concatenate((top_border, image, bottom_border), -3)
# BS x H+2PS x PS x D
left_border = image_expanded[..., -pad_size:, :]
right_border = image_expanded[..., 0:pad_size, :]
# BS x H+2PS x W+2PS x D
return _ivy.concatenate((left_border, image_expanded, right_border), -2) | e54d732508bea3f969eb2a78ec3238e88e33a30f | 3,651,258 |
from typing import Any
def add_film(
film: FilmCreate,
db: Session = Depends(get_db),
user: User = Depends(get_current_user),
) -> Any:
"""
Add new film
"""
if not user.role.can_add_films:
raise ForbiddenAction
db_film = db.query(Film).filter(Film.name == film.name).first()
if db_film is not None:
raise FilmAlreadyExists
db_film = Film(
name=film.name,
released_year=film.released_year,
owner_user=user,
)
db.add(db_film)
db.flush()
film_model = FilmGet.from_orm(db_film)
db.commit()
return {
'status': 'ok',
'data': film_model,
} | 510f80969570e186233a6313277093b6d939a9ea | 3,651,259 |
from itertools import chain
from PIL import ImageFilter
def load_cube_file(lines, target_mode=None, cls=ImageFilter.Color3DLUT):
"""Loads 3D lookup table from .cube file format.
:param lines: Filename or iterable list of strings with file content.
:param target_mode: Image mode which should be after color transformation.
The default is None, which means mode doesn't change.
:param cls: A class which handles the parsed file.
Default is ``ImageFilter.Color3DLUT``.
"""
name, size = None, None
channels = 3
    file = None
    if isinstance(lines, str):
        # a string argument is treated as a filename; anything else as an iterable of lines
        file = lines = open(lines, 'rt')
try:
iterator = iter(lines)
for i, line in enumerate(iterator, 1):
line = line.strip()
if line.startswith('TITLE "'):
name = line.split('"')[1]
continue
if line.startswith('LUT_3D_SIZE '):
size = [int(x) for x in line.split()[1:]]
if len(size) == 1:
size = size[0]
continue
if line.startswith('CHANNELS '):
channels = int(line.split()[1])
if line.startswith('LUT_1D_SIZE '):
raise ValueError("1D LUT cube files aren't supported")
try:
float(line.partition(' ')[0])
except ValueError:
pass
else:
# Data starts
break
if size is None:
raise ValueError('No size found in the file')
table = []
for i, line in enumerate(chain([line], iterator), i):
line = line.strip()
if not line or line.startswith('#'):
continue
try:
pixel = [float(x) for x in line.split()]
except ValueError:
raise ValueError("Not a number on line {}".format(i))
if len(pixel) != channels:
raise ValueError(
"Wrong number of colors on line {}".format(i))
table.extend(pixel)
finally:
if file is not None:
file.close()
instance = cls(size, table, channels=channels,
target_mode=target_mode, _copy_table=False)
if name is not None:
instance.name = name
return instance | bf87c0e686b689a429297bae4ec84402dd12dc3d | 3,651,260 |
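# Hedged usage sketch (added): thanks to the filename-or-iterable handling above, a tiny
# 2x2x2 cube can be parsed straight from an in-memory list of lines (requires Pillow).
cube_lines = [
    'TITLE "tiny"',
    "LUT_3D_SIZE 2",
    "0 0 0", "1 0 0", "0 1 0", "1 1 0",
    "0 0 1", "1 0 1", "0 1 1", "1 1 1",
]
lut = load_cube_file(cube_lines)
# lut.size -> (2, 2, 2), lut.name -> 'tiny'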
import numpy as np
import torch
from torch_complex.tensor import ComplexTensor  # assumed source of ComplexTensor
def vector_to_Hermitian(vec):
"""Construct a Hermitian matrix from a vector of N**2 independent
real-valued elements.
Args:
vec (torch.Tensor): (..., N ** 2)
Returns:
mat (ComplexTensor): (..., N, N)
""" # noqa: H405, D205, D400
N = int(np.sqrt(vec.shape[-1]))
mat = torch.zeros(size=vec.shape[:-1] + (N, N, 2), device=vec.device)
# real component
triu = np.triu_indices(N, 0)
triu2 = np.triu_indices(N, 1) # above main diagonal
tril = (triu2[1], triu2[0]) # below main diagonal; for symmetry
mat[(...,) + triu + (np.zeros(triu[0].shape[0]),)] = vec[..., : triu[0].shape[0]]
start = triu[0].shape[0]
mat[(...,) + tril + (np.zeros(tril[0].shape[0]),)] = mat[
(...,) + triu2 + (np.zeros(triu2[0].shape[0]),)
]
# imaginary component
mat[(...,) + triu2 + (np.ones(triu2[0].shape[0]),)] = vec[
..., start : start + triu2[0].shape[0]
]
mat[(...,) + tril + (np.ones(tril[0].shape[0]),)] = -mat[
(...,) + triu2 + (np.ones(triu2[0].shape[0]),)
]
return ComplexTensor(mat[..., 0], mat[..., 1]) | 8bd32d93e9865305a8f75711d72990beaea5d897 | 3,651,261 |
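# Hedged shape check (added): N**2 = 4 real parameters describe one 2x2 Hermitian matrix
# (2 real diagonal entries plus one complex off-diagonal entry).
vec = torch.randn(5, 4)          # batch of 5 parameter vectors, so N = 2
mat = vector_to_Hermitian(vec)   # ComplexTensor with shape (5, 2, 2)
# mat equals its own conjugate transpose over the last two dimensions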
def view_payment(request):
""" A view that renders the payment page template """
user = request.user
# Check if user has already paid and redirect them to definitions app.
if user.has_perm('definitionssoftware.access_paid_definitions_app'):
return redirect(reverse('view_definitionssoftware'))
# Get stripe environment variables
stripe_public_key = settings.STRIPE_PUBLIC_KEY
stripe_secret_key = settings.STRIPE_SECRET_KEY
if request.method == 'POST':
request.session['payment_successful'] = True
return redirect(reverse('payment_success'))
# Create Stripe Payment Intent
stripe_total = 2500
stripe.api_key = stripe_secret_key
intent = stripe.PaymentIntent.create(
amount=stripe_total,
currency=settings.STRIPE_CURRENCY,
)
print(intent)
if not stripe_public_key:
messages.warning(request, 'Stripe public key is missing. \
Did you forget to set it in your environment?')
template = 'payment/payment.html'
context = {
'stripe_public_key': stripe_public_key,
'client_secret': intent.client_secret,
}
return render(request, template, context) | fc86a79c759bc4e4005d634b5c9a204473a3a3a7 | 3,651,263 |
def augment_bag(store, bag, username=None):
"""
Augment a bag object with information about it's policy type.
"""
if not bag.store:
bag = store.get(bag)
if not username:
username = bag.policy.owner
policy_type = determine_tank_type(bag, username)
bag.icon = POLICY_ICONS[policy_type]
bag.type = policy_type
return bag | 1fbda85f3db346e46e52b86d2d4b5784f8c4d2ab | 3,651,264 |
from typing import Dict
def province_id_to_home_sc_power() -> Dict[utils.ProvinceID, int]:
"""Which power is this a home sc for?"""
content = get_mdf_content(MapMDF.STANDARD_MAP)
home_sc_line = content.splitlines()[2]
tag_to_id = _tag_to_id(get_mdf_content(MapMDF.STANDARD_MAP))
# Assume powers are ordered correctly
id_to_power = {}
power = -1
words = str(home_sc_line).split(' ')
for w in words:
if w in ['(', ')']:
pass
elif w in tag_to_id: # Is a province
id_to_power[tag_to_id[w]] = power
else: # Must be a power tag
power += 1
return id_to_power | f87081ce053e3a50bb48deaae28b0e919e224216 | 3,651,265 |
def evaluate_cubic_spline(x, y, r, t):
"""Evaluate cubic spline at points.
Parameters:
x : rank-1 np.array of np.float64
data x coordinates
y : rank-1 np.array of np.float64
data y coordinates
r : rank-1 np.array of np.float64
output of solve_coeffs() for your data
t : rank-1 np.array of np.float64
points where to evaluate. Must satisfy (x[0] <= t <= x[-1]).all().
Returns:
s : rank-1 np.array of np.float64
Value of the spline at the points t.
"""
return _evaluate_generic(x,y,r,t, _evaluate_cubic_spline_one) | 52d6c4ac0440da88ee908bc0a6cfa2b755ca606f | 3,651,266 |
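# Hedged usage sketch (added): solve_coeffs() is the companion helper referenced in the
# docstring and is assumed to be available in the same module; numpy imported as np.
x = np.linspace(0.0, 2.0 * np.pi, 10)
y = np.sin(x)
r = solve_coeffs(x, y)                 # spline coefficients for the data points
t = np.linspace(0.0, 2.0 * np.pi, 100)
s = evaluate_cubic_spline(x, y, r, t)  # spline evaluated at the query points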
def get_username(host, meta_host, config):
"""Find username from sources db/metadata/config."""
username = host.username or meta_host.get("username")
if is_windows_host(meta_host):
username = username or "Administrator"
default_user = get_config_value(config["users"], meta_host["os"])
username = username or default_user
return username | 4e220816442e64d43f1da15aa0bd19508e186f19 | 3,651,267 |
def get_all_tests():
"""
Collect all tests and return them
:return: A test suite as returned by xunitparser with all the tests
available in the w3af framework source code, without any selectors.
"""
return _get_tests('all.xml') | 11acff501fb717ac5c9bdc16343742f124f2a120 | 3,651,268 |
def main():
"""Builds OSS-Fuzz project's fuzzers for CI tools.
Note: The resulting fuzz target binaries of this build are placed in
the directory: ${GITHUB_WORKSPACE}/out
Returns:
0 on success or nonzero on failure.
"""
return build_fuzzers_entrypoint() | cd7c386c2a5d126c0abc1504a1cb3dfe6026173c | 3,651,269 |
def find_first_img_dim(import_gen):
"""
Loads in the first image in a provided data set and returns its dimensions
Intentionally returns on first iteration of the loop
:param import_gen: PyTorch DataLoader utilizing ImageFolderWithPaths for its dataset
:return: dimensions of image
"""
for x, _, _ in import_gen:
return x[0].shape[-2], x[0].shape[-1] | 3ccaccdfb20d7b2ca4d339adacd3c706a460fdef | 3,651,270 |
def restaurantJSON():
""" Returns all restaurants by JSON call """
restaurants = session.query(Restaurant)
return jsonify(Restaurants=[r.serialize for r in restaurants]) | 350df909de7798da9567a7fe0a972d660c40ff8c | 3,651,271 |
def _to_histogram_plotgroup(use_spec, plotgroup_id, plot_id, read_type, bincounts, output_dir, png_name):
"""
Create a histogram of length distribution.
"""
plot_spec = use_spec.get_plot_spec(plotgroup_id, plot_id)
png_file = op.join(output_dir, png_name)
png, thumb = plot_read_lengths_binned(bincounts,
png_file,
read_type=read_type,
title=plot_spec.title,
color=get_blue(3),
edgecolor=get_blue(2))
return to_plotgroup(plotgroup_id, plot_id, png, thumb) | 18ae412af24800098ec2c01c9ba5c456455540f5 | 3,651,272 |
def prepare_string(x, max_length=None):
""" Converts a string from LaTeX escapes to UTF8 and truncates it to max_length """
# data = latex2text(x, tolerant_parsing=True)
try:
data = latex_to_unicode(filter_using_re(x))
if max_length is not None:
data = (data[:max_length-5] + '[...]') if len(data) > max_length else data
return smart_text(data)
except TypeError:
logger.warning("Encountered a TypeError which may be linked to unicode handling "
"in bibtexparser when processing the following string: %s."%x)
return "" | 043d0d063e22ef18943459a7ba0a8928244bca12 | 3,651,273 |
import math
def q_b(m0, m1, m2, n0, n1, n2):
"""Stretch"""
return math.sqrt((m0 - n0)**2 + (m1 - n1)**2 + (m2 - n2)**2) | 61cf1b5eec6c89be7f822cbdbc03564b805a1920 | 3,651,274 |
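# Hedged worked example (added): the stretch between (0, 0, 0) and (1, 2, 2) is
# sqrt(1 + 4 + 4) = 3.0.
q_b(0, 0, 0, 1, 2, 2)  # -> 3.0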