def _prettify(elem,indent_level=0):
"""Return a pretty-printed XML string for the Element.
"""
indent = " "
res = indent_level*indent + '<'+elem.tag.encode('utf-8')
for k in elem.keys():
res += " "+k.encode('utf-8')+'="'+_escape_nl(elem.get(k)).encode('utf-8')+'"'
children = elem.getchildren()
if len(children)==0 and not elem.text:
res += ' />'
return res
res += '>'
if elem.text:
res += _escape_nl(elem.text).encode('utf-8')
for c in children:
res += '\n'+_prettify(c,indent_level+1)
if len(children)>0:
res += '\n'+indent_level*indent
res += '</'+elem.tag.encode('utf-8')+'>'
return res | 5,355,700 |
def predict(text):
"""
Predict the language of a text.
Parameters
----------
text : str
Returns
-------
language_code : str
"""
if language_models is None:
init_language_models(comp_metric, unicode_cutoff=10**6)
x_distribution = get_distribution(text, language_models_chars)
return predict_param(language_models,
comp_metric,
x_distribution,
best_only=True) | 5,355,701 |
def kgup(baslangic_tarihi=__dt.datetime.today().strftime("%Y-%m-%d"),
bitis_tarihi=__dt.datetime.today().strftime("%Y-%m-%d"), organizasyon_eic="", uevcb_eic=""):
"""
İlgili tarih aralığı için kaynak bazlı kesinleşmiş günlük üretim planı (KGÜP) bilgisini vermektedir.
Not: "organizasyon_eic" değeri girildiği, "uevcb_eic" değeri girilmediği taktirde organizasyona ait tüm uevcb'lerin
toplamı için kgüp bilgisini vermektedir. Her iki değer de girildiği taktirde ilgili organizasyonun ilgili uevcb'si
için kgüp bilgisini vermektedir.
Parametreler
------------
baslangic_tarihi : %YYYY-%AA-%GG formatında başlangıç tarihi (Varsayılan: bugün)
bitis_tarihi : %YYYY-%AA-%GG formatında bitiş tarihi (Varsayılan: bugün)
organizasyon_eic : metin formatında organizasyon eic kodu (Varsayılan: "")
uevcb_eic : metin formatında metin formatında uevcb eic kodu (Varsayılan: "")
Geri Dönüş Değeri
-----------------
KGUP (Tarih, Saat, Doğalgaz, Barajlı, Linyit, Akarsu, İthal Kömür, Rüzgar, Fuel Oil, Jeo Termal, Taş Kömür, Biyokütle
,Nafta, Diğer, Toplam)
"""
if __dogrulama.__baslangic_bitis_tarih_dogrulama(baslangic_tarihi, bitis_tarihi):
try:
particular_url = __first_part_url + "dpp" + "?startDate=" + baslangic_tarihi + "&endDate=" + bitis_tarihi \
+ "&organizationEIC=" + organizasyon_eic + "&uevcbEIC=" + uevcb_eic
json = __make_requests(particular_url)
df = __pd.DataFrame(json["body"]["dppList"])
df["Saat"] = df["tarih"].apply(lambda h: int(h[11:13]))
df["Tarih"] = __pd.to_datetime(df["tarih"].apply(lambda d: d[:10]))
df.rename(index=str,
columns={"akarsu": "Akarsu", "barajli": "Barajlı", "biokutle": "Biyokütle", "diger": "Diğer",
"dogalgaz": "Doğalgaz", "fuelOil": "Fuel Oil", "ithalKomur": "İthal Kömür",
"jeotermal": "Jeo Termal", "linyit": "Linyit", "nafta": "Nafta",
"ruzgar": "Rüzgar", "tasKomur": "Taş Kömür", "toplam": "Toplam"}, inplace=True)
df = df[["Tarih", "Saat", "Doğalgaz", "Barajlı", "Linyit", "Akarsu", "İthal Kömür", "Rüzgar",
"Fuel Oil", "Jeo Termal", "Taş Kömür", "Biyokütle", "Nafta", "Diğer", "Toplam"]]
except (KeyError, TypeError):
return __pd.DataFrame()
else:
return df | 5,355,702 |
def _output_rdf_graph_as_html_no_jinja(theCgi, top_url, error_msg, gbl_cgi_env_list):
"""
    This transforms an internal data graph into an HTML document.
"""
page_title = theCgi.m_page_title
grph = theCgi.m_graph
display_html_text_header(page_title)
WrtAsUtf('<body>')
script_information = "".join(_script_information_html_iterator(theCgi, gbl_cgi_env_list))
WrtAsUtf(script_information)
object_information = "".join(_object_information_html_iterator(theCgi))
WrtAsUtf(object_information)
WrtAsUtf("".join(_write_errors_no_jinja(error_msg)))
dict_class_subj_prop_obj = _create_objects_list(grph)
WrtAsUtf("".join(_write_all_objects_no_jinja(dict_class_subj_prop_obj)))
parameters_edition_html = "".join(_parameters_edition_html_iterator(theCgi))
if parameters_edition_html:
WrtAsUtf("<h2>Script parameters</h2>")
WrtAsUtf(parameters_edition_html)
# Scripts do not apply when displaying a class.
# TODO: When in a enumerate script such as enumerate_CIM_LogicalDisk.py,
# it should assume the same: No id but a class.
if(theCgi.m_entity_type == "") or (theCgi.m_entity_id != ""):
WrtAsUtf("<h2>Related data scripts</h2>")
WrtAsUtf("".join(_scripts_tree_html_iterator(theCgi)))
WrtAsUtf("<h2>Other related urls</h2>")
WrtAsUtf('<table class="other_urls">')
WrtAsUtf("".join(_other_urls_html_iterator(top_url)))
WrtAsUtf("".join(_cim_urls_html_iterator()))
WrtAsUtf('</table>')
html_footer = "".join(display_html_text_footer())
WrtAsUtf(html_footer)
WrtAsUtf("</body>")
WrtAsUtf("</html> ") | 5,355,703 |
def distance_loop(x1, x2):
    """ Returns the Euclidean distance between the 1-d numpy arrays x1 and x2"""
    total = 0.0
    for i in range(len(x1)):
        total += (x1[i] - x2[i]) ** 2
    return total ** 0.5 | 5,355,704 |
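A quick vectorized cross-check of the same computation, using only NumPy (the sample arrays are made up):

import numpy as np

x1 = np.array([0.0, 3.0, 4.0])
x2 = np.array([0.0, 0.0, 0.0])

# Vectorized Euclidean distance; should agree with distance_loop(x1, x2).
dist = np.sqrt(np.sum((x1 - x2) ** 2))
print(dist)                                       # 5.0
print(np.isclose(dist, np.linalg.norm(x1 - x2)))  # True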
def get_ps_calls_and_summary(filtered_guide_counts_matrix, f_map):
"""Calculates protospacer calls per cell and summarizes them
Args:
filtered_guide_counts_matrix: CountMatrix - obtained by selecting features by CRISPR library type on the feature counts matrix
f_map: dict - map of feature ID:feature sequence pairs
Returns:
First 3 outputs as specified in docstring for get_perturbation_calls
ps_calls_summary is a Pandas dataframe summarizing descriptive statistics for each perturbation_call (unique combination of protospacers) found in
        the dataset, along with some overall summary statistics about the multiplicity of infection
"""
if feature_utils.check_if_none_or_empty(filtered_guide_counts_matrix):
return (None, None, None, None, None)
(ps_calls_table, presence_calls, cells_with_ps, umi_thresholds) = get_perturbation_calls(filtered_guide_counts_matrix,
f_map,)
ps_calls_table.sort_values(by=['feature_call'], inplace=True, kind='mergesort')
ps_calls_summary = get_ps_calls_summary(ps_calls_table, filtered_guide_counts_matrix)
return (ps_calls_table, presence_calls, cells_with_ps, ps_calls_summary, umi_thresholds) | 5,355,705 |
def symbol_by_name(name, aliases={}, imp=None, package=None,
sep='.', default=None, **kwargs):
"""Get symbol by qualified name.
The name should be the full dot-separated path to the class::
modulename.ClassName
Example::
celery.concurrency.processes.TaskPool
^- class name
or using ':' to separate module and symbol::
celery.concurrency.processes:TaskPool
If `aliases` is provided, a dict containing short name/long name
mappings, the name is looked up in the aliases first.
Examples:
>>> symbol_by_name("celery.concurrency.processes.TaskPool")
<class 'celery.concurrency.processes.TaskPool'>
>>> symbol_by_name("default", {
... "default": "celery.concurrency.processes.TaskPool"})
<class 'celery.concurrency.processes.TaskPool'>
# Does not try to look up non-string names.
>>> from celery.concurrency.processes import TaskPool
>>> symbol_by_name(TaskPool) is TaskPool
True
"""
if imp is None:
imp = importlib.import_module
if not isinstance(name, basestring):
return name # already a class
name = aliases.get(name) or name
sep = ':' if ':' in name else sep
module_name, _, cls_name = name.rpartition(sep)
if not module_name:
cls_name, module_name = None, package if package else cls_name
try:
try:
module = imp(module_name, package=package, **kwargs)
except ValueError, exc:
raise ValueError, ValueError(
"Couldn't import %r: %s" % (name, exc)), sys.exc_info()[2]
return getattr(module, cls_name) if cls_name else module
except (ImportError, AttributeError):
if default is None:
raise
return default | 5,355,706 |
def coefficients_of_line_from_points(
point_a: Tuple[float, float], point_b: Tuple[float, float]
) -> Tuple[float, float]:
"""Computes the m and c coefficients of the equation (y=mx+c) for
a straight line from two points.
Args:
point_a: point 1 coordinates
point_b: point 2 coordinates
Returns:
m coefficient and c coefficient
"""
points = [point_a, point_b]
x_coords, y_coords = zip(*points)
coord_array = np.vstack([x_coords, np.ones(len(x_coords))]).T
m, c = np.linalg.lstsq(coord_array, y_coords, rcond=None)[0]
return m, c | 5,355,707 |
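A minimal usage sketch, assuming the function above is in scope; the two points are made up and lie on y = 2x + 1. Because two distinct points determine the line exactly, the least-squares solve returns the exact coefficients here.

m, c = coefficients_of_line_from_points((0.0, 1.0), (3.0, 7.0))
print(m, c)  # approximately 2.0 and 1.0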
def logDirManager():
""" Directory manager for TensorFlow logging """
print('Cleaning and initialising logging directory... \n')
# Ensure function is starting from project root..
if os.getcwd() != "/Users/Oliver/AnacondaProjects/SNSS_TF":
os.chdir("/Users/Oliver/AnacondaProjects/SNSS_TF")
os.chdir("tb_log") # Go to logging folder..
stdout = subprocess.check_output(["ls", "-a"]) # Send ls command to terminal
# Decode output from terminal
folders = DictReader(stdout.decode('ascii').splitlines(),
delimiter=' ', skipinitialspace=True,
fieldnames=['name'])
# For every folder in ordered dict...
for f in folders:
path = f.get('name') # Get path
if (path != ('.')) & (path != ('..')) & (path != '.DS_Store'): # Ignore parent dirs
cDate = datetime.fromtimestamp(os.stat(os.getcwd() + '/' + f.get('name')).st_ctime)
delta = datetime.today() - cDate # Get age of folder.
if delta.days > 6: # If older than 1 week...
rmtree(path) # Delete folder.
print('Removed old folder: "' + path + '" \n') # Log deletion to console.
# print('Name: ' + str + ' Created on: ' + cDate.isoformat()) # Debugging
logDir = "log_dir/" + date.today().isoformat() + "/" +\
datetime.now().time().isoformat(timespec='minutes').replace(':', '')
    # Create today's folder for logging
print('Tensorflow logging to : ~/' + logDir + '\n')
os.chdir('..')
return logDir | 5,355,708 |
def read_tiff(fname, slc=None):
"""
Read data from tiff file.
Parameters
----------
fname : str
String defining the path of file or file name.
slc : sequence of tuples, optional
Range of values for slicing data in each axis.
((start_1, end_1, step_1), ... , (start_N, end_N, step_N))
defines slicing parameters for each axis of the data matrix.
Returns
-------
ndarray
Output 2D image.
"""
fname = _check_read(fname)
try:
import tifffile
arr = tifffile.imread(fname, memmap=True)
except IOError:
logger.error('No such file or directory: %s', fname)
return False
arr = _slice_array(arr, slc)
_log_imported_data(fname, arr)
return arr | 5,355,709 |
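A minimal usage sketch; the file path is hypothetical, the slicing tuple follows the docstring's format, and read_tiff returns False when the file cannot be opened.

arr = read_tiff("data/projection_0001.tif", slc=((0, 512, 1), (0, 512, 1)))
if arr is not False:
    print(arr.shape, arr.dtype)
else:
    print("could not read the file")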
def sorted_non_max_suppression_padded(scores,
boxes,
max_output_size,
iou_threshold):
"""A wrapper that handles non-maximum suppression.
Assumption:
* The boxes are sorted by scores unless the box is a dot (all coordinates
are zero).
* Boxes with higher scores can be used to suppress boxes with lower scores.
  The overall design of the algorithm is to handle boxes tile-by-tile:
boxes = boxes.pad_to_multiply_of(tile_size)
num_tiles = len(boxes) // tile_size
output_boxes = []
for i in range(num_tiles):
box_tile = boxes[i*tile_size : (i+1)*tile_size]
for j in range(i - 1):
suppressing_tile = boxes[j*tile_size : (j+1)*tile_size]
iou = bbox_overlap(box_tile, suppressing_tile)
# if the box is suppressed in iou, clear it to a dot
box_tile *= _update_boxes(iou)
    # Iteratively handle the diagonal tile.
iou = _box_overlap(box_tile, box_tile)
iou_changed = True
while iou_changed:
# boxes that are not suppressed by anything else
suppressing_boxes = _get_suppressing_boxes(iou)
# boxes that are suppressed by suppressing_boxes
suppressed_boxes = _get_suppressed_boxes(iou, suppressing_boxes)
# clear iou to 0 for boxes that are suppressed, as they cannot be used
# to suppress other boxes any more
new_iou = _clear_iou(iou, suppressed_boxes)
iou_changed = (new_iou != iou)
iou = new_iou
# remaining boxes that can still suppress others, are selected boxes.
output_boxes.append(_get_suppressing_boxes(iou))
if len(output_boxes) >= max_output_size:
break
Args:
scores: a tensor with a shape of [batch_size, anchors].
boxes: a tensor with a shape of [batch_size, anchors, 4].
max_output_size: a scalar integer `Tensor` representing the maximum number
of boxes to be selected by non max suppression.
iou_threshold: a float representing the threshold for deciding whether boxes
overlap too much with respect to IOU.
Returns:
nms_scores: a tensor with a shape of [batch_size, anchors]. It has same
dtype as input scores.
nms_proposals: a tensor with a shape of [batch_size, anchors, 4]. It has
same dtype as input boxes.
"""
batch_size = tf.shape(boxes)[0]
num_boxes = tf.shape(boxes)[1]
pad = tf.cast(
tf.math.ceil(tf.cast(num_boxes, tf.float32) / NMS_TILE_SIZE),
tf.int32) * NMS_TILE_SIZE - num_boxes
boxes = tf.pad(tf.cast(boxes, tf.float32), [[0, 0], [0, pad], [0, 0]])
scores = tf.pad(
tf.cast(scores, tf.float32), [[0, 0], [0, pad]], constant_values=-1)
num_boxes += pad
def _loop_cond(unused_boxes, unused_threshold, output_size, idx):
return tf.logical_and(
tf.reduce_min(output_size) < max_output_size,
idx < num_boxes // NMS_TILE_SIZE)
selected_boxes, _, output_size, _ = tf.while_loop(
_loop_cond, _suppression_loop_body, [
boxes, iou_threshold,
tf.zeros([batch_size], tf.int32),
tf.constant(0)
])
idx = num_boxes - tf.cast(
tf.nn.top_k(
tf.cast(tf.reduce_any(selected_boxes > 0, [2]), tf.int32) *
tf.expand_dims(tf.range(num_boxes, 0, -1), 0), max_output_size)[0],
tf.int32)
idx = tf.minimum(idx, num_boxes - 1)
idx = tf.reshape(
idx + tf.reshape(tf.range(batch_size) * num_boxes, [-1, 1]), [-1])
boxes = tf.reshape(
tf.gather(tf.reshape(boxes, [-1, 4]), idx),
[batch_size, max_output_size, 4])
boxes = boxes * tf.cast(
tf.reshape(tf.range(max_output_size), [1, -1, 1]) < tf.reshape(
output_size, [-1, 1, 1]), boxes.dtype)
scores = tf.reshape(
tf.gather(tf.reshape(scores, [-1, 1]), idx),
[batch_size, max_output_size])
scores = scores * tf.cast(
tf.reshape(tf.range(max_output_size), [1, -1]) < tf.reshape(
output_size, [-1, 1]), scores.dtype)
return scores, boxes | 5,355,710 |
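The pseudocode above leans on a pairwise box-overlap primitive (bbox_overlap). Below is a small self-contained NumPy sketch of that computation, assuming boxes are encoded as [y_min, x_min, y_max, x_max]; this is an illustration only, not the helper used by the real implementation.

import numpy as np

def pairwise_iou(boxes_a, boxes_b):
    """IoU between every box in boxes_a ([n, 4]) and every box in boxes_b ([m, 4]).

    Boxes are assumed to be [y_min, x_min, y_max, x_max]; the result has shape [n, m].
    """
    ymin_a, xmin_a, ymax_a, xmax_a = np.split(boxes_a, 4, axis=1)
    ymin_b, xmin_b, ymax_b, xmax_b = np.split(boxes_b, 4, axis=1)
    # Intersection heights/widths, clipped at zero for disjoint boxes.
    inter_h = np.maximum(0.0, np.minimum(ymax_a, ymax_b.T) - np.maximum(ymin_a, ymin_b.T))
    inter_w = np.maximum(0.0, np.minimum(xmax_a, xmax_b.T) - np.maximum(xmin_a, xmin_b.T))
    inter = inter_h * inter_w
    area_a = (ymax_a - ymin_a) * (xmax_a - xmin_a)
    area_b = (ymax_b - ymin_b) * (xmax_b - xmin_b)
    union = area_a + area_b.T - inter
    return inter / np.maximum(union, 1e-8)

boxes = np.array([[0.0, 0.0, 2.0, 2.0], [1.0, 1.0, 3.0, 3.0]])
print(pairwise_iou(boxes, boxes))  # diagonal is 1.0; off-diagonal is 1/7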
def create_session() -> Session:
"""
Creates a new session using the aforementioned engine
:return: session
"""
return Session(bind=engine) | 5,355,711 |
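The function relies on a module-level engine; a minimal sketch of how such an engine could be created and the session used. The in-memory SQLite URL is an illustrative assumption, not the project's actual configuration.

from sqlalchemy import create_engine, text
from sqlalchemy.orm import Session

# Hypothetical engine; the real project presumably configures its own database URL.
engine = create_engine("sqlite:///:memory:")

session = Session(bind=engine)
try:
    print(session.execute(text("SELECT 1")).scalar())  # 1
finally:
    session.close()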
def test_checkout_start_is_transaction_date(
loan_created, db, params, mock_ensure_item_is_available_for_checkout
):
"""Test checkout start date to transaction date when not set."""
mock_ensure_item_is_available_for_checkout.side_effect = None
number_of_days = timedelta(days=10)
with SwappedNestedConfig(
["CIRCULATION_POLICIES", "checkout", "duration_default"],
lambda x: number_of_days,
):
loan = current_circulation.circulation.trigger(
loan_created, **dict(params, trigger="checkout")
)
db.session.commit()
assert loan["state"] == "ITEM_ON_LOAN"
assert loan["start_date"] == loan["transaction_date"]
start_date = parse_date(loan["start_date"])
end_date = start_date + number_of_days
assert loan["end_date"] == end_date.isoformat() | 5,355,712 |
def fft_to_complex_matrix(x):
""" Create matrix with [a -b; b a] entries for complex numbers. """
x_stacked = torch.stack((x, torch.flip(x, (4,))), dim=5).permute(2, 3, 0, 4, 1, 5)
x_stacked[:, :, :, 0, :, 1] *= -1
return x_stacked.reshape(-1, 2 * x.shape[0], 2 * x.shape[1]) | 5,355,713 |
def mcais(A, X, verbose=False):
"""
Returns the maximal constraint-admissible (positive) invariant set O_inf for the system x(t+1) = A x(t) subject to the constraint x in X.
O_inf is also known as maximum output admissible set.
It holds that x(0) in O_inf <=> x(t) in X for all t >= 0.
(Implementation of Algorithm 3.2 from: Gilbert, Tan - Linear Systems with State and Control Constraints, The Theory and Application of Maximal Output Admissible Sets.)
Sufficient conditions for this set to be finitely determined (i.e. defined by a finite number of facets) are: A stable, X bounded and containing the origin.
Math
----------
At each time step t, we want to verify if at the next time step t+1 the system will go outside X.
Let's consider X := {x | D_i x <= e_i, i = 1,...,n} and t = 0.
In order to ensure that x(1) = A x(0) is inside X, we need to consider one by one all the constraints and for each of them, the worst-case x(0).
    We can do this by solving an LP
    V(t=0, i) = max_{x in X} D_i A x - e_i for i = 1,...,n
    if all these LPs have V < 0, there is no x(0) such that x(1) is outside X.
The previous implies that all the time-evolution x(t) will lie in X (see Gilbert and Tan).
In case one of the LPs gives a V > 0, we iterate and consider
V(t=1, i) = max_{x in X, x in A X} D_i A^2 x - e_i for i = 1,...,n
where A X := {x | D A x <= e}.
If now all V < 0, then O_inf = X U AX, otherwise we iterate until convergence
V(t, i) = max_{x in X, x in A X, ..., x in A^t X} D_i A^(t+1) x - e_i for i = 1,...,n
Once at convergence O_Inf = X U A X U ... U A^t X.
Arguments
----------
A : numpy.ndarray
State transition matrix.
X : instance of Polyhedron
State-space domain of the dynamical system.
verbose : bool
If True prints at each iteration the convergence parameters.
Returns:
----------
O_inf : instance of Polyhedron
        Maximal constraint-admissible (positive) invariant set.
t : int
Determinedness index.
"""
# ensure convergence of the algorithm
eig_max = np.max(np.absolute(np.linalg.eig(A)[0]))
if eig_max > 1.:
raise ValueError('unstable system, cannot derive maximal constraint-admissible set.')
[nc, nx] = X.A.shape
if not X.contains(np.zeros((nx, 1))):
raise ValueError('the origin is not contained in the constraint set, cannot derive maximal constraint-admissible set.')
if not X.bounded:
raise ValueError('unbounded constraint set, cannot derive maximal constraint-admissible set.')
# initialize mcais
O_inf = copy(X)
# loop over time
t = 1
convergence = False
while not convergence:
# solve one LP per facet
J = X.A.dot(np.linalg.matrix_power(A,t))
residuals = []
for i in range(X.A.shape[0]):
sol = linear_program(- J[i,:], O_inf.A, O_inf.b)
residuals.append(- sol['min'] - X.b[i,0])
# print status of the algorithm
if verbose:
print('Time horizon: ' + str(t) + '.'),
print('Convergence index: ' + str(max(residuals)) + '.'),
print('Number of facets: ' + str(O_inf.A.shape[0]) + '. \r'),
# convergence check
new_facets = [i for i, r in enumerate(residuals) if r > 0.]
if len(new_facets) == 0:
convergence = True
else:
# add (only non-redundant!) facets
O_inf.add_inequality(J[new_facets,:], X.b[new_facets,:])
t += 1
# remove redundant facets
if verbose:
print('\nMaximal constraint-admissible invariant set found.')
print('Removing redundant facets ...'),
O_inf.remove_redundant_inequalities()
if verbose:
print('minimal facets are ' + str(O_inf.A.shape[0]) + '.')
return O_inf | 5,355,714 |
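The stability precondition checked at the top of mcais can be verified on its own; a tiny NumPy sketch with an illustrative stable matrix:

import numpy as np

A = np.array([[0.5, 0.1],
              [0.0, 0.8]])
eig_max = np.max(np.absolute(np.linalg.eig(A)[0]))
print(eig_max)  # 0.8: spectral radius below one, satisfying the stability requirement above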
def draw_tree(document: Document,
sid: str,
cases: List[str],
bridging: bool = False,
coreference: bool = False,
fh: Optional[TextIO] = None,
html: bool = False,
) -> None:
"""sid で指定された文の述語項構造・共参照関係をツリー形式で fh に書き出す
Args:
document (Document): sid が含まれる文書
sid (str): 出力対象の文ID
cases (List[str]): 表示対象の格
bridging (bool): 橋渡し照応関係も表示するかどうか
coreference (bool): 共参照関係も表示するかどうか
fh (Optional[TextIO]): 出力ストリーム
html (bool): html 形式で出力するかどうか
"""
blist: BList = document.sid2sentence[sid].blist
with io.StringIO() as string:
blist.draw_tag_tree(fh=string, show_pos=False)
tree_strings = string.getvalue().rstrip('\n').split('\n')
assert len(tree_strings) == len(blist.tag_list())
all_targets = [m.core for m in document.mentions.values()]
tid2mention = {mention.tid: mention for mention in document.mentions.values() if mention.sid == sid}
for bp in document[sid].bps:
tree_strings[bp.tid] += ' '
if is_pas_target(bp, verbal=True, nominal=True):
arguments = document.get_arguments(bp)
for case in cases:
args: List[BaseArgument] = arguments.get(case, [])
targets = set()
for arg in args:
target = str(arg)
if all_targets.count(str(arg)) > 1 and isinstance(arg, Argument):
target += str(arg.dtid)
targets.add(target)
if html:
color = 'black' if targets else 'gray'
tree_strings[bp.tid] += f'<font color="{color}">{case}:{",".join(targets)}</font> '
else:
tree_strings[bp.tid] += f'{case}:{",".join(targets)} '
if bridging and is_bridging_target(bp):
args = document.get_arguments(bp).get('ノ', [])
targets = set()
for arg in args:
target = str(arg)
if all_targets.count(str(arg)) > 1 and isinstance(arg, Argument):
target += str(arg.dtid)
targets.add(target)
if html:
color = 'black' if targets else 'gray'
tree_strings[bp.tid] += f'<font color="{color}">ノ:{",".join(targets)}</font> '
else:
tree_strings[bp.tid] += f'ノ:{",".join(targets)} '
if coreference and is_coreference_target(bp):
if bp.tid in tid2mention:
src_mention = tid2mention[bp.tid]
tgt_mentions = [tgt for tgt in document.get_siblings(src_mention) if tgt.dtid < src_mention.dtid]
targets = set()
for tgt_mention in tgt_mentions:
target = tgt_mention.core
if all_targets.count(tgt_mention.core) > 1:
target += str(tgt_mention.dtid)
targets.add(target)
for eid in src_mention.eids:
entity = document.entities[eid]
if entity.is_special:
targets.add(entity.exophor)
else:
targets = set()
if html:
color = 'black' if targets else 'gray'
tree_strings[bp.tid] += f'<font color="{color}">=:{",".join(targets)}</font>'
else:
tree_strings[bp.tid] += f'=:{",".join(targets)}'
print('\n'.join(tree_strings), file=fh) | 5,355,715 |
def evaluation_seasonal_srmse(model_name, variable_name='mean', background='all'):
"""
Evaluate the model in different seasons using the standardized RMSE.
:type model_name: str
:param model_name: The name of the model.
:type variable_name: str
    :param variable_name: The name of the variable which shall be evaluated\
against the ONI prediction.
:returns: The SRMSE for different seasons and the \
0, 3, 6, 9, 12 and 15-month lead times. The returned arrays have the shape \
    (lead time, season). The season corresponding to the array entry [:,0]\
is DJF and to [:,1] is JFM (and so on).
"""
reader = data_reader(startdate='1963-01', enddate='2017-12')
# seasonal scores
seas_srmse = np.zeros((n_lead, 12))
# ONI observation
oni = reader.read_csv('oni')
if background=="el-nino-like":
obs = oni[(oni.index.year>=1982)&(oni.index.year<=2001)]
elif background=="la-nina-like":
obs = oni[(oni.index.year<1982)|(oni.index.year>2001)]
elif background=="all":
obs = oni
obs_time = obs.index
for i in range(n_lead):
pred_all = reader.read_forecasts(model_name, lead_times[i]).loc[{'target_season':obs_time}]
pred = pred_all[variable_name]
seas_srmse[i, :] = seasonal_srmse(obs, pred, obs_time - pd.tseries.offsets.MonthBegin(1))
return seas_srmse | 5,355,716 |
def create_message():
"""send_message Send stream of messages to server
Args:
client_obj (object): Client stub class
"""
for _ in range(10):
message_id = f'Client with message ID: {randint(0, 1000)}'
yield pb2.ClientStreamingRequest(client_message=message_id) | 5,355,717 |
def test_logged_social_connect_self(social_config, facebook_user, db_session):
"""Connect self."""
user = db_session.merge(facebook_user)
profile = {
"accounts": [
{
"domain": "facebook.com",
"userid": user.provider_id("facebook"),
}
],
"displayName": "teddy",
"preferredUsername": "teddy",
"emails": [{"value": user.email}],
"name": "ted",
}
credentials = {"oauthAccessToken": "7897048593434"}
provider_name = "facebook"
provider_type = "facebook"
request = testing.DummyRequest()
request.user = user
request.registry = social_config.registry
request.remote_addr = "127.0.0.123"
request.context = AuthenticationComplete(profile, credentials, provider_name, provider_type)
request._ = mock_translate
request.login_perform = MagicMock(name="login_perform")
request.login_perform.return_value = {"status": True}
view = SocialLoginViews(request)
out = view()
assert out["status"] is True
user = db_session.merge(facebook_user)
assert user.provider_id("facebook") == profile["accounts"][0]["userid"] | 5,355,718 |
def generate_accession_id() -> str:
"""Generate Stable ID."""
accessionID = uuid4()
urn = accessionID.urn
LOG.debug(f"generated accession id as: {urn}")
return urn | 5,355,719 |
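A standalone sketch of the underlying UUID-to-URN conversion (no logging, purely illustrative):

from uuid import uuid4

accession_id = uuid4()
print(accession_id.urn)  # e.g. urn:uuid:4b3a...; a fresh value on every call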
def _async_device_ha_info(
hass: HomeAssistant, lg_device_id: str
) -> dict | None:
"""Gather information how this ThinQ device is represented in Home Assistant."""
device_registry = dr.async_get(hass)
entity_registry = er.async_get(hass)
hass_device = device_registry.async_get_device(
identifiers={(DOMAIN, lg_device_id)}
)
if not hass_device:
return None
data = {
"name": hass_device.name,
"name_by_user": hass_device.name_by_user,
"model": hass_device.model,
"manufacturer": hass_device.manufacturer,
"sw_version": hass_device.sw_version,
"disabled": hass_device.disabled,
"disabled_by": hass_device.disabled_by,
"entities": {},
}
hass_entities = er.async_entries_for_device(
entity_registry,
device_id=hass_device.id,
include_disabled_entities=True,
)
for entity_entry in hass_entities:
if entity_entry.platform != DOMAIN:
continue
state = hass.states.get(entity_entry.entity_id)
state_dict = None
if state:
state_dict = dict(state.as_dict())
# The entity_id is already provided at root level.
state_dict.pop("entity_id", None)
# The context doesn't provide useful information in this case.
state_dict.pop("context", None)
data["entities"][entity_entry.entity_id] = {
"name": entity_entry.name,
"original_name": entity_entry.original_name,
"disabled": entity_entry.disabled,
"disabled_by": entity_entry.disabled_by,
"entity_category": entity_entry.entity_category,
"device_class": entity_entry.device_class,
"original_device_class": entity_entry.original_device_class,
"icon": entity_entry.icon,
"original_icon": entity_entry.original_icon,
"unit_of_measurement": entity_entry.unit_of_measurement,
"state": state_dict,
}
return data | 5,355,720 |
def clean(url, full=typer.Option(False, "-f")):
"""Clean the cache"""
db_path = f"cache/{url_to_domain(url)}/posts.db"
ic(db_path)
if not pathlib.Path(db_path).exists():
llog.info("There is no data associated with this URL")
llog.info("There is nothing to do")
return
db = sqlite3.connect(db_path)
c = db.cursor()
sql_drop_table = "DROP TABLE IF EXISTS posts"
c.execute(sql_drop_table)
db.commit()
llog.success("Dropped posts database")
if full:
shutil.rmtree(f"cache/{url_to_domain(url)}")
llog.success("Deleted downloaded media") | 5,355,721 |
def get_travis_pr_num() -> Optional[int]:
"""Return the PR number if the job is a pull request, None otherwise
Returns:
        Optional[int]
See also:
- <https://docs.travis-ci.com/user/environment-variables/#default-environment-variables>
""" # noqa E501
try:
travis_pull_request = get_travis_env_or_fail('TRAVIS_PULL_REQUEST')
if falsy(travis_pull_request):
return None
else:
try:
return int(travis_pull_request)
except ValueError:
return None
except UnexpectedTravisEnvironmentError:
return None | 5,355,722 |
def get_updated_records(table_name: str, existing_items: List) -> List:
"""
Determine the list of record updates, to be sent to a DDB stream after a PartiQL update operation.
Note: This is currently a fairly expensive operation, as we need to retrieve the list of all items
from the table, and compare the items to the previously available. This is a limitation as
we're currently using the DynamoDB Local backend as a blackbox. In future, we should consider hooking
into the PartiQL query execution inside DynamoDB Local and directly extract the list of updated items.
"""
result = []
stream_spec = dynamodb_get_table_stream_specification(table_name=table_name)
key_schema = SchemaExtractor.get_key_schema(table_name)
before = ItemSet(existing_items, key_schema=key_schema)
after = ItemSet(ItemFinder.get_all_table_items(table_name), key_schema=key_schema)
def _add_record(item, comparison_set: ItemSet):
matching_item = comparison_set.find_item(item)
if matching_item == item:
return
# determine event type
if comparison_set == after:
if matching_item:
return
event_name = "REMOVE"
else:
event_name = "INSERT" if not matching_item else "MODIFY"
old_image = item if event_name == "REMOVE" else matching_item
new_image = matching_item if event_name == "REMOVE" else item
# prepare record
keys = SchemaExtractor.extract_keys_for_schema(item=item, key_schema=key_schema)
record = {
"eventName": event_name,
"eventID": short_uid(),
"dynamodb": {
"Keys": keys,
"NewImage": new_image,
"SizeBytes": len(json.dumps(item)),
},
}
if stream_spec:
record["dynamodb"]["StreamViewType"] = stream_spec["StreamViewType"]
if old_image:
record["dynamodb"]["OldImage"] = old_image
result.append(record)
# loop over items in new item list (find INSERT/MODIFY events)
for item in after.items_list:
_add_record(item, before)
# loop over items in old item list (find REMOVE events)
for item in before.items_list:
_add_record(item, after)
return result | 5,355,723 |
def kill_server():
"""
Kills the forked process that is hosting the Director repositories via
Python's simple HTTP server. This does not affect the Director service
(which handles manifests and responds to requests from Primaries), nor does
it affect the metadata in the repositories or the state of the repositories
at all. host() can be run afterwards to begin hosting again.
"""
global repo_server_process
if repo_server_process is None:
print(LOG_PREFIX + 'No repository hosting process to stop.')
return
else:
print(LOG_PREFIX + 'Killing repository hosting process with pid: ' +
str(repo_server_process.pid))
repo_server_process.kill()
repo_server_process = None | 5,355,724 |
def normalize_record(input_object, parent_name="root_entity"):
"""
This function orchestrates the main normalization.
It will go through the json document and recursively work with the data to:
- unnest (flatten/normalize) keys in objects with the standard <parentkey>_<itemkey> convention
- identify arrays, which will be pulled out and normalized
- create an array of entities, ready for streaming or export
for each item in the object:
if the item is a non object or non list item:
append to this flattened_dict object
if the item is a dictionary:
trigger the flatten dict function
the flatten dict function will iterate through the items and append them to a dictionary. it will return a dictionary with {"dictionary": <dict_data>, "array": <arrays>}
join flattened_dict and the returned[dictionary] data
append returned[array] to arrays layer
    Arrays are dealt with a little differently: because we expect multiple entries, we work through them in a loop, and each entry always belongs to an array
create new dict object dict_object = {"name": <dict name>, "data": [dict array entries data]}
    for each entry in the array loop - trigger normalize_record with the array name as the parent name
dict_object.append the `dicts_array`["data"] to the dict_object["data"] array
"""
arrays = []
dicts = []
output_dictionary = {}
parent_keys = extract_parent_keys(dictionary_name=parent_name, dictionary_object=input_object)
if isinstance(input_object, (dict)):
for key, value in input_object.items():
if not isinstance(value, (dict,list) ):
# if the item is a non object or non list item:
output_dictionary[key] = value
elif isinstance(value, dict):
# if the item is a dictionary:
# trigger the flatten dict function
dict_contents = flatten_object(key,value) # will return {"dictionary": <dict_data>, "array": <arrays>}
instance_dictionary = dict_contents["dictionary"]
instance_array = dict_contents["array"]
if len(instance_array) >0:
arrays.extend(instance_array)
output_dictionary = merge_two_dicts(output_dictionary,instance_dictionary) #join the dict
elif isinstance(value, list):
arrays.append({"name":key, "data":value, "parent_keys": parent_keys})
elif isinstance(input_object, (list)):
arrays.append({"name":parent_name,"data":input_object })
##############################
### Now process the arrays ###
##############################
for each_array in arrays:
for each_entry in each_array["data"]:
each_entry = each_entry
try:
if each_array["parent_keys"]:
each_entry = merge_two_dicts(each_entry, each_array["parent_keys"])
except:
pass
normalized_array = (normalize_record(input_object = each_entry, parent_name = each_array["name"]) )
#expect list here
#let the normalizer recursively work through and pull the data out. Once it's out, we can append the data to the dicts array :)
#may return 1 or more dictionaries
for each_normalized_array_entry in normalized_array:
# iterate through each output in the normalized array
#check if there is an instance of this data already
matches = False
for each_dictionary_entity in dicts:
if each_normalized_array_entry["name"] == each_dictionary_entity["name"]:
#check if there is data in place already for this. If so, we add an entry to it
each_dictionary_entity["data"].extend(each_normalized_array_entry["data"])
matches = True
if matches == False:
dicts.append({"name": each_normalized_array_entry["name"] , "data": each_normalized_array_entry["data"] })
dicts.append({"name":parent_name, "data": [output_dictionary]})
return(dicts) | 5,355,725 |
def rmse(f, p, xdata, ydata):
"""Root-mean-square error."""
results = np.asarray([f(p, x) for x in xdata])
sqerr = (results - ydata)**2
return np.sqrt(sqerr.mean()) | 5,355,726 |
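A small usage sketch with a made-up linear model, assuming the rmse above is in scope:

import numpy as np

linear = lambda p, x: p[0] * x + p[1]   # model y = p[0]*x + p[1]

xdata = np.array([0.0, 1.0, 2.0, 3.0])
ydata = np.array([1.0, 3.0, 5.0, 7.0])

print(rmse(linear, [2.0, 1.0], xdata, ydata))  # 0.0 -- exact fit
print(rmse(linear, [2.0, 0.0], xdata, ydata))  # 1.0 -- every prediction is off by 1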
def get_logger(name=None, propagate=True):
"""Get logger object"""
logger = logging.getLogger(name)
logger.propagate = propagate
loggers.append(logger)
return logger | 5,355,727 |
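The helper assumes a module-level loggers list that tracks every logger it hands out; a minimal sketch of that setup and a call:

import logging

logging.basicConfig(level=logging.INFO)
loggers = []  # module-level registry appended to by get_logger

log = get_logger("example")
log.info("registered %d logger(s)", len(loggers))  # registered 1 logger(s)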
def load_movielens1m(infile=None, event_dtype=event_dtype_timestamp):
""" load the MovieLens 1m data set
Original file ``ml-1m.zip`` is distributed by the Grouplens Research
Project at the site:
`MovieLens Data Sets <http://www.grouplens.org/node/73>`_.
Parameters
----------
infile : optional, file or str
input file if specified; otherwise, read from default sample directory.
event_dtype : np.dtype
dtype of extra event features. as default, it consists of only a
``timestamp`` feature.
Returns
-------
data : :class:`kamrecsys.data.EventWithScoreData`
sample data
Notes
-----
Format of events:
* each event consists of a vector whose format is [user, item].
* 1,000,209 events in total
* 6,040 users rate 3,706 items (=movies)
* dtype=int
Format of scores:
* one score is given to each event
* domain of score is [1.0, 2.0, 3.0, 4.0, 5.0]
* dtype=float
Default format of event_features ( `data.event_feature` ):
timestamp : int
represented in seconds since the epoch as returned by time(2)
Format of user's feature ( `data.feature[0]` ):
gender : int
gender of the user, {0:male, 1:female}
age : int, {0, 1,..., 6}
age of the user, where
1:"Under 18", 18:"18-24", 25:"25-34", 35:"35-44", 45:"45-49",
50:"50-55", 56:"56+"
occupation : int, {0,1,...,20}
the number indicates the occupation of the user as follows:
0:"other" or not specified, 1:"academic/educator",
2:"artist", 3:"clerical/admin", 4:"college/grad student"
5:"customer service", 6:"doctor/health care", 7:"executive/managerial"
8:"farmer", 9:"homemaker", 10:"K-12 student", 11:"lawyer",
12:"programmer", 13:"retired", 14:"sales/marketing", 15:"scientist",
16:"self-employed", 17:"technician/engineer", 18:"tradesman/craftsman",
19:"unemployed", 20:"writer"
zip : str, length=5
zip code of 5 digits, which represents the residential area of the user
Format of item's feature ( `data.feature[1]` ):
name : str, length=[8, 82]
title of the movie with release year
year : int
released year
genre : binary_int * 18
18 binary numbers represents a genre of the movie. 1 if the movie
        belongs to the genre; 0 otherwise. All 0 implies unknown. Each column
corresponds to the following genres:
0:Action, 1:Adventure, 2:Animation, 3:Children's, 4:Comedy, 5:Crime,
6:Documentary, 7:Drama, 8:Fantasy, 9:Film-Noir, 10:Horror, 11:Musical,
12:Mystery, 13:Romance, 14:Sci-Fi, 15:Thriller, 16:War, 17:Western
"""
# load event file
if infile is None:
infile = os.path.join(SAMPLE_PATH, 'movielens1m.event')
data = load_event_with_score(
infile, n_otypes=2, event_otypes=(0, 1),
score_domain=(1., 5., 1.), event_dtype=event_dtype)
# load user's feature file
infile = os.path.join(SAMPLE_PATH, 'movielens1m.user')
fdtype = np.dtype([('gender', int), ('age', int),
('occupation', int), ('zip', 'U5')])
dtype = np.dtype([('eid', int), ('feature', fdtype)])
x = np.genfromtxt(fname=infile, delimiter='\t', dtype=dtype)
data.set_feature(0, x['eid'], x['feature'])
# load item's feature file
infile = os.path.join(SAMPLE_PATH, 'movielens1m.item')
fdtype = np.dtype([('name', 'U82'),
('year', int),
('genre', 'i1', 18)])
dtype = np.dtype([('eid', int), ('feature', fdtype)])
x = np.genfromtxt(fname=infile, delimiter='\t', dtype=dtype,
converters={1: np.char.decode})
data.set_feature(1, x['eid'], x['feature'])
return data | 5,355,728 |
def load_coco_data(split):
"""load the `split` data containing image and label
Args:
split (str): the split of the dataset (train, val, test)
Returns:
tf.data.Dataset: the dataset contains image and label
image (tf.tensor), shape (224, 224, 3)
label (tf.tensor), shape (1000, )
"""
dataset = tfds.load(name="coco_captions", split=split)
write_captions_of_iamges_to_file(dataset, split)
img_cap_dict = get_captions_of_images(dataset, split)
attr_list = get_attributes_list(
os.path.join(WORKING_PATH, "finetune", "attribute_list.pickle")
)
attr2idx = {word: idx for idx, word in enumerate(attr_list)}
attr_dict = get_attributes_dict(dataset, split, attr_list, img_cap_dict)
attr_onehot = get_onehot_attributes(attr_dict, attr2idx, split)
attr_onehot_labels = [attr_onehot[idx] for idx in attr_onehot.keys()]
attr_onehot_labels = tf.data.Dataset.from_tensor_slices(
tf.cast(attr_onehot_labels, tf.int32)
)
def process(image):
image = tf.image.resize(image, (224, 224))
image = tf.cast(image, tf.float32)
image = image / 255
return image
def parse_fn(feature):
image = feature["image"]
return process(image)
img_dataset = dataset.map(parse_fn)
ds = tf.data.Dataset.zip((img_dataset, attr_onehot_labels))
return ds | 5,355,729 |
def get_args():
"""
Get User defined arguments, or assign defaults
:rtype: argparse.ArgumentParser()
:return: User defined or default arguments
"""
parser = argparse.ArgumentParser()
# Positional arguments
parser.add_argument("main_args", type=str, nargs="*",
help="task for Seisflows to perform")
# Optional parameters
parser.add_argument("-w", "--workdir", nargs="?", default=os.getcwd())
parser.add_argument("-p", "--parameter_file", nargs="?",
default="parameters.yaml")
return parser.parse_args() | 5,355,730 |
def getMatches(tournamentName=None, matchDate=None, matchPatch=None, matchTeam=None):
"""
Params:
tournamentName: str/List[str]/Tuple(str) : filter by tournament names (e.g. LCK 2020 Spring)
matchDate: str/List[str]/Tuple(str) : date in the format of yyyy-mm-dd
matchPatch: str/List[str]/Tuple(str) : game patch the match is played on (e.g. 10.15)
matchTeam: str/List[str]/Tuple(str)
Returns:
List[Match]
"""
argsString = " AND ".join(filter(None, [
_formatArgs(tournamentName, "SG.Tournament"),
_formatDateTimeArgs(matchDate, "SG.DateTime_UTC"),
_formatArgs(matchPatch, "SG.Patch")
]))
url = MATCHES_URL.format(argsString)
matchesJson = requests.get(url).json()["cargoquery"]
matches = []
uniqueMatchMap = {}
for i in range(len(matchesJson)):
matchJson = matchesJson[i]["title"]
# apply team filter
if isinstance(matchTeam, str):
matchTeam = [matchTeam]
if isinstance(matchTeam, list):
if matchJson["Team1"] not in matchTeam and matchJson["Team2"] not in matchTeam:
continue
elif isinstance(matchTeam, tuple):
if not set(matchTeam).issubset(set([matchJson["Team1"], matchJson["Team2"]])):
continue
uniqueMatch = matchJson["UniqueGame"][:-2]
if uniqueMatch not in uniqueMatchMap:
match = Match(uniqueMatch)
match._uniqueGames.append(matchJson["UniqueGame"])
match.dateTime = matchJson["DateTime UTC"]
match.patch = matchJson["Patch"]
match.teams = (matchJson["Team1"], matchJson["Team2"])
match.scores = (int(matchJson["Team1Score"]), int(matchJson["Team2Score"]))
matches.append(match)
uniqueMatchMap[uniqueMatch] = match
else:
match = uniqueMatchMap[uniqueMatch]
match._uniqueGames.append(matchJson["UniqueGame"])
match.dateTime = matchJson["DateTime UTC"]
return matches | 5,355,731 |
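An illustrative call, assuming the function above is importable; the tournament, patch, and team filters are example values, and the call performs live HTTP requests against the remote API.

matches = getMatches(tournamentName="LCK 2020 Spring",
                     matchPatch="10.15",
                     matchTeam=("T1", "Gen.G"))
for match in matches:
    print(match.dateTime, match.teams, match.scores)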
def parse_e_elect(path: str,
zpe_scale_factor: float = 1.,
) -> Optional[float]:
"""
Parse the electronic energy from an sp job output file.
Args:
path (str): The ESS log file to parse from.
zpe_scale_factor (float): The ZPE scaling factor, used only for composite methods in Gaussian via Arkane.
Returns: Optional[float]
The electronic energy in kJ/mol.
"""
if not os.path.isfile(path):
raise InputError(f'Could not find file {path}')
log = ess_factory(fullpath=path)
try:
e_elect = log.load_energy(zpe_scale_factor) * 0.001 # convert to kJ/mol
except (LogError, NotImplementedError):
logger.warning(f'Could not read e_elect from {path}')
e_elect = None
return e_elect | 5,355,732 |
def test_get_slates():
"""Tests get slates"""
pass | 5,355,733 |
def create_app():
"""Creates the instance of an app."""
configuration_file=os.getcwd()+'/./configuration.cfg'
app=Flask(__name__)
app.config.from_pyfile(configuration_file)
bootstrap.init_app(app)
mail.init_app(app)
from my_app.admin import admin
app.register_blueprint(admin)
from my_app.main import main
app.register_blueprint(main)
return app | 5,355,734 |
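A typical way to run the factory-created app locally (a sketch; the configuration file and blueprints referenced above are assumed to exist):

if __name__ == "__main__":
    app = create_app()
    app.run(debug=True)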
def execute(args):
"""This function invokes the forage model given user inputs.
args - a python dictionary with the following required entries:
args['latitude'] - site latitude in degrees. If south of the equator,
this should be negative
args['prop_legume'] - proportion of the pasture by weight that is
legume, ranging from 0:1
args['steepness'] - site steepness, ranging from 1:2
args['DOY'] - initial day of the year, an integer ranging from 1:365
args['start_year'] - initial year, an integer
args['start_month'] - initial month, an integer ranging from 1:12
corresponding to January:December
args['num_months'] - number of months to run the simulation
args['grz_months'] - (optional) months when grazing should be applied,
where month 0 is the first month of the simulation
args['density_series'] - (optional) stocking density by month, where
month 0 is the first month of the simulation
args['mgmt_threshold'] - management threshold (kg/ha), residual biomass
that is required to remain, limiting herbivore offtake
args['input_dir'] - local file directory containing inputs to run
CENTURY
args['century_dir'] - local file directory containing the executable
and global parameter files to run the CENTURY ecosystem model
args['outdir'] - local file directory where intermediate and output
files will be saved
args['template_level'] - template grazing level. # TODO replace this
args['fix_file'] - file basename of CENTURY fix file, which resides in
the directory args['century_dir']
args['user_define_protein'] - boolean (0: false, 1: true). Should
crude protein of forage be drawn from forage input supplied by the
user? If false, it is calculated from CENTURY outputs
args['user_define_digestibility'] - boolean (0: false, 1: true). Should
digestibility of forage be drawn from forage input supplied by the
user? If false, it is calculated from CENTURY outputs
args['herbivore_csv'] - an absolute path to a csv file containing all
necessary descriptors of each herbivore type
args['grass_csv'] - an absolute path to a csv file containing all
necessary descriptors of the grass available as forage
args['supp_csv'] - an absolute path to a csv file containing all
necessary descriptors of supplemental feed (optional)
args['diet_verbose'] - save details of diet selection?
args['digestibility_flag'] - flag to use a particular regression
equation to calculate digestibility from crude protein
returns nothing."""
for opt_arg in [
'grz_months', 'density_series', 'digestibility_flag',
'diet_verbose']:
try:
val = args[opt_arg]
except KeyError:
args[opt_arg] = None
now_str = datetime.now().strftime("%Y-%m-%d--%H_%M_%S")
if not os.path.exists(args['outdir']):
os.makedirs(args['outdir'])
intermediate_dir = os.path.join(
args['outdir'], 'CENTURY_outputs_spin_up')
if not os.path.exists(intermediate_dir):
os.makedirs(intermediate_dir)
forage.write_inputs_log(args, now_str)
forage.set_time_step('month') # current default, enforced by CENTURY
add_event = 1 # TODO should this ever be 0?
steps_per_year = forage.find_steps_per_year()
graz_file = os.path.join(args[u'century_dir'], 'graz.100')
cent.set_century_directory(args[u'century_dir'])
if args['diet_verbose']:
master_diet_dict = {}
diet_segregation_dict = {'step': [], 'segregation': []}
herbivore_list = []
if args[u'herbivore_csv'] is not None:
herbivore_input = (pandas.read_csv(
args[u'herbivore_csv'])).to_dict(orient='records')
for herb_class in herbivore_input:
herd = forage.HerbivoreClass(herb_class)
herbivore_list.append(herd)
grass_list = (pandas.read_csv(
args[u'grass_csv'])).to_dict(orient='records')
for grass in grass_list:
if not isinstance(grass['label'], str):
grass['label'] = str(grass['label'])
forage.check_initial_biomass(grass_list)
results_dict = {'step': [], 'year': [], 'month': []}
for herb_class in herbivore_list:
results_dict[herb_class.label + '_MEItotal'] = []
results_dict[herb_class.label + '_DPLS'] = []
results_dict[herb_class.label + '_E_req'] = []
results_dict[herb_class.label + '_P_req'] = []
results_dict[herb_class.label + '_intake_forage_per_indiv_kg'] = []
for grass in grass_list:
results_dict[grass['label'] + '_green_kgha'] = []
results_dict[grass['label'] + '_dead_kgha'] = []
results_dict['total_offtake'] = []
schedule_list = []
for grass in grass_list:
schedule = os.path.join(args[u'input_dir'], (grass['label'] + '.sch'))
if os.path.exists(schedule):
schedule_list.append(schedule)
else:
er = "Error: schedule file not found"
raise Exception(er)
# write CENTURY batch file for spin-up simulation
hist_bat = os.path.join(
args[u'input_dir'], (grass['label'] + '_hist.bat'))
hist_schedule = grass['label'] + '_hist.sch'
hist_output = grass['label'] + '_hist'
cent.write_century_bat(
args[u'input_dir'], hist_bat, hist_schedule, hist_output,
args[u'fix_file'], 'outvars.txt')
# write CENTURY bat for extend simulation
extend_bat = os.path.join(
args[u'input_dir'], (grass['label'] + '.bat'))
schedule = grass['label'] + '.sch'
output = grass['label']
extend = grass['label'] + '_hist'
cent.write_century_bat(
args[u'input_dir'], extend_bat, schedule, output,
args[u'fix_file'], 'outvars.txt', extend)
supp_available = 0
if 'supp_csv' in args.keys():
supp_list = (pandas.read_csv(args[u'supp_csv'])).to_dict(
orient='records')
assert len(supp_list) == 1, "Only one supplement type is allowed"
supp_info = supp_list[0]
supp = forage.Supplement(
FreerParam.FreerParamCattle('indicus'), supp_info['digestibility'],
supp_info['kg_per_day'], supp_info['M_per_d'],
supp_info['ether_extract'], supp_info['crude_protein'],
supp_info['rumen_degradability'])
if supp.DMO > 0.:
supp_available = 1
else:
supp = None
# assume fix file is in the input directory, copy it to Century directory
shutil.copyfile(
os.path.join(args['input_dir'], args['fix_file']),
os.path.join(args['century_dir'], args['fix_file']))
# make a copy of the original graz params and schedule file
shutil.copyfile(
graz_file, os.path.join(args[u'century_dir'], 'graz_orig.100'))
for schedule in schedule_list:
label = os.path.basename(schedule)[:-4]
copy_name = label + '_orig.sch'
shutil.copyfile(
schedule, os.path.join(args[u'input_dir'], copy_name))
file_list = []
for grass in grass_list:
move_outputs = [
grass['label']+'_hist_log.txt', grass['label']+'_hist.lis',
grass['label']+'_log.txt', grass['label']+'.lis',
grass['label']+'.bin']
# move CENTURY run files to CENTURY dir
hist_bat = os.path.join(
args[u'input_dir'], (grass['label'] + '_hist.bat'))
extend_bat = os.path.join(
args[u'input_dir'], (grass['label'] + '.bat'))
e_schedule = os.path.join(args[u'input_dir'], grass['label'] + '.sch')
h_schedule = os.path.join(
args[u'input_dir'], grass['label'] + '_hist.sch')
site_file, weather_file = cent.get_site_weather_files(
e_schedule, args[u'input_dir'])
grass_files = [
hist_bat, extend_bat, e_schedule, h_schedule, site_file]
for file_name in grass_files:
file_list.append(file_name)
if weather_file != 'NA':
file_list.append(weather_file)
for file_name in file_list:
shutil.copyfile(
file_name,
os.path.join(args[u'century_dir'], os.path.basename(file_name)))
# run CENTURY for spin-up for each grass type up to start_year and
# start_month
hist_bat = os.path.join(
args[u'century_dir'], (grass['label'] + '_hist.bat'))
century_bat = os.path.join(
args[u'century_dir'], (grass['label'] + '.bat'))
cent.launch_CENTURY_subprocess(hist_bat)
cent.launch_CENTURY_subprocess(century_bat)
# save copies of CENTURY outputs, but remove from CENTURY dir
for file_name in move_outputs:
shutil.move(
os.path.join(args[u'century_dir'], file_name),
os.path.join(intermediate_dir, file_name))
stocking_density_dict = forage.populate_sd_dict(herbivore_list)
total_SD = forage.calc_total_stocking_density(herbivore_list)
site = forage.SiteInfo(args[u'steepness'], args[u'latitude'])
# add starting conditions to summary file
step = -1
step_month = args[u'start_month'] + step
if step_month == 0:
month = 12
year = args[u'start_year'] - 1
else:
month = step_month
year = args[u'start_year']
results_dict['step'].append(step)
results_dict['year'].append(year)
results_dict['month'].append(month)
results_dict['total_offtake'].append('NA')
for herb_class in herbivore_list:
results_dict[herb_class.label + '_MEItotal'].append('NA')
results_dict[herb_class.label + '_DPLS'].append('NA')
results_dict[herb_class.label + '_E_req'].append('NA')
results_dict[herb_class.label + '_P_req'].append('NA')
results_dict[herb_class.label +
'_intake_forage_per_indiv_kg'].append('NA')
try:
for step in xrange(args[u'num_months']):
step_month = args[u'start_month'] + step
if step_month > 12:
mod = step_month % 12
if mod == 0:
month = 12
year = (step_month / 12) + args[u'start_year'] - 1
else:
month = mod
year = (step_month / 12) + args[u'start_year']
else:
month = step_month
year = (step / 12) + args[u'start_year']
for herb_class in herbivore_list:
herb_class.update(step)
# get biomass and crude protein for each grass type from CENTURY
for grass in grass_list:
output_file = os.path.join(
intermediate_dir, grass['label'] + '.lis')
outputs = cent.read_CENTURY_outputs(
output_file, year - 1, year + 1)
outputs = outputs[~outputs.index.duplicated(keep='first')]
target_month = cent.find_prev_month(year, month)
grass['prev_g_gm2'] = grass['green_gm2']
grass['prev_d_gm2'] = grass['dead_gm2']
try:
grass['green_gm2'] = outputs.loc[target_month, 'aglivc']
except KeyError:
raise Exception("CENTURY outputs not as expected")
grass['dead_gm2'] = outputs.loc[target_month, 'stdedc']
if grass['green_gm2'] == 0:
grass['green_gm2'] = 0.000001
if grass['dead_gm2'] == 0:
grass['dead_gm2'] = 0.000001
if not args[u'user_define_protein']:
try:
N_mult = grass['N_multiplier']
except KeyError:
N_mult = 1
grass['cprotein_green'] = (
outputs.loc[target_month, 'aglive1'] /
outputs.loc[target_month, 'aglivc'] * N_mult)
grass['cprotein_dead'] = (
outputs.loc[target_month, 'stdede1'] /
outputs.loc[target_month, 'stdedc'] * N_mult)
if step == 0:
available_forage = forage.calc_feed_types(grass_list)
else:
available_forage = forage.update_feed_types(
grass_list, available_forage)
available_forage = forage.restrict_available_forage(
available_forage, args['mgmt_threshold'])
results_dict['step'].append(step)
results_dict['year'].append(year)
results_dict['month'].append(month)
for feed_type in available_forage:
results_dict[
feed_type.label + '_' +
feed_type.green_or_dead + '_kgha'].append(
feed_type.biomass)
if not args[u'user_define_digestibility']:
for feed_type in available_forage:
feed_type.calc_digestibility_from_protein(
args['digestibility_flag'])
diet_dict = {}
for herb_class in herbivore_list:
if (args['grz_months'] is not None and step not in
args['grz_months']):
diet = forage.Diet()
diet.fill_intake_zero(available_forage)
diet_dict[herb_class.label] = diet
continue
if (args['density_series'] is not None and step in
args['density_series'].keys()):
herb_class.stocking_density = args['density_series'][step]
stocking_density_dict = forage.populate_sd_dict(
herbivore_list)
total_SD = forage.calc_total_stocking_density(
herbivore_list)
herb_class.calc_distance_walked(
site.S, total_SD, available_forage)
max_intake = herb_class.calc_max_intake()
ZF = herb_class.calc_ZF()
HR = forage.calc_relative_height(available_forage)
diet = forage.diet_selection_t2(
ZF, HR, args[u'prop_legume'], supp_available, max_intake,
herb_class.FParam, available_forage, herb_class.f_w,
herb_class.q_w, supp)
diet_interm = forage.calc_diet_intermediates(
diet, herb_class, args[u'prop_legume'], args[u'DOY'], site,
supp)
if herb_class.type != 'hindgut_fermenter':
reduced_max_intake = forage.check_max_intake(
diet, diet_interm, herb_class, max_intake)
if reduced_max_intake < max_intake:
diet = forage.diet_selection_t2(
ZF, HR, args[u'prop_legume'], supp_available,
reduced_max_intake, herb_class.FParam,
available_forage, herb_class.f_w, herb_class.q_w,
supp)
diet_dict[herb_class.label] = diet
forage.reduce_demand(
diet_dict, stocking_density_dict, available_forage)
total_intake_step = forage.calc_total_intake(
diet_dict, stocking_density_dict)
if args['diet_verbose']:
# save diet_dict across steps to be written out later
master_diet_dict[step] = diet_dict
diet_segregation = forage.calc_diet_segregation(diet_dict)
diet_segregation_dict['step'].append(step)
diet_segregation_dict['segregation'].append(diet_segregation)
for herb_class in herbivore_list:
diet = diet_dict[herb_class.label]
# if herb_class.type != 'hindgut_fermenter':
diet_interm = forage.calc_diet_intermediates(
diet, herb_class, args[u'prop_legume'], args[u'DOY'], site,
supp)
results_dict[herb_class.label + '_MEItotal'].append(
diet_interm.MEItotal)
results_dict[herb_class.label + '_DPLS'].append(
diet_interm.DPLS)
results_dict[herb_class.label + '_E_req'].append(
diet_interm.MEm + diet_interm.MEc + diet_interm.MEl +
diet_interm.NEw)
results_dict[herb_class.label + '_P_req'].append(
diet_interm.Pm + diet_interm.Pc + diet_interm.Pl +
diet_interm.Pw)
results_dict[
herb_class.label + '_intake_forage_per_indiv_kg'].append(
forage.convert_daily_to_step(diet.If))
if herb_class.sex == 'lac_female':
results_dict['milk_prod_kg'].append(
forage.convert_daily_to_step(milk_kg_day))
# calculate percent live and dead removed for each grass type
consumed_dict = forage.calc_percent_consumed(
available_forage, diet_dict, stocking_density_dict)
results_dict['total_offtake'].append(total_intake_step)
# send to CENTURY for this month's scheduled grazing event
date = year + float('%.2f' % (month / 12.))
for grass in grass_list:
g_label = ';'.join([grass['label'], 'green'])
d_label = ';'.join([grass['label'], 'dead'])
# only modify schedule if any of this grass was grazed
if consumed_dict[g_label] > 0 or consumed_dict[d_label] > 0:
schedule = os.path.join(
args[u'century_dir'], (grass['label'] + '.sch'))
target_dict = cent.find_target_month(
add_event, schedule, date, 12)
if target_dict == 0:
raise Exception, """Error: grazing event already
scheduled in file"""
new_code = cent.add_new_graz_level(
grass, consumed_dict, graz_file,
args[u'template_level'], args[u'outdir'], step)
cent.modify_schedule(
schedule, add_event, target_dict, new_code,
args[u'outdir'], step)
# call CENTURY from the batch file
century_bat = os.path.join(
args[u'century_dir'], (grass['label'] + '.bat'))
cent.launch_CENTURY_subprocess(century_bat)
# save copies of CENTURY outputs, but remove from CENTURY dir
century_outputs = [
grass['label']+'_log.txt', grass['label']+'.lis',
grass['label']+'.bin']
intermediate_dir = os.path.join(
args['outdir'], 'CENTURY_outputs_m%d_y%d' % (month, year))
if not os.path.exists(intermediate_dir):
os.makedirs(intermediate_dir)
for file_name in century_outputs:
n_tries = 6
while True:
if n_tries == 0:
break
try:
n_tries -= 1
shutil.move(
os.path.join(args[u'century_dir'], file_name),
os.path.join(intermediate_dir, file_name))
break
except OSError:
print (
'OSError in moving %s, trying again' %
file_name)
time.sleep(1.0)
# add final standing biomass to summary file
newstep = args[u'num_months']
step_month = args[u'start_month'] + newstep
if step_month > 12:
mod = step_month % 12
if mod == 0:
month = 12
year = (step_month / 12) + args[u'start_year'] - 1
else:
month = mod
year = (step_month / 12) + args[u'start_year']
else:
month = step_month
year = (newstep / 12) + args[u'start_year']
for grass in grass_list:
output_file = os.path.join(
intermediate_dir, grass['label'] + '.lis')
outputs = cent.read_CENTURY_outputs(
output_file, year - 1, year + 1)
outputs = outputs[~outputs.index.duplicated(keep='first')]
target_month = cent.find_prev_month(year, month)
grass['prev_g_gm2'] = grass['green_gm2']
grass['prev_d_gm2'] = grass['dead_gm2']
try:
grass['green_gm2'] = outputs.loc[target_month, 'aglivc']
except KeyError:
raise Exception("CENTURY outputs not as expected")
grass['dead_gm2'] = outputs.loc[target_month, 'stdedc']
available_forage = forage.update_feed_types(
grass_list, available_forage)
for feed_type in available_forage:
results_dict[
feed_type.label + '_' + feed_type.green_or_dead +
'_kgha'].append(feed_type.biomass)
except:
raise
finally:
### Cleanup files
# replace graz params used by CENTURY with original file
os.remove(graz_file)
shutil.copyfile(
os.path.join(args[u'century_dir'], 'graz_orig.100'), graz_file)
os.remove(os.path.join(args[u'century_dir'], 'graz_orig.100'))
file_list = set(file_list)
files_to_remove = [
os.path.join(args[u'century_dir'], os.path.basename(f)) for f in
file_list]
for file_name in files_to_remove:
os.remove(file_name)
for grass in grass_list:
os.remove(
os.path.join(
args[u'century_dir'], grass['label'] + '_hist.bin'))
for schedule in schedule_list:
label = os.path.basename(schedule)[:-4]
orig_copy = os.path.join(args[u'input_dir'], label + '_orig.sch')
os.remove(orig_copy)
for grass in grass_list:
os.remove(
os.path.join(
args[u'input_dir'], (grass['label'] + '_hist.bat')))
os.remove(os.path.join(args[u'input_dir'], (grass['label'] +
'.bat')))
for ext in ['.lis', '.bin', '_log.txt']:
obj = os.path.join(args[u'century_dir'], grass['label'] + ext)
if os.path.isfile(obj):
os.remove(obj)
if args['diet_verbose']:
df = pandas.DataFrame(diet_segregation_dict)
save_as = os.path.join(args['outdir'], 'diet_segregation.csv')
df.to_csv(save_as, index=False)
for h_label in master_diet_dict[0].keys():
new_dict = {}
new_dict['step'] = master_diet_dict.keys()
new_dict['DMDf'] = [
master_diet_dict[step][h_label].DMDf for step in
master_diet_dict.keys()]
new_dict['CPIf'] = [
master_diet_dict[step][h_label].CPIf for step in
master_diet_dict.keys()]
grass_labels = master_diet_dict[0][h_label].intake.keys()
for g_label in grass_labels:
new_dict['intake_' + g_label] = (
[master_diet_dict[step][h_label].intake[g_label] for
step in master_diet_dict.keys()])
df = pandas.DataFrame(new_dict)
save_as = os.path.join(args['outdir'], h_label + '_diet.csv')
df.to_csv(save_as, index=False)
filled_dict = forage.fill_dict(results_dict, 'NA')
df = pandas.DataFrame(filled_dict)
df.to_csv(os.path.join(args['outdir'], 'summary_results.csv')) | 5,355,735 |
def start():
"""
Start the application
"""
with settings(user=env.sudouser):
sudo('initctl start %s' % env.appuser) | 5,355,736 |
def get_fields(fields):
"""
From the last column of a GTF, return a dictionary mapping each value.
Parameters:
fields (str): The last column of a GTF
Returns:
attributes (dict): Dictionary created from fields.
"""
attributes = {}
description = fields.strip()
description = [x.strip() for x in description.split(";")]
for pair in description:
if pair == "": continue
pair = pair.replace('"', '')
key, val = pair.split()
attributes[key] = val
# put in placeholders for important attributes (such as gene_id) if they
# are absent
if 'gene_id' not in attributes:
attributes['gene_id'] = 'NULL'
return attributes | 5,355,737 |
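# A minimal usage sketch for get_fields above; the GTF attribute string below is hypothetical.
fields = 'gene_id "ENSG00000223972"; gene_name "DDX11L1";'
attrs = get_fields(fields)
print(attrs)  # {'gene_id': 'ENSG00000223972', 'gene_name': 'DDX11L1'}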
def add_log_group_name_params(log_group_name, configs):
"""Add a "log_group_name": log_group_name to every config."""
for config in configs:
config.update({"log_group_name": log_group_name})
return configs | 5,355,738 |
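# A minimal usage sketch for add_log_group_name_params above; the config dicts
# and group name are hypothetical.
configs = [{"file_path": "/var/log/app.log"}, {"file_path": "/var/log/db.log"}]
print(add_log_group_name_params("my-group", configs))
# [{'file_path': '/var/log/app.log', 'log_group_name': 'my-group'},
#  {'file_path': '/var/log/db.log', 'log_group_name': 'my-group'}]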
def on_update_user_info(data: dict, activity: Activity) -> (int, Union[str, None]):
"""
broadcast a user info update to a room, or all rooms the user is in if no target.id specified
:param data: activity streams format, must include object.attachments (user info)
:param activity: the parsed activity, supplied by @pre_process decorator, NOT by calling endpoint
:return: {'status_code': ECodes.OK, 'data': '<same AS as client sent, plus timestamp>'}
"""
activity.actor.display_name = utils.b64e(environ.env.session.get(SessionKeys.user_name.value))
data['actor']['displayName'] = activity.actor.display_name
environ.env.observer.emit('on_update_user_info', (data, activity))
return ECodes.OK, data | 5,355,739 |
def discover(isamAppliance, check_mode=False, force=False):
"""
Discover available updates
"""
return isamAppliance.invoke_get("Discover available updates",
"/updates/available/discover") | 5,355,740 |
def jest_test(name, **kwargs):
"""Wrapper macro around jest_test cli"""
_jest_test(
name = name,
args = [
"--no-cache",
"--no-watchman",
"--no-colors",
"--ci",
],
chdir = native.package_name(),
**kwargs
) | 5,355,741 |
def ENsettimeparam(paramcode, timevalue):
"""Sets the value of a time parameter.
Arguments:
paramcode: time parameter code EN_DURATION
EN_HYDSTEP
EN_QUALSTEP
EN_PATTERNSTEP
EN_PATTERNSTART
EN_REPORTSTEP
EN_REPORTSTART
EN_RULESTEP
EN_STATISTIC
EN_PERIODS
timevalue: value of time parameter in seconds
The codes for EN_STATISTIC are:
EN_NONE none
EN_AVERAGE averaged
EN_MINIMUM minimums
EN_MAXIMUM maximums
EN_RANGE ranges"""
ierr= _lib.ENsettimeparam(ctypes.c_int(paramcode), ctypes.c_int(timevalue))
if ierr!=0: raise ENtoolkitError(ierr) | 5,355,742 |
def ltistep(U, A=A, B=B, C=C):
""" LTI( A B C ): U -> y linear
straight up
"""
U, A, B, C = map(np.asarray, (U, A, B, C))
xk = np.zeros(A.shape[1])
x = [xk]
for u in U[:-1]:
xk = A.dot(xk) + B.dot(u)
x.append(xk.copy())
return np.dot(x, C) | 5,355,743 |
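# Usage sketch for ltistep above with a hypothetical one-state system;
# A, B, C are module-level defaults in the original, passed explicitly here.
import numpy as np
U = np.ones(5)
y = ltistep(U, A=np.array([[0.5]]), B=np.array([1.0]), C=np.array([1.0]))
print(y)  # accumulating step response, roughly [0.  1.  1.5  1.75  1.875]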
def _registry():
"""Registry to download images from."""
return _registry_config()["host"] | 5,355,744 |
def load_structure(query, reduce=True, strip='solvent&~@/pseudoBonds'):
"""
Load a structure in Chimera. It can be anything accepted by `open` command.
Parameters
==========
query : str
Path to molecular file, or special query for Chimera's open (e.g. pdb:3pk2).
reduce : bool
Add hydrogens to structure. Defaults to True.
strip : str
Chimera selection spec that will be removed. Defaults to solvent&~@/pseudoBonds
(solvent that is not attached to a metal ion).
"""
print('Opening', query)
chimera.runCommand('open ' + query)
m = chimera.openModels.list()[0]
m.setAllPDBHeaders({})
if strip:
print(' Removing {}...'.format(strip))
chimera.runCommand('del ' + strip)
if reduce:
print(' Adding hydrogens...')
chimera.runCommand('addh')
return m | 5,355,745 |
def is_into_keyword(token):
"""
    Check whether the token is the INTO keyword.
"""
return token.match(T.Keyword, "INTO") | 5,355,746 |
def exp(
value: Union[Tensor, MPCTensor, int, float], iterations: int = 8
) -> Union[MPCTensor, float, Tensor]:
"""Approximates the exponential function using a limit approximation.
exp(x) = lim_{n -> infty} (1 + x / n) ^ n
Here we compute exp by choosing n = 2 ** d for some large d equal to
iterations. We then compute (1 + x / n) once and square `d` times.
Args:
value: tensor whose exp is to be calculated
iterations (int): number of iterations for limit approximation
Ref: https://github.com/LaRiffle/approximate-models
Returns:
MPCTensor: the calculated exponential of the given tensor
"""
result = (value / 2**iterations) + 1
for _ in range(iterations):
result = result * result
return result | 5,355,747 |
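# Quick numeric check of the limit approximation used by exp() above, applied
# to a plain float (tensor/MPC inputs are assumed to follow the same arithmetic).
import math
approx = exp(1.0, iterations=8)   # (1 + 1/256) ** 256
print(approx, math.exp(1.0))      # ~2.713 vs 2.71828...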
def train(estimator: Estimator, data_root_dir: str, max_steps: int) -> Any:
"""Train a Tensorflow estimator"""
train_spec = tf.estimator.TrainSpec(
input_fn=_build_input_fn(data_root_dir, ModeKeys.TRAIN),
max_steps=max_steps,
)
if max_steps > Training.LONG_TRAINING_STEPS:
throttle_secs = Training.LONG_DELAY
else:
throttle_secs = Training.SHORT_DELAY
eval_spec = tf.estimator.EvalSpec(
input_fn=_build_input_fn(data_root_dir, ModeKeys.EVAL),
start_delay_secs=Training.SHORT_DELAY,
throttle_secs=throttle_secs,
)
LOGGER.debug('Train the model')
results = tf.estimator.train_and_evaluate(estimator, train_spec, eval_spec)
training_metrics = results[0]
return training_metrics | 5,355,748 |
def plot(data_input,categorical_name=[],drop=[],PLOT_COLUMNS_SIZE = 4,bin_size=20,bar_width=0.2,wspace=0.5,hspace=0.8):
"""
    This is the main function for bivariate analysis between the target variable and the input features.
Parameters
-----------
data_input : Dataframe
This is the input Dataframe with all data.
categorical_name : list
Names of all categorical variable columns with more than 2 classes, to distinguish with the continuous variables.
drop : list
Names of columns to be dropped.
PLOT_COLUMNS_SIZE : int; default =4
Number of plots to display vertically in the display window.The row size is adjusted accordingly.
    bin_size : int ; default=20
Number of bins for the histogram displayed in the categorical vs categorical category.
wspace : float32 ;default = 0.5
Horizontal padding between subplot on the display window.
hspace : float32 ;default = 0.8
Vertical padding between subplot on the display window.
-----------
"""
if type(data_input).__name__ == "DataFrame" :
# Column names
columns_name = data_input.columns.values
#To drop user specified columns.
if is_present(columns_name,drop):
data_input = data_input.drop(drop,axis=1)
columns_name = data_input.columns.values
categorical_name = remove_drop_from_catglist(drop,categorical_name)
else:
raise ValueError("Couldn't find it in the input Dataframe!")
#Checks if the categorical_name are present in the orignal dataframe columns.
categorical_is_present = is_present(columns_name,categorical_name)
if categorical_is_present:
category_dict,catg_list,cont_list = get_category(data_input,categorical_name,columns_name)
#Subplot(Total number of graphs)
total = total_subplots(data_input,[catg_list,cont_list])
if total < PLOT_COLUMNS_SIZE:
total = PLOT_COLUMNS_SIZE
PLOT_ROW_SIZE = ceil(float(total)/PLOT_COLUMNS_SIZE)
plot,count = univariate_analysis_continous(cont_list,data_input,total,COUNTER,bin_size,PLOT_ROW_SIZE,PLOT_COLUMNS_SIZE)
plot,count = univariate_analysis_categorical(catg_list,data_input,total,count,bar_width,PLOT_ROW_SIZE,PLOT_COLUMNS_SIZE)
fig.subplots_adjust(bottom=0.08,left = 0.05,right=0.97,top=0.93,wspace = wspace,hspace = hspace)
plot.show()
else:
raise ValueError("The input doesn't seems to be Dataframe") | 5,355,749 |
def assert_similar_time(dt1: datetime, dt2: datetime, threshold: float = 0.5) -> None:
"""Assert the delta between the two datetimes is less than the given threshold (in seconds).
This is required as there seems to be small data loss when marshalling and unmarshalling
datetimes, for example:
2021-09-26T15:00:18.708000+00:00 -> 2021-09-26T15:00:18.708776+00:00
This issue does not appear to be solvable by us, please create an issue if you know of a solution.
"""
if dt1 > dt2:
delta = dt1 - dt2
else:
delta = dt2 - dt1
assert delta.days == 0
assert delta.total_seconds() < threshold | 5,355,750 |
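# Usage sketch for assert_similar_time above: a sub-millisecond difference
# passes silently, while a difference above the threshold raises AssertionError.
from datetime import datetime, timedelta
now = datetime.now()
assert_similar_time(now, now + timedelta(microseconds=300))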
def isDllInCorrectPath():
"""
Returns True if the BUFFY DLL is present and in the correct location (...\<BTS>\Mods\<BUFFY>\Assets\).
"""
return IS_DLL_IN_CORRECT_PATH | 5,355,751 |
def time_remaining(event_time):
"""
Args:
event_time (time.struct_time): Time of the event.
Returns:
float: Time remaining between now and the event, in
seconds since epoch.
"""
now = time.localtime()
time_remaining = time.mktime(event_time) - time.mktime(now)
return time_remaining | 5,355,752 |
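# Usage sketch for time_remaining above: an event one hour from now.
import time
event = time.localtime(time.time() + 3600)
print(time_remaining(event))   # roughly 3600.0 seconds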
def _repeat(values, count):
"""Produces a list of lists suitable for testing interleave.
Args:
values: for each element `x` the result contains `[x] * x`
count: determines how many times to repeat `[x] * x` in the result
Returns:
A list of lists of values suitable for testing interleave.
"""
return [[value] * value for value in np.tile(values, count)] | 5,355,753 |
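# What _repeat produces for a small input (assumes numpy is imported as np,
# as in the snippet above); element repr may vary by numpy version.
print(_repeat([1, 2, 3], 2))
# [[1], [2, 2], [3, 3, 3], [1], [2, 2], [3, 3, 3]]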
def start_selenium_server(logfile=None, jarpath=None, *params):
"""A hook to start the Selenium Server provided with SeleniumLibrary.
`logfile` must be either an opened file (or file-like object) or None. If
not None, Selenium Server log will be written to it.
`jarpath` must be either the absolute path to the selenium-server.jar or
None. If None, the jar file distributed with the library will be used.
It is possible to give a list of additional command line options to
Selenium Server in `*params`.
A custom automation friendly Firefox profile is enabled by default using
the `-firefoxProfileTemplate` option. If there is `user-extensions.js`
file in the same directory as the jar, it is loaded automatically using the
option `-userExtensions`. For more information about these options, see
the documentation of the start_selenium_server method of the Selenium
class.
Note that this function requires `subprocess` module which is available
on Python/Jython 2.5 or newer.
"""
if not subprocess:
raise RuntimeError('This function requires `subprocess` module which '
'is available on Python/Jython 2.5 or newer.')
cmd = _server_startup_command(jarpath, *params)
try:
subprocess.Popen(cmd, stdout=logfile, stderr=subprocess.STDOUT)
except OSError:
raise RuntimeError('Starting Selenium Server failed. Check that you '
'have Java 1.5 or newer installed by running '
'`java -version` on the command prompt.')
print 'Selenium Server started with command "%s" ' % ' '.join(cmd) | 5,355,754 |
def P2D_l_TAN(df, cond, attr): # P(attr | 'target', cond)
"""Calcule la probabilité d'un attribut sachant la classe et un autre attribut.
Parameters
----------
df : pandas.DataFrame
La base d'examples.
cond : str
Le nom de l'attribut conditionnant.
attr : str
Le nom de l'attribut conditionné.
Returns
-------
dict of (int, number): (dict of number: float)
Un dictionnaire associant au couple (`t`, `c`), de classe `t` et de valeur
d'attribut conditionnant `c`, un dictionnaire qui associe à
la valeur `a` de l'attribut conditionné la probabilité
.. math:: P(attr=a|target=t,cond=c).
"""
joint_target_cond_attr = getJoint(df, ['target', cond, attr])
joint_target_cond = getJoint(df, ['target', cond])
raw_dico = dict(divide(joint_target_cond_attr, joint_target_cond))
dicos = [{(k_t, k_c): {k_a: proba}}
for (k_t, k_c, k_a), proba in raw_dico.items()]
res = {}
reduce(reduce_update, [res] + dicos)
return res | 5,355,755 |
def has_no_jump(bigram, peaks_groundtruth):
"""
    Tell whether the two components of the bigram are identical or adjacent in the sequence of valid peaks.
    For example, if groundtruth = [1,2,3], then [1,1] and [2,3] have no jump but [1,3] has a jump.
bigram : the bigram to judge
peaks_groundtruth : the list of valid peaks
Return boolean
"""
assert len(bigram) == 2
if len(set(bigram)) == 1:
return True
sorted_groundtruth = sorted(peaks_groundtruth)
sorted_peaks = sorted(list(bigram))
begin = sorted_groundtruth.index(sorted_peaks[0])
end = begin+len(sorted_peaks)
return sorted_peaks == sorted_groundtruth[begin:end] | 5,355,756 |
def Base64WSDecode(s):
"""
Return decoded version of given Base64 string. Ignore whitespace.
Uses URL-safe alphabet: - replaces +, _ replaces /. Will convert s of type
unicode to string type first.
@param s: Base64 string to decode
@type s: string
@return: original string that was encoded as Base64
@rtype: bytes
@raise Base64DecodingError: If length of string (ignoring whitespace) is one
more than a multiple of four.
"""
s = RawString(s) # Base64W decode can only work with strings
s = ''.join(s.splitlines())
s = str(s.replace(" ", "")) # kill whitespace, make string (not unicode)
d = len(s) % 4
if d == 1:
raise kzr_errors.Base64DecodingError()
elif d == 2:
s += "=="
elif d == 3:
s += "="
s = RawBytes(s)
try:
return base64.urlsafe_b64decode(s)
except TypeError:
# Decoding raises TypeError if s contains invalid characters.
raise kzr_errors.Base64DecodingError() | 5,355,757 |
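# Usage sketch for Base64WSDecode above, assuming the module's RawString/RawBytes
# helpers are plain str/bytes conversions as their names suggest.
print(Base64WSDecode("aGVs bG8"))   # b'hello' -- whitespace ignored, padding restored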
def make_window():
"""create the window"""
window = Tk()
window.title("Pac-Man")
window.geometry("%dx%d+%d+%d" % (
WINDOW_WIDTH,
WINDOW_HEIGHT,
X_WIN_POS,
Y_WIN_POS
)
)
window = window
return window | 5,355,758 |
def construct_scrape_regex_patterns(scrape_info: dict[str, Union[ParseResult, str]]) -> dict[str, Union[ParseResult, str]]:
""" Construct regex patterns for seasons/episodes """
logger.debug("Constructing scrape regexes")
for info in scrape_info:
if info == 'url':
continue
if info == 'seasons':
if scrape_info[info] is not None:
if re.search(r'/season-\d{1,6}', scrape_info['url'].geturl()):
logger.warning("Season already specified in url")
raise exceptions.InvalidInput("Season already specified in url")
scrape_info['seasons'] = parse_scrape_info(scrape_info[info])
else:
s = re.search(r'/season-(\d{1,6})', scrape_info['url'].geturl())
if s:
scrape_info['seasons'] = s.group(1)
else:
scrape_info['seasons'] = r'\d{1,6}'
if info == 'episodes':
if scrape_info[info] is not None:
if re.search(r'/episode-\d{1,6}', scrape_info['url'].geturl()):
logger.warning("Episode already specified in url")
raise exceptions.InvalidInput("Episode already specified in url")
scrape_info['episodes'] = parse_scrape_info(scrape_info[info])
else:
e = re.search(r'/episode-(\d{1,6})', scrape_info['url'].geturl())
if e:
scrape_info['episodes'] = e.group(1)
else:
scrape_info['episodes'] = r'\d{1,6}'
return scrape_info | 5,355,759 |
def hasf(e):
"""
Returns a function which if applied with `x` tests whether `x` has `e`.
Examples
--------
>>> filter(hasf("."), ['statement', 'A sentence.'])
['A sentence.']
"""
return lambda x: e in x | 5,355,760 |
def borehole_model(x, theta):
"""Given x and theta, return matrix of [row x] times [row theta] of values."""
return f | 5,355,761 |
def findNodesOnHostname(hostname):
"""Return the list of nodes name of a (non-dmgr) node on the given hostname, or None
Function parameters:
hostname - the hostname to check, with or without the domain suffix
"""
m = "findNodesOnHostname:"
nodes = []
for nodename in listNodes():
if hostname.lower() == getNodeHostname(nodename).lower():
sop(m, "Found node %s which is on %s" % (nodename, hostname))
nodes.append(nodename)
#endif
#endfor
# Not found - try matching without domain - z/OS systems might not have domain configured
shorthostname = hostname.split(".")[0].lower()
for nodename in listNodes():
shortnodehostname = getNodeHostname(nodename).split(".")[0].lower()
if shortnodehostname == shorthostname:
if nodename in nodes :
sop(m, "Node name %s was already found with the domain attached" % nodename)
else :
nodes.append(nodename)
sop(m, "Found node %s which is on %s" % (nodename, hostname))
#endif
#endif
#endfor
if len(nodes) == 0 :
sop(m,"WARNING: Unable to find any node with the hostname %s (not case-sensitive)" % hostname)
sop(m,"HERE are the hostnames that your nodes think they're on:")
for nodename in listNodes():
sop(m,"\tNode %s: hostname %s" % (nodename, getNodeHostname(nodename)))
#endfor
return None
else :
return nodes
#endif | 5,355,762 |
def test_transaction_saved_from_new(session):
"""Assert that the payment is saved to the table."""
payment_account = factory_payment_account()
payment = factory_payment()
payment_account.save()
payment.save()
invoice = factory_invoice(payment.id, payment_account.id)
invoice.save()
fee_schedule = FeeSchedule.find_by_filing_type_and_corp_type('CP', 'OTANN')
line = factory_payment_line_item(invoice.id, fee_schedule_id=fee_schedule.fee_schedule_id)
line.save()
payment_transaction = PaymentTransactionService()
payment_transaction.status_code = 'DRAFT'
payment_transaction.transaction_end_time = datetime.now()
payment_transaction.transaction_start_time = datetime.now()
payment_transaction.pay_system_url = 'http://google.com'
payment_transaction.client_system_url = 'http://google.com'
payment_transaction.payment_id = payment.id
payment_transaction = payment_transaction.save()
transaction = PaymentTransactionService.find_by_id(payment.id, payment_transaction.id)
assert transaction is not None
assert transaction.id is not None
assert transaction.status_code is not None
assert transaction.payment_id is not None
assert transaction.client_system_url is not None
assert transaction.pay_system_url is not None
assert transaction.transaction_start_time is not None
assert transaction.transaction_end_time is not None | 5,355,763 |
def MakeControlClass( controlClass, name = None ):
"""Given a CoClass in a generated .py file, this function will return a Class
object which can be used as an OCX control.
This function is used when you do not want to handle any events from the OCX
control. If you need events, then you should derive a class from both the
activex.Control class and the CoClass
"""
if name is None:
name = controlClass.__name__
return new_type("OCX" + name, (Control, controlClass), {}) | 5,355,764 |
def obtenTipoNom(linea):
""" Obtiene por ahora la primera palabra del título, tendría que regresar de que se trata"""
res = linea.split('\t')
return res[6].partition(' ')[0] | 5,355,765 |
def histogramfrom2Darray(array, nbins):
"""
Creates histogram of elements from 2 dimensional array
:param array: input 2 dimensional array
:param nbins: number of bins so that bin size = (maximum value in array - minimum value in array) / nbins
the motivation for returning this array is for the purpose of easily plotting with matplotlib
:return: list of three elements:
list[0] = length nbins list of integers, a histogram of the array elements
list[1] = length nbins list of values of array element types, values of the lower end of the bins
list[2] = [minimum in list, maximum in list]
this is just good to know sometimes.
"""
#find minimum
minimum = np.min(array)
#find maximu
maximum = np.max(array)
#compute bin size
binsize = (maximum - minimum) / nbins
#create bin array
bins = [minimum + binsize * i for i in range(nbins)]
histo = [0 for b in range(nbins)]
    for x in array:
        for y in x:
            #find the index of the bin this element belongs to, clamping the
            #maximum value into the last bin
            index = min(int((y - minimum) / binsize), nbins - 1)
            histo[index] += 1
return [histo, bins, [minimum, maximum]] | 5,355,766 |
def build_pert_reg(unsupervised_regularizer, cut_backg_noise=1.0,
cut_prob=1.0, box_reg_scale_mode='fixed',
box_reg_scale=0.25, box_reg_random_aspect_ratio=False,
cow_sigma_range=(4.0, 8.0), cow_prop_range=(0.0, 1.0),):
"""Build perturbation regularizer."""
if unsupervised_regularizer == 'none':
unsup_reg = None
augment_twice = False
elif unsupervised_regularizer == 'mt':
unsup_reg = regularizers.IdentityRegularizer()
augment_twice = False
elif unsupervised_regularizer == 'aug':
unsup_reg = regularizers.IdentityRegularizer()
augment_twice = True
elif unsupervised_regularizer == 'cutout':
unsup_reg = regularizers.BoxMaskRegularizer(
cut_backg_noise, cut_prob, box_reg_scale_mode, box_reg_scale,
box_reg_random_aspect_ratio)
augment_twice = False
elif unsupervised_regularizer == 'aug_cutout':
unsup_reg = regularizers.BoxMaskRegularizer(
cut_backg_noise, cut_prob, box_reg_scale_mode, box_reg_scale,
box_reg_random_aspect_ratio)
augment_twice = True
elif unsupervised_regularizer == 'cowout':
unsup_reg = regularizers.CowMaskRegularizer(
cut_backg_noise, cut_prob, cow_sigma_range, cow_prop_range)
augment_twice = False
elif unsupervised_regularizer == 'aug_cowout':
unsup_reg = regularizers.CowMaskRegularizer(
cut_backg_noise, cut_prob, cow_sigma_range, cow_prop_range)
augment_twice = True
else:
raise ValueError('Unknown supervised_regularizer \'{}\''.format(
unsupervised_regularizer))
return unsup_reg, augment_twice | 5,355,767 |
def part_5b_avg_std_dev_of_replicates_analysis_completed(*jobs):
"""Check that the initial job data is written to the json files."""
file_written_bool_list = []
all_file_written_bool_pass = False
for job in jobs:
data_written_bool = False
if job.isfile(
f"../../src/engines/gomc/averagesWithinReplicatez.txt"
) and job.isfile(f"../../src/engines/gomc/setAverages.txt"):
data_written_bool = True
file_written_bool_list.append(data_written_bool)
if False not in file_written_bool_list:
all_file_written_bool_pass = True
return all_file_written_bool_pass | 5,355,768 |
def get_ifort_version(conf, fc):
"""get the compiler version"""
version_re = re.compile(r"ifort\s*\(IFORT\)\s*(?P<major>\d*)\.(?P<minor>\d*)", re.I).search
cmd = fc + ['--version']
out, err = fc_config.getoutput(conf, cmd, stdin=False)
if out:
match = version_re(out)
else:
match = version_re(err)
if not match:
conf.fatal('cannot determine ifort version.')
k = match.groupdict()
conf.env['FC_VERSION'] = (k['major'], k['minor']) | 5,355,769 |
def exportFlatClusterData(filename, root_dir, dataset_name, new_row_header,new_column_header,xt,ind1,ind2,display):
""" Export the clustered results as a text file, only indicating the flat-clusters rather than the tree """
filename = string.replace(filename,'.pdf','.txt')
export_text = export.ExportFile(filename)
column_header = string.join(['UID','row_clusters-flat']+new_column_header,'\t')+'\n' ### format column-names for export
export_text.write(column_header)
column_clusters = string.join(['column_clusters-flat','']+ map(str, ind2),'\t')+'\n' ### format column-flat-clusters for export
export_text.write(column_clusters)
### The clusters, dendrogram and flat clusters are drawn bottom-up, so we need to reverse the order to match
#new_row_header = new_row_header[::-1]
#xt = xt[::-1]
try: elite_dir = getGOEliteExportDir(root_dir,dataset_name)
except Exception: elite_dir = None
elite_columns = string.join(['InputID','SystemCode'])
try: sy = systemCodeCheck(new_row_header)
except Exception: sy = None
### Export each row in the clustered data matrix xt
i=0
cluster_db={}
export_lines = []
for row in xt:
id = new_row_header[i]
if sy == '$En:Sy':
cluster = 'cluster-'+string.split(id,':')[0]
elif sy == 'S' and ':' in id:
cluster = 'cluster-'+string.split(id,':')[0]
elif sy == 'Sy' and ':' in id:
cluster = 'cluster-'+string.split(id,':')[0]
else:
cluster = 'c'+str(ind1[i])
try: cluster_db[cluster].append(new_row_header[i])
except Exception: cluster_db[cluster] = [new_row_header[i]]
export_lines.append(string.join([new_row_header[i],str(ind1[i])]+map(str, row),'\t')+'\n')
i+=1
### Reverse the order of the file
export_lines.reverse()
for line in export_lines:
export_text.write(line)
export_text.close()
### Export GO-Elite input files
allGenes={}
for cluster in cluster_db:
export_elite = export.ExportFile(elite_dir+'/'+cluster+'.txt')
if sy==None:
export_elite.write('ID\n')
else:
export_elite.write('ID\tSystemCode\n')
for id in cluster_db[cluster]:
if sy == '$En:Sy':
id = string.split(id,':')[1]
ids = string.split(id,' ')
if 'ENS' in ids[0]: id = ids[0]
else: id = ids[-1]
sc = 'En'
elif sy == 'Sy' and ':' in id:
id = string.split(id,':')[1]
ids = string.split(id,' ')
sc = 'Sy'
elif sy == 'En:Sy':
id = string.split(id,' ')[0]
sc = 'En'
elif sy == 'Ae':
l = string.split(id,':')
if len(l)==2:
id = string.split(id,':')[0] ### Use the Ensembl
if len(l) == 3:
id = string.split(id,':')[1] ### Use the Ensembl
sc = 'En'
else:
sc = sy
if sy == 'S':
if ':' in id:
id = string.split(id,':')[-1]
sc = 'Ae'
try: export_elite.write(id+'\t'+sc+'\n')
except Exception: export_elite.write(id+'\n') ### if no System Code known
allGenes[id]=[]
export_elite.close()
try:
if storeGeneSetName != None:
if len(storeGeneSetName)>0 and 'driver' not in justShowTheseIDs:
exportCustomGeneSet(storeGeneSetName,species,allGenes)
print 'Exported geneset to "StoredGeneSets"'
except Exception: pass
### Export as CDT file
filename = string.replace(filename,'.txt','.cdt')
if display:
try: exportJTV(filename, new_column_header, new_row_header)
except Exception: pass
export_cdt = export.ExportFile(filename)
column_header = string.join(['UNIQID','NAME','GWEIGHT']+new_column_header,'\t')+'\n' ### format column-names for export
export_cdt.write(column_header)
eweight = string.join(['EWEIGHT','','']+ ['1']*len(new_column_header),'\t')+'\n' ### format column-flat-clusters for export
export_cdt.write(eweight)
### Export each row in the clustered data matrix xt
i=0; cdt_lines=[]
for row in xt:
cdt_lines.append(string.join([new_row_header[i]]*2+['1']+map(str, row),'\t')+'\n')
i+=1
### Reverse the order of the file
cdt_lines.reverse()
for line in cdt_lines:
export_cdt.write(line)
export_cdt.close()
return elite_dir, filename | 5,355,770 |
def indent_multiline(s: str, indentation: str = " ", add_newlines: bool = True) -> str:
"""Indent the given string if it contains more than one line.
Args:
s: String to indent
indentation: Indentation to prepend to each line.
add_newlines: Whether to add newlines surrounding the result
if indentation was added.
"""
lines = s.splitlines()
if len(lines) <= 1:
return s
lines_str = "\n".join(f"{indentation}{line}" for line in lines)
if add_newlines:
return f"\n{lines_str}\n"
else:
return lines_str | 5,355,771 |
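# Usage sketch for indent_multiline above: single-line input is returned
# unchanged, multi-line input is indented and wrapped in newlines.
print(repr(indent_multiline("single line")))    # 'single line'
print(repr(indent_multiline("line1\nline2")))   # '\n    line1\n    line2\n'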
def _get_property(self, key: str, *, offset: int = 0) -> Optional[int]:
"""Get a property from the location details.
:param key: The key for the property
:param offset: Any offset to apply to the value (if found)
:returns: The property as an int value if found, None otherwise
"""
value = self.location_details.get(key)
if value is None:
return None
return int(value[0]) + offset | 5,355,772 |
def pca_normalization(points):
"""Projects points onto the directions of maximum variance."""
points = np.transpose(points)
pca = PCA(n_components=len(np.transpose(points)))
points = pca.fit_transform(points)
return np.transpose(points) | 5,355,773 |
def _reformTrend(percs, inits):
"""
Helper function to recreate original trend based on percent change data.
"""
trend = []
trend.append(percs[0])
for i in range(1, len(percs)):
newLine = []
newLine.append(percs[i][0]) #append the date
for j in range(1, len(percs[i])): #for each term on date
level = float(trend[i-1][j]) * percs[i][j].numerator / percs[i][j].denominator #level is the prev level * %change
newLine.append(level)
trend.append(newLine)
return trend | 5,355,774 |
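# Usage sketch for _reformTrend above, assuming each percent-change entry is a
# fractions.Fraction of new/old level and the first row carries the initial level;
# the unused `inits` argument is passed as None.
from fractions import Fraction
percs = [['2020-01', 100.0],
         ['2020-02', Fraction(3, 2)],   # level multiplied by 1.5
         ['2020-03', Fraction(1, 2)]]   # level halved
print(_reformTrend(percs, None))
# [['2020-01', 100.0], ['2020-02', 150.0], ['2020-03', 75.0]]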
def create_zenpack_srcdir(zenpack_name):
"""Create a new ZenPack source directory."""
import shutil
import errno
if os.path.exists(zenpack_name):
sys.exit("{} directory already exists.".format(zenpack_name))
print "Creating source directory for {}:".format(zenpack_name)
zenpack_name_parts = zenpack_name.split('.')
packages = reduce(
lambda x, y: x + ['.'.join((x[-1], y))],
zenpack_name_parts[1:],
['ZenPacks'])
namespace_packages = packages[:-1]
# Create ZenPacks.example.Thing/ZenPacks/example/Thing directory.
module_directory = os.path.join(zenpack_name, *zenpack_name_parts)
try:
print " - making directory: {}".format(module_directory)
os.makedirs(module_directory)
except OSError as e:
if e.errno == errno.EEXIST:
sys.exit("{} directory already exists.".format(zenpack_name))
else:
sys.exit(
"Failed to create {!r} directory: {}"
.format(zenpack_name, e.strerror))
# Create setup.py.
setup_py_fname = os.path.join(zenpack_name, 'setup.py')
print " - creating file: {}".format(setup_py_fname)
with open(setup_py_fname, 'w') as setup_py_f:
setup_py_f.write(
SETUP_PY.format(
zenpack_name=zenpack_name,
namespace_packages=namespace_packages,
packages=packages))
# Create MANIFEST.in.
manifest_in_fname = os.path.join(zenpack_name, 'MANIFEST.in')
print " - creating file: {}".format(manifest_in_fname)
with open(manifest_in_fname, 'w') as manifest_in_f:
manifest_in_f.write("graft ZenPacks\n")
# Create __init__.py files in all namespace directories.
for namespace_package in namespace_packages:
namespace_init_fname = os.path.join(
zenpack_name,
os.path.join(*namespace_package.split('.')),
'__init__.py')
print " - creating file: {}".format(namespace_init_fname)
with open(namespace_init_fname, 'w') as namespace_init_f:
namespace_init_f.write(
"__import__('pkg_resources').declare_namespace(__name__)\n")
# Create __init__.py in ZenPack module directory.
init_fname = os.path.join(module_directory, '__init__.py')
print " - creating file: {}".format(init_fname)
with open(init_fname, 'w') as init_f:
init_f.write(
"from . import zenpacklib\n\n"
"zenpacklib.load_yaml()\n")
# Create zenpack.yaml in ZenPack module directory.
yaml_fname = os.path.join(module_directory, 'zenpack.yaml')
print " - creating file: {}".format(yaml_fname)
with open(yaml_fname, 'w') as yaml_f:
yaml_f.write("name: {}\n".format(zenpack_name))
# Copy zenpacklib.py (this file) into ZenPack module directory.
print " - copying: {} to {}".format(__file__, module_directory)
shutil.copy2(__file__, module_directory) | 5,355,775 |
def PET_initialize_compression_structure(N_axial,N_azimuthal,N_u,N_v):
"""Obtain 'offsets' and 'locations' arrays for fully sampled PET compressed projection data. """
descriptor = [{'name':'N_axial','type':'uint','value':N_axial},
{'name':'N_azimuthal','type':'uint','value':N_azimuthal},
{'name':'N_u','type':'uint','value':N_u},
{'name':'N_v','type':'uint','value':N_v},
{
'name':'offsets','type':'array','value':None,
'dtype':np.int32,'size':(N_azimuthal,N_axial),
'order':'F'
},
{
'name':'locations','type':'array','value':None,
'dtype':np.uint16,
'size':(3,N_u * N_v * N_axial * N_azimuthal),'order':'F'
},
]
r = call_c_function(niftyrec_c.PET_initialize_compression_structure,
descriptor)
if not r.status == status_success():
raise ErrorInCFunction(
"The execution of 'PET_initialize_compression_structure' was unsuccessful.",
r.status,
'niftyrec_c.PET_initialize_compression_structure')
return [r.dictionary['offsets'],r.dictionary['locations']] | 5,355,776 |
def tic():
"""Mimics Matlab's tic toc"""
global __start_time_for_tictoc__
__start_time_for_tictoc__ = time.time() | 5,355,777 |
def get_client_from_user_settings(settings_obj):
"""Same as get client, except its argument is a DropboxUserSettingsObject."""
return get_client(settings_obj.owner) | 5,355,778 |
def train_student(
model,
dataset,
test_data,
test_labels,
nb_labels,
nb_teachers,
stdnt_share,
lap_scale,
):
"""This function trains a student using predictions made by an ensemble of
teachers. The student and teacher models are trained using the same neural
network architecture.
:param dataset: string corresponding to mnist, cifar10, or svhn
:param nb_teachers: number of teachers (in the ensemble) to learn from
:return: True if student training went well
"""
# Call helper function to prepare student data using teacher predictions
stdnt_dataset = prepare_student_data(
model,
dataset,
test_data,
test_labels,
nb_labels,
nb_teachers,
stdnt_share,
lap_scale,
)
# Unpack the student dataset
stdnt_data, stdnt_labels, stdnt_test_data, stdnt_test_labels = stdnt_dataset
# Prepare checkpoint filename and path
filename = str(dataset) + "_" + str(nb_teachers) + "_student.ckpt"
stdnt_prep = PrepareData(stdnt_data, stdnt_labels)
stdnt_loader = DataLoader(stdnt_prep, batch_size=64, shuffle=False)
stdnt_test_prep = PrepareData(stdnt_test_data, stdnt_test_labels)
stdnt_test_loader = DataLoader(stdnt_test_prep, batch_size=64, shuffle=False)
# Start student training
train(model, stdnt_loader, stdnt_test_loader, ckpt_path, filename)
# Compute final checkpoint name for student
student_preds = softmax_preds(
model, nb_labels, stdnt_test_loader, ckpt_path + filename
)
# Compute teacher accuracy
precision = accuracy(student_preds, stdnt_test_labels)
print("\nPrecision of student after training: " + str(precision))
return True | 5,355,779 |
def create_transition_matrix_numeric(mu, d, v):
"""
Use numerical integration.
This is not so compatible with algopy because it goes through fortran.
Note that d = 2*h - 1 following Kimura 1957.
The rate mu is a catch-all scaling factor.
The finite distribution v is assumed to be a stochastic vector.
@param mu: scales the rate matrix
@param d: dominance (as opposed to recessiveness) of preferred states.
@param v: numpy array defining a distribution over states
@return: transition matrix
"""
# Construct the numpy matrix whose entries
# are differences of log equilibrium probabilities.
# Everything in this code block is pure numpy.
F = numpy.log(v)
e = numpy.ones_like(F)
S = numpy.outer(e, F) - numpy.outer(F, e)
# Create the rate matrix Q and return its matrix exponential.
# Things in this code block may use algopy if mu and d
# are bundled with truncated Taylor information.
D = d * numpy.sign(S)
pre_Q = numpy.vectorize(numeric_fixation)(0.5*S, D)
pre_Q = mu * pre_Q
Q = pre_Q - algopy.diag(algopy.sum(pre_Q, axis=1))
P = algopy.expm(Q)
return P | 5,355,780 |
def parse_integrate(filename='INTEGRATE.LP'):
"""
Harvest data from INTEGRATE
"""
if not os.path.exists(filename):
return {'failure': 'Integration step failed'}
info = parser.parse(filename, 'integrate')
for batch, frames in zip(info.get('batches',[]), info.pop('batch_frames', [])):
batch.update(frames)
return info | 5,355,781 |
def test_backup_replica_resumes_ordering_on_lag_in_checkpoints(
looper, chkFreqPatched, reqs_for_checkpoint,
one_replica_and_others_in_backup_instance,
sdk_pool_handle, sdk_wallet_client, view_change_done, txnPoolNodeSet):
"""
Verifies resumption of ordering 3PC-batches on a backup replica
on detection of a lag in checkpoints
"""
slow_replica, other_replicas = one_replica_and_others_in_backup_instance
view_no = slow_replica.viewNo
# Send a request and ensure that the replica orders the batch for it
sdk_send_random_requests(looper, sdk_pool_handle, sdk_wallet_client, 1)
looper.run(
eventually(lambda: assert_eq(slow_replica.last_ordered_3pc, (view_no, 2)),
retryWait=1,
timeout=waits.expectedTransactionExecutionTime(nodeCount)))
# Don't receive Commits from two replicas
slow_replica.node.nodeIbStasher.delay(
cDelay(instId=1, sender_filter=other_replicas[0].node.name))
slow_replica.node.nodeIbStasher.delay(
cDelay(instId=1, sender_filter=other_replicas[1].node.name))
# Send a request for which the replica will not be able to order the batch
# due to an insufficient count of Commits
sdk_send_random_requests(looper, sdk_pool_handle, sdk_wallet_client, 1)
looper.runFor(waits.expectedTransactionExecutionTime(nodeCount))
# Recover reception of Commits
slow_replica.node.nodeIbStasher.drop_delayeds()
slow_replica.node.nodeIbStasher.resetDelays()
# Send requests but in a quantity insufficient
# for catch-up number of checkpoints
sdk_send_random_requests(looper, sdk_pool_handle, sdk_wallet_client,
Replica.STASHED_CHECKPOINTS_BEFORE_CATCHUP *
reqs_for_checkpoint - 3)
looper.runFor(waits.expectedTransactionExecutionTime(nodeCount))
# Ensure that the replica has not ordered any batches
# after the very first one
assert slow_replica.last_ordered_3pc == (view_no, 2)
# Ensure that the watermarks have not been shifted since the view start
assert slow_replica.h == 0
assert slow_replica.H == LOG_SIZE
# Ensure that the collections related to requests, batches and
# own checkpoints are not empty.
# (Note that a primary replica removes requests from requestQueues
# when creating a batch with them.)
if slow_replica.isPrimary:
assert slow_replica._ordering_service.sentPrePrepares
else:
assert slow_replica._ordering_service.requestQueues[DOMAIN_LEDGER_ID]
assert slow_replica._ordering_service.prePrepares
assert slow_replica._ordering_service.prepares
assert slow_replica._ordering_service.commits
assert slow_replica._ordering_service.batches
assert slow_replica._checkpointer._checkpoint_state
# Ensure that there are some quorumed stashed checkpoints
assert slow_replica._checkpointer._stashed_checkpoints_with_quorum()
# Send more requests to reach catch-up number of checkpoints
sdk_send_random_and_check(looper, txnPoolNodeSet, sdk_pool_handle,
sdk_wallet_client, reqs_for_checkpoint)
# Ensure that the replica has adjusted last_ordered_3pc to the end
# of the last checkpoint
looper.run(
eventually(lambda *args: assertExp(slow_replica.last_ordered_3pc == \
(view_no, (Replica.STASHED_CHECKPOINTS_BEFORE_CATCHUP + 1) * CHK_FREQ)),
slow_replica,
retryWait=1,
timeout=waits.expectedTransactionExecutionTime(nodeCount)))
# Ensure that the watermarks have been shifted so that the lower watermark
# has the same value as last_ordered_3pc
assert slow_replica.h == (Replica.STASHED_CHECKPOINTS_BEFORE_CATCHUP + 1) * CHK_FREQ
assert slow_replica.H == (Replica.STASHED_CHECKPOINTS_BEFORE_CATCHUP + 1) * CHK_FREQ + LOG_SIZE
# Ensure that the collections related to requests, batches and
# own checkpoints have been cleared
assert not slow_replica._ordering_service.requestQueues[DOMAIN_LEDGER_ID]
assert not slow_replica._ordering_service.sentPrePrepares
assert not slow_replica._ordering_service.prePrepares
assert not slow_replica._ordering_service.prepares
assert not slow_replica._ordering_service.commits
assert not slow_replica._ordering_service.batches
assert not slow_replica._checkpointer._checkpoint_state
# Ensure that now there are no quorumed stashed checkpoints
assert not slow_replica._checkpointer._stashed_checkpoints_with_quorum()
# Send a request and ensure that the replica orders the batch for it
sdk_send_random_requests(looper, sdk_pool_handle, sdk_wallet_client, 1)
looper.run(
eventually(lambda *args: assertExp(slow_replica.last_ordered_3pc ==
(view_no, (Replica.STASHED_CHECKPOINTS_BEFORE_CATCHUP + 1) * CHK_FREQ + 1)),
slow_replica,
retryWait=1,
timeout=waits.expectedTransactionExecutionTime(nodeCount))) | 5,355,782 |
def eprint(msg):
"""
Prints given ``msg`` into sys.stderr as nose test runner hides all output
from sys.stdout by default and if we want to pipe stream somewhere we don't
need those verbose messages anyway.
Appends line break.
"""
sys.stderr.write(msg)
sys.stderr.write('\n') | 5,355,783 |
def channelmap(stream: Stream, *args, **kwargs) -> FilterableStream:
"""https://ffmpeg.org/ffmpeg-filters.html#channelmap"""
return filter(stream, channelmap.__name__, *args, **kwargs) | 5,355,784 |
def test_token(current_user: DBUser = Depends(get_current_user)):
"""
Test access-token
"""
return current_user | 5,355,785 |
def locate_data(name, check_exists=True):
"""Locate the named data file.
Data files under mls/data/ are copied when this package is installed.
This function locates these files relative to the install directory.
Parameters
----------
name : str
Path of data file relative to mls/data.
check_exists : bool
Raise a RuntimeError if the named file does not exist when this is True.
Returns
-------
str
Path of data file within installation directory.
"""
import mls
pkg_path = mls.__path__[0]
path = os.path.join(pkg_path, 'data', name)
if check_exists and not os.path.exists(path):
raise RuntimeError('No such data file: {}'.format(path))
return path | 5,355,786 |
def links():
"""
For type hints, read `PEP 484`_.
See the `Python home page <http://www.python.org>`_ for info.
.. _PEP 484:
https://www.python.org/dev/peps/pep-0484/
""" | 5,355,787 |
def process_label_imA(im):
"""Crop a label image so that the result contains
all labels, then return separate images, one for
each label.
Returns a dictionary of images and corresponding
labels (for choosing colours), also a scene bounding
box. Need to run shape statistics to determine
the number of labels and the IDs
"""
# stuff to figure out which way we slice, etc
isoidx = check_isotropy(im)
otheridx = [0, 1, 2]
otheridx.remove(isoidx)
direction = get_direction(im, isoidx)
sp = im.GetSpacing()
sp = str2ds(sp)
spacing = [sp[i] for i in otheridx]
slthickness = sp[isoidx]
labstats = sitk.LabelShapeStatisticsImageFilter()
labstats.Execute(im)
labels = labstats.GetLabels()
boxes = [labstats.GetBoundingBox(i) for i in labels]
# Need to compute bounding box for all labels, as
# this will set the row/colums
# boxes are corner and size - this code assumes 3D
corners = [(x[0], x[1], x[2]) for x in boxes]
othercorner = [(x[0] + x[3] - 1,
x[1] + x[4] - 1,
x[2] + x[5] - 1) for x in boxes]
sizes = [(x[3], x[4], x[5]) for x in boxes]
all_low_x = [C[0] for C in corners]
all_low_y = [C[1] for C in corners]
all_low_z = [C[2] for C in corners]
low_x = min(all_low_x)
low_y = min(all_low_y)
low_z = min(all_low_z)
lowcorner = (low_x, low_y, low_z)
all_high_x = [C[0] for C in othercorner]
all_high_y = [C[1] for C in othercorner]
all_high_z = [C[2] for C in othercorner]
high_x = max(all_high_x)
high_y = max(all_high_y)
high_z = max(all_high_z)
highcorner = (high_x, high_y, high_z)
allsize = (highcorner[0] - lowcorner[0] + 1,
highcorner[1] - lowcorner[1] + 1,
highcorner[2] - lowcorner[2] + 1)
# corners [otheridx] and size[otheridx] should be all the same
newcorners = [list(x) for x in corners]
newsizes = [list(x) for x in sizes]
a = otheridx[0]
b = otheridx[1]
for f in range(len(newcorners)):
newcorners[f][a] = lowcorner[a]
newcorners[f][b] = lowcorner[b]
newsizes[f][a] = allsize[a]
newsizes[f][b] = allsize[b]
ims = [sitk.RegionOfInterest(im, allsize,
lowcorner) == labels[i]
for i in range(len(labels))]
imcrop = sitk.RegionOfInterest(im, allsize, lowcorner)
return({'rois': ims, 'labels': labels,
'original': im, 'cropped': imcrop}) | 5,355,788 |
def render_graphs(csv_data, append_titles=""):
"""
Convenience function. Gets the aggregated `monthlies` data from
`aggregate_monthly_data(csv_data)` and returns a dict of graph
titles mapped to rendered SVGs from `monthly_total_precip_line()`
and `monthly_avg_min_max_temp_line()` using the `monthlies` data.
"""
monthlies = aggregate_monthly_data(csv_data)
return {
graph.config.title: graph.render()
for graph in [
monthly_total_precip_line(monthlies, append_titles),
monthly_avg_min_max_temp_line(monthlies, append_titles),
monthly_max_temps_box(monthlies, append_titles),
]
} | 5,355,789 |
def _get_location():
"""Return the location as a string, accounting for this function and the parent in the stack."""
return "".join(traceback.format_stack(limit=STACK_LIMIT + 2)[:-2]) | 5,355,790 |
def test_syscall_client_init():
"""Tests SysCallClient.__init__"""
from apyfal.client.syscall import SysCallClient
from apyfal import Accelerator
import apyfal.configuration as cfg
import apyfal.exceptions as exc
# Test: accelerator_executable_available, checks return type
assert type(cfg.accelerator_executable_available()) is bool
# Mocks some functions
accelerator_available = True
class DummySysCallClient(SysCallClient):
"""Dummy SysCallClient"""
@staticmethod
def _stop(*_, **__):
"""Do Nothing to skip object deletion"""
def dummy_accelerator_executable_available():
"""Return fake result"""
return accelerator_available
cfg_accelerator_executable_available = cfg.accelerator_executable_available
cfg.accelerator_executable_available = (
dummy_accelerator_executable_available)
# Tests
try:
# Accelerator not available
DummySysCallClient()
# Default for Accelerator if no host specified
config = cfg.Configuration()
try:
del config._sections['host']
except KeyError:
pass
client = Accelerator(config=config).client
client._stop = DummySysCallClient._stop # Disable __del__
assert isinstance(client, SysCallClient)
# Accelerator not available
accelerator_available = False
with pytest.raises(exc.HostConfigurationException):
SysCallClient()
# Restores functions
finally:
cfg.accelerator_executable_available = (
cfg_accelerator_executable_available) | 5,355,791 |
def start_session():
"""do nothing here
"""
return Response.failed_response('Error') | 5,355,792 |
def test_pairwise_mlp_init(pairwise_mlp):
"""Test PairwiseMLP.__init__ sets state correctly."""
assert pairwise_mlp.in_features == PROJ_FEATURES
assert pairwise_mlp.hidden_features == PROJ_FEATURES
assert pairwise_mlp.project is not None | 5,355,793 |
def _combine_keras_model_with_trill(embedding_tfhub_handle, aggregating_model):
"""Combines keras model with TRILL model."""
trill_layer = hub.KerasLayer(
handle=embedding_tfhub_handle,
trainable=False,
arguments={'sample_rate': 16000},
output_key='embedding',
output_shape=[None, 2048]
)
input1 = tf.keras.Input([None])
trill_output = trill_layer(input1)
final_out = aggregating_model(trill_output)
final_model = tf.keras.Model(
inputs=input1,
outputs=final_out)
return final_model | 5,355,794 |
def phases(times, names=[]):
""" Creates named phases from a set of times defining the edges of hte intervals """
if not names: names = range(len(times)-1)
return {names[i]:[times[i], times[i+1]] for (i, _) in enumerate(times) if i < len(times)-1} | 5,355,795 |
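# Usage sketch for phases above: three named intervals from four edge times.
print(phases([0, 10, 20, 30], names=['ramp', 'hold', 'cool']))
# {'ramp': [0, 10], 'hold': [10, 20], 'cool': [20, 30]}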
def deploy():
"""
定义一个部署任务
:return:
"""
# 先进行打包
pack()
# 备份服务器上的版本
backup()
# 远程服务器的临时文件
remote_tmp_tar = '/tmp/%s' % TAR_FILE_NAME
run('rm -f %s' % remote_tmp_tar)
# 上传tar文件至远程服务器
put(TAR_FILE_NAME, remote_tmp_tar)
remote_dist_base_dir = '/home/python'
# 如果不存在, 则创建文件夹
run('mkdir -p %s' % remote_dist_base_dir)
with cd(remote_dist_base_dir):
print('解压文件到到目录: %s' % remote_dist_base_dir)
run('tar -xzvf %s' % remote_tmp_tar)
remote_dist_dir = '%s/%s' % (remote_dist_base_dir, PROJECT_NAME)
name = PROJECT_NAME
requirements_file = 'requirements.txt'
print('上传 requirements.txt 文件 %s' % requirements_file)
put(requirements_file, '%s/%s' % (remote_dist_dir, requirements_file))
with cd(remote_dist_dir):
print('解压文件到到目录: %s' % remote_dist_dir)
run('tar -xzvf %s' % remote_tmp_tar)
print('安装 requirements.txt 中的依赖包')
run('pip install -r requirements.txt')
remote_settings_file = '%s/settings.py' % remote_dist_dir
settings_file = './%s/deploy/settings.py' % PROJECT_NAME
print('上传 settings.py 文件 %s' % settings_file)
put(settings_file, remote_settings_file)
# 创建日志文件夹, 因为当前启动 django 进程用的是 nobody, 会没有权限
remote_logs_path = '%s/logs' % remote_dist_dir
# 如果不存在, 则创建文件夹
run('mkdir -p %s' % remote_logs_path)
nginx_file = './%s/deploy/%s.conf' % (PROJECT_NAME, name)
remote_nginx_file = '/etc/nginx/conf.d/%s.conf' % name
print('上传 nginx 配置文件 %s' % nginx_file)
put(nginx_file, remote_nginx_file)
print('设置文件夹权限')
run('chown -R oxygen /home/python/fomalhaut')
supervisor_file = './%s/deploy/%s.ini' % (PROJECT_NAME, name)
remote_supervisor_file = '/etc/supervisord.d/%s.ini' % name
print('上传 supervisor 配置文件 %s' % supervisor_file)
put(supervisor_file, remote_supervisor_file)
run('supervisorctl reload')
run('nginx -s reload')
run('nginx -t')
# run('service nginx restart')
# 删除本地的打包文件
local('rm -f %s' % TAR_FILE_NAME)
# frabic 运行 supervisorctl restart all 会提示有错误, 并中止往下运行
# run('supervisorctl restart all') | 5,355,796 |
def smesolve(H, rho0, times, c_ops=[], sc_ops=[], e_ops=[],
_safe_mode=True, args={}, **kwargs):
"""
Solve stochastic master equation. Dispatch to specific solvers
depending on the value of the `solver` keyword argument.
Parameters
----------
H : :class:`qutip.Qobj`, or time dependent system.
System Hamiltonian.
Can depend on time, see StochasticSolverOptions help for format.
rho0 : :class:`qutip.Qobj`
Initial density matrix or state vector (ket).
times : *list* / *array*
List of times for :math:`t`. Must be uniformly spaced.
c_ops : list of :class:`qutip.Qobj`, or time dependent Qobjs.
Deterministic collapse operator which will contribute with a standard
Lindblad type of dissipation.
Can depend on time, see StochasticSolverOptions help for format.
sc_ops : list of :class:`qutip.Qobj`, or time dependent Qobjs.
List of stochastic collapse operators. Each stochastic collapse
operator will give a deterministic and stochastic contribution
        to the equation of motion according to how the d1 and d2 functions
are defined.
Can depend on time, see StochasticSolverOptions help for format.
e_ops : list of :class:`qutip.Qobj`
single operator or list of operators for which to evaluate
expectation values.
kwargs : *dictionary*
Optional keyword arguments. See
:class:`qutip.stochastic.StochasticSolverOptions`.
Returns
-------
output: :class:`qutip.solver.Result`
An instance of the class :class:`qutip.solver.Result`.
"""
if "method" in kwargs and kwargs["method"] == "photocurrent":
print("stochastic solver with photocurrent method has been moved to "
"it's own function: photocurrent_mesolve")
return photocurrent_mesolve(H, rho0, times, c_ops=c_ops, sc_ops=sc_ops,
e_ops=e_ops, _safe_mode=_safe_mode,
args=args, **kwargs)
if isket(rho0):
rho0 = ket2dm(rho0)
if isinstance(e_ops, dict):
e_ops_dict = e_ops
e_ops = [e for e in e_ops.values()]
else:
e_ops_dict = None
sso = StochasticSolverOptions(True, H=H, state0=rho0, times=times,
c_ops=c_ops, sc_ops=sc_ops, e_ops=e_ops,
args=args, **kwargs)
if _safe_mode:
_safety_checks(sso)
if sso.solver_code == 120:
return _positive_map(sso, e_ops_dict)
sso.LH = liouvillian(sso.H, c_ops=sso.sc_ops + sso.c_ops) * sso.dt
if sso.method == 'homodyne' or sso.method is None:
if sso.m_ops is None:
sso.m_ops = [op + op.dag() for op in sso.sc_ops]
sso.sops = [spre(op) + spost(op.dag()) for op in sso.sc_ops]
if not isinstance(sso.dW_factors, list):
sso.dW_factors = [1] * len(sso.m_ops)
elif len(sso.dW_factors) != len(sso.m_ops):
raise Exception("The len of dW_factors is not the same as m_ops")
elif sso.method == 'heterodyne':
if sso.m_ops is None:
m_ops = []
sso.sops = []
for c in sso.sc_ops:
if sso.m_ops is None:
m_ops += [c + c.dag(), -1j * c - c.dag()]
sso.sops += [(spre(c) + spost(c.dag())) / np.sqrt(2),
(spre(c) - spost(c.dag())) * -1j / np.sqrt(2)]
sso.m_ops = m_ops
if not isinstance(sso.dW_factors, list):
sso.dW_factors = [np.sqrt(2)] * len(sso.sops)
elif len(sso.dW_factors) == len(sso.m_ops):
pass
elif len(sso.dW_factors) == len(sso.sc_ops):
dW_factors = []
for fact in sso.dW_factors:
dW_factors += [np.sqrt(2) * fact, np.sqrt(2) * fact]
sso.dW_factors = dW_factors
elif len(sso.dW_factors) != len(sso.m_ops):
raise Exception("The len of dW_factors is not the same as sc_ops")
elif sso.method == "photocurrent":
raise NotImplementedError("Moved to 'photocurrent_mesolve'")
else:
raise Exception("The method must be one of None, homodyne, heterodyne")
sso.ce_ops = [QobjEvo(spre(op)) for op in sso.e_ops]
sso.cm_ops = [QobjEvo(spre(op)) for op in sso.m_ops]
sso.LH.compile()
[op.compile() for op in sso.sops]
[op.compile() for op in sso.cm_ops]
[op.compile() for op in sso.ce_ops]
if sso.solver_code in [103, 153]:
sso.imp = 1 - sso.LH * 0.5
sso.imp.compile()
sso.solver_obj = SMESolver
sso.solver_name = "smesolve_" + sso.solver
res = _sesolve_generic(sso, sso.options, sso.progress_bar)
if e_ops_dict:
res.expect = {e: res.expect[n]
for n, e in enumerate(e_ops_dict.keys())}
return res | 5,355,797 |
def harmonic_vector(n):
"""
create a vector in the form [1,1/2,1/3,...1/n]
"""
return np.array([[1.0 / i] for i in range(1, n + 1)], dtype='double') | 5,355,798 |
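# Example output of harmonic_vector above (assumes numpy is imported as np).
print(harmonic_vector(4))
# [[1.        ]
#  [0.5       ]
#  [0.33333333]
#  [0.25      ]]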
def session(connection):
"""
Create a transaction and session per test unit.
Rolling back a transaction removes even committed rows
(``session.commit``) from the database.
"""
transaction = connection.begin()
session = Session(bind=connection)
yield session
session.close()
transaction.rollback() | 5,355,799 |