| column | type |
|---|---|
| hexsha | stringlengths 40-40 |
| size | int64 1-1.03M |
| ext | stringclasses 10 values |
| lang | stringclasses 1 value |
| max_stars_repo_path | stringlengths 3-239 |
| max_stars_repo_name | stringlengths 5-130 |
| max_stars_repo_head_hexsha | stringlengths 40-78 |
| max_stars_repo_licenses | sequencelengths 1-10 |
| max_stars_count | int64 1-191k ⌀ |
| max_stars_repo_stars_event_min_datetime | stringlengths 24-24 ⌀ |
| max_stars_repo_stars_event_max_datetime | stringlengths 24-24 ⌀ |
| max_issues_repo_path | stringlengths 3-239 |
| max_issues_repo_name | stringlengths 5-130 |
| max_issues_repo_head_hexsha | stringlengths 40-78 |
| max_issues_repo_licenses | sequencelengths 1-10 |
| max_issues_count | int64 1-67k ⌀ |
| max_issues_repo_issues_event_min_datetime | stringlengths 24-24 ⌀ |
| max_issues_repo_issues_event_max_datetime | stringlengths 24-24 ⌀ |
| max_forks_repo_path | stringlengths 3-239 |
| max_forks_repo_name | stringlengths 5-130 |
| max_forks_repo_head_hexsha | stringlengths 40-78 |
| max_forks_repo_licenses | sequencelengths 1-10 |
| max_forks_count | int64 1-105k ⌀ |
| max_forks_repo_forks_event_min_datetime | stringlengths 24-24 ⌀ |
| max_forks_repo_forks_event_max_datetime | stringlengths 24-24 ⌀ |
| content | stringlengths 1-1.03M |
| avg_line_length | float64 1-958k |
| max_line_length | int64 1-1.03M |
| alphanum_fraction | float64 0-1 |
794030efec87cc222af29be4be72489c696fac20 | 1,873 | py | Python | test/test_delete_contact_from_group.py | Paulss20/python_training | d0e48f453d08f29e1a0f0bfdcee2fb3aecdc5a82 | [
"Apache-2.0"
] | null | null | null | test/test_delete_contact_from_group.py | Paulss20/python_training | d0e48f453d08f29e1a0f0bfdcee2fb3aecdc5a82 | [
"Apache-2.0"
] | null | null | null | test/test_delete_contact_from_group.py | Paulss20/python_training | d0e48f453d08f29e1a0f0bfdcee2fb3aecdc5a82 | [
"Apache-2.0"
] | null | null | null |
from fixture.orm import ORMFixture
from model.group import Group
from model.add_new import AddNew
orm = ORMFixture(host="127.0.0.1", name="addressbook", user="root", password="")
def test_delete_contact_from_group(app, db):
    # collect the lists of groups and contacts from the DB; if they are empty, add a group and a contact
groups = db.get_group_list()
contacts = db.get_contact_list()
if len(groups) == 0:
app.group.create(Group(name="group_to_delete"))
if len(contacts) == 0:
app.contacts.create_contact(AddNew(my_f_name="contact_to_delete"))
    # collect the list of contacts present in the group-contact link table via the ORM; if there are none, create and add them
old_dealed_contacts = orm.get_dealed_contacts()
    # if there is no contact that is already assigned to a group, create a contact and a group and link them
    if len(old_dealed_contacts) == 0:
dealed_group = app.group.create(Group(name="group_to_delete"))
dealed_contact = app.contacts.create_contact(AddNew(my_f_name="contact_to_delete"))
# app.contacts.add_contact_to_group(old_free_contacts[0], old_free_groups[0])
        app.contacts.add_contact_to_group(dealed_contact, dealed_group)
old_dealed_contacts = orm.get_dealed_contacts()
dealed_contact = old_dealed_contacts[0]
dealed_group = orm.get_groups_of_contact(dealed_contact)[0]
app.contacts.remove_contact_from_group(dealed_contact, dealed_group)
print("Contact with id %s from the group with id %s successfully deleted" % (dealed_contact, dealed_group))
    # collect the list of contacts present in the group-contact link table via the ORM after the removal
new_dealed_contacts = orm.get_dealed_contacts()
    # check that the number of contacts assigned to groups has decreased by 1
assert len(old_dealed_contacts) == len(new_dealed_contacts) + 1 | 48.025641 | 134 | 0.741591 |
794031236fbbe097d28e7a40779b9eee35ef4fe4 | 10,918 | py | Python | examples/inspection/plot_partial_dependence.py | matiasrvazquez/scikit-learn | e821a9e8a0d4ef63b1219faf9ab902ad0fd4b181 | [
"BSD-3-Clause"
] | 2 | 2017-11-22T08:20:15.000Z | 2017-11-22T08:23:14.000Z | examples/inspection/plot_partial_dependence.py | matiasrvazquez/scikit-learn | e821a9e8a0d4ef63b1219faf9ab902ad0fd4b181 | [
"BSD-3-Clause"
] | 1 | 2022-03-06T18:49:03.000Z | 2022-03-06T18:49:03.000Z | examples/inspection/plot_partial_dependence.py | matiasrvazquez/scikit-learn | e821a9e8a0d4ef63b1219faf9ab902ad0fd4b181 | [
"BSD-3-Clause"
] | null | null | null | """
===============================================================
Partial Dependence and Individual Conditional Expectation Plots
===============================================================
Partial dependence plots show the dependence between the target function [2]_
and a set of features of interest, marginalizing over the values of all other
features (the complement features). Due to the limits of human perception, the
size of the set of features of interest must be small (usually, one or two);
thus they are usually chosen among the most important features.
Similarly, an individual conditional expectation (ICE) plot [3]_
shows the dependence between the target function and a feature of interest.
However, unlike partial dependence plots, which show the average effect of the
features of interest, ICE plots visualize the dependence of the prediction on a
feature for each :term:`sample` separately, with one line per sample.
Only one feature of interest is supported for ICE plots.
This example shows how to obtain partial dependence and ICE plots from a
:class:`~sklearn.neural_network.MLPRegressor` and a
:class:`~sklearn.ensemble.HistGradientBoostingRegressor` trained on the
California housing dataset. The example is taken from [1]_.
.. [1] T. Hastie, R. Tibshirani and J. Friedman, "Elements of Statistical
Learning Ed. 2", Springer, 2009.
.. [2] For classification you can think of it as the regression score before
the link function.
.. [3] :arxiv:`Goldstein, A., Kapelner, A., Bleich, J., and Pitkin, E. (2015).
"Peeking Inside the Black Box: Visualizing Statistical Learning With Plots of
Individual Conditional Expectation". Journal of Computational and
Graphical Statistics, 24(1): 44-65 <1309.6392>`
"""
# %%
# California Housing data preprocessing
# -------------------------------------
#
# Center target to avoid gradient boosting init bias: gradient boosting
# with the 'recursion' method does not account for the initial estimator
# (here the average target, by default).
import pandas as pd
from sklearn.datasets import fetch_california_housing
from sklearn.model_selection import train_test_split
cal_housing = fetch_california_housing()
X = pd.DataFrame(cal_housing.data, columns=cal_housing.feature_names)
y = cal_housing.target
y -= y.mean()
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.1, random_state=0)
# %%
# 1-way partial dependence with different models
# ----------------------------------------------
#
# In this section, we will compute 1-way partial dependence with two different
# machine-learning models: (i) a multi-layer perceptron and (ii) a
# gradient-boosting. With these two models, we illustrate how to compute and
# interpret both partial dependence plot (PDP) and individual conditional
# expectation (ICE).
#
# Multi-layer perceptron
# ......................
#
# Let's fit a :class:`~sklearn.neural_network.MLPRegressor` and compute
# single-variable partial dependence plots.
from time import time
from sklearn.pipeline import make_pipeline
from sklearn.preprocessing import QuantileTransformer
from sklearn.neural_network import MLPRegressor
print("Training MLPRegressor...")
tic = time()
est = make_pipeline(
QuantileTransformer(),
MLPRegressor(
hidden_layer_sizes=(30, 15),
learning_rate_init=0.01,
early_stopping=True,
random_state=0,
),
)
est.fit(X_train, y_train)
print(f"done in {time() - tic:.3f}s")
print(f"Test R2 score: {est.score(X_test, y_test):.2f}")
# %%
# We configured a pipeline to scale the numerical input features and tuned the
# neural network size and learning rate to get a reasonable compromise between
# training time and predictive performance on a test set.
#
# Importantly, this tabular dataset has very different dynamic ranges for its
# features. Neural networks tend to be very sensitive to features with varying
# scales and forgetting to preprocess the numeric feature would lead to a very
# poor model.
#
# It would be possible to get even higher predictive performance with a larger
# neural network but the training would also be significantly more expensive.
#
# Note that it is important to check that the model is accurate enough on a
# test set before plotting the partial dependence since there would be little
# use in explaining the impact of a given feature on the prediction function of
# a poor model.
#
# We will plot the partial dependence, both individual (ICE) and averaged one
# (PDP). We limit to only 50 ICE curves to not overcrowd the plot.
import matplotlib.pyplot as plt
from sklearn.inspection import partial_dependence
from sklearn.inspection import PartialDependenceDisplay
print("Computing partial dependence plots...")
tic = time()
features = ["MedInc", "AveOccup", "HouseAge", "AveRooms"]
display = PartialDependenceDisplay.from_estimator(
est,
X_train,
features,
kind="both",
subsample=50,
n_jobs=3,
grid_resolution=20,
random_state=0,
ice_lines_kw={"color": "tab:blue", "alpha": 0.2, "linewidth": 0.5},
pd_line_kw={"color": "tab:orange", "linestyle": "--"},
)
print(f"done in {time() - tic:.3f}s")
display.figure_.suptitle(
"Partial dependence of house value on non-location features\n"
"for the California housing dataset, with MLPRegressor"
)
display.figure_.subplots_adjust(hspace=0.3)
# %%
# Gradient boosting
# .................
#
# Let's now fit a :class:`~sklearn.ensemble.HistGradientBoostingRegressor` and
# compute the partial dependence on the same features.
from sklearn.ensemble import HistGradientBoostingRegressor
print("Training HistGradientBoostingRegressor...")
tic = time()
est = HistGradientBoostingRegressor(random_state=0)
est.fit(X_train, y_train)
print(f"done in {time() - tic:.3f}s")
print(f"Test R2 score: {est.score(X_test, y_test):.2f}")
# %%
# Here, we used the default hyperparameters for the gradient boosting model
# without any preprocessing as tree-based models are naturally robust to
# monotonic transformations of numerical features.
#
# Note that on this tabular dataset, Gradient Boosting Machines are both
# significantly faster to train and more accurate than neural networks. It is
# also significantly cheaper to tune their hyperparameters (the defaults tend
# to work well while this is not often the case for neural networks).
#
# We will plot the partial dependence, both individual (ICE) and averaged one
# (PDP). We limit to only 50 ICE curves to not overcrowd the plot.
print("Computing partial dependence plots...")
tic = time()
display = PartialDependenceDisplay.from_estimator(
est,
X_train,
features,
kind="both",
subsample=50,
n_jobs=3,
grid_resolution=20,
random_state=0,
ice_lines_kw={"color": "tab:blue", "alpha": 0.2, "linewidth": 0.5},
pd_line_kw={"color": "tab:orange", "linestyle": "--"},
)
print(f"done in {time() - tic:.3f}s")
display.figure_.suptitle(
"Partial dependence of house value on non-location features\n"
"for the California housing dataset, with Gradient Boosting"
)
display.figure_.subplots_adjust(wspace=0.4, hspace=0.3)
# %%
# Analysis of the plots
# .....................
#
# We can clearly see on the PDPs (dashed orange line) that the median house price
# shows a linear relationship with the median income (top left) and that the
# house price drops when the average occupants per household increases (top
# middle). The top right plot shows that the house age in a district does not
# have a strong influence on the (median) house price; nor does the average
# number of rooms per household.
#
# The ICE curves (light blue lines) complement the analysis: we can see that
# there are some exceptions, where the house price remains constant with
# median income and average occupants. On the other hand, while the house age
# (top right) does not have a strong influence on the median house price on
# average, there seem to be a number of exceptions where the house price
# increases for houses between 15 and 25 years old. Similar exceptions can be
# observed for the average number of rooms (bottom left). Therefore, ICE plots
# show some individual effects which are attenuated by taking the average.
#
# In all plots, the tick marks on the x-axis represent the deciles of the
# feature values in the training data.
#
# We also observe that :class:`~sklearn.neural_network.MLPRegressor` has much
# smoother predictions than
# :class:`~sklearn.ensemble.HistGradientBoostingRegressor`.
#
# However, it is worth noting that we are creating potentially meaningless
# synthetic samples if features are correlated (the brute-force construction
# of such samples is sketched in the next cell).
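# %%
# Computing partial dependence by brute force
# ...........................................
#
# The following cell is an illustrative sketch that is not part of the
# original example: it reproduces the averaged partial dependence of a single
# feature by brute force, i.e. by overriding the feature of interest with each
# grid value for *all* samples (the synthetic samples mentioned above) and
# averaging the model predictions. The grid used here (5th to 95th percentile
# of "AveOccup", 20 points) is an assumption made for illustration only.
import numpy as np

feature = "AveOccup"
grid = np.quantile(X_train[feature], np.linspace(0.05, 0.95, 20))
brute_force_pd = []
for value in grid:
    X_synthetic = X_train.copy()
    # Override the feature of interest for every sample in the training set.
    X_synthetic[feature] = value
    brute_force_pd.append(est.predict(X_synthetic).mean())
print("Brute-force partial dependence values:", np.round(brute_force_pd, 2))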
# %%
# 2D interaction plots
# --------------------
#
# PDPs with two features of interest enable us to visualize interactions among
# them. However, ICE plots with two features cannot be plotted or interpreted
# as easily. Another consideration is the cost of computing the PDPs: with
# tree-based models, when only PDPs are requested, they can be computed in an
# efficient way using the `'recursion'` method (a small timing comparison is
# sketched after the plot below).
features = ["AveOccup", "HouseAge", ("AveOccup", "HouseAge")]
print("Computing partial dependence plots...")
tic = time()
_, ax = plt.subplots(ncols=3, figsize=(9, 4))
display = PartialDependenceDisplay.from_estimator(
est,
X_train,
features,
kind="average",
n_jobs=2,
grid_resolution=10,
ax=ax,
)
print(f"done in {time() - tic:.3f}s")
display.figure_.suptitle(
"Partial dependence of house value on non-location features\n"
"for the California housing dataset, with Gradient Boosting"
)
display.figure_.subplots_adjust(wspace=0.4, hspace=0.3)
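# %%
# Cost of computing the PDPs
# ..........................
#
# This cell is an illustrative sketch that is not part of the original
# example: it compares the `'brute'` and `'recursion'` methods of
# :func:`~sklearn.inspection.partial_dependence` on the fitted gradient
# boosting model. Note that `'recursion'` only supports `kind="average"` and
# is assumed to be available for this estimator and scikit-learn version.
for method in ("brute", "recursion"):
    tic = time()
    partial_dependence(
        est,
        X_train,
        features=["AveOccup"],
        kind="average",
        method=method,
        grid_resolution=10,
    )
    print(f"method={method!r} done in {time() - tic:.3f}s")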
# %%
# The two-way partial dependence plot shows the dependence of median house
# price on joint values of house age and average occupants per household. We
# can clearly see an interaction between the two features: for an average
# occupancy greater than two, the house price is nearly independent of the
# house age, whereas for values less than two there is a strong dependence on
# age.
#
# 3D interaction plots
# --------------------
#
# Let's make the same partial dependence plot for the 2 features interaction,
# this time in 3 dimensions.
import numpy as np
from mpl_toolkits.mplot3d import Axes3D
fig = plt.figure()
features = ("AveOccup", "HouseAge")
pdp = partial_dependence(
est, X_train, features=features, kind="average", grid_resolution=10
)
XX, YY = np.meshgrid(pdp["values"][0], pdp["values"][1])
Z = pdp.average[0].T
ax = Axes3D(fig)
fig.add_axes(ax)
surf = ax.plot_surface(XX, YY, Z, rstride=1, cstride=1, cmap=plt.cm.BuPu, edgecolor="k")
ax.set_xlabel(features[0])
ax.set_ylabel(features[1])
ax.set_zlabel("Partial dependence")
# pretty init view
ax.view_init(elev=22, azim=122)
plt.colorbar(surf)
plt.suptitle(
"Partial dependence of house value on median\n"
"age and average occupancy, with Gradient Boosting"
)
plt.subplots_adjust(top=0.9)
plt.show()
| 37.5189 | 88 | 0.731453 |
7940317e97d8a06828234ba63a188f8c02f59386 | 3,212 | py | Python | eden/integration/hg/merge_test.py | jmswen/eden | 5e0b051703fa946cc77fc43004435ae6b20599a1 | [
"BSD-3-Clause"
] | null | null | null | eden/integration/hg/merge_test.py | jmswen/eden | 5e0b051703fa946cc77fc43004435ae6b20599a1 | [
"BSD-3-Clause"
] | null | null | null | eden/integration/hg/merge_test.py | jmswen/eden | 5e0b051703fa946cc77fc43004435ae6b20599a1 | [
"BSD-3-Clause"
] | null | null | null | #!/usr/bin/env python3
#
# Copyright (c) 2016-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree. An additional grant
# of patent rights can be found in the PATENTS file in the same directory.
from eden.integration.lib.hgrepo import HgRepository
from .lib.hg_extension_test_base import EdenHgTestCase, hg_test
@hg_test
class MergeTest(EdenHgTestCase):
"""Note that Mercurial has a number of built-in merge tools:
https://www.mercurial-scm.org/repo/hg/help/merge-tools
"""
commit0: str
commit1: str
commit2: str
def populate_backing_repo(self, repo: HgRepository) -> None:
repo.write_file("foo", "original")
self.commit0 = repo.commit("root commit")
repo.write_file("foo", "1")
self.commit1 = repo.commit("commit1")
repo.update(self.commit0)
repo.write_file("foo", "2")
self.commit2 = repo.commit("commit2")
def test_merge_local(self) -> None:
self._do_merge_and_commit(":local")
self._verify_tip("2")
def test_merge_other(self) -> None:
self._do_merge_and_commit(":other")
self._verify_tip("1")
def test_merge_union(self) -> None:
self._do_merge_and_commit(":union")
self._verify_tip("21")
def _do_merge_and_commit(self, tool: str) -> None:
self.hg("merge", "--tool", tool, "-r", self.commit1)
self.assert_status({"foo": "M"}, op="merge")
self.repo.commit("merge commit1 into commit2")
self.assert_status_empty()
def test_resolve_merge(self) -> None:
# Perform the merge and let it fail with the file unresolved
self.hg("merge", "--tool", ":fail", "-r", self.commit1, check=False)
self.assert_status({"foo": "M"}, op="merge")
self.assert_unresolved(["foo"])
self.write_file("foo", "3")
self.hg("resolve", "--mark", "foo")
self.assert_unresolved(unresolved=[], resolved=["foo"])
self.assert_status({"foo": "M"}, op="merge")
self.repo.commit("merge commit1 into commit2")
self._verify_tip("3")
def test_clear_merge_state(self) -> None:
# Perform the merge and let it fail with the file unresolved
self.hg("merge", "--tool", ":fail", "-r", self.commit1, check=False)
self.assert_status({"foo": "M"}, op="merge")
self.assert_unresolved(["foo"])
# "hg update --clean ." should reset is back to a clean state
# with no outstanding merge conflicts.
self.hg("update", "--clean", ".")
self.assertEqual(self.commit2, self.repo.get_head_hash())
self.assert_status_empty()
self.assert_unresolved([])
def _verify_tip(self, expected_contents: str) -> None:
files = self.repo.log(template="{files}", revset="tip")[0]
self.assertEqual("foo", files)
p1, p2 = self.repo.log(template="{p1node}\n{p2node}", revset="tip")[0].split(
"\n"
)
self.assertEqual(self.commit2, p1)
self.assertEqual(self.commit1, p2)
self.assertEqual(expected_contents, self.read_file("foo"))
| 35.688889 | 85 | 0.633873 |
794031ac86770f20e8843d6c84ced4dc2c237b93 | 422 | py | Python | migrations/0006_auto_20161019_2000.py | tobiasbartel/servicium-contact_manager | 551001bc20c52484707b56fd548605a6580beacf | [
"MIT"
] | null | null | null | migrations/0006_auto_20161019_2000.py | tobiasbartel/servicium-contact_manager | 551001bc20c52484707b56fd548605a6580beacf | [
"MIT"
] | null | null | null | migrations/0006_auto_20161019_2000.py | tobiasbartel/servicium-contact_manager | 551001bc20c52484707b56fd548605a6580beacf | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
# Generated by Django 1.10.2 on 2016-10-19 20:00
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('contact_manager', '0005_contact_is_memeber_of'),
]
operations = [
migrations.AlterModelOptions(
name='contact',
options={'ordering': ['name']},
),
]
| 21.1 | 58 | 0.620853 |
794032b12f8c5385df51cfb54f2b117a2af98ccd | 6,716 | py | Python | holidays/countries/spain.py | m-ganko/python-holidays | f0d5a91f8bee8661ef3440bde2302332a364877e | [
"MIT"
] | null | null | null | holidays/countries/spain.py | m-ganko/python-holidays | f0d5a91f8bee8661ef3440bde2302332a364877e | [
"MIT"
] | 1 | 2021-06-08T14:40:55.000Z | 2021-06-08T14:40:55.000Z | holidays/countries/spain.py | m-ganko/python-holidays | f0d5a91f8bee8661ef3440bde2302332a364877e | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
# python-holidays
# ---------------
# A fast, efficient Python library for generating country, province and state
# specific sets of holidays on the fly. It aims to make determining whether a
# specific date is a holiday as fast and flexible as possible.
#
# Author: ryanss <[email protected]> (c) 2014-2017
# dr-prodigy <[email protected]> (c) 2017-2021
# Website: https://github.com/dr-prodigy/python-holidays
# License: MIT (see LICENSE file)
from datetime import date
from dateutil.easter import easter
from dateutil.relativedelta import relativedelta as rd, TH, FR, MO
from holidays.constants import (
JAN,
FEB,
MAR,
APR,
MAY,
JUN,
JUL,
AUG,
SEP,
OCT,
NOV,
DEC,
)
from holidays.constants import SUN
from holidays.holiday_base import HolidayBase
class Spain(HolidayBase):
PROVINCES = [
"AN",
"AR",
"AS",
"CB",
"CM",
"CL",
"CT",
"VC",
"EX",
"GA",
"IB",
"CN",
"MD",
"MC",
"ML",
"NC",
"PV",
"RI",
]
def __init__(self, **kwargs):
self.country = "ES"
self.prov = kwargs.pop("prov", kwargs.pop("state", ""))
HolidayBase.__init__(self, **kwargs)
def _is_observed(self, date_holiday, name_holiday):
if self.observed and date_holiday.weekday() == SUN:
self[date_holiday + rd(days=+1)] = name_holiday + " (Trasladado)"
else:
self[date_holiday] = name_holiday
def _populate(self, year):
self._is_observed(date(year, JAN, 1), "Año nuevo")
self._is_observed(date(year, JAN, 6), "Epifanía del Señor")
if (
year < 2015
and self.prov
and self.prov
in [
"AR",
"CL",
"CM",
"EX",
"GA",
"MD",
"ML",
"MC",
"NC",
"PV",
"VC",
]
):
self._is_observed(date(year, MAR, 19), "San José")
elif (
year == 2015
and self.prov
and self.prov in ["CM", "MD", "ML", "MC", "NC", "PV", "VC"]
):
self._is_observed(date(year, MAR, 19), "San José")
elif (
year == 2016
and self.prov
and self.prov in ["ML", "MC", "PV", "VC"]
):
self._is_observed(date(year, MAR, 19), "San José")
elif year == 2017 and self.prov and self.prov in ["PV"]:
self._is_observed(date(year, MAR, 19), "San José")
elif (
2018 <= year <= 2019
and self.prov
and self.prov in ["GA", "MC", "NC", "PV", "VC"]
):
self._is_observed(date(year, MAR, 19), "San José")
elif (
2020 <= year <= 2025
and self.prov
and self.prov in ["CM", "GA", "MC", "NC", "PV", "VC"]
):
self._is_observed(date(year, MAR, 19), "San José")
if self.prov and self.prov not in ["CT", "VC"]:
self[easter(year) + rd(weeks=-1, weekday=TH)] = "Jueves Santo"
self[easter(year) + rd(weeks=-1, weekday=FR)] = "Viernes Santo"
if self.prov and self.prov in ["CT", "PV", "NC", "VC", "IB", "CM"]:
self[easter(year) + rd(weekday=MO)] = "Lunes de Pascua"
self._is_observed(date(year, MAY, 1), "Día del Trabajador")
if self.prov and self.prov in ["CT", "GA", "VC"]:
self._is_observed(date(year, JUN, 24), "San Juan")
self._is_observed(date(year, AUG, 15), "Asunción de la Virgen")
self._is_observed(date(year, OCT, 12), "Día de la Hispanidad")
self._is_observed(date(year, NOV, 1), "Todos los Santos")
self._is_observed(
date(year, DEC, 6), "Día de la Constitución " "Española"
)
self._is_observed(date(year, DEC, 8), "La Inmaculada Concepción")
self._is_observed(date(year, DEC, 25), "Navidad")
if self.prov and self.prov in ["CT", "IB"]:
self._is_observed(date(year, DEC, 26), "San Esteban")
# Provinces festive day
if self.prov:
if self.prov == "AN":
self._is_observed(date(year, FEB, 28), "Día de Andalucia")
elif self.prov == "AR":
self._is_observed(date(year, APR, 23), "Día de San Jorge")
elif self.prov == "AS":
self._is_observed(date(year, SEP, 8), "Día de Asturias")
elif self.prov == "CB":
self._is_observed(
                    date(year, JUL, 28),
                    "Día de las Instituciones de Cantabria",
)
elif self.prov == "CM":
self._is_observed(
date(year, MAY, 31), "Día de Castilla " "La Mancha"
)
elif self.prov == "CL":
self._is_observed(
date(year, APR, 23), "Día de Castilla y " "Leon"
)
elif self.prov == "CT":
self._is_observed(
date(year, SEP, 11), "Día Nacional de " "Catalunya"
)
elif self.prov == "VC":
self._is_observed(
date(year, OCT, 9), "Día de la Comunidad " "Valenciana"
)
elif self.prov == "EX":
self._is_observed(date(year, SEP, 8), "Día de Extremadura")
elif self.prov == "GA":
self._is_observed(
date(year, JUL, 25), "Día Nacional de " "Galicia"
)
elif self.prov == "IB":
self._is_observed(
date(year, MAR, 1), "Día de las Islas " "Baleares"
)
elif self.prov == "CN":
self._is_observed(date(year, MAY, 30), "Día de Canarias")
elif self.prov == "MD":
self._is_observed(
date(year, MAY, 2), "Día de Comunidad de " "Madrid"
)
elif self.prov == "MC":
self._is_observed(
date(year, JUN, 9), "Día de la Región de " "Murcia"
)
elif self.prov == "NC":
self._is_observed(date(year, SEP, 27), "Día de Navarra")
elif self.prov == "PV":
                self._is_observed(date(year, OCT, 25), "Día del País Vasco")
elif self.prov == "RI":
self._is_observed(date(year, JUN, 9), "Día de La Rioja")
class ES(Spain):
pass
class ESP(Spain):
pass
| 33.748744 | 78 | 0.483472 |
7940335539e067bab6066f686a64ed8e547342ce | 9,464 | py | Python | events.py | adrian-stephens/schedule | 56002951155b44aee9989393a088c20dd56d7f03 | [
"MIT"
] | null | null | null | events.py | adrian-stephens/schedule | 56002951155b44aee9989393a088c20dd56d7f03 | [
"MIT"
] | null | null | null | events.py | adrian-stephens/schedule | 56002951155b44aee9989393a088c20dd56d7f03 | [
"MIT"
] | null | null | null | # Events
# Class definitions for events
from datetime import timedelta, datetime
# Base Event type
class Event(object):
"""
Base class for all meeting events. Times and dates are in the meeting locale.
"""
def __init__(self,settings,startDateTime,endDateTime,summary,location,inIMAT,group):
self.sessionDateTime = settings.sessionDateTime
self.startDateTime = startDateTime # In the local meeting timezone
self.endDateTime = endDateTime # In the local meeting timezone
self.summary = summary
self.deleted = False
self.timeZoneOffset = settings.timeZoneOffset
if location != None:
self.location = location
else:
self.location = ''
self.inIMAT = inIMAT # Event is present in IMAT
self.group = group # name of group hosting breakout
def endsBefore(self, endDateTime):
return self.endDateTime < endDateTime
def startDateTimeUTC(self):
return self.startDateTime - timedelta(hours=self.timeZoneOffset)
def endDateTimeUTC(self):
return self.endDateTime - timedelta(hours=self.timeZoneOffset)
def __repr__(self):
s = "Event %s-%s '%s' " % (self.startDateTime, self.endDateTime, self.summary)
if len(self.location) > 0:
s += "in %s " % (self.location,)
return s
def shortStr(self):
"""
Return a short string to identify the meeting uniquely
"""
return "%s %s '%s'" % (self.startDateTime.strftime("%Y-%m-%d %a"), self.startDateTime.time(), self.summary)
# Equality is determined by a matching start and summary
def __eq__(self,obj):
return (self.startDateTime == obj.startDateTime) and (self.summary == obj.summary) and (self.deleted == obj.deleted)
def __ne__(self,obj):
return not self == obj
def changed(self,obj):
"""
Determine if self and obj have changed
"""
if self.startDateTime != obj.startDateTime:
return True
if self.endDateTime != obj.endDateTime:
return True
if self.summary != obj.summary:
return True
if (len(self.location) > 0) and (len(obj.location) > 0) and (self.location != obj.location):
return True
# if self.deleted and not obj.deleted:
# return True
if self.deleted != obj.deleted:
return True
return False
def diff(self,obj):
"""
Returns a string describing differences between two objects
"""
s = ''
if self.startDateTime != obj.startDateTime:
s += '%s start changed: to %s from %s. ' % (self.shortStr(), self.startDateTime, obj.startDateTime)
if self.endDateTime != obj.endDateTime:
s += '%s end changed: to %s from %s. ' % (self.shortStr(), self.endDateTime, obj.endDateTime)
if self.summary != obj.summary:
s += '%s summary changed: to %s from %s. ' % (self.shortStr(), self.summary, obj.summary)
if (len(self.location) > 0) and (len(obj.location) > 0) and (self.location != obj.location):
s += '%s location changed: to %s from %s. ' % (self.shortStr(), self.location, obj.location)
# if self.deleted != obj.deleted:
# s += '%s deleted changed: to %s from %s. ' % (self.shortStr(), 'now marked deleted')
if self.deleted != obj.deleted:
s += '%s deleted changed: %s. ' % (self.shortStr(), 'now marked deleted' if self.deleted else 'deletion marker removed')
if len(s) > 0:
s += '\n'
return s
# Return day index of start time
def dayIndex(self):
td = self.startDateTime - self.sessionDateTime
return td.days
class SlottedEvent(Event):
"""
Class for events that have been assigned start and end slots
"""
def __init__(self,settings,startDateTime,endDateTime,summary,location,startSlot,endSlot,inIMAT,group):
super(SlottedEvent, self).__init__(settings,startDateTime,endDateTime,summary,location,inIMAT,group)
self.startSlot = startSlot
if endSlot != None:
self.endSlot = endSlot
else:
self.endSlot = startSlot
def __repr__(self):
return super(SlottedEvent, self).__repr__() + " %s-%s" % (self.startSlot, self.endSlot)
# Equality is determined by a matching start date and slot and summary
def __eq__(self,obj):
return (self.dayIndex() == obj.dayIndex()) and \
(self.startSlot == obj.startSlot) and \
(self.summary == obj.summary)
class ImatEvent(SlottedEvent):
"""
Class to hold all information in an IMAT Event. Adds IMAT accounting data to slotted event.
"""
def __init__(self,settings,startDateTime,endDateTime,summary,location,startSlot,endSlot,group,credit,edit):
super(ImatEvent, self).__init__(settings,startDateTime,endDateTime,summary,location,startSlot,endSlot,True,'')
self.group = group
self.credit = credit
self.edit = edit
self.numerator = '0'
self.denominator = '0'
def __repr__(self):
return super(ImatEvent, self).__repr__() + " %s %s (%s/%s)" % (self.group, self.credit, self.numerator, self.denominator)
def creditChanged(self,obj):
"""
Indicate if a significant change has been made to credit
"""
if self.credit != obj.credit:
# We generally don't enforce our automated view of credit. It it's
# been changed on IMAT, that is presumably for a good reason. However
# The closing plenary has to be progressed automatically from "Normal" to
# "Other" as the dialog on which the meeting was initially created doesn't
# support "Other"
if self.credit == 'Other' and obj.credit == 'Normal':
return True
return False
def changed(self,obj):
"""
Determine if self and obj have changed
"""
if self.creditChanged(obj):
return True
return super(ImatEvent, self).changed(obj)
def diff(self,obj):
"""
Returns a string describing differences between two objects
"""
s = super(ImatEvent, self).diff(obj)
if self.creditChanged(obj):
s += '%s credit changed: to %s from %s.\n' % (self.shortStr(), self.credit, obj.credit)
return s
def compareEventLists(l1,n1,l2,n2,isImat,updatePast):
"""
Compare two event lists l1 and l2 called n1 and n2, conditionally ignoring any events that ended in the past.
When one of the lists is on IMAT, it is l2.
isImat indicates whether this is testing for imat events only
updatePast indicates whether past events should be updated
Returns:
    a list of events only in l1
    a list of (l1, l2) event tuples for those that changed
a list of events only in l2
A string describing the changes that will be made to resolve any differences. This string
assumes that l1 is the "new" state and l2 the "old" state, and that changes will be made
to match the new state.
"""
# Current time in UTC
now = datetime.utcnow()
onlyInL1 = []
onlyInL2 = []
changed = []
s = ''
for e1 in l1:
# Ignore events that start in the past
# if (e1.startDateTimeUTC() <= now) and not updatePast:
# continue
# Ignore events that end in the past
if (e1.endDateTimeUTC() <= now) and not updatePast:
continue
if isImat and not e1.inIMAT:
continue
# Ignore events marked deleted
if e1.deleted:
continue
found = False
for e2 in l2:
if e1 == e2:
if e1.changed(e2):
changed.append((e1, e2))
s += e1.diff(e2)
found = True
break
if not found:
onlyInL1.append(e1)
s += "%s: New in %s\n" % (e1.shortStr(), n1)
for e2 in l2:
# Ignore events that start in the past
# if (e2.startDateTimeUTC() <= now) and not updatePast:
# continue
# Ignore events that end in the past
if (e2.endDateTimeUTC() <= now) and not updatePast:
continue
if isImat and not e2.inIMAT:
continue
# Ignore events marked as deleted
if e2.deleted:
continue
found = False
for e1 in l1:
if e1 == e2:
found = True
break
if not found:
onlyInL2.append(e2)
s += "%s: Deleting item only in %s\n" % (e2.shortStr(), n2)
return (onlyInL1, changed, onlyInL2, s)
| 34.666667 | 133 | 0.554205 |
794034151dd14da0037d743587644428b28c1ec0 | 139 | py | Python | ex_12/urllib1.py | rovelee/py4e | 32125f5d62b6c7b6a56c8e1a250c1d81c6d54006 | [
"MIT"
] | null | null | null | ex_12/urllib1.py | rovelee/py4e | 32125f5d62b6c7b6a56c8e1a250c1d81c6d54006 | [
"MIT"
] | null | null | null | ex_12/urllib1.py | rovelee/py4e | 32125f5d62b6c7b6a56c8e1a250c1d81c6d54006 | [
"MIT"
] | null | null | null | import urllib.request
fhand = urllib.request.urlopen('http://data.pr4e.org/romeo.txt')
for line in fhand:
print(line.decode().strip()) | 27.8 | 64 | 0.726619 |
794034d6cae9cc20610ec8662190fd23750bf625 | 5,319 | py | Python | src/util/storage.py | cagatay/Evolution-9 | dee5b0d95d722706d5014595d4e7d18ef81929cd | [
"WTFPL"
] | 18 | 2015-02-12T17:37:17.000Z | 2021-12-19T02:47:25.000Z | src/util/storage.py | cagatay/Evolution-9 | dee5b0d95d722706d5014595d4e7d18ef81929cd | [
"WTFPL"
] | null | null | null | src/util/storage.py | cagatay/Evolution-9 | dee5b0d95d722706d5014595d4e7d18ef81929cd | [
"WTFPL"
] | 6 | 2015-12-26T15:54:58.000Z | 2021-02-17T19:30:24.000Z | '''
Created on Oct 11, 2010
@author: melih.karci
'''
from constants import SQLITE_FILE
import sqlite3
import json
class db:
_connection = None
def __init__(self):
self._connection = sqlite3.connect(SQLITE_FILE)
c = self._cursor
# Create neural networks table
c.execute('''
CREATE TABLE IF NOT EXISTS neural_networks(
name TEXT PRIMARY KEY ON CONFLICT REPLACE,
dataset TEXT,
trained BOOLEAN
);
''')
# Create evolutions table
c.execute('''
CREATE TABLE IF NOT EXISTS evolutions(
name TEXT PRIMARY KEY ON CONFLICT REPLACE,
population_size INTEGER,
generation_count INTEGER,
evaluator TEXT,
state TEXT,
FOREIGN KEY(evaluator) REFERENCES neural_networks(name)
);
''')
# create genomes table
c.execute('''
CREATE TABLE IF NOT EXISTS genomes(
name TEXT PRIMARY KEY ON CONFLICT REPLACE,
genome TEXT,
evolution TEXT,
generation INTEGER,
individual_id,
parent_1 TEXT,
parent_2 TEXT,
grade REAL,
status TEXT,
FOREIGN KEY(evolution) REFERENCES evolutions(name)
);
''')
self._commit()
c.close()
def _commit(self):
self._connection.commit()
@property
def _cursor(self):
return self._connection.cursor()
def close(self):
self._connection.close()
def save_evolution(self, name, population_size, evaluator, generation_count, state):
c = self._cursor
c.execute('''
INSERT INTO evolutions (name, evaluator, population_size, generation_count, state)
VALUES(?, ?, ?, ?, ?);
''', (name, evaluator, population_size, generation_count, state))
self._commit()
c.close()
return
#Returns evolution for the given row id
def get_evolution(self, name):
c = self._cursor
c.execute('''
SELECT * FROM evolutions WHERE name=?
''', [name])
result = c.fetchall()
c.close()
return result[0] if result else None
def get_evolution_list(self):
c = self._cursor
c.execute('''
SELECT name FROM evolutions
''')
result = c.fetchall()
c.close()
return result
def new_neural_network(self, name, dataset):
c = self._cursor
c.execute('''
INSERT INTO neural_networks (name, dataset, trained)
VALUES(?, ?, ?);
''', (name, json.dumps(dataset), 0))
self._commit()
c.close()
return
def save_neural_network(self, name, dataset, trained):
c = self._cursor
c.execute('''
            UPDATE neural_networks SET dataset=?, trained=? WHERE name=?;
        ''', (json.dumps(dataset), int(trained), name))
self._commit()
c.close()
return
def get_neural_network(self, name):
c = self._cursor
c.execute('''
SELECT dataset, trained from neural_networks WHERE name=?;
''', [name])
result = c.fetchall()[0]
c.close()
return json.loads(result[0]), bool(result[1])
def get_neural_network_list(self):
c = self._cursor
c.execute('''
SELECT name FROM neural_networks
''')
result = c.fetchall()
c.close()
return [x[0] for x in result]
def get_genomes(self, evolution, generation):
c = self._cursor
c.execute('''
SELECT * FROM genomes WHERE evolution=? AND generation=? AND status <> ? ORDER BY individual_id ASC
''', [evolution, generation, 'eliminated'])
result = c.fetchall()
c.close()
return result
def save_genome(self,
name,
genome,
evolution,
generation,
individual_id,
parent_1,
parent_2,
grade,
status):
c = self._cursor
c.execute('''
INSERT INTO genomes(name,
genome,
evolution,
generation,
individual_id,
parent_1,
parent_2,
grade,
status)
VALUES(?, ?, ?, ?, ?, ?, ?, ?, ?);
''', (name,
genome,
evolution,
generation,
individual_id,
parent_1,
parent_2,
grade,
status))
self._commit()
c.close()
return
| 26.728643 | 112 | 0.460425 |
794034fe32f49b68eb755c13c433c30c7a61070e | 1,441 | py | Python | py/tests/problems/arr/majority_element_test.py | bmoretz/Daily-Coding-Problem | f79e062e9f6e7b18b7e95c071fbe71ad104affcb | [
"MIT"
] | 1 | 2020-06-26T13:28:43.000Z | 2020-06-26T13:28:43.000Z | py/tests/problems/arr/majority_element_test.py | bmoretz/Daily-Coding-Problem | f79e062e9f6e7b18b7e95c071fbe71ad104affcb | [
"MIT"
] | 7 | 2021-11-18T19:46:08.000Z | 2022-03-12T01:03:01.000Z | py/tests/problems/arr/majority_element_test.py | bmoretz/Daily-Coding-Problem | f79e062e9f6e7b18b7e95c071fbe71ad104affcb | [
"MIT"
] | null | null | null | import unittest
from problems.arr import majority_element
class Test_LettersNumbers2(unittest.TestCase):
def setUp(self):
pass
def test_none(self):
arr = None
actual = majority_element(arr)
expected = None
assert actual == expected
def test_case1(self):
arr = [1, 2, 5, 9, 5, 9, 5, 5, 5]
actual = majority_element(arr)
expected = 5
assert actual == expected
def test_case2(self):
arr = [1, 2, 5, 9, 5, 9, 5, 5, 5, 1]
actual = majority_element(arr)
expected = 5
assert actual == expected
def test_case2(self):
arr = [1, 2, 5, 9, 5, 9, 5, 5, 5, 1, 2]
actual = majority_element(arr)
expected = -1
assert actual == expected
def test_case3(self):
arr = [1, 2, 5, 9, 5, 9, 5, 5, 5, 1, 2, 1, 2]
actual = majority_element(arr)
expected = -1
assert actual == expected
def test_case4(self):
arr = [1, 2, 5, 9, 5, 9, 5, 5, 5, 1, 2, 1, 2, 5]
actual = majority_element(arr)
expected = -1
assert actual == expected
def test_case5(self):
arr = [1, 5]
actual = majority_element(arr)
expected = -1
assert actual == expected
def test_case6(self):
arr = [1, 5, 5]
actual = majority_element(arr)
expected = 5
assert actual == expected | 20.013889 | 56 | 0.532963 |
7940360ecee668e64808126b0258e95cb4420646 | 4,370 | py | Python | payfast/tests.py | reinbach/django-payfast-convert | 20267f76252cc61eba582d06694ce8e24ed413f0 | [
"MIT"
] | 2 | 2017-11-16T16:47:55.000Z | 2018-01-02T17:00:09.000Z | payfast/tests.py | reinbach/django-payfast-convert | 20267f76252cc61eba582d06694ce8e24ed413f0 | [
"MIT"
] | null | null | null | payfast/tests.py | reinbach/django-payfast-convert | 20267f76252cc61eba582d06694ce8e24ed413f0 | [
"MIT"
] | 2 | 2019-08-06T11:57:35.000Z | 2020-09-01T15:50:51.000Z | # coding: utf-8
import unittest
from collections import OrderedDict
import django
from django.test import TestCase
from payfast.forms import notify_url, PayFastForm
from payfast.models import PayFastOrder
from payfast.api import signature
from payfast import conf
import payfast.signals
def _test_data():
data = OrderedDict()
data['merchant_id'] = '10000100'
data['merchant_key'] = '46f0cd694581a'
data['notify_url'] = "http://127.0.0.1:8000/payfast/notify/"
data['name_first'] = u"Вася"
data['last_name'] = u'Пупников'
data['m_payment_id'] = '23'
data['amount'] = '234'
data['item_name'] = u"Payment (Планета суши). ID:272-15"
return data
def _notify_data(data, payment_form):
notify_data = data.copy()
# prepare server data
notify_data['m_payment_id'] = payment_form.order.pk
notify_data['amount_gross'] = data['amount']
del notify_data['amount']
del notify_data['merchant_key']
notify_data['signature'] = signature(notify_data)
return notify_data
def _order():
return PayFastOrder.objects.all()[0]
class SignatureTest(unittest.TestCase):
def test_signature(self):
data = _test_data()
self.assertEqual(signature(data), 'c71d41dd5041bf28d819fe102ab0106b')
class NotifyTest(TestCase):
def setUp(self):
conf.IP_ADDRESSES = ['127.0.0.1']
conf.USE_POSTBACK = False
conf.MERCHANT_ID = '10000100'
conf.REQUIRE_AMOUNT_MATCH = True
def handler(sender, **kwargs):
handler.called = True
handler.called = False
self.signal_handler = handler
payfast.signals.notify.connect(self.signal_handler)
def tearDown(self):
payfast.signals.notify.disconnect(self.signal_handler)
def _create_order(self):
"""
Create a payment order, and return the notification data for it.
"""
data = _test_data()
# user posts the pay request
payment_form = PayFastForm(initial={
'amount': data['amount'],
'item_name': data['item_name']
})
self.assertEqual(_order().trusted, None)
return _notify_data(data, payment_form)
def test_notify(self):
notify_data = self._create_order()
# the server sends a notification
response = self.client.post(notify_url(), notify_data)
self.assertEqual(response.status_code, 200)
self.assertTrue(self.signal_handler.called)
order = _order()
self.assertEqual(order.request_ip, u'127.0.0.1')
self.assertEqual(order.debug_info, u'')
self.assertEqual(order.trusted, True)
def test_untrusted_ip(self):
"""
The notify handler rejects notification attempts from untrusted IP address.
"""
notify_data = self._create_order()
# the server sends a notification
response = self.client.post(notify_url(), notify_data, REMOTE_ADDR='127.0.0.2')
self.assertEqual(response.status_code, 404)
self.assertFalse(self.signal_handler.called)
order = _order()
self.assertEqual(order.request_ip, u'127.0.0.2')
self.assertEqual(order.debug_info, u'__all__: untrusted ip: 127.0.0.2')
self.assertEqual(order.trusted, False)
def test_non_existing_order(self):
response = self.client.post(notify_url(), {})
self.assertEqual(response.status_code, 404)
self.assertFalse(self.signal_handler.called)
self.assertQuerysetEqual(PayFastOrder.objects.all(), [])
def test_invalid_request(self):
form = PayFastForm(initial={'amount': 100, 'item_name': 'foo'})
response = self.client.post(notify_url(), {'m_payment_id': form.order.pk})
self.assertEqual(response.status_code, 404)
self.assertFalse(self.signal_handler.called)
order = _order()
self.assertEqual(order.request_ip, u'127.0.0.1')
self.assertEqual(set(order.debug_info.split(u'|')), {
u'amount_gross: Amount is not the same: {} != None'.format(
# Django 1.8 returns more precise DecimalField values.
u'100' if django.VERSION < (1, 8) else u'100.00'
),
u'item_name: This field is required.',
u'merchant_id: This field is required.',
})
self.assertEqual(order.trusted, False)
| 32.61194 | 87 | 0.653089 |
7940365fb4a641ecc80309586295002967c44fb9 | 894 | py | Python | rpi-config/scripts/sample_devices.py | DiamondLightSource/rpi-config | 617f5e176c0621e3ea1b567e9586e96ba0f8b5db | [
"Apache-2.0"
] | 4 | 2016-08-23T12:13:21.000Z | 2018-08-22T12:55:55.000Z | rpi-config/scripts/sample_devices.py | DiamondLightSource/rpi-config | 617f5e176c0621e3ea1b567e9586e96ba0f8b5db | [
"Apache-2.0"
] | null | null | null | rpi-config/scripts/sample_devices.py | DiamondLightSource/rpi-config | 617f5e176c0621e3ea1b567e9586e96ba0f8b5db | [
"Apache-2.0"
] | 2 | 2016-09-15T19:17:30.000Z | 2018-03-06T06:34:13.000Z | from gdascripts.pd.dummy_pds import DummyPD
from gdascripts.pd.dummy_pds import MultiInputExtraFieldsDummyPD
from gdascripts.pd.dummy_pds import ZeroInputExtraFieldsDummyPD
from gdascripts.pd.time_pds import showtimeClass
from gdascripts.pd.time_pds import showincrementaltimeClass
from gdascripts.pd.time_pds import waittimeClass
print "Creating dummy devices x,y and z"
x=DummyPD("x")
y=DummyPD("y")
z=DummyPD("z")
print "Creating timer devices t, dt, and w"
t = showtimeClass("t") # cannot also be driven.
dt= showincrementaltimeClass("dt")
w = waittimeClass("w")
print "Creating multi input/extra field device, mi, me and mie"
mi=MultiInputExtraFieldsDummyPD('mi',['i1','i2'],[])
me=MultiInputExtraFieldsDummyPD('me',[],['e1','e2'])
mie=MultiInputExtraFieldsDummyPD('mie',['i1'],['e2','e3'])
print "Creating zero input/extra field device, zie"
zie=ZeroInputExtraFieldsDummyPD('zie')
| 33.111111 | 64 | 0.779642 |
794037ae23c5efd4daf0e6c1f0b2f400bf176dc8 | 280 | py | Python | tests/mods/packinit/init.py | thatch45/pop | b1ea83cf93c1ea28851129146d8507842363aa6a | [
"Apache-2.0"
] | null | null | null | tests/mods/packinit/init.py | thatch45/pop | b1ea83cf93c1ea28851129146d8507842363aa6a | [
"Apache-2.0"
] | null | null | null | tests/mods/packinit/init.py | thatch45/pop | b1ea83cf93c1ea28851129146d8507842363aa6a | [
"Apache-2.0"
] | 1 | 2021-02-05T04:18:33.000Z | 2021-02-05T04:18:33.000Z | # -*- coding: utf-8 -*-
'''
used to test the pack_init system
'''
# pylint: disable=undefined-variable
def new(hub):
'''
Add a value to the context
'''
hub.context['NEW'] = True
hub.mods._mem['new'] = True
def check(hub):
return hub.context.get('NEW')
| 15.555556 | 36 | 0.592857 |
7940386dfab624ea2865919b4b95668620dd8ab3 | 5,220 | py | Python | cronjobs/FileLoader.py | axsauze/bigdatadarwin | 76320a1ba1b0e6efd50ca150699b34175c474856 | [
"Apache-2.0"
] | 1 | 2015-09-21T16:15:11.000Z | 2015-09-21T16:15:11.000Z | cronjobs/FileLoader.py | axsauze/bigdatadarwin | 76320a1ba1b0e6efd50ca150699b34175c474856 | [
"Apache-2.0"
] | 15 | 2015-09-19T20:19:18.000Z | 2015-10-10T05:36:23.000Z | cronjobs/FileLoader.py | HackPartners/bigdatadarwin | 76320a1ba1b0e6efd50ca150699b34175c474856 | [
"Apache-2.0"
] | null | null | null | import os, sys
sys.path.append(os.path.join(os.path.dirname(__file__), '..'))
from models import Schedule, CallingPoint, db
from darwinpush.messagefactories.xml import ScheduleXMLMessageFactory
import darwinpush.xb.pushport as pp
import re
class ScheduleFileLoader:
def __init__(self, file_location):
assert(file_location)
self.file_location = file_location
def update_daily_schedules(self):
collect_mode=False
journey_buffer = []
for line in open(self.file_location):
if '<Journey' in line:
assert(not collect_mode)
collect_mode=True
elif '</Journey' in line:
assert(collect_mode)
journey_buffer.append(line)
journey_string = "".join(journey_buffer)
journey_string = journey_string.replace("Journey", "schedule")
journey_string = journey_string.replace("<OR", "<ns2:OR")
journey_string = journey_string.replace("<PP", "<ns2:PP")
journey_string = journey_string.replace("<IP", "<ns2:IP")
journey_string = journey_string.replace("<DT", "<ns2:DT")
journey_string = journey_string.replace("<OPOR", "<ns2:OPOR")
journey_string = journey_string.replace("<OPIP", "<ns2:OPIP")
journey_string = journey_string.replace("<OPDT", "<ns2:OPDT")
journey_string = journey_string.replace("<OPPP", "<ns2:OPPP")
journey_string = journey_string.replace("cancelReason", "ns2:cancelReason")
journey_string = re.sub(r'plat=\"[0-9a-zA-Z\s]+\"', '', journey_string)
journey_string = re.sub(r'qtrain=\"[0-9a-zA-Z\s]+\"', '', journey_string)
journey_string = re.sub(r'can=\"[0-9a-zA-Z\s]+\"', '', journey_string)
journey_string = re.sub(r'act=\"[0-9a-zA-Z\s]+\"', '', journey_string)
journey_string = ( '<?xml version="1.0" encoding="UTF-8"?>'
+ '<Pport ts="2015-07-20T11:52:07.3487919+01:00" version="12.0" xmlns="http://www.thalesgroup.com/rtti/PushPort/v12" xmlns:ns2="http://www.thalesgroup.com/rtti/PushPort/Schedules/v1">'
+ '<uR requestID="0000000000020608" requestSource="at09" updateOrigin="CIS">'
+ journey_string
+ '</uR>'
+ '</Pport>')
self.add_schedule_from_buffer(journey_string)
journey_buffer = []
collect_mode=False
if collect_mode:
journey_buffer.append(line)
@db.transaction()
def add_schedule_from_buffer(self, journey_buffer):
r = pp.CreateFromDocument(journey_buffer)
assert(None is r.sR)
assert(None is not r.uR)
assert(1 == len(r.uR.schedule))
m = ScheduleXMLMessageFactory.build(r.uR.schedule[0], r, journey_buffer)
###### This code below checks that there are no schedules already stored
# # We try to find a schedule, and replace it if we do
# found = (Schedule
# .select()
# .where(
# Schedule.uid == m.uid,
# Schedule.rid == m.rid
# ))
# count = found.count()
# if count > 0:
# assert(count == 1)
# s = found[0]
# # Removing all relevant calling points
# CallingPoint.delete().where(
# CallingPoint.schedule == s
# ).execute()
# else:
# s = Schedule()
# s.uid = m.uid
# s.rid = m.rid
# Script assumes new schedules added, and overrides any existing
s = Schedule()
s.uid = m.uid
s.rid = m.rid
s.headcode = m.headcode
s.start_date = m.start_date
s.toc_code = m.toc_code
s.category = m.category
s.status = m.status
s.active = m.active
s.deleted = m.deleted
s.cancel_tiploc = m.cancel_reason_tiploc
s.cancel_code = m.cancel_reason_code
s.cancel_near = m.cancel_reason_near
s.save()
for o in m.all_points:
p = CallingPoint()
p.tiploc = o.tiploc
p.schedule = s
p.activity_codes = o.planned_activity_codes
p.cancelled = o.cancelled
p.false_tiploc = o.false_tiploc
p.route_delay = o.route_delay
p.working_arrival = o.raw_working_arrival_time
p.working_pass = o.raw_working_pass_time
p.working_departure = o.raw_working_departure_time
p.public_arrival = o.raw_public_arrival_time
p.public_departure = o.raw_public_departure_time
p.type = str(type(o))
p.save()
if __name__ == "__main__":
if len(sys.argv) < 2:
print("You must pass the name of the file as argument")
exit()
s=ScheduleFileLoader(sys.argv[1])
s.update_daily_schedules()
| 37.285714 | 220 | 0.546552 |
794038a66e8cbe75dfd2caed47bbcfd65ca699f2 | 4,964 | py | Python | repour/server/server.py | janinko/repour | 5df249254b0c4c5a5c90e7e87a8c3e135ad95d21 | [
"Apache-2.0"
] | null | null | null | repour/server/server.py | janinko/repour | 5df249254b0c4c5a5c90e7e87a8c3e135ad95d21 | [
"Apache-2.0"
] | null | null | null | repour/server/server.py | janinko/repour | 5df249254b0c4c5a5c90e7e87a8c3e135ad95d21 | [
"Apache-2.0"
] | null | null | null | # flake8: noqa
import asyncio
import logging
import os
import prometheus_async.aio as aio
from aiohttp import web
from prometheus_client.bridge.graphite import GraphiteBridge
from .. import clone, repo, websockets
from ..adjust import adjust
from ..auth import auth
from ..config import config
from .endpoint import (
cancel,
endpoint,
external_to_internal,
info,
validation,
ws,
internal_scm,
)
logger = logging.getLogger(__name__)
#
# Setup
#
shutdown_callbacks = []
async def init(loop, bind, repo_provider, repour_url, adjust_provider):
logger.debug("Running init")
c = await config.get_configuration()
auth_provider = c.get("auth", {}).get("provider", None)
logger.info("Using auth provider '" + str(auth_provider) + "'.")
app = web.Application(
loop=loop, middlewares=[auth.providers[auth_provider]] if auth_provider else {}
)
logger.debug("Adding application resources")
app["repo_provider"] = repo.provider_types[repo_provider["type"]](
**repo_provider["params"]
)
external_to_internal_source = endpoint.validated_json_endpoint(
shutdown_callbacks,
validation.external_to_internal,
external_to_internal.translate,
repour_url,
)
adjust_source = endpoint.validated_json_endpoint(
shutdown_callbacks, validation.adjust_modeb, adjust.adjust, repour_url
)
internal_scm_source = endpoint.validated_json_endpoint(
shutdown_callbacks,
validation.internal_scm,
internal_scm.internal_scm,
repour_url,
)
logger.debug("Setting up handlers")
app.router.add_route("GET", "/", info.handle_request)
app.router.add_route(
"POST", "/git-external-to-internal", external_to_internal_source
)
app.router.add_route(
"POST",
"/clone",
endpoint.validated_json_endpoint(
shutdown_callbacks, validation.clone, clone.clone, repour_url
),
)
app.router.add_route("POST", "/adjust", adjust_source)
app.router.add_route("POST", "/internal-scm", internal_scm_source)
app.router.add_route("POST", "/cancel/{task_id}", cancel.handle_cancel)
app.router.add_route("GET", "/callback/{callback_id}", ws.handle_socket)
app.router.add_route("GET", "/metrics", aio.web.server_stats)
await setup_graphite_exporter()
# used for distributed cancel operation
asyncio.get_event_loop().create_task(cancel.start_cancel_loop())
logger.debug("Creating asyncio server")
srv = await loop.create_server(app.make_handler(), bind["address"], bind["port"])
for socket in srv.sockets:
logger.info("Server started on socket: {}".format(socket.getsockname()))
def start_server(bind, repo_provider, repour_url, adjust_provider):
logger.debug("Starting server")
loop = asyncio.get_event_loop()
# # Monkey patch for Python 3.4.1
# if not hasattr(loop, "create_task"):
# loop.create_task = lambda c: asyncio.async(c, loop=loop)
loop.run_until_complete(
init(
loop=loop,
bind=bind,
repo_provider=repo_provider,
repour_url=repour_url,
adjust_provider=adjust_provider,
)
)
loop.create_task(websockets.periodic_cleanup())
try:
loop.run_forever()
except KeyboardInterrupt:
logger.debug("KeyboardInterrupt")
finally:
logger.info("Stopping tasks")
tasks = asyncio.Task.all_tasks()
for task in tasks:
task.cancel()
results = loop.run_until_complete(
asyncio.gather(*tasks, loop=loop, return_exceptions=True)
)
for shutdown_callback in shutdown_callbacks:
shutdown_callback()
exception_results = [
r
for r in results
if isinstance(r, Exception) and not isinstance(r, asyncio.CancelledError)
]
if len(exception_results) > 1:
raise Exception(exception_results)
elif len(exception_results) == 1:
raise exception_results[0]
loop.close()
async def setup_graphite_exporter():
graphite_server = os.environ.get("GRAPHITE_SERVER", None)
graphite_key = os.environ.get("GRAPHITE_KEY", None)
graphite_port = os.environ.get("GRAPHITE_PORT", 2003)
if graphite_server is None or graphite_key is None:
logger.warn(
"Graphite server ("
+ str(graphite_server)
+ ") or Graphite key ("
+ str(graphite_key)
+ ") is not defined. Not setting up Monitoring graphite server!"
)
return
logger.info(
"Monitoring graphite server setup! Reporting to server: "
+ graphite_server
+ ":"
+ str(graphite_port)
+ " with prefix: "
+ str(graphite_key)
)
gb = GraphiteBridge((graphite_server, graphite_port))
gb.start(60.0, prefix=graphite_key)
| 29.2 | 87 | 0.65552 |
7940397ffe8de596d4fa1d6504a4146f20f7f1c5 | 411 | py | Python | dupgee/base/dupgee/matcher.py | ahmetkotan/dupgee | 7cb65e6ba107fb30e98bdd17fd666ecff53f9eb7 | [
"MIT"
] | 40 | 2021-12-26T07:42:29.000Z | 2022-01-03T01:37:16.000Z | dupgee/base/dupgee/matcher.py | ahmetkotan/dupgee | 7cb65e6ba107fb30e98bdd17fd666ecff53f9eb7 | [
"MIT"
] | null | null | null | dupgee/base/dupgee/matcher.py | ahmetkotan/dupgee | 7cb65e6ba107fb30e98bdd17fd666ecff53f9eb7 | [
"MIT"
] | null | null | null | urls_module = __import__("{{ app_name }}.urls")
pages_module = __import__("{{ app_name }}.pages")
def match_url(path):
for url in urls_module.urls.urls:
if url.get("path") == path:
return url.get("view")
return pages_module.pages.HomepageView
def find_view(request):
view_class = match_url(path=request.path)
view = view_class()
return view.dispatch(request=request)
| 25.6875 | 49 | 0.673966 |
79403ac66e6eb68abac5dc38304c7a84c14b395b | 1,053 | py | Python | tensorflow_compression/python/ops/coder_ops.py | ghgh3269/compression | 3e920bc49fa32d79c1c2917583ffb663c6ebac85 | [
"Apache-2.0"
] | 46 | 2019-06-17T21:13:57.000Z | 2022-03-29T07:52:11.000Z | tensorflow_compression/python/ops/coder_ops.py | ghgh3269/compression | 3e920bc49fa32d79c1c2917583ffb663c6ebac85 | [
"Apache-2.0"
] | 11 | 2019-07-05T09:51:08.000Z | 2022-02-06T14:00:03.000Z | tensorflow_compression/python/ops/coder_ops.py | ghgh3269/compression | 3e920bc49fa32d79c1c2917583ffb663c6ebac85 | [
"Apache-2.0"
] | 14 | 2019-04-10T01:09:36.000Z | 2022-03-30T01:24:57.000Z | # -*- coding: utf-8 -*-
# Copyright 2018 Google LLC. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Range coder operations."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# Dependency imports
from tensorflow.contrib.coder.python.ops import coder_ops
pmf_to_quantized_cdf = coder_ops.pmf_to_quantized_cdf
range_decode = coder_ops.range_decode
range_encode = coder_ops.range_encode
| 36.310345 | 80 | 0.725546 |
79403b3fc5708de3dddd0375f0a8bd10a3bbafa7 | 722 | py | Python | bluepy/scan_fuzz.py | rohichau/BLETracking | 495cbfc8e39343b3808a68a52283cdc6e113b7ff | [
"Apache-2.0"
] | 1 | 2021-08-20T10:02:21.000Z | 2021-08-20T10:02:21.000Z | old/scan_fuzz.py | grodansparadis/vscp-python-sensorpuck | 168aeb29b9994dd8f9499fa6380c4d7332619b34 | [
"MIT"
] | 7 | 2021-03-19T15:51:36.000Z | 2022-03-12T00:53:47.000Z | flora/lib/python3.5/site-packages/bluepy/scan_fuzz.py | saulgold/miflora | dc19d812dda1fc90b43e3e3bd72841fd62ca4003 | [
"Apache-2.0"
] | null | null | null | from __future__ import print_function
import sys
import os
import random
import struct
import btle
def rand_db(adtype, datalen):
return struct.pack("<BB", datalen+1, adtype) + os.urandom(datalen)
if __name__ == '__main__':
while True:
sr = btle.ScanEntry(None, 0)
db = b''
while len(db) <= 28:
adlen = random.randint(3, 31-len(db))
adtype = random.randint(0,255)
db += rand_db(adtype, adlen-2)
resp = { 'type' : [ random.randint(1,2) ],
'rssi' : [ random.randint(1,127) ],
'flag' : [ 4 ],
'd' : [ db ] }
sr._update(resp)
print ("Result:", sr.getScanData())
| 24.896552 | 70 | 0.522161 |
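Each chunk produced by rand_db above follows the BLE advertising-data layout: one length byte that covers the type byte plus payload, then the AD type byte, then the random payload. A quick check of that layout, independent of bluepy:

import os
import struct

def rand_db(adtype, datalen):
    return struct.pack("<BB", datalen + 1, adtype) + os.urandom(datalen)

chunk = rand_db(0xFF, 4)          # 0xFF = manufacturer-specific data AD type
length, ad_type = chunk[0], chunk[1]
assert length == 5                # 1 type byte + 4 payload bytes
assert ad_type == 0xFF
assert len(chunk) == 1 + length   # total size = length byte + the bytes it covers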
79403b717975c8ad70157d4d69c370b7adb72d92 | 5,387 | py | Python | influxdb_client/domain/scraper_target_response.py | Rajpratik71/influxdb-client-python | ae537018b638600552b3ac11f1b070c048719910 | [
"MIT"
] | null | null | null | influxdb_client/domain/scraper_target_response.py | Rajpratik71/influxdb-client-python | ae537018b638600552b3ac11f1b070c048719910 | [
"MIT"
] | null | null | null | influxdb_client/domain/scraper_target_response.py | Rajpratik71/influxdb-client-python | ae537018b638600552b3ac11f1b070c048719910 | [
"MIT"
] | null | null | null | # coding: utf-8
"""
Influx API Service.
No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
OpenAPI spec version: 0.1.0
Generated by: https://openapi-generator.tech
"""
import pprint
import re # noqa: F401
import six
from influxdb_client.domain.scraper_target_request import ScraperTargetRequest
class ScraperTargetResponse(ScraperTargetRequest):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
openapi_types = {
'id': 'str',
'org': 'str',
'bucket': 'str',
'links': 'object',
'name': 'str',
'type': 'str',
'url': 'str',
'org_id': 'str',
'bucket_id': 'str'
}
attribute_map = {
'id': 'id',
'org': 'org',
'bucket': 'bucket',
'links': 'links',
'name': 'name',
'type': 'type',
'url': 'url',
'org_id': 'orgID',
'bucket_id': 'bucketID'
}
def __init__(self, id=None, org=None, bucket=None, links=None, name=None, type=None, url=None, org_id=None, bucket_id=None): # noqa: E501,D401,D403
"""ScraperTargetResponse - a model defined in OpenAPI.""" # noqa: E501
ScraperTargetRequest.__init__(self, name=name, type=type, url=url, org_id=org_id, bucket_id=bucket_id) # noqa: E501
self._id = None
self._org = None
self._bucket = None
self._links = None
self.discriminator = None
if id is not None:
self.id = id
if org is not None:
self.org = org
if bucket is not None:
self.bucket = bucket
if links is not None:
self.links = links
@property
def id(self):
"""Get the id of this ScraperTargetResponse.
:return: The id of this ScraperTargetResponse.
:rtype: str
""" # noqa: E501
return self._id
@id.setter
def id(self, id):
"""Set the id of this ScraperTargetResponse.
:param id: The id of this ScraperTargetResponse.
:type: str
""" # noqa: E501
self._id = id
@property
def org(self):
"""Get the org of this ScraperTargetResponse.
The organization name.
:return: The org of this ScraperTargetResponse.
:rtype: str
""" # noqa: E501
return self._org
@org.setter
def org(self, org):
"""Set the org of this ScraperTargetResponse.
The organization name.
:param org: The org of this ScraperTargetResponse.
:type: str
""" # noqa: E501
self._org = org
@property
def bucket(self):
"""Get the bucket of this ScraperTargetResponse.
The bucket name.
:return: The bucket of this ScraperTargetResponse.
:rtype: str
""" # noqa: E501
return self._bucket
@bucket.setter
def bucket(self, bucket):
"""Set the bucket of this ScraperTargetResponse.
The bucket name.
:param bucket: The bucket of this ScraperTargetResponse.
:type: str
""" # noqa: E501
self._bucket = bucket
@property
def links(self):
"""Get the links of this ScraperTargetResponse.
:return: The links of this ScraperTargetResponse.
:rtype: object
""" # noqa: E501
return self._links
@links.setter
def links(self, links):
"""Set the links of this ScraperTargetResponse.
:param links: The links of this ScraperTargetResponse.
:type: object
""" # noqa: E501
self._links = links
def to_dict(self):
"""Return the model properties as a dict."""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""Return the string representation of the model."""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`."""
return self.to_str()
def __eq__(self, other):
"""Return true if both objects are equal."""
if not isinstance(other, ScraperTargetResponse):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Return true if both objects are not equal."""
return not self == other
| 26.935 | 152 | 0.558196 |
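A minimal usage sketch for the generated model above; the field values are made up and the import path is an assumption based on the repo layout given in this row:

from influxdb_client.domain.scraper_target_response import ScraperTargetResponse

resp = ScraperTargetResponse(
    id="000000000000000a",                  # hypothetical IDs and names
    org="my-org",
    bucket="telegraf",
    name="example scraper",
    type="prometheus",
    url="http://localhost:9999/metrics",
    org_id="000000000000000b",
    bucket_id="000000000000000c",
)
print(resp.to_dict()["org_id"])             # to_dict() keys use the Python attribute names, not the JSON keys
print(resp == ScraperTargetResponse())      # __eq__ compares the underlying __dict__ -> False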
79403d8ae544f97a4a83b5b1c190bf395ffe8bb2 | 2,518 | py | Python | phi/physics/material.py | joelguerrero/PhiFlow | 94c7f3966c30c38b4e298fada88e001eba897dba | [
"MIT"
] | 1 | 2021-03-13T19:29:15.000Z | 2021-03-13T19:29:15.000Z | phi/physics/material.py | joelguerrero/PhiFlow | 94c7f3966c30c38b4e298fada88e001eba897dba | [
"MIT"
] | null | null | null | phi/physics/material.py | joelguerrero/PhiFlow | 94c7f3966c30c38b4e298fada88e001eba897dba | [
"MIT"
] | null | null | null | """
Surface material definitions including constants.
"""
import math
from phi import struct
@struct.definition()
class Material(struct.Struct):
"""
Defines a surface material including the boundary conditions.
"""
def __init__(self, name, **kwargs):
struct.Struct.__init__(self, **struct.kwargs(locals()))
@struct.constant()
def name(self, name):
"""
Material name.
"""
return str(name)
@struct.constant(default=True)
def solid(self, solid):
"""
Fluid can only enter non-solid cells or pass through non-solid boundaries.
"""
assert isinstance(solid, bool)
return solid
@struct.constant(default=0.0)
def friction(self, friction):
"""
(only for solid materials) velocity decay rate in units of 1/time.
        0: fluid can move parallel to the surface (no-stick),
1: fluid cannot move parallel (no-slip)
"""
return friction
def friction_multiplier(self, dt=1):
"""
Computes the velocity multiplication factor for fluid that moves along the surface for time dt.
:param dt: time spent near surface (float)
:return: factor (float)
"""
if dt == 1 or self.friction == 1 or self.friction == 0:
return 1 - self.friction
else:
time_friction_exponent = math.log(1/(1-self.friction))
return math.exp(- dt * time_friction_exponent)
@struct.constant(default=False)
def periodic(self, periodic):
"""
Whether the boundary is periodic, i.e. seamlessly merges with the opposite end of the domain.
"""
assert isinstance(periodic, bool)
return periodic
def __repr__(self):
return self.name
@struct.derived()
def extrapolation_mode(self):
"""
Returns the extrapolation mode, one of ('periodic', 'boundary', 'constant').
"""
if self.periodic:
return 'periodic'
if self.solid:
return 'boundary'
else:
return 'constant'
@struct.derived()
def accessible_extrapolation_mode(self):
if self.periodic:
return 'periodic'
if self.solid:
return 'constant'
else:
return 'boundary'
OPEN = Material('open', solid=False)
CLOSED = NO_STICK = SLIPPERY = Material('slippery', solid=True, friction=0.0)
# NO_SLIP = STICKY = Material('sticky', solid=True, friction=1.0)
PERIODIC = Material('periodic', solid=False, periodic=True)
| 27.369565 | 95 | 0.619936 |
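A worked example of friction_multiplier: for friction 0.5 the decay exponent is ln 2, so after two time units exp(-2 ln 2) = 0.25 of the tangential velocity remains, i.e. (1 - friction) ** dt:

import math

friction, dt = 0.5, 2.0
exponent = math.log(1 / (1 - friction))   # ln(2), as computed in friction_multiplier()
print(math.exp(-dt * exponent))           # 0.25 == (1 - friction) ** dt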
79403fd3ec341cbb42d5d1886552f6db6b287c82 | 931 | py | Python | application/app.py | PaulineLc/OpenDoorData | 203b19427467e330cd04b4dc5e8c31d5b9af755c | [
"MIT"
] | null | null | null | application/app.py | PaulineLc/OpenDoorData | 203b19427467e330cd04b4dc5e8c31d5b9af755c | [
"MIT"
] | null | null | null | application/app.py | PaulineLc/OpenDoorData | 203b19427467e330cd04b4dc5e8c31d5b9af755c | [
"MIT"
] | null | null | null | # file that contains flask app setup code
from flask import Flask # import Flask class
from config import DevelopmentConfig
import peewee
app = Flask(__name__) # instantiate the Flask class
app.config.from_object(DevelopmentConfig) # initialise app with development config
configdb = app.config['DATABASE']
db = peewee.MySQLDatabase("wifi_db",
host = configdb['host'],
user = configdb['user'],
password =configdb['password']
)
# This hook ensures that a connection is opened to handle any queries
# generated by the request.
@app.before_request
def _db_connect():
db.connect()
# This hook ensures that the connection is closed when we've finished
# processing the request.
@app.teardown_request
def _db_close(exc):
if not db.is_closed():
db.close()
| 30.032258 | 83 | 0.627282 |
7940401a36f4d8b0eeeb436b6bd2b08b0426b161 | 1,815 | py | Python | userbot/modules/hentai.py | bryanasfuk/Baphomet | bf3c3d9589511534ad848b3aa0b59e3d6b113282 | [
"Naumen",
"Condor-1.1",
"MS-PL"
] | 1 | 2021-04-15T16:10:49.000Z | 2021-04-15T16:10:49.000Z | userbot/modules/hentai.py | bryanasfuk/Baphomet | bf3c3d9589511534ad848b3aa0b59e3d6b113282 | [
"Naumen",
"Condor-1.1",
"MS-PL"
] | null | null | null | userbot/modules/hentai.py | bryanasfuk/Baphomet | bf3c3d9589511534ad848b3aa0b59e3d6b113282 | [
"Naumen",
"Condor-1.1",
"MS-PL"
] | 21 | 2021-02-01T14:01:42.000Z | 2021-08-22T01:13:28.000Z | # Copyright (C) 2021 Bian Sepang
# All Rights Reserved.
#
import nekos
from userbot import CMD_HELP
from userbot.events import register
@register(pattern=r"^\.hentai$", outgoing=True)
async def _(event):
"""Gets random hentai gif from nekos.py."""
await event.edit("`Fetching from nekos...`")
pic = nekos.img("random_hentai_gif")
await event.client.send_file(
event.chat_id,
pic,
caption=f"[Source]({pic})",
)
await event.delete()
@register(pattern=r"^\.pussy$", outgoing=True)
async def _(event):
"""Gets anime pussy gif from nekos.py."""
await event.edit("`Fetching from nekos...`")
pic = nekos.img("pussy")
await event.client.send_file(
event.chat_id,
pic,
caption=f"[Source]({pic})",
)
await event.delete()
@register(pattern=r"^\.cum$", outgoing=True)
async def _(event):
"""Gets anime cum gif from nekos.py."""
await event.edit("`Fetching from nekos...`")
pic = nekos.img("cum")
await event.client.send_file(
event.chat_id,
pic,
caption=f"[Source]({pic})",
)
await event.delete()
@register(pattern=r"^\.nsfwneko$", outgoing=True)
async def _(event):
"""Gets nsfw neko gif from nekos.py."""
await event.edit("`Fetching from nekos...`")
pic = nekos.img("nsfw_neko_gif")
await event.client.send_file(
event.chat_id,
pic,
caption=f"[Source]({pic})",
)
await event.delete()
CMD_HELP.update(
{
"hentai": ">`.hentai`"
"\nUsage: Gets random hentai gif from nekos"
"\n\n>`.pussy`"
"\nUsage: Gets anime pussy gif from nekos"
"\n\n>`.cum`"
"\nUsage: Gets anime cum gif from nekos"
"\n\n>`.nsfwneko`"
"\nUsage: Gets nsfw neko gif from nekos"
}
)
| 24.2 | 52 | 0.595041 |
7940404582ae89b39db0c17d08f20cb362e64003 | 3,087 | py | Python | src/main/resources/blazemeter/AddData.py | xebialabs-community/xlr-blazemeter-plugin | d4629d31fda2a42efc7e628b01665e055ebe0096 | [
"MIT"
] | null | null | null | src/main/resources/blazemeter/AddData.py | xebialabs-community/xlr-blazemeter-plugin | d4629d31fda2a42efc7e628b01665e055ebe0096 | [
"MIT"
] | 1 | 2020-01-28T20:14:04.000Z | 2020-01-28T20:14:04.000Z | src/main/resources/blazemeter/AddData.py | xebialabs-community/xlr-blazemeter-plugin | d4629d31fda2a42efc7e628b01665e055ebe0096 | [
"MIT"
] | 1 | 2020-01-15T20:07:14.000Z | 2020-01-15T20:07:14.000Z | #
# Copyright 2020 XEBIALABS
#
# Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
import sys
import time
import base64
import random
import json
from ast import literal_eval
from blazemeter.common import (call_url, encode_multipart)
# Initialize variables
base_url = server.get('url').strip('/')
contents = ''
# Make sure all the required parameters are set
if not base_url.strip():
print 'FATAL: Input error! Server configuration url undefined\n'
sys.exit(101)
if not keyId.strip():
print 'FATAL: Input error! Parameter keyId undefined\n'
sys.exit(102)
if not secret.strip():
print 'FATAL: Input error! Parameter secret undefined\n'
sys.exit(103)
if not test.strip():
print 'FATAL: Input error! Parameter test undefined\n'
sys.exit(104)
if not filename.strip():
print 'FATAL: Input error! Parameter filename undefined\n'
sys.exit(105)
if not testData:
print 'FATAL: Input error! Parameter testData undefined\n'
sys.exit(106)
print 'BlazeMeter test data upload started\n'
# Write the data to a string
try:
for r in testData:
row = literal_eval(r)
contents += ','.join(map(str, row)) + '\n'
except Exception as error:
print 'FATAL: The test data is in the wrong format!\n'
print 'Example: ["(\'Header1\', \'Header2\')", "(\'Value1\', \'Value2\')"]\n'
sys.exit(107)
# Encode multipart form data
files = {'file': {'filename': filename, 'content': contents}}
url_data, url_headers = encode_multipart({}, files)
# Add headers
base64string = base64.encodestring('%s:%s' % (keyId, secret)).replace('\n', '')
url_headers['Authorization'] = 'Basic %s' % base64string
# Upload the test data
upload_url = '%s/tests/%s/files' % (base_url, test)
data = call_url('post', upload_url, url_data, url_headers)
if 'updated' in data.get('result') and data.get('result').get('updated') == True:
print 'BlazeMeter file upload for test %s completed **successfully**\n' % test
sys.exit(0)
print 'FATAL: BlazeMeter upload for test %s **failed**:\n' % test
print '```'
print json.dumps(data)
print '```'
sys.exit(1) | 38.111111 | 462 | 0.721088 |
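The expected shape of testData and the CSV body it produces, following the parsing loop above (the values are illustrative):

from ast import literal_eval

test_data = ["('Header1', 'Header2')", "('Value1', 'Value2')"]
contents = ""
for r in test_data:
    row = literal_eval(r)                      # each "(a, b)" string becomes a tuple
    contents += ",".join(map(str, row)) + "\n"
print(contents)                                # "Header1,Header2" then "Value1,Value2" -> uploaded as the file body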
7940407104bb1baeb52a68a624af88c38b838f0b | 333 | py | Python | softdelete/urls.py | fritill-team/django-generic-delete | dcd07bfff577525b8010b9e9790f5c054a41433e | [
"MIT"
] | null | null | null | softdelete/urls.py | fritill-team/django-generic-delete | dcd07bfff577525b8010b9e9790f5c054a41433e | [
"MIT"
] | null | null | null | softdelete/urls.py | fritill-team/django-generic-delete | dcd07bfff577525b8010b9e9790f5c054a41433e | [
"MIT"
] | null | null | null | from django.urls import path, include
from .views import AdminSingleDeleteView
app_name = 'softdelete'
urlpatterns = [
path('<str:app_label>/<str:model>/<str:pk>/', AdminSingleDeleteView.as_view(), name='single-delete'),
# path('<str:app_label>/<str:model>/<str:pks>/', AdminBulkDeleteView.as_view(), name='bulk-delete'),
] | 37 | 105 | 0.717718 |
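With this urlconf included under its softdelete namespace, the delete URL for an object could be reversed roughly like this (the app/model/pk values are placeholders):

from django.urls import reverse

url = reverse(
    "softdelete:single-delete",
    kwargs={"app_label": "shop", "model": "product", "pk": "42"},  # hypothetical object
)
# -> "<include prefix>/shop/product/42/"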
794041a0313e747014f11fa2dcab2bbf8a8e5c31 | 8,989 | py | Python | utilities/utility_functions.py | pnwinkler/GKeepToCalc | ec995ba56a614deadca60927e29c1a8c27f1589a | [
"MIT"
] | null | null | null | utilities/utility_functions.py | pnwinkler/GKeepToCalc | ec995ba56a614deadca60927e29c1a8c27f1589a | [
"MIT"
] | null | null | null | utilities/utility_functions.py | pnwinkler/GKeepToCalc | ec995ba56a614deadca60927e29c1a8c27f1589a | [
"MIT"
] | null | null | null | import gkeepapi
import openpyxl
import shutil
import re
import os
from datetime import datetime
from GKeepToCalc.utilities.params import *
import getpass
def backup_targetpath():
if not BACKUP_FOLDER_NAME:
bk_folder_name = 'Keep2Calc.backups'
else:
bk_folder_name = BACKUP_FOLDER_NAME
backup_folder = os.path.join(os.path.dirname(TARGET_PATH), bk_folder_name)
if not os.path.exists(backup_folder):
os.makedirs(backup_folder)
now = datetime.now()
dmy = '{}.{}.{}'.format(now.day, now.month, now.year)
backup_basename = 'backup_' + dmy + '_' + os.path.basename(TARGET_PATH)
backup_full_path = os.path.join(backup_folder, backup_basename)
if not os.path.exists(backup_full_path):
print('Backing up target file')
shutil.copy(TARGET_PATH, backup_full_path)
def convert_ddmmyyyy_to_datetime(date_str, verbose=True):
# take string in form DDMMYYYY and return its datetime equivalent
# also accepts strings like DDMONTHYYY where MONTH is a string
# tolerant of spaces, newlines, semi-colons
# returns -1 if effort fails
date_str = date_str.replace('\n', '').replace(';', '').replace(' ', '').replace('.', '')
# if not date_str.isdigit():
# raise ValueError(f'Invalid parameter for utilities convert_ddmm_to_datetime, date_str={date_str}')
# return -1
try:
datetime_obj = datetime.strptime(date_str, '%d%B%Y')
except ValueError:
try:
datetime_obj = datetime.strptime(date_str, '%d%b%Y')
except ValueError:
try:
datetime_obj = datetime.strptime(date_str, '%B%d%Y')
except ValueError:
try:
datetime_obj = datetime.strptime(date_str, '%b%d%Y')
except Exception as e:
# Possible causes: UTF 8 bullshit; unconverted data like "day 1"
if verbose:
print('Error in utilities convert_ddmm_to_datetime:', e)
return -1
now = datetime.now()
datetime_obj = datetime_obj.replace(year=now.year)
if now < datetime_obj:
# exercise would be in the future, so we assume it's from last year
return datetime_obj.replace(year=now.year - 1)
return datetime_obj
def count_empty_cells_between_rows(sheet, start_row, end_row, cols_lst: list):
# a non-inclusive count. Given target sheet, start and end rows, and a simple or composite key, counts how many
# rows between the 2 passed in rows have empty values in the key columns
if isinstance(cols_lst, str):
cols_lst = list(cols_lst)
cols = [int(x) for x in cols_lst]
count = 0
for r in range(start_row + 1, end_row):
for col in cols:
if not sheet.cell(row=r, column=col).value:
count += 1
break
return count
def find_row_of_datecell_given_datetime(sheet, datetime_target, date_column=2) -> int:
# todo: make this handle full datetimes better. Like 2021-05-13 12:09:53
# current behavior is to fail to match "2021-05-13" because it's not "2021-05-13 12:09:53", for example
# returns row value of cell containing specified date, in specified column
# returns -1 if not found
# takes parameter sheet: a valid sheet object in an xlsx file
# takes parameter datetime_target: the datetime date to search for in DATE_COLUMN
# takes parameter DATE_COLUMN: column in which to search for date
datetime_target = datetime_target.replace(hour=0, minute=0, second=0, microsecond=0)
# this may be redundant. We can probably assume we'll get a proper sheet object
if not isinstance(sheet, openpyxl.worksheet.worksheet.Worksheet):
print(
f'Invalid parameter: find_row_of_datecell_given_datetime did not receive a valid sheet, sheet type = {type(sheet)}')
return -1
if not isinstance(datetime_target, datetime):
print(
f'Invalid parameter: find_row_of_datecell_given_datetime did not receive a valid datetime_target. It received: {datetime_target}')
return -1
# find date cell matching the "date" parameter in the given sheet
# note that in xlsx files:
# headers & strings are str,
# dates are datetime objects,
# empty cells are NoneType
r = 0
empty_cell_count = 0
while True:
r += 1
# check datetime cells in DATE_COLUMN for exercise_datetime match.
# break if too many empty cells found in place of dates.
if isinstance(sheet.cell(row=r, column=date_column).value, datetime):
empty_cell_count = 0
if sheet.cell(row=r, column=date_column).value == datetime_target:
return r
# if examined cell is distant from workout's date, jump closer
# we assume continuity in file's date column: that there's no time gap between start and final date.
days_to_advance = (datetime_target - sheet.cell(row=r, column=date_column).value).days
if days_to_advance > 3:
r += days_to_advance - 2
else:
# it's possible that some cells in this column are neither None nor datetime
# but we still break after 50 non-date cells, given that we're looking for dates
# a few cells may be empty, for formatting reasons, so don't set the cap too low.
# but there's no reason to have 50+ non-date cells in a row.
empty_cell_count += 1
if empty_cell_count > 50:
return -1
def return_first_empty_bodyweight_row(sheet, date_column=2, bodyweight_column=3):
# returns the integer row where:
# 1) there's a date column cell filled in
# 2) there's a bodyweights column cell that's empty
# 3) the previous row has a filled in date cell, and bodyweights cell (disregarding empty rows, e.g. at year's end)
today = datetime.now()
todays_row = find_row_of_datecell_given_datetime(sheet, today, date_column)
if sheet.cell(row=todays_row, column=bodyweight_column).value:
return todays_row
num_rows_to_check = 10000
first_occurrence = todays_row
for x in range(num_rows_to_check):
# search backwards
row = todays_row - x
try:
row_has_date = isinstance(sheet.cell(row=row, column=date_column).value, datetime)
row_has_bodyweight = isinstance(sheet.cell(row=row, column=bodyweight_column).value, (str, float, int))
if row_has_date and not row_has_bodyweight:
first_occurrence = row
elif row_has_bodyweight:
break
except IndexError as e:
raise ValueError(f"Failed to find empty bodyweight cell. Row index out of range. Exception {e}")
if x != num_rows_to_check:
return first_occurrence
raise ValueError(f"Failed to find empty bodyweight cell. Examined {num_rows_to_check} rows")
def is_est_xx_mins_line(line):
# I decided against putting this regex in utilities.params because
# it's fundamental to how my programs work, and cannot be changed without significant consequence
# it would also introduce stylistic inconsistencies in the xlsx file,
# when future workouts are written with a different stylistic standard.
est_xx_mins_reg = re.compile(r'(est \d\d(\d)? min)|(est \?\? min)|(est \?\?\? min)', re.IGNORECASE)
return re.search(est_xx_mins_reg, line)
def login_and_return_keep_obj():
keep = gkeepapi.Keep()
try:
from GKeepToCalc.utilities.credentials import username, password
    except ImportError:
        # a missing credentials module raises ImportError; fall back to prompting
        # to avoid typing your username each time, define it in credentials.py, e.g.
        # username = '[email protected]'
        username = input('Google Keep username: ')
        password = None
    # getpass obscures the password as it's entered
if password is None:
password = getpass.getpass('Google Keep password: ')
print('Logging in...')
keep.login(username, password)
return keep
def retrieve_notes(keep):
# retrieves a list of not trashed Note objects
print('Retrieving notes')
# gnotes = keep.all()
# gnotes = keep.find(pinned=True, archived=False, trashed=False)
gnotes = keep.find(trashed=False)
if not gnotes:
raise ValueError('No notes found. Incorrect username or password?')
return gnotes
def return_now_as_friendly_datetime():
# return datetime.now() in a usable format (that the other programs expect)
return datetime.now().replace(hour=0, minute=0, second=0, microsecond=0)
def target_path_is_xslx():
    # returns True if utilities.TARGET_PATH variable points to an .xlsx file
filename, file_extension = os.path.splitext(TARGET_PATH)
if file_extension == '.xlsx':
return True
return False
def targetsheet_exists():
wb = openpyxl.load_workbook(TARGET_PATH)
if TARGET_SHEET in wb.sheetnames:
return True
else:
return False
| 40.129464 | 142 | 0.671599 |
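A quick illustration of convert_ddmmyyyy_to_datetime's tolerant parsing (dates arbitrary; assumes an English locale and that the import path matches the repo layout). Note that the year in the string is parsed but then normalised to the current year, or the previous one if the date would land in the future:

from GKeepToCalc.utilities.utility_functions import convert_ddmmyyyy_to_datetime

for raw in ("25dec2020", "25 Dec;2020", "dec25\n2020"):
    parsed = convert_ddmmyyyy_to_datetime(raw)
    assert (parsed.day, parsed.month) == (25, 12)   # separators stripped, several formats tried in turn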
794041ce8b452e0a95d88560e3a473ac025e688d | 227 | py | Python | ubuild.py | basilveerman/vcver-python | d5c1c70f0e7aae57cd047bb08fe9bfb7a16685a4 | [
"MIT"
] | 1 | 2017-06-22T17:04:59.000Z | 2017-06-22T17:04:59.000Z | ubuild.py | basilveerman/vcver-python | d5c1c70f0e7aae57cd047bb08fe9bfb7a16685a4 | [
"MIT"
] | 5 | 2019-07-24T20:45:18.000Z | 2021-08-10T05:26:41.000Z | ubuild.py | basilveerman/vcver-python | d5c1c70f0e7aae57cd047bb08fe9bfb7a16685a4 | [
"MIT"
] | 3 | 2018-07-29T20:34:30.000Z | 2021-11-12T19:35:49.000Z | from uranium import current_build
import os
current_build.packages.install("uranium-plus[vscode]")
import uranium_plus
current_build.config.update({"uranium-plus": {"module": "vcver"}})
uranium_plus.bootstrap(current_build)
| 22.7 | 66 | 0.801762 |
79404288ed5b86bb8076f2e01fe996e98f46536c | 1,216 | py | Python | snakepit/world.py | Artimi/snakepit-game | b9832cd542b1317147c179e944f529c022cad2e4 | [
"MIT"
] | null | null | null | snakepit/world.py | Artimi/snakepit-game | b9832cd542b1317147c179e944f529c022cad2e4 | [
"MIT"
] | null | null | null | snakepit/world.py | Artimi/snakepit-game | b9832cd542b1317147c179e944f529c022cad2e4 | [
"MIT"
] | null | null | null | from . import settings
from .datatypes import Char
class World(list):
SIZE_X = settings.FIELD_SIZE_X
SIZE_Y = settings.FIELD_SIZE_Y
COLOR_0 = 0
CH_VOID = ' '
CH_STONE = '#'
VOID_CHAR = Char(CH_VOID, COLOR_0)
def __init__(self):
super(World, self).__init__()
for y in range(0, self.SIZE_Y):
self.append([self.VOID_CHAR] * self.SIZE_X)
def __repr__(self):
return '<%s [%sx%s]>' % (self.__class__.__name__, self.SIZE_X, self.SIZE_Y)
def __str__(self):
return self.show()
def show(self):
border = '+' + '-' * len(self[0]) + '+'
return border + '\n' + '\n'.join('|' + (''.join(j[0] for j in i)) + '|' for i in self) + '\n' + border
def reset(self):
for y in range(0, self.SIZE_Y):
for x in range(0, self.SIZE_X):
if self[y][x][0] != self.CH_VOID:
self[y][x] = self.VOID_CHAR
def load(self, data):
self[:] = data
def update(self, draw):
self[draw.y][draw.x] = Char(draw.char, draw.color)
@classmethod
def is_invalid_position(cls, pos):
return pos.x < 0 or pos.x >= cls.SIZE_X or pos.y < 0 or pos.y >= cls.SIZE_Y
| 28.27907 | 110 | 0.553454 |
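A short usage sketch for World; Draw is a stand-in for whatever draw object the game passes to update() (only char, color, x and y are read), and it assumes the snakepit package and its settings module are importable:

from collections import namedtuple
from snakepit.world import World

Draw = namedtuple("Draw", "char color x y")   # assumed shape, mirroring update()

world = World()
world.update(Draw("#", 1, 3, 2))              # place a stone character at x=3, y=2
print(world)                                  # bordered ASCII rendering via show()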
794042a784667230368f57d0f830b6e16b89cd39 | 13,601 | py | Python | gaphor/core/modeling/tests/test_elementdispatcher.py | Texopolis/gaphor | 3b190620075fd413258af1e7a007b4b2167a7564 | [
"Apache-2.0"
] | 1 | 2022-01-30T15:33:53.000Z | 2022-01-30T15:33:53.000Z | gaphor/core/modeling/tests/test_elementdispatcher.py | burakozturk16/gaphor | 86267a5200ac4439626d35d306dbb376c3800107 | [
"Apache-2.0"
] | 1 | 2021-12-10T19:46:00.000Z | 2021-12-10T19:46:00.000Z | gaphor/core/modeling/tests/test_elementdispatcher.py | burakozturk16/gaphor | 86267a5200ac4439626d35d306dbb376c3800107 | [
"Apache-2.0"
] | 1 | 2022-01-23T18:36:27.000Z | 2022-01-23T18:36:27.000Z | import pytest
from gaphor import UML
from gaphor.core.eventmanager import EventManager
from gaphor.core.modeling import Element, ElementFactory
from gaphor.core.modeling.elementdispatcher import ElementDispatcher, EventWatcher
from gaphor.core.modeling.properties import association
from gaphor.UML.modelinglanguage import UMLModelingLanguage
class Event:
def __init__(self):
self.events = []
def handler(self, event):
self.events.append(event)
@pytest.fixture
def event_manager():
return EventManager()
@pytest.fixture
def modeling_language():
return UMLModelingLanguage()
@pytest.fixture
def dispatcher(event_manager, modeling_language):
return ElementDispatcher(event_manager, modeling_language)
@pytest.fixture
def element_factory(event_manager):
return ElementFactory(event_manager)
@pytest.fixture
def uml_class(element_factory):
return element_factory.create(UML.Class)
@pytest.fixture
def uml_parameter(element_factory):
return element_factory.create(UML.Parameter)
@pytest.fixture
def uml_operation(element_factory):
return element_factory.create(UML.Operation)
@pytest.fixture
def uml_transition(element_factory):
return element_factory.create(UML.Transition)
@pytest.fixture
def uml_constraint(element_factory):
return element_factory.create(UML.Constraint)
@pytest.fixture
def event():
return Event()
def test_register_handler(dispatcher, uml_class, uml_parameter, uml_operation, event):
element = uml_class
dispatcher.subscribe(event.handler, element, "ownedOperation.ownedParameter.name")
assert len(dispatcher._handlers) == 1
assert list(dispatcher._handlers.keys())[0] == (element, UML.Class.ownedOperation)
# Add some properties:
# 1:
element.ownedOperation = uml_operation
# 2:
p = element.ownedOperation[0].ownedParameter = uml_parameter
# 3:
p.name = "func"
dispatcher.subscribe(event.handler, element, "ownedOperation.ownedParameter.name")
assert len(event.events) == 3
assert len(dispatcher._handlers) == 3
def test_register_handler_twice(
dispatcher, uml_class, uml_operation, uml_parameter, event
):
"""Multiple registrations have no effect."""
# Add some properties:
element = uml_class
element.ownedOperation = uml_operation
p = element.ownedOperation[0].ownedParameter = uml_parameter
dispatcher.subscribe(event.handler, element, "ownedOperation.ownedParameter.name")
n_handlers = len(dispatcher._handlers)
assert len(event.events) == 0
dispatcher.subscribe(event.handler, element, "ownedOperation.ownedParameter.name")
assert n_handlers == len(dispatcher._handlers)
dispatcher.subscribe(event.handler, element, "ownedOperation.ownedParameter.name")
assert n_handlers == len(dispatcher._handlers)
dispatcher.subscribe(event.handler, element, "ownedOperation.ownedParameter.name")
assert n_handlers == len(dispatcher._handlers)
p.name = "func"
assert len(event.events) == 1
def test_unregister_handler(dispatcher, uml_class, uml_operation, uml_parameter, event):
# First some setup:
element = uml_class
o = element.ownedOperation = uml_operation
p = element.ownedOperation[0].ownedParameter = uml_parameter
p.name = "func"
dispatcher.subscribe(event.handler, element, "ownedOperation.ownedParameter.name")
assert len(dispatcher._handlers) == 3
assert dispatcher._handlers[element, UML.Class.ownedOperation]
assert dispatcher._handlers[o, UML.Operation.ownedParameter]
assert dispatcher._handlers[p, UML.Parameter.name]
dispatcher.unsubscribe(event.handler)
assert len(dispatcher._handlers) == 0, dispatcher._handlers
assert len(dispatcher._reverse) == 0, dispatcher._reverse
# Should not fail here too:
dispatcher.unsubscribe(event.handler)
def test_notification(
dispatcher, uml_class, uml_operation, uml_parameter, event, element_factory
):
"""Test notifications with Class object."""
element = uml_class
o = element.ownedOperation = uml_operation
p = element.ownedOperation[0].ownedParameter = uml_parameter
p.name = "func"
dispatcher.subscribe(event.handler, element, "ownedOperation.ownedParameter.name")
assert len(dispatcher._handlers) == 3
assert not event.events
element.ownedOperation = element_factory.create(UML.Operation)
assert len(event.events) == 1, event.events
assert len(dispatcher._handlers) == 4
p.name = "othername"
assert len(event.events) == 2, event.events
del element.ownedOperation[o]
assert len(dispatcher._handlers) == 2
def test_notification_2(
dispatcher, uml_transition, uml_constraint, event, element_factory
):
"""Test notifications with Transition object."""
element = uml_transition
g = element.guard = uml_constraint
dispatcher.subscribe(event.handler, element, "guard.specification")
assert len(dispatcher._handlers) == 2
assert not event.events
assert (element.guard, UML.Constraint.specification) in list(
dispatcher._handlers.keys()
), list(dispatcher._handlers.keys())
g.specification = "x"
assert len(event.events) == 1, event.events
element.guard = element_factory.create(UML.Constraint)
assert len(event.events) == 2, event.events
assert len(dispatcher._handlers) == 2, len(dispatcher._handlers)
assert (element.guard, UML.Constraint.specification) in list(
dispatcher._handlers.keys()
)
def test_notification_of_change(
dispatcher, uml_transition, uml_constraint, event, element_factory
):
"""Test notifications with Transition object."""
element = uml_transition
g = element.guard = uml_constraint
dispatcher.subscribe(event.handler, element, "guard.specification")
assert len(dispatcher._handlers) == 2
assert not event.events
g.specification = "x"
assert len(event.events) == 1, event.events
element.guard = element_factory.create(UML.Constraint)
assert len(event.events) == 2, event.events
def test_notification_with_composition(
dispatcher, uml_class, uml_operation, uml_constraint, event
):
"""Test unregister with composition.
Use Class.ownedOperation.precondition.
"""
element = uml_class
o = element.ownedOperation = uml_operation
p = element.ownedOperation[0].precondition = uml_constraint
p.name = "func"
dispatcher.subscribe(event.handler, element, "ownedOperation.precondition.name")
assert len(dispatcher._handlers) == 3
assert not event.events
del element.ownedOperation[o]
assert len(dispatcher._handlers) == 1
def test_notification_with_incompatible_elements(
dispatcher, uml_transition, uml_constraint, event
):
"""Test unregister with composition.
Use Class.ownedOperation.precondition.
"""
element = uml_transition
g = element.guard = uml_constraint
dispatcher.subscribe(event.handler, element, "guard.specification")
assert len(dispatcher._handlers) == 2
assert not event.events
assert (element.guard, UML.Constraint.specification) in list(
dispatcher._handlers.keys()
), list(dispatcher._handlers.keys())
g.specification = "x"
assert len(event.events) == 1, event.events
g.specification = "a"
assert len(event.events) == 2, event.events
class A(Element):
one: association
two: association
def __init__(self, id=None, model=None):
super().__init__(id, model)
A.one = association("one", A, lower=0, upper=1, composite=True)
A.two = association("two", A, lower=0, upper=2, composite=True)
class TestElementDispatcherAsService:
@pytest.fixture
def case(self, case):
case.events = []
case.dispatcher = case.element_factory.element_dispatcher
def handler(event):
case.events.append(event)
case._handler = handler
def getA():
return case.element_factory.create(A)
case.A = getA
return case
def test_notification(self, case):
"""Test notifications with Class object."""
dispatcher = case.dispatcher
element = case.element_factory.create(UML.Class)
o = element.ownedOperation = case.element_factory.create(UML.Operation)
p = element.ownedOperation[0].ownedParameter = case.element_factory.create(
UML.Parameter
)
p.name = "func"
dispatcher.subscribe(
case._handler, element, "ownedOperation.ownedParameter.name"
)
assert len(dispatcher._handlers) == 4
assert not case.events
element.ownedOperation = case.element_factory.create(UML.Operation)
assert len(case.events) == 1, case.events
assert len(dispatcher._handlers) == 5
p.name = "othername"
assert len(case.events) == 2, case.events
del element.ownedOperation[o]
assert len(dispatcher._handlers) == 3
def test_association_notification(self, case):
"""Test notifications with Class object.
Tricky case where no events are fired.
"""
dispatcher = case.dispatcher
element = case.element_factory.create(UML.Association)
p1 = element.memberEnd = case.element_factory.create(UML.Property)
element.memberEnd = case.element_factory.create(UML.Property)
assert len(element.memberEnd) == 2
dispatcher.subscribe(case._handler, element, "memberEnd.name")
assert len(dispatcher._handlers) == 4, len(dispatcher._handlers)
assert not case.events
p1.name = "foo"
assert len(case.events) == 1, (case.events, dispatcher._handlers)
assert len(dispatcher._handlers) == 4
p1.name = "othername"
assert len(case.events) == 2, case.events
p1.name = "othername"
assert len(case.events) == 2, case.events
def test_association_notification_complex(self, case):
"""Test notifications with Class object.
Tricky case where no events are fired.
"""
dispatcher = case.dispatcher
element = case.element_factory.create(UML.Association)
p1 = element.memberEnd = case.element_factory.create(UML.Property)
p2 = element.memberEnd = case.element_factory.create(UML.Property)
p1.lowerValue = "0"
p1.upperValue = "1"
p2.lowerValue = "1"
p2.upperValue = "*"
assert len(element.memberEnd) == 2
base = "memberEnd[Property]."
dispatcher.subscribe(case._handler, element, base + "name")
dispatcher.subscribe(case._handler, element, base + "aggregation")
dispatcher.subscribe(case._handler, element, base + "classifier")
dispatcher.subscribe(case._handler, element, base + "lowerValue")
dispatcher.subscribe(case._handler, element, base + "upperValue")
assert len(dispatcher._handlers) == 12, len(dispatcher._handlers)
assert not case.events
p1.name = "foo"
assert len(case.events) == 1, (case.events, dispatcher._handlers)
assert len(dispatcher._handlers) == 12
p1.name = "othername"
assert len(case.events) == 2, case.events
def test_diamond(self, case):
"""Test diamond shaped dependencies a -> b -> c, a -> b' -> c."""
A = case.A
a = A()
watcher = EventWatcher(a, case.dispatcher, case._handler)
watcher.watch("one.two.one.two")
a.one = A()
a.one.two = A()
a.one.two = A()
a.one.two[0].one = A()
a.one.two[1].one = a.one.two[0].one
a.one.two[0].one.two = A()
assert len(case.events) == 6
a.unlink()
watcher.unsubscribe_all()
watcher.unsubscribe_all()
def test_big_diamond(self, case):
"""Test diamond shaped dependencies a -> b -> c -> d, a -> b' -> c' ->
d."""
A = case.A
a = A()
watcher = EventWatcher(a, case.dispatcher, case._handler)
watcher.watch("one.two.one.two")
a.one = A()
a.one.two = A()
a.one.two = A()
a.one.two[0].one = A()
a.one.two[1].one = A()
a.one.two[0].one.two = A()
a.one.two[1].one.two = a.one.two[0].one.two[0]
assert len(case.events) == 7
a.unlink()
watcher.unsubscribe_all()
watcher.unsubscribe_all()
assert len(case.dispatcher._handlers) == 1
def test_braking_big_diamond(self, case):
"""Test diamond shaped dependencies a -> b -> c -> d, a -> b' -> c' ->
d."""
A = case.A
a = A()
watcher = EventWatcher(a, case.dispatcher, case._handler)
watcher.watch("one.two.one.two")
a.one = A()
a.one.two = A()
a.one.two = A()
a.one.two[0].one = A()
a.one.two[1].one = A()
a.one.two[0].one.two = A()
a.one.two[1].one.two = a.one.two[0].one.two[0]
assert len(case.events) == 7
assert len(case.dispatcher._handlers) == 7
del a.one.two[0].one
watcher.unsubscribe_all()
watcher.unsubscribe_all()
assert len(case.dispatcher._handlers) == 1
def test_cyclic(self, case):
"""Test cyclic dependency a -> b -> c -> a."""
A = case.A
a = A()
watcher = EventWatcher(a, case.dispatcher, case._handler)
watcher.watch("one.two.one.two")
a.one = A()
a.one.two = A()
a.one.two = A()
a.one.two[0].one = a
assert 4 == len(case.events)
a.unlink()
assert 2 == len(case.dispatcher._handlers)
| 31.33871 | 88 | 0.670465 |
794042b548004e67eee02b194db263821271d33e | 447 | py | Python | ruuvitag/ruuvi_single.py | emehtata/scripts | 9eda01157d2dfb3951bf2056d2336c1df4e01512 | [
"MIT"
] | null | null | null | ruuvitag/ruuvi_single.py | emehtata/scripts | 9eda01157d2dfb3951bf2056d2336c1df4e01512 | [
"MIT"
] | null | null | null | ruuvitag/ruuvi_single.py | emehtata/scripts | 9eda01157d2dfb3951bf2056d2336c1df4e01512 | [
"MIT"
] | null | null | null | # 56 DD:17:F3:D7:86:CE freezer (pakastin)
# 74 EA:D5:76:69:70:99 refrigerator (jääkaappi)
# 104 EC:67:46:36:EA:60 sauna
import logging
import sys
from ruuvitag_sensor.ruuvi import RuuviTagSensor
logging.basicConfig(level=logging.DEBUG)
macs = [ sys.argv[1] ]
timeout_in_sec = 30
datas = RuuviTagSensor.get_data_for_sensors(macs, timeout_in_sec)
if sys.argv[1] in datas:
print(datas[sys.argv[1]])
else:
print("Data not received for "+sys.argv[1])
sys.exit(1)
| 20.318182 | 65 | 0.736018 |
794042f32dfdd6be5b028c255db036b3d9c85283 | 16,584 | py | Python | technology/freepdk45/tech/tech.py | ycyang0508/OpenRAM | 54c6043cb81c51f5f4a2f77e91145545ce0ed6d6 | [
"BSD-3-Clause"
] | 1 | 2022-02-17T22:12:46.000Z | 2022-02-17T22:12:46.000Z | technology/freepdk45/tech/tech.py | ycyang0508/OpenRAM | 54c6043cb81c51f5f4a2f77e91145545ce0ed6d6 | [
"BSD-3-Clause"
] | null | null | null | technology/freepdk45/tech/tech.py | ycyang0508/OpenRAM | 54c6043cb81c51f5f4a2f77e91145545ce0ed6d6 | [
"BSD-3-Clause"
] | null | null | null | # See LICENSE for licensing information.
#
# Copyright (c) 2016-2021 Regents of the University of California and The Board
# of Regents for the Oklahoma Agricultural and Mechanical College
# (acting for and on behalf of Oklahoma State University)
# All rights reserved.
#
import os
from design_rules import *
from module_type import *
from custom_cell_properties import cell_properties
from custom_layer_properties import layer_properties
"""
File containing the process technology parameters for FreePDK 45nm.
"""
###################################################
# Custom modules
###################################################
# This uses the default classes to instantiate module from
# '$OPENRAM_HOME/compiler/modules'.
# Using tech_modules['cellname'] you can override each class by providing a custom
# implementation in '$OPENRAM_TECHDIR/modules/'
# For example: tech_modules['contact'] = 'contact_freepdk45'
tech_modules = module_type()
###################################################
# Custom cell properties
###################################################
cell_properties = cell_properties()
cell_properties.bitcell_power_pin_directions = ("V", "V")
###################################################
# Custom cell properties
###################################################
layer_properties = layer_properties()
###################################################
# GDS file info
###################################################
GDS = {}
# gds units
# From http://www.cnf.cornell.edu/cnf_spie9.html: "The first
#is the size of a database unit in user units. The second is the size
#of a database unit in meters. For example, if your library was
#created with the default units (user unit = 1 m and 1000 database
#units per user unit), then the first number would be 0.001 and the
#second number would be 10-9. Typically, the first number is less than
#1, since you use more than 1 database unit per user unit. To
#calculate the size of a user unit in meters, divide the second number
#by the first."
GDS["unit"] = (0.0005,1e-9)
# default label zoom
GDS["zoom"] = 0.05
###################################################
# Interconnect stacks
###################################################
poly_stack = ("poly", "contact", "m1")
active_stack = ("active", "contact", "m1")
m1_stack = ("m1", "via1", "m2")
m2_stack = ("m2", "via2", "m3")
m3_stack = ("m3", "via3", "m4")
layer_indices = {"poly": 0,
"active": 0,
"m1": 1,
"m2": 2,
"m3": 3,
"m4": 4}
# The FEOL stacks get us up to m1
feol_stacks = [poly_stack,
active_stack]
# The BEOL stacks are m1 and up
beol_stacks = [m1_stack,
m2_stack,
m3_stack]
layer_stacks = feol_stacks + beol_stacks
preferred_directions = {"poly": "V",
"active": "V",
"m1": "H",
"m2": "V",
"m3": "H",
"m4": "V"}
###################################################
# Power grid
###################################################
# Use M3/M4
power_grid = m3_stack
###################################################
# GDS Layer Map
###################################################
# Create the GDS layer map using internal names
layer = {}
layer["active"] = (1, 0)
layer["pwell"] = (2, 0)
layer["nwell"] = (3, 0)
layer["nimplant"]= (4, 0)
layer["pimplant"]= (5, 0)
layer["vtg"] = (6, 0)
layer["vth"] = (7, 0)
layer["thkox"] = (8, 0)
layer["poly"] = (9, 0)
layer["contact"] = (10, 0)
layer["m1"] = (11, 0)
layer["via1"] = (12, 0)
layer["m2"] = (13, 0)
layer["via2"] = (14, 0)
layer["m3"] = (15, 0)
layer["via3"] = (16, 0)
layer["m4"] = (17, 0)
layer["via4"] = (18, 0)
layer["m5"] = (19, 0)
layer["via5"] = (20, 0)
layer["m6"] = (21, 0)
layer["via6"] = (22, 0)
layer["m7"] = (23, 0)
layer["via7"] = (24, 0)
layer["m8"] = (25, 0)
layer["via8"] = (26, 0)
layer["m9"] = (27, 0)
layer["via9"] = (28, 0)
layer["m10"] = (29, 0)
layer["text"] = (239, 0)
layer["boundary"]= (239, 0)
# Layer names for external PDKs
layer_names = {}
layer_names["active"] = "active"
layer_names["pwell"] = "pwell"
layer_names["nwell"] = "nwell"
layer_names["nimplant"]= "nimplant"
layer_names["pimplant"]= "pimplant"
layer_names["vtg"] = "vtg"
layer_names["vth"] = "vth"
layer_names["thkox"] = "thkox"
layer_names["poly"] = "poly"
layer_names["contact"] = "contact"
layer_names["m1"] = "metal1"
layer_names["via1"] = "via1"
layer_names["m2"] = "metal2"
layer_names["via2"] = "via2"
layer_names["m3"] = "metal3"
layer_names["via3"] = "via3"
layer_names["m4"] = "metal4"
layer_names["via4"] = "via4"
layer_names["m5"] = "metal5"
layer_names["via5"] = "via5"
layer_names["m6"] = "metal6"
layer_names["via6"] = "via6"
layer_names["m7"] = "metal7"
layer_names["via7"] = "via7"
layer_names["m8"] = "metal8"
layer_names["via8"] = "via8"
layer_names["m9"] = "metal9"
layer_names["via9"] = "via9"
layer_names["m10"] = "metal10"
layer_names["text"] = "text"
layer_names["boundary"]= "boundary"
###################################################
# DRC/LVS Rules Setup
###################################################
#technology parameter
parameter={}
parameter["min_tx_size"] = 0.09
parameter["beta"] = 3
parameter["6T_inv_nmos_size"] = 0.205
parameter["6T_inv_pmos_size"] = 0.09
parameter["6T_access_size"] = 0.135
drclvs_home=os.environ.get("DRCLVS_HOME")
drc = design_rules("freepdk45")
#grid size
drc["grid"] = 0.0025
#DRC/LVS test set_up
drc["drc_rules"]=drclvs_home + "/calibreDRC.rul"
drc["lvs_rules"]=drclvs_home + "/calibreLVS.rul"
drc["xrc_rules"]=drclvs_home + "/calibrexRC.rul"
drc["layer_map"]=os.environ.get("OPENRAM_TECH") + "/freepdk45/layers.map"
# minwidth_tx with contact (no dog bone transistors)
drc["minwidth_tx"] = 0.09
drc["minlength_channel"] = 0.05
# WELL.2 Minimum spacing of nwell/pwell at different potential
drc["pwell_to_nwell"] = 0.225
# WELL.3 Minimum spacing of nwell/pwell at the same potential
# WELL.4 Minimum width of nwell/pwell
drc.add_layer("nwell",
width=0.2,
spacing=0.135)
drc.add_layer("pwell",
width=0.2,
spacing=0.135)
# POLY.1 Minimum width of poly
# POLY.2 Minimum spacing of poly AND active
drc.add_layer("poly",
width=0.05,
spacing=0.14)
# POLY.3 Minimum poly extension beyond active
drc["poly_extend_active"]=0.055
# Not a rule
drc["poly_to_contact"]=0.075
# POLY.4 Minimum enclosure of active around gate
drc["active_enclose_gate"]=0.07
# POLY.5 Minimum spacing of field poly to active
drc["poly_to_active"]=0.05
# POLY.6 Minimum spacing of field poly
drc["poly_to_field_poly"]=0.075
# Not a rule
drc["minarea_poly"]=0.0
# ACTIVE.1 Minimum width of active
# ACTIVE.2 Minimum spacing of active
drc.add_layer("active",
width=0.09,
spacing=0.08)
# ACTIVE.3 Minimum enclosure/spacing of nwell/pwell to active
drc.add_enclosure("nwell",
layer="active",
enclosure=0.055)
drc.add_enclosure("pwell",
layer="active",
enclosure=0.055)
# IMPLANT.1 Minimum spacing of nimplant/ pimplant to channel
drc["implant_to_channel"]=0.07
# Not a rule
drc.add_enclosure("implant",
layer="active",
enclosure=0)
# Not a rule
drc.add_enclosure("implant",
layer="contact",
enclosure=0)
# IMPLANT.2 Minimum spacing of nimplant/ pimplant to contact
drc["implant_to_contact"]=0.025
# IMPLANT.3 Minimum width/ spacing of nimplant/ pimplant
# IMPLANT.4 Minimum width/ spacing of nimplant/ pimplant
drc.add_layer("implant",
width=0.045,
spacing=0.045)
# CONTACT.1 Minimum width of contact
# CONTACT.2 Minimum spacing of contact
drc.add_layer("contact",
width=0.065,
spacing=0.075)
# CONTACT.4 Minimum enclosure of active around contact
drc.add_enclosure("active",
layer="contact",
enclosure=0.005)
# CONTACT.6 Minimum spacing of contact and gate
drc["active_contact_to_gate"]=0.0375
# CONTACT.7 Minimum spacing of contact and poly
drc["poly_contact_to_gate"]=0.090
# CONTACT.1 Minimum width of contact
# CONTACT.2 Minimum spacing of contact
drc.add_layer("contact",
width=0.065,
spacing=0.075)
# CONTACT.5 Minimum enclosure of poly around contact
drc.add_enclosure("poly",
layer="contact",
enclosure=0.005)
# CONTACT.6 Minimum spacing of contact and gate
drc["contact_to_gate"]=0.0375
# CONTACT.7 Minimum spacing of contact and poly
drc["contact_to_poly"]=0.090
# METAL1.1 Minimum width of metal1
# METAL1.2 Minimum spacing of metal1
drc.add_layer("m1",
width=0.065,
spacing=0.065)
# METAL1.3 Minimum enclosure around contact on two opposite sides
drc.add_enclosure("m1",
layer="contact",
enclosure=0,
extension=0.035)
# METAL1.4 Minimum enclosure around via1 on two opposite sides
drc.add_enclosure("m1",
layer="via1",
enclosure=0,
extension=0.035)
# VIA1.1 Minimum width of via1
# VIA1.2 Minimum spacing of via1
drc.add_layer("via1",
width=0.065,
spacing=0.075)
# METALINT.1 Minimum width of intermediate metal
# METALINT.2 Minimum spacing of intermediate metal
drc.add_layer("m2",
width=0.07,
spacing=0.07)
# METALINT.3 Minimum enclosure around via1 on two opposite sides
drc.add_enclosure("m2",
layer="via1",
enclosure=0,
extension=0.035)
# METALINT.4 Minimum enclosure around via[2-3] on two opposite sides
drc.add_enclosure("m2",
layer="via2",
enclosure=0,
extension=0.035)
# VIA2-3.1 Minimum width of Via[2-3]
# VIA2-3.2 Minimum spacing of Via[2-3]
drc.add_layer("via2",
width=0.065,
spacing=0.075)
# METALINT.1 Minimum width of intermediate metal
# METALINT.2 Minimum spacing of intermediate metal
# Minimum spacing of m3 wider than 0.09 & longer than 0.3=0.09
# Minimum spacing of m3 wider than 0.27 & longer than 0.9=0.27
# Minimum spacing of m3 wider than 0.5 & longer than 1.8=0.5
# Minimum spacing of m3 wider than 0.9 & longer than 2.7=0.9
# Minimum spacing of m3 wider than 1.5 & longer than 4.0=1.5
drc.add_layer("m3",
width=0.07,
spacing=drc_lut({(0.00, 0.0): 0.07,
(0.09, 0.3): 0.09,
(0.27, 0.9): 0.27,
(0.50, 1.8): 0.5,
(0.90, 2.7): 0.9,
(1.50, 4.0): 1.5}))
# METALINT.3 Minimum enclosure around via1 on two opposite sides
drc.add_enclosure("m3",
layer="via2",
enclosure=0,
extension=0.035)
# METALINT.4 Minimum enclosure around via[2-3] on two opposite sides
drc.add_enclosure("m3",
layer="via3",
enclosure=0,
extension=0.035)
# VIA2-3.1 Minimum width of Via[2-3]
# VIA2-3.2 Minimum spacing of Via[2-3]
drc.add_layer("via3",
width=0.07,
spacing=0.085)
# METALSMG.1 Minimum width of semi-global metal
# METALSMG.2 Minimum spacing of semi-global metal
# Minimum spacing of m4 wider than 0.27 & longer than 0.9=0.27
# Minimum spacing of m4 wider than 0.5 & longer than 1.8=0.5
# Minimum spacing of m4 wider than 0.9 & longer than 2.7=0.9
# Minimum spacing of m4 wider than 1.5 & longer than 4.0=1.5
drc.add_layer("m4",
width=0.14,
spacing=drc_lut({(0.00, 0.0): 0.14,
(0.27, 0.9): 0.27,
(0.50, 1.8): 0.5,
(0.90, 2.7): 0.9,
(1.50, 4.0): 1.5}))
# METALSMG.3 Minimum enclosure around via[3-6] on two opposite sides
drc.add_enclosure("m4",
layer="via3",
enclosure=0.0025)
# Metal 5-10 are omitted
###################################################
# Spice Simulation Parameters
###################################################
#spice info
spice = {}
spice["nmos"] = "nmos_vtg"
spice["pmos"] = "pmos_vtg"
# This is a map of corners to model files
SPICE_MODEL_DIR=os.environ.get("SPICE_MODEL_DIR")
spice["fet_models"] = {"TT": [SPICE_MODEL_DIR + "/models_nom/PMOS_VTG.inc", SPICE_MODEL_DIR + "/models_nom/NMOS_VTG.inc"],
"FF": [SPICE_MODEL_DIR + "/models_ff/PMOS_VTG.inc", SPICE_MODEL_DIR + "/models_ff/NMOS_VTG.inc"],
"SF": [SPICE_MODEL_DIR + "/models_ss/PMOS_VTG.inc", SPICE_MODEL_DIR + "/models_ff/NMOS_VTG.inc"],
"FS": [SPICE_MODEL_DIR + "/models_ff/PMOS_VTG.inc", SPICE_MODEL_DIR + "/models_ss/NMOS_VTG.inc"],
"SS": [SPICE_MODEL_DIR + "/models_ss/PMOS_VTG.inc", SPICE_MODEL_DIR + "/models_ss/NMOS_VTG.inc"],
"ST": [SPICE_MODEL_DIR + "/models_ss/PMOS_VTG.inc", SPICE_MODEL_DIR + "/models_nom/NMOS_VTG.inc"],
"TS": [SPICE_MODEL_DIR + "/models_nom/PMOS_VTG.inc", SPICE_MODEL_DIR + "/models_ss/NMOS_VTG.inc"],
"FT": [SPICE_MODEL_DIR + "/models_ff/PMOS_VTG.inc", SPICE_MODEL_DIR + "/models_nom/NMOS_VTG.inc"],
"TF": [SPICE_MODEL_DIR + "/models_nom/PMOS_VTG.inc", SPICE_MODEL_DIR + "/models_ff/NMOS_VTG.inc"],
}
#spice stimulus related variables
spice["feasible_period"] = 5 # estimated feasible period in ns
spice["supply_voltages"] = [0.9, 1.0, 1.1] # Supply voltage corners in [Volts]
spice["nom_supply_voltage"] = 1.0 # Nominal supply voltage in [Volts]
spice["rise_time"] = 0.005 # rise time in [Nano-seconds]
spice["fall_time"] = 0.005 # fall time in [Nano-seconds]
spice["temperatures"] = [0, 25, 100] # Temperature corners (celcius)
spice["nom_temperature"] = 25 # Nominal temperature (celcius)
# analytical delay parameters
spice["nom_threshold"] = 0.4 # Typical Threshold voltage in Volts
spice["wire_unit_r"] = 0.075 # Unit wire resistance in ohms/square
spice["wire_unit_c"] = 0.64 # Unit wire capacitance ff/um^2
spice["min_tx_drain_c"] = 0.7 # Minimum transistor drain capacitance in ff
spice["min_tx_gate_c"] = 0.2 # Minimum transistor gate capacitance in ff
spice["dff_setup"] = 9 # DFF setup time in ps
spice["dff_hold"] = 1 # DFF hold time in ps
spice["dff_in_cap"] = 0.2091 # Input capacitance (D) [Femto-farad]
spice["dff_out_cap"] = 2 # Output capacitance (Q) [Femto-farad]
# analytical power parameters, many values are temporary
spice["bitcell_leakage"] = 1 # Leakage power of a single bitcell in nW
spice["inv_leakage"] = 1 # Leakage power of inverter in nW
spice["nand2_leakage"] = 1 # Leakage power of 2-input nand in nW
spice["nand3_leakage"] = 1 # Leakage power of 3-input nand in nW
spice["nand4_leakage"] = 1 # Leakage power of 4-input nand in nW
spice["nor2_leakage"] = 1 # Leakage power of 2-input nor in nW
spice["dff_leakage"] = 1 # Leakage power of flop in nW
spice["default_event_frequency"] = 100 # Default event activity of every gate. MHz
# Parameters related to sense amp enable timing and delay chain/RBL sizing
parameter["le_tau"] = 2.25 # In pico-seconds.
parameter["cap_relative_per_ff"] = 7.5 # Units of Relative Capacitance/ Femto-Farad
parameter["dff_clk_cin"] = 30.6 # relative capacitance
parameter["6tcell_wl_cin"] = 3 # relative capacitance
parameter["min_inv_para_delay"] = 2.4 # Tau delay units
parameter["sa_en_pmos_size"] = 0.72 # micro-meters
parameter["sa_en_nmos_size"] = 0.27 # micro-meters
parameter["sa_inv_pmos_size"] = 0.54 # micro-meters
parameter["sa_inv_nmos_size"] = 0.27 # micro-meters
parameter["bitcell_drain_cap"] = 0.1 # In Femto-Farad, approximation of drain capacitance
###################################################
# Technology Tool Preferences
###################################################
drc_name = "calibre"
lvs_name = "calibre"
pex_name = "calibre"
blackbox_bitcell = False
| 35.587983 | 122 | 0.582972 |
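A small illustration of how the stack tables above are typically consumed when routing; this is a plain dict/tuple lookup on the definitions in this file:

stack = next(s for s in layer_stacks if s[0] == "m2" and s[2] == "m3")
print(stack)                          # ('m2', 'via2', 'm3') - the via stack connecting m2 to m3
print(preferred_directions["m2"])     # 'V' - m2 is routed vertically
print(preferred_directions["m3"])     # 'H' - m3 is routed horizontally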
79404384fc2971f69ead8fdd1e9d7609ed4e4911 | 554 | py | Python | util/text_tool.py | BarryZM/KnowYouAI | 8c9d96238090fa8fd70b8581ac536bb1b0691eb5 | [
"MIT"
] | null | null | null | util/text_tool.py | BarryZM/KnowYouAI | 8c9d96238090fa8fd70b8581ac536bb1b0691eb5 | [
"MIT"
] | null | null | null | util/text_tool.py | BarryZM/KnowYouAI | 8c9d96238090fa8fd70b8581ac536bb1b0691eb5 | [
"MIT"
] | 1 | 2020-12-31T11:13:30.000Z | 2020-12-31T11:13:30.000Z | import jieba
from jieba.posseg import POSTokenizer
import config
import os
import logging
jieba.setLogLevel(logging.INFO)
class TextTool:
def __init__(self):
self.token = jieba.Tokenizer()
file = [x.path for x in os.scandir(config.JIEBA_DICT_PATH) if x.path.endswith("txt")]
for fp in file:
self.token.load_userdict(fp)
self.pos_token = POSTokenizer(self.token)
def lcut(self, query):
return self.token.lcut(query)
def pos_lcut(self, query):
return self.pos_token.lcut(query)
| 23.083333 | 93 | 0.676895 |
7940441d0882c2b05003a5793e1977151a7131e2 | 6,904 | py | Python | src/spinnaker_ros_lsm/venv/lib/python2.7/site-packages/spalloc/scripts/ps.py | Roboy/LSM_SpiNNaker_MyoArm | 04fa1eaf78778edea3ba3afa4c527d20c491718e | [
"BSD-3-Clause"
] | 2 | 2020-11-01T13:22:11.000Z | 2020-11-01T13:22:20.000Z | src/spinnaker_ros_lsm/venv/lib/python2.7/site-packages/spalloc/scripts/ps.py | Roboy/LSM_SpiNNaker_MyoArm | 04fa1eaf78778edea3ba3afa4c527d20c491718e | [
"BSD-3-Clause"
] | null | null | null | src/spinnaker_ros_lsm/venv/lib/python2.7/site-packages/spalloc/scripts/ps.py | Roboy/LSM_SpiNNaker_MyoArm | 04fa1eaf78778edea3ba3afa4c527d20c491718e | [
"BSD-3-Clause"
] | null | null | null | """An administrative command-line process listing utility.
By default, the ``spalloc-ps`` command lists all running and queued jobs. For
a real-time monitor of queued and running jobs, the ``--watch`` option may be
added.
.. image:: _static/spalloc_ps.png
:alt: Jobs being listed by spalloc-ps
This list may be filtered by owner or machine with the ``--owner`` and
``--machine`` arguments.
"""
import sys
import argparse
import datetime
from pytz import utc
from tzlocal import get_localzone
from spalloc import config
from spalloc import \
__version__, ProtocolClient, ProtocolTimeoutError, JobState
from spalloc.term import Terminal, render_table
# The acceptable range of server version numbers
VERSION_RANGE_START = (0, 1, 0)
VERSION_RANGE_STOP = (2, 0, 0)
def render_job_list(t, jobs, machine=None, owner=None):
"""Return a human-readable process listing.
Parameters
----------
t : :py:class:`spalloc.term.Terminal`
The terminal to which the output will be sent.
jobs : [{...}, ...]
The list of jobs returned by the server.
machine : str or None
If not None, only list jobs on this machine.
owner : str or None
If not None, only list jobs with this owner.
"""
table = []
# Add headings
table.append(((t.underscore_bright, "ID"),
(t.underscore_bright, "State"),
(t.underscore_bright, "Power"),
(t.underscore_bright, "Boards"),
(t.underscore_bright, "Machine"),
(t.underscore_bright, "Created at"),
(t.underscore_bright, "Keepalive"),
(t.underscore_bright, "Owner")))
for job in jobs:
# Filter jobs
if machine is not None and job["allocated_machine_name"] != machine:
continue
if owner is not None and job["owner"] != owner:
continue
# Colourise job states
if job["state"] == JobState.queued:
job_state = (t.blue, "queue")
elif job["state"] == JobState.power:
job_state = (t.yellow, "power")
elif job["state"] == JobState.ready:
job_state = (t.green, "ready")
else:
job_state = str(job["state"])
# Colourise power states
if job["power"] is not None:
if job["power"]:
power_state = (t.green, "on")
else:
power_state = (t.red, "off")
if job["state"] == JobState.power:
power_state = (t.yellow, power_state[1])
else:
power_state = ""
if job["boards"] is not None:
num_boards = len(job["boards"])
else:
num_boards = ""
# Format start time
utc_timestamp = datetime.datetime.fromtimestamp(
job["start_time"], utc)
local_timestamp = utc_timestamp.astimezone(get_localzone())
timestamp = local_timestamp.strftime('%d/%m/%Y %H:%M:%S')
if job["allocated_machine_name"] is not None:
machine_name = job["allocated_machine_name"]
else:
machine_name = ""
table.append((
job["job_id"],
job_state,
power_state,
num_boards,
machine_name,
timestamp,
str(job["keepalive"]),
job["owner"],
))
# Format the table
return render_table(table)
def main(argv=None):
t = Terminal(stream=sys.stderr)
cfg = config.read_config()
parser = argparse.ArgumentParser(
description="List all active jobs.")
parser.add_argument("--version", "-V", action="version",
version=__version__)
parser.add_argument("--watch", "-w", action="store_true", default=False,
help="watch the list of live jobs in real time")
filter_args = parser.add_argument_group("filtering arguments")
filter_args.add_argument("--machine", "-m",
help="list only jobs on the specified "
"machine")
filter_args.add_argument("--owner", "-o",
help="list only jobs belonging to a particular "
"owner")
server_args = parser.add_argument_group("spalloc server arguments")
server_args.add_argument("--hostname", "-H", default=cfg["hostname"],
help="hostname or IP of the spalloc server "
"(default: %(default)s)")
server_args.add_argument("--port", "-P", default=cfg["port"],
type=int,
help="port number of the spalloc server "
"(default: %(default)s)")
server_args.add_argument("--timeout", default=cfg["timeout"],
type=float, metavar="SECONDS",
help="seconds to wait for a response "
"from the server (default: %(default)s)")
args = parser.parse_args(argv)
# Fail if server not specified
if args.hostname is None:
parser.error("--hostname of spalloc server must be specified")
client = ProtocolClient(args.hostname, args.port)
try:
# Connect to server and ensure compatible version
client.connect()
version = tuple(
map(int, client.version(timeout=args.timeout).split(".")))
if not (VERSION_RANGE_START <= version < VERSION_RANGE_STOP):
sys.stderr.write("Incompatible server version ({}).\n".format(
".".join(map(str, version))))
return 2
if args.watch:
client.notify_job(timeout=args.timeout)
while True:
jobs = client.list_jobs(timeout=args.timeout)
# Clear the screen before reprinting the table
if args.watch:
sys.stdout.write(t.clear_screen())
print(render_job_list(
t, jobs, args.machine, args.owner))
# Exit or wait for changes, if requested
if not args.watch:
return 0
else:
# Wait for state change
try:
client.wait_for_notification()
except KeyboardInterrupt:
# Gracefully exit
print("")
return 0
# Print a newline to separate old table from the new table when
# it gets printed if ANSI screen clearing is not possible.
print("")
except (IOError, OSError, ProtocolTimeoutError) as e:
sys.stderr.write("Error communicating with server: {}\n".format(e))
return 1
finally:
client.close()
if __name__ == "__main__": # pragma: no cover
sys.exit(main())
| 32.87619 | 79 | 0.55562 |
794044603bb074fff84646136731475b312623b6 | 1,715 | py | Python | commands/calevent_commands.py | k-anson/minilla_bot_py | 8099059b7a577f2d3a8bbcbf673bda71178212b6 | [
"MIT"
] | null | null | null | commands/calevent_commands.py | k-anson/minilla_bot_py | 8099059b7a577f2d3a8bbcbf673bda71178212b6 | [
"MIT"
] | null | null | null | commands/calevent_commands.py | k-anson/minilla_bot_py | 8099059b7a577f2d3a8bbcbf673bda71178212b6 | [
"MIT"
] | null | null | null | from discord.ext import commands
from discord.ext.commands import Bot, Context, CommandError
from datetime import datetime, date
from dateutil.parser import parse
from database import Database
from utils.embed import create_cal_embed, create_calevent_embed
def create_calevent_commands(config:dict, client:Bot, db:Database):
@client.group(invoke_without_command=True)
@commands.guild_only()
@commands.has_permissions(administrator=True)
async def calevent(ctx:Context):
# TODO: send tooltip
pass
@calevent.command(name='create')
@commands.guild_only()
@commands.has_permissions(administrator=True)
async def calevent_create(ctx:Context, name:str, event_datetime:convert_to_datetime, description:str):
calendar:db.Calendar = db.session.query(db.Calendar).filter(db.Calendar.guild_id == ctx.guild.id).first()
event = db.Event(
name = name,
date = event_datetime,
description = description,
calendar_id = calendar.id
)
db.session.add(event)
db.session.commit()
channel = client.get_channel(int(calendar.events_channel_id))
message = await channel.send(embed=create_calevent_embed(db, event))
event.message_id = message.id
db.session.commit()
await ctx.send(f'Event "{event.name}" created')
channel = client.get_channel(int(calendar.channel_id))
message = await channel.fetch_message(int(calendar.message_id))
await message.edit(embed=create_cal_embed(db))
@calevent.error
@calevent_create.error
async def calevent_error(ctx:Context, error:CommandError):
if isinstance(error, commands.NoPrivateMessage):
pass
else:
await ctx.send(error)
def convert_to_datetime(arg):
return parse(arg) | 34.3 | 109 | 0.749271 |
794044af12c1ff76bce99bc3d2f03d3d06ebe6a6 | 8,896 | py | Python | tests/graph/hex/test_hex.py | clebouteiller/landlab | e6f47db76ea0814c4c5a24e695bbafb74c722ff7 | [
"MIT"
] | null | null | null | tests/graph/hex/test_hex.py | clebouteiller/landlab | e6f47db76ea0814c4c5a24e695bbafb74c722ff7 | [
"MIT"
] | 1 | 2021-11-11T21:23:46.000Z | 2021-11-11T21:23:46.000Z | tests/graph/hex/test_hex.py | clebouteiller/landlab | e6f47db76ea0814c4c5a24e695bbafb74c722ff7 | [
"MIT"
] | null | null | null | import numpy as np
import pytest
from hypothesis import given
from hypothesis.strategies import integers, lists
from numpy.testing import assert_array_almost_equal, assert_array_equal
from pytest import approx
from landlab.graph import TriGraph
from landlab.graph.hex.hex import (
HorizontalHexTriGraph,
HorizontalRectTriGraph,
VerticalHexTriGraph,
VerticalRectTriGraph,
)
def test_number_of_nodes_horizontal_rect():
assert HorizontalRectTriGraph.number_of_nodes((1, 2)) == 2
assert HorizontalRectTriGraph.number_of_nodes((1, 3)) == 3
assert HorizontalRectTriGraph.number_of_nodes((2, 2)) == 4
assert HorizontalRectTriGraph.number_of_nodes((2, 3)) == 6
assert HorizontalRectTriGraph.number_of_nodes((3, 2)) == 6
assert HorizontalRectTriGraph.number_of_nodes((3, 3)) == 9
def test_number_of_nodes_vertical_rect():
assert VerticalRectTriGraph.number_of_nodes((1, 2)) == 2
assert VerticalRectTriGraph.number_of_nodes((1, 3)) == 3
assert VerticalRectTriGraph.number_of_nodes((2, 2)) == 4
assert VerticalRectTriGraph.number_of_nodes((2, 3)) == 6
assert VerticalRectTriGraph.number_of_nodes((3, 2)) == 6
assert VerticalRectTriGraph.number_of_nodes((3, 3)) == 9
def test_number_of_nodes_horizontal_hex():
assert HorizontalHexTriGraph.number_of_nodes((1, 2)) == 2
assert HorizontalHexTriGraph.number_of_nodes((1, 3)) == 3
assert HorizontalHexTriGraph.number_of_nodes((2, 2)) == 5
assert HorizontalHexTriGraph.number_of_nodes((2, 3)) == 7
assert HorizontalHexTriGraph.number_of_nodes((3, 2)) == 7
assert HorizontalHexTriGraph.number_of_nodes((3, 3)) == 10
def test_number_of_nodes_vertical_hex():
assert VerticalHexTriGraph.number_of_nodes((1, 2)) == 3
assert VerticalHexTriGraph.number_of_nodes((1, 3)) == 4
assert VerticalHexTriGraph.number_of_nodes((2, 2)) == 5
assert VerticalHexTriGraph.number_of_nodes((2, 3)) == 7
assert VerticalHexTriGraph.number_of_nodes((3, 2)) == 7
assert VerticalHexTriGraph.number_of_nodes((3, 3)) == 10
@given(shape=lists(integers(min_value=3, max_value=1024), min_size=2, max_size=2))
def test_number_of_nodes_symmetric_rect(shape):
assert VerticalRectTriGraph.number_of_nodes(
shape
) == HorizontalRectTriGraph.number_of_nodes(shape[::-1])
@given(shape=lists(integers(min_value=3, max_value=1024), min_size=2, max_size=2))
def test_number_of_nodes_symmetric_hex(shape):
assert VerticalHexTriGraph.number_of_nodes(
shape
) == HorizontalHexTriGraph.number_of_nodes(shape[::-1])
@pytest.mark.parametrize("n_rows", (3,))
@pytest.mark.parametrize("node_layout", ("rect", "hex"))
@pytest.mark.parametrize("orientation", ("horizontal", "vertical"))
@pytest.mark.parametrize("at", ("nodes", "links", "patches"))
def test_create_hex_graph(n_rows, node_layout, orientation, at):
expected = {
"rect": {
"horizontal": {"nodes": 6, "links": 9, "patches": 4},
"vertical": {"nodes": 6, "links": 9, "patches": 4},
},
"hex": {
"horizontal": {"nodes": 7, "links": 12, "patches": 6},
"vertical": {"nodes": 7, "links": 12, "patches": 6},
},
}
if orientation == "vertical":
shape = (2, n_rows)
else:
shape = (n_rows, 2)
graph = TriGraph(shape, node_layout=node_layout, orientation=orientation, sort=True)
assert (
getattr(graph, "number_of_{at}".format(at=at))
== expected[node_layout][orientation][at]
)
def test_create_rect():
"""Test creating a hex graph with rectangular layout."""
graph = TriGraph((3, 2), node_layout="rect", sort=True)
assert graph.number_of_nodes == 6
assert graph.number_of_links == 9
assert graph.number_of_patches == 4
def test_create_hex():
"""Test creating a hex graph with hex layout."""
graph = TriGraph((3, 2), node_layout="hex", sort=True)
assert graph.number_of_nodes == 7
assert graph.number_of_links == 12
assert graph.number_of_patches == 6
@given(shape=lists(integers(min_value=3, max_value=32), min_size=2, max_size=2))
def test_spacing(shape):
"""Test spacing of nodes."""
graph = TriGraph(shape)
assert_array_almost_equal(graph.length_of_link, 1.0)
graph = TriGraph(shape, spacing=2)
assert_array_almost_equal(graph.length_of_link, 2.0)
@given(shape=lists(integers(min_value=3, max_value=32), min_size=2, max_size=2))
@pytest.mark.parametrize("orientation", ("horizontal", "vertical"))
@pytest.mark.parametrize("node_layout", ("hex", "rect"))
def test_origin_keyword(node_layout, orientation, shape):
"""Test setting the origin."""
graph = TriGraph(shape)
assert np.min(graph.x_of_node) == approx(0.0)
assert np.min(graph.y_of_node) == approx(0.0)
graph = TriGraph(shape, xy_of_lower_left=(0.5, 0.25))
assert np.min(graph.x_of_node[0]) == approx(0.5)
assert np.min(graph.y_of_node[0]) == approx(0.25)
def test_orientation():
"""Test vertical and horizontal orientation."""
graph = TriGraph((3, 3), orientation="vertical")
assert_array_almost_equal(
graph.y_of_node, [0.0, 0.0, 0.5, 1.0, 1.0, 1.5, 2.0, 2.0, 2.5]
)
graph = TriGraph((3, 3), orientation="horizontal")
assert_array_almost_equal(
graph.x_of_node, [0.0, 1.0, 2.0, 0.5, 1.5, 2.5, 0.0, 1.0, 2.0]
)
def test_perimeter_nodes_rect():
graph = TriGraph((3, 4), node_layout="rect")
assert_array_equal(graph.perimeter_nodes, [3, 7, 11, 10, 9, 8, 4, 0, 1, 2])
def test_perimeter_nodes_hex():
graph = TriGraph((4, 2), node_layout="hex")
assert_array_equal(graph.perimeter_nodes, [8, 11, 10, 9, 5, 2, 0, 1, 4])
def test_adjacent_nodes_at_node():
graph = TriGraph((3, 3), node_layout="hex", sort=True)
assert_array_equal(
graph.adjacent_nodes_at_node,
[
[1, 4, 3, -1, -1, -1],
[2, 5, 4, 0, -1, -1],
[6, 5, 1, -1, -1, -1],
[4, 7, 0, -1, -1, -1],
[5, 8, 7, 3, 0, 1],
[6, 9, 8, 4, 1, 2],
[9, 5, 2, -1, -1, -1],
[8, 3, 4, -1, -1, -1],
[9, 7, 4, 5, -1, -1],
[8, 5, 6, -1, -1, -1],
],
)
def test_patches_at_node():
grid = TriGraph((3, 3), node_layout="hex", sort=True)
assert_array_equal(
grid.patches_at_node,
[
[0, 2, -1, -1, -1, -1],
[1, 3, 0, -1, -1, -1],
[4, 1, -1, -1, -1, -1],
[5, 2, -1, -1, -1, -1],
[6, 8, 5, 2, 0, 3],
[7, 9, 6, 3, 1, 4],
[7, 4, -1, -1, -1, -1],
[5, 8, -1, -1, -1, -1],
[8, 6, 9, -1, -1, -1],
[9, 7, -1, -1, -1, -1],
],
)
@pytest.mark.parametrize("n_cols", (2, 3))
@pytest.mark.parametrize("n_rows", (2, 3))
def test_xy_of_node_rect_vertical(n_rows, n_cols):
expected = {
(2, 2): ([0, 1, 0, 1], [0, 0.5, 1, 1.5]),
(2, 3): ([0, 2, 1, 0, 2, 1], [0, 0, 0.5, 1, 1, 1.5]),
(3, 2): ([0, 1, 0, 1, 0, 1], [0, 0.5, 1, 1.5, 2, 2.5]),
(3, 3): ([0, 2, 1, 0, 2, 1, 0, 2, 1], [0, 0, 0.5, 1, 1, 1.5, 2, 2, 2.5]),
}
x_of_node, y_of_node = VerticalRectTriGraph.xy_of_node((n_rows, n_cols))
assert np.all(
x_of_node / np.sin(np.pi / 3.0) == approx(expected[(n_rows, n_cols)][0])
)
assert np.all(y_of_node == approx(expected[(n_rows, n_cols)][1]))
@pytest.mark.parametrize("n_cols", (2, 3))
@pytest.mark.parametrize("n_rows", (1, 2, 3))
def test_xy_of_node_hex_vertical(n_rows, n_cols):
expected = {
(1, 2): ([1.0, 0, 1.0], [0, 0.5, 1]),
(1, 3): ([1.0, 0, 2, 1.0], [0, 0.5, 0.5, 1]),
(2, 2): ([1.0, 0, 1.0, 0, 1.0], [0, 0.5, 1, 1.5, 2]),
(2, 3): ([1.0, 0, 2, 1.0, 0, 2, 1.0], [0, 0.5, 0.5, 1, 1.5, 1.5, 2]),
(3, 2): ([1.0, 0, 1.0, 0, 1.0, 0.0, 1.0], [0, 0.5, 1, 1.5, 2, 2.5, 3]),
(3, 3): (
[1.0, 0, 2, 1.0, 0, 2, 1.0, 0, 2, 1.0],
[0, 0.5, 0.5, 1, 1.5, 1.5, 2, 2.5, 2.5, 3],
),
}
x_of_node, y_of_node = VerticalHexTriGraph.xy_of_node((n_rows, n_cols))
assert np.all(
x_of_node / np.sin(np.pi / 3.0) == approx(expected[(n_rows, n_cols)][0])
)
assert np.all(y_of_node == approx(expected[(n_rows, n_cols)][1]))
def test_xy_of_node_spacing(hex_layout):
x_of_node_expected, y_of_node_expected = hex_layout.xy_of_node((3, 4))
x_of_node, y_of_node = hex_layout.xy_of_node((3, 4), spacing=2.0)
assert_array_almost_equal(x_of_node / 2.0, x_of_node_expected)
assert_array_almost_equal(y_of_node / 2.0, y_of_node_expected)
@pytest.mark.parametrize("n_cols", (2, 3))
@pytest.mark.parametrize("n_rows", (1, 2, 3))
def test_xy_of_node_lower_left(hex_layout, n_rows, n_cols):
(x_of_node, y_of_node) = hex_layout.xy_of_node((n_rows, n_cols))
assert np.min(x_of_node) == approx(0.0)
assert np.min(y_of_node) == approx(0.0)
| 35.301587 | 88 | 0.61286 |
794045026ad061c4fbb00169f7bc13bc305d7d0c | 3,759 | py | Python | core_modules/macro_handler.py | picass02005/PyMacro-async | 5945de7be39793c42d2e1d53a6050809d962338d | [
"MIT"
] | 1 | 2021-08-10T19:50:57.000Z | 2021-08-10T19:50:57.000Z | core_modules/macro_handler.py | picass02005/PyMacro-async | 5945de7be39793c42d2e1d53a6050809d962338d | [
"MIT"
] | null | null | null | core_modules/macro_handler.py | picass02005/PyMacro-async | 5945de7be39793c42d2e1d53a6050809d962338d | [
"MIT"
] | null | null | null | import gc
import importlib
import json
from typing import Coroutine, Union
from core_modules.get_window import get_window
from core_modules.tray import Tray
from global_modules import logs
class MacroHandler:
def __init__(self, tray: Tray):
from global_modules.macro_manager import REGISTERED_PATH
self.__REGISTERED_PATH = REGISTERED_PATH
self.__tray = tray
self.__window_name = get_window()
self.actual_loaded = {}
self.just_updated_loaded = False # A variable for the keyboard_handler
logs.info("macro_handler", f"Loading macros for window {self.__window_name}")
self.__update_registered_for_window()
def __update_registered_for_window(self):
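        """Rebuild self.actual_loaded from the registered macros file: merge the
        "default" section with every section whose key matches the current window
        title, then resolve each callback/before/after location into a callable.
        """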
with open(self.__REGISTERED_PATH, "r") as f:
actual_json = json.loads(f.read())
actual_loaded = {}
if "default" in actual_json.keys():
actual_loaded.update(actual_json["default"].items())
for i in actual_json.keys():
if i.lower() in self.__window_name.lower():
if actual_json[i] is None:
self.actual_loaded = {}
return
else:
actual_loaded.update(actual_json[i].items())
tmp = {}
for key, value in actual_loaded.items():
try:
module = importlib.import_module(".".join(value['callback'].split(".")[:-1]))
importlib.reload(module)
except ModuleNotFoundError:
return logs.error("macro_handler", f"Module {'.'.join(value['callback'].split('.')[:-1])} not found")
try:
callback = eval(f"module.{value['callback'].split('.')[-1]}")
except AttributeError:
return logs.error("macro_handler", f"Function {value['callback']} not found")
callback = self.__get_callback_from_location(value['callback'])
before = self.__get_callback_from_location(value['before']) if value['before'] is not None else None
after = self.__get_callback_from_location(value['after']) if value['after'] is not None else None
if callback is not None:
tmp.update({key: {
'callback': {"func": callback, "location": value['callback']},
'before': {"func": before, "location": value['before']},
'after': {"func": after, "location": value['after']},
'loop': value['loop']}
})
self.actual_loaded = {}
self.actual_loaded.update(tmp)
self.just_updated_loaded = True
def update(self):
if not self.__tray.enabled:
if self.actual_loaded:
self.actual_loaded = {}
self.__window_name = None
logs.info("macro_handler", "Actually loaded cleared")
return
if self.__window_name != (window := get_window()):
logs.info("macro_handler", f"Window changed from {self.__window_name} to {window}, reloading macros...")
self.__window_name = window
self.__update_registered_for_window()
gc.collect()
@staticmethod
def __get_callback_from_location(location) -> Union[Coroutine, None]:
try:
module = importlib.import_module(".".join(location.split(".")[:-1]))
importlib.reload(module)
except ModuleNotFoundError:
return logs.error("macro_handler", f"Module {'.'.join(location.split('.')[:-1])} not found")
try:
return eval(f"module.{location.split('.')[-1]}")
except AttributeError:
return logs.error("macro_handler", f"Function {location} not found")
| 36.852941 | 117 | 0.589518 |
7940466d6e1fbfa9431ab8c92555b1f2b36be0fe | 415 | py | Python | packages/markblocks/tests/test_preprocessor.py | stattikcms/stattik | 5c96d600d105461edb95a11d8050dee3c32edd1e | [
"MIT"
] | 1 | 2021-11-05T06:24:28.000Z | 2021-11-05T06:24:28.000Z | packages/markblocks/tests/test_preprocessor.py | stattikcms/stattik | 5c96d600d105461edb95a11d8050dee3c32edd1e | [
"MIT"
] | null | null | null | packages/markblocks/tests/test_preprocessor.py | stattikcms/stattik | 5c96d600d105461edb95a11d8050dee3c32edd1e | [
"MIT"
] | null | null | null | import unittest
from markblocks.data import load
from markblocks.lex.textlexer import TextLexer
class Test(unittest.TestCase):
def test(self):
filename = "hello.mb"
with load(filename) as fh:
s = fh.read()
lexer = TextLexer()
tokens = lexer.tokenize(s)
for tok in tokens:
print(tok)
if __name__ == "__main__":
Test().test()
| 18.863636 | 46 | 0.585542 |
79404675e864ed3fb926cd2722bc42bcd1a56e6d | 63,476 | py | Python | flaml/model.py | wuchihsu/FLAML | 54d303a95ab8615ec298a5a7a530f8d1d477bf68 | [
"MIT"
] | null | null | null | flaml/model.py | wuchihsu/FLAML | 54d303a95ab8615ec298a5a7a530f8d1d477bf68 | [
"MIT"
] | 4 | 2022-01-16T04:25:26.000Z | 2022-02-23T04:50:37.000Z | flaml/model.py | wuchihsu/FLAML | 54d303a95ab8615ec298a5a7a530f8d1d477bf68 | [
"MIT"
] | null | null | null | # !
# * Copyright (c) Microsoft Corporation. All rights reserved.
# * Licensed under the MIT License. See LICENSE file in the
# * project root for license information.
from contextlib import contextmanager
from functools import partial
import signal
import os
from typing import Callable, List
import numpy as np
import time
from sklearn.ensemble import RandomForestRegressor, RandomForestClassifier
from sklearn.ensemble import ExtraTreesRegressor, ExtraTreesClassifier
from sklearn.linear_model import LogisticRegression
from sklearn.dummy import DummyClassifier, DummyRegressor
from scipy.sparse import issparse
import logging
import shutil
from . import tune
from .data import (
group_counts,
CLASSIFICATION,
TS_FORECAST,
TS_TIMESTAMP_COL,
TS_VALUE_COL,
SEQCLASSIFICATION,
SEQREGRESSION,
)
import pandas as pd
from pandas import DataFrame, Series
import sys
try:
import psutil
except ImportError:
psutil = None
try:
import resource
except ImportError:
resource = None
logger = logging.getLogger("flaml.automl")
FREE_MEM_RATIO = 0.2
def TimeoutHandler(sig, frame):
raise TimeoutError(sig, frame)
@contextmanager
def limit_resource(memory_limit, time_limit):
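    """Best-effort resource guard: cap address-space usage via resource.RLIMIT_AS
    and wall-clock time via a SIGALRM alarm for the enclosed block, restoring the
    previous limits (and cancelling the alarm) on exit.
    """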
if memory_limit > 0:
soft, hard = resource.getrlimit(resource.RLIMIT_AS)
if soft < 0 and (hard < 0 or memory_limit <= hard) or memory_limit < soft:
resource.setrlimit(resource.RLIMIT_AS, (memory_limit, hard))
main_thread = False
if time_limit is not None:
try:
signal.signal(signal.SIGALRM, TimeoutHandler)
signal.alarm(int(time_limit) or 1)
main_thread = True
except ValueError:
pass
try:
yield
finally:
if main_thread:
signal.alarm(0)
if memory_limit > 0:
resource.setrlimit(resource.RLIMIT_AS, (soft, hard))
class BaseEstimator:
"""The abstract class for all learners.
Typical examples:
* XGBoostEstimator: for regression.
* XGBoostSklearnEstimator: for classification.
* LGBMEstimator, RandomForestEstimator, LRL1Classifier, LRL2Classifier:
for both regression and classification.
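
    A minimal sketch of a custom estimator following the same pattern (the
    class name, the hyperparameter, and the wrapped sklearn model below are
    illustrative, not part of flaml):

        class MyLassoEstimator(SKLearnEstimator):
            @classmethod
            def search_space(cls, data_size, task, **params):
                return {
                    "alpha": {
                        "domain": tune.loguniform(lower=1e-4, upper=1.0),
                        "init_value": 1e-2,
                    },
                }

            def __init__(self, task="regression", **config):
                super().__init__(task, **config)
                from sklearn.linear_model import Lasso

                self.estimator_class = Lasso

            def config2params(self, config: dict) -> dict:
                params = config.copy()
                params.pop("n_jobs", None)  # Lasso takes no n_jobs argument
                return params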
"""
def __init__(self, task="binary", **config):
"""Constructor.
Args:
task: A string of the task type, one of
'binary', 'multi', 'regression', 'rank', 'forecast'
config: A dictionary containing the hyperparameter names, 'n_jobs' as keys.
n_jobs is the number of parallel threads.
"""
self._task = task
self.params = self.config2params(config)
self.estimator_class = self._model = None
if "_estimator_type" in config:
self._estimator_type = self.params.pop("_estimator_type")
else:
self._estimator_type = (
"classifier" if task in CLASSIFICATION else "regressor"
)
def get_params(self, deep=False):
params = self.params.copy()
params["task"] = self._task
if hasattr(self, "_estimator_type"):
params["_estimator_type"] = self._estimator_type
return params
@property
def classes_(self):
return self._model.classes_
@property
def n_features_in_(self):
return self.model.n_features_in_
@property
def model(self):
"""Trained model after fit() is called, or None before fit() is called."""
return self._model
@property
def estimator(self):
"""Trained model after fit() is called, or None before fit() is called."""
return self._model
def _preprocess(self, X):
return X
def _fit(self, X_train, y_train, **kwargs):
current_time = time.time()
if "groups" in kwargs:
kwargs = kwargs.copy()
groups = kwargs.pop("groups")
if self._task == "rank":
kwargs["group"] = group_counts(groups)
# groups_val = kwargs.get('groups_val')
# if groups_val is not None:
# kwargs['eval_group'] = [group_counts(groups_val)]
# kwargs['eval_set'] = [
# (kwargs['X_val'], kwargs['y_val'])]
# kwargs['verbose'] = False
# del kwargs['groups_val'], kwargs['X_val'], kwargs['y_val']
X_train = self._preprocess(X_train)
model = self.estimator_class(**self.params)
if logger.level == logging.DEBUG:
logger.debug(f"flaml.model - {model} fit started")
model.fit(X_train, y_train, **kwargs)
if logger.level == logging.DEBUG:
logger.debug(f"flaml.model - {model} fit finished")
train_time = time.time() - current_time
self._model = model
return train_time
def fit(self, X_train, y_train, budget=None, **kwargs):
"""Train the model from given training data.
Args:
X_train: A numpy array or a dataframe of training data in shape n*m.
y_train: A numpy array or a series of labels in shape n*1.
budget: A float of the time budget in seconds.
Returns:
train_time: A float of the training time in seconds.
"""
if (
getattr(self, "limit_resource", None)
and resource is not None
and (budget is not None or psutil is not None)
):
start_time = time.time()
mem = psutil.virtual_memory() if psutil is not None else None
try:
with limit_resource(
mem.available * (1 - FREE_MEM_RATIO)
+ psutil.Process(os.getpid()).memory_info().rss
if mem is not None
else -1,
budget,
):
train_time = self._fit(X_train, y_train, **kwargs)
except (MemoryError, TimeoutError) as e:
logger.warning(f"{e.__class__} {e}")
if self._task in CLASSIFICATION:
model = DummyClassifier()
else:
model = DummyRegressor()
X_train = self._preprocess(X_train)
model.fit(X_train, y_train)
self._model = model
train_time = time.time() - start_time
else:
train_time = self._fit(X_train, y_train, **kwargs)
return train_time
def predict(self, X_test):
"""Predict label from features.
Args:
X_test: A numpy array or a dataframe of featurized instances, shape n*m.
Returns:
A numpy array of shape n*1.
            Each element is the label for an instance.
"""
if self._model is not None:
X_test = self._preprocess(X_test)
return self._model.predict(X_test)
else:
return np.ones(X_test.shape[0])
def predict_proba(self, X_test):
"""Predict the probability of each class from features.
        Only works for classification problems.
Args:
X_test: A numpy array of featurized instances, shape n*m.
Returns:
A numpy array of shape n*c. c is the # classes.
Each element at (i,j) is the probability for instance i to be in
class j.
"""
assert (
self._task in CLASSIFICATION
), "predict_prob() only for classification task."
X_test = self._preprocess(X_test)
return self._model.predict_proba(X_test)
def cleanup(self):
del self._model
self._model = None
@classmethod
def search_space(cls, data_size, task, **params):
"""[required method] search space.
Args:
data_size: A tuple of two integers, number of rows and columns.
task: A str of the task type, e.g., "binary", "multi", "regression".
Returns:
A dictionary of the search space.
Each key is the name of a hyperparameter, and value is a dict with
its domain (required) and low_cost_init_value, init_value,
cat_hp_cost (if applicable).
e.g.,
`{'domain': tune.randint(lower=1, upper=10), 'init_value': 1}.`
"""
return {}
@classmethod
def size(cls, config: dict) -> float:
"""[optional method] memory size of the estimator in bytes.
Args:
config: A dict of the hyperparameter config.
Returns:
A float of the memory size required by the estimator to train the
given config.
"""
return 1.0
@classmethod
def cost_relative2lgbm(cls) -> float:
"""[optional method] relative cost compared to lightgbm."""
return 1.0
@classmethod
def init(cls):
"""[optional method] initialize the class."""
pass
def config2params(self, config: dict) -> dict:
"""[optional method] config dict to params dict
Args:
config: A dict of the hyperparameter config.
Returns:
A dict that will be passed to self.estimator_class's constructor.
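            For example, `LGBMEstimator.config2params` uses this hook to convert
            the searched `log_max_bin` value into LightGBM's `max_bin` parameter.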
"""
params = config.copy()
return params
class TransformersEstimator(BaseEstimator):
"""The class for fine-tuning language models, using huggingface transformers API."""
ITER_HP = "global_max_steps"
def __init__(self, task="seq-classification", **config):
super().__init__(task, **config)
import uuid
self.trial_id = str(uuid.uuid1().hex)[:8]
def _join(self, X_train, y_train):
y_train = DataFrame(y_train, columns=["label"], index=X_train.index)
train_df = X_train.join(y_train)
return train_df
@classmethod
def search_space(cls, data_size, task, **params):
search_space_dict = {
"learning_rate": {
"domain": tune.loguniform(lower=1e-6, upper=1e-3),
"init_value": 1e-5,
},
"num_train_epochs": {
"domain": tune.loguniform(lower=0.1, upper=10.0),
},
"per_device_train_batch_size": {
"domain": tune.choice([4, 8, 16, 32]),
"init_value": 32,
},
"warmup_ratio": {
"domain": tune.uniform(lower=0.0, upper=0.3),
"init_value": 0.0,
},
"weight_decay": {
"domain": tune.uniform(lower=0.0, upper=0.3),
"init_value": 0.0,
},
"adam_epsilon": {
"domain": tune.loguniform(lower=1e-8, upper=1e-6),
"init_value": 1e-6,
},
"seed": {"domain": tune.choice(list(range(40, 45))), "init_value": 42},
"global_max_steps": {"domain": sys.maxsize, "init_value": sys.maxsize},
}
# TODO: if self._task == SUMMARIZATION, uncomment the code below, SET the search space for
# "num_beams" in search_space_dict using
# search_space_dict["num_beams"] = {...}
# if task in NLG_TASKS:
# search_space_dict["num_beams"] = {"domain": tune.choice(...)}
return search_space_dict
def _init_hpo_args(self, automl_fit_kwargs: dict = None):
from .nlp.utils import HPOArgs
custom_hpo_args = HPOArgs()
for key, val in automl_fit_kwargs["custom_hpo_args"].items():
assert (
key in custom_hpo_args.__dict__
), "The specified key {} is not in the argument list of flaml.nlp.utils::HPOArgs".format(
key
)
setattr(custom_hpo_args, key, val)
self.custom_hpo_args = custom_hpo_args
def _preprocess(self, X, task, **kwargs):
from .nlp.utils import tokenize_text
if X.dtypes[0] == "string":
return tokenize_text(X, task, self.custom_hpo_args)
else:
return X
def fit(self, X_train: DataFrame, y_train: Series, budget=None, **kwargs):
from transformers import EarlyStoppingCallback
from transformers.trainer_utils import set_seed
from transformers import AutoTokenizer
# TODO: if self._task == SUMMARIZATION, uncomment the code below (add indentation before
# from transformers import TrainingArguments)
# if self._task in NLG_TASKS:
# from transformers import Seq2SeqTrainingArguments as TrainingArguments
# else:
from transformers import TrainingArguments
import transformers
from datasets import Dataset
from .nlp.utils import (
get_num_labels,
separate_config,
load_model,
compute_checkpoint_freq,
get_trial_fold_name,
date_str,
)
# TODO: if self._task == QUESTIONANSWERING, uncomment the code below (add indentation before
# from .nlp.huggingface.trainer import TrainerForAuto)
# if self._task in NLG_TASKS:
# from .nlp.huggingface.trainer import Seq2SeqTrainerForAuto as TrainerForAuto
# else:
from .nlp.huggingface.trainer import TrainerForAuto
this_params = self.params
class EarlyStoppingCallbackForAuto(EarlyStoppingCallback):
def on_train_begin(self, args, state, control, **callback_kwargs):
self.train_begin_time = time.time()
def on_step_begin(self, args, state, control, **callback_kwargs):
self.step_begin_time = time.time()
def on_step_end(self, args, state, control, **callback_kwargs):
if state.global_step == 1:
self.time_per_iter = time.time() - self.step_begin_time
if (
budget
and (
time.time() + self.time_per_iter
> self.train_begin_time + budget
)
or state.global_step >= this_params[TransformersEstimator.ITER_HP]
):
control.should_training_stop = True
control.should_save = True
control.should_evaluate = True
return control
def on_epoch_end(self, args, state, control, **callback_kwargs):
if (
control.should_training_stop
or state.epoch + 1 >= args.num_train_epochs
):
control.should_save = True
control.should_evaluate = True
set_seed(self.params.get("seed", TrainingArguments.seed))
self._init_hpo_args(kwargs)
self._metric_name = kwargs["metric"]
if hasattr(self, "use_ray") is False:
self.use_ray = kwargs["use_ray"]
X_val = kwargs.get("X_val")
y_val = kwargs.get("y_val")
X_train = self._preprocess(X_train, self._task, **kwargs)
train_dataset = Dataset.from_pandas(self._join(X_train, y_train))
# TODO: set a breakpoint here, observe the resulting train_dataset,
# compare it with the output of the tokenized results in your transformer example
# for example, if your task is MULTIPLECHOICE, you need to compare train_dataset with
# the output of https://github.com/huggingface/transformers/blob/master/examples/pytorch/multiple-choice/run_swag.py#L329
# make sure they are the same
if X_val is not None:
X_val = self._preprocess(X_val, self._task, **kwargs)
eval_dataset = Dataset.from_pandas(self._join(X_val, y_val))
else:
eval_dataset = None
tokenizer = AutoTokenizer.from_pretrained(
self.custom_hpo_args.model_path, use_fast=True
)
num_labels = get_num_labels(self._task, y_train)
training_args_config, per_model_config = separate_config(self.params)
this_model = load_model(
checkpoint_path=self.custom_hpo_args.model_path,
task=self._task,
num_labels=num_labels,
per_model_config=per_model_config,
)
ckpt_freq = compute_checkpoint_freq(
train_data_size=len(X_train),
custom_hpo_args=self.custom_hpo_args,
num_train_epochs=training_args_config.get(
"num_train_epochs", TrainingArguments.num_train_epochs
),
batch_size=training_args_config.get(
"per_device_train_batch_size",
TrainingArguments.per_device_train_batch_size,
),
)
local_dir = os.path.join(
self.custom_hpo_args.output_dir, "train_{}".format(date_str())
)
if not self.use_ray:
# if self.params = {}, don't include configuration in trial fold name
trial_dir = get_trial_fold_name(local_dir, self.params, self.trial_id)
else:
import ray
trial_dir = ray.tune.get_trial_dir()
if transformers.__version__.startswith("3"):
training_args = TrainingArguments(
report_to=[],
output_dir=trial_dir,
do_train=True,
do_eval=True,
eval_steps=ckpt_freq,
evaluate_during_training=True,
save_steps=ckpt_freq,
save_total_limit=0,
fp16=self.custom_hpo_args.fp16,
load_best_model_at_end=True,
**training_args_config,
)
else:
from transformers import IntervalStrategy
training_args = TrainingArguments(
report_to=[],
output_dir=trial_dir,
do_train=True,
do_eval=True,
per_device_eval_batch_size=1,
eval_steps=ckpt_freq,
evaluation_strategy=IntervalStrategy.STEPS,
save_steps=ckpt_freq,
save_total_limit=0,
fp16=self.custom_hpo_args.fp16,
load_best_model_at_end=True,
**training_args_config,
)
def _model_init():
return load_model(
checkpoint_path=self.custom_hpo_args.model_path,
task=self._task,
num_labels=num_labels,
per_model_config=per_model_config,
)
self._model = TrainerForAuto(
model=this_model,
args=training_args,
model_init=_model_init,
train_dataset=train_dataset,
eval_dataset=eval_dataset,
tokenizer=tokenizer,
compute_metrics=self._compute_metrics_by_dataset_name,
callbacks=[EarlyStoppingCallbackForAuto],
)
setattr(self._model, "_use_ray", self.use_ray)
self._model.train()
self.params[self.ITER_HP] = self._model.state.global_step
self._checkpoint_path = self._select_checkpoint(self._model)
self._kwargs = kwargs
self._num_labels = num_labels
self._per_model_config = per_model_config
self._ckpt_remains = list(self._model.ckpt_to_metric.keys())
def _delete_one_ckpt(self, ckpt_location):
if self.use_ray is False:
try:
shutil.rmtree(ckpt_location)
except FileNotFoundError:
logger.warning("checkpoint {} not found".format(ckpt_location))
def cleanup(self):
super().cleanup()
if hasattr(self, "_ckpt_remains"):
for each_ckpt in self._ckpt_remains:
self._delete_one_ckpt(each_ckpt)
def _select_checkpoint(self, trainer):
from transformers.trainer_utils import PREFIX_CHECKPOINT_DIR
if trainer.ckpt_to_metric:
best_ckpt, _ = min(
trainer.ckpt_to_metric.items(), key=lambda x: x[1]["val_loss"]
)
best_ckpt_global_step = trainer.ckpt_to_global_step[best_ckpt]
for each_ckpt in list(trainer.ckpt_to_metric):
if each_ckpt != best_ckpt:
del trainer.ckpt_to_metric[each_ckpt]
del trainer.ckpt_to_global_step[each_ckpt]
self._delete_one_ckpt(each_ckpt)
else:
best_ckpt_global_step = trainer.state.global_step
best_ckpt = os.path.join(
trainer.args.output_dir,
f"{PREFIX_CHECKPOINT_DIR}-{best_ckpt_global_step}",
)
self.params[self.ITER_HP] = best_ckpt_global_step
print(trainer.state.global_step)
print(trainer.ckpt_to_global_step)
return best_ckpt
def _compute_metrics_by_dataset_name(self, eval_pred):
from .ml import sklearn_metric_loss_score
import datasets
from .nlp.utils import load_default_huggingface_metric_for_task
predictions, labels = eval_pred
predictions = (
np.squeeze(predictions)
if self._task == SEQREGRESSION
else np.argmax(predictions, axis=1)
)
if isinstance(self._metric_name, str):
return {
"val_loss": sklearn_metric_loss_score(
metric_name=self._metric_name, y_predict=predictions, y_true=labels
)
}
else:
(
default_metric_name,
default_metric_mode,
) = load_default_huggingface_metric_for_task(self._task)
metric = datasets.load_metric(default_metric_name)
multiplier = -1 if default_metric_mode == "max" else 1
return {
"val_loss": metric.compute(predictions=predictions, references=labels)[
default_metric_name
]
* multiplier
}
def predict_proba(self, X_test):
from datasets import Dataset
from .nlp.huggingface.trainer import TrainerForAuto
from transformers import TrainingArguments
from .nlp.utils import load_model
assert (
self._task in CLASSIFICATION
), "predict_proba is only available in classification tasks"
X_test = self._preprocess(X_test, self._task, **self._kwargs)
test_dataset = Dataset.from_pandas(X_test)
best_model = load_model(
checkpoint_path=self._checkpoint_path,
task=self._task,
num_labels=self._num_labels,
per_model_config=self._per_model_config,
)
training_args = TrainingArguments(
per_device_eval_batch_size=1,
output_dir=self.custom_hpo_args.output_dir,
)
self._model = TrainerForAuto(model=best_model, args=training_args)
predictions = self._model.predict(test_dataset)
return predictions.predictions
def predict(self, X_test):
from datasets import Dataset
from transformers import TrainingArguments
from .nlp.utils import load_model
from .nlp.huggingface.trainer import TrainerForAuto
X_test = self._preprocess(X_test, self._task, **self._kwargs)
test_dataset = Dataset.from_pandas(X_test)
best_model = load_model(
checkpoint_path=self._checkpoint_path,
task=self._task,
num_labels=self._num_labels,
per_model_config=self._per_model_config,
)
training_args = TrainingArguments(
per_device_eval_batch_size=1,
output_dir=self.custom_hpo_args.output_dir,
)
self._model = TrainerForAuto(model=best_model, args=training_args)
predictions = self._model.predict(test_dataset)
if self._task == SEQCLASSIFICATION:
return np.argmax(predictions.predictions, axis=1)
elif self._task == SEQREGRESSION:
return predictions.predictions
# TODO: elif self._task == your task, return the corresponding prediction
# e.g., if your task == QUESTIONANSWERING, you need to return the answer instead
# of the index
def config2params(self, config: dict) -> dict:
params = config.copy()
params[TransformersEstimator.ITER_HP] = params.get(
TransformersEstimator.ITER_HP, sys.maxsize
)
return params
class SKLearnEstimator(BaseEstimator):
"""The base class for tuning scikit-learn estimators."""
def __init__(self, task="binary", **config):
super().__init__(task, **config)
def _preprocess(self, X):
if isinstance(X, DataFrame):
cat_columns = X.select_dtypes(include=["category"]).columns
if not cat_columns.empty:
X = X.copy()
X[cat_columns] = X[cat_columns].apply(lambda x: x.cat.codes)
elif isinstance(X, np.ndarray) and X.dtype.kind not in "buif":
# numpy array is not of numeric dtype
X = DataFrame(X)
for col in X.columns:
if isinstance(X[col][0], str):
X[col] = X[col].astype("category").cat.codes
X = X.to_numpy()
return X
class LGBMEstimator(BaseEstimator):
"""The class for tuning LGBM, using sklearn API."""
ITER_HP = "n_estimators"
HAS_CALLBACK = True
@classmethod
def search_space(cls, data_size, **params):
upper = min(32768, int(data_size[0]))
return {
"n_estimators": {
"domain": tune.lograndint(lower=4, upper=upper),
"init_value": 4,
"low_cost_init_value": 4,
},
"num_leaves": {
"domain": tune.lograndint(lower=4, upper=upper),
"init_value": 4,
"low_cost_init_value": 4,
},
"min_child_samples": {
"domain": tune.lograndint(lower=2, upper=2 ** 7 + 1),
"init_value": 20,
},
"learning_rate": {
"domain": tune.loguniform(lower=1 / 1024, upper=1.0),
"init_value": 0.1,
},
# 'subsample': {
# 'domain': tune.uniform(lower=0.1, upper=1.0),
# 'init_value': 1.0,
# },
"log_max_bin": { # log transformed with base 2
"domain": tune.lograndint(lower=3, upper=11),
"init_value": 8,
},
"colsample_bytree": {
"domain": tune.uniform(lower=0.01, upper=1.0),
"init_value": 1.0,
},
"reg_alpha": {
"domain": tune.loguniform(lower=1 / 1024, upper=1024),
"init_value": 1 / 1024,
},
"reg_lambda": {
"domain": tune.loguniform(lower=1 / 1024, upper=1024),
"init_value": 1.0,
},
}
def config2params(self, config: dict) -> dict:
params = config.copy()
if "log_max_bin" in params:
params["max_bin"] = (1 << params.pop("log_max_bin")) - 1
return params
@classmethod
def size(cls, config):
num_leaves = int(
round(
config.get("num_leaves")
or config.get("max_leaves")
or 1 << config.get("max_depth", 16)
)
)
n_estimators = int(round(config["n_estimators"]))
return (num_leaves * 3 + (num_leaves - 1) * 4 + 1.0) * n_estimators * 8
def __init__(self, task="binary", **config):
super().__init__(task, **config)
if "verbose" not in self.params:
self.params["verbose"] = -1
if "regression" == task:
from lightgbm import LGBMRegressor
self.estimator_class = LGBMRegressor
elif "rank" == task:
from lightgbm import LGBMRanker
self.estimator_class = LGBMRanker
else:
from lightgbm import LGBMClassifier
self.estimator_class = LGBMClassifier
self._time_per_iter = None
self._train_size = 0
self._mem_per_iter = -1
self.HAS_CALLBACK = self.HAS_CALLBACK and self._callbacks(0, 0) is not None
def _preprocess(self, X):
if (
not isinstance(X, DataFrame)
and issparse(X)
and np.issubdtype(X.dtype, np.integer)
):
X = X.astype(float)
elif isinstance(X, np.ndarray) and X.dtype.kind not in "buif":
# numpy array is not of numeric dtype
X = DataFrame(X)
for col in X.columns:
if isinstance(X[col][0], str):
X[col] = X[col].astype("category").cat.codes
X = X.to_numpy()
return X
def fit(self, X_train, y_train, budget=None, **kwargs):
start_time = time.time()
deadline = start_time + budget if budget else np.inf
n_iter = self.params[self.ITER_HP]
trained = False
if not self.HAS_CALLBACK:
mem0 = psutil.virtual_memory().available if psutil is not None else 1
if (
(
not self._time_per_iter
or abs(self._train_size - X_train.shape[0]) > 4
)
and budget is not None
or self._mem_per_iter < 0
and psutil is not None
) and n_iter > 1:
self.params[self.ITER_HP] = 1
self._t1 = self._fit(X_train, y_train, **kwargs)
if budget is not None and self._t1 >= budget or n_iter == 1:
# self.params[self.ITER_HP] = n_iter
return self._t1
mem1 = psutil.virtual_memory().available if psutil is not None else 1
self._mem1 = mem0 - mem1
self.params[self.ITER_HP] = min(n_iter, 4)
self._t2 = self._fit(X_train, y_train, **kwargs)
mem2 = psutil.virtual_memory().available if psutil is not None else 1
self._mem2 = max(mem0 - mem2, self._mem1)
# if self._mem1 <= 0:
# self._mem_per_iter = self._mem2 / (self.params[self.ITER_HP] + 1)
# elif self._mem2 <= 0:
# self._mem_per_iter = self._mem1
# else:
self._mem_per_iter = min(
self._mem1, self._mem2 / self.params[self.ITER_HP]
)
# if self._mem_per_iter <= 1 and psutil is not None:
# n_iter = self.params[self.ITER_HP]
self._time_per_iter = (
(self._t2 - self._t1) / (self.params[self.ITER_HP] - 1)
if self._t2 > self._t1
else self._t1
if self._t1
else 0.001
)
self._train_size = X_train.shape[0]
if (
budget is not None
and self._t1 + self._t2 >= budget
or n_iter == self.params[self.ITER_HP]
):
# self.params[self.ITER_HP] = n_iter
return time.time() - start_time
trained = True
# logger.debug(mem0)
# logger.debug(self._mem_per_iter)
if n_iter > 1:
max_iter = min(
n_iter,
int(
(budget - time.time() + start_time - self._t1)
/ self._time_per_iter
+ 1
)
if budget is not None
else n_iter,
int((1 - FREE_MEM_RATIO) * mem0 / self._mem_per_iter)
if psutil is not None and self._mem_per_iter > 0
else n_iter,
)
if trained and max_iter <= self.params[self.ITER_HP]:
return time.time() - start_time
self.params[self.ITER_HP] = max_iter
if self.params[self.ITER_HP] > 0:
if self.HAS_CALLBACK:
self._fit(
X_train,
y_train,
callbacks=self._callbacks(start_time, deadline),
**kwargs,
)
best_iteration = (
self._model.get_booster().best_iteration
if isinstance(self, XGBoostSklearnEstimator)
else self._model.best_iteration_
)
if best_iteration is not None:
self._model.set_params(n_estimators=best_iteration + 1)
else:
self._fit(X_train, y_train, **kwargs)
else:
self.params[self.ITER_HP] = self._model.n_estimators
train_time = time.time() - start_time
return train_time
def _callbacks(self, start_time, deadline) -> List[Callable]:
return [partial(self._callback, start_time, deadline)]
def _callback(self, start_time, deadline, env) -> None:
from lightgbm.callback import EarlyStopException
now = time.time()
if env.iteration == 0:
self._time_per_iter = now - start_time
if now + self._time_per_iter > deadline:
raise EarlyStopException(env.iteration, env.evaluation_result_list)
if psutil is not None:
mem = psutil.virtual_memory()
if mem.available / mem.total < FREE_MEM_RATIO:
raise EarlyStopException(env.iteration, env.evaluation_result_list)
class XGBoostEstimator(SKLearnEstimator):
"""The class for tuning XGBoost regressor, not using sklearn API."""
@classmethod
def search_space(cls, data_size, **params):
upper = min(32768, int(data_size[0]))
return {
"n_estimators": {
"domain": tune.lograndint(lower=4, upper=upper),
"init_value": 4,
"low_cost_init_value": 4,
},
"max_leaves": {
"domain": tune.lograndint(lower=4, upper=upper),
"init_value": 4,
"low_cost_init_value": 4,
},
"max_depth": {
"domain": tune.choice([0, 6, 12]),
"init_value": 0,
},
"min_child_weight": {
"domain": tune.loguniform(lower=0.001, upper=128),
"init_value": 1,
},
"learning_rate": {
"domain": tune.loguniform(lower=1 / 1024, upper=1.0),
"init_value": 0.1,
},
"subsample": {
"domain": tune.uniform(lower=0.1, upper=1.0),
"init_value": 1.0,
},
"colsample_bylevel": {
"domain": tune.uniform(lower=0.01, upper=1.0),
"init_value": 1.0,
},
"colsample_bytree": {
"domain": tune.uniform(lower=0.01, upper=1.0),
"init_value": 1.0,
},
"reg_alpha": {
"domain": tune.loguniform(lower=1 / 1024, upper=1024),
"init_value": 1 / 1024,
},
"reg_lambda": {
"domain": tune.loguniform(lower=1 / 1024, upper=1024),
"init_value": 1.0,
},
}
@classmethod
def size(cls, config):
return LGBMEstimator.size(config)
@classmethod
def cost_relative2lgbm(cls):
return 1.6
def config2params(self, config: dict) -> dict:
params = config.copy()
max_depth = params["max_depth"] = params.get("max_depth", 0)
if max_depth == 0:
params["grow_policy"] = params.get("grow_policy", "lossguide")
params["tree_method"] = params.get("tree_method", "hist")
# params["booster"] = params.get("booster", "gbtree")
params["use_label_encoder"] = params.get("use_label_encoder", False)
if "n_jobs" in config:
params["nthread"] = params.pop("n_jobs")
return params
def __init__(
self,
task="regression",
**config,
):
super().__init__(task, **config)
self.params["verbosity"] = 0
def fit(self, X_train, y_train, budget=None, **kwargs):
import xgboost as xgb
start_time = time.time()
deadline = start_time + budget if budget else np.inf
if issparse(X_train):
self.params["tree_method"] = "auto"
else:
X_train = self._preprocess(X_train)
if "sample_weight" in kwargs:
dtrain = xgb.DMatrix(X_train, label=y_train, weight=kwargs["sample_weight"])
else:
dtrain = xgb.DMatrix(X_train, label=y_train)
objective = self.params.get("objective")
if isinstance(objective, str):
obj = None
else:
obj = objective
if "objective" in self.params:
del self.params["objective"]
_n_estimators = self.params.pop("n_estimators")
callbacks = XGBoostEstimator._callbacks(start_time, deadline)
if callbacks:
self._model = xgb.train(
self.params,
dtrain,
_n_estimators,
obj=obj,
callbacks=callbacks,
)
self.params["n_estimators"] = self._model.best_iteration + 1
else:
self._model = xgb.train(self.params, dtrain, _n_estimators, obj=obj)
self.params["n_estimators"] = _n_estimators
self.params["objective"] = objective
del dtrain
train_time = time.time() - start_time
return train_time
def predict(self, X_test):
import xgboost as xgb
if not issparse(X_test):
X_test = self._preprocess(X_test)
dtest = xgb.DMatrix(X_test)
return super().predict(dtest)
@classmethod
def _callbacks(cls, start_time, deadline):
try:
from xgboost.callback import TrainingCallback
except ImportError: # for xgboost<1.3
return None
class ResourceLimit(TrainingCallback):
def after_iteration(self, model, epoch, evals_log) -> bool:
now = time.time()
if epoch == 0:
self._time_per_iter = now - start_time
if now + self._time_per_iter > deadline:
return True
if psutil is not None:
mem = psutil.virtual_memory()
if mem.available / mem.total < FREE_MEM_RATIO:
return True
return False
return [ResourceLimit()]
class XGBoostSklearnEstimator(SKLearnEstimator, LGBMEstimator):
"""The class for tuning XGBoost with unlimited depth, using sklearn API."""
@classmethod
def search_space(cls, data_size, **params):
space = XGBoostEstimator.search_space(data_size)
space.pop("max_depth")
return space
@classmethod
def cost_relative2lgbm(cls):
return XGBoostEstimator.cost_relative2lgbm()
def config2params(self, config: dict) -> dict:
params = config.copy()
max_depth = params["max_depth"] = params.get("max_depth", 0)
if max_depth == 0:
params["grow_policy"] = params.get("grow_policy", "lossguide")
params["tree_method"] = params.get("tree_method", "hist")
params["use_label_encoder"] = params.get("use_label_encoder", False)
return params
def __init__(
self,
task="binary",
**config,
):
super().__init__(task, **config)
del self.params["verbose"]
self.params["verbosity"] = 0
import xgboost as xgb
self.estimator_class = xgb.XGBRegressor
if "rank" == task:
self.estimator_class = xgb.XGBRanker
elif task in CLASSIFICATION:
self.estimator_class = xgb.XGBClassifier
def fit(self, X_train, y_train, budget=None, **kwargs):
if issparse(X_train):
self.params["tree_method"] = "auto"
return super().fit(X_train, y_train, budget, **kwargs)
def _callbacks(self, start_time, deadline) -> List[Callable]:
return XGBoostEstimator._callbacks(start_time, deadline)
class XGBoostLimitDepthEstimator(XGBoostSklearnEstimator):
"""The class for tuning XGBoost with limited depth, using sklearn API."""
@classmethod
def search_space(cls, data_size, **params):
space = XGBoostEstimator.search_space(data_size)
space.pop("max_leaves")
upper = max(6, int(np.log2(data_size[0])))
space["max_depth"] = {
"domain": tune.randint(lower=1, upper=min(upper, 16)),
"init_value": 6,
"low_cost_init_value": 1,
}
space["learning_rate"]["init_value"] = 0.3
space["n_estimators"]["init_value"] = 10
return space
@classmethod
def cost_relative2lgbm(cls):
return 64
class RandomForestEstimator(SKLearnEstimator, LGBMEstimator):
"""The class for tuning Random Forest."""
HAS_CALLBACK = False
nrows = 101
@classmethod
def search_space(cls, data_size, task, **params):
RandomForestEstimator.nrows = int(data_size[0])
upper = min(2048, RandomForestEstimator.nrows)
init = 1 / np.sqrt(data_size[1]) if task in CLASSIFICATION else 1
lower = min(0.1, init)
space = {
"n_estimators": {
"domain": tune.lograndint(lower=4, upper=upper),
"init_value": 4,
"low_cost_init_value": 4,
},
"max_features": {
"domain": tune.loguniform(lower=lower, upper=1.0),
"init_value": init,
},
"max_leaves": {
"domain": tune.lograndint(
lower=4, upper=min(32768, RandomForestEstimator.nrows >> 1)
),
"init_value": 4,
"low_cost_init_value": 4,
},
}
if task in CLASSIFICATION:
space["criterion"] = {
"domain": tune.choice(["gini", "entropy"]),
# "init_value": "gini",
}
return space
@classmethod
def cost_relative2lgbm(cls):
return 2
def config2params(self, config: dict) -> dict:
params = config.copy()
if "max_leaves" in params:
params["max_leaf_nodes"] = params.get(
"max_leaf_nodes", params.pop("max_leaves")
)
if self._task not in CLASSIFICATION and "criterion" in config:
params.pop("criterion")
return params
def __init__(
self,
task="binary",
**params,
):
super().__init__(task, **params)
self.params["verbose"] = 0
self.estimator_class = RandomForestRegressor
if task in CLASSIFICATION:
self.estimator_class = RandomForestClassifier
class ExtraTreesEstimator(RandomForestEstimator):
"""The class for tuning Extra Trees."""
@classmethod
def cost_relative2lgbm(cls):
return 1.9
def __init__(self, task="binary", **params):
super().__init__(task, **params)
if "regression" in task:
self.estimator_class = ExtraTreesRegressor
else:
self.estimator_class = ExtraTreesClassifier
class LRL1Classifier(SKLearnEstimator):
"""The class for tuning Logistic Regression with L1 regularization."""
@classmethod
def search_space(cls, **params):
return {
"C": {
"domain": tune.loguniform(lower=0.03125, upper=32768.0),
"init_value": 1.0,
},
}
@classmethod
def cost_relative2lgbm(cls):
return 160
def config2params(self, config: dict) -> dict:
params = config.copy()
params["tol"] = params.get("tol", 0.0001)
params["solver"] = params.get("solver", "saga")
params["penalty"] = params.get("penalty", "l1")
return params
def __init__(self, task="binary", **config):
super().__init__(task, **config)
assert task in CLASSIFICATION, "LogisticRegression for classification task only"
self.estimator_class = LogisticRegression
class LRL2Classifier(SKLearnEstimator):
"""The class for tuning Logistic Regression with L2 regularization."""
limit_resource = True
@classmethod
def search_space(cls, **params):
return LRL1Classifier.search_space(**params)
@classmethod
def cost_relative2lgbm(cls):
return 25
def config2params(self, config: dict) -> dict:
params = config.copy()
params["tol"] = params.get("tol", 0.0001)
params["solver"] = params.get("solver", "lbfgs")
params["penalty"] = params.get("penalty", "l2")
return params
def __init__(self, task="binary", **config):
super().__init__(task, **config)
assert task in CLASSIFICATION, "LogisticRegression for classification task only"
self.estimator_class = LogisticRegression
class CatBoostEstimator(BaseEstimator):
"""The class for tuning CatBoost."""
ITER_HP = "n_estimators"
@classmethod
def search_space(cls, data_size, **params):
upper = max(min(round(1500000 / data_size[0]), 150), 12)
return {
"early_stopping_rounds": {
"domain": tune.lograndint(lower=10, upper=upper),
"init_value": 10,
"low_cost_init_value": 10,
},
"learning_rate": {
"domain": tune.loguniform(lower=0.005, upper=0.2),
"init_value": 0.1,
},
"n_estimators": {
"domain": 8192,
"init_value": 8192,
},
}
@classmethod
def size(cls, config):
n_estimators = config.get("n_estimators", 8192)
max_leaves = 64
return (max_leaves * 3 + (max_leaves - 1) * 4 + 1.0) * n_estimators * 8
@classmethod
def cost_relative2lgbm(cls):
return 15
def _preprocess(self, X):
if isinstance(X, DataFrame):
cat_columns = X.select_dtypes(include=["category"]).columns
if not cat_columns.empty:
X = X.copy()
X[cat_columns] = X[cat_columns].apply(
lambda x: x.cat.rename_categories(
[
str(c) if isinstance(c, float) else c
for c in x.cat.categories
]
)
)
elif isinstance(X, np.ndarray) and X.dtype.kind not in "buif":
# numpy array is not of numeric dtype
X = DataFrame(X)
for col in X.columns:
if isinstance(X[col][0], str):
X[col] = X[col].astype("category").cat.codes
X = X.to_numpy()
return X
def config2params(self, config: dict) -> dict:
params = config.copy()
params["n_estimators"] = params.get("n_estimators", 8192)
if "n_jobs" in params:
params["thread_count"] = params.pop("n_jobs")
return params
def __init__(
self,
task="binary",
**config,
):
super().__init__(task, **config)
self.params.update(
{
"verbose": config.get("verbose", False),
"random_seed": config.get("random_seed", 10242048),
}
)
from catboost import CatBoostRegressor
self.estimator_class = CatBoostRegressor
if task in CLASSIFICATION:
from catboost import CatBoostClassifier
self.estimator_class = CatBoostClassifier
def fit(self, X_train, y_train, budget=None, **kwargs):
start_time = time.time()
deadline = start_time + budget if budget else np.inf
train_dir = f"catboost_{str(start_time)}"
X_train = self._preprocess(X_train)
if isinstance(X_train, DataFrame):
cat_features = list(X_train.select_dtypes(include="category").columns)
else:
cat_features = []
n = max(int(len(y_train) * 0.9), len(y_train) - 1000)
X_tr, y_tr = X_train[:n], y_train[:n]
if "sample_weight" in kwargs:
weight = kwargs["sample_weight"]
if weight is not None:
kwargs["sample_weight"] = weight[:n]
else:
weight = None
from catboost import Pool, __version__
model = self.estimator_class(train_dir=train_dir, **self.params)
if __version__ >= "0.26":
model.fit(
X_tr,
y_tr,
cat_features=cat_features,
eval_set=Pool(
data=X_train[n:], label=y_train[n:], cat_features=cat_features
),
callbacks=CatBoostEstimator._callbacks(start_time, deadline),
**kwargs,
)
else:
model.fit(
X_tr,
y_tr,
cat_features=cat_features,
eval_set=Pool(
data=X_train[n:], label=y_train[n:], cat_features=cat_features
),
**kwargs,
)
shutil.rmtree(train_dir, ignore_errors=True)
if weight is not None:
kwargs["sample_weight"] = weight
self._model = model
self.params[self.ITER_HP] = self._model.tree_count_
train_time = time.time() - start_time
return train_time
@classmethod
def _callbacks(cls, start_time, deadline):
class ResourceLimit:
def after_iteration(self, info) -> bool:
now = time.time()
if info.iteration == 1:
self._time_per_iter = now - start_time
if now + self._time_per_iter > deadline:
return False
if psutil is not None:
mem = psutil.virtual_memory()
if mem.available / mem.total < FREE_MEM_RATIO:
return False
return True # can continue
return [ResourceLimit()]
class KNeighborsEstimator(BaseEstimator):
@classmethod
def search_space(cls, data_size, **params):
upper = min(512, int(data_size[0] / 2))
return {
"n_neighbors": {
"domain": tune.lograndint(lower=1, upper=upper),
"init_value": 5,
"low_cost_init_value": 1,
},
}
@classmethod
def cost_relative2lgbm(cls):
return 30
def config2params(self, config: dict) -> dict:
params = config.copy()
params["weights"] = params.get("weights", "distance")
return params
def __init__(self, task="binary", **config):
super().__init__(task, **config)
if task in CLASSIFICATION:
from sklearn.neighbors import KNeighborsClassifier
self.estimator_class = KNeighborsClassifier
else:
from sklearn.neighbors import KNeighborsRegressor
self.estimator_class = KNeighborsRegressor
def _preprocess(self, X):
if isinstance(X, DataFrame):
cat_columns = X.select_dtypes(["category"]).columns
if X.shape[1] == len(cat_columns):
raise ValueError("kneighbor requires at least one numeric feature")
X = X.drop(cat_columns, axis=1)
elif isinstance(X, np.ndarray) and X.dtype.kind not in "buif":
            # drop categorical columns if any
X = DataFrame(X)
cat_columns = []
for col in X.columns:
if isinstance(X[col][0], str):
cat_columns.append(col)
X = X.drop(cat_columns, axis=1)
X = X.to_numpy()
return X
class Prophet(SKLearnEstimator):
"""The class for tuning Prophet."""
@classmethod
def search_space(cls, **params):
space = {
"changepoint_prior_scale": {
"domain": tune.loguniform(lower=0.001, upper=0.05),
"init_value": 0.05,
"low_cost_init_value": 0.001,
},
"seasonality_prior_scale": {
"domain": tune.loguniform(lower=0.01, upper=10),
"init_value": 10,
},
"holidays_prior_scale": {
"domain": tune.loguniform(lower=0.01, upper=10),
"init_value": 10,
},
"seasonality_mode": {
"domain": tune.choice(["additive", "multiplicative"]),
"init_value": "multiplicative",
},
}
return space
def __init__(self, task=TS_FORECAST, n_jobs=1, **params):
super().__init__(task, **params)
def _join(self, X_train, y_train):
assert TS_TIMESTAMP_COL in X_train, (
"Dataframe for training ts_forecast model must have column"
f' "{TS_TIMESTAMP_COL}" with the dates in X_train.'
)
y_train = DataFrame(y_train, columns=[TS_VALUE_COL])
train_df = X_train.join(y_train)
return train_df
def fit(self, X_train, y_train, budget=None, **kwargs):
from prophet import Prophet
current_time = time.time()
train_df = self._join(X_train, y_train)
train_df = self._preprocess(train_df)
cols = list(train_df)
cols.remove(TS_TIMESTAMP_COL)
cols.remove(TS_VALUE_COL)
logging.getLogger("prophet").setLevel(logging.WARNING)
model = Prophet(**self.params)
for regressor in cols:
model.add_regressor(regressor)
with suppress_stdout_stderr():
model.fit(train_df)
train_time = time.time() - current_time
self._model = model
return train_time
def predict(self, X_test):
if isinstance(X_test, int):
raise ValueError(
"predict() with steps is only supported for arima/sarimax."
" For Prophet, pass a dataframe with the first column containing"
" the timestamp values."
)
if self._model is not None:
X_test = self._preprocess(X_test)
forecast = self._model.predict(X_test)
return forecast["yhat"]
else:
logger.warning(
"Estimator is not fit yet. Please run fit() before predict()."
)
return np.ones(X_test.shape[0])
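# Illustrative input layout for the ts_forecast estimators (inferred from _join()
# above; the column names come from the TS_TIMESTAMP_COL / TS_VALUE_COL constants
# imported elsewhere in this module):
#
#     X_train = DataFrame({TS_TIMESTAMP_COL: pd.date_range("2021-01-01", periods=30)})
#     y_train = list(range(30))
#     m = Prophet()
#     m.fit(X_train, y_train)
#     forecast = m.predict(X_train.tail(7))   # returns the "yhat" series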
class ARIMA(Prophet):
"""The class for tuning ARIMA."""
@classmethod
def search_space(cls, **params):
space = {
"p": {
"domain": tune.quniform(lower=0, upper=10, q=1),
"init_value": 2,
"low_cost_init_value": 0,
},
"d": {
"domain": tune.quniform(lower=0, upper=10, q=1),
"init_value": 2,
"low_cost_init_value": 0,
},
"q": {
"domain": tune.quniform(lower=0, upper=10, q=1),
"init_value": 1,
"low_cost_init_value": 0,
},
}
return space
def _join(self, X_train, y_train):
train_df = super()._join(X_train, y_train)
train_df.index = pd.to_datetime(train_df[TS_TIMESTAMP_COL])
train_df = train_df.drop(TS_TIMESTAMP_COL, axis=1)
return train_df
def fit(self, X_train, y_train, budget=None, **kwargs):
import warnings
warnings.filterwarnings("ignore")
from statsmodels.tsa.arima.model import ARIMA as ARIMA_estimator
current_time = time.time()
train_df = self._join(X_train, y_train)
train_df = self._preprocess(train_df)
regressors = list(train_df)
regressors.remove(TS_VALUE_COL)
if regressors:
model = ARIMA_estimator(
train_df[[TS_VALUE_COL]],
exog=train_df[regressors],
order=(self.params["p"], self.params["d"], self.params["q"]),
enforce_stationarity=False,
enforce_invertibility=False,
)
else:
model = ARIMA_estimator(
train_df,
order=(self.params["p"], self.params["d"], self.params["q"]),
enforce_stationarity=False,
enforce_invertibility=False,
)
with suppress_stdout_stderr():
model = model.fit()
train_time = time.time() - current_time
self._model = model
return train_time
def predict(self, X_test):
if self._model is not None:
if isinstance(X_test, int):
forecast = self._model.forecast(steps=X_test)
elif isinstance(X_test, DataFrame):
start = X_test[TS_TIMESTAMP_COL].iloc[0]
end = X_test[TS_TIMESTAMP_COL].iloc[-1]
if len(X_test.columns) > 1:
X_test = self._preprocess(X_test.drop(columns=TS_TIMESTAMP_COL))
regressors = list(X_test)
forecast = self._model.predict(
start=start, end=end, exog=X_test[regressors]
)
else:
forecast = self._model.predict(start=start, end=end)
else:
raise ValueError(
"X_test needs to be either a pandas Dataframe with dates as the first column"
" or an int number of periods for predict()."
)
return forecast
else:
return np.ones(X_test if isinstance(X_test, int) else X_test.shape[0])
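# Note (from ARIMA.predict() above): unlike Prophet, the ARIMA/SARIMAX wrappers
# also accept an integer in predict(), interpreted as the number of future periods
# to forecast, e.g. model.predict(7).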
class SARIMAX(ARIMA):
"""The class for tuning SARIMA."""
@classmethod
def search_space(cls, **params):
space = {
"p": {
"domain": tune.quniform(lower=0, upper=10, q=1),
"init_value": 2,
"low_cost_init_value": 0,
},
"d": {
"domain": tune.quniform(lower=0, upper=10, q=1),
"init_value": 2,
"low_cost_init_value": 0,
},
"q": {
"domain": tune.quniform(lower=0, upper=10, q=1),
"init_value": 1,
"low_cost_init_value": 0,
},
"P": {
"domain": tune.quniform(lower=0, upper=10, q=1),
"init_value": 1,
"low_cost_init_value": 0,
},
"D": {
"domain": tune.quniform(lower=0, upper=10, q=1),
"init_value": 1,
"low_cost_init_value": 0,
},
"Q": {
"domain": tune.quniform(lower=0, upper=10, q=1),
"init_value": 1,
"low_cost_init_value": 0,
},
"s": {
"domain": tune.choice([1, 4, 6, 12]),
"init_value": 12,
},
}
return space
def fit(self, X_train, y_train, budget=None, **kwargs):
import warnings
warnings.filterwarnings("ignore")
from statsmodels.tsa.statespace.sarimax import SARIMAX as SARIMAX_estimator
current_time = time.time()
train_df = self._join(X_train, y_train)
train_df = self._preprocess(train_df)
regressors = list(train_df)
regressors.remove(TS_VALUE_COL)
if regressors:
model = SARIMAX_estimator(
train_df[[TS_VALUE_COL]],
exog=train_df[regressors],
order=(self.params["p"], self.params["d"], self.params["q"]),
                seasonal_order=(
self.params["P"],
self.params["D"],
self.params["Q"],
self.params["s"],
),
enforce_stationarity=False,
enforce_invertibility=False,
)
else:
model = SARIMAX_estimator(
train_df,
order=(self.params["p"], self.params["d"], self.params["q"]),
                seasonal_order=(
self.params["P"],
self.params["D"],
self.params["Q"],
self.params["s"],
),
enforce_stationarity=False,
enforce_invertibility=False,
)
with suppress_stdout_stderr():
model = model.fit()
train_time = time.time() - current_time
self._model = model
return train_time
class suppress_stdout_stderr(object):
def __init__(self):
# Open a pair of null files
self.null_fds = [os.open(os.devnull, os.O_RDWR) for x in range(2)]
# Save the actual stdout (1) and stderr (2) file descriptors.
self.save_fds = (os.dup(1), os.dup(2))
def __enter__(self):
# Assign the null pointers to stdout and stderr.
os.dup2(self.null_fds[0], 1)
os.dup2(self.null_fds[1], 2)
def __exit__(self, *_):
# Re-assign the real stdout/stderr back to (1) and (2)
os.dup2(self.save_fds[0], 1)
os.dup2(self.save_fds[1], 2)
# Close the null files
os.close(self.null_fds[0])
os.close(self.null_fds[1])
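# Usage pattern (as in the fit() methods above): silence noisy third-party output
# at the file-descriptor level for the duration of a call.
#
#     with suppress_stdout_stderr():
#         model.fit(train_df)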
| 36.292739 | 131 | 0.539653 |
794046a9a27ba53eec0ffda2acb470f34a19a802 | 1,062 | py | Python | website/urls.py | sharif-42/Personal_Website | 7c385bec272ec7b5c816eab92e3b5bfb8cd80016 | [
"MIT"
] | null | null | null | website/urls.py | sharif-42/Personal_Website | 7c385bec272ec7b5c816eab92e3b5bfb8cd80016 | [
"MIT"
] | null | null | null | website/urls.py | sharif-42/Personal_Website | 7c385bec272ec7b5c816eab92e3b5bfb8cd80016 | [
"MIT"
] | null | null | null | """website URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/3.2/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path, include
from django.conf import settings
from django.conf.urls.static import static
urlpatterns = [
path('admin/', admin.site.urls),
path('', include('blog.rest_apis.urls'))
]
if settings.DEBUG:
urlpatterns += static(settings.STATIC_URL, document_root=settings.STATIC_ROOT)
urlpatterns += static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT) | 37.928571 | 82 | 0.731638 |
7940477e6bb0c1e71467d5385dbcda485b9edc83 | 1,318 | py | Python | distkv/command/client/log.py | smurfix/distkv | 190b4c8f6cbe038b85bac4317a7fb13a9e742516 | [
"Apache-2.0",
"MIT"
] | 10 | 2019-03-19T16:04:21.000Z | 2020-06-04T20:11:10.000Z | distkv/command/client/log.py | M-o-a-T/distkv | 94f440e1480ecec04af63abdb66ec8f23859f871 | [
"Apache-2.0",
"MIT"
] | 1 | 2020-02-05T18:32:52.000Z | 2020-02-05T22:15:36.000Z | distkv/command/client/log.py | smurfix/distkv | 190b4c8f6cbe038b85bac4317a7fb13a9e742516 | [
"Apache-2.0",
"MIT"
] | 1 | 2020-08-03T03:10:58.000Z | 2020-08-03T03:10:58.000Z | # command line interface
import asyncclick as click
from distkv.util import yprint
@click.group(short_help="Manage logging.") # pylint: disable=undefined-variable
async def cli():
"""
This subcommand controls a server's logging.
"""
pass
@cli.command()
@click.option("-i", "--incremental", is_flag=True, help="Don't write the initial state")
@click.argument("path", nargs=1)
@click.pass_obj
async def dest(obj, path, incremental):
"""
Log changes to a file.
Any previously open log (on the server you talk to) is closed as soon
as the new one is opened and ready.
"""
res = await obj.client._request("log", path=path, fetch=not incremental)
if obj.meta:
yprint(res, stream=obj.stdout)
@cli.command()
@click.option("-f", "--full", is_flag=1, help="Also dump internal state")
@click.argument("path", nargs=1)
@click.pass_obj
async def save(obj, path, full):
"""
Write the server's current state to a file.
"""
res = await obj.client._request("save", path=path, full=full)
if obj.meta:
yprint(res, stream=obj.stdout)
@cli.command()
@click.pass_obj
async def stop(obj):
"""
Stop logging changes.
"""
res = await obj.client._request("log") # no path == stop
if obj.meta:
yprint(res, stream=obj.stdout)
| 24.407407 | 88 | 0.656297 |
7940482833564abce82bcade0204082b4aaeb73d | 1,117 | py | Python | src/pyobo/sources/pfam_clan.py | fossabot/pyobo | f196b1ea37a50f9e9144bd0990e6c52ec1c16d0f | [
"MIT"
] | null | null | null | src/pyobo/sources/pfam_clan.py | fossabot/pyobo | f196b1ea37a50f9e9144bd0990e6c52ec1c16d0f | [
"MIT"
] | null | null | null | src/pyobo/sources/pfam_clan.py | fossabot/pyobo | f196b1ea37a50f9e9144bd0990e6c52ec1c16d0f | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
"""Convert PFAM Clans to OBO."""
from typing import Iterable
import bioversions
from tqdm import tqdm
from .pfam import get_pfam_clan_df
from ..struct import Obo, Reference, Term
PREFIX = 'pfam.clan'
def get_obo() -> Obo:
"""Get PFAM Clans as OBO."""
version = bioversions.get_version('pfam')
return Obo(
ontology=PREFIX,
name='PFAM Clans',
data_version=version,
iter_terms=iter_terms,
iter_terms_kwargs=dict(version=version),
auto_generated_by=f'bio2obo:{PREFIX}',
)
# TODO could get definitions from ftp://ftp.ebi.ac.uk/pub/databases/Pfam/releases/Pfam33.0/Pfam-C.gz
def iter_terms(version: str) -> Iterable[Term]:
"""Iterate PFAM clan terms."""
df = get_pfam_clan_df(version=version)
df = df[['clan_id', 'clan_name']].drop_duplicates()
it = tqdm(df.values, total=len(df.index), desc=f'mapping {PREFIX}')
for identifier, name in it:
yield Term(
reference=Reference(PREFIX, identifier=identifier, name=name),
)
if __name__ == '__main__':
get_obo().write_default()
| 25.386364 | 100 | 0.659803 |
7940482882fa35887944cf0001ee52f37433bbb3 | 804 | py | Python | env/Scripts/f2py.py | matrixxd/woofpatrol | af7f7b6f44719450f6e0bf707c82f2be727f2553 | [
"MIT"
] | 1 | 2019-11-15T11:31:10.000Z | 2019-11-15T11:31:10.000Z | env/Scripts/f2py.py | matrixxd/woofpatrol | af7f7b6f44719450f6e0bf707c82f2be727f2553 | [
"MIT"
] | 2 | 2022-01-13T01:50:33.000Z | 2022-03-12T00:05:29.000Z | env/Scripts/f2py.py | matrixxd/woofpatrol | af7f7b6f44719450f6e0bf707c82f2be727f2553 | [
"MIT"
] | null | null | null | #!C:\Users\Darren\Desktop\OnlineDogFilter\env\Scripts\python.exe
# See http://cens.ioc.ee/projects/f2py2e/
from __future__ import division, print_function
import os
import sys
for mode in ["g3-numpy", "2e-numeric", "2e-numarray", "2e-numpy"]:
try:
i = sys.argv.index("--" + mode)
del sys.argv[i]
break
except ValueError:
pass
os.environ["NO_SCIPY_IMPORT"] = "f2py"
if mode == "g3-numpy":
sys.stderr.write("G3 f2py support is not implemented, yet.\\n")
sys.exit(1)
elif mode == "2e-numeric":
from f2py2e import main
elif mode == "2e-numarray":
sys.argv.append("-DNUMARRAY")
from f2py2e import main
elif mode == "2e-numpy":
from numpy.f2py import main
else:
sys.stderr.write("Unknown mode: " + repr(mode) + "\\n")
sys.exit(1)
main()
| 27.724138 | 67 | 0.64801 |
79404957325a3d524c13ec5e68053a3f3c89924f | 126,621 | py | Python | base/site-packages/docutils/parsers/rst/states.py | edisonlz/fastor | 342078a18363ac41d3c6b1ab29dbdd44fdb0b7b3 | [
"Apache-2.0"
] | 285 | 2019-12-23T09:50:21.000Z | 2021-12-08T09:08:49.000Z | base/site-packages/docutils/parsers/rst/states.py | jeckun/fastor | 342078a18363ac41d3c6b1ab29dbdd44fdb0b7b3 | [
"Apache-2.0"
] | 3 | 2015-01-06T15:21:58.000Z | 2019-04-09T12:03:13.000Z | base/site-packages/docutils/parsers/rst/states.py | jeckun/fastor | 342078a18363ac41d3c6b1ab29dbdd44fdb0b7b3 | [
"Apache-2.0"
] | 9 | 2019-12-23T12:59:25.000Z | 2022-03-15T05:12:11.000Z | # $Id: states.py 6141 2009-09-25 18:50:30Z milde $
# Author: David Goodger <[email protected]>
# Copyright: This module has been placed in the public domain.
"""
This is the ``docutils.parsers.restructuredtext.states`` module, the core of
the reStructuredText parser. It defines the following:
:Classes:
- `RSTStateMachine`: reStructuredText parser's entry point.
- `NestedStateMachine`: recursive StateMachine.
- `RSTState`: reStructuredText State superclass.
- `Inliner`: For parsing inline markup.
- `Body`: Generic classifier of the first line of a block.
- `SpecializedBody`: Superclass for compound element members.
- `BulletList`: Second and subsequent bullet_list list_items
- `DefinitionList`: Second+ definition_list_items.
- `EnumeratedList`: Second+ enumerated_list list_items.
- `FieldList`: Second+ fields.
- `OptionList`: Second+ option_list_items.
- `RFC2822List`: Second+ RFC2822-style fields.
- `ExtensionOptions`: Parses directive option fields.
- `Explicit`: Second+ explicit markup constructs.
- `SubstitutionDef`: For embedded directives in substitution definitions.
- `Text`: Classifier of second line of a text block.
- `SpecializedText`: Superclass for continuation lines of Text-variants.
- `Definition`: Second line of potential definition_list_item.
- `Line`: Second line of overlined section title or transition marker.
- `Struct`: An auxiliary collection class.
:Exception classes:
- `MarkupError`
- `ParserError`
- `MarkupMismatch`
:Functions:
- `escape2null()`: Return a string, escape-backslashes converted to nulls.
- `unescape()`: Return a string, nulls removed or restored to backslashes.
:Attributes:
- `state_classes`: set of State classes used with `RSTStateMachine`.
Parser Overview
===============
The reStructuredText parser is implemented as a recursive state machine,
examining its input one line at a time. To understand how the parser works,
please first become familiar with the `docutils.statemachine` module. In the
description below, references are made to classes defined in this module;
please see the individual classes for details.
Parsing proceeds as follows:
1. The state machine examines each line of input, checking each of the
transition patterns of the state `Body`, in order, looking for a match.
The implicit transitions (blank lines and indentation) are checked before
any others. The 'text' transition is a catch-all (matches anything).
2. The method associated with the matched transition pattern is called.
A. Some transition methods are self-contained, appending elements to the
document tree (`Body.doctest` parses a doctest block). The parser's
current line index is advanced to the end of the element, and parsing
continues with step 1.
B. Other transition methods trigger the creation of a nested state machine,
whose job is to parse a compound construct ('indent' does a block quote,
'bullet' does a bullet list, 'overline' does a section [first checking
for a valid section header], etc.).
- In the case of lists and explicit markup, a one-off state machine is
created and run to parse contents of the first item.
- A new state machine is created and its initial state is set to the
appropriate specialized state (`BulletList` in the case of the
'bullet' transition; see `SpecializedBody` for more detail). This
state machine is run to parse the compound element (or series of
explicit markup elements), and returns as soon as a non-member element
is encountered. For example, the `BulletList` state machine ends as
soon as it encounters an element which is not a list item of that
bullet list. The optional omission of inter-element blank lines is
enabled by this nested state machine.
- The current line index is advanced to the end of the elements parsed,
and parsing continues with step 1.
C. The result of the 'text' transition depends on the next line of text.
The current state is changed to `Text`, under which the second line is
examined. If the second line is:
- Indented: The element is a definition list item, and parsing proceeds
similarly to step 2.B, using the `DefinitionList` state.
- A line of uniform punctuation characters: The element is a section
header; again, parsing proceeds as in step 2.B, and `Body` is still
used.
- Anything else: The element is a paragraph, which is examined for
inline markup and appended to the parent element. Processing
continues with step 1.
"""
__docformat__ = 'reStructuredText'
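# A minimal usage sketch (illustrative, not part of this module): the parser below
# is normally reached through the docutils front ends rather than instantiated
# directly, e.g.
#
#     from docutils.core import publish_doctree
#     doctree = publish_doctree("Title\n=====\n\nSome *emphasised* text.")
#
# publish_doctree() ends up running an RSTStateMachine (defined below) over the
# input lines of the source string.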
import sys
import re
import roman
from types import FunctionType, MethodType
from docutils import nodes, statemachine, utils, urischemes
from docutils import ApplicationError, DataError
from docutils.statemachine import StateMachineWS, StateWS
from docutils.nodes import fully_normalize_name as normalize_name
from docutils.nodes import whitespace_normalize_name
from docutils.utils import escape2null, unescape, column_width
import docutils.parsers.rst
from docutils.parsers.rst import directives, languages, tableparser, roles
from docutils.parsers.rst.languages import en as _fallback_language_module
class MarkupError(DataError): pass
class UnknownInterpretedRoleError(DataError): pass
class InterpretedRoleNotImplementedError(DataError): pass
class ParserError(ApplicationError): pass
class MarkupMismatch(Exception): pass
class Struct:
"""Stores data attributes for dotted-attribute access."""
def __init__(self, **keywordargs):
self.__dict__.update(keywordargs)
class RSTStateMachine(StateMachineWS):
"""
reStructuredText's master StateMachine.
The entry point to reStructuredText parsing is the `run()` method.
"""
def run(self, input_lines, document, input_offset=0, match_titles=1,
inliner=None):
"""
Parse `input_lines` and modify the `document` node in place.
Extend `StateMachineWS.run()`: set up parse-global data and
run the StateMachine.
"""
self.language = languages.get_language(
document.settings.language_code)
self.match_titles = match_titles
if inliner is None:
inliner = Inliner()
inliner.init_customizations(document.settings)
self.memo = Struct(document=document,
reporter=document.reporter,
language=self.language,
title_styles=[],
section_level=0,
section_bubble_up_kludge=0,
inliner=inliner)
self.document = document
self.attach_observer(document.note_source)
self.reporter = self.memo.reporter
self.node = document
results = StateMachineWS.run(self, input_lines, input_offset,
input_source=document['source'])
assert results == [], 'RSTStateMachine.run() results should be empty!'
self.node = self.memo = None # remove unneeded references
class NestedStateMachine(StateMachineWS):
"""
StateMachine run from within other StateMachine runs, to parse nested
document structures.
"""
def run(self, input_lines, input_offset, memo, node, match_titles=1):
"""
Parse `input_lines` and populate a `docutils.nodes.document` instance.
Extend `StateMachineWS.run()`: set up document-wide data.
"""
self.match_titles = match_titles
self.memo = memo
self.document = memo.document
self.attach_observer(self.document.note_source)
self.reporter = memo.reporter
self.language = memo.language
self.node = node
results = StateMachineWS.run(self, input_lines, input_offset)
assert results == [], ('NestedStateMachine.run() results should be '
'empty!')
return results
class RSTState(StateWS):
"""
reStructuredText State superclass.
Contains methods used by all State subclasses.
"""
nested_sm = NestedStateMachine
nested_sm_cache = []
def __init__(self, state_machine, debug=0):
self.nested_sm_kwargs = {'state_classes': state_classes,
'initial_state': 'Body'}
StateWS.__init__(self, state_machine, debug)
def runtime_init(self):
StateWS.runtime_init(self)
memo = self.state_machine.memo
self.memo = memo
self.reporter = memo.reporter
self.inliner = memo.inliner
self.document = memo.document
self.parent = self.state_machine.node
def goto_line(self, abs_line_offset):
"""
Jump to input line `abs_line_offset`, ignoring jumps past the end.
"""
try:
self.state_machine.goto_line(abs_line_offset)
except EOFError:
pass
def no_match(self, context, transitions):
"""
Override `StateWS.no_match` to generate a system message.
This code should never be run.
"""
self.reporter.severe(
'Internal error: no transition pattern match. State: "%s"; '
'transitions: %s; context: %s; current line: %r.'
% (self.__class__.__name__, transitions, context,
self.state_machine.line),
line=self.state_machine.abs_line_number())
return context, None, []
def bof(self, context):
"""Called at beginning of file."""
return [], []
def nested_parse(self, block, input_offset, node, match_titles=0,
state_machine_class=None, state_machine_kwargs=None):
"""
Create a new StateMachine rooted at `node` and run it over the input
`block`.
"""
use_default = 0
if state_machine_class is None:
state_machine_class = self.nested_sm
use_default += 1
if state_machine_kwargs is None:
state_machine_kwargs = self.nested_sm_kwargs
use_default += 1
block_length = len(block)
state_machine = None
if use_default == 2:
try:
state_machine = self.nested_sm_cache.pop()
except IndexError:
pass
if not state_machine:
state_machine = state_machine_class(debug=self.debug,
**state_machine_kwargs)
state_machine.run(block, input_offset, memo=self.memo,
node=node, match_titles=match_titles)
if use_default == 2:
self.nested_sm_cache.append(state_machine)
else:
state_machine.unlink()
new_offset = state_machine.abs_line_offset()
# No `block.parent` implies disconnected -- lines aren't in sync:
if block.parent and (len(block) - block_length) != 0:
# Adjustment for block if modified in nested parse:
self.state_machine.next_line(len(block) - block_length)
return new_offset
def nested_list_parse(self, block, input_offset, node, initial_state,
blank_finish,
blank_finish_state=None,
extra_settings={},
match_titles=0,
state_machine_class=None,
state_machine_kwargs=None):
"""
Create a new StateMachine rooted at `node` and run it over the input
`block`. Also keep track of optional intermediate blank lines and the
required final one.
"""
if state_machine_class is None:
state_machine_class = self.nested_sm
if state_machine_kwargs is None:
state_machine_kwargs = self.nested_sm_kwargs.copy()
state_machine_kwargs['initial_state'] = initial_state
state_machine = state_machine_class(debug=self.debug,
**state_machine_kwargs)
if blank_finish_state is None:
blank_finish_state = initial_state
state_machine.states[blank_finish_state].blank_finish = blank_finish
for key, value in extra_settings.items():
setattr(state_machine.states[initial_state], key, value)
state_machine.run(block, input_offset, memo=self.memo,
node=node, match_titles=match_titles)
blank_finish = state_machine.states[blank_finish_state].blank_finish
state_machine.unlink()
return state_machine.abs_line_offset(), blank_finish
def section(self, title, source, style, lineno, messages):
"""Check for a valid subsection and create one if it checks out."""
if self.check_subsection(source, style, lineno):
self.new_subsection(title, lineno, messages)
def check_subsection(self, source, style, lineno):
"""
Check for a valid subsection header. Return 1 (true) or None (false).
When a new section is reached that isn't a subsection of the current
section, back up the line count (use ``previous_line(-x)``), then
``raise EOFError``. The current StateMachine will finish, then the
calling StateMachine can re-examine the title. This will work its way
        back up the calling chain until the correct section level is reached.
@@@ Alternative: Evaluate the title, store the title info & level, and
back up the chain until that level is reached. Store in memo? Or
return in results?
:Exception: `EOFError` when a sibling or supersection encountered.
"""
memo = self.memo
title_styles = memo.title_styles
mylevel = memo.section_level
try: # check for existing title style
level = title_styles.index(style) + 1
except ValueError: # new title style
if len(title_styles) == memo.section_level: # new subsection
title_styles.append(style)
return 1
else: # not at lowest level
self.parent += self.title_inconsistent(source, lineno)
return None
if level <= mylevel: # sibling or supersection
memo.section_level = level # bubble up to parent section
if len(style) == 2:
memo.section_bubble_up_kludge = 1
# back up 2 lines for underline title, 3 for overline title
self.state_machine.previous_line(len(style) + 1)
raise EOFError # let parent section re-evaluate
if level == mylevel + 1: # immediate subsection
return 1
else: # invalid subsection
self.parent += self.title_inconsistent(source, lineno)
return None
def title_inconsistent(self, sourcetext, lineno):
error = self.reporter.severe(
'Title level inconsistent:', nodes.literal_block('', sourcetext),
line=lineno)
return error
def new_subsection(self, title, lineno, messages):
"""Append new subsection to document tree. On return, check level."""
memo = self.memo
mylevel = memo.section_level
memo.section_level += 1
section_node = nodes.section()
self.parent += section_node
textnodes, title_messages = self.inline_text(title, lineno)
titlenode = nodes.title(title, '', *textnodes)
name = normalize_name(titlenode.astext())
section_node['names'].append(name)
section_node += titlenode
section_node += messages
section_node += title_messages
self.document.note_implicit_target(section_node, section_node)
offset = self.state_machine.line_offset + 1
absoffset = self.state_machine.abs_line_offset() + 1
newabsoffset = self.nested_parse(
self.state_machine.input_lines[offset:], input_offset=absoffset,
node=section_node, match_titles=1)
self.goto_line(newabsoffset)
if memo.section_level <= mylevel: # can't handle next section?
raise EOFError # bubble up to supersection
# reset section_level; next pass will detect it properly
memo.section_level = mylevel
def paragraph(self, lines, lineno):
"""
Return a list (paragraph & messages) & a boolean: literal_block next?
"""
data = '\n'.join(lines).rstrip()
if re.search(r'(?<!\\)(\\\\)*::$', data):
if len(data) == 2:
return [], 1
elif data[-3] in ' \n':
text = data[:-3].rstrip()
else:
text = data[:-1]
literalnext = 1
else:
text = data
literalnext = 0
textnodes, messages = self.inline_text(text, lineno)
p = nodes.paragraph(data, '', *textnodes)
p.line = lineno
return [p] + messages, literalnext
def inline_text(self, text, lineno):
"""
Return 2 lists: nodes (text and inline elements), and system_messages.
"""
return self.inliner.parse(text, lineno, self.memo, self.parent)
def unindent_warning(self, node_name):
return self.reporter.warning(
'%s ends without a blank line; unexpected unindent.' % node_name,
line=(self.state_machine.abs_line_number() + 1))
def build_regexp(definition, compile=1):
"""
Build, compile and return a regular expression based on `definition`.
:Parameter: `definition`: a 4-tuple (group name, prefix, suffix, parts),
where "parts" is a list of regular expressions and/or regular
expression definitions to be joined into an or-group.
"""
name, prefix, suffix, parts = definition
part_strings = []
for part in parts:
if type(part) is tuple:
part_strings.append(build_regexp(part, None))
else:
part_strings.append(part)
or_group = '|'.join(part_strings)
regexp = '%(prefix)s(?P<%(name)s>%(or_group)s)%(suffix)s' % locals()
if compile:
return re.compile(regexp, re.UNICODE)
else:
return regexp
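# For example (illustrative): build_regexp(('num', '', '', ['[0-9]+', '[ivx]+']))
# compiles a pattern equivalent to r'(?P<num>[0-9]+|[ivx]+)'.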
class Inliner:
"""
Parse inline markup; call the `parse()` method.
"""
def __init__(self):
self.implicit_dispatch = [(self.patterns.uri, self.standalone_uri),]
"""List of (pattern, bound method) tuples, used by
`self.implicit_inline`."""
def init_customizations(self, settings):
"""Setting-based customizations; run when parsing begins."""
if settings.pep_references:
self.implicit_dispatch.append((self.patterns.pep,
self.pep_reference))
if settings.rfc_references:
self.implicit_dispatch.append((self.patterns.rfc,
self.rfc_reference))
def parse(self, text, lineno, memo, parent):
# Needs to be refactored for nested inline markup.
# Add nested_parse() method?
"""
Return 2 lists: nodes (text and inline elements), and system_messages.
Using `self.patterns.initial`, a pattern which matches start-strings
(emphasis, strong, interpreted, phrase reference, literal,
substitution reference, and inline target) and complete constructs
(simple reference, footnote reference), search for a candidate. When
one is found, check for validity (e.g., not a quoted '*' character).
If valid, search for the corresponding end string if applicable, and
check it for validity. If not found or invalid, generate a warning
and ignore the start-string. Implicit inline markup (e.g. standalone
URIs) is found last.
"""
self.reporter = memo.reporter
self.document = memo.document
self.language = memo.language
self.parent = parent
pattern_search = self.patterns.initial.search
dispatch = self.dispatch
remaining = escape2null(text)
processed = []
unprocessed = []
messages = []
while remaining:
match = pattern_search(remaining)
if match:
groups = match.groupdict()
method = dispatch[groups['start'] or groups['backquote']
or groups['refend'] or groups['fnend']]
before, inlines, remaining, sysmessages = method(self, match,
lineno)
unprocessed.append(before)
messages += sysmessages
if inlines:
processed += self.implicit_inline(''.join(unprocessed),
lineno)
processed += inlines
unprocessed = []
else:
break
remaining = ''.join(unprocessed) + remaining
if remaining:
processed += self.implicit_inline(remaining, lineno)
return processed, messages
openers = u'\'"([{<\u2018\u201c\xab\u00a1\u00bf' # see quoted_start below
closers = u'\'")]}>\u2019\u201d\xbb!?'
unicode_delimiters = u'\u2010\u2011\u2012\u2013\u2014\u00a0'
start_string_prefix = (u'((?<=^)|(?<=[-/: \\n\u2019%s%s]))'
% (re.escape(unicode_delimiters),
re.escape(openers)))
end_string_suffix = (r'((?=$)|(?=[-/:.,; \n\x00%s%s]))'
% (re.escape(unicode_delimiters),
re.escape(closers)))
non_whitespace_before = r'(?<![ \n])'
non_whitespace_escape_before = r'(?<![ \n\x00])'
non_whitespace_after = r'(?![ \n])'
# Alphanumerics with isolated internal [-._+:] chars (i.e. not 2 together):
simplename = r'(?:(?!_)\w)+(?:[-._+:](?:(?!_)\w)+)*'
# Valid URI characters (see RFC 2396 & RFC 2732);
# final \x00 allows backslash escapes in URIs:
uric = r"""[-_.!~*'()[\];/:@&=+$,%a-zA-Z0-9\x00]"""
# Delimiter indicating the end of a URI (not part of the URI):
uri_end_delim = r"""[>]"""
# Last URI character; same as uric but no punctuation:
urilast = r"""[_~*/=+a-zA-Z0-9]"""
# End of a URI (either 'urilast' or 'uric followed by a
# uri_end_delim'):
uri_end = r"""(?:%(urilast)s|%(uric)s(?=%(uri_end_delim)s))""" % locals()
emailc = r"""[-_!~*'{|}/#?^`&=+$%a-zA-Z0-9\x00]"""
email_pattern = r"""
%(emailc)s+(?:\.%(emailc)s+)* # name
(?<!\x00)@ # at
%(emailc)s+(?:\.%(emailc)s*)* # host
%(uri_end)s # final URI char
"""
parts = ('initial_inline', start_string_prefix, '',
[('start', '', non_whitespace_after, # simple start-strings
[r'\*\*', # strong
r'\*(?!\*)', # emphasis but not strong
r'``', # literal
r'_`', # inline internal target
r'\|(?!\|)'] # substitution reference
),
('whole', '', end_string_suffix, # whole constructs
[# reference name & end-string
r'(?P<refname>%s)(?P<refend>__?)' % simplename,
('footnotelabel', r'\[', r'(?P<fnend>\]_)',
[r'[0-9]+', # manually numbered
r'\#(%s)?' % simplename, # auto-numbered (w/ label?)
r'\*', # auto-symbol
r'(?P<citationlabel>%s)' % simplename] # citation reference
)
]
),
('backquote', # interpreted text or phrase reference
'(?P<role>(:%s:)?)' % simplename, # optional role
non_whitespace_after,
['`(?!`)'] # but not literal
)
]
)
patterns = Struct(
initial=build_regexp(parts),
emphasis=re.compile(non_whitespace_escape_before
+ r'(\*)' + end_string_suffix),
strong=re.compile(non_whitespace_escape_before
+ r'(\*\*)' + end_string_suffix),
interpreted_or_phrase_ref=re.compile(
r"""
%(non_whitespace_escape_before)s
(
`
(?P<suffix>
(?P<role>:%(simplename)s:)?
(?P<refend>__?)?
)
)
%(end_string_suffix)s
""" % locals(), re.VERBOSE | re.UNICODE),
embedded_uri=re.compile(
r"""
(
(?:[ \n]+|^) # spaces or beginning of line/string
< # open bracket
%(non_whitespace_after)s
([^<>\x00]+) # anything but angle brackets & nulls
%(non_whitespace_before)s
> # close bracket w/o whitespace before
)
$ # end of string
""" % locals(), re.VERBOSE),
literal=re.compile(non_whitespace_before + '(``)'
+ end_string_suffix),
target=re.compile(non_whitespace_escape_before
+ r'(`)' + end_string_suffix),
substitution_ref=re.compile(non_whitespace_escape_before
+ r'(\|_{0,2})'
+ end_string_suffix),
email=re.compile(email_pattern % locals() + '$', re.VERBOSE),
uri=re.compile(
(r"""
%(start_string_prefix)s
(?P<whole>
(?P<absolute> # absolute URI
(?P<scheme> # scheme (http, ftp, mailto)
[a-zA-Z][a-zA-Z0-9.+-]*
)
:
(
( # either:
(//?)? # hierarchical URI
%(uric)s* # URI characters
%(uri_end)s # final URI char
)
( # optional query
\?%(uric)s*
%(uri_end)s
)?
( # optional fragment
\#%(uric)s*
%(uri_end)s
)?
)
)
| # *OR*
(?P<email> # email address
""" + email_pattern + r"""
)
)
%(end_string_suffix)s
""") % locals(), re.VERBOSE),
pep=re.compile(
r"""
%(start_string_prefix)s
(
(pep-(?P<pepnum1>\d+)(.txt)?) # reference to source file
|
(PEP\s+(?P<pepnum2>\d+)) # reference by name
)
%(end_string_suffix)s""" % locals(), re.VERBOSE),
rfc=re.compile(
r"""
%(start_string_prefix)s
(RFC(-|\s+)?(?P<rfcnum>\d+))
%(end_string_suffix)s""" % locals(), re.VERBOSE))
def quoted_start(self, match):
"""Return 1 if inline markup start-string is 'quoted', 0 if not."""
string = match.string
start = match.start()
end = match.end()
if start == 0: # start-string at beginning of text
return 0
prestart = string[start - 1]
try:
poststart = string[end]
if self.openers.index(prestart) \
== self.closers.index(poststart): # quoted
return 1
except IndexError: # start-string at end of text
return 1
except ValueError: # not quoted
pass
return 0
def inline_obj(self, match, lineno, end_pattern, nodeclass,
restore_backslashes=0):
string = match.string
matchstart = match.start('start')
matchend = match.end('start')
if self.quoted_start(match):
return (string[:matchend], [], string[matchend:], [], '')
endmatch = end_pattern.search(string[matchend:])
if endmatch and endmatch.start(1): # 1 or more chars
text = unescape(endmatch.string[:endmatch.start(1)],
restore_backslashes)
textend = matchend + endmatch.end(1)
rawsource = unescape(string[matchstart:textend], 1)
return (string[:matchstart], [nodeclass(rawsource, text)],
string[textend:], [], endmatch.group(1))
msg = self.reporter.warning(
'Inline %s start-string without end-string.'
% nodeclass.__name__, line=lineno)
text = unescape(string[matchstart:matchend], 1)
rawsource = unescape(string[matchstart:matchend], 1)
prb = self.problematic(text, rawsource, msg)
return string[:matchstart], [prb], string[matchend:], [msg], ''
def problematic(self, text, rawsource, message):
msgid = self.document.set_id(message, self.parent)
problematic = nodes.problematic(rawsource, text, refid=msgid)
prbid = self.document.set_id(problematic)
message.add_backref(prbid)
return problematic
def emphasis(self, match, lineno):
before, inlines, remaining, sysmessages, endstring = self.inline_obj(
match, lineno, self.patterns.emphasis, nodes.emphasis)
return before, inlines, remaining, sysmessages
def strong(self, match, lineno):
before, inlines, remaining, sysmessages, endstring = self.inline_obj(
match, lineno, self.patterns.strong, nodes.strong)
return before, inlines, remaining, sysmessages
def interpreted_or_phrase_ref(self, match, lineno):
end_pattern = self.patterns.interpreted_or_phrase_ref
string = match.string
matchstart = match.start('backquote')
matchend = match.end('backquote')
rolestart = match.start('role')
role = match.group('role')
position = ''
if role:
role = role[1:-1]
position = 'prefix'
elif self.quoted_start(match):
return (string[:matchend], [], string[matchend:], [])
endmatch = end_pattern.search(string[matchend:])
if endmatch and endmatch.start(1): # 1 or more chars
textend = matchend + endmatch.end()
if endmatch.group('role'):
if role:
msg = self.reporter.warning(
'Multiple roles in interpreted text (both '
'prefix and suffix present; only one allowed).',
line=lineno)
text = unescape(string[rolestart:textend], 1)
prb = self.problematic(text, text, msg)
return string[:rolestart], [prb], string[textend:], [msg]
role = endmatch.group('suffix')[1:-1]
position = 'suffix'
escaped = endmatch.string[:endmatch.start(1)]
rawsource = unescape(string[matchstart:textend], 1)
if rawsource[-1:] == '_':
if role:
msg = self.reporter.warning(
'Mismatch: both interpreted text role %s and '
'reference suffix.' % position, line=lineno)
text = unescape(string[rolestart:textend], 1)
prb = self.problematic(text, text, msg)
return string[:rolestart], [prb], string[textend:], [msg]
return self.phrase_ref(string[:matchstart], string[textend:],
rawsource, escaped, unescape(escaped))
else:
rawsource = unescape(string[rolestart:textend], 1)
nodelist, messages = self.interpreted(rawsource, escaped, role,
lineno)
return (string[:rolestart], nodelist,
string[textend:], messages)
msg = self.reporter.warning(
'Inline interpreted text or phrase reference start-string '
'without end-string.', line=lineno)
text = unescape(string[matchstart:matchend], 1)
prb = self.problematic(text, text, msg)
return string[:matchstart], [prb], string[matchend:], [msg]
def phrase_ref(self, before, after, rawsource, escaped, text):
match = self.patterns.embedded_uri.search(escaped)
if match:
text = unescape(escaped[:match.start(0)])
uri_text = match.group(2)
uri = ''.join(uri_text.split())
uri = self.adjust_uri(uri)
if uri:
target = nodes.target(match.group(1), refuri=uri)
else:
raise ApplicationError('problem with URI: %r' % uri_text)
if not text:
text = uri
else:
target = None
refname = normalize_name(text)
reference = nodes.reference(rawsource, text,
name=whitespace_normalize_name(text))
node_list = [reference]
if rawsource[-2:] == '__':
if target:
reference['refuri'] = uri
else:
reference['anonymous'] = 1
else:
if target:
reference['refuri'] = uri
target['names'].append(refname)
self.document.note_explicit_target(target, self.parent)
node_list.append(target)
else:
reference['refname'] = refname
self.document.note_refname(reference)
return before, node_list, after, []
def adjust_uri(self, uri):
match = self.patterns.email.match(uri)
if match:
return 'mailto:' + uri
else:
return uri
def interpreted(self, rawsource, text, role, lineno):
role_fn, messages = roles.role(role, self.language, lineno,
self.reporter)
if role_fn:
nodes, messages2 = role_fn(role, rawsource, text, lineno, self)
return nodes, messages + messages2
else:
msg = self.reporter.error(
'Unknown interpreted text role "%s".' % role,
line=lineno)
return ([self.problematic(rawsource, rawsource, msg)],
messages + [msg])
def literal(self, match, lineno):
before, inlines, remaining, sysmessages, endstring = self.inline_obj(
match, lineno, self.patterns.literal, nodes.literal,
restore_backslashes=1)
return before, inlines, remaining, sysmessages
def inline_internal_target(self, match, lineno):
before, inlines, remaining, sysmessages, endstring = self.inline_obj(
match, lineno, self.patterns.target, nodes.target)
if inlines and isinstance(inlines[0], nodes.target):
assert len(inlines) == 1
target = inlines[0]
name = normalize_name(target.astext())
target['names'].append(name)
self.document.note_explicit_target(target, self.parent)
return before, inlines, remaining, sysmessages
def substitution_reference(self, match, lineno):
before, inlines, remaining, sysmessages, endstring = self.inline_obj(
match, lineno, self.patterns.substitution_ref,
nodes.substitution_reference)
if len(inlines) == 1:
subref_node = inlines[0]
if isinstance(subref_node, nodes.substitution_reference):
subref_text = subref_node.astext()
self.document.note_substitution_ref(subref_node, subref_text)
if endstring[-1:] == '_':
reference_node = nodes.reference(
'|%s%s' % (subref_text, endstring), '')
if endstring[-2:] == '__':
reference_node['anonymous'] = 1
else:
reference_node['refname'] = normalize_name(subref_text)
self.document.note_refname(reference_node)
reference_node += subref_node
inlines = [reference_node]
return before, inlines, remaining, sysmessages
def footnote_reference(self, match, lineno):
"""
Handles `nodes.footnote_reference` and `nodes.citation_reference`
elements.
"""
label = match.group('footnotelabel')
refname = normalize_name(label)
string = match.string
before = string[:match.start('whole')]
remaining = string[match.end('whole'):]
if match.group('citationlabel'):
refnode = nodes.citation_reference('[%s]_' % label,
refname=refname)
refnode += nodes.Text(label)
self.document.note_citation_ref(refnode)
else:
refnode = nodes.footnote_reference('[%s]_' % label)
if refname[0] == '#':
refname = refname[1:]
refnode['auto'] = 1
self.document.note_autofootnote_ref(refnode)
elif refname == '*':
refname = ''
refnode['auto'] = '*'
self.document.note_symbol_footnote_ref(
refnode)
else:
refnode += nodes.Text(label)
if refname:
refnode['refname'] = refname
self.document.note_footnote_ref(refnode)
if utils.get_trim_footnote_ref_space(self.document.settings):
before = before.rstrip()
return (before, [refnode], remaining, [])
def reference(self, match, lineno, anonymous=None):
referencename = match.group('refname')
refname = normalize_name(referencename)
referencenode = nodes.reference(
referencename + match.group('refend'), referencename,
name=whitespace_normalize_name(referencename))
if anonymous:
referencenode['anonymous'] = 1
else:
referencenode['refname'] = refname
self.document.note_refname(referencenode)
string = match.string
matchstart = match.start('whole')
matchend = match.end('whole')
return (string[:matchstart], [referencenode], string[matchend:], [])
def anonymous_reference(self, match, lineno):
return self.reference(match, lineno, anonymous=1)
def standalone_uri(self, match, lineno):
if (not match.group('scheme')
or match.group('scheme').lower() in urischemes.schemes):
if match.group('email'):
addscheme = 'mailto:'
else:
addscheme = ''
text = match.group('whole')
unescaped = unescape(text, 0)
return [nodes.reference(unescape(text, 1), unescaped,
refuri=addscheme + unescaped)]
else: # not a valid scheme
raise MarkupMismatch
def pep_reference(self, match, lineno):
text = match.group(0)
if text.startswith('pep-'):
pepnum = int(match.group('pepnum1'))
elif text.startswith('PEP'):
pepnum = int(match.group('pepnum2'))
else:
raise MarkupMismatch
ref = (self.document.settings.pep_base_url
+ self.document.settings.pep_file_url_template % pepnum)
unescaped = unescape(text, 0)
return [nodes.reference(unescape(text, 1), unescaped, refuri=ref)]
rfc_url = 'rfc%d.html'
def rfc_reference(self, match, lineno):
text = match.group(0)
if text.startswith('RFC'):
rfcnum = int(match.group('rfcnum'))
ref = self.document.settings.rfc_base_url + self.rfc_url % rfcnum
else:
raise MarkupMismatch
unescaped = unescape(text, 0)
return [nodes.reference(unescape(text, 1), unescaped, refuri=ref)]
def implicit_inline(self, text, lineno):
"""
Check each of the patterns in `self.implicit_dispatch` for a match,
and dispatch to the stored method for the pattern. Recursively check
the text before and after the match. Return a list of `nodes.Text`
and inline element nodes.
"""
if not text:
return []
for pattern, method in self.implicit_dispatch:
match = pattern.search(text)
if match:
try:
# Must recurse on strings before *and* after the match;
# there may be multiple patterns.
return (self.implicit_inline(text[:match.start()], lineno)
+ method(match, lineno) +
self.implicit_inline(text[match.end():], lineno))
except MarkupMismatch:
pass
return [nodes.Text(unescape(text), rawsource=unescape(text, 1))]
dispatch = {'*': emphasis,
'**': strong,
'`': interpreted_or_phrase_ref,
'``': literal,
'_`': inline_internal_target,
']_': footnote_reference,
'|': substitution_reference,
'_': reference,
'__': anonymous_reference}
def _loweralpha_to_int(s, _zero=(ord('a')-1)):
return ord(s) - _zero
def _upperalpha_to_int(s, _zero=(ord('A')-1)):
return ord(s) - _zero
def _lowerroman_to_int(s):
return roman.fromRoman(s.upper())
class Body(RSTState):
"""
Generic classifier of the first line of a block.
"""
double_width_pad_char = tableparser.TableParser.double_width_pad_char
"""Padding character for East Asian double-width text."""
enum = Struct()
"""Enumerated list parsing information."""
enum.formatinfo = {
'parens': Struct(prefix='(', suffix=')', start=1, end=-1),
'rparen': Struct(prefix='', suffix=')', start=0, end=-1),
'period': Struct(prefix='', suffix='.', start=0, end=-1)}
enum.formats = enum.formatinfo.keys()
enum.sequences = ['arabic', 'loweralpha', 'upperalpha',
'lowerroman', 'upperroman'] # ORDERED!
enum.sequencepats = {'arabic': '[0-9]+',
'loweralpha': '[a-z]',
'upperalpha': '[A-Z]',
'lowerroman': '[ivxlcdm]+',
'upperroman': '[IVXLCDM]+',}
enum.converters = {'arabic': int,
'loweralpha': _loweralpha_to_int,
'upperalpha': _upperalpha_to_int,
'lowerroman': _lowerroman_to_int,
'upperroman': roman.fromRoman}
enum.sequenceregexps = {}
for sequence in enum.sequences:
enum.sequenceregexps[sequence] = re.compile(
enum.sequencepats[sequence] + '$')
grid_table_top_pat = re.compile(r'\+-[-+]+-\+ *$')
"""Matches the top (& bottom) of a full table)."""
simple_table_top_pat = re.compile('=+( +=+)+ *$')
"""Matches the top of a simple table."""
simple_table_border_pat = re.compile('=+[ =]*$')
"""Matches the bottom & header bottom of a simple table."""
pats = {}
"""Fragments of patterns used by transitions."""
pats['nonalphanum7bit'] = '[!-/:-@[-`{-~]'
pats['alpha'] = '[a-zA-Z]'
pats['alphanum'] = '[a-zA-Z0-9]'
pats['alphanumplus'] = '[a-zA-Z0-9_-]'
pats['enum'] = ('(%(arabic)s|%(loweralpha)s|%(upperalpha)s|%(lowerroman)s'
'|%(upperroman)s|#)' % enum.sequencepats)
pats['optname'] = '%(alphanum)s%(alphanumplus)s*' % pats
# @@@ Loosen up the pattern? Allow Unicode?
pats['optarg'] = '(%(alpha)s%(alphanumplus)s*|<[^<>]+>)' % pats
pats['shortopt'] = r'(-|\+)%(alphanum)s( ?%(optarg)s)?' % pats
pats['longopt'] = r'(--|/)%(optname)s([ =]%(optarg)s)?' % pats
pats['option'] = r'(%(shortopt)s|%(longopt)s)' % pats
for format in enum.formats:
pats[format] = '(?P<%s>%s%s%s)' % (
format, re.escape(enum.formatinfo[format].prefix),
pats['enum'], re.escape(enum.formatinfo[format].suffix))
patterns = {
'bullet': u'[-+*\u2022\u2023\u2043]( +|$)',
'enumerator': r'(%(parens)s|%(rparen)s|%(period)s)( +|$)' % pats,
'field_marker': r':(?![: ])([^:\\]|\\.)*(?<! ):( +|$)',
'option_marker': r'%(option)s(, %(option)s)*( +| ?$)' % pats,
'doctest': r'>>>( +|$)',
'line_block': r'\|( +|$)',
'grid_table_top': grid_table_top_pat,
'simple_table_top': simple_table_top_pat,
'explicit_markup': r'\.\.( +|$)',
'anonymous': r'__( +|$)',
'line': r'(%(nonalphanum7bit)s)\1* *$' % pats,
'text': r''}
initial_transitions = (
'bullet',
'enumerator',
'field_marker',
'option_marker',
'doctest',
'line_block',
'grid_table_top',
'simple_table_top',
'explicit_markup',
'anonymous',
'line',
'text')
def indent(self, match, context, next_state):
"""Block quote."""
indented, indent, line_offset, blank_finish = \
self.state_machine.get_indented()
elements = self.block_quote(indented, line_offset)
self.parent += elements
if not blank_finish:
self.parent += self.unindent_warning('Block quote')
return context, next_state, []
def block_quote(self, indented, line_offset):
elements = []
while indented:
(blockquote_lines,
attribution_lines,
attribution_offset,
indented,
new_line_offset) = self.split_attribution(indented, line_offset)
blockquote = nodes.block_quote()
self.nested_parse(blockquote_lines, line_offset, blockquote)
elements.append(blockquote)
if attribution_lines:
attribution, messages = self.parse_attribution(
attribution_lines, attribution_offset)
blockquote += attribution
elements += messages
line_offset = new_line_offset
while indented and not indented[0]:
indented = indented[1:]
line_offset += 1
return elements
# U+2014 is an em-dash:
attribution_pattern = re.compile(u'(---?(?!-)|\u2014) *(?=[^ \\n])')
def split_attribution(self, indented, line_offset):
"""
Check for a block quote attribution and split it off:
* First line after a blank line must begin with a dash ("--", "---",
em-dash; matches `self.attribution_pattern`).
* Every line after that must have consistent indentation.
* Attributions must be preceded by block quote content.
Return a tuple of: (block quote content lines, content offset,
attribution lines, attribution offset, remaining indented lines).
"""
blank = None
nonblank_seen = False
for i in range(len(indented)):
line = indented[i].rstrip()
if line:
if nonblank_seen and blank == i - 1: # last line blank
match = self.attribution_pattern.match(line)
if match:
attribution_end, indent = self.check_attribution(
indented, i)
if attribution_end:
a_lines = indented[i:attribution_end]
a_lines.trim_left(match.end(), end=1)
a_lines.trim_left(indent, start=1)
return (indented[:i], a_lines,
i, indented[attribution_end:],
line_offset + attribution_end)
nonblank_seen = True
else:
blank = i
else:
return (indented, None, None, None, None)
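    # Example of a block quote with an attribution, as recognised above
    # (illustrative)::
    #
    #     This is the quoted paragraph.
    #
    #     -- Attribution Name
    #
    # The final line is split off as the attribution; everything before the
    # blank line stays block-quote content.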
def check_attribution(self, indented, attribution_start):
"""
Check attribution shape.
Return the index past the end of the attribution, and the indent.
"""
indent = None
i = attribution_start + 1
for i in range(attribution_start + 1, len(indented)):
line = indented[i].rstrip()
if not line:
break
if indent is None:
indent = len(line) - len(line.lstrip())
elif len(line) - len(line.lstrip()) != indent:
return None, None # bad shape; not an attribution
else:
# return index of line after last attribution line:
i += 1
return i, (indent or 0)
def parse_attribution(self, indented, line_offset):
text = '\n'.join(indented).rstrip()
lineno = self.state_machine.abs_line_number() + line_offset
textnodes, messages = self.inline_text(text, lineno)
node = nodes.attribution(text, '', *textnodes)
node.line = lineno
return node, messages
def bullet(self, match, context, next_state):
"""Bullet list item."""
bulletlist = nodes.bullet_list()
self.parent += bulletlist
bulletlist['bullet'] = match.string[0]
i, blank_finish = self.list_item(match.end())
bulletlist += i
offset = self.state_machine.line_offset + 1 # next line
new_line_offset, blank_finish = self.nested_list_parse(
self.state_machine.input_lines[offset:],
input_offset=self.state_machine.abs_line_offset() + 1,
node=bulletlist, initial_state='BulletList',
blank_finish=blank_finish)
self.goto_line(new_line_offset)
if not blank_finish:
self.parent += self.unindent_warning('Bullet list')
return [], next_state, []
def list_item(self, indent):
if self.state_machine.line[indent:]:
indented, line_offset, blank_finish = (
self.state_machine.get_known_indented(indent))
else:
indented, indent, line_offset, blank_finish = (
self.state_machine.get_first_known_indented(indent))
listitem = nodes.list_item('\n'.join(indented))
if indented:
self.nested_parse(indented, input_offset=line_offset,
node=listitem)
return listitem, blank_finish
def enumerator(self, match, context, next_state):
"""Enumerated List Item"""
format, sequence, text, ordinal = self.parse_enumerator(match)
if not self.is_enumerated_list_item(ordinal, sequence, format):
raise statemachine.TransitionCorrection('text')
enumlist = nodes.enumerated_list()
self.parent += enumlist
if sequence == '#':
enumlist['enumtype'] = 'arabic'
else:
enumlist['enumtype'] = sequence
enumlist['prefix'] = self.enum.formatinfo[format].prefix
enumlist['suffix'] = self.enum.formatinfo[format].suffix
if ordinal != 1:
enumlist['start'] = ordinal
msg = self.reporter.info(
'Enumerated list start value not ordinal-1: "%s" (ordinal %s)'
% (text, ordinal), line=self.state_machine.abs_line_number())
self.parent += msg
listitem, blank_finish = self.list_item(match.end())
enumlist += listitem
offset = self.state_machine.line_offset + 1 # next line
newline_offset, blank_finish = self.nested_list_parse(
self.state_machine.input_lines[offset:],
input_offset=self.state_machine.abs_line_offset() + 1,
node=enumlist, initial_state='EnumeratedList',
blank_finish=blank_finish,
extra_settings={'lastordinal': ordinal,
'format': format,
'auto': sequence == '#'})
self.goto_line(newline_offset)
if not blank_finish:
self.parent += self.unindent_warning('Enumerated list')
return [], next_state, []
def parse_enumerator(self, match, expected_sequence=None):
"""
Analyze an enumerator and return the results.
:Return:
- the enumerator format ('period', 'parens', or 'rparen'),
- the sequence used ('arabic', 'loweralpha', 'upperroman', etc.),
- the text of the enumerator, stripped of formatting, and
- the ordinal value of the enumerator ('a' -> 1, 'ii' -> 2, etc.;
``None`` is returned for invalid enumerator text).
The enumerator format has already been determined by the regular
expression match. If `expected_sequence` is given, that sequence is
tried first. If not, we check for Roman numeral 1. This way,
single-character Roman numerals (which are also alphabetical) can be
matched. If no sequence has been matched, all sequences are checked in
order.
"""
groupdict = match.groupdict()
sequence = ''
for format in self.enum.formats:
if groupdict[format]: # was this the format matched?
break # yes; keep `format`
else: # shouldn't happen
raise ParserError('enumerator format not matched')
text = groupdict[format][self.enum.formatinfo[format].start
:self.enum.formatinfo[format].end]
if text == '#':
sequence = '#'
elif expected_sequence:
try:
if self.enum.sequenceregexps[expected_sequence].match(text):
sequence = expected_sequence
except KeyError: # shouldn't happen
raise ParserError('unknown enumerator sequence: %s'
% sequence)
elif text == 'i':
sequence = 'lowerroman'
elif text == 'I':
sequence = 'upperroman'
if not sequence:
for sequence in self.enum.sequences:
if self.enum.sequenceregexps[sequence].match(text):
break
else: # shouldn't happen
raise ParserError('enumerator sequence not matched')
if sequence == '#':
ordinal = 1
else:
try:
ordinal = self.enum.converters[sequence](text)
except roman.InvalidRomanNumeralError:
ordinal = None
return format, sequence, text, ordinal
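    # For example (illustrative), parse_enumerator() applied to the enumerator
    # "(ii)" returns ('parens', 'lowerroman', 'ii', 2).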
def is_enumerated_list_item(self, ordinal, sequence, format):
"""
Check validity based on the ordinal value and the second line.
Return true if the ordinal is valid and the second line is blank,
indented, or starts with the next enumerator or an auto-enumerator.
"""
if ordinal is None:
return None
try:
next_line = self.state_machine.next_line()
except EOFError: # end of input lines
self.state_machine.previous_line()
return 1
else:
self.state_machine.previous_line()
if not next_line[:1].strip(): # blank or indented
return 1
result = self.make_enumerator(ordinal + 1, sequence, format)
if result:
next_enumerator, auto_enumerator = result
try:
if ( next_line.startswith(next_enumerator) or
next_line.startswith(auto_enumerator) ):
return 1
except TypeError:
pass
return None
def make_enumerator(self, ordinal, sequence, format):
"""
Construct and return the next enumerated list item marker, and an
auto-enumerator ("#" instead of the regular enumerator).
Return ``None`` for invalid (out of range) ordinals.
""" #"
if sequence == '#':
enumerator = '#'
elif sequence == 'arabic':
enumerator = str(ordinal)
else:
if sequence.endswith('alpha'):
if ordinal > 26:
return None
enumerator = chr(ordinal + ord('a') - 1)
elif sequence.endswith('roman'):
try:
enumerator = roman.toRoman(ordinal)
except roman.RomanError:
return None
else: # shouldn't happen
raise ParserError('unknown enumerator sequence: "%s"'
% sequence)
if sequence.startswith('lower'):
enumerator = enumerator.lower()
elif sequence.startswith('upper'):
enumerator = enumerator.upper()
else: # shouldn't happen
raise ParserError('unknown enumerator sequence: "%s"'
% sequence)
formatinfo = self.enum.formatinfo[format]
next_enumerator = (formatinfo.prefix + enumerator + formatinfo.suffix
+ ' ')
auto_enumerator = formatinfo.prefix + '#' + formatinfo.suffix + ' '
return next_enumerator, auto_enumerator
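    # Example (editorial annotation): assuming the 'parens' format uses "(" and
    # ")" as prefix/suffix, make_enumerator(3, 'loweralpha', 'parens') returns
    # ('(c) ', '(#) '); an out-of-range ordinal such as 27 for an alphabetic
    # sequence returns None.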
def field_marker(self, match, context, next_state):
"""Field list item."""
field_list = nodes.field_list()
self.parent += field_list
field, blank_finish = self.field(match)
field_list += field
offset = self.state_machine.line_offset + 1 # next line
newline_offset, blank_finish = self.nested_list_parse(
self.state_machine.input_lines[offset:],
input_offset=self.state_machine.abs_line_offset() + 1,
node=field_list, initial_state='FieldList',
blank_finish=blank_finish)
self.goto_line(newline_offset)
if not blank_finish:
self.parent += self.unindent_warning('Field list')
return [], next_state, []
def field(self, match):
name = self.parse_field_marker(match)
lineno = self.state_machine.abs_line_number()
indented, indent, line_offset, blank_finish = \
self.state_machine.get_first_known_indented(match.end())
field_node = nodes.field()
field_node.line = lineno
name_nodes, name_messages = self.inline_text(name, lineno)
field_node += nodes.field_name(name, '', *name_nodes)
field_body = nodes.field_body('\n'.join(indented), *name_messages)
field_node += field_body
if indented:
self.parse_field_body(indented, line_offset, field_body)
return field_node, blank_finish
def parse_field_marker(self, match):
"""Extract & return field name from a field marker match."""
field = match.group()[1:] # strip off leading ':'
field = field[:field.rfind(':')] # strip off trailing ':' etc.
return field
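    # Example (editorial annotation): for a field marker such as ":Version:" the
    # match covers ':Version: '; stripping the leading and trailing colons
    # yields the field name 'Version'.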
def parse_field_body(self, indented, offset, node):
self.nested_parse(indented, input_offset=offset, node=node)
def option_marker(self, match, context, next_state):
"""Option list item."""
optionlist = nodes.option_list()
try:
listitem, blank_finish = self.option_list_item(match)
except MarkupError, (message, lineno):
# This shouldn't happen; pattern won't match.
msg = self.reporter.error(
'Invalid option list marker: %s' % message, line=lineno)
self.parent += msg
indented, indent, line_offset, blank_finish = \
self.state_machine.get_first_known_indented(match.end())
elements = self.block_quote(indented, line_offset)
self.parent += elements
if not blank_finish:
self.parent += self.unindent_warning('Option list')
return [], next_state, []
self.parent += optionlist
optionlist += listitem
offset = self.state_machine.line_offset + 1 # next line
newline_offset, blank_finish = self.nested_list_parse(
self.state_machine.input_lines[offset:],
input_offset=self.state_machine.abs_line_offset() + 1,
node=optionlist, initial_state='OptionList',
blank_finish=blank_finish)
self.goto_line(newline_offset)
if not blank_finish:
self.parent += self.unindent_warning('Option list')
return [], next_state, []
def option_list_item(self, match):
offset = self.state_machine.abs_line_offset()
options = self.parse_option_marker(match)
indented, indent, line_offset, blank_finish = \
self.state_machine.get_first_known_indented(match.end())
if not indented: # not an option list item
self.goto_line(offset)
raise statemachine.TransitionCorrection('text')
option_group = nodes.option_group('', *options)
description = nodes.description('\n'.join(indented))
option_list_item = nodes.option_list_item('', option_group,
description)
if indented:
self.nested_parse(indented, input_offset=line_offset,
node=description)
return option_list_item, blank_finish
def parse_option_marker(self, match):
"""
Return a list of `node.option` and `node.option_argument` objects,
parsed from an option marker match.
:Exception: `MarkupError` for invalid option markers.
"""
optlist = []
optionstrings = match.group().rstrip().split(', ')
for optionstring in optionstrings:
tokens = optionstring.split()
delimiter = ' '
firstopt = tokens[0].split('=')
if len(firstopt) > 1:
# "--opt=value" form
tokens[:1] = firstopt
delimiter = '='
elif (len(tokens[0]) > 2
and ((tokens[0].startswith('-')
and not tokens[0].startswith('--'))
or tokens[0].startswith('+'))):
# "-ovalue" form
tokens[:1] = [tokens[0][:2], tokens[0][2:]]
delimiter = ''
if len(tokens) > 1 and (tokens[1].startswith('<')
and tokens[-1].endswith('>')):
# "-o <value1 value2>" form; join all values into one token
tokens[1:] = [' '.join(tokens[1:])]
if 0 < len(tokens) <= 2:
option = nodes.option(optionstring)
option += nodes.option_string(tokens[0], tokens[0])
if len(tokens) > 1:
option += nodes.option_argument(tokens[1], tokens[1],
delimiter=delimiter)
optlist.append(option)
else:
raise MarkupError(
'wrong number of option tokens (=%s), should be 1 or 2: '
'"%s"' % (len(tokens), optionstring),
self.state_machine.abs_line_number() + 1)
return optlist
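    # Example (editorial annotation): the marker "-a FILE, --all=FILE" yields two
    # option nodes -- one with option_string '-a' and option_argument 'FILE'
    # (delimiter ' '), and one with option_string '--all' and option_argument
    # 'FILE' (delimiter '=').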
def doctest(self, match, context, next_state):
data = '\n'.join(self.state_machine.get_text_block())
self.parent += nodes.doctest_block(data, data)
return [], next_state, []
def line_block(self, match, context, next_state):
"""First line of a line block."""
block = nodes.line_block()
self.parent += block
lineno = self.state_machine.abs_line_number()
line, messages, blank_finish = self.line_block_line(match, lineno)
block += line
self.parent += messages
if not blank_finish:
offset = self.state_machine.line_offset + 1 # next line
new_line_offset, blank_finish = self.nested_list_parse(
self.state_machine.input_lines[offset:],
input_offset=self.state_machine.abs_line_offset() + 1,
node=block, initial_state='LineBlock',
blank_finish=0)
self.goto_line(new_line_offset)
if not blank_finish:
self.parent += self.reporter.warning(
'Line block ends without a blank line.',
line=(self.state_machine.abs_line_number() + 1))
if len(block):
if block[0].indent is None:
block[0].indent = 0
self.nest_line_block_lines(block)
return [], next_state, []
def line_block_line(self, match, lineno):
"""Return one line element of a line_block."""
indented, indent, line_offset, blank_finish = \
self.state_machine.get_first_known_indented(match.end(),
until_blank=1)
text = u'\n'.join(indented)
text_nodes, messages = self.inline_text(text, lineno)
line = nodes.line(text, '', *text_nodes)
if match.string.rstrip() != '|': # not empty
line.indent = len(match.group(1)) - 1
return line, messages, blank_finish
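    # Example (editorial annotation): for "|   some verse" the whitespace
    # captured after '|' is three spaces, so line.indent becomes 2; a bare "|"
    # line is left without an explicit indent, which is filled in later by
    # line_block() / nest_line_block_lines().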
def nest_line_block_lines(self, block):
for index in range(1, len(block)):
if block[index].indent is None:
block[index].indent = block[index - 1].indent
self.nest_line_block_segment(block)
def nest_line_block_segment(self, block):
indents = [item.indent for item in block]
least = min(indents)
new_items = []
new_block = nodes.line_block()
for item in block:
if item.indent > least:
new_block.append(item)
else:
if len(new_block):
self.nest_line_block_segment(new_block)
new_items.append(new_block)
new_block = nodes.line_block()
new_items.append(item)
if len(new_block):
self.nest_line_block_segment(new_block)
new_items.append(new_block)
block[:] = new_items
def grid_table_top(self, match, context, next_state):
"""Top border of a full table."""
return self.table_top(match, context, next_state,
self.isolate_grid_table,
tableparser.GridTableParser)
def simple_table_top(self, match, context, next_state):
"""Top border of a simple table."""
return self.table_top(match, context, next_state,
self.isolate_simple_table,
tableparser.SimpleTableParser)
def table_top(self, match, context, next_state,
isolate_function, parser_class):
"""Top border of a generic table."""
nodelist, blank_finish = self.table(isolate_function, parser_class)
self.parent += nodelist
if not blank_finish:
msg = self.reporter.warning(
'Blank line required after table.',
line=self.state_machine.abs_line_number() + 1)
self.parent += msg
return [], next_state, []
def table(self, isolate_function, parser_class):
"""Parse a table."""
block, messages, blank_finish = isolate_function()
if block:
try:
parser = parser_class()
tabledata = parser.parse(block)
tableline = (self.state_machine.abs_line_number() - len(block)
+ 1)
table = self.build_table(tabledata, tableline)
nodelist = [table] + messages
except tableparser.TableMarkupError, detail:
nodelist = self.malformed_table(
block, ' '.join(detail.args)) + messages
else:
nodelist = messages
return nodelist, blank_finish
def isolate_grid_table(self):
messages = []
blank_finish = 1
try:
block = self.state_machine.get_text_block(flush_left=1)
except statemachine.UnexpectedIndentationError, instance:
block, source, lineno = instance.args
messages.append(self.reporter.error('Unexpected indentation.',
source=source, line=lineno))
blank_finish = 0
block.disconnect()
# for East Asian chars:
block.pad_double_width(self.double_width_pad_char)
width = len(block[0].strip())
for i in range(len(block)):
block[i] = block[i].strip()
if block[i][0] not in '+|': # check left edge
blank_finish = 0
self.state_machine.previous_line(len(block) - i)
del block[i:]
break
if not self.grid_table_top_pat.match(block[-1]): # find bottom
blank_finish = 0
# from second-last to third line of table:
for i in range(len(block) - 2, 1, -1):
if self.grid_table_top_pat.match(block[i]):
self.state_machine.previous_line(len(block) - i + 1)
del block[i+1:]
break
else:
messages.extend(self.malformed_table(block))
return [], messages, blank_finish
for i in range(len(block)): # check right edge
if len(block[i]) != width or block[i][-1] not in '+|':
messages.extend(self.malformed_table(block))
return [], messages, blank_finish
return block, messages, blank_finish
def isolate_simple_table(self):
start = self.state_machine.line_offset
lines = self.state_machine.input_lines
limit = len(lines) - 1
toplen = len(lines[start].strip())
pattern_match = self.simple_table_border_pat.match
found = 0
found_at = None
i = start + 1
while i <= limit:
line = lines[i]
match = pattern_match(line)
if match:
if len(line.strip()) != toplen:
self.state_machine.next_line(i - start)
messages = self.malformed_table(
lines[start:i+1], 'Bottom/header table border does '
'not match top border.')
return [], messages, i == limit or not lines[i+1].strip()
found += 1
found_at = i
if found == 2 or i == limit or not lines[i+1].strip():
end = i
break
i += 1
else: # reached end of input_lines
if found:
extra = ' or no blank line after table bottom'
self.state_machine.next_line(found_at - start)
block = lines[start:found_at+1]
else:
extra = ''
self.state_machine.next_line(i - start - 1)
block = lines[start:]
messages = self.malformed_table(
block, 'No bottom table border found%s.' % extra)
return [], messages, not extra
self.state_machine.next_line(end - start)
block = lines[start:end+1]
# for East Asian chars:
block.pad_double_width(self.double_width_pad_char)
return block, [], end == limit or not lines[end+1].strip()
def malformed_table(self, block, detail=''):
block.replace(self.double_width_pad_char, '')
data = '\n'.join(block)
message = 'Malformed table.'
lineno = self.state_machine.abs_line_number() - len(block) + 1
if detail:
message += '\n' + detail
error = self.reporter.error(message, nodes.literal_block(data, data),
line=lineno)
return [error]
def build_table(self, tabledata, tableline, stub_columns=0):
colwidths, headrows, bodyrows = tabledata
table = nodes.table()
tgroup = nodes.tgroup(cols=len(colwidths))
table += tgroup
for colwidth in colwidths:
colspec = nodes.colspec(colwidth=colwidth)
if stub_columns:
colspec.attributes['stub'] = 1
stub_columns -= 1
tgroup += colspec
if headrows:
thead = nodes.thead()
tgroup += thead
for row in headrows:
thead += self.build_table_row(row, tableline)
tbody = nodes.tbody()
tgroup += tbody
for row in bodyrows:
tbody += self.build_table_row(row, tableline)
return table
def build_table_row(self, rowdata, tableline):
row = nodes.row()
for cell in rowdata:
if cell is None:
continue
morerows, morecols, offset, cellblock = cell
attributes = {}
if morerows:
attributes['morerows'] = morerows
if morecols:
attributes['morecols'] = morecols
entry = nodes.entry(**attributes)
row += entry
if ''.join(cellblock):
self.nested_parse(cellblock, input_offset=tableline+offset,
node=entry)
return row
explicit = Struct()
"""Patterns and constants used for explicit markup recognition."""
explicit.patterns = Struct(
target=re.compile(r"""
(
_ # anonymous target
| # *OR*
(?!_) # no underscore at the beginning
(?P<quote>`?) # optional open quote
(?![ `]) # first char. not space or
# backquote
(?P<name> # reference name
.+?
)
%(non_whitespace_escape_before)s
(?P=quote) # close quote if open quote used
)
(?<!(?<!\x00):) # no unescaped colon at end
%(non_whitespace_escape_before)s
[ ]? # optional space
: # end of reference name
([ ]+|$) # followed by whitespace
""" % vars(Inliner), re.VERBOSE),
reference=re.compile(r"""
(
(?P<simple>%(simplename)s)_
| # *OR*
` # open backquote
(?![ ]) # not space
(?P<phrase>.+?) # hyperlink phrase
%(non_whitespace_escape_before)s
`_ # close backquote,
# reference mark
)
$ # end of string
""" % vars(Inliner), re.VERBOSE | re.UNICODE),
substitution=re.compile(r"""
(
(?![ ]) # first char. not space
(?P<name>.+?) # substitution text
%(non_whitespace_escape_before)s
\| # close delimiter
)
([ ]+|$) # followed by whitespace
""" % vars(Inliner), re.VERBOSE),)
def footnote(self, match):
lineno = self.state_machine.abs_line_number()
indented, indent, offset, blank_finish = \
self.state_machine.get_first_known_indented(match.end())
label = match.group(1)
name = normalize_name(label)
footnote = nodes.footnote('\n'.join(indented))
footnote.line = lineno
if name[0] == '#': # auto-numbered
name = name[1:] # autonumber label
footnote['auto'] = 1
if name:
footnote['names'].append(name)
self.document.note_autofootnote(footnote)
elif name == '*': # auto-symbol
name = ''
footnote['auto'] = '*'
self.document.note_symbol_footnote(footnote)
else: # manually numbered
footnote += nodes.label('', label)
footnote['names'].append(name)
self.document.note_footnote(footnote)
if name:
self.document.note_explicit_target(footnote, footnote)
else:
self.document.set_id(footnote, footnote)
if indented:
self.nested_parse(indented, input_offset=offset, node=footnote)
return [footnote], blank_finish
def citation(self, match):
lineno = self.state_machine.abs_line_number()
indented, indent, offset, blank_finish = \
self.state_machine.get_first_known_indented(match.end())
label = match.group(1)
name = normalize_name(label)
citation = nodes.citation('\n'.join(indented))
citation.line = lineno
citation += nodes.label('', label)
citation['names'].append(name)
self.document.note_citation(citation)
self.document.note_explicit_target(citation, citation)
if indented:
self.nested_parse(indented, input_offset=offset, node=citation)
return [citation], blank_finish
def hyperlink_target(self, match):
pattern = self.explicit.patterns.target
lineno = self.state_machine.abs_line_number()
block, indent, offset, blank_finish = \
self.state_machine.get_first_known_indented(
match.end(), until_blank=1, strip_indent=0)
blocktext = match.string[:match.end()] + '\n'.join(block)
block = [escape2null(line) for line in block]
escaped = block[0]
blockindex = 0
while 1:
targetmatch = pattern.match(escaped)
if targetmatch:
break
blockindex += 1
try:
escaped += block[blockindex]
except IndexError:
raise MarkupError('malformed hyperlink target.', lineno)
del block[:blockindex]
block[0] = (block[0] + ' ')[targetmatch.end()-len(escaped)-1:].strip()
target = self.make_target(block, blocktext, lineno,
targetmatch.group('name'))
return [target], blank_finish
def make_target(self, block, block_text, lineno, target_name):
target_type, data = self.parse_target(block, block_text, lineno)
if target_type == 'refname':
target = nodes.target(block_text, '', refname=normalize_name(data))
target.indirect_reference_name = data
self.add_target(target_name, '', target, lineno)
self.document.note_indirect_target(target)
return target
elif target_type == 'refuri':
target = nodes.target(block_text, '')
self.add_target(target_name, data, target, lineno)
return target
else:
return data
def parse_target(self, block, block_text, lineno):
"""
Determine the type of reference of a target.
:Return: A 2-tuple, one of:
- 'refname' and the indirect reference name
- 'refuri' and the URI
- 'malformed' and a system_message node
"""
if block and block[-1].strip()[-1:] == '_': # possible indirect target
reference = ' '.join([line.strip() for line in block])
refname = self.is_reference(reference)
if refname:
return 'refname', refname
reference = ''.join([''.join(line.split()) for line in block])
return 'refuri', unescape(reference)
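    # Examples (editorial annotation): a target block of
    # ['http://www.python.org/'] is classified as
    # ('refuri', 'http://www.python.org/'); a block whose text ends in an
    # underscore, e.g. ['other-target_'], is an indirect target and is
    # classified as ('refname', 'other-target').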
def is_reference(self, reference):
match = self.explicit.patterns.reference.match(
whitespace_normalize_name(reference))
if not match:
return None
return unescape(match.group('simple') or match.group('phrase'))
def add_target(self, targetname, refuri, target, lineno):
target.line = lineno
if targetname:
name = normalize_name(unescape(targetname))
target['names'].append(name)
if refuri:
uri = self.inliner.adjust_uri(refuri)
if uri:
target['refuri'] = uri
else:
raise ApplicationError('problem with URI: %r' % refuri)
self.document.note_explicit_target(target, self.parent)
else: # anonymous target
if refuri:
target['refuri'] = refuri
target['anonymous'] = 1
self.document.note_anonymous_target(target)
def substitution_def(self, match):
pattern = self.explicit.patterns.substitution
lineno = self.state_machine.abs_line_number()
block, indent, offset, blank_finish = \
self.state_machine.get_first_known_indented(match.end(),
strip_indent=0)
blocktext = (match.string[:match.end()] + '\n'.join(block))
block.disconnect()
escaped = escape2null(block[0].rstrip())
blockindex = 0
while 1:
subdefmatch = pattern.match(escaped)
if subdefmatch:
break
blockindex += 1
try:
escaped = escaped + ' ' + escape2null(block[blockindex].strip())
except IndexError:
raise MarkupError('malformed substitution definition.',
lineno)
del block[:blockindex] # strip out the substitution marker
block[0] = (block[0].strip() + ' ')[subdefmatch.end()-len(escaped)-1:-1]
if not block[0]:
del block[0]
offset += 1
while block and not block[-1].strip():
block.pop()
subname = subdefmatch.group('name')
substitution_node = nodes.substitution_definition(blocktext)
substitution_node.line = lineno
if not block:
msg = self.reporter.warning(
'Substitution definition "%s" missing contents.' % subname,
nodes.literal_block(blocktext, blocktext), line=lineno)
return [msg], blank_finish
block[0] = block[0].strip()
substitution_node['names'].append(
nodes.whitespace_normalize_name(subname))
new_abs_offset, blank_finish = self.nested_list_parse(
block, input_offset=offset, node=substitution_node,
initial_state='SubstitutionDef', blank_finish=blank_finish)
i = 0
for node in substitution_node[:]:
if not (isinstance(node, nodes.Inline) or
isinstance(node, nodes.Text)):
self.parent += substitution_node[i]
del substitution_node[i]
else:
i += 1
for node in substitution_node.traverse(nodes.Element):
if self.disallowed_inside_substitution_definitions(node):
pformat = nodes.literal_block('', node.pformat().rstrip())
msg = self.reporter.error(
'Substitution definition contains illegal element:',
pformat, nodes.literal_block(blocktext, blocktext),
line=lineno)
return [msg], blank_finish
if len(substitution_node) == 0:
msg = self.reporter.warning(
'Substitution definition "%s" empty or invalid.'
% subname,
nodes.literal_block(blocktext, blocktext), line=lineno)
return [msg], blank_finish
self.document.note_substitution_def(
substitution_node, subname, self.parent)
return [substitution_node], blank_finish
def disallowed_inside_substitution_definitions(self, node):
if (node['ids'] or
isinstance(node, nodes.reference) and node.get('anonymous') or
isinstance(node, nodes.footnote_reference) and node.get('auto')):
return 1
else:
return 0
def directive(self, match, **option_presets):
"""Returns a 2-tuple: list of nodes, and a "blank finish" boolean."""
type_name = match.group(1)
directive_class, messages = directives.directive(
type_name, self.memo.language, self.document)
self.parent += messages
if directive_class:
return self.run_directive(
directive_class, match, type_name, option_presets)
else:
return self.unknown_directive(type_name)
def run_directive(self, directive, match, type_name, option_presets):
"""
Parse a directive then run its directive function.
Parameters:
- `directive`: The class implementing the directive. Must be
a subclass of `rst.Directive`.
- `match`: A regular expression match object which matched the first
line of the directive.
- `type_name`: The directive name, as used in the source text.
- `option_presets`: A dictionary of preset options, defaults for the
directive options. Currently, only an "alt" option is passed by
substitution definitions (value: the substitution name), which may
be used by an embedded image directive.
Returns a 2-tuple: list of nodes, and a "blank finish" boolean.
"""
if isinstance(directive, (FunctionType, MethodType)):
from docutils.parsers.rst import convert_directive_function
directive = convert_directive_function(directive)
lineno = self.state_machine.abs_line_number()
initial_line_offset = self.state_machine.line_offset
indented, indent, line_offset, blank_finish \
= self.state_machine.get_first_known_indented(match.end(),
strip_top=0)
block_text = '\n'.join(self.state_machine.input_lines[
initial_line_offset : self.state_machine.line_offset + 1])
try:
arguments, options, content, content_offset = (
self.parse_directive_block(indented, line_offset,
directive, option_presets))
except MarkupError, detail:
error = self.reporter.error(
'Error in "%s" directive:\n%s.' % (type_name,
' '.join(detail.args)),
nodes.literal_block(block_text, block_text), line=lineno)
return [error], blank_finish
directive_instance = directive(
type_name, arguments, options, content, lineno,
content_offset, block_text, self, self.state_machine)
try:
result = directive_instance.run()
except docutils.parsers.rst.DirectiveError, error:
msg_node = self.reporter.system_message(error.level, error.msg,
source=error.source, line=error.line)
msg_node += nodes.literal_block(block_text, block_text)
msg_node['line'] = lineno
result = [msg_node]
assert isinstance(result, list), \
'Directive "%s" must return a list of nodes.' % type_name
for i in range(len(result)):
assert isinstance(result[i], nodes.Node), \
('Directive "%s" returned non-Node object (index %s): %r'
% (type_name, i, result[i]))
return (result,
blank_finish or self.state_machine.is_next_line_blank())
def parse_directive_block(self, indented, line_offset, directive,
option_presets):
option_spec = directive.option_spec
has_content = directive.has_content
if indented and not indented[0].strip():
indented.trim_start()
line_offset += 1
while indented and not indented[-1].strip():
indented.trim_end()
if indented and (directive.required_arguments
or directive.optional_arguments
or option_spec):
for i in range(len(indented)):
if not indented[i].strip():
break
else:
i += 1
arg_block = indented[:i]
content = indented[i+1:]
content_offset = line_offset + i + 1
else:
content = indented
content_offset = line_offset
arg_block = []
while content and not content[0].strip():
content.trim_start()
content_offset += 1
if option_spec:
options, arg_block = self.parse_directive_options(
option_presets, option_spec, arg_block)
if arg_block and not (directive.required_arguments
or directive.optional_arguments):
raise MarkupError('no arguments permitted; blank line '
'required before content block')
else:
options = {}
if directive.required_arguments or directive.optional_arguments:
arguments = self.parse_directive_arguments(
directive, arg_block)
else:
arguments = []
if content and not has_content:
raise MarkupError('no content permitted')
return (arguments, options, content, content_offset)
def parse_directive_options(self, option_presets, option_spec, arg_block):
options = option_presets.copy()
for i in range(len(arg_block)):
if arg_block[i][:1] == ':':
opt_block = arg_block[i:]
arg_block = arg_block[:i]
break
else:
opt_block = []
if opt_block:
success, data = self.parse_extension_options(option_spec,
opt_block)
if success: # data is a dict of options
options.update(data)
else: # data is an error string
raise MarkupError(data)
return options, arg_block
def parse_directive_arguments(self, directive, arg_block):
required = directive.required_arguments
optional = directive.optional_arguments
arg_text = '\n'.join(arg_block)
arguments = arg_text.split()
if len(arguments) < required:
raise MarkupError('%s argument(s) required, %s supplied'
% (required, len(arguments)))
elif len(arguments) > required + optional:
if directive.final_argument_whitespace:
arguments = arg_text.split(None, required + optional - 1)
else:
raise MarkupError(
'maximum %s argument(s) allowed, %s supplied'
% (required + optional, len(arguments)))
return arguments
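    # Example (editorial annotation): for a hypothetical directive with
    # required_arguments=1, optional_arguments=1 and final_argument_whitespace
    # set, the argument block "one two three" is split into ['one', 'two three'];
    # without final_argument_whitespace the same block raises MarkupError.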
def parse_extension_options(self, option_spec, datalines):
"""
Parse `datalines` for a field list containing extension options
matching `option_spec`.
:Parameters:
- `option_spec`: a mapping of option name to conversion
function, which should raise an exception on bad input.
- `datalines`: a list of input strings.
:Return:
- Success value, 1 or 0.
- An option dictionary on success, an error string on failure.
"""
node = nodes.field_list()
newline_offset, blank_finish = self.nested_list_parse(
datalines, 0, node, initial_state='ExtensionOptions',
blank_finish=1)
if newline_offset != len(datalines): # incomplete parse of block
return 0, 'invalid option block'
try:
options = utils.extract_extension_options(node, option_spec)
except KeyError, detail:
return 0, ('unknown option: "%s"' % detail.args[0])
except (ValueError, TypeError), detail:
return 0, ('invalid option value: %s' % ' '.join(detail.args))
except utils.ExtensionOptionError, detail:
return 0, ('invalid option data: %s' % ' '.join(detail.args))
if blank_finish:
return 1, options
else:
return 0, 'option data incompletely parsed'
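    # Example (editorial annotation, roughly): with option_spec {'level': int}
    # and datalines [':level: 2'], this returns (1, {'level': 2}); an
    # unrecognized field such as ':bogus: x' instead returns
    # (0, 'unknown option: "bogus"').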
def unknown_directive(self, type_name):
lineno = self.state_machine.abs_line_number()
indented, indent, offset, blank_finish = \
self.state_machine.get_first_known_indented(0, strip_indent=0)
text = '\n'.join(indented)
error = self.reporter.error(
'Unknown directive type "%s".' % type_name,
nodes.literal_block(text, text), line=lineno)
return [error], blank_finish
def comment(self, match):
if not match.string[match.end():].strip() \
and self.state_machine.is_next_line_blank(): # an empty comment?
return [nodes.comment()], 1 # "A tiny but practical wart."
indented, indent, offset, blank_finish = \
self.state_machine.get_first_known_indented(match.end())
while indented and not indented[-1].strip():
indented.trim_end()
text = '\n'.join(indented)
return [nodes.comment(text, text)], blank_finish
explicit.constructs = [
(footnote,
re.compile(r"""
\.\.[ ]+ # explicit markup start
\[
( # footnote label:
[0-9]+ # manually numbered footnote
| # *OR*
\# # anonymous auto-numbered footnote
| # *OR*
                          \#%s # auto-numbered footnote with label
| # *OR*
\* # auto-symbol footnote
)
\]
([ ]+|$) # whitespace or end of line
""" % Inliner.simplename, re.VERBOSE | re.UNICODE)),
(citation,
re.compile(r"""
\.\.[ ]+ # explicit markup start
\[(%s)\] # citation label
([ ]+|$) # whitespace or end of line
""" % Inliner.simplename, re.VERBOSE | re.UNICODE)),
(hyperlink_target,
re.compile(r"""
\.\.[ ]+ # explicit markup start
_ # target indicator
(?![ ]|$) # first char. not space or EOL
""", re.VERBOSE)),
(substitution_def,
re.compile(r"""
\.\.[ ]+ # explicit markup start
\| # substitution indicator
(?![ ]|$) # first char. not space or EOL
""", re.VERBOSE)),
(directive,
re.compile(r"""
\.\.[ ]+ # explicit markup start
(%s) # directive name
[ ]? # optional space
:: # directive delimiter
([ ]+|$) # whitespace or end of line
""" % Inliner.simplename, re.VERBOSE | re.UNICODE))]
def explicit_markup(self, match, context, next_state):
"""Footnotes, hyperlink targets, directives, comments."""
nodelist, blank_finish = self.explicit_construct(match)
self.parent += nodelist
self.explicit_list(blank_finish)
return [], next_state, []
def explicit_construct(self, match):
"""Determine which explicit construct this is, parse & return it."""
errors = []
for method, pattern in self.explicit.constructs:
expmatch = pattern.match(match.string)
if expmatch:
try:
return method(self, expmatch)
except MarkupError, error: # never reached?
message, lineno = error.args
errors.append(self.reporter.warning(message, line=lineno))
break
nodelist, blank_finish = self.comment(match)
return nodelist + errors, blank_finish
def explicit_list(self, blank_finish):
"""
Create a nested state machine for a series of explicit markup
constructs (including anonymous hyperlink targets).
"""
offset = self.state_machine.line_offset + 1 # next line
newline_offset, blank_finish = self.nested_list_parse(
self.state_machine.input_lines[offset:],
input_offset=self.state_machine.abs_line_offset() + 1,
node=self.parent, initial_state='Explicit',
blank_finish=blank_finish,
match_titles=self.state_machine.match_titles)
self.goto_line(newline_offset)
if not blank_finish:
self.parent += self.unindent_warning('Explicit markup')
def anonymous(self, match, context, next_state):
"""Anonymous hyperlink targets."""
nodelist, blank_finish = self.anonymous_target(match)
self.parent += nodelist
self.explicit_list(blank_finish)
return [], next_state, []
def anonymous_target(self, match):
lineno = self.state_machine.abs_line_number()
block, indent, offset, blank_finish \
= self.state_machine.get_first_known_indented(match.end(),
until_blank=1)
blocktext = match.string[:match.end()] + '\n'.join(block)
block = [escape2null(line) for line in block]
target = self.make_target(block, blocktext, lineno, '')
return [target], blank_finish
def line(self, match, context, next_state):
"""Section title overline or transition marker."""
if self.state_machine.match_titles:
return [match.string], 'Line', []
elif match.string.strip() == '::':
raise statemachine.TransitionCorrection('text')
elif len(match.string.strip()) < 4:
msg = self.reporter.info(
'Unexpected possible title overline or transition.\n'
"Treating it as ordinary text because it's so short.",
line=self.state_machine.abs_line_number())
self.parent += msg
raise statemachine.TransitionCorrection('text')
else:
blocktext = self.state_machine.line
msg = self.reporter.severe(
'Unexpected section title or transition.',
nodes.literal_block(blocktext, blocktext),
line=self.state_machine.abs_line_number())
self.parent += msg
return [], next_state, []
def text(self, match, context, next_state):
"""Titles, definition lists, paragraphs."""
return [match.string], 'Text', []
class RFC2822Body(Body):
"""
RFC2822 headers are only valid as the first constructs in documents. As
soon as anything else appears, the `Body` state should take over.
"""
patterns = Body.patterns.copy() # can't modify the original
patterns['rfc2822'] = r'[!-9;-~]+:( +|$)'
initial_transitions = [(name, 'Body')
for name in Body.initial_transitions]
initial_transitions.insert(-1, ('rfc2822', 'Body')) # just before 'text'
def rfc2822(self, match, context, next_state):
"""RFC2822-style field list item."""
fieldlist = nodes.field_list(classes=['rfc2822'])
self.parent += fieldlist
field, blank_finish = self.rfc2822_field(match)
fieldlist += field
offset = self.state_machine.line_offset + 1 # next line
newline_offset, blank_finish = self.nested_list_parse(
self.state_machine.input_lines[offset:],
input_offset=self.state_machine.abs_line_offset() + 1,
node=fieldlist, initial_state='RFC2822List',
blank_finish=blank_finish)
self.goto_line(newline_offset)
if not blank_finish:
self.parent += self.unindent_warning(
'RFC2822-style field list')
return [], next_state, []
def rfc2822_field(self, match):
name = match.string[:match.string.find(':')]
indented, indent, line_offset, blank_finish = \
self.state_machine.get_first_known_indented(match.end(),
until_blank=1)
fieldnode = nodes.field()
fieldnode += nodes.field_name(name, name)
fieldbody = nodes.field_body('\n'.join(indented))
fieldnode += fieldbody
if indented:
self.nested_parse(indented, input_offset=line_offset,
node=fieldbody)
return fieldnode, blank_finish
class SpecializedBody(Body):
"""
Superclass for second and subsequent compound element members. Compound
elements are lists and list-like constructs.
All transition methods are disabled (redefined as `invalid_input`).
Override individual methods in subclasses to re-enable.
For example, once an initial bullet list item, say, is recognized, the
`BulletList` subclass takes over, with a "bullet_list" node as its
container. Upon encountering the initial bullet list item, `Body.bullet`
calls its ``self.nested_list_parse`` (`RSTState.nested_list_parse`), which
starts up a nested parsing session with `BulletList` as the initial state.
Only the ``bullet`` transition method is enabled in `BulletList`; as long
as only bullet list items are encountered, they are parsed and inserted
into the container. The first construct which is *not* a bullet list item
triggers the `invalid_input` method, which ends the nested parse and
closes the container. `BulletList` needs to recognize input that is
invalid in the context of a bullet list, which means everything *other
than* bullet list items, so it inherits the transition list created in
`Body`.
"""
def invalid_input(self, match=None, context=None, next_state=None):
"""Not a compound element member. Abort this state machine."""
self.state_machine.previous_line() # back up so parent SM can reassess
raise EOFError
indent = invalid_input
bullet = invalid_input
enumerator = invalid_input
field_marker = invalid_input
option_marker = invalid_input
doctest = invalid_input
line_block = invalid_input
grid_table_top = invalid_input
simple_table_top = invalid_input
explicit_markup = invalid_input
anonymous = invalid_input
line = invalid_input
text = invalid_input
class BulletList(SpecializedBody):
"""Second and subsequent bullet_list list_items."""
def bullet(self, match, context, next_state):
"""Bullet list item."""
if match.string[0] != self.parent['bullet']:
# different bullet: new list
self.invalid_input()
listitem, blank_finish = self.list_item(match.end())
self.parent += listitem
self.blank_finish = blank_finish
return [], next_state, []
class DefinitionList(SpecializedBody):
"""Second and subsequent definition_list_items."""
def text(self, match, context, next_state):
"""Definition lists."""
return [match.string], 'Definition', []
class EnumeratedList(SpecializedBody):
"""Second and subsequent enumerated_list list_items."""
def enumerator(self, match, context, next_state):
"""Enumerated list item."""
format, sequence, text, ordinal = self.parse_enumerator(
match, self.parent['enumtype'])
if ( format != self.format
or (sequence != '#' and (sequence != self.parent['enumtype']
or self.auto
or ordinal != (self.lastordinal + 1)))
or not self.is_enumerated_list_item(ordinal, sequence, format)):
# different enumeration: new list
self.invalid_input()
if sequence == '#':
self.auto = 1
listitem, blank_finish = self.list_item(match.end())
self.parent += listitem
self.blank_finish = blank_finish
self.lastordinal = ordinal
return [], next_state, []
class FieldList(SpecializedBody):
"""Second and subsequent field_list fields."""
def field_marker(self, match, context, next_state):
"""Field list field."""
field, blank_finish = self.field(match)
self.parent += field
self.blank_finish = blank_finish
return [], next_state, []
class OptionList(SpecializedBody):
"""Second and subsequent option_list option_list_items."""
def option_marker(self, match, context, next_state):
"""Option list item."""
try:
option_list_item, blank_finish = self.option_list_item(match)
except MarkupError, (message, lineno):
self.invalid_input()
self.parent += option_list_item
self.blank_finish = blank_finish
return [], next_state, []
class RFC2822List(SpecializedBody, RFC2822Body):
"""Second and subsequent RFC2822-style field_list fields."""
patterns = RFC2822Body.patterns
initial_transitions = RFC2822Body.initial_transitions
def rfc2822(self, match, context, next_state):
"""RFC2822-style field list item."""
field, blank_finish = self.rfc2822_field(match)
self.parent += field
self.blank_finish = blank_finish
return [], 'RFC2822List', []
blank = SpecializedBody.invalid_input
class ExtensionOptions(FieldList):
"""
Parse field_list fields for extension options.
No nested parsing is done (including inline markup parsing).
"""
def parse_field_body(self, indented, offset, node):
"""Override `Body.parse_field_body` for simpler parsing."""
lines = []
for line in list(indented) + ['']:
if line.strip():
lines.append(line)
elif lines:
text = '\n'.join(lines)
node += nodes.paragraph(text, text)
lines = []
class LineBlock(SpecializedBody):
"""Second and subsequent lines of a line_block."""
blank = SpecializedBody.invalid_input
def line_block(self, match, context, next_state):
"""New line of line block."""
lineno = self.state_machine.abs_line_number()
line, messages, blank_finish = self.line_block_line(match, lineno)
self.parent += line
self.parent.parent += messages
self.blank_finish = blank_finish
return [], next_state, []
class Explicit(SpecializedBody):
"""Second and subsequent explicit markup construct."""
def explicit_markup(self, match, context, next_state):
"""Footnotes, hyperlink targets, directives, comments."""
nodelist, blank_finish = self.explicit_construct(match)
self.parent += nodelist
self.blank_finish = blank_finish
return [], next_state, []
def anonymous(self, match, context, next_state):
"""Anonymous hyperlink targets."""
nodelist, blank_finish = self.anonymous_target(match)
self.parent += nodelist
self.blank_finish = blank_finish
return [], next_state, []
blank = SpecializedBody.invalid_input
class SubstitutionDef(Body):
"""
Parser for the contents of a substitution_definition element.
"""
patterns = {
'embedded_directive': re.compile(r'(%s)::( +|$)'
% Inliner.simplename, re.UNICODE),
'text': r''}
initial_transitions = ['embedded_directive', 'text']
def embedded_directive(self, match, context, next_state):
nodelist, blank_finish = self.directive(match,
alt=self.parent['names'][0])
self.parent += nodelist
if not self.state_machine.at_eof():
self.blank_finish = blank_finish
raise EOFError
def text(self, match, context, next_state):
if not self.state_machine.at_eof():
self.blank_finish = self.state_machine.is_next_line_blank()
raise EOFError
class Text(RSTState):
"""
Classifier of second line of a text block.
Could be a paragraph, a definition list item, or a title.
"""
patterns = {'underline': Body.patterns['line'],
'text': r''}
initial_transitions = [('underline', 'Body'), ('text', 'Body')]
def blank(self, match, context, next_state):
"""End of paragraph."""
paragraph, literalnext = self.paragraph(
context, self.state_machine.abs_line_number() - 1)
self.parent += paragraph
if literalnext:
self.parent += self.literal_block()
return [], 'Body', []
def eof(self, context):
if context:
self.blank(None, context, None)
return []
def indent(self, match, context, next_state):
"""Definition list item."""
definitionlist = nodes.definition_list()
definitionlistitem, blank_finish = self.definition_list_item(context)
definitionlist += definitionlistitem
self.parent += definitionlist
offset = self.state_machine.line_offset + 1 # next line
newline_offset, blank_finish = self.nested_list_parse(
self.state_machine.input_lines[offset:],
input_offset=self.state_machine.abs_line_offset() + 1,
node=definitionlist, initial_state='DefinitionList',
blank_finish=blank_finish, blank_finish_state='Definition')
self.goto_line(newline_offset)
if not blank_finish:
self.parent += self.unindent_warning('Definition list')
return [], 'Body', []
def underline(self, match, context, next_state):
"""Section title."""
lineno = self.state_machine.abs_line_number()
title = context[0].rstrip()
underline = match.string.rstrip()
source = title + '\n' + underline
messages = []
if column_width(title) > len(underline):
if len(underline) < 4:
if self.state_machine.match_titles:
msg = self.reporter.info(
'Possible title underline, too short for the title.\n'
"Treating it as ordinary text because it's so short.",
line=lineno)
self.parent += msg
raise statemachine.TransitionCorrection('text')
else:
blocktext = context[0] + '\n' + self.state_machine.line
msg = self.reporter.warning(
'Title underline too short.',
nodes.literal_block(blocktext, blocktext), line=lineno)
messages.append(msg)
if not self.state_machine.match_titles:
blocktext = context[0] + '\n' + self.state_machine.line
msg = self.reporter.severe(
'Unexpected section title.',
nodes.literal_block(blocktext, blocktext), line=lineno)
self.parent += messages
self.parent += msg
return [], next_state, []
style = underline[0]
context[:] = []
self.section(title, source, style, lineno - 1, messages)
return [], next_state, []
def text(self, match, context, next_state):
"""Paragraph."""
startline = self.state_machine.abs_line_number() - 1
msg = None
try:
block = self.state_machine.get_text_block(flush_left=1)
except statemachine.UnexpectedIndentationError, instance:
block, source, lineno = instance.args
msg = self.reporter.error('Unexpected indentation.',
source=source, line=lineno)
lines = context + list(block)
paragraph, literalnext = self.paragraph(lines, startline)
self.parent += paragraph
self.parent += msg
if literalnext:
try:
self.state_machine.next_line()
except EOFError:
pass
self.parent += self.literal_block()
return [], next_state, []
def literal_block(self):
"""Return a list of nodes."""
indented, indent, offset, blank_finish = \
self.state_machine.get_indented()
while indented and not indented[-1].strip():
indented.trim_end()
if not indented:
return self.quoted_literal_block()
data = '\n'.join(indented)
literal_block = nodes.literal_block(data, data)
literal_block.line = offset + 1
nodelist = [literal_block]
if not blank_finish:
nodelist.append(self.unindent_warning('Literal block'))
return nodelist
def quoted_literal_block(self):
abs_line_offset = self.state_machine.abs_line_offset()
offset = self.state_machine.line_offset
parent_node = nodes.Element()
new_abs_offset = self.nested_parse(
self.state_machine.input_lines[offset:],
input_offset=abs_line_offset, node=parent_node, match_titles=0,
state_machine_kwargs={'state_classes': (QuotedLiteralBlock,),
'initial_state': 'QuotedLiteralBlock'})
self.goto_line(new_abs_offset)
return parent_node.children
def definition_list_item(self, termline):
indented, indent, line_offset, blank_finish = \
self.state_machine.get_indented()
definitionlistitem = nodes.definition_list_item(
'\n'.join(termline + list(indented)))
lineno = self.state_machine.abs_line_number() - 1
definitionlistitem.line = lineno
termlist, messages = self.term(termline, lineno)
definitionlistitem += termlist
definition = nodes.definition('', *messages)
definitionlistitem += definition
if termline[0][-2:] == '::':
definition += self.reporter.info(
'Blank line missing before literal block (after the "::")? '
'Interpreted as a definition list item.', line=line_offset+1)
self.nested_parse(indented, input_offset=line_offset, node=definition)
return definitionlistitem, blank_finish
classifier_delimiter = re.compile(' +: +')
def term(self, lines, lineno):
"""Return a definition_list's term and optional classifiers."""
assert len(lines) == 1
text_nodes, messages = self.inline_text(lines[0], lineno)
term_node = nodes.term()
node_list = [term_node]
for i in range(len(text_nodes)):
node = text_nodes[i]
if isinstance(node, nodes.Text):
parts = self.classifier_delimiter.split(node.rawsource)
if len(parts) == 1:
node_list[-1] += node
else:
node_list[-1] += nodes.Text(parts[0].rstrip())
for part in parts[1:]:
classifier_node = nodes.classifier('', part)
node_list.append(classifier_node)
else:
node_list[-1] += node
return node_list, messages
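    # Example (editorial annotation): a definition-list term line such as
    # "name : string : required" produces a term node containing 'name' plus two
    # classifier nodes, 'string' and 'required', split on the space-colon-space
    # delimiter.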
class SpecializedText(Text):
"""
Superclass for second and subsequent lines of Text-variants.
All transition methods are disabled. Override individual methods in
subclasses to re-enable.
"""
def eof(self, context):
"""Incomplete construct."""
return []
def invalid_input(self, match=None, context=None, next_state=None):
"""Not a compound element member. Abort this state machine."""
raise EOFError
blank = invalid_input
indent = invalid_input
underline = invalid_input
text = invalid_input
class Definition(SpecializedText):
"""Second line of potential definition_list_item."""
def eof(self, context):
"""Not a definition."""
self.state_machine.previous_line(2) # so parent SM can reassess
return []
def indent(self, match, context, next_state):
"""Definition list item."""
definitionlistitem, blank_finish = self.definition_list_item(context)
self.parent += definitionlistitem
self.blank_finish = blank_finish
return [], 'DefinitionList', []
class Line(SpecializedText):
"""
Second line of over- & underlined section title or transition marker.
"""
eofcheck = 1 # @@@ ???
"""Set to 0 while parsing sections, so that we don't catch the EOF."""
def eof(self, context):
"""Transition marker at end of section or document."""
marker = context[0].strip()
if self.memo.section_bubble_up_kludge:
self.memo.section_bubble_up_kludge = 0
elif len(marker) < 4:
self.state_correction(context)
if self.eofcheck: # ignore EOFError with sections
lineno = self.state_machine.abs_line_number() - 1
transition = nodes.transition(rawsource=context[0])
transition.line = lineno
self.parent += transition
self.eofcheck = 1
return []
def blank(self, match, context, next_state):
"""Transition marker."""
lineno = self.state_machine.abs_line_number() - 1
marker = context[0].strip()
if len(marker) < 4:
self.state_correction(context)
transition = nodes.transition(rawsource=marker)
transition.line = lineno
self.parent += transition
return [], 'Body', []
def text(self, match, context, next_state):
"""Potential over- & underlined title."""
lineno = self.state_machine.abs_line_number() - 1
overline = context[0]
title = match.string
underline = ''
try:
underline = self.state_machine.next_line()
except EOFError:
blocktext = overline + '\n' + title
if len(overline.rstrip()) < 4:
self.short_overline(context, blocktext, lineno, 2)
else:
msg = self.reporter.severe(
'Incomplete section title.',
nodes.literal_block(blocktext, blocktext), line=lineno)
self.parent += msg
return [], 'Body', []
source = '%s\n%s\n%s' % (overline, title, underline)
overline = overline.rstrip()
underline = underline.rstrip()
if not self.transitions['underline'][0].match(underline):
blocktext = overline + '\n' + title + '\n' + underline
if len(overline.rstrip()) < 4:
self.short_overline(context, blocktext, lineno, 2)
else:
msg = self.reporter.severe(
'Missing matching underline for section title overline.',
nodes.literal_block(source, source), line=lineno)
self.parent += msg
return [], 'Body', []
elif overline != underline:
blocktext = overline + '\n' + title + '\n' + underline
if len(overline.rstrip()) < 4:
self.short_overline(context, blocktext, lineno, 2)
else:
msg = self.reporter.severe(
'Title overline & underline mismatch.',
nodes.literal_block(source, source), line=lineno)
self.parent += msg
return [], 'Body', []
title = title.rstrip()
messages = []
if column_width(title) > len(overline):
blocktext = overline + '\n' + title + '\n' + underline
if len(overline.rstrip()) < 4:
self.short_overline(context, blocktext, lineno, 2)
else:
msg = self.reporter.warning(
'Title overline too short.',
nodes.literal_block(source, source), line=lineno)
messages.append(msg)
style = (overline[0], underline[0])
self.eofcheck = 0 # @@@ not sure this is correct
self.section(title.lstrip(), source, style, lineno + 1, messages)
self.eofcheck = 1
return [], 'Body', []
indent = text # indented title
def underline(self, match, context, next_state):
overline = context[0]
blocktext = overline + '\n' + self.state_machine.line
lineno = self.state_machine.abs_line_number() - 1
if len(overline.rstrip()) < 4:
self.short_overline(context, blocktext, lineno, 1)
msg = self.reporter.error(
'Invalid section title or transition marker.',
nodes.literal_block(blocktext, blocktext), line=lineno)
self.parent += msg
return [], 'Body', []
def short_overline(self, context, blocktext, lineno, lines=1):
msg = self.reporter.info(
'Possible incomplete section title.\nTreating the overline as '
"ordinary text because it's so short.", line=lineno)
self.parent += msg
self.state_correction(context, lines)
def state_correction(self, context, lines=1):
self.state_machine.previous_line(lines)
context[:] = []
raise statemachine.StateCorrection('Body', 'text')
class QuotedLiteralBlock(RSTState):
"""
Nested parse handler for quoted (unindented) literal blocks.
Special-purpose. Not for inclusion in `state_classes`.
"""
patterns = {'initial_quoted': r'(%(nonalphanum7bit)s)' % Body.pats,
'text': r''}
initial_transitions = ('initial_quoted', 'text')
def __init__(self, state_machine, debug=0):
RSTState.__init__(self, state_machine, debug)
self.messages = []
self.initial_lineno = None
def blank(self, match, context, next_state):
if context:
raise EOFError
else:
return context, next_state, []
def eof(self, context):
if context:
text = '\n'.join(context)
literal_block = nodes.literal_block(text, text)
literal_block.line = self.initial_lineno
self.parent += literal_block
else:
self.parent += self.reporter.warning(
'Literal block expected; none found.',
line=self.state_machine.abs_line_number())
self.state_machine.previous_line()
self.parent += self.messages
return []
def indent(self, match, context, next_state):
assert context, ('QuotedLiteralBlock.indent: context should not '
'be empty!')
self.messages.append(
self.reporter.error('Unexpected indentation.',
line=self.state_machine.abs_line_number()))
self.state_machine.previous_line()
raise EOFError
def initial_quoted(self, match, context, next_state):
"""Match arbitrary quote character on the first line only."""
self.remove_transition('initial_quoted')
quote = match.string[0]
pattern = re.compile(re.escape(quote))
# New transition matches consistent quotes only:
self.add_transition('quoted',
(pattern, self.quoted, self.__class__.__name__))
self.initial_lineno = self.state_machine.abs_line_number()
return [match.string], next_state, []
def quoted(self, match, context, next_state):
"""Match consistent quotes on subsequent lines."""
context.append(match.string)
return context, next_state, []
def text(self, match, context, next_state):
if context:
self.messages.append(
self.reporter.error('Inconsistent literal block quoting.',
line=self.state_machine.abs_line_number()))
self.state_machine.previous_line()
raise EOFError
state_classes = (Body, BulletList, DefinitionList, EnumeratedList, FieldList,
OptionList, LineBlock, ExtensionOptions, Explicit, Text,
Definition, Line, SubstitutionDef, RFC2822Body, RFC2822List)
"""Standard set of State classes used to start `RSTStateMachine`."""
| 42.080758 | 80 | 0.561882 |
794049ab2fe8bf96ec65e8c63971e44e7b835705 | 1,196 | py | Python | angr/engines/vex/statements/store.py | mariusmue/angr | f8304c4b1f0097a721a6692b02a45cabaae137c5 | ["BSD-2-Clause"] | 2 | 2018-12-03T23:14:56.000Z | 2018-12-03T23:15:57.000Z | angr/engines/vex/statements/store.py | mariusmue/angr | f8304c4b1f0097a721a6692b02a45cabaae137c5 | ["BSD-2-Clause"] | null | null | null | angr/engines/vex/statements/store.py | mariusmue/angr | f8304c4b1f0097a721a6692b02a45cabaae137c5 | ["BSD-2-Clause"] | 1 | 2022-02-10T02:29:38.000Z | 2022-02-10T02:29:38.000Z |
from . import SimIRStmt
from .... import sim_options as o
from ....state_plugins.sim_action_object import SimActionObject
from ....state_plugins.sim_action import SimActionData
class SimIRStmt_Store(SimIRStmt):
def _execute(self):
# first resolve the address and record stuff
addr = self._translate_expr(self.stmt.addr)
# now get the value and track everything
data = self._translate_expr(self.stmt.data)
expr = data.expr.raw_to_bv()
# track the write
if o.TRACK_MEMORY_ACTIONS in self.state.options:
data_ao = SimActionObject(expr, reg_deps=data.reg_deps(), tmp_deps=data.tmp_deps())
addr_ao = SimActionObject(addr.expr, reg_deps=addr.reg_deps(), tmp_deps=addr.tmp_deps())
size_ao = SimActionObject(data.size_bits())
a = SimActionData(self.state, SimActionData.MEM, SimActionData.WRITE, data=data_ao, size=size_ao, addr=addr_ao)
self.actions.append(a)
else:
a = None
# Now do the store (if we should)
if o.DO_STORES in self.state.options:
self.state.memory.store(addr.expr, data.expr, action=a, endness=self.stmt.endness)
| 41.241379 | 123 | 0.674749 |
794049e4f30221372ed41ef964148f0d3c91f09b | 629 | py | Python | InventoryManager/product/models.py | FuryAndRage/InventoryManager | bbb7318dae6bf40da81ed02a0f547019f031778f | ["MIT"] | null | null | null | InventoryManager/product/models.py | FuryAndRage/InventoryManager | bbb7318dae6bf40da81ed02a0f547019f031778f | ["MIT"] | null | null | null | InventoryManager/product/models.py | FuryAndRage/InventoryManager | bbb7318dae6bf40da81ed02a0f547019f031778f | ["MIT"] | null | null | null |
from django.db import models
from InventoryManager.category.models import Category
from django.contrib.auth.models import User
class Product(models.Model):
user = models.ForeignKey(User, on_delete = models.CASCADE)
category = models.ForeignKey(Category, on_delete = models.CASCADE, verbose_name = 'Category')
product_name = models.CharField('Product', max_length = 255)
product_description = models.TextField(null = True)
product_quantity = models.IntegerField(default = 0)
product_image = models.ImageField(upload_to = 'product/', null=True)
def __str__(self):
return self.product_name
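# Illustrative usage (editorial note, not part of the original file): a row could
# be created with Product.objects.create(user=some_user, category=some_category,
# product_name='Widget', product_quantity=10), after which str() of that instance
# returns 'Widget'.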
| 41.933333 | 97 | 0.750397 |
79404b66bddf932db6f8dacdf5ef5085f91e55f4 | 5,188 | py | Python | MyApi/settings.py | CodeEnvironment/django-rest-framework-deploy-heroku | c6ffb20961c193b0f4dc1289de904b5d6750f335 | ["MIT"] | 3 | 2021-04-05T14:02:44.000Z | 2022-01-25T07:50:20.000Z | MyApi/settings.py | CodeEnvironment/django-rest-framework-deploy-heroku | c6ffb20961c193b0f4dc1289de904b5d6750f335 | ["MIT"] | null | null | null | MyApi/settings.py | CodeEnvironment/django-rest-framework-deploy-heroku | c6ffb20961c193b0f4dc1289de904b5d6750f335 | ["MIT"] | 1 | 2022-01-23T15:09:59.000Z | 2022-01-23T15:09:59.000Z |
"""
Django settings for MyApi project.
Generated by 'django-admin startproject' using Django 2.2.5.
For more information on this file, see
https://docs.djangoproject.com/en/2.2/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.2/ref/settings/
"""
import os
from decouple import config
from dj_database_url import parse as dburl
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.2/howto/deployment/checklist/
SECRET_KEY = config('SECRET_KEY')
DEBUG = config('DEBUG', default=False, cast=bool)
ALLOWED_HOSTS = ['127.0.0.1','dj-test-api.herokuapp.com']
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'rest_framework',
'allauth.socialaccount',
'rest_framework.authtoken',
'rest_auth',
'django.contrib.sites',
'allauth',
'allauth.account',
'rest_auth.registration',
'firstApp',
'cars',
'posts',
'userapp',
'school',
'racing',
'weather.apps.WeatherConfig',
]
SITE_ID = 1
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'MyApi.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'MyApi.wsgi.application'
# Database
# https://docs.djangoproject.com/en/2.2/ref/settings/#databases
# DATABASES = {
# 'default': {
# 'ENGINE': 'django.db.backends.sqlite3',
# 'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
# }
# }
default_dburl = 'sqlite:///' + os.path.join(BASE_DIR, 'db.sqlite3')
DATABASES = { 'default': config('DATABASE_URL', default=default_dburl, cast=dburl), }
# Password validation
# https://docs.djangoproject.com/en/2.2/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
REST_FRAMEWORK = {
'DEFAULT_AUTHENTICATION_CLASSES': [
# 'rest_framework.authentication.BasicAuthentication',
# 'rest_framework.authentication.SessionAuthentication',
'rest_framework.authentication.TokenAuthentication',
#'rest_framework_simplejwt.authentication.JWTAuthentication',
],
'DEFAULT_PERMISSION_CLASSES': [
# 'rest_framework.permissions.DjangoModelPermissionsOrAnonReadOnly'
# 'rest_framework.permissions.IsAuthenticated',
'rest_framework.permissions.AllowAny',
],
'TEST_REQUEST_RENDERER_CLASSES': [
'rest_framework.renderers.MultiPartRenderer',
'rest_framework.renderers.JSONRenderer',
'rest_framework.renderers.TemplateHTMLRenderer'
],
'DEFAULT_THROTTLE_CLASSES': [
'rest_framework.throttling.ScopedRateThrottle',
],
'DEFAULT_THROTTLE_RATES': {
'cars_app': '50/day',
'first_app': '4/day'
}
}
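# Illustrative note (not part of the original settings): with ScopedRateThrottle
# enabled above, a view opts into one of the DEFAULT_THROTTLE_RATES by declaring
# a matching scope, e.g. (hypothetical view):
#
#   class CarListView(APIView):
#       throttle_scope = 'cars_app'   # limited to 50 requests/day by the rates above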
REST_AUTH_SERIALIZERS = {
'USER_DETAILS_SERIALIZER': 'userapp.serializer.UserDetailsSerializer'
}
CACHES = {
'default': {
'BACKEND': 'django.core.cache.backends.filebased.FileBasedCache',
'LOCATION': 'C:/Users/Python/Desktop/MyApi',
}
}
# Internationalization
# https://docs.djangoproject.com/en/2.2/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.2/howto/static-files/
STATIC_URL = '/static/'
STATIC_ROOT = os.path.join(BASE_DIR,'staticfiles')
AUTH_USER_MODEL = 'userapp.User'
ACCOUNT_EMAIL_REQUIRED = False
# Email (SMTP) settings (originally noted as gmail_send/settings.py)
EMAIL_BACKEND = 'django.core.mail.backends.smtp.EmailBackend'
EMAIL_HOST = 'smtp.gmail.com'
# NOTE: in practice these credentials should come from the environment
# (e.g. config('EMAIL_HOST_USER')) rather than being hard-coded here.
EMAIL_HOST_USER = '[email protected]'
EMAIL_HOST_PASSWORD = 'emqcfqqghtgndxqq'
EMAIL_PORT = 587
EMAIL_USE_TLS = True
DEFAULT_FROM_EMAIL = 'default from email'
| 26.605128 | 91 | 0.694873 |
79404b9091f4c9236ab4474e061ef2151e98c910 | 66 | py | Python | kpi/unical_accounts/__init__.py | UniversitaDellaCalabria/kpiManagement | d045a464298e17f50e005b89ba3b71e53d57f368 | [
"Apache-2.0"
] | null | null | null | kpi/unical_accounts/__init__.py | UniversitaDellaCalabria/kpiManagement | d045a464298e17f50e005b89ba3b71e53d57f368 | [
"Apache-2.0"
] | null | null | null | kpi/unical_accounts/__init__.py | UniversitaDellaCalabria/kpiManagement | d045a464298e17f50e005b89ba3b71e53d57f368 | [
"Apache-2.0"
] | 1 | 2022-03-28T10:48:38.000Z | 2022-03-28T10:48:38.000Z | default_app_config = 'unical_accounts.apps.Unical_AccountsConfig'
| 33 | 65 | 0.878788 |
79404bc719dededb5333373b2d7cb689679a62f2 | 4,414 | py | Python | Database.py | itsp300/attendance_facial_recognition | 0f49998ebbe35d350443c2f0b7e24060635066ee | [
"MIT"
] | null | null | null | Database.py | itsp300/attendance_facial_recognition | 0f49998ebbe35d350443c2f0b7e24060635066ee | [
"MIT"
] | null | null | null | Database.py | itsp300/attendance_facial_recognition | 0f49998ebbe35d350443c2f0b7e24060635066ee | [
"MIT"
] | null | null | null | import sqlite3
from sqlite3 import Error
def convert_tuple(tup):
str = ''.join(tup)
return str
# Database Connection
def create_connection(db_file):
""" create a database connection to the SQLite database
specified by db_file
:param db_file: database file
:return: Connection object or None
"""
print("Creating a connection to the database")
conn = None
try:
print("Connection Created!")
conn = sqlite3.connect(db_file)
except Error as e:
print(e)
return conn
# Creates tables in the database
def create_table(conn, create_table_sql):
""" create a table from the create_table_sql statement
:param conn: Connection object
:param create_table_sql: a CREATE TABLE statement
:return:
"""
try:
c = conn.cursor()
c.execute(create_table_sql)
except Error as e:
print(e)
# Create a Attendance Record
def create_attend(conn, face_attend):
"""
    Insert a new attendance record into the attendance table
:param conn:
:param face_attend:
:return: table id
"""
sql = ''' INSERT INTO attendance(attendance_id,report_id,student_number,confidence, date_attended)
VALUES(?,?,?,?,?) '''
cur = conn.cursor()
cur.execute(sql, face_attend)
return cur.lastrowid
# Create a Report Record
def create_report(conn, face_report):
"""
    Insert a new report record into the report table
:param conn:
:param face_report:
:return: table id
"""
sql = ''' INSERT INTO report(report_id,identified,date_attended)
VALUES(?,?,?) '''
print("Report " + sql)
cur = conn.cursor()
cur.execute(sql, face_report)
return cur.lastrowid
# Create a Student Record
def create_student(conn, student_data):
"""
Create a new record into the students table
:param conn:
:param student_data:
:return: table id
"""
    try:
        sql = ''' INSERT INTO students(student_number, first_name, surname, file_name)
                  VALUES(?,?,?,?) '''
        cur = conn.cursor()
        cur.execute(sql, student_data)
        return cur.lastrowid
    except Error as e:
        # report the failure and signal it to the caller instead of raising NameError on cur
        print(e)
        return None
# Select all Students in the Database
def select_all_students(conn):
"""
Query all rows in the tasks table
:param conn: the Connection object
:return:
"""
cur = conn.cursor()
cur.execute("SELECT * FROM students")
rows = cur.fetchall()
print("Students in Database")
for row in rows:
print(row)
# Select all records from Table Attendances
def select_all_tasks(conn):
"""
Query all rows in the tasks table
:param conn: the Connection object
:return:
"""
cur = conn.cursor()
cur.execute("SELECT * FROM attendance")
rows = cur.fetchall()
for row in rows:
print(row)
# Select All Records from Table Reports
def select_all_report(conn):
"""
Query all rows in the tasks table
:param conn: the Connection object
:return:
"""
cur = conn.cursor()
cur.execute("SELECT * FROM report")
rows = cur.fetchall()
for row in rows:
print(row)
# Creates the Json Statement to return to main Server
def create_the_statement(conn, report_num):
"""
Query all rows in the tasks table
:param conn: the Connection object
:param report_num: the identifier
:return:
"""
student_number = []
att_confidence = []
identified = []
cur = conn.cursor()
cur.execute("SELECT student_number FROM attendance WHERE report_id =" + report_num)
students = cur.fetchall()
for student in students:
stud = convert_tuple(student)
student_number.append(stud)
print(student_number)
cur.execute("SELECT confidence FROM attendance WHERE report_id =" + report_num)
confidence = cur.fetchall()
for con in confidence:
conf = convert_tuple(con)
att_confidence.append(conf)
print(att_confidence)
counter = 0
for person in student_number:
the_records = {
"person_id": person,
"certainty": att_confidence[counter]
}
identified.append(the_records)
        counter += 1
report_config = {
"type": "face_rec_details",
"identified": identified
}
return report_config
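# Example wiring of these helpers (illustrative sketch only; the database file
# name and the record values are assumptions, and the CREATE TABLE statements
# are expected to be supplied to create_table() elsewhere):
#
#   conn = create_connection("attendance.db")
#   create_student(conn, ("s123", "Jane", "Doe", "jane.jpg"))
#   create_report(conn, (1, 1, "2020-01-01"))
#   create_attend(conn, (1, 1, "s123", "87.5", "2020-01-01"))
#   conn.commit()
#   statement = create_the_statement(conn, "1")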
| 23.604278 | 102 | 0.624377 |
79404c4690837cc329e0c0abc1e55e7e455117c9 | 3,628 | py | Python | src/oci/application_migration/models/occ_authorization_details.py | Manny27nyc/oci-python-sdk | de60b04e07a99826254f7255e992f41772902df7 | [
"Apache-2.0",
"BSD-3-Clause"
] | 249 | 2017-09-11T22:06:05.000Z | 2022-03-04T17:09:29.000Z | src/oci/application_migration/models/occ_authorization_details.py | Manny27nyc/oci-python-sdk | de60b04e07a99826254f7255e992f41772902df7 | [
"Apache-2.0",
"BSD-3-Clause"
] | 228 | 2017-09-11T23:07:26.000Z | 2022-03-23T10:58:50.000Z | src/oci/application_migration/models/occ_authorization_details.py | Manny27nyc/oci-python-sdk | de60b04e07a99826254f7255e992f41772902df7 | [
"Apache-2.0",
"BSD-3-Clause"
] | 224 | 2017-09-27T07:32:43.000Z | 2022-03-25T16:55:42.000Z | # coding: utf-8
# Copyright (c) 2016, 2021, Oracle and/or its affiliates. All rights reserved.
# This software is dual-licensed to you under the Universal Permissive License (UPL) 1.0 as shown at https://oss.oracle.com/licenses/upl or Apache License 2.0 as shown at http://www.apache.org/licenses/LICENSE-2.0. You may choose either license.
from .authorization_details import AuthorizationDetails
from oci.util import formatted_flat_dict, NONE_SENTINEL, value_allowed_none_or_none_sentinel # noqa: F401
from oci.decorators import init_model_state_from_kwargs
@init_model_state_from_kwargs
class OccAuthorizationDetails(AuthorizationDetails):
"""
Credentials to access Oracle Cloud@Customer, which is the source environment from which you want to migrate the application.
"""
def __init__(self, **kwargs):
"""
Initializes a new OccAuthorizationDetails object with values from keyword arguments. The default value of the :py:attr:`~oci.application_migration.models.OccAuthorizationDetails.type` attribute
of this class is ``OCC`` and it should not be changed.
The following keyword arguments are supported (corresponding to the getters/setters of this class):
:param type:
The value to assign to the type property of this OccAuthorizationDetails.
Allowed values for this property are: "OCIC", "INTERNAL_COMPUTE", "OCC", "OCIC_IDCS", "IMPORT"
:type type: str
:param username:
The value to assign to the username property of this OccAuthorizationDetails.
:type username: str
:param password:
The value to assign to the password property of this OccAuthorizationDetails.
:type password: str
"""
self.swagger_types = {
'type': 'str',
'username': 'str',
'password': 'str'
}
self.attribute_map = {
'type': 'type',
'username': 'username',
'password': 'password'
}
self._type = None
self._username = None
self._password = None
self._type = 'OCC'
@property
def username(self):
"""
**[Required]** Gets the username of this OccAuthorizationDetails.
User with Compute Operations role in Oracle Cloud@Customer.
:return: The username of this OccAuthorizationDetails.
:rtype: str
"""
return self._username
@username.setter
def username(self, username):
"""
Sets the username of this OccAuthorizationDetails.
User with Compute Operations role in Oracle Cloud@Customer.
:param username: The username of this OccAuthorizationDetails.
:type: str
"""
self._username = username
@property
def password(self):
"""
**[Required]** Gets the password of this OccAuthorizationDetails.
Password for this user.
:return: The password of this OccAuthorizationDetails.
:rtype: str
"""
return self._password
@password.setter
def password(self, password):
"""
Sets the password of this OccAuthorizationDetails.
Password for this user.
:param password: The password of this OccAuthorizationDetails.
:type: str
"""
self._password = password
def __repr__(self):
return formatted_flat_dict(self)
def __eq__(self, other):
if other is None:
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
return not self == other
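# Illustrative usage sketch (not part of the generated module; the credential
# values are placeholders). Attributes are populated from keyword arguments:
#
#   details = OccAuthorizationDetails(username="opc_user", password="example")
#   assert details.type == 'OCC'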
| 32.392857 | 245 | 0.649118 |
79404ce563c06205e2c0278cee59f5bcd539811e | 8,754 | py | Python | tensorflow/python/eager/wrap_function_test.py | Sonata-Wang/tensorflow | 8bbef0cd77879d05ed69bf30e76087847a8ca4a2 | [
"Apache-2.0"
] | 6 | 2019-02-05T22:36:51.000Z | 2022-01-14T03:50:57.000Z | tensorflow/python/eager/wrap_function_test.py | YaoYaoZhi/tensorflow | 83903c9dd9b5235996ec9158c30a1607fcfb4c73 | [
"Apache-2.0"
] | 1 | 2019-09-14T04:40:07.000Z | 2020-11-18T18:16:17.000Z | tensorflow/python/eager/wrap_function_test.py | YaoYaoZhi/tensorflow | 83903c9dd9b5235996ec9158c30a1607fcfb4c73 | [
"Apache-2.0"
] | 8 | 2016-01-14T13:12:56.000Z | 2021-04-09T10:20:53.000Z | # Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.eager import backprop
from tensorflow.python.eager import def_function
from tensorflow.python.eager import wrap_function
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_spec
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import variables
from tensorflow.python.platform import test
class WrapFunctionTest(test.TestCase):
def testDocString(self):
def f(x, do_add):
v = variables.Variable(5.0)
if do_add:
op = v.assign_add(x)
else:
op = v.assign_sub(x)
with ops.control_dependencies([op]):
return v.read_value()
f_add = wrap_function.wrap_function(
f, [tensor_spec.TensorSpec((), dtypes.float32), True])
self.assertAllEqual(f_add(1.0), 6.0)
self.assertAllEqual(f_add(1.0), 7.0)
# Can call tf.compat.v1.wrap_function again to get a new trace, a new set
# of variables, and possibly different non-template arguments.
f_sub = wrap_function.wrap_function(
f, [tensor_spec.TensorSpec((), dtypes.float32), False])
self.assertAllEqual(f_sub(1.0), 4.0)
self.assertAllEqual(f_sub(1.0), 3.0)
def testPrune(self):
x_in = []
x_out = []
def f(x, y):
x_in.append(x)
xx = x * x
x_out.append(xx)
return xx, 2 * y*y
f_wrapped = wrap_function.wrap_function(
f, [tensor_spec.TensorSpec((), dtypes.float32)] * 2)
f_pruned = f_wrapped.prune(x_in[0], [x_out[0]])
self.assertAllEqual(f_pruned(ops.convert_to_tensor(2.0)), [4.0])
def testNoArguments(self):
def f():
return constant_op.constant(1.)
f_wrapped = wrap_function.wrap_function(f, [])
self.assertAllEqual(1.0, f_wrapped())
def testPruneCaptures(self):
v1 = variables.Variable(2.)
def f():
v2 = variables.Variable(3.)
return array_ops.identity(v1 * v2 * constant_op.constant(1.), 'fetch')
f_wrapped = wrap_function.wrap_function(f, [])
self.assertAllEqual(6.0, f_wrapped())
# Test pruning directly on the inputs
pruned = f_wrapped.prune(
feeds=f_wrapped.inputs,
fetches=f_wrapped.graph.get_tensor_by_name('fetch:0'))
self.assertAllEqual(6.0, pruned())
# Test pruning with no inputs
pruned = f_wrapped.prune(
feeds=(),
fetches=f_wrapped.graph.get_tensor_by_name('fetch:0'))
self.assertAllEqual(6.0, pruned())
def testCollectionsIsolation(self):
v1 = variables.Variable(2.)
v2_holder = []
def f():
v2 = variables.Variable(3.)
v2_holder.append(v2)
ops.add_to_collection(ops.GraphKeys.LOSSES, v2 * constant_op.constant(3.))
return array_ops.identity(v1 * v2 * constant_op.constant(1.), 'fetch')
f_wrapped = wrap_function.wrap_function(f, [])
self.assertAllEqual(6.0, f_wrapped())
self.assertEqual(
len(f_wrapped.graph.get_collection(ops.GraphKeys.LOSSES)), 1)
f_var_collection = f_wrapped.graph.get_collection(
ops.GraphKeys.TRAINABLE_VARIABLES)
self.assertEqual(len(f_var_collection), 1)
self.assertIs(f_var_collection[0], v2_holder[0])
v3_holder = []
def g():
v3 = variables.Variable(4.)
v3_holder.append(v3)
ops.add_to_collection(ops.GraphKeys.LOSSES, v3 * constant_op.constant(3.))
return array_ops.identity(v1 * v3 * constant_op.constant(1.), 'fetch')
g_wrapped = wrap_function.wrap_function(g, [])
self.assertAllEqual(8.0, g_wrapped())
self.assertEqual(
len(g_wrapped.graph.get_collection(ops.GraphKeys.LOSSES)), 1)
g_var_collection = g_wrapped.graph.get_collection(
ops.GraphKeys.TRAINABLE_VARIABLES)
self.assertEqual(len(g_var_collection), 1)
self.assertIs(g_var_collection[0], v3_holder[0])
# Both have only one value, and their values aren't equal. So no sharing.
self.assertNotEqual(g_wrapped.graph.get_collection(ops.GraphKeys.LOSSES),
f_wrapped.graph.get_collection(ops.GraphKeys.LOSSES))
def testGradientsOfPrune(self):
v1 = variables.Variable(2.)
v2_holder = []
def f(z):
v2 = variables.Variable(3.)
v2_holder.append(v2)
return array_ops.identity(v1 * v2 * z, 'fetch')
f_wrapped = wrap_function.wrap_function(
f, [tensor_spec.TensorSpec((), dtype=dtypes.float32)])
x = constant_op.constant(1.)
with backprop.GradientTape() as tape:
tape.watch(x)
out = f_wrapped(x)
grads = tape.gradient(out, [x, v1, v2_holder[0]])
self.assertAllEqual(6.0, out)
self.assertAllEqual([6.0, 3.0, 2.0], grads)
pruned = f_wrapped.prune(
feeds=f_wrapped.inputs,
fetches=f_wrapped.graph.get_tensor_by_name('fetch:0'))
x = constant_op.constant(1.)
with backprop.GradientTape() as tape:
tape.watch(x)
out = pruned(x)
grads = tape.gradient(out, [x, v1, v2_holder[0]])
self.assertAllEqual(6.0, out)
self.assertAllEqual([6.0, 3.0, 2.0], grads)
def testPruneOperations(self):
v = variables.Variable(0)
def f():
v.assign_add(1, name='increment', read_value=False)
f_wrapped = wrap_function.wrap_function(f, [])
pruned = f_wrapped.prune(
feeds=(),
fetches=(f_wrapped.graph.get_operation_by_name('increment'),))
self.assertEqual((None,), pruned())
self.assertEqual(1, self.evaluate(v))
del f, f_wrapped
def f1():
v.assign_add(
array_ops.placeholder(shape=[], dtype=dtypes.int32, name='step'),
name='increment', read_value=False)
return constant_op.constant(1, name='other')
f_wrapped = wrap_function.wrap_function(f1, [])
increments = f_wrapped.prune(
feeds=(f_wrapped.graph.get_tensor_by_name('step:0')),
fetches=(f_wrapped.graph.get_operation_by_name('increment'),
f_wrapped.graph.get_tensor_by_name('other:0')))
first_output, second_output = increments(constant_op.constant(2))
self.assertEqual(['step:0', 'increment/resource:0'],
[t.name for t in increments.inputs])
self.assertIs(None, first_output)
self.assertEqual(1, second_output.numpy())
self.assertEqual(3, v.numpy())
does_not_increment = f_wrapped.prune(
feeds=(f_wrapped.graph.get_tensor_by_name('step:0')),
fetches=f_wrapped.graph.get_tensor_by_name('other:0'))
self.assertEqual(1, does_not_increment(constant_op.constant(3)).numpy())
self.assertEqual(3, v.numpy())
def testPruneStatefulOpsFromWrappedFunc(self):
v0 = variables.Variable(0)
v1 = variables.Variable(0)
# When we wrap a function, we expect it to be executed with 'tf.Graph`
# rules: it's allowed to prune all ops that are not in transitive fanin of
# the fetches.
def f(x):
v0.assign_add(1, name='increment_v0')
v1.assign_add(1, name='increment_v1')
return x
f_wrapped = wrap_function.wrap_function(f, [1])
self.assertEqual(1, f_wrapped().numpy())
self.assertEqual(0, v0.numpy())
self.assertEqual(0, v1.numpy())
f_wrapped_with_name = wrap_function.wrap_function(f, [2], name='func')
self.assertEqual(2, f_wrapped_with_name().numpy())
self.assertEqual(0, v0.numpy())
self.assertEqual(0, v1.numpy())
def test_function_from_graph_def(self):
@def_function.function
def make_graph_def(x):
return x + 1.
original_func_graph = make_graph_def.get_concrete_function(
tensor_spec.TensorSpec([None, 2], dtypes.float32)).graph
graph_def = original_func_graph.as_graph_def()
revived_function = wrap_function.function_from_graph_def(
graph_def, inputs=original_func_graph.inputs[0].name,
outputs=original_func_graph.outputs[0].name)
self.assertEqual(2., revived_function(constant_op.constant(1.)).numpy())
if __name__ == '__main__':
ops.enable_eager_execution()
test.main()
| 33.285171 | 80 | 0.68186 |
79404d2942ec5d9462b5487f391faa75c03bccf8 | 3,694 | py | Python | hyver/config.py | retr0h/hyver | e17a95d97e43afa2e99dce5a7b2619a1ce330654 | [
"MIT"
] | null | null | null | hyver/config.py | retr0h/hyver | e17a95d97e43afa2e99dce5a7b2619a1ce330654 | [
"MIT"
] | null | null | null | hyver/config.py | retr0h/hyver | e17a95d97e43afa2e99dce5a7b2619a1ce330654 | [
"MIT"
] | null | null | null | # vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright (c) 2017 John Dewey
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to
# deal in the Software without restriction, including without limitation the
# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
# sell copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
import errno
import os
import uuid
from hyver import logger
from hyver import util
HYVER_FILE = 'hyver.yml'
HYVER_DIR = '~/.gilt'
LOG = logger.get_logger(__name__)
class Config(object):
def __init__(self, hyver_file, args={}, command_args={}):
"""
Initialize a new config version one class and returns None.
:param hyver_file: A string containing the path to the Hyver file
to be parsed.
:param args: A dict of options, arguments and commands from the CLI.
:param command_args: A dict of options passed to the subcommand from
the CLI.
:returns: None
"""
self.hyver_file = hyver_file
self.args = args
self.command_args = command_args
self.config = self._get_config()
@property
def acpi(self):
return self.config.get('acpi')
@property
def kernel(self):
return self.config['kernel']
@property
def initrd(self):
return self.config['initrd']
@property
def cmdline(self):
return self.config.get('cmdline')
@property
def mem(self):
return self.config.get('mem', 1024)
@property
def cpus(self):
return self.config.get('cpus', 1)
@property
def net(self):
return '2:0,virtio-net'
@property
def cd(self):
return self.config['cd']
@property
def img_cd(self):
return '3,ahci-cd,{}'.format(self.cd)
@property
def hdd(self):
return self.config['hdd']
@property
def img_hdd(self):
return '4,virtio-blk,{}'.format(self.hdd)
@property
def uuid(self):
return str(uuid.uuid4())
@property
def pci_dev(self):
return '0:0,hostbridge'
@property
def lpc_dev(self):
return 'com1,stdio'
def _get_config(self):
try:
return util.safe_load_file(self.hyver_file)
except IOError:
            msg = 'Unable to find {}. Exiting.'.format(self.hyver_file)
util.sysexit_with_message(msg)
def hyver_file():
return HYVER_FILE
def hyver_dir():
return os.path.expanduser(HYVER_DIR)
def _makedirs(path):
"""
Create a base directory of the provided path and return None.
:param path: A string containing a path to be deconstructed and basedir
created.
:return: None
"""
dirname, _ = os.path.split(path)
try:
os.makedirs(dirname)
except OSError as exc:
if exc.errno == errno.EEXIST:
pass
else:
raise
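# Illustrative example of the hyver.yml this Config expects (keys inferred from
# the properties above; all paths and values here are made up):
#
#   kernel: vmlinuz
#   initrd: initrd.gz
#   cmdline: "earlyprintk=serial console=ttyS0"
#   mem: 1024
#   cpus: 1
#   cd: boot.iso
#   hdd: disk.img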
| 26.198582 | 78 | 0.65647 |
79404dfda67b660f3ddd2b100b101a59af8f25f0 | 1,053 | py | Python | simulator/routers/NwcLeastEnergyRouter.py | seakers/dtnsim | 9ea1da84e0565d97b3ea184facb597302bf4d99e | [
"Apache-2.0"
] | 4 | 2020-03-06T19:39:08.000Z | 2022-02-22T13:12:36.000Z | simulator/routers/NwcLeastEnergyRouter.py | seakers/dtnsim | 9ea1da84e0565d97b3ea184facb597302bf4d99e | [
"Apache-2.0"
] | null | null | null | simulator/routers/NwcLeastEnergyRouter.py | seakers/dtnsim | 9ea1da84e0565d97b3ea184facb597302bf4d99e | [
"Apache-2.0"
] | 2 | 2021-07-30T14:09:20.000Z | 2022-02-27T10:28:58.000Z | """
# ==================================================================================
# Author: Marc Sanchez Net
# Date: 03/25/2019
# Copyright (c) 2019, Jet Propulsion Laboratory.
# ==================================================================================
"""
from simulator.routers.NwcAbstractOpportunisticRouter import NwcAbstractRouter
from simulator.routers.nwc_opportunistic import find_route_energy_estimate3
class NwcLeastEnergyRouter(NwcAbstractRouter):
def find_best_route(self, bundle, first_contact, **kwargs):
# Call routing function
return find_route_energy_estimate3(bundle_size=bundle.data_vol,
contact_plan=self.cp2,
current_time=self.t,
nodes_state=self.state,
source=self.parent.nid,
target=bundle.dest,
preferred_path=self.preferred_path)
| 37.607143 | 84 | 0.475783 |
79404f7fb1104f10432017b9800a40a63783ac61 | 7,173 | py | Python | CMSIS/CoreValidation/Tests/builder.py | DavidLesnjak/CMSIS_5 | e0848410d137758a3356a5ee94ca4501cea708a8 | [
"Apache-2.0"
] | 2,293 | 2016-02-25T06:47:33.000Z | 2022-03-29T16:44:02.000Z | CMSIS/CoreValidation/Tests/builder.py | DavidLesnjak/CMSIS_5 | e0848410d137758a3356a5ee94ca4501cea708a8 | [
"Apache-2.0"
] | 1,125 | 2016-02-27T09:56:01.000Z | 2022-03-31T13:57:05.000Z | CMSIS/CoreValidation/Tests/builder.py | DavidLesnjak/CMSIS_5 | e0848410d137758a3356a5ee94ca4501cea708a8 | [
"Apache-2.0"
] | 1,160 | 2016-02-27T09:06:10.000Z | 2022-03-31T19:06:24.000Z | #! python
import os
import shutil
import sys
from datetime import datetime
from buildutils.builder import Device, Compiler, Axis, Step, BuildStep, RunModelStep, Builder, Filter
OPTIMIZATION = [ 'O1', 'O2', 'Ofast', 'Os', 'Oz' ]
CC_OPT = {
Compiler.AC5: {
'O1': 'O0',
'O2': 'O1',
'Ofast': 'Otime',
'Os': 'O2',
'Oz': 'O3'
},
Compiler.AC6: {
'O1': 'O1',
'O2': 'O2',
'Ofast': 'Ofast',
'Os': 'Os',
'Oz': 'Oz'
},
Compiler.AC6LTM: {
'O1': 'O1',
'O2': 'O2',
'Ofast': 'Ofast',
'Os': 'Os',
'Oz': 'Oz'
},
Compiler.AC6STBL: {
'O1': 'O1',
'O2': 'O2',
'Ofast': 'Ofast',
'Os': 'Os',
'Oz': 'Oz'
},
Compiler.GCC: {
'O1': 'O1',
'O2': 'O2',
'Ofast': 'Ofast',
'Os': 'Os',
'Oz': 'O3'
},
}
CORTEX_M = [
Device.CM0,
Device.CM0PLUS,
Device.CM3,
Device.CM4,
Device.CM4FP,
Device.CM7,
Device.CM7SP,
Device.CM7DP,
Device.CM23,
Device.CM33,
Device.CM23NS,
Device.CM33NS,
Device.CM23S,
Device.CM33S,
Device.CM35P,
Device.CM35PS,
Device.CM35PNS
]
BOOTLOADER = [
Device.CM23NS,
Device.CM33NS,
Device.CM35PNS
]
FVP_MODELS = {
Device.CM0 : { 'cmd': "FVP_MPS2_Cortex-M0", 'args': { 'limit': "1000000000", 'config': "config/ARMCM0_config.txt" } },
Device.CM0PLUS : { 'cmd': "FVP_MPS2_Cortex-M0plus", 'args': { 'limit': "1000000000", 'config': "config/ARMCM0plus_config.txt" } },
Device.CM3 : { 'cmd': "FVP_MPS2_Cortex-M3", 'args': { 'limit': "1000000000", 'config': "config/ARMCM3_config.txt" } },
Device.CM4 : { 'cmd': "FVP_MPS2_Cortex-M4", 'args': { 'limit': "1000000000", 'config': "config/ARMCM4_config.txt" } },
Device.CM4FP : { 'cmd': "FVP_MPS2_Cortex-M4", 'args': { 'limit': "1000000000", 'config': "config/ARMCM4FP_config.txt" } },
Device.CM7 : { 'cmd': "FVP_MPS2_Cortex-M7", 'args': { 'limit': "1000000000", 'config': "config/ARMCM7_config.txt" } },
Device.CM7SP : { 'cmd': "FVP_MPS2_Cortex-M7", 'args': { 'limit': "1000000000", 'config': "config/ARMCM7SP_config.txt" } },
Device.CM7DP : { 'cmd': "FVP_MPS2_Cortex-M7", 'args': { 'limit': "1000000000", 'config': "config/ARMCM7DP_config.txt" } },
Device.CM23 : { 'cmd': "FVP_MPS2_Cortex-M23", 'args': { 'limit': "1000000000", 'config': "config/ARMCM23_config.txt", 'target': "cpu0" } },
Device.CM33 : { 'cmd': "FVP_MPS2_Cortex-M33", 'args': { 'limit': "1000000000", 'config': "config/ARMCM33_config.txt", 'target': "cpu0" } },
Device.CM23NS : { 'cmd': "FVP_MPS2_Cortex-M23", 'args': { 'limit': "1000000000", 'config': "config/ARMCM23_TZ_config.txt", 'target': "cpu0" } },
Device.CM33NS : { 'cmd': "FVP_MPS2_Cortex-M33", 'args': { 'limit': "1000000000", 'config': "config/ARMCM33_DSP_FP_TZ_config.txt", 'target': "cpu0" } },
Device.CM23S : { 'cmd': "FVP_MPS2_Cortex-M23", 'args': { 'limit': "1000000000", 'config': "config/ARMCM23_TZ_config.txt", 'target': "cpu0" } },
Device.CM33S : { 'cmd': "FVP_MPS2_Cortex-M33", 'args': { 'limit': "1000000000", 'config': "config/ARMCM33_DSP_FP_TZ_config.txt", 'target': "cpu0" } },
Device.CM35P : { 'cmd': "FVP_MPS2_Cortex-M35P", 'args': { 'limit': "1000000000", 'config': "config/ARMCM35P_config.txt", 'target': "cpu0" } },
Device.CM35PS : { 'cmd': "FVP_MPS2_Cortex-M35P", 'args': { 'limit': "1000000000", 'config': "config/ARMCM35P_DSP_FP_TZ_config.txt", 'target': "cpu0" } },
Device.CM35PNS : { 'cmd': "FVP_MPS2_Cortex-M35P", 'args': { 'limit': "1000000000", 'config': "config/ARMCM35P_DSP_FP_TZ_config.txt", 'target': "cpu0" } },
Device.CA5 : { 'cmd': "FVP_VE_Cortex-A5x1", 'args': { 'limit': "1000000000", 'config': "config/ARMCA5_config.txt" } },
Device.CA7 : { 'cmd': "FVP_VE_Cortex-A7x1", 'args': { 'limit': "1000000000", 'config': "config/ARMCA7_config.txt" } },
Device.CA9 : { 'cmd': "FVP_VE_Cortex-A9x1", 'args': { 'limit': "1000000000", 'config': "config/ARMCA9_config.txt" } },
Device.CA5NEON : { 'cmd': "FVP_VE_Cortex-A5x1", 'args': { 'limit': "1000000000", 'config': "config/ARMCA5neon_config.txt" } },
Device.CA7NEON : { 'cmd': "FVP_VE_Cortex-A7x1", 'args': { 'limit': "1000000000", 'config': "config/ARMCA7neon_config.txt" } },
Device.CA9NEON : { 'cmd': "FVP_VE_Cortex-A9x1", 'args': { 'limit': "1000000000", 'config': "config/ARMCA9neon_config.txt" } }
}
def projects(step, config):
result = [ str(config['compiler']).lower()+".rtebuild" ]
if config['device'] in BOOTLOADER:
result += [ "bootloader/"+str(config['compiler']).lower()+".rtebuild" ]
return result
def images(step, config):
result = [ "build/arm{dev}/arm{dev}.elf".format(dev=config['device'].value[1].lower()) ]
if config['device'] in BOOTLOADER:
result += [ "bootloader/build/arm{dev}/arm{dev}.elf".format(dev=config['device'].value[1].lower()) ]
return result
def storeResult(step, config, cmd):
result = "result_{cc}_{dev}_{opt}_{now}.junit".format(dev=config['device'], cc=config['compiler'], opt=config['optimize'],now=datetime.now().strftime("%Y%m%d%H%M%S"))
resultfile = step.storeJunitResult(cmd, result, "{cc}.{dev}.{opt}".format(dev=config['device'], cc=config['compiler'], opt=config['optimize']))
if not resultfile:
cmd.appendOutput("Storing results failed!");
cmd.forceResult(1)
def add_options(step, config, cmd):
cmd._options['optimize'] = CC_OPT[config['compiler']][config['optimize']]
def create():
deviceAxis = Axis("device", abbrev="d", values=Device, desc="Device(s) to be considered.")
compilerAxis = Axis("compiler", abbrev="c", values=Compiler, desc="Compiler(s) to be considered.")
optimizeAxis = Axis("optimize", abbrev="o", values=OPTIMIZATION , desc="Optimization level(s) to be considered.")
buildStep = BuildStep("build", abbrev="b", desc="Build the selected configurations.")
buildStep.projects = projects
buildStep.target = lambda step, config: "arm"+config['device'].value[1].lower()
buildStep.pre = add_options
runStep = RunModelStep("run", abbrev="r", desc="Run the selected configurations.")
runStep.images = images
runStep.model = lambda step, config: FVP_MODELS[config['device']]
runStep.post = storeResult
debugStep = RunModelStep("debug", abbrev="d", desc="Debug the selected configurations.")
debugStep.images = images
debugStep.args = lambda step, config: { 'cadi' : True }
debugStep.model = lambda step, config: FVP_MODELS[config['device']]
filterAC5 = Filter().addAxis(compilerAxis, Compiler.AC5).addAxis(deviceAxis, "CM[23]3*")
filterAC6LTM = Filter().addAxis(compilerAxis, Compiler.AC6LTM).addAxis(deviceAxis, "CM35P*")
builder = Builder()
builder.addAxis([ compilerAxis, deviceAxis, optimizeAxis ])
builder.addStep([ buildStep, runStep, debugStep ])
builder.addFilter([ filterAC5, filterAC6LTM ])
return builder
def complete(builder, success):
builder.saveJunitResult("build_{now}.junit".format(now = datetime.now().strftime("%Y%m%d%H%M%S")))
| 45.980769 | 168 | 0.615921 |
794050fbef34ae04c9cdb6df07766c2e62ac1d7c | 15,664 | py | Python | framework/PostProcessors/LimitSurfaceIntegral.py | milljm/raven | 5f29fe81b75e2ffbeb54a55aa63647e7b2f6457b | [
"Apache-2.0"
] | null | null | null | framework/PostProcessors/LimitSurfaceIntegral.py | milljm/raven | 5f29fe81b75e2ffbeb54a55aa63647e7b2f6457b | [
"Apache-2.0"
] | null | null | null | framework/PostProcessors/LimitSurfaceIntegral.py | milljm/raven | 5f29fe81b75e2ffbeb54a55aa63647e7b2f6457b | [
"Apache-2.0"
] | null | null | null | # Copyright 2017 Battelle Energy Alliance, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Created on July 10, 2013
@author: alfoa
"""
from __future__ import division, print_function , unicode_literals, absolute_import
import warnings
warnings.simplefilter('default', DeprecationWarning)
#External Modules------------------------------------------------------------------------------------
import numpy as np
import xarray
import math
import os
#External Modules End--------------------------------------------------------------------------------
#Internal Modules------------------------------------------------------------------------------------
from .PostProcessor import PostProcessor
from .BasicStatistics import BasicStatistics
from utils import InputData
import LearningGate
import Files
import Runners
#Internal Modules End--------------------------------------------------------------------------------
class LimitSurfaceIntegral(PostProcessor):
"""
This post-processor computes the n-dimensional integral of a Limit Surface
"""
@classmethod
def getInputSpecification(cls):
"""
Method to get a reference to a class that specifies the input data for
class cls.
@ In, cls, the class for which we are retrieving the specification
@ Out, inputSpecification, InputData.ParameterInput, class to use for
specifying input of cls.
"""
## This will replace the lines above
inputSpecification = super(LimitSurfaceIntegral, cls).getInputSpecification()
LSIVariableInput = InputData.parameterInputFactory("variable")
LSIVariableInput.addParam("name", InputData.StringType)
LSIDistributionInput = InputData.parameterInputFactory("distribution", contentType=InputData.StringType)
LSIVariableInput.addSub(LSIDistributionInput)
LSILowerBoundInput = InputData.parameterInputFactory("lowerBound", contentType=InputData.FloatType)
LSIVariableInput.addSub(LSILowerBoundInput)
LSIUpperBoundInput = InputData.parameterInputFactory("upperBound", contentType=InputData.FloatType)
LSIVariableInput.addSub(LSIUpperBoundInput)
inputSpecification.addSub(LSIVariableInput)
LSIToleranceInput = InputData.parameterInputFactory("tolerance", contentType=InputData.FloatType)
inputSpecification.addSub(LSIToleranceInput)
LSIIntegralTypeInput = InputData.parameterInputFactory("integralType", contentType=InputData.StringType)
inputSpecification.addSub(LSIIntegralTypeInput)
LSISeedInput = InputData.parameterInputFactory("seed", contentType=InputData.IntegerType)
inputSpecification.addSub(LSISeedInput)
LSITargetInput = InputData.parameterInputFactory("target", contentType=InputData.StringType)
inputSpecification.addSub(LSITargetInput)
LSIOutputNameInput = InputData.parameterInputFactory("outputName", contentType=InputData.StringType)
inputSpecification.addSub(LSIOutputNameInput)
return inputSpecification
def __init__(self, messageHandler):
"""
Constructor
@ In, messageHandler, MessageHandler, message handler object
@ Out, None
"""
PostProcessor.__init__(self, messageHandler)
self.variableDist = {} # dictionary created upon the .xml input file reading. It stores the distributions for each variable.
self.target = None # target that defines the f(x1,x2,...,xn)
self.tolerance = 0.0001 # integration tolerance
self.integralType = 'montecarlo' # integral type (which alg needs to be used). Either montecarlo or quadrature(quadrature not yet)
self.seed = 20021986 # seed for montecarlo
self.matrixDict = {} # dictionary of arrays and target
self.lowerUpperDict = {}
self.functionS = None
self.computationPrefix = None
self.stat = BasicStatistics(self.messageHandler) # instantiation of the 'BasicStatistics' processor, which is used to compute the pb given montecarlo evaluations
self.stat.what = ['expectedValue']
self.addAssemblerObject('Distribution','n', newXmlFlg = False)
self.printTag = 'POSTPROCESSOR INTEGRAL'
def _localWhatDoINeed(self):
"""
This method is a local mirror of the general whatDoINeed method.
It is implemented by this postprocessor that need to request special objects
@ In, None
@ Out, needDict, dict, list of objects needed
"""
needDict = {'Distributions':[]}
for distName in self.variableDist.values():
if distName != None:
needDict['Distributions'].append((None, distName))
return needDict
def _localGenerateAssembler(self, initDict):
"""
This method is used for sending to the instanciated class, which is implementing the method, the objects that have been requested through "whatDoINeed" method
It is an abstract method -> It must be implemented in the derived class!
@ In, initDict, dict, dictionary ({'mainClassName(e.g., Databases):{specializedObjectName(e.g.,DatabaseForSystemCodeNamedWolf):ObjectInstance}'})
@ Out, None
"""
for varName, distName in self.variableDist.items():
if distName != None:
if distName not in initDict['Distributions'].keys():
self.raiseAnError(IOError, 'distribution ' + distName + ' not found.')
self.variableDist[varName] = initDict['Distributions'][distName]
self.lowerUpperDict[varName]['lowerBound'] = self.variableDist[varName].lowerBound
self.lowerUpperDict[varName]['upperBound'] = self.variableDist[varName].upperBound
def _localReadMoreXML(self, xmlNode):
"""
Function to read the portion of the xml input that belongs to this specialized class
and initialize some stuff based on the inputs got
@ In, xmlNode, xml.etree.Element, Xml element node
@ Out, None
"""
paramInput = LimitSurfaceIntegral.getInputSpecification()()
paramInput.parseNode(xmlNode)
self._handleInput(paramInput)
def _handleInput(self, paramInput):
"""
Function to handle the parsed paramInput for this class.
@ In, paramInput, ParameterInput, the already parsed input.
@ Out, None
"""
for child in paramInput.subparts:
varName = None
if child.getName() == 'variable':
varName = child.parameterValues['name']
self.lowerUpperDict[varName] = {}
self.variableDist[varName] = None
for childChild in child.subparts:
if childChild.getName() == 'distribution':
self.variableDist[varName] = childChild.value
elif childChild.getName() == 'lowerBound':
if self.variableDist[varName] != None:
self.raiseAnError(NameError, 'you can not specify both distribution and lower/upper bounds nodes for variable ' + varName + ' !')
self.lowerUpperDict[varName]['lowerBound'] = childChild.value
elif childChild.getName() == 'upperBound':
if self.variableDist[varName] != None:
self.raiseAnError(NameError, 'you can not specify both distribution and lower/upper bounds nodes for variable ' + varName + ' !')
self.lowerUpperDict[varName]['upperBound'] = childChild.value
else:
self.raiseAnError(NameError, 'invalid labels after the variable call. Only "distribution", "lowerBound" abd "upperBound" is accepted. tag: ' + child.getName())
elif child.getName() == 'tolerance':
try:
self.tolerance = child.value
except ValueError:
self.raiseAnError(ValueError, "tolerance can not be converted into a float value!")
elif child.getName() == 'integralType':
self.integralType = child.value.strip().lower()
if self.integralType not in ['montecarlo']:
self.raiseAnError(IOError, 'only one integral types are available: MonteCarlo!')
elif child.getName() == 'seed':
try:
self.seed = child.value
except ValueError:
self.raiseAnError(ValueError, 'seed can not be converted into a int value!')
if self.integralType != 'montecarlo':
self.raiseAWarning('integral type is ' + self.integralType + ' but a seed has been inputted!!!')
else:
np.random.seed(self.seed)
elif child.getName() == 'target':
self.target = child.value
elif child.getName() == 'outputName':
self.computationPrefix = child.value
else:
self.raiseAnError(NameError, 'invalid or missing labels after the variables call. Only "variable" is accepted.tag: ' + child.getName())
# if no distribution, we look for the integration domain in the input
if varName != None:
if self.variableDist[varName] == None:
if 'lowerBound' not in self.lowerUpperDict[varName].keys() or 'upperBound' not in self.lowerUpperDict[varName].keys():
self.raiseAnError(NameError, 'either a distribution name or lowerBound and upperBound need to be specified for variable ' + varName)
if self.computationPrefix == None:
self.raiseAnError(IOError,'The required XML node <outputName> has not been inputted!!!')
if self.target == None:
self.raiseAWarning('integral target has not been provided. The postprocessor is going to take the last output it finds in the provided limitsurface!!!')
def initialize(self, runInfo, inputs, initDict):
"""
Method to initialize the pp.
@ In, runInfo, dict, dictionary of run info (e.g. working dir, etc)
@ In, inputs, list, list of inputs
@ In, initDict, dict, dictionary with initialization options
@ Out, None
"""
self.inputToInternal(inputs)
if self.integralType in ['montecarlo']:
self.stat.toDo = {'expectedValue':[{'targets':set([self.target]), 'prefix':self.computationPrefix}]}
self.stat.initialize(runInfo, inputs, initDict)
self.functionS = LearningGate.returnInstance('SupervisedGate','SciKitLearn', self, **{'SKLtype':'neighbors|KNeighborsClassifier', 'Features':','.join(list(self.variableDist.keys())), 'Target':self.target})
self.functionS.train(self.matrixDict)
self.raiseADebug('DATA SET MATRIX:')
self.raiseADebug(self.matrixDict)
def inputToInternal(self, currentInput):
"""
Method to convert an input object into the internal format that is
understandable by this pp.
The resulting converted object is stored as an attribute of this class
@ In, currentInput, object, an object that needs to be converted
@ Out, None
"""
if len(currentInput) > 1:
self.raiseAnError(IOError,"This PostProcessor can accept only a single input! Got: "+ str(len(currentInput))+"!")
item = currentInput[0]
if item.type == 'PointSet':
if not set(item.getVars('input')) == set(self.variableDist.keys()):
self.raiseAnError(IOError, 'The variables inputted and the features in the input PointSet ' + item.name + 'do not match!!!')
outputKeys = item.getVars('output')
if self.target is None:
self.target = utils.first(outputKeys)
elif self.target not in outputKeys:
self.raiseAnError(IOError, 'The target ' + self.target + 'is not present among the outputs of the PointSet ' + item.name)
# construct matrix
dataSet = item.asDataset()
self.matrixDict = {varName: dataSet[varName].values for varName in self.variableDist}
responseArray = dataSet[self.target].values
if len(np.unique(responseArray)) != 2:
self.raiseAnError(IOError, 'The target ' + self.target + ' needs to be a classifier output (-1 +1 or 0 +1)!')
responseArray[responseArray == -1] = 0.0
self.matrixDict[self.target] = responseArray
else:
self.raiseAnError(IOError, 'Only PointSet is accepted as input!!!!')
def run(self, input):
"""
This method executes the postprocessor action. In this case, it performs the computation of the LS integral
@ In, input, object, object contained the data to process. (inputToInternal output)
@ Out, pb, float, integral outcome (probability of the event)
"""
pb = None
if self.integralType == 'montecarlo':
tempDict = {}
randomMatrix = np.random.rand(int(math.ceil(1.0 / self.tolerance**2)), len(self.variableDist.keys()))
for index, varName in enumerate(self.variableDist.keys()):
if self.variableDist[varName] == None:
randomMatrix[:, index] = randomMatrix[:, index] * (self.lowerUpperDict[varName]['upperBound'] - self.lowerUpperDict[varName]['lowerBound']) + self.lowerUpperDict[varName]['lowerBound']
else:
          f = np.vectorize(self.variableDist[varName].ppf, otypes=[float])
randomMatrix[:, index] = f(randomMatrix[:, index])
tempDict[varName] = randomMatrix[:, index]
pb = self.stat.run({'targets':{self.target:xarray.DataArray(self.functionS.evaluate(tempDict)[self.target])}})[self.computationPrefix +"_"+self.target]
else:
      self.raiseAnError(NotImplementedError, "quadrature not yet implemented")
return pb
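  # Illustrative standalone sketch (not RAVEN code) of the Monte Carlo estimate
  # performed above: the integral of a 0/1 limit-surface indicator is just the
  # mean of the classifier output over random samples, e.g.
  #
  #   import numpy as np
  #   samples = np.random.rand(100000, 2)                       # unit square
  #   indicator = (samples[:, 0] + samples[:, 1] > 1.0).astype(float)
  #   pb_estimate = indicator.mean()                            # ~0.5 for this toy event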
def collectOutput(self, finishedJob, output):
"""
Function to place all of the computed data into the output object
@ In, finishedJob, JobHandler External or Internal instance, A JobHandler object that is in charge of running this post-processor
@ In, output, dataObjects, The object where we want to place our computed results
@ Out, None
"""
evaluation = finishedJob.getEvaluation()
if isinstance(evaluation, Runners.Error):
self.raiseAnError(RuntimeError, "No available output to collect (run possibly not finished yet)")
pb = evaluation[1]
lms = evaluation[0][0]
if output.type == 'PointSet':
# we store back the limitsurface
dataSet = lms.asDataset()
loadDict = {key: dataSet[key].values for key in lms.getVars()}
loadDict[self.computationPrefix] = np.full(len(lms), pb)
output.load(loadDict,'dict')
# NB I keep this commented part in case we want to keep the possibility to have outputfiles for PP
#elif isinstance(output,Files.File):
# headers = lms.getParaKeys('inputs') + lms.getParaKeys('outputs')
# if 'EventProbability' not in headers:
# headers += ['EventProbability']
# stack = [None] * len(headers)
# output.close()
# # If the file already exist, we will erase it.
# if os.path.exists(output.getAbsFile()):
# self.raiseAWarning('File %s already exists, this file will be erased!' %output.getAbsFile())
# output.open('w')
# output.close()
# outIndex = 0
# for key, value in lms.getParametersValues('input').items():
# stack[headers.index(key)] = np.asarray(value).flatten()
# for key, value in lms.getParametersValues('output').items():
# stack[headers.index(key)] = np.asarray(value).flatten()
# outIndex = headers.index(key)
# stack[headers.index('EventProbability')] = np.array([pb] * len(stack[outIndex])).flatten()
# stacked = np.column_stack(stack)
# np.savetxt(output, stacked, delimiter = ',', header = ','.join(headers),comments='')
# #N.B. without comments='' you get a "# " at the top of the header row
else:
self.raiseAnError(Exception, self.type + ' accepts PointSet only')
| 49.88535 | 209 | 0.683797 |
79405402c3e6a991d39a4ae082a2039ff549ebd1 | 1,204 | py | Python | mfanalysis/utils.py | omardrwch/mfanalysis | 36c00e0d4ee346bb6dddc5776a86c8c2ebd4501a | [
"MIT"
] | 3 | 2019-04-11T15:38:25.000Z | 2021-04-16T16:30:34.000Z | mfanalysis/utils.py | omardrwch/mfanalysis | 36c00e0d4ee346bb6dddc5776a86c8c2ebd4501a | [
"MIT"
] | null | null | null | mfanalysis/utils.py | omardrwch/mfanalysis | 36c00e0d4ee346bb6dddc5776a86c8c2ebd4501a | [
"MIT"
] | 3 | 2018-07-09T07:28:33.000Z | 2021-07-26T12:40:52.000Z | from __future__ import print_function
from __future__ import unicode_literals
import numpy as np
class Utils:
def __init__(self):
pass
def linear_regression(self, x, y, nj, return_variance = False):
"""
Performs a (weighted or not) linear regression.
Finds 'a' that minimizes the error:
sum_j { n[j]*(y[j] - (a*x[j] + b))**2 }
Args:
x, y : regression variables
nj: list containg the weigths
Returns:
a, b: angular coefficient and intercept
(!!!!!!!!!!!!!)
IMPORTANT:
return_variance NOT DEBUGGED
(!!!!!!!!!!!!!)
"""
        bj = np.array(nj, dtype=float)  # np.float was removed from NumPy; the builtin float is equivalent
assert len(bj) == len(x)
V_0 = np.sum(bj)
V_1 = np.sum(bj * x)
V_2 = np.sum(bj * (x**2))
weights_slope = bj * (V_0*x - V_1)/(V_0*V_2 - V_1*V_1)
weights_intercept = bj * (V_2 - V_1*x)/(V_0*V_2 - V_1*V_1)
a = np.sum(weights_slope*y)
b = np.sum(weights_intercept*y)
var_a = np.sum((1/bj)*weights_slope*weights_slope)
if not return_variance:
return a, b
else:
return a, b, var_a
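# Illustrative usage (not part of the original module; the numbers are chosen
# for the example). With equal weights the exact line is recovered:
#
#   u = Utils()
#   x = np.array([0.0, 1.0, 2.0, 3.0])
#   y = 2.0 * x + 1.0
#   a, b = u.linear_regression(x, y, nj=[1, 1, 1, 1])   # -> a == 2.0, b == 1.0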
| 24.571429 | 67 | 0.527409 |
7940546813233f6d66e3857eb7b0e4b6a37afdca | 3,425 | py | Python | MindFuck/mindfuck.py | tixcode/mindfuck | 9737318a246c49adcda5b02596677cb0964189ce | [
"MIT"
] | 2 | 2021-12-25T18:16:16.000Z | 2022-01-10T10:38:43.000Z | MindFuck/mindfuck.py | tixcode/mindfuck | 9737318a246c49adcda5b02596677cb0964189ce | [
"MIT"
] | 5 | 2021-12-25T18:24:06.000Z | 2022-01-05T16:48:27.000Z | MindFuck/mindfuck.py | tixcode/mindfuck | 9737318a246c49adcda5b02596677cb0964189ce | [
"MIT"
] | 2 | 2021-12-25T18:25:50.000Z | 2022-01-10T10:37:24.000Z | import sys
import time
import random
from colorama import init, Fore, Style, Back
init(autoreset=True)
def Error(text: str):
print(text)
input('Click enter to exit from a program...')
exit()
def AnimationPrinting(text: str, secs: int):
for symbol in text:
print(symbol, flush=True, end="")
time.sleep(secs)
print("")
slots = []
special = ""
cursor_position = 0
for i in range(0, 5000):
slots.append(0)
fileN = input("Write a script file-name: ")
if fileN == "":
Error("Script file-name can't be nothing!")
else:
try:
with open(fileN, 'r', encoding='utf-8') as file:
code = file.read()
except:
Error("Uncorrect script file-name!")
AnimationPrinting("Creating a int-array with 5000 slots...", 0.05)
time.sleep(0.5)
AnimationPrinting("Created!", 0.05)
index = 0
while index < len(code):
symbol = code[index]
match symbol:
case ">":
cursor_position += 1
cursor_position = min(cursor_position, 5000 - 1)
case "<":
cursor_position -= 1
cursor_position = max(cursor_position, 0)
case "?":
value = input("")
if value != "":
slots[cursor_position] = ord(value[0])
case ".":
print(special+chr(slots[cursor_position]), end="")
case "+":
slots[cursor_position] += 1
case "-":
slots[cursor_position] -= 1
case "*":
slots[cursor_position] += 10
case "/":
slots[cursor_position] -= 10
case "$":
slots[cursor_position] += 100
case "#":
slots[cursor_position] -= 100
case "^":
slots[cursor_position] += 1000
case "!":
slots[cursor_position] -= 1000
case "g":
special = "%s" % Fore.GREEN
case "r":
special = "%s" % Fore.RED
case "y":
special = "%s" % Fore.YELLOW
case "b":
special = "%s" % Fore.BLUE
case ":":
cursor_position += 10
if cursor_position > 5000:
cursor_position = 5000
case ";":
cursor_position -= 10
if cursor_position < 1:
cursor_position = 1
case "[":
if slots[cursor_position] == 0:
counter = 1
while counter:
index += 1
char = code[index]
match char:
case "[": counter += 1
case "]": counter -= 1
case "]":
if slots[cursor_position] != 0:
counter = 1
while counter:
index -= 1
char = code[index]
match char:
case "[": counter -= 1
case "]": counter += 1
case _: pass
index += 1
print("")
AnimationPrinting("Program is finished!", 0.05)
input("Click enter to exit from a program...")
| 31.136364 | 71 | 0.434161 |
7940550e80aff2bcb75224d7d73b2c77ed4502da | 4,255 | py | Python | pysnmp/MITEL-APPLICATION-PLATFORM-LIST-MIB.py | agustinhenze/mibs.snmplabs.com | 1fc5c07860542b89212f4c8ab807057d9a9206c7 | [
"Apache-2.0"
] | 11 | 2021-02-02T16:27:16.000Z | 2021-08-31T06:22:49.000Z | pysnmp/MITEL-APPLICATION-PLATFORM-LIST-MIB.py | agustinhenze/mibs.snmplabs.com | 1fc5c07860542b89212f4c8ab807057d9a9206c7 | [
"Apache-2.0"
] | 75 | 2021-02-24T17:30:31.000Z | 2021-12-08T00:01:18.000Z | pysnmp/MITEL-APPLICATION-PLATFORM-LIST-MIB.py | agustinhenze/mibs.snmplabs.com | 1fc5c07860542b89212f4c8ab807057d9a9206c7 | [
"Apache-2.0"
] | 10 | 2019-04-30T05:51:36.000Z | 2022-02-16T03:33:41.000Z | #
# PySNMP MIB module MITEL-APPLICATION-PLATFORM-LIST-MIB (http://snmplabs.com/pysmi)
# ASN.1 source file:///Users/davwang4/Dev/mibs.snmplabs.com/asn1/MITEL-APPLICATION-PLATFORM-LIST-MIB
# Produced by pysmi-0.3.4 at Mon Apr 29 20:02:47 2019
# On host DAVWANG4-M-1475 platform Darwin version 18.5.0 by user davwang4
# Using Python version 3.7.3 (default, Mar 27 2019, 09:23:15)
#
OctetString, ObjectIdentifier, Integer = mibBuilder.importSymbols("ASN1", "OctetString", "ObjectIdentifier", "Integer")
NamedValues, = mibBuilder.importSymbols("ASN1-ENUMERATION", "NamedValues")
ConstraintsUnion, SingleValueConstraint, ConstraintsIntersection, ValueSizeConstraint, ValueRangeConstraint = mibBuilder.importSymbols("ASN1-REFINEMENT", "ConstraintsUnion", "SingleValueConstraint", "ConstraintsIntersection", "ValueSizeConstraint", "ValueRangeConstraint")
mitelIdentification, = mibBuilder.importSymbols("MITEL-MIB", "mitelIdentification")
ModuleCompliance, NotificationGroup = mibBuilder.importSymbols("SNMPv2-CONF", "ModuleCompliance", "NotificationGroup")
ModuleIdentity, Gauge32, Counter64, Unsigned32, Bits, MibIdentifier, ObjectIdentity, IpAddress, NotificationType, MibScalar, MibTable, MibTableRow, MibTableColumn, Counter32, Integer32, iso, TimeTicks = mibBuilder.importSymbols("SNMPv2-SMI", "ModuleIdentity", "Gauge32", "Counter64", "Unsigned32", "Bits", "MibIdentifier", "ObjectIdentity", "IpAddress", "NotificationType", "MibScalar", "MibTable", "MibTableRow", "MibTableColumn", "Counter32", "Integer32", "iso", "TimeTicks")
TextualConvention, DisplayString = mibBuilder.importSymbols("SNMPv2-TC", "TextualConvention", "DisplayString")
mitelIdApplicationPlatforms = ModuleIdentity((1, 3, 6, 1, 4, 1, 1027, 1, 6))
mitelIdApplicationPlatforms.setRevisions(('2006-08-10 00:00', '2005-08-24 21:34',))
if mibBuilder.loadTexts: mitelIdApplicationPlatforms.setLastUpdated('200608100000Z')
if mibBuilder.loadTexts: mitelIdApplicationPlatforms.setOrganization('MITEL Networks Corporation')
mitelIdAppPlatManagementApplicationServer = ObjectIdentity((1, 3, 6, 1, 4, 1, 1027, 1, 6, 1))
if mibBuilder.loadTexts: mitelIdAppPlatManagementApplicationServer.setStatus('current')
mitelIdAppPlatLXTelephonyServer = ObjectIdentity((1, 3, 6, 1, 4, 1, 1027, 1, 6, 2))
if mibBuilder.loadTexts: mitelIdAppPlatLXTelephonyServer.setStatus('current')
mitelIdAppPlatMXTelephonyServer = ObjectIdentity((1, 3, 6, 1, 4, 1, 1027, 1, 6, 3))
if mibBuilder.loadTexts: mitelIdAppPlatMXTelephonyServer.setStatus('current')
mitelIdAppPlatCXTelephonyServer = ObjectIdentity((1, 3, 6, 1, 4, 1, 1027, 1, 6, 4))
if mibBuilder.loadTexts: mitelIdAppPlatCXTelephonyServer.setStatus('current')
mitelIdAppPlatCXiTelephonyServer = ObjectIdentity((1, 3, 6, 1, 4, 1, 1027, 1, 6, 5))
if mibBuilder.loadTexts: mitelIdAppPlatCXiTelephonyServer.setStatus('current')
mitelIdAppPlatLiteTelephonyServer = ObjectIdentity((1, 3, 6, 1, 4, 1, 1027, 1, 6, 6))
if mibBuilder.loadTexts: mitelIdAppPlatLiteTelephonyServer.setStatus('current')
mitelIdAppPlatMXeTelephonyServer = ObjectIdentity((1, 3, 6, 1, 4, 1, 1027, 1, 6, 7))
if mibBuilder.loadTexts: mitelIdAppPlatMXeTelephonyServer.setStatus('current')
mitelIdAppPlatAXTelephonyServer = ObjectIdentity((1, 3, 6, 1, 4, 1, 1027, 1, 6, 8))
if mibBuilder.loadTexts: mitelIdAppPlatAXTelephonyServer.setStatus('current')
mitelIdAppPlatMXTTelephonyServer = ObjectIdentity((1, 3, 6, 1, 4, 1, 1027, 1, 6, 9))
if mibBuilder.loadTexts: mitelIdAppPlatMXTTelephonyServer.setStatus('current')
mibBuilder.exportSymbols("MITEL-APPLICATION-PLATFORM-LIST-MIB", mitelIdAppPlatLXTelephonyServer=mitelIdAppPlatLXTelephonyServer, mitelIdAppPlatLiteTelephonyServer=mitelIdAppPlatLiteTelephonyServer, mitelIdAppPlatCXiTelephonyServer=mitelIdAppPlatCXiTelephonyServer, PYSNMP_MODULE_ID=mitelIdApplicationPlatforms, mitelIdAppPlatMXTelephonyServer=mitelIdAppPlatMXTelephonyServer, mitelIdAppPlatAXTelephonyServer=mitelIdAppPlatAXTelephonyServer, mitelIdAppPlatMXTTelephonyServer=mitelIdAppPlatMXTTelephonyServer, mitelIdApplicationPlatforms=mitelIdApplicationPlatforms, mitelIdAppPlatManagementApplicationServer=mitelIdAppPlatManagementApplicationServer, mitelIdAppPlatCXTelephonyServer=mitelIdAppPlatCXTelephonyServer, mitelIdAppPlatMXeTelephonyServer=mitelIdAppPlatMXeTelephonyServer)
| 111.973684 | 781 | 0.818566 |
7940559c5cf0e4c3ea348b0cb733fd0ed2504a58 | 655 | py | Python | Day 14/day14pt1.py | sjgg555/aoc2021 | bf3a84a85949b65558a749aa0b39fb5b79b850ac | [
"MIT"
] | null | null | null | Day 14/day14pt1.py | sjgg555/aoc2021 | bf3a84a85949b65558a749aa0b39fb5b79b850ac | [
"MIT"
] | null | null | null | Day 14/day14pt1.py | sjgg555/aoc2021 | bf3a84a85949b65558a749aa0b39fb5b79b850ac | [
"MIT"
] | null | null | null | from day14data import simple_test_data, data
import numpy as np
from collections import Counter
#data = simple_test_data
steps = 10
template = data[0]
start, end = np.array(data[1]).T
result = template
# naive expansion: rebuild the whole polymer string at every step
for step in range(steps):
    step_result = ""
    for i in range(len(result)):
        search = result[i:i+2]  # current pair (the last iteration is a single trailing char)
        try:
            idx = np.where(start == search)[0][0]
            addition = search[0] + end[idx]  # keep the first element, insert the mapped one
        except:
            addition = search[0]  # no matching rule / end of string: keep the char as-is
        step_result += addition
    result = step_result
res_count = Counter(result).most_common(26)
most = res_count[0][1]
least = res_count[-1][1]
print(f"result = {most - least}") | 24.259259 | 49 | 0.630534 |
79405802e7a7590ab33eddf60b69888db5266694 | 6,677 | py | Python | apidocs/retrieve.py | gabeorlanski/external-knowledge-codegen | 3e1e3581f5774ca51d79c97e768eb5227e7e3dba | [
"Apache-2.0"
] | 80 | 2020-04-17T06:16:40.000Z | 2022-02-16T13:24:48.000Z | apidocs/retrieve.py | gabeorlanski/external-knowledge-codegen | 3e1e3581f5774ca51d79c97e768eb5227e7e3dba | [
"Apache-2.0"
] | 5 | 2020-06-17T14:55:15.000Z | 2021-11-05T11:56:46.000Z | apidocs/retrieve.py | gabeorlanski/external-knowledge-codegen | 3e1e3581f5774ca51d79c97e768eb5227e7e3dba | [
"Apache-2.0"
] | 10 | 2020-05-03T13:00:03.000Z | 2021-08-20T04:13:06.000Z | from typing import List
import json
import pprint
import sys
import argparse
import re
from tqdm import tqdm
import string
from collections import defaultdict
from elasticsearch import Elasticsearch
import operator
import numpy as np
import pickle
#PUNCT_TO_SPACE = dict(zip(list(string.punctuation), list(' ' * len(string.punctuation))))
PUNCT_TO_SPACE = str.maketrans(string.punctuation, ' ' * len(string.punctuation))
def softmax(x):
e_x = np.exp(x - np.max(x))
return e_x / e_x.sum()
def log_softmax(x):
return np.log(softmax(x))
class ESSearcher():
def __init__(self, index_name: str):
self.es = Elasticsearch()
self.index_name = index_name
def query_format(self, query_str: str, field: str):
new_query_str = query_str.translate(PUNCT_TO_SPACE)
new_query_str = ' '.join([w for w in new_query_str.split() if re.match('^[0-9A-Za-z]+$', w)])
new_query_str = new_query_str.replace(' AND ', ' ').replace(' and ', ' ')
        # if len(query_str) - len(new_query_str) > 10:
        #     print(query_str)
        #     print(new_query_str)
        #     input()
return '{}:({})'.format(field, new_query_str)
def get_topk(self, query_str: str, field: str, topk: int=5):
results = self.es.search(
index=self.index_name,
q=self.query_format(query_str, field))['hits']['hits'][:topk]
return [(doc['_source'], doc['_score']) for doc in results]
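# Usage sketch (never called by the pipeline): a minimal way to drive ESSearcher
# directly. It assumes a local Elasticsearch instance that already has the
# 'python-docs' index used by the aug functions below, whose documents carry
# 'intent' and 'snippet' fields; the query string is just an example.
def _demo_es_searcher():
    ess = ESSearcher(index_name='python-docs')
    # rank documents whose 'intent' field best matches a natural-language query
    for doc, score in ess.get_topk('sort a dictionary by value', 'intent', topk=3):
        print(score, doc.get('snippet'))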
def load_multi_files(files: List[str], max_counts: List[int]=None):
if type(files) is not list:
files = [files]
dataset = []
max_counts = max_counts or [None] * len(files)
for file, max_count in zip(files, max_counts):
try:
td = json.load(open(file, 'r'))
except:
td = [json.loads(l) for l in open(file, 'r')]
if max_count:
td = td[:max_count]
print('load {} from {}'.format(len(td), file))
dataset.extend(td)
return dataset
def aug_iter(ess, dataset, field, topk):
'''
iterate over dataset and do retrieval
'''
for i, code in enumerate(tqdm(dataset)):
if field == 'intent':
query = (code['rewritten_intent'] if 'rewritten_intent' in code else None) or code['intent']
elif field == 'snippet':
query = code['snippet']
try:
hits = ess.get_topk(query, field, topk=topk)
yield code, hits
except KeyboardInterrupt:
raise
except Exception as e:
pass # sometimes the query is empty
def topk_aug(args):
dataset = load_multi_files(args.inp.split(':'))
ess = ESSearcher(index_name='python-docs')
aug_dataset = []
id2count = defaultdict(lambda: 0)
for code, hits in aug_iter(ess, dataset, args.field, args.topk):
'''
if len(hits) != args.topk:
print('not enough for "{}"'.format(query))
print(ess.query_format(query, args.field))
'''
for (rcode, score) in hits:
rcode['for'] = code['question_id']
rcode['retrieval_score'] = score
aug_dataset.append(rcode)
id2count[rcode['question_id']] += 1
with open(args.out, 'w') as fout:
for code in aug_dataset:
fout.write(json.dumps(code) + '\n')
print('most commonly retrieved ids {}'.format(sorted(id2count.items(), key=lambda x: -x[1])[:5]))
def anneal(probs: np.ndarray, temperature=1):
lp = np.log(probs)
alp = temperature * lp
anneal_probs = softmax(alp)
return anneal_probs
def get_distribution(args):
files = args.inp.split(':')
dataset = load_multi_files(files, max_counts=[args.max_count] * len(files))
ess = ESSearcher(index_name='python-docs')
aug_dataset = []
id2count = defaultdict(lambda: 0)
for code, hits in aug_iter(ess, dataset, args.field, args.topk):
for (rcode, score) in hits:
rcode['for'] = code['question_id']
rcode['retrieval_score'] = score
aug_dataset.append(rcode)
id2count[rcode['question_id']] += 1
# compute distribution
dist = sorted(id2count.items(), key=lambda x: -x[1])
qids = np.array(list(map(operator.itemgetter(0), dist)))
probs = np.array(list(map(operator.itemgetter(1), dist)))
probs = probs / np.sum(probs)
if args.temp: # anneal
probs = anneal(probs, args.temp)
print('#retrieved code {}'.format(len(probs)))
print('most commonly retrieved ids {}'.format(list(zip(qids, probs))[:5]))
if args.out:
with open(args.out, 'w') as fout:
for qid, ap in zip(qids, probs):
fout.write('{}\t{}\n'.format(qid, ap))
def sample_aug(args):
dist_file, data_file = args.inp.split(':')
qids = []
probs = []
with open(dist_file, 'r') as fin:
for l in fin:
qid, prob = l.strip().split('\t')
qids.append(qid)
probs.append(float(prob))
qids = np.array(qids)
probs = np.array(probs)
if args.temp: # anneal
print('annel to {}'.format(args.temp))
print(probs[:5])
probs = anneal(probs, args.temp)
print(probs[:5])
dataset = load_multi_files(data_file)
qid2code = dict((str(code['question_id']), code) for code in dataset)
qid2count = defaultdict(lambda: 0)
with open(args.out, 'w') as fout:
for sam in np.random.choice(len(probs), args.max_count, p=probs):
fout.write(json.dumps(qid2code[qids[sam]]) + '\n')
qid2count[qids[sam]] += 1
print('mostly sampled qids {}'.format(sorted(qid2count.items(), key=lambda x: -x[1])[:5]))
if __name__ == '__main__':
arg_parser = argparse.ArgumentParser()
arg_parser.add_argument('--method', type=str, help='method of augmentation',
choices=['topk', 'dist', 'sample'])
arg_parser.add_argument('--inp', type=str, help='input json file')
arg_parser.add_argument('--out', type=str, help='output file')
arg_parser.add_argument('--topk', type=int, help='top k for retrieval', default=5)
arg_parser.add_argument('--max_count', type=int,
help='max number of codes from each file', default=None)
arg_parser.add_argument('--field', type=str, help='field for retrieval',
choices=['snippet', 'intent'], default='snippet')
arg_parser.add_argument('--temp', type=float, help='temperature of sampling', default=None)
args = arg_parser.parse_args()
if args.method == 'topk':
topk_aug(args)
elif args.method == 'dist':
get_distribution(args)
elif args.method == 'sample':
sample_aug(args)
| 34.958115 | 104 | 0.608357 |
7940586ccde2d4f0a7c00043081106645ab0a735 | 1,380 | py | Python | web-simple/setup.py | lightyeare/FrameworkBenchmarks | 40489856a0480c85227993d91de7d66e9224f8b4 | [
"BSD-3-Clause"
] | 1 | 2015-01-28T07:11:03.000Z | 2015-01-28T07:11:03.000Z | web-simple/setup.py | lightyeare/FrameworkBenchmarks | 40489856a0480c85227993d91de7d66e9224f8b4 | [
"BSD-3-Clause"
] | null | null | null | web-simple/setup.py | lightyeare/FrameworkBenchmarks | 40489856a0480c85227993d91de7d66e9224f8b4 | [
"BSD-3-Clause"
] | null | null | null | import subprocess
import sys
import setup_util
from os.path import expanduser
import os
import getpass
home = expanduser("~")
def start(args, logfile, errfile):
setup_util.replace_text("web-simple/app.pl", "localhost", ""+ args.database_host +"")
setup_util.replace_text("web-simple/nginx.conf", "USR", getpass.getuser())
setup_util.replace_text("web-simple/nginx.conf", "server unix:.*\/FrameworkBenchmarks", "server unix:" + home + "/FrameworkBenchmarks")
try:
subprocess.Popen("plackup -E production -s Starman --workers=" + str(args.max_threads) + " -l " + home + "/FrameworkBenchmarks/web-simple/frameworks-benchmark.sock -a ./app.pl", shell=True, cwd="web-simple", stderr=errfile, stdout=logfile)
subprocess.check_call("sudo /usr/local/nginx/sbin/nginx -c " + home + "/FrameworkBenchmarks/web-simple/nginx.conf", shell=True, stderr=errfile, stdout=logfile)
return 0
except subprocess.CalledProcessError:
return 1
def stop(logfile, errfile):
try:
subprocess.call("sudo /usr/local/nginx/sbin/nginx -s stop", shell=True, stderr=errfile, stdout=logfile)
p = subprocess.Popen(['ps', 'aux'], stdout=subprocess.PIPE)
out, err = p.communicate()
for line in out.splitlines():
if 'starman' in line:
pid = int(line.split(None, 2)[1])
os.kill(pid, 9)
return 0
except subprocess.CalledProcessError:
return 1
| 41.818182 | 243 | 0.707971 |
79405994839fdb115af61becdee41c3d80eb69dc | 6,581 | py | Python | gcn/utils.py | alisure-fork/gcn | 4b7b64f941b89647f1ae9aa1f6a0e4276dbdf0d7 | [
"MIT"
] | null | null | null | gcn/utils.py | alisure-fork/gcn | 4b7b64f941b89647f1ae9aa1f6a0e4276dbdf0d7 | [
"MIT"
] | null | null | null | gcn/utils.py | alisure-fork/gcn | 4b7b64f941b89647f1ae9aa1f6a0e4276dbdf0d7 | [
"MIT"
] | null | null | null | import numpy as np
import pickle as pkl
import networkx as nx
import scipy.sparse as sp
from scipy.sparse.linalg.eigen.arpack import eigsh
import sys
def parse_index_file(filename):
"""Parse index file."""
index = []
for line in open(filename):
index.append(int(line.strip()))
return index
def sample_mask(idx, l):
"""Create mask."""
mask = np.zeros(l)
mask[idx] = 1
return np.array(mask, dtype=np.bool)
def load_data(dataset_str):
"""
Loads input data from gcn/data directory
ind.dataset_str.x => the feature vectors of the training instances as scipy.sparse.csr.csr_matrix object;
ind.dataset_str.tx => the feature vectors of the test instances as scipy.sparse.csr.csr_matrix object;
ind.dataset_str.allx => the feature vectors of both labeled and unlabeled training instances
(a superset of ind.dataset_str.x) as scipy.sparse.csr.csr_matrix object;
ind.dataset_str.y => the one-hot labels of the labeled training instances as numpy.ndarray object;
ind.dataset_str.ty => the one-hot labels of the test instances as numpy.ndarray object;
ind.dataset_str.ally => the labels for instances in ind.dataset_str.allx as numpy.ndarray object;
ind.dataset_str.graph => a dict in the format {index: [index_of_neighbor_nodes]} as collections.defaultdict
object;
ind.dataset_str.test.index => the indices of test instances in graph, for the inductive setting as list object.
All objects above must be saved using python pickle module.
:param dataset_str: Dataset name
    :return: All data input files loaded (as well as the training/test data).
"""
names = ['x', 'y', 'tx', 'ty', 'allx', 'ally', 'graph']
objects = []
for i in range(len(names)):
with open("data/ind.{}.{}".format(dataset_str, names[i]), 'rb') as f:
if sys.version_info > (3, 0):
objects.append(pkl.load(f, encoding='latin1'))
else:
objects.append(pkl.load(f))
x, y, tx, ty, allx, ally, graph = tuple(objects)
test_idx_reorder = parse_index_file("data/ind.{}.test.index".format(dataset_str))
test_idx_range = np.sort(test_idx_reorder)
if dataset_str == 'citeseer':
# Fix citeseer dataset (there are some isolated nodes in the graph)
# Find isolated nodes, add them as zero-vecs into the right position
test_idx_range_full = range(min(test_idx_reorder), max(test_idx_reorder)+1)
tx_extended = sp.lil_matrix((len(test_idx_range_full), x.shape[1]))
tx_extended[test_idx_range-min(test_idx_range), :] = tx
tx = tx_extended
ty_extended = np.zeros((len(test_idx_range_full), y.shape[1]))
ty_extended[test_idx_range-min(test_idx_range), :] = ty
ty = ty_extended
features = sp.vstack((allx, tx)).tolil()
features[test_idx_reorder, :] = features[test_idx_range, :]
adj = nx.adjacency_matrix(nx.from_dict_of_lists(graph))
labels = np.vstack((ally, ty))
labels[test_idx_reorder, :] = labels[test_idx_range, :]
idx_test = test_idx_range.tolist()
idx_train = range(len(y))
idx_val = range(len(y), len(y)+500)
train_mask = sample_mask(idx_train, labels.shape[0])
val_mask = sample_mask(idx_val, labels.shape[0])
test_mask = sample_mask(idx_test, labels.shape[0])
y_train = np.zeros(labels.shape)
y_val = np.zeros(labels.shape)
y_test = np.zeros(labels.shape)
y_train[train_mask, :] = labels[train_mask, :]
y_val[val_mask, :] = labels[val_mask, :]
y_test[test_mask, :] = labels[test_mask, :]
return adj, features, y_train, y_val, y_test, train_mask, val_mask, test_mask
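# Usage sketch (illustrative only, not called anywhere): how load_data is meant
# to be invoked, assuming the ind.<dataset>.* pickle files described in the
# docstring above sit under data/; 'cora' is just an example dataset name.
def _demo_load_data(dataset_str='cora'):
    adj, features, y_train, y_val, y_test, train_mask, val_mask, test_mask = load_data(dataset_str)
    print('adjacency matrix:', adj.shape)     # N x N sparse matrix
    print('feature matrix:', features.shape)  # N x D sparse matrix (pre-normalization)
    print('labeled training nodes:', int(train_mask.sum()))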
def sparse_to_tuple(sparse_mx):
"""Convert sparse matrix to tuple representation."""
def to_tuple(mx):
if not sp.isspmatrix_coo(mx):
mx = mx.tocoo()
coords = np.vstack((mx.row, mx.col)).transpose()
values = mx.data
shape = mx.shape
return coords, values, shape
if isinstance(sparse_mx, list):
for i in range(len(sparse_mx)):
sparse_mx[i] = to_tuple(sparse_mx[i])
else:
sparse_mx = to_tuple(sparse_mx)
return sparse_mx
def preprocess_features(features):
"""Row-normalize feature matrix and convert to tuple representation"""
    rowsum = np.array(features.sum(1)) # feature count for each node, n
    r_inv = np.power(rowsum, -1).flatten() # reciprocal of the feature count, 1/n
    r_inv[np.isinf(r_inv)] = 0.
    r_mat_inv = sp.diags(r_inv) # diagonal matrix of those reciprocals
    features = r_mat_inv.dot(features) # can this be read as: the features and their weights?
return sparse_to_tuple(features)
def normalize_adj(adj):
"""Symmetrically normalize adjacency matrix."""
adj = sp.coo_matrix(adj)
rowsum = np.array(adj.sum(1))
d_inv_sqrt = np.power(rowsum, -0.5).flatten()
d_inv_sqrt[np.isinf(d_inv_sqrt)] = 0.
d_mat_inv_sqrt = sp.diags(d_inv_sqrt)
return adj.dot(d_mat_inv_sqrt).transpose().dot(d_mat_inv_sqrt).tocoo()
def preprocess_adj(adj):
"""Preprocessing of adjacency matrix for simple GCN model and conversion to tuple representation."""
    adj_normalized = normalize_adj(adj + sp.eye(adj.shape[0])) # A + I, and its weights
return sparse_to_tuple(adj_normalized)
def construct_feed_dict(features, support, labels, labels_mask, placeholders):
"""Construct feed dictionary."""
feed_dict = dict()
feed_dict.update({placeholders['labels']: labels})
feed_dict.update({placeholders['labels_mask']: labels_mask})
feed_dict.update({placeholders['features']: features})
feed_dict.update({placeholders['support'][i]: support[i] for i in range(len(support))})
feed_dict.update({placeholders['num_features_nonzero']: features[1].shape})
return feed_dict
def chebyshev_polynomials(adj, k):
"""Calculate Chebyshev polynomials up to order k. Return a list of sparse matrices (tuple representation)."""
print("Calculating Chebyshev polynomials up to order {}...".format(k))
adj_normalized = normalize_adj(adj)
laplacian = sp.eye(adj.shape[0]) - adj_normalized
largest_eigval, _ = eigsh(laplacian, 1, which='LM')
scaled_laplacian = (2. / largest_eigval[0]) * laplacian - sp.eye(adj.shape[0])
t_k = list()
t_k.append(sp.eye(adj.shape[0]))
t_k.append(scaled_laplacian)
def chebyshev_recurrence(t_k_minus_one, t_k_minus_two, scaled_lap):
s_lap = sp.csr_matrix(scaled_lap, copy=True)
return 2 * s_lap.dot(t_k_minus_one) - t_k_minus_two
for i in range(2, k+1):
t_k.append(chebyshev_recurrence(t_k[-1], t_k[-2], scaled_laplacian))
return sparse_to_tuple(t_k)
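# Worked example (illustrative only, not called anywhere): preprocess_adj on a
# 3-node path graph 0-1-2, showing the D^-1/2 (A + I) D^-1/2 renormalization
# used above; the returned tuple is the (coords, values, shape) form expected
# when feeding a tf.SparseTensor.
def _demo_preprocess_adj():
    toy_adj = sp.csr_matrix(np.array([[0, 1, 0],
                                      [1, 0, 1],
                                      [0, 1, 0]], dtype=np.float32))
    coords, values, shape = preprocess_adj(toy_adj)
    print(coords)  # indices of the non-zero normalized entries
    print(values)  # symmetric-normalized weights, including the added self-loops
    print(shape)   # (3, 3)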
| 38.711765 | 115 | 0.683483 |
794059bfda0780c283b4a8d86ab6f3d41fc92a15 | 635 | py | Python | etl/parsers/etw/Microsoft_Windows_Video_For_Windows.py | IMULMUL/etl-parser | 76b7c046866ce0469cd129ee3f7bb3799b34e271 | [
"Apache-2.0"
] | 104 | 2020-03-04T14:31:31.000Z | 2022-03-28T02:59:36.000Z | etl/parsers/etw/Microsoft_Windows_Video_For_Windows.py | IMULMUL/etl-parser | 76b7c046866ce0469cd129ee3f7bb3799b34e271 | [
"Apache-2.0"
] | 7 | 2020-04-20T09:18:39.000Z | 2022-03-19T17:06:19.000Z | etl/parsers/etw/Microsoft_Windows_Video_For_Windows.py | IMULMUL/etl-parser | 76b7c046866ce0469cd129ee3f7bb3799b34e271 | [
"Apache-2.0"
] | 16 | 2020-03-05T18:55:59.000Z | 2022-03-01T10:19:28.000Z | # -*- coding: utf-8 -*-
"""
Microsoft-Windows-Video-For-Windows
GUID : 712abb2d-d806-4b42-9682-26da01d8b307
"""
from construct import Int8sl, Int8ul, Int16ul, Int16sl, Int32sl, Int32ul, Int64sl, Int64ul, Bytes, Double, Float32l, Struct
from etl.utils import WString, CString, SystemTime, Guid
from etl.dtyp import Sid
from etl.parsers.etw.core import Etw, declare, guid
@declare(guid=guid("712abb2d-d806-4b42-9682-26da01d8b307"), event_id=1, version=0)
class Microsoft_Windows_Video_For_Windows_1_0(Etw):
pattern = Struct(
"ApplicationName" / WString,
"FileName" / WString,
"ContentType" / WString
)
| 31.75 | 123 | 0.725984 |
794059d85cb8078a7f803daa4412795b336239a5 | 5,309 | py | Python | sdk/network/azure-mgmt-network/azure/mgmt/network/v2021_05_01/operations/_vpn_site_link_connections_operations.py | xolve/azure-sdk-for-python | 9f5baa19c392f77f811d936ee43450e4ea524002 | [
"MIT"
] | 2,728 | 2015-01-09T10:19:32.000Z | 2022-03-31T14:50:33.000Z | sdk/network/azure-mgmt-network/azure/mgmt/network/v2021_05_01/operations/_vpn_site_link_connections_operations.py | v-xuto/azure-sdk-for-python | 9c6296d22094c5ede410bc83749e8df8694ccacc | [
"MIT"
] | 17,773 | 2015-01-05T15:57:17.000Z | 2022-03-31T23:50:25.000Z | sdk/network/azure-mgmt-network/azure/mgmt/network/v2021_05_01/operations/_vpn_site_link_connections_operations.py | v-xuto/azure-sdk-for-python | 9c6296d22094c5ede410bc83749e8df8694ccacc | [
"MIT"
] | 1,916 | 2015-01-19T05:05:41.000Z | 2022-03-31T19:36:44.000Z | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import TYPE_CHECKING
import warnings
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import HttpRequest, HttpResponse
from azure.mgmt.core.exceptions import ARMErrorFormat
from .. import models as _models
if TYPE_CHECKING:
# pylint: disable=unused-import,ungrouped-imports
from typing import Any, Callable, Dict, Generic, Optional, TypeVar
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]
class VpnSiteLinkConnectionsOperations(object):
"""VpnSiteLinkConnectionsOperations operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.network.v2021_05_01.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
def __init__(self, client, config, serializer, deserializer):
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
def get(
self,
resource_group_name, # type: str
gateway_name, # type: str
connection_name, # type: str
link_connection_name, # type: str
**kwargs # type: Any
):
# type: (...) -> "_models.VpnSiteLinkConnection"
"""Retrieves the details of a vpn site link connection.
:param resource_group_name: The resource group name of the VpnGateway.
:type resource_group_name: str
:param gateway_name: The name of the gateway.
:type gateway_name: str
:param connection_name: The name of the vpn connection.
:type connection_name: str
:param link_connection_name: The name of the vpn connection.
:type link_connection_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: VpnSiteLinkConnection, or the result of cls(response)
:rtype: ~azure.mgmt.network.v2021_05_01.models.VpnSiteLinkConnection
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.VpnSiteLinkConnection"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2021-05-01"
accept = "application/json"
# Construct URL
url = self.get.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'gatewayName': self._serialize.url("gateway_name", gateway_name, 'str'),
'connectionName': self._serialize.url("connection_name", connection_name, 'str'),
'linkConnectionName': self._serialize.url("link_connection_name", link_connection_name, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.get(url, query_parameters, header_parameters)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('VpnSiteLinkConnection', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/vpnGateways/{gatewayName}/vpnConnections/{connectionName}/vpnLinkConnections/{linkConnectionName}'} # type: ignore
| 46.982301 | 238 | 0.685628 |
79405a7c0f95b1d632c407fa0cb26435255eaed2 | 552 | py | Python | virtual/lib/python3.8/site-packages/rest_framework/authtoken/migrations/0003_tokenproxy.py | ShirQUillE-SandE/the-neighborhood-101 | fda09cb0481d1cd902f5e13b7ed61ed96772121d | [
"MIT"
] | 17,395 | 2017-03-31T21:13:13.000Z | 2022-03-31T21:33:13.000Z | virtual/lib/python3.8/site-packages/rest_framework/authtoken/migrations/0003_tokenproxy.py | ShirQUillE-SandE/the-neighborhood-101 | fda09cb0481d1cd902f5e13b7ed61ed96772121d | [
"MIT"
] | 2,983 | 2017-03-31T14:43:26.000Z | 2022-03-31T20:34:23.000Z | virtual/lib/python3.8/site-packages/rest_framework/authtoken/migrations/0003_tokenproxy.py | ShirQUillE-SandE/the-neighborhood-101 | fda09cb0481d1cd902f5e13b7ed61ed96772121d | [
"MIT"
] | 5,159 | 2017-03-31T15:06:32.000Z | 2022-03-31T03:25:17.000Z | # Generated by Django 3.1.1 on 2020-09-28 09:34
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('authtoken', '0002_auto_20160226_1747'),
]
operations = [
migrations.CreateModel(
name='TokenProxy',
fields=[
],
options={
'verbose_name': 'token',
'proxy': True,
'indexes': [],
'constraints': [],
},
bases=('authtoken.token',),
),
]
| 21.230769 | 49 | 0.471014 |
79405adb54b06f79a03d63496cdce160d8ad5b7c | 1,052 | py | Python | ci/dropbox_upload.py | swistakm/pyrilla | 43ceed4e2500ee9aa352446fd7a9552466b66aed | [
"BSD-3-Clause"
] | 9 | 2016-03-31T15:28:00.000Z | 2021-11-18T23:41:01.000Z | ci/dropbox_upload.py | swistakm/pyrilla | 43ceed4e2500ee9aa352446fd7a9552466b66aed | [
"BSD-3-Clause"
] | 7 | 2015-10-19T15:13:26.000Z | 2016-10-29T14:17:20.000Z | ci/dropbox_upload.py | swistakm/pyrilla | 43ceed4e2500ee9aa352446fd7a9552466b66aed | [
"BSD-3-Clause"
] | null | null | null | # -*- coding: utf-8 -*-
"""
Script for uploading securely built distributions (artifacts) to private
Dropbox directory.
The Dropbox authorization token should be provided only as an environment
variable, in a secure form. On CI systems (AppVeyor, Travis CI) it should
be provided as an encrypted value in the CI configuration file.
We prefer this method over the native artifact-collection routine of each
CI system because it is more consistent.
"""
import os
import dropbox
import dropbox.files
dropbox_token = os.environ.get('DROPBOX_TOKEN')
dbx = dropbox.Dropbox(dropbox_token)
for root, dirs, files in os.walk('dist'):
for filename in files:
local_path = os.path.join(root, filename)
relative_path = os.path.relpath(local_path, 'dist')
dropbox_path = "/" + relative_path
with open(local_path, 'rb') as f:
print("uploading %s" % local_path)
dbx.files_upload(
f.read(), dropbox_path,
dropbox.files.WriteMode('overwrite')
)
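# Usage note: the token value below is only a placeholder. A local dry run looks
# like
#
#     DROPBOX_TOKEN="<token>" python ci/dropbox_upload.py
#
# while on CI the same variable is expected to come from an encrypted/secure
# setting rather than from the repository.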
| 30.941176 | 75 | 0.692966 |
79405b182dd33f0ca98116738f83470cc051fffe | 768 | py | Python | desktop/core/ext-py/cx_Oracle-6.4.1/samples/PLSQLProcedure.py | yetsun/hue | 2e48f0cc70e233ee0e1b40733d4b2a18d8836c66 | [
"Apache-2.0"
] | 5,079 | 2015-01-01T03:39:46.000Z | 2022-03-31T07:38:22.000Z | desktop/core/ext-py/cx_Oracle-6.4.1/samples/PLSQLProcedure.py | yetsun/hue | 2e48f0cc70e233ee0e1b40733d4b2a18d8836c66 | [
"Apache-2.0"
] | 1,623 | 2015-01-01T08:06:24.000Z | 2022-03-30T19:48:52.000Z | desktop/core/ext-py/cx_Oracle-6.4.1/samples/PLSQLProcedure.py | yetsun/hue | 2e48f0cc70e233ee0e1b40733d4b2a18d8836c66 | [
"Apache-2.0"
] | 2,033 | 2015-01-04T07:18:02.000Z | 2022-03-28T19:55:47.000Z | #------------------------------------------------------------------------------
# Copyright 2016, 2017, Oracle and/or its affiliates. All rights reserved.
#------------------------------------------------------------------------------
#------------------------------------------------------------------------------
# PLSQLProcedure.py
#
# Demonstrate how to call a PL/SQL stored procedure and get the results of an
# OUT variable.
#------------------------------------------------------------------------------
from __future__ import print_function
import cx_Oracle
import SampleEnv
connection = cx_Oracle.connect(SampleEnv.MAIN_CONNECT_STRING)
cursor = connection.cursor()
myvar = cursor.var(int)
cursor.callproc('myproc', (123, myvar))
print(myvar.getvalue())
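# The call above assumes a stored procedure along these lines exists in the
# schema; the authoritative definition ships with the sample SQL setup scripts
# and may differ in detail:
#
#     create or replace procedure myproc (a_Value1 number, a_Value2 out number) as
#     begin
#         a_Value2 := a_Value1 * 2;
#     end;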
| 32 | 79 | 0.454427 |
79405b79fd7199cc6ffdc946ac38611a6d1bfced | 11,616 | py | Python | diffusion/sampling.py | crowsonkb/v-diffusion-pytorch | 93b6a54986d8259837a100046777fba52d812554 | [
"MIT"
] | 393 | 2021-12-16T19:44:36.000Z | 2022-03-31T02:30:30.000Z | diffusion/sampling.py | crowsonkb/v-diffusion-pytorch | 93b6a54986d8259837a100046777fba52d812554 | [
"MIT"
] | 13 | 2021-12-21T11:14:37.000Z | 2022-03-29T14:29:59.000Z | diffusion/sampling.py | crowsonkb/v-diffusion-pytorch | 93b6a54986d8259837a100046777fba52d812554 | [
"MIT"
] | 59 | 2021-12-16T23:35:34.000Z | 2022-03-25T22:59:58.000Z | import torch
from tqdm.auto import trange
from . import utils
# DDPM/DDIM sampling
@torch.no_grad()
def sample(model, x, steps, eta, extra_args, callback=None):
"""Draws samples from a model given starting noise."""
ts = x.new_ones([x.shape[0]])
# Create the noise schedule
alphas, sigmas = utils.t_to_alpha_sigma(steps)
# The sampling loop
for i in trange(len(steps), disable=None):
# Get the model output (v, the predicted velocity)
with torch.cuda.amp.autocast():
v = model(x, ts * steps[i], **extra_args).float()
# Predict the noise and the denoised image
pred = x * alphas[i] - v * sigmas[i]
eps = x * sigmas[i] + v * alphas[i]
# Call the callback
if callback is not None:
callback({'x': x, 'i': i, 't': steps[i], 'v': v, 'pred': pred})
# If we are not on the last timestep, compute the noisy image for the
# next timestep.
if i < len(steps) - 1:
# If eta > 0, adjust the scaling factor for the predicted noise
# downward according to the amount of additional noise to add
ddim_sigma = eta * (sigmas[i + 1]**2 / sigmas[i]**2).sqrt() * \
(1 - alphas[i]**2 / alphas[i + 1]**2).sqrt()
adjusted_sigma = (sigmas[i + 1]**2 - ddim_sigma**2).sqrt()
# Recombine the predicted noise and predicted denoised image in the
# correct proportions for the next step
x = pred * alphas[i + 1] + eps * adjusted_sigma
# Add the correct amount of fresh noise
if eta:
x += torch.randn_like(x) * ddim_sigma
# If we are on the last timestep, output the denoised image
return pred
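# Usage sketch (illustrative only): any v-prediction model with the signature
# model(x, t, **extra_args) can be plugged into sample(); the zero-output module
# below is a stand-in so the call runs without a trained checkpoint.
def _demo_sample(n_steps=10):
    class _ZeroV(torch.nn.Module):
        def forward(self, x, t):
            return torch.zeros_like(x)
    x = torch.randn([1, 3, 8, 8])                   # starting noise
    steps = torch.linspace(1, 0, n_steps + 1)[:-1]  # t runs from 1 toward 0
    return sample(_ZeroV(), x, steps, eta=0., extra_args={})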
@torch.no_grad()
def cond_sample(model, x, steps, eta, extra_args, cond_fn, callback=None):
"""Draws guided samples from a model given starting noise."""
ts = x.new_ones([x.shape[0]])
# Create the noise schedule
alphas, sigmas = utils.t_to_alpha_sigma(steps)
# The sampling loop
for i in trange(len(steps), disable=None):
# Get the model output
with torch.enable_grad():
x = x.detach().requires_grad_()
with torch.cuda.amp.autocast():
v = model(x, ts * steps[i], **extra_args)
pred = x * alphas[i] - v * sigmas[i]
# Call the callback
if callback is not None:
callback({'x': x, 'i': i, 't': steps[i], 'v': v.detach(), 'pred': pred.detach()})
if steps[i] < 1:
cond_grad = cond_fn(x, ts * steps[i], pred, **extra_args).detach()
v = v.detach() - cond_grad * (sigmas[i] / alphas[i])
else:
v = v.detach()
# Predict the noise and the denoised image
pred = x * alphas[i] - v * sigmas[i]
eps = x * sigmas[i] + v * alphas[i]
# If we are not on the last timestep, compute the noisy image for the
# next timestep.
if i < len(steps) - 1:
# If eta > 0, adjust the scaling factor for the predicted noise
# downward according to the amount of additional noise to add
ddim_sigma = eta * (sigmas[i + 1]**2 / sigmas[i]**2).sqrt() * \
(1 - alphas[i]**2 / alphas[i + 1]**2).sqrt()
adjusted_sigma = (sigmas[i + 1]**2 - ddim_sigma**2).sqrt()
# Recombine the predicted noise and predicted denoised image in the
# correct proportions for the next step
x = pred * alphas[i + 1] + eps * adjusted_sigma
# Add the correct amount of fresh noise
if eta:
x += torch.randn_like(x) * ddim_sigma
# If we are on the last timestep, output the denoised image
return pred
@torch.no_grad()
def reverse_sample(model, x, steps, extra_args, callback=None):
"""Finds a starting latent that would produce the given image with DDIM
(eta=0) sampling."""
ts = x.new_ones([x.shape[0]])
# Create the noise schedule
alphas, sigmas = utils.t_to_alpha_sigma(steps)
# The sampling loop
for i in trange(len(steps) - 1, disable=None):
# Get the model output (v, the predicted velocity)
with torch.cuda.amp.autocast():
v = model(x, ts * steps[i], **extra_args).float()
# Predict the noise and the denoised image
pred = x * alphas[i] - v * sigmas[i]
eps = x * sigmas[i] + v * alphas[i]
# Call the callback
if callback is not None:
callback({'x': x, 'i': i, 't': steps[i], 'v': v, 'pred': pred})
# Recombine the predicted noise and predicted denoised image in the
# correct proportions for the next step
x = pred * alphas[i + 1] + eps * sigmas[i + 1]
return x
# PNDM sampling (see https://openreview.net/pdf?id=PlKWVd2yBkY)
def make_eps_model_fn(model):
def eps_model_fn(x, t, **extra_args):
alphas, sigmas = utils.t_to_alpha_sigma(t)
v = model(x, t, **extra_args)
eps = x * utils.append_dims(sigmas, x.ndim) + v * utils.append_dims(alphas, x.ndim)
return eps
return eps_model_fn
def make_autocast_model_fn(model, enabled=True):
def autocast_model_fn(*args, **kwargs):
with torch.cuda.amp.autocast(enabled):
return model(*args, **kwargs).float()
return autocast_model_fn
def transfer(x, eps, t_1, t_2):
alphas, sigmas = utils.t_to_alpha_sigma(t_1)
next_alphas, next_sigmas = utils.t_to_alpha_sigma(t_2)
pred = (x - eps * utils.append_dims(sigmas, x.ndim)) / utils.append_dims(alphas, x.ndim)
x = pred * utils.append_dims(next_alphas, x.ndim) + eps * utils.append_dims(next_sigmas, x.ndim)
return x, pred
def prk_step(model, x, t_1, t_2, extra_args):
eps_model_fn = make_eps_model_fn(model)
t_mid = (t_2 + t_1) / 2
eps_1 = eps_model_fn(x, t_1, **extra_args)
x_1, _ = transfer(x, eps_1, t_1, t_mid)
eps_2 = eps_model_fn(x_1, t_mid, **extra_args)
x_2, _ = transfer(x, eps_2, t_1, t_mid)
eps_3 = eps_model_fn(x_2, t_mid, **extra_args)
x_3, _ = transfer(x, eps_3, t_1, t_2)
eps_4 = eps_model_fn(x_3, t_2, **extra_args)
eps_prime = (eps_1 + 2 * eps_2 + 2 * eps_3 + eps_4) / 6
x_new, pred = transfer(x, eps_prime, t_1, t_2)
return x_new, eps_prime, pred
def plms_step(model, x, old_eps, t_1, t_2, extra_args):
eps_model_fn = make_eps_model_fn(model)
eps = eps_model_fn(x, t_1, **extra_args)
eps_prime = (55 * eps - 59 * old_eps[-1] + 37 * old_eps[-2] - 9 * old_eps[-3]) / 24
x_new, _ = transfer(x, eps_prime, t_1, t_2)
_, pred = transfer(x, eps, t_1, t_2)
return x_new, eps, pred
@torch.no_grad()
def prk_sample(model, x, steps, extra_args, is_reverse=False, callback=None):
"""Draws samples from a model given starting noise using fourth-order
Pseudo Runge-Kutta."""
ts = x.new_ones([x.shape[0]])
model_fn = make_autocast_model_fn(model)
if not is_reverse:
steps = torch.cat([steps, steps.new_zeros([1])])
for i in trange(len(steps) - 1, disable=None):
x, _, pred = prk_step(model_fn, x, steps[i] * ts, steps[i + 1] * ts, extra_args)
if callback is not None:
callback({'x': x, 'i': i, 't': steps[i], 'pred': pred})
return x
@torch.no_grad()
def plms_sample(model, x, steps, extra_args, is_reverse=False, callback=None):
"""Draws samples from a model given starting noise using fourth order
Pseudo Linear Multistep."""
ts = x.new_ones([x.shape[0]])
model_fn = make_autocast_model_fn(model)
if not is_reverse:
steps = torch.cat([steps, steps.new_zeros([1])])
old_eps = []
for i in trange(len(steps) - 1, disable=None):
if len(old_eps) < 3:
x, eps, pred = prk_step(model_fn, x, steps[i] * ts, steps[i + 1] * ts, extra_args)
else:
x, eps, pred = plms_step(model_fn, x, old_eps, steps[i] * ts, steps[i + 1] * ts, extra_args)
old_eps.pop(0)
old_eps.append(eps)
if callback is not None:
callback({'x': x, 'i': i, 't': steps[i], 'pred': pred})
return x
def pie_step(model, x, t_1, t_2, extra_args):
eps_model_fn = make_eps_model_fn(model)
eps_1 = eps_model_fn(x, t_1, **extra_args)
x_1, _ = transfer(x, eps_1, t_1, t_2)
eps_2 = eps_model_fn(x_1, t_2, **extra_args)
eps_prime = (eps_1 + eps_2) / 2
x_new, pred = transfer(x, eps_prime, t_1, t_2)
return x_new, eps_prime, pred
def plms2_step(model, x, old_eps, t_1, t_2, extra_args):
eps_model_fn = make_eps_model_fn(model)
eps = eps_model_fn(x, t_1, **extra_args)
eps_prime = (3 * eps - old_eps[-1]) / 2
x_new, _ = transfer(x, eps_prime, t_1, t_2)
_, pred = transfer(x, eps, t_1, t_2)
return x_new, eps, pred
@torch.no_grad()
def pie_sample(model, x, steps, extra_args, is_reverse=False, callback=None):
"""Draws samples from a model given starting noise using second-order
Pseudo Improved Euler."""
ts = x.new_ones([x.shape[0]])
model_fn = make_autocast_model_fn(model)
if not is_reverse:
steps = torch.cat([steps, steps.new_zeros([1])])
for i in trange(len(steps) - 1, disable=None):
x, _, pred = pie_step(model_fn, x, steps[i] * ts, steps[i + 1] * ts, extra_args)
if callback is not None:
callback({'x': x, 'i': i, 't': steps[i], 'pred': pred})
return x
@torch.no_grad()
def plms2_sample(model, x, steps, extra_args, is_reverse=False, callback=None):
"""Draws samples from a model given starting noise using second order
Pseudo Linear Multistep."""
ts = x.new_ones([x.shape[0]])
model_fn = make_autocast_model_fn(model)
if not is_reverse:
steps = torch.cat([steps, steps.new_zeros([1])])
old_eps = []
for i in trange(len(steps) - 1, disable=None):
if len(old_eps) < 1:
x, eps, pred = pie_step(model_fn, x, steps[i] * ts, steps[i + 1] * ts, extra_args)
else:
x, eps, pred = plms2_step(model_fn, x, old_eps, steps[i] * ts, steps[i + 1] * ts, extra_args)
old_eps.pop(0)
old_eps.append(eps)
if callback is not None:
callback({'x': x, 'i': i, 't': steps[i], 'pred': pred})
return x
def iplms_step(model, x, old_eps, t_1, t_2, extra_args):
eps_model_fn = make_eps_model_fn(model)
eps = eps_model_fn(x, t_1, **extra_args)
if len(old_eps) == 0:
eps_prime = eps
elif len(old_eps) == 1:
eps_prime = (3/2 * eps - 1/2 * old_eps[-1])
elif len(old_eps) == 2:
eps_prime = (23/12 * eps - 16/12 * old_eps[-1] + 5/12 * old_eps[-2])
else:
eps_prime = (55/24 * eps - 59/24 * old_eps[-1] + 37/24 * old_eps[-2] - 9/24 * old_eps[-3])
x_new, _ = transfer(x, eps_prime, t_1, t_2)
_, pred = transfer(x, eps, t_1, t_2)
return x_new, eps, pred
@torch.no_grad()
def iplms_sample(model, x, steps, extra_args, is_reverse=False, callback=None):
"""Draws samples from a model given starting noise using fourth order
Improved Pseudo Linear Multistep."""
ts = x.new_ones([x.shape[0]])
model_fn = make_autocast_model_fn(model)
if not is_reverse:
steps = torch.cat([steps, steps.new_zeros([1])])
old_eps = []
for i in trange(len(steps) - 1, disable=None):
x, eps, pred = iplms_step(model_fn, x, old_eps, steps[i] * ts, steps[i + 1] * ts, extra_args)
if len(old_eps) >= 3:
old_eps.pop(0)
old_eps.append(eps)
if callback is not None:
callback({'x': x, 'i': i, 't': steps[i], 'pred': pred})
return x
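# Note on calling conventions: the pseudo-numerical samplers above (prk_sample,
# plms_sample, pie_sample, plms2_sample, iplms_sample) share the signature
# (model, x, steps, extra_args), so swapping the integrator is a one-line
# change, e.g.
#
#     steps = torch.linspace(1, 0, 26)[:-1]
#     out = iplms_sample(model, torch.randn([1, 3, 64, 64]), steps, {})
#
# whereas sample() and cond_sample() additionally take the DDIM eta parameter.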
| 37.230769 | 105 | 0.600723 |
79405bb96bdb046d9f64950346ad004a8e71615b | 15,780 | py | Python | symbolic_slimshot/ind_union_open.py | sunyi000/SafeLearner | bf93268c9107f3404696b9212accaace1a0e76ee | [
"Apache-2.0"
] | 6 | 2019-10-03T04:18:17.000Z | 2020-12-18T16:13:34.000Z | symbolic_slimshot/ind_union_open.py | sunyi000/SafeLearner | bf93268c9107f3404696b9212accaace1a0e76ee | [
"Apache-2.0"
] | null | null | null | symbolic_slimshot/ind_union_open.py | sunyi000/SafeLearner | bf93268c9107f3404696b9212accaace1a0e76ee | [
"Apache-2.0"
] | 2 | 2019-10-16T21:13:04.000Z | 2022-02-02T14:00:54.000Z | import itertools
from symbolic_slimshot import algorithm
class IndependentUnion(object):
def __init__(self, query, subqueries, init=True):
self.query = query
self.subqueries = subqueries
self.genericConstantStr = None
self.genericIdentifiers = set()
self.usedSeparatorVars = []
# True means that missing tuples in the output count as True
self.trueOnMissing = False
# this call must be last for initialization purposes
self.getSafeQueryPlan(init)
def getSafeQueryPlan(self, init=True):
if isinstance(self.query, list):
self.usedSeparatorVars = set()
for q in self.query:
self.usedSeparatorVars.update(q.getUsedSeparators())
else:
self.usedSeparatorVars = self.query.getUsedSeparators()
self.formattedUsedSeparators = [
self.formatSeparatorVariable(sep) for sep in self.usedSeparatorVars
]
if init:
self.children = list(
map(algorithm.getSafeOpenQueryPlanNaive, self.subqueries)
)
self.lam = sum(x.lam for x in self.children)
else:
self.children = []
def hasGenericConstant(self):
return self.genericConstantStr is not None
def getGenericConstantStr(self):
return self.genericConstantStr
def generateSQL_DNF(self, separatorSubs=None):
if separatorSubs is None:
separatorSubs = []
results = []
counters = []
selectAtts = []
identToTermSubs = {}
counterIdentToGenericConstantStr = {}
genericConstantStrIdent = 0
for (i, child) in enumerate(self.children):
sql = child.generateSQL_DNF(separatorSubs[:])
ident = algorithm.counter()
if child.hasGenericConstant():
genericConstantStr = child.getGenericConstantStr()
# doesn't matter which one, just pick arbitrarily
self.genericConstantStr = genericConstantStr
counterIdentToGenericConstantStr[ident] = genericConstantStr
genericConstantStrIdent = ident
counters.append(ident)
results.append((sql, ident))
thisTermSubs = set()
for (subId, varList) in separatorSubs:
if child.usesSeparator(subId):
thisTermSubs.add(subId)
identToTermSubs[ident] = thisTermSubs
if self.hasGenericConstant():
selectAtts.append(
"q%d.%s" % (genericConstantStrIdent, self.genericConstantStr)
)
subqueryPairs = [
pair for pair in itertools.product(counters, counters) if pair[0] < pair[1]
]
joinConditions = []
for (j1, j2) in subqueryPairs:
if (
j1 in counterIdentToGenericConstantStr
and j2 in counterIdentToGenericConstantStr
):
joinConditions.append(
"q%d.%s = q%d.%s"
% (
j1,
counterIdentToGenericConstantStr[j1],
j2,
counterIdentToGenericConstantStr[j2],
)
)
if len(joinConditions):
joinCondition = "where %s" % " and ".join(joinConditions)
else:
joinCondition = ""
subqueries = []
previousIdent = False
for (sql, ident) in results:
newSubquery = "(%s) as q%d \n" % (sql, ident)
if previousIdent:
if len(separatorSubs):
condition = "ON %s" % " and ".join(
[
"q%d.c%d = q%d.c%d" % (previousIdent, i, ident, i)
for (i, x) in separatorSubs
if i in identToTermSubs[previousIdent]
and i in identToTermSubs[ident]
]
)
subqueries.append(
"FULL OUTER JOIN %s %s" % (newSubquery, condition)
)
else:
subqueries.append("FULL OUTER JOIN %s ON true" % (newSubquery))
else:
subqueries.append("%s" % (newSubquery))
previousIdent = ident
subqueryString = " ".join(subqueries)
# pString = '*'.join(["COALESCE(1-q%d.pUse,1)" % i for i in counters])
pString = (
"concat("
+ ", ' + ', ".join(
[
"COALESCE(cast(q%d.pUse as text), '<%s>')" % (i, l)
for i, l in zip(counters, [c.lam for c in self.children])
]
)
+ ")"
)
for (i, x) in separatorSubs:
attsToCoalesce = ", ".join(
[
"q%d.c%d" % (ident, i)
for ident in counters
if i in identToTermSubs[previousIdent]
and i in identToTermSubs[ident]
]
)
if len(attsToCoalesce) > 0:
selectAtts.append("COALESCE(%s) as c%d" % (attsToCoalesce, i))
attString = ", ".join(selectAtts)
if attString:
selectString = "%s, %s as pUse" % (attString, pString)
else:
selectString = "%s as pUse" % (pString)
sql = "\n select %s from %s %s" % (selectString, subqueryString, joinCondition)
return sql
def formatSeparatorVariable(self, sep):
return "sep_var_%s" % str(sep)
# we have an order on attributes to follow, but some will require complex statements such
# as CASE/WHEN or COALESCE, which are passed in using
# attributeToFormattedStringMap
def getOrderedSelectString(self, orderedAttributes, attributeToFormattedStringMap):
selectAttributes = []
for attribute in orderedAttributes:
if attribute in attributeToFormattedStringMap:
selectAttributes.append(attributeToFormattedStringMap[attribute])
return ", ".join(selectAttributes)
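    # For example (illustrative values only):
    #   self.getOrderedSelectString(['sep_var_0', 'pUse'],
    #                               {'sep_var_0': 'q1.sep_var_0', 'pUse': 'q1.pUse'})
    # returns "q1.sep_var_0, q1.pUse"; attributes missing from the map are
    # silently skipped, so callers only populate the columns they actually select.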
def generateSQL_CNF(self, params):
if params["useLog"]:
if params["useNull"]:
defaultValue = "NULL"
else:
defaultValue = "'-Infinity'"
else:
defaultValue = "0"
tableAliases = []
tableAliasToSubquerySQLMap = {}
tableAliasToGenericIdentifiersMap = {}
tableAliasToUsedSeparatorsMap = {}
genericIdentifierToTableAliasMap = {}
usedSeparatorToTableAliasMap = {}
tableAliasesUsingAllSeparators = set()
tableAliasesUsingAllSeparatorsAndGenericIdentifiers = set()
tableAliasIsTrueOnMissing = {}
tableAliasToMissingGenericIdentifiersMap = {}
tableAliasToMissingSeparatorsMap = {}
restOfTableAliases = set()
self.genericIdentifiers = set()
# if any child is not true on missing, this gets set to False
self.trueOnMissing = True
# assign each child a table alias and fetch its SQL code, then build the maps
# that say which identifiers/separator vars are used by each child
for child in self.children:
currentSubqueryID = algorithm.counter()
subquerySQL = child.generateSQL_CNF(params)
tableAlias = "q%d" % currentSubqueryID
tableAliases.append(tableAlias)
tableAliasToSubquerySQLMap[tableAlias] = subquerySQL
tableAliasIsTrueOnMissing[tableAlias] = child.trueOnMissing
if not child.trueOnMissing:
self.trueOnMissing = False
childGenericIdentifiers = child.genericIdentifiers.copy()
tableAliasToGenericIdentifiersMap[tableAlias] = childGenericIdentifiers
self.genericIdentifiers.update(childGenericIdentifiers)
for genericIdentifier in childGenericIdentifiers:
if genericIdentifier in genericIdentifierToTableAliasMap:
genericIdentifierToTableAliasMap[genericIdentifier].add(tableAlias)
else:
genericIdentifierToTableAliasMap[genericIdentifier] = set(
[tableAlias]
)
usesAllSeparators = True
tableAliasToUsedSeparatorsMap[tableAlias] = set()
for usedSeparatorVariable in self.usedSeparatorVars:
if child.usesSeparator(usedSeparatorVariable):
formattedSeparator = self.formatSeparatorVariable(
usedSeparatorVariable
)
tableAliasToUsedSeparatorsMap[tableAlias].add(formattedSeparator)
if formattedSeparator in usedSeparatorToTableAliasMap:
usedSeparatorToTableAliasMap[formattedSeparator].add(tableAlias)
else:
usedSeparatorToTableAliasMap[formattedSeparator] = set(
[tableAlias]
)
else:
usesAllSeparators = False
if usesAllSeparators:
tableAliasesUsingAllSeparators.add(tableAlias)
for tableAlias in tableAliases:
tableAliasToMissingGenericIdentifiersMap[
tableAlias
] = self.genericIdentifiers.difference(
tableAliasToGenericIdentifiersMap[tableAlias]
)
tableAliasToMissingSeparatorsMap[tableAlias] = set(
self.formattedUsedSeparators
).difference(tableAliasToUsedSeparatorsMap[tableAlias])
if tableAliasToMissingGenericIdentifiersMap[tableAlias]:
restOfTableAliases.add(tableAlias)
elif tableAlias in tableAliasesUsingAllSeparators:
tableAliasesUsingAllSeparatorsAndGenericIdentifiers.add(tableAlias)
else:
restOfTableAliases.add(tableAlias)
# need a fixed order for selected attributes, before building the union
# queries
orderedAttributes = []
for previousSeparatorVariable in self.formattedUsedSeparators:
orderedAttributes.append(previousSeparatorVariable)
for genericIdentifier in self.genericIdentifiers:
orderedAttributes.append(genericIdentifier)
orderedAttributes.append("pUse")
orderedAttributes.append("trueOnMissing")
unionSubqueries = []
selectVariables = set(self.formattedUsedSeparators).union(
self.genericIdentifiers
)
for tableAlias in tableAliases:
if tableAlias in tableAliasesUsingAllSeparatorsAndGenericIdentifiers:
selectAttributeMap = {
attribute: "%s.%s" % (tableAlias, attribute)
for attribute in selectVariables
}
selectAttributeMap["pUse"] = "%s.pUse" % tableAlias
selectAttributeMap["trueOnMissing"] = "%s as trueOnMissing" % str(
tableAliasIsTrueOnMissing[tableAlias]
)
selectAttributeString = self.getOrderedSelectString(
orderedAttributes, selectAttributeMap
)
unionSubqueries.append(
"SELECT %s FROM %s" % (selectAttributeString, tableAlias)
)
else:
# skip if this tableAlias has no joining variables?
missingSelectVariables = tableAliasToMissingGenericIdentifiersMap[
tableAlias
].union(tableAliasToMissingSeparatorsMap[tableAlias])
selectAttributeMap = {}
additionalTables = []
index = 0
for attribute in selectVariables:
if attribute in missingSelectVariables:
domainTable = "A%d" % index
additionalTables.append("A %s" % domainTable)
selectAttributeMap[attribute] = "%s.v0 as %s" % (
domainTable,
attribute,
)
index += 1
else:
selectAttributeMap[attribute] = "%s.%s" % (
tableAlias,
attribute,
)
selectAttributeMap["pUse"] = "%s.pUse" % tableAlias
selectAttributeMap["trueOnMissing"] = "%s as trueOnMissing" % str(
tableAliasIsTrueOnMissing[tableAlias]
)
selectAttributeString = self.getOrderedSelectString(
orderedAttributes, selectAttributeMap
)
additionalTablesString = ", ".join(additionalTables)
unionSubqueries.append(
"SELECT %s FROM %s, %s"
% (selectAttributeString, tableAlias, additionalTablesString)
)
selectAttributeMap = {
attribute: "%s" % (attribute) for attribute in selectVariables
}
numberOfChildrenFalseOnMissing = len(self.children) - sum(
tableAliasIsTrueOnMissing.values()
)
if params["useLog"]:
if params["useNull"]:
selectAttributeMap["pUse"] = (
"iunion_log_null_%d_false_on_missing(pUse, trueOnMissing) as pUse"
% numberOfChildrenFalseOnMissing
)
else:
selectAttributeMap["pUse"] = (
"iunion_log_neginf_%d_false_on_missing(pUse, trueOnMissing) as pUse"
% numberOfChildrenFalseOnMissing
)
else:
selectAttributeMap["pUse"] = (
"iunion_%d_false_on_missing(pUse, trueOnMissing) as pUse"
% numberOfChildrenFalseOnMissing
)
selectClause = self.getOrderedSelectString(
orderedAttributes, selectAttributeMap
)
groupByAttributeMap = {
attribute: "%s" % (attribute) for attribute in selectVariables
}
if groupByAttributeMap:
groupByClause = "GROUP BY %s" % self.getOrderedSelectString(
orderedAttributes, groupByAttributeMap
)
else:
groupByClause = ""
withClause = ",\n".join(
[
"%s as (%s)" % (tableAlias, tableAliasToSubquerySQLMap[tableAlias])
for tableAlias in tableAliases
]
)
unionClause = " UNION ALL ".join(unionSubqueries)
unionClauseAlias = "q%d" % algorithm.counter()
joinSQL = "\n WITH %s select %s from (\n %s \n) %s \n %s" % (
withClause,
selectClause,
unionClause,
unionClauseAlias,
groupByClause,
)
return joinSQL
def usesSeparator(self, sep):
return self.query.usesSeparator(sep)
def buildTree(self, T, parent=None):
newId = len(T.nodes()) + 1
T.add_node(newId, label=self.getLabel())
for n in self.children:
T.add_edge(newId, n.buildTree(T, self))
return newId
def getLabel(self):
# TODO(ericgribkoff) make the join/union distinction for CNF versus DNF
# more clear
return "Independent Join\n%s" % self.query.prettyPrintCNF()
def __repr__(self):
return "Independent Union (true on missing = %s): %s" % (
self.trueOnMissing,
", ".join([x.__repr__() for x in self.children]),
)
| 39.748111 | 93 | 0.557098 |
79405e254c2200450a3d39c76dfc5483134ea245 | 286 | py | Python | _modules/urls/handler404/views.py | looking-for-a-job/django-examples | dfafa450668cac5c0351f6c7238b8886511229bf | [
"Unlicense"
] | null | null | null | _modules/urls/handler404/views.py | looking-for-a-job/django-examples | dfafa450668cac5c0351f6c7238b8886511229bf | [
"Unlicense"
] | null | null | null | _modules/urls/handler404/views.py | looking-for-a-job/django-examples | dfafa450668cac5c0351f6c7238b8886511229bf | [
"Unlicense"
] | null | null | null | #!/usr/bin/env python
from django.http import HttpResponse, HttpResponseNotFound
def my_view(request):
return HttpResponse('hello world')
def handler404(request, *args, **kwargs):
"""works only with DEBUG = False
"""
return HttpResponseNotFound("handler404 output")
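# Usage sketch: in the project urls.py this view would be wired up through the
# module-level hook, e.g.
#
#     handler404 = 'views.handler404'
#
# (the exact dotted path depends on the project layout), and as the docstring
# notes it only takes effect when DEBUG = False.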
| 22 | 58 | 0.723776 |
79405f9a201ac2c9e8bed737e48a2c8f319fa462 | 15,537 | py | Python | notebooks/01_dynamic_bernoulli/src/models.py | jfilter/ptf | 94e76a6f26344ba37c41793523e70ba75863c5c0 | [
"MIT"
] | 10 | 2020-05-20T07:25:21.000Z | 2021-05-20T15:14:55.000Z | notebooks/01_dynamic_bernoulli/src/models.py | jfilter/ptf | 94e76a6f26344ba37c41793523e70ba75863c5c0 | [
"MIT"
] | null | null | null | notebooks/01_dynamic_bernoulli/src/models.py | jfilter/ptf | 94e76a6f26344ba37c41793523e70ba75863c5c0 | [
"MIT"
] | 2 | 2020-04-28T15:07:41.000Z | 2021-05-06T14:30:01.000Z | import numpy as np
import os
import pickle
import tensorflow as tf
from tensorflow.contrib.distributions import Normal, Bernoulli
from tensorflow.contrib.tensorboard.plugins import projector
from sklearn.manifold import TSNE
from utils import *
class emb_model(object):
def __init__(self, args, d, logdir):
self.args = args
self.K = args.K
self.cs = args.cs
self.ns = args.ns
self.sig = args.sig
self.dynamic = args.dynamic
self.logdir = logdir
self.N = d.N
self.L = d.L
self.T = d.T
self.n_minibatch = d.n_train
self.n_test = d.n_test
self.labels = d.labels
self.unigram = d.unigram
self.dictionary = d.dictionary
self.query_words = d.query_words
self.train_feed = d.train_feed
#self.valid_data = d.valid_data
#self.test_data = d.test_data
self.n_iter = args.n_iter
self.n_epochs = d.n_epochs
self.n_test = d.n_test
self.n_valid = d.n_valid
self.alpha_trainable = True
if args.init:
fname = os.path.join('fits', d.name, args.init)
if 'alpha_constant' in args.init:
self.alpha_trainable = False
fname = fname.replace('/alpha_constant','')
fit = pickle.load(open(fname))
self.rho_init = fit['rho']
self.alpha_init = fit['alpha']
else:
self.rho_init = (np.random.randn(self.L, self.K)/self.K).astype('float32')
self.alpha_init = (np.random.randn(self.L, self.K)/self.K).astype('float32')
if not self.alpha_trainable:
self.rho_init = (0.1*np.random.randn(self.L, self.K)/self.K).astype('float32')
with open(os.path.join(self.logdir, "log_file.txt"), "a") as text_file:
text_file.write(str(self.args))
text_file.write('\n')
def dump(self, fname):
raise NotImplementedError()
def detect_drift(self):
raise NotImplementedError()
def eval_log_like(self, feed_dict):
return self.sess.run(tf.log(self.y_pos.mean()+0.000001), feed_dict = feed_dict)
def plot_params(self, plot_only=500):
with self.sess.as_default():
tsne = TSNE(perplexity=30, n_components=2, init='pca', n_iter=5000)
low_dim_embs_alpha2 = tsne.fit_transform(self.alpha.eval()[:plot_only])
plot_with_labels(low_dim_embs_alpha2[:plot_only], self.labels[:plot_only], self.logdir + '/alpha.eps')
tsne = TSNE(perplexity=30, n_components=2, init='pca', n_iter=5000)
low_dim_embs_rho2 = tsne.fit_transform(self.rho.eval()[:plot_only])
plot_with_labels(low_dim_embs_rho2[:plot_only], self.labels[:plot_only], self.logdir + '/rho.eps')
def print_word_similarities(self, words, num):
with self.sess.as_default():
rho = self.rho.eval()
for x in words:
x_idx = self.dictionary[x]
f_name = os.path.join(self.logdir, '%s_queries.txt' % (x))
with open(f_name, "w+") as text_file:
cos = cosine_distance(rho[x_idx], rho.T)
rank = np.argsort(cos)[1:num+1]
text_file.write("\n\n=====================================\n%s\n=====================================" % (x))
for r in rank:
text_file.write("\n%-20s %6.4f" % (self.labels[r], cos[r]))
def initialize_training(self):
optimizer = tf.train.AdamOptimizer()
self.train = optimizer.minimize(self.loss)
self.sess = tf.Session()
with self.sess.as_default():
tf.global_variables_initializer().run()
variable_summaries('alpha', self.alpha)
with tf.name_scope('objective'):
tf.summary.scalar('loss', self.loss)
tf.summary.scalar('priors', self.log_prior)
tf.summary.scalar('ll_pos', self.ll_pos)
tf.summary.scalar('ll_neg', self.ll_neg)
self.summaries = tf.summary.merge_all()
self.train_writer = tf.summary.FileWriter(self.logdir, self.sess.graph)
self.saver = tf.train.Saver()
config = projector.ProjectorConfig()
alpha = config.embeddings.add()
alpha.tensor_name = 'model/embeddings/alpha'
alpha.metadata_path = '../vocab.tsv'
if not self.dynamic:
rho = config.embeddings.add()
rho.tensor_name = 'model/embeddings/rho'
rho.metadata_path = '../vocab.tsv'
else:
for t in range(self.T):
rho = config.embeddings.add()
rho.tensor_name = 'model/embeddings/rho_'+str(t)
rho.metadata_path = '../vocab.tsv'
projector.visualize_embeddings(self.train_writer, config)
def train_embeddings(self):
for data_pass in range(self.n_iter):
for step in range(self.n_epochs):
if step % 100 == 0:
summary, ll_pos, ll_neg, _ = self.sess.run([self.summaries, self.ll_pos, self.ll_neg, self.train], feed_dict=self.train_feed(self.placeholders))
self.train_writer.add_summary(summary, data_pass*(self.n_epochs) + step)
print("%8d/%8d iter%3d; log-likelihood: %6.4f on positive samples,%6.4f on negative samples " % (step, self.n_epochs, data_pass, ll_pos, ll_neg))
else:
self.sess.run([self.train], feed_dict=self.train_feed(self.placeholders))
self.dump(self.logdir+"/variational"+str(data_pass)+".dat")
self.saver.save(self.sess, os.path.join(self.logdir, "model.ckpt"), data_pass)
self.print_word_similarities(self.query_words, 10)
if self.dynamic:
words = self.detect_drift()
self.print_word_similarities(words[:10], 10)
self.plot_params(500)
class bern_emb_model(emb_model):
def __init__(self, args, d, logdir):
super(bern_emb_model, self).__init__(args, d, logdir)
self.n_minibatch = self.n_minibatch.sum()
with tf.name_scope('model'):
# Data Placeholder
with tf.name_scope('input'):
self.placeholders = tf.placeholder(tf.int32)
self.words = self.placeholders
# Index Masks
with tf.name_scope('context_mask'):
self.p_mask = tf.cast(tf.range(self.cs/2, self.n_minibatch + self.cs/2),tf.int32)
rows = tf.cast(tf.tile(tf.expand_dims(tf.range(0, self.cs/2),[0]), [self.n_minibatch, 1]),tf.int32)
columns = tf.cast(tf.tile(tf.expand_dims(tf.range(0, self.n_minibatch), [1]), [1, self.cs/2]),tf.int32)
self.ctx_mask = tf.concat([rows+columns, rows+columns +self.cs/2+1], 1)
with tf.name_scope('embeddings'):
self.rho = tf.Variable(self.rho_init, name='rho')
self.alpha = tf.Variable(self.alpha_init, name='alpha', trainable=self.alpha_trainable)
with tf.name_scope('priors'):
prior = Normal(loc = 0.0, scale = self.sig)
if self.alpha_trainable:
self.log_prior = tf.reduce_sum(prior.log_prob(self.rho) + prior.log_prob(self.alpha))
else:
self.log_prior = tf.reduce_sum(prior.log_prob(self.rho))
with tf.name_scope('natural_param'):
# Taget and Context Indices
with tf.name_scope('target_word'):
self.p_idx = tf.gather(self.words, self.p_mask)
self.p_rho = tf.squeeze(tf.gather(self.rho, self.p_idx))
# Negative samples
with tf.name_scope('negative_samples'):
unigram_logits = tf.tile(tf.expand_dims(tf.log(tf.constant(self.unigram)), [0]), [self.n_minibatch, 1])
self.n_idx = tf.multinomial(unigram_logits, self.ns)
self.n_rho = tf.gather(self.rho, self.n_idx)
with tf.name_scope('context'):
self.ctx_idx = tf.squeeze(tf.gather(self.words, self.ctx_mask))
self.ctx_alphas = tf.gather(self.alpha, self.ctx_idx)
# Natural parameter
ctx_sum = tf.reduce_sum(self.ctx_alphas,[1])
self.p_eta = tf.expand_dims(tf.reduce_sum(tf.multiply(self.p_rho, ctx_sum),-1),1)
self.n_eta = tf.reduce_sum(tf.multiply(self.n_rho, tf.tile(tf.expand_dims(ctx_sum,1),[1,self.ns,1])),-1)
# Conditional likelihood
self.y_pos = Bernoulli(logits = self.p_eta)
self.y_neg = Bernoulli(logits = self.n_eta)
self.ll_pos = tf.reduce_sum(self.y_pos.log_prob(1.0))
self.ll_neg = tf.reduce_sum(self.y_neg.log_prob(0.0))
self.log_likelihood = self.ll_pos + self.ll_neg
scale = 1.0*self.N/self.n_minibatch
self.loss = - (self.n_epochs * self.log_likelihood + self.log_prior)
def dump(self, fname):
with self.sess.as_default():
dat = {'rho': self.rho.eval(),
'alpha': self.alpha.eval()}
pickle.dump( dat, open( fname, "a+" ) )
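# The natural parameter assembled above is the Bernoulli embedding score
#     eta(target, context) = rho_target . sum_{c in context} alpha_c
# observed (target, context) pairs are modeled as Bernoulli(sigmoid(eta)) draws
# equal to 1 and negative samples as draws equal to 0, which is exactly what
# ll_pos and ll_neg accumulate before the log-prior term is added.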
class dynamic_bern_emb_model(emb_model):
def __init__(self, args, d, logdir):
super(dynamic_bern_emb_model, self).__init__(args, d, logdir)
with tf.name_scope('model'):
with tf.name_scope('embeddings'):
self.alpha = tf.Variable(self.alpha_init, name='alpha', trainable=self.alpha_trainable)
self.rho_t = {}
for t in range(-1,self.T):
self.rho_t[t] = tf.Variable(self.rho_init
+ 0.001*tf.random_normal([self.L, self.K])/self.K,
name = 'rho_'+str(t))
with tf.name_scope('priors'):
global_prior = Normal(loc = 0.0, scale = self.sig)
local_prior = Normal(loc = 0.0, scale = self.sig/100.0)
self.log_prior = tf.reduce_sum(global_prior.log_prob(self.alpha))
self.log_prior += tf.reduce_sum(global_prior.log_prob(self.rho_t[-1]))
for t in range(self.T):
self.log_prior += tf.reduce_sum(local_prior.log_prob(self.rho_t[t] - self.rho_t[t-1]))
with tf.name_scope('likelihood'):
self.placeholders = {}
self.y_pos = {}
self.y_neg = {}
self.ll_pos = 0.0
self.ll_neg = 0.0
for t in range(self.T):
# Index Masks
p_mask = tf.range(self.cs/2,self.n_minibatch[t] + self.cs/2)
rows = tf.tile(tf.expand_dims(tf.range(0, self.cs/2),[0]), [self.n_minibatch[t], 1])
columns = tf.tile(tf.expand_dims(tf.range(0, self.n_minibatch[t]), [1]), [1, self.cs/2])
ctx_mask = tf.concat([rows+columns, rows+columns +self.cs/2+1], 1)
# Data Placeholder
self.placeholders[t] = tf.placeholder(tf.int32, shape = (self.n_minibatch[t] + self.cs))
                # Target and Context Indices
p_idx = tf.gather(self.placeholders[t], p_mask)
ctx_idx = tf.squeeze(tf.gather(self.placeholders[t], ctx_mask))
# Negative samples
unigram_logits = tf.tile(tf.expand_dims(tf.log(tf.constant(self.unigram)), [0]), [self.n_minibatch[t], 1])
n_idx = tf.multinomial(unigram_logits, self.ns)
# Context vectors
ctx_alphas = tf.gather(self.alpha, ctx_idx)
p_rho = tf.squeeze(tf.gather(self.rho_t[t], p_idx))
n_rho = tf.gather(self.rho_t[t], n_idx)
# Natural parameter
ctx_sum = tf.reduce_sum(ctx_alphas,[1])
p_eta = tf.expand_dims(tf.reduce_sum(tf.multiply(p_rho, ctx_sum),-1),1)
n_eta = tf.reduce_sum(tf.multiply(n_rho, tf.tile(tf.expand_dims(ctx_sum,1),[1,self.ns,1])),-1)
# Conditional likelihood
self.y_pos[t] = Bernoulli(logits = p_eta)
self.y_neg[t] = Bernoulli(logits = n_eta)
self.ll_pos += tf.reduce_sum(self.y_pos[t].log_prob(1.0))
self.ll_neg += tf.reduce_sum(self.y_neg[t].log_prob(0.0))
self.loss = - (self.n_epochs * (self.ll_pos + self.ll_neg) + self.log_prior)
def dump(self, fname):
with self.sess.as_default():
dat = {'alpha': self.alpha.eval()}
for t in range(self.T):
dat['rho_'+str(t)] = self.rho_t[t].eval()
pickle.dump( dat, open( fname, "a+" ) )
def eval_log_like(self, feed_dict):
log_p = np.zeros((0,1))
for t in range(self.T):
log_p_t = self.sess.run(tf.log(self.y_pos[t].mean()+0.000001), feed_dict = feed_dict)
log_p = np.vstack((log_p, log_p_t))
return log_p
def plot_params(self, plot_only=500):
with self.sess.as_default():
tsne = TSNE(perplexity=30, n_components=2, init='pca', n_iter=5000)
low_dim_embs_alpha = tsne.fit_transform(self.alpha.eval()[:plot_only])
plot_with_labels(low_dim_embs_alpha[:plot_only], self.labels[:plot_only], self.logdir + '/alpha.eps')
for t in [0, int(self.T/2), self.T-1]:
w_idx_t = range(plot_only)
np_rho = self.rho_t[t].eval()
tsne = TSNE(perplexity=30, n_components=2, init='pca', n_iter=5000)
low_dim_embs_rho = tsne.fit_transform(np_rho[w_idx_t,:])
plot_with_labels(low_dim_embs_rho, self.labels[w_idx_t], self.logdir + '/rho_' + str(t) + '.eps')
def detect_drift(self, metric='total_dist'):
if metric == 'total_dist':
tf_dist, tf_w_idx = tf.nn.top_k(tf.reduce_sum(tf.square(self.rho_t[self.T-1]-self.rho_t[0]),1), 500)
else:
print('unknown metric')
return
dist, w_idx = self.sess.run([tf_dist, tf_w_idx])
words = self.labels[w_idx]
f_name = self.logdir + '/top_drifting_words.txt'
with open(f_name, "w+") as text_file:
for (w, drift) in zip(w_idx,dist):
text_file.write("\n%-20s %6.4f" % (self.labels[w], drift))
return words
def print_word_similarities(self, words, num):
for x in words:
x_idx = self.dictionary[x]
f_name = os.path.join(self.logdir, '%s_queries.txt' % (x))
with open(f_name, "w+") as text_file:
for t_idx in xrange(self.T):
with self.sess.as_default():
rho_t = self.rho_t[t_idx].eval()
cos = cosine_distance(rho_t[x_idx], rho_t.T)
rank = np.argsort(cos)[1:num+1]
text_file.write("\n\n================================\n%s, t = %d\n================================" % (x,t_idx))
for r in rank:
text_file.write("\n%-20s %6.4f" % (self.labels[r], cos[r]))
def define_model(args, d, logdir):
if args.dynamic:
m = dynamic_bern_emb_model(args, d, logdir)
else:
m = bern_emb_model(args, d, logdir)
return m
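# Minimal usage sketch (an assumption, not part of the original training driver:
# `args`, `d`, and `logdir` are whatever the caller builds from its CLI/data):
#
#   m = define_model(args, d, logdir)   # args.dynamic picks the dynamic variant
#   # training then runs m.sess.run([m.train], feed_dict=m.train_feed(m.placeholders))
#   # and periodically calls m.dump(...) / m.saver.save(...), as at the top of this file.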
| 45.165698 | 165 | 0.557765 |
79406056abb80590dc0ab387cab25bc8240864a5 | 668 | py | Python | startup.py | ruthogunnnaike/SeeSec---IoT-Vulnerablity-Scanner | d2186421607af0ef3351b7d95c2478c4ac307931 | [
"MIT"
] | 7 | 2017-09-09T06:16:16.000Z | 2020-12-23T09:00:23.000Z | startup.py | eshcrow/SeeSec---IoT-Vulnerablity-Scanner | d2186421607af0ef3351b7d95c2478c4ac307931 | [
"MIT"
] | null | null | null | startup.py | eshcrow/SeeSec---IoT-Vulnerablity-Scanner | d2186421607af0ef3351b7d95c2478c4ac307931 | [
"MIT"
] | 6 | 2018-05-12T10:07:42.000Z | 2021-12-06T15:24:17.000Z |
def launch():
from pox.log.level import launch
launch(DEBUG=True)
from pox.misc.firewall import launch
launch()
from pox.openflow.keepalive import launch
launch(interval=300)
from pox.forwarding.l3_learning import launch
launch()
from pox.proto.dhcpd import launch
launch()
from pox.proto.dns_spy import launch
launch()
from pox.host_tracker.host_tracker import launch
launch()
from pox.openflow.discovery import launch
launch() # 15 seconds
from pox.forwarding.l2_pairs import launch
launch()
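# Usage note (assuming this module sits on POX's component search path): POX
# calls launch() above when started as `./pox.py startup`.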
| 20.875 | 56 | 0.615269 |
7940610d4644daf2201af34e4208b17352066110 | 2,242 | py | Python | utils/transforms.py | Vaden4d/logo-classifier | 18c397e52352da8e79868158123c13bf0417130f | [
"MIT"
] | null | null | null | utils/transforms.py | Vaden4d/logo-classifier | 18c397e52352da8e79868158123c13bf0417130f | [
"MIT"
] | null | null | null | utils/transforms.py | Vaden4d/logo-classifier | 18c397e52352da8e79868158123c13bf0417130f | [
"MIT"
] | null | null | null | import albumentations as A
from albumentations.pytorch.transforms import ToTensor
def get_train_transform(img_size):
train_transform = A.Compose([
A.Resize(img_size, img_size),
A.Transpose(p=0.5),
A.HorizontalFlip(p=0.5),
A.VerticalFlip(p=0.5),
A.ShiftScaleRotate(p=0.5),
A.HueSaturationValue(hue_shift_limit=0.2, sat_shift_limit=0.2, val_shift_limit=0.2, p=0.5),
A.RandomBrightnessContrast(brightness_limit=(-0.1,0.1), contrast_limit=(-0.1, 0.1), p=0.5),
A.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225], max_pixel_value=255.0, p=1.0),
A.CoarseDropout(p=0.5),
A.Cutout(p=0.5),
ToTensor()])
return train_transform
def get_valid_transform(img_size):
# constants from imagenet data
test_transform = A.Compose([A.Resize(img_size, img_size),
A.Normalize(
mean=[0.485, 0.456, 0.406],
std=[0.229, 0.224, 0.225]
),
ToTensor()
])
return test_transform
def get_transforms(img_size):
train_transform = A.Compose([A.Resize(img_size, img_size),
A.HorizontalFlip(),
A.VerticalFlip(),
A.ShiftScaleRotate(),
A.Normalize(
mean=[0.485, 0.456, 0.406],
std=[0.229, 0.224, 0.225]
),
ToTensor()
])
test_transform = A.Compose([A.Resize(img_size, img_size),
A.Normalize(
mean=[0.485, 0.456, 0.406],
std=[0.229, 0.224, 0.225]
),
ToTensor()
])
return train_transform, test_transform
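# Minimal usage sketch (assumes HxWxC uint8 numpy images, which is what
# albumentations expects; not part of the original module):
#
#   import numpy as np
#   train_tf, test_tf = get_transforms(img_size=224)
#   img = np.zeros((256, 256, 3), dtype=np.uint8)   # placeholder image
#   tensor = train_tf(image=img)["image"]           # CxHxW torch.Tensor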
| 41.518519 | 109 | 0.417484 |
79406127fb3bf05f843e8c62030aa53b6c0230fb | 13,045 | py | Python | testscripts/RDKB/component/LOG_AGENT/TS_LOGAGENT_CM_DisableLog.py | rdkcmf/rdkb-tools-tdkb | 9f9c3600cd701d5fc90ac86a6394ebd28d49267e | [
"Apache-2.0"
] | null | null | null | testscripts/RDKB/component/LOG_AGENT/TS_LOGAGENT_CM_DisableLog.py | rdkcmf/rdkb-tools-tdkb | 9f9c3600cd701d5fc90ac86a6394ebd28d49267e | [
"Apache-2.0"
] | null | null | null | testscripts/RDKB/component/LOG_AGENT/TS_LOGAGENT_CM_DisableLog.py | rdkcmf/rdkb-tools-tdkb | 9f9c3600cd701d5fc90ac86a6394ebd28d49267e | [
"Apache-2.0"
] | null | null | null | ##########################################################################
# If not stated otherwise in this file or this component's Licenses.txt
# file the following copyright and licenses apply:
#
# Copyright 2019 RDK Management
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
##########################################################################
'''
<?xml version="1.0" encoding="UTF-8"?><xml>
<id/>
<version>1</version>
<name>TS_LOGAGENT_CM_DisableLog</name>
<primitive_test_id/>
<primitive_test_name>LogAgent_DoNothing</primitive_test_name>
<primitive_test_version>1</primitive_test_version>
<status>FREE</status>
<synopsis>Disable logging of CM module and check if the logs are getting updated in CMlog.txt.0</synopsis>
<groups_id/>
<execution_time>40</execution_time>
<long_duration>false</long_duration>
<advanced_script>false</advanced_script>
<remarks/>
<skip>false</skip>
<box_types>
<box_type>Broadband</box_type>
</box_types>
<rdk_versions>
<rdk_version>RDKB</rdk_version>
</rdk_versions>
<test_cases>
<test_case_id>TC_LOGAGENT_4</test_case_id>
<test_objective>Disable logging of CM module and check if the logs are getting updated in CMlog.txt.0</test_objective>
<test_type>Positive</test_type>
<test_setup>Broadband,RPI,Emulator</test_setup>
<pre_requisite>TDK Agent should be in running state.</pre_requisite>
<api_or_interface_used>None</api_or_interface_used>
<input_parameters>Device.LogAgent.X_RDKCENTRAL-COM_LoggerEnable : True
Device.LogAgent.X_RDKCENTRAL-COM_CM_LoggerEnable : False</input_parameters>
<automation_approch>1. Load LogAgent module
2. Get and save the values of log enable status
3. Set false to CM log enable status
4. Check if the CMAgent process is up and running in device
5. When the process is up, get and save the timestamp of CMlog.txt.0
6. Get the values of CM related parameters and check if the timestamp in log file is changed or not.
7. Revert the values
8. Unload module</automation_approch>
<except_output>The logs should not get updated in CMlog.txt.0</except_output>
<priority>High</priority>
<test_stub_interface>LOG_AGENT</test_stub_interface>
<test_script>TS_LOGAGENT_CM_DisableLog</test_script>
<skipped>No</skipped>
<release_version>M65</release_version>
<remarks>None</remarks>
</test_cases>
</xml>
'''
# use tdklib library, which provides a wrapper for the tdk testcase script
import tdklib;
from time import sleep;
from tdkbVariables import *;
MAX_RETRY = 6;
#Test component to be tested
obj = tdklib.TDKScriptingLibrary("tdkbtr181","1");
sysobj = tdklib.TDKScriptingLibrary("sysutil","1");
#IP and port of the box. No need to change;
#these will be replaced with the corresponding box IP and port while executing the script
ip = <ipaddress>
port = <port>
obj.configureTestCase(ip,port,'TS_LOGAGENT_CM_DisableLog');
sysobj.configureTestCase(ip,port,'TS_LOGAGENT_CM_DisableLog');
#Get the result of connection with test component and DUT
loadmodulestatus=obj.getLoadModuleResult();
sysloadmodulestatus=sysobj.getLoadModuleResult();
if "SUCCESS" in loadmodulestatus.upper() and "SUCCESS" in sysloadmodulestatus.upper():
#Set the result status of execution
obj.setLoadModuleStatus("SUCCESS")
sysobj.setLoadModuleStatus("SUCCESS")
tdkTestObj = obj.createTestStep('TDKB_TR181Stub_Get');
tdkTestObj.addParameter("ParamName","Device.LogAgent.X_RDKCENTRAL-COM_LoggerEnable");
expectedresult="SUCCESS";
#Execute the test case in DUT
tdkTestObj.executeTestCase(expectedresult);
actualresult = tdkTestObj.getResult();
LogStatus = tdkTestObj.getResultDetails();
if expectedresult in actualresult:
#Set the result status of execution
tdkTestObj.setResultStatus("SUCCESS");
print "TEST STEP 1: Get the enable status of Logging";
print "EXPECTED RESULT 1: Should get the enable status of logging";
print "ACTUAL RESULT 1: Enable status is %s" %LogStatus;
#Get the result of execution
print "[TEST EXECUTION RESULT] : SUCCESS";
tdkTestObj.addParameter("ParamName","Device.LogAgent.X_RDKCENTRAL-COM_CM_LoggerEnable");
#Execute the test case in DUT
tdkTestObj.executeTestCase(expectedresult);
actualresult = tdkTestObj.getResult();
CMLogStatus = tdkTestObj.getResultDetails();
if expectedresult in actualresult:
#Set the result status of execution
tdkTestObj.setResultStatus("SUCCESS");
print "TEST STEP 2: Get the enable status of CM Logging";
print "EXPECTED RESULT 2: Should get the enable status of CM logging";
print "ACTUAL RESULT 2: Enable status is %s" %CMLogStatus;
#Get the result of execution
print "[TEST EXECUTION RESULT] : SUCCESS";
#Set true to both namespaces
tdkTestObj = obj.createTestStep("TDKB_TR181Stub_SetMultiple");
tdkTestObj.addParameter("paramList","Device.LogAgent.X_RDKCENTRAL-COM_LoggerEnable|true|bool|Device.LogAgent.X_RDKCENTRAL-COM_CM_LoggerEnable|false|bool");
tdkTestObj.executeTestCase(expectedresult);
actualresult = tdkTestObj.getResult();
details = tdkTestObj.getResultDetails();
if expectedresult in actualresult:
tdkTestObj.setResultStatus("SUCCESS");
print "TEST STEP 3: Disable logging of CM module"
print "EXPECTED RESULT 3: Should disable CM logging"
print "ACTUAL RESULT 3: %s" %details;
print "TEST EXECUTION RESULT :SUCCESS";
#check whether the process is restarted automatically
query="sh %s/tdk_platform_utility.sh checkProcess CcspCMAgentSsp" %TDK_PATH
print "query:%s" %query
tdkTestObj = sysobj.createTestStep('ExecuteCmd');
tdkTestObj.addParameter("command", query)
expectedresult="SUCCESS";
print "Check for every 10 secs whether the process is up"
retryCount = 0;
while retryCount < MAX_RETRY:
tdkTestObj.executeTestCase("SUCCESS");
actualresult = tdkTestObj.getResult();
pid = tdkTestObj.getResultDetails().strip();
if expectedresult in actualresult and pid:
break;
else:
sleep(10);
retryCount = retryCount + 1;
if not pid:
print "Retry Again: Check for every 5 mins whether the process is up"
retryCount = 0;
while retryCount < MAX_RETRY:
tdkTestObj.executeTestCase("SUCCESS");
actualresult = tdkTestObj.getResult();
pid = tdkTestObj.getResultDetails().strip();
if expectedresult in actualresult and pid:
break;
else:
sleep(300);
retryCount = retryCount + 1;
sleep(30)
#Check the timestamp of CMlog.txt.0
tdkTestObj = sysobj.createTestStep('ExecuteCmd');
cmd = "tail -1 /rdklogs/logs/CMlog.txt.0"
tdkTestObj.addParameter("command", cmd);
tdkTestObj.executeTestCase(expectedresult);
actualresult = tdkTestObj.getResult();
lastLineofLog = tdkTestObj.getResultDetails().strip();
if lastLineofLog:
oldTimeStamp = lastLineofLog.split(" ")[0]
print "Current timestamp in CMlog.txt.0 is %s" %oldTimeStamp
#If log file is empty
else:
oldTimeStamp = " "
print "Currently the log file of CM is empty";
#Get the values of CM parameters
tdkTestObj = obj.createTestStep('TDKB_TR181Stub_Get');
tdkTestObj.addParameter("ParamName","Device.X_CISCO_COM_CableModem.UpstreamChannel.");
expectedresult="SUCCESS";
#Execute the test case in DUT
tdkTestObj.executeTestCase(expectedresult);
actualresult = tdkTestObj.getResult();
details = tdkTestObj.getResultDetails()
if expectedresult in actualresult:
#Set the result status of execution
tdkTestObj.setResultStatus("SUCCESS");
print "TEST STEP 4: Get the value of CM parameters";
print "EXPECTED RESULT 4: Should get the value of CM parameters";
#Get the result of execution
print "[TEST EXECUTION RESULT] : SUCCESS";
#Get and compare the new timestamp of CMlog.txt.0
tdkTestObj = sysobj.createTestStep('ExecuteCmd');
tdkTestObj.addParameter("command", cmd);
tdkTestObj.executeTestCase(expectedresult);
actualresult = tdkTestObj.getResult();
lastLineofLog = tdkTestObj.getResultDetails().strip();
if lastLineofLog:
newTimeStamp = lastLineofLog.split(" ")[0]
print "New timestamp in CMlog.txt.0 is %s" %newTimeStamp
else:
newTimeStamp = " ";
print "CMlog.txt.0 is empty"
if newTimeStamp == oldTimeStamp:
print "SUCCESS:The logs are not updated in the log file"
tdkTestObj.setResultStatus("SUCCESS");
else:
print "FAILURE:The logs are updated in log file even after disabling logging feature"
tdkTestObj.setResultStatus("FAILURE");
else:
#Set the result status of execution
tdkTestObj.setResultStatus("FAILURE");
print "TEST STEP 4: Get the value of CM parameters";
print "EXPECTED RESULT 4: Should get the value of CM parameters";
#Get the result of execution
print "[TEST EXECUTION RESULT] : FAILURE";
#Revert the log enable status
tdkTestObj = obj.createTestStep("TDKB_TR181Stub_SetMultiple");
tdkTestObj.addParameter("paramList","Device.LogAgent.X_RDKCENTRAL-COM_LoggerEnable|%s|bool|Device.LogAgent.X_RDKCENTRAL-COM_CM_LoggerEnable|%s|bool" %(LogStatus,CMLogStatus));
tdkTestObj.executeTestCase(expectedresult);
actualresult = tdkTestObj.getResult();
details = tdkTestObj.getResultDetails();
if expectedresult in actualresult:
tdkTestObj.setResultStatus("SUCCESS");
print "TEST STEP : Revert the logging status"
print "EXPECTED RESULT : Should revert the logging status"
print "ACTUAL RESULT : %s" %details;
print "TEST EXECUTION RESULT :SUCCESS";
else:
tdkTestObj.setResultStatus("FAILURE");
print "TEST STEP : Revert the logging status"
print "EXPECTED RESULT : Should revert the logging status"
print "ACTUAL RESULT : %s" %details;
print "TEST EXECUTION RESULT :FAILURE";
else:
tdkTestObj.setResultStatus("FAILURE");
print "TEST STEP 3: disable logging of CM module"
print "EXPECTED RESULT 3: Should disable CM logging"
print "ACTUAL RESULT 3: %s" %details;
print "TEST EXECUTION RESULT :FAILURE";
else:
#Set the result status of execution
tdkTestObj.setResultStatus("FAILURE");
print "TEST STEP 2: Get the enable status of CM Logging";
print "EXPECTED RESULT 2: Should get the enable status of CM logging";
print "ACTUAL RESULT 2: Enable status is %s" %CMLogStatus;
#Get the result of execution
print "[TEST EXECUTION RESULT] : FAILURE";
else:
#Set the result status of execution
tdkTestObj.setResultStatus("FAILURE");
print "TEST STEP 1: Get the enable status of Logging";
print "EXPECTED RESULT 1: Should get the enable status of logging";
print "ACTUAL RESULT 1: Enable status is %s" %LogStatus;
#Get the result of execution
print "[TEST EXECUTION RESULT] : FAILURE";
obj.unloadModule("tdkbtr181");
sysobj.unloadModule("sysutil");
else:
print "Failed to load module";
obj.setLoadModuleStatus("FAILURE");
print "Module loading failed";
| 46.92446 | 191 | 0.637792 |
7940619aeb9c124198d0f9756ba3f9e76d368a6e | 193 | py | Python | 020-Transaction Atomicity/core/urls.py | karuvally/Django-ORM-Mastery-DJ003 | 5792d717185b231449d41bd4ef82d6b4367d4722 | [
"MIT"
] | 33 | 2021-06-08T21:49:24.000Z | 2022-03-06T22:31:59.000Z | 020-Transaction Atomicity/core/urls.py | WilliamOtieno/Django-ORM-Mastery-DJ003 | 0eca2d2408bfc1112b7092fbdce1c5f188a428d3 | [
"MIT"
] | null | null | null | 020-Transaction Atomicity/core/urls.py | WilliamOtieno/Django-ORM-Mastery-DJ003 | 0eca2d2408bfc1112b7092fbdce1c5f188a428d3 | [
"MIT"
] | 33 | 2021-06-09T12:43:17.000Z | 2022-03-29T08:16:12.000Z | from django.contrib import admin
from django.urls import path
from bank import views
urlpatterns = [
path('admin/', admin.site.urls),
path('', views.process_payment, name='payment')
]
| 21.444444 | 51 | 0.720207 |
794061aed042e6ccc9ba6c6f7f48b97ef51c9585 | 5,008 | py | Python | tests_samoa/table_tools/plot_data.py | chameleon-hpc/chameleon-scripts | 67c22be72b436395384dd9d83ef05a3397a851ee | [
"BSD-3-Clause"
] | null | null | null | tests_samoa/table_tools/plot_data.py | chameleon-hpc/chameleon-scripts | 67c22be72b436395384dd9d83ef05a3397a851ee | [
"BSD-3-Clause"
] | null | null | null | tests_samoa/table_tools/plot_data.py | chameleon-hpc/chameleon-scripts | 67c22be72b436395384dd9d83ef05a3397a851ee | [
"BSD-3-Clause"
] | null | null | null | import sys
import getopt
import matplotlib.pyplot as plt
import matplotlib
import os
from filter_csv import *
markers_times = 'x'
markers_efficiency = 'o'
markers_imbalance = 'o'
colors = ['#1f77b4','#ff7f0e','#2ca02c']
def plot_speedup_curve(ax,x,base_time, times, label, marker):
speedups = [ base_time/t for t in times]
ax.plot(x,speedups,label=label,marker=marker,markerfacecolor="none")
def plot_times_curve(ax,x,times,label, marker,color):
print(x)
print(times)
ax.plot(x,times,label=label,marker=marker, color=color)
def plot_times_curve_error(ax,x,times,label, marker,color,yerr):
print (yerr)
ax.errorbar(x,times,label=label,marker=marker, color=color,yerr=yerr)
def plot_efficiency_curve(ax,x, base_time, times, label, marker):
speedups = [ base_time/t for t in times]
efficiencies = [ r[1]/r[0] for r in zip(x,speedups)]
print (efficiencies)
ax.plot(x,efficiencies,label=label,marker=marker,markerfacecolor="none")
def plot_imbalance_curve(ax, x, imbalance, label, marker,color):
ax.plot(x,imbalance,label=label,marker=marker,linestyle="dotted",markerfacecolor="none",color=color)
def main():
plot_times = False
plot_efficiency = False
plot_speedup = False
plot_imbalance = False
output_filename = "plot.pdf"
x_filter = "ranks"
x_label = "Nodes"
labels = []
color_offset = 0
try:
opts, arg = getopt.getopt(sys.argv[1:], "i:c:f:o:tsebl:x:","xlabel=")
except getopt.GetoptError as err:
print(str(err))
sys.exit(2)
file=""
for o, a in opts:
if o=="-i":
file=a
filter=[]
for o, a in opts:
if o=="-f":
filter=a.split(";")
if o=="-e":
plot_efficiency = True
if o=="-t":
plot_times = True
if o=="-l":
labels=a.split(";")
if o=="-s":
plot_speedup = True
if o=="-o":
output_filename = a
if o=="-b":
plot_imbalance = True
if o=="-x":
x_filter = a
if o=="--xlabel":
x_label = a
if labels==[]:
labels = filter
#x=[1,2,4,8,16,32,56,128,256,512]
#times with CCP
fig = plt.figure()
ax = fig.add_subplot(111)
ax.set_xscale('log', basex=2)
ax.set_yscale('log', basey=2)
#ax.set
curr_ax = ax
cnt = 0
if plot_times:
print("plotting times")
for f in filter:
print (f)
dict=getSortedDict(file,f,"",x_filter)
print (dict)
times=extractCol(dict, "min_time")
print (times)
err=extractCol(dict, "std_dev")
x=extractCol(dict, x_filter)
curr_ax.set_xticks(x)
curr_ax.get_xaxis().set_major_formatter(matplotlib.ticker.ScalarFormatter())
curr_ax.get_yaxis().set_major_formatter(matplotlib.ticker.ScalarFormatter())
plot_times_curve(curr_ax, x,times, labels[cnt]+" - wall clock time",markers_times, colors[color_offset+cnt])
#plot_times_curve_error(curr_ax, x,times, labels[cnt]+" - wall clock time", markers_times, colors[color_offset+cnt],err)
cnt = cnt+1
curr_ax.legend(frameon=False, loc='lower left',fontsize='small')
#curr_ax.set_ylim([0.0,1024.0])
curr_ax.set_xlabel(x_label)
curr_ax.set_ylabel("Wall Clock Execution Time [s]")
if plot_speedup:
if cnt>0:
cnt = 0
curr_ax = ax.twinx()
for f in filter:
#print (f)
dict=getSortedDict(file,f,"",x_filter)
print (dict)
times=extractCol(dict, "mean_time")
x=extractCol(dict, x_filter)
curr_ax.set_xticks(x)
curr_ax.get_xaxis().set_major_formatter(matplotlib.ticker.ScalarFormatter())
      plot_speedup_curve(curr_ax, x, times[0], times, labels[cnt], 'x')
cnt = cnt+1
curr_ax.legend(frameon=False)
if plot_efficiency:
if cnt>0:
cnt = 0
curr_ax = ax.twinx()
for f in filter:
#print (f)
dict=getSortedDict(file,f,"",x_filter)
print (dict)
times=extractCol(dict, "mean_time")
x=extractCol(dict, x_filter)
curr_ax.set_xticks(x)
curr_ax.get_xaxis().set_major_formatter(matplotlib.ticker.ScalarFormatter())
plot_efficiency_curve(curr_ax,x,times[0], times, labels[cnt]+" - parallel efficiency",markers_efficiency)
cnt = cnt+1
curr_ax.set_ylim([0,1.19])
curr_ax.set_ylabel("Parallel Efficiency")
curr_ax.legend(frameon=False, loc='upper right', fontsize='small')
if plot_imbalance:
if cnt>0:
cnt = 0
curr_ax = ax.twinx()
for f in filter:
#print (f)
dict=getSortedDict(file,f,"",x_filter)
print (dict)
imbalances=extractCol(dict, "avg_imbalance")
x=extractCol(dict, x_filter)
curr_ax.set_xticks(x)
curr_ax.get_xaxis().set_major_formatter(matplotlib.ticker.ScalarFormatter())
plot_imbalance_curve(curr_ax,x,imbalances, labels[cnt]+" - imbalance",markers_imbalance,colors[color_offset+cnt])
cnt = cnt+1
curr_ax.set_ylim([1.0,2.0])
curr_ax.set_yscale('linear')
curr_ax.set_xlabel(x_label)
curr_ax.set_ylabel("Imbalance (max_load/avg_load)")
curr_ax.legend(frameon=False, loc='upper right', fontsize='small')
plt.savefig(os.path.join(os.path.split(file)[0], output_filename), bbox_inches='tight')
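# Example invocation (file names and filter strings below are placeholders):
#   python plot_data.py -i results.csv -f "ccp;no_ccp" -l "CCP;no CCP" -t -b -o scaling.pdf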
if __name__=="__main__":
main()
| 27.516484 | 125 | 0.678514 |
794061dd8e2e069a2b1b07504dfb10633bf7e7fb | 458 | py | Python | plotly/validators/layout/annotation/_name.py | gnestor/plotly.py | a8ae062795ddbf9867b8578fe6d9e244948c15ff | [
"MIT"
] | 12 | 2020-04-18T18:10:22.000Z | 2021-12-06T10:11:15.000Z | plotly/validators/layout/annotation/_name.py | Vesauza/plotly.py | e53e626d59495d440341751f60aeff73ff365c28 | [
"MIT"
] | 27 | 2020-04-28T21:23:12.000Z | 2021-06-25T15:36:38.000Z | plotly/validators/layout/annotation/_name.py | Vesauza/plotly.py | e53e626d59495d440341751f60aeff73ff365c28 | [
"MIT"
] | 6 | 2020-04-18T23:07:08.000Z | 2021-11-18T07:53:06.000Z | import _plotly_utils.basevalidators
class NameValidator(_plotly_utils.basevalidators.StringValidator):
def __init__(
self, plotly_name='name', parent_name='layout.annotation', **kwargs
):
super(NameValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
edit_type=kwargs.pop('edit_type', 'none'),
role=kwargs.pop('role', 'style'),
**kwargs
)
| 28.625 | 75 | 0.631004 |
794062c29ab6f35aee4a6a6cf2a457ad8f8e5047 | 739 | py | Python | LeetCode/randomPickInteger.py | johanaluna/DataScience_summary | a365728b81a38f31a28e97666252910a23732936 | [
"MIT"
] | null | null | null | LeetCode/randomPickInteger.py | johanaluna/DataScience_summary | a365728b81a38f31a28e97666252910a23732936 | [
"MIT"
] | null | null | null | LeetCode/randomPickInteger.py | johanaluna/DataScience_summary | a365728b81a38f31a28e97666252910a23732936 | [
"MIT"
] | null | null | null | import random
class Solution:
def __init__(self, w):
self.w = w
self.suma = sum(self.w[0][0])
self.sumAdded = [sum(self.w[0][0][0:x:1]) for x in range(1, len(self.w[0][0])+1)]
print(self.suma)
    def pickIndex(self):
        # Draw a value in [1, total weight]; the first prefix sum that reaches
        # it selects index j with probability proportional to w[j].
        rad = random.randint(1, self.suma)
        for j in range(len(self.sumAdded)):
            if rad <= self.sumAdded[j]:
                return j
w = [[[1]],[]]
w = [[[1,3]],[],[],[],[],[]]
# Your Solution object will be instantiated and called as such:
obj = Solution(w)
param_1 = obj.pickIndex()
print(param_1) | 25.482759 | 89 | 0.484438 |
7940637ce5ec1ee14232e7da8a7211b8b00b36e9 | 3,103 | py | Python | samples/client/petstore/python-experimental/petstore_api/models/dog_all_of.py | jaumard/openapi-generator | 239d68df3644cb0b60547184b7f0db17835940d3 | [
"Apache-2.0"
] | 1 | 2021-02-17T00:02:17.000Z | 2021-02-17T00:02:17.000Z | samples/client/petstore/python-experimental/petstore_api/models/dog_all_of.py | jaumard/openapi-generator | 239d68df3644cb0b60547184b7f0db17835940d3 | [
"Apache-2.0"
] | 8 | 2020-07-18T08:19:47.000Z | 2022-02-26T18:06:57.000Z | samples/client/petstore/python-experimental/petstore_api/models/dog_all_of.py | jaumard/openapi-generator | 239d68df3644cb0b60547184b7f0db17835940d3 | [
"Apache-2.0"
] | 2 | 2020-09-30T13:01:22.000Z | 2020-09-30T15:20:55.000Z | # coding: utf-8
"""
OpenAPI Petstore
This spec is mainly for testing Petstore server and contains fake endpoints, models. Please do not use this for any other purpose. Special characters: \" \\ # noqa: E501
The version of the OpenAPI document: 1.0.0
Generated by: https://openapi-generator.tech
"""
import pprint
import re # noqa: F401
import six
class DogAllOf(object):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
openapi_types = {
'breed': 'str',
}
attribute_map = {
'breed': 'breed', # noqa: E501
}
def __init__(self, breed=None): # noqa: E501
"""DogAllOf - a model defined in OpenAPI
Keyword Args:
breed (str): [optional] # noqa: E501
"""
self._breed = None
self.discriminator = None
if breed is not None:
self.breed = breed # noqa: E501
@property
def breed(self):
"""Gets the breed of this DogAllOf. # noqa: E501
:return: The breed of this DogAllOf. # noqa: E501
:rtype: str
"""
return self._breed
@breed.setter
def breed(
self,
breed):
"""Sets the breed of this DogAllOf.
:param breed: The breed of this DogAllOf. # noqa: E501
:type: str
"""
self._breed = (
breed)
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, DogAllOf):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
| 25.434426 | 174 | 0.538511 |
7940638e1d7faf83eea4618a79cfc9a46b2aeba7 | 2,552 | py | Python | logger.py | keonlee9420/tacotron2_toolkit | fe0e19fdb00f2554d99b0f33e56d65a1d1956b86 | [
"BSD-3-Clause"
] | 4 | 2021-04-26T13:48:38.000Z | 2022-01-16T03:21:03.000Z | logger.py | keonlee9420/tacotron2_toolkit | fe0e19fdb00f2554d99b0f33e56d65a1d1956b86 | [
"BSD-3-Clause"
] | null | null | null | logger.py | keonlee9420/tacotron2_toolkit | fe0e19fdb00f2554d99b0f33e56d65a1d1956b86 | [
"BSD-3-Clause"
] | 1 | 2021-05-10T06:17:57.000Z | 2021-05-10T06:17:57.000Z | import random
import torch
from tensorboardX import SummaryWriter
from plotting_utils import plot_alignment_to_numpy, plot_spectrogram_to_numpy
from plotting_utils import plot_gate_outputs_to_numpy
class Tacotron2Logger(SummaryWriter):
def __init__(self, logdir, use_mmi, use_guided_attn_loss):
super(Tacotron2Logger, self).__init__(logdir)
self.use_mmi = use_mmi
self.use_guided_attn_loss = use_guided_attn_loss
def log_training(self, total_loss, taco_loss, attn_loss, mi_loss, grad_norm,
gaf, learning_rate, duration, iteration):
self.add_scalar("training.loss", total_loss, iteration)
if self.use_guided_attn_loss:
self.add_scalar("training.guided_attn_loss", attn_loss, iteration)
if self.use_mmi:
self.add_scalar("training.taco_loss", taco_loss, iteration)
self.add_scalar("training.mi_loss", mi_loss, iteration)
self.add_scalar("grad.gaf", gaf, iteration)
self.add_scalar("grad.norm", grad_norm, iteration)
self.add_scalar("learning.rate", learning_rate, iteration)
self.add_scalar("duration", duration, iteration)
def log_validation(self, reduced_loss, model, y, y_pred, iteration):
self.add_scalar("validation.loss", reduced_loss, iteration)
_, _, mel_outputs, gate_outputs, alignments = y_pred
mel_targets, gate_targets = y
# plot distribution of parameters
for tag, value in model.named_parameters():
tag = tag.replace('.', '/')
self.add_histogram(tag, value.data.cpu().numpy(), iteration)
# plot alignment, mel target and predicted, gate target and predicted
idx = random.randint(0, alignments.size(0) - 1)
self.add_image(
"alignment",
plot_alignment_to_numpy(alignments[idx].data.cpu().numpy().T),
iteration, dataformats='HWC')
self.add_image(
"mel_target",
plot_spectrogram_to_numpy(mel_targets[idx].data.cpu().numpy()),
iteration, dataformats='HWC')
self.add_image(
"mel_predicted",
plot_spectrogram_to_numpy(mel_outputs[idx].data.cpu().numpy()),
iteration, dataformats='HWC')
self.add_image(
"gate",
plot_gate_outputs_to_numpy(
gate_targets[idx].data.cpu().numpy(),
torch.sigmoid(gate_outputs[idx]).data.cpu().numpy()),
iteration, dataformats='HWC')
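# Minimal usage sketch (all values below are placeholders from a hypothetical
# training loop, not from the original Tacotron2 trainer):
#
#   logger = Tacotron2Logger("logs/run1", use_mmi=False, use_guided_attn_loss=True)
#   logger.log_training(total_loss, taco_loss, attn_loss, mi_loss, grad_norm,
#                       gaf, learning_rate, duration, iteration)
#   logger.log_validation(val_loss, model, y, y_pred, iteration)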
| 44.77193 | 82 | 0.647335 |
794063de7e3d8f1d2af409b7c068184b15a8026a | 933 | py | Python | setup.py | Alex-Au1/Youtube_Downloader | 3ad7b39a11154b9a9fd205ad75ce58120cb3d9be | [
"MIT"
] | null | null | null | setup.py | Alex-Au1/Youtube_Downloader | 3ad7b39a11154b9a9fd205ad75ce58120cb3d9be | [
"MIT"
] | null | null | null | setup.py | Alex-Au1/Youtube_Downloader | 3ad7b39a11154b9a9fd205ad75ce58120cb3d9be | [
"MIT"
] | null | null | null | import setuptools
with open("README.md", "r", encoding="utf-8") as fh:
long_description = fh.read()
setuptools.setup(
name="Youtube_Video_Audio_Downloader",
version="0.1.2",
author="Alex Au",
author_email="[email protected]",
description="A Simple GUI interface to help download videos and audio files from Youtube using Youtube-dl and FFMPEG",
long_description=long_description,
long_description_content_type="text/markdown",
url="https://github.com/Alex-Au1/Youtube_Downloader",
packages=setuptools.find_packages(),
classifiers=[
"Programming Language :: Python :: 3",
"License :: OSI Approved :: MIT License",
"Operating System :: OS Independent",
],
install_requires=[
"Pillow",
"pyperclip",
"youtube_dl",
"youtube-search-python",
"requests",
"validators"
],
python_requires='>=3.6',
)
| 30.096774 | 122 | 0.652733 |
7940640d339fc94448be179bb1d3a37e20fb0780 | 2,316 | py | Python | interpolate.py | fmu2/Wasserstein-BiGAN | db4f7f26da54f255cd38451721108f3a762ba3d4 | [
"MIT"
] | 27 | 2020-02-21T21:51:54.000Z | 2021-12-13T09:04:52.000Z | interpolate.py | fmu2/Wasserstein-BiGAN | db4f7f26da54f255cd38451721108f3a762ba3d4 | [
"MIT"
] | null | null | null | interpolate.py | fmu2/Wasserstein-BiGAN | db4f7f26da54f255cd38451721108f3a762ba3d4 | [
"MIT"
] | 3 | 2020-06-26T10:30:05.000Z | 2021-05-26T17:17:53.000Z | import argparse, torch
from torchvision import utils
from wali_celeba import create_WALI
NLAT = 100
IMAGE_SIZE = 64
NUM_CHANNELS = 3
def interpolate(generator, z0, z1, nintp=10, path='linear', filepath=None):
""" Interpolate in the latent space.
Args:
generator: Generator network that takes z as input.
z0: Where interpolation starts.
z1: Where interpolation ends.
nintp: Number of intermediate steps.
path: Trajectory of interpolation. Default: linear
filepath: Where to save the images.
"""
assert path in ['linear', 'spherical']
  assert z0.size() == z1.size()
z0, z1 = z0.view(z0.size(0), NLAT, 1, 1), z1.view(z1.size(0), NLAT, 1, 1)
alphas = torch.linspace(0, 1, nintp)
imgs = []
if path == 'linear':
for alpha in alphas:
      z = z0 * (1 - alpha) + z1 * alpha
img = generator(z).detach_() * 0.5 + 0.5
imgs.append(img.cpu())
elif path == 'spherical':
nz0, nz1 = z0 / z0.norm(dim=1, keepdim=True), z1 / z1.norm(dim=1, keepdim=True)
theta = ((nz0 * nz1).sum(dim=1, keepdim=True)).acos()
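    # Spherical linear interpolation (slerp): theta is the angle between the
    # normalized latents; each step mixes z0 and z1 with sin-weighted factors.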
for alpha in alphas:
z = torch.sin((1 - alpha) * theta) / torch.sin(theta) * z0 \
+ torch.sin(alpha * theta) / torch.sin(theta) * z1
img = generator(z).detach_() * 0.5 + 0.5
imgs.append(img.cpu())
imgs = torch.cat(imgs, dim=1).view(-1, NUM_CHANNELS, IMAGE_SIZE, IMAGE_SIZE)
grid = utils.make_grid(imgs, nrow=nintp)
utils.save_image(grid, filepath)
print('Interpolated images saved.')
def main(args):
device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')
wali = create_WALI()
ckpt = torch.load(args.ckpt)
wali.load_state_dict(ckpt)
generator = wali.G.to(device)
z0 = torch.randn(args.n, NLAT, 1, 1).to(device)
z1 = torch.randn(args.n, NLAT, 1, 1).to(device)
interpolate(generator, z0, z1, nintp=10, path='linear', filepath=args.save_path)
if __name__ == "__main__":
parser = argparse.ArgumentParser(description='Plot interpolations for WALI.')
parser.add_argument("--ckpt", type=str, help='Path to the saved model checkpoint', default=None)
parser.add_argument("--n", type=int, help="number of interpolated paths", default=4)
parser.add_argument("--save-path", type=str, help="where to save the interpolations", default=None)
args = parser.parse_args()
main(args) | 35.090909 | 101 | 0.668394 |
794064eb8445a456367b3cb76f3e5f2192abbc18 | 11,339 | py | Python | tests/ignite/engine/test_create_supervised.py | Devanshu24/ignite | 2f0ba3e65cfa36b43bc87b315733fd3f3585e430 | [
"BSD-3-Clause"
] | null | null | null | tests/ignite/engine/test_create_supervised.py | Devanshu24/ignite | 2f0ba3e65cfa36b43bc87b315733fd3f3585e430 | [
"BSD-3-Clause"
] | null | null | null | tests/ignite/engine/test_create_supervised.py | Devanshu24/ignite | 2f0ba3e65cfa36b43bc87b315733fd3f3585e430 | [
"BSD-3-Clause"
] | null | null | null | import os
from distutils.version import LooseVersion
from importlib.util import find_spec
from typing import Optional, Union
from unittest.mock import patch
import pytest
import torch
from pytest import approx
from torch.nn import Linear
from torch.nn.functional import mse_loss
from torch.optim import SGD
import ignite.distributed as idist
from ignite.engine import (
create_supervised_evaluator,
create_supervised_trainer,
supervised_evaluation_step,
supervised_evaluation_step_amp,
supervised_training_step_tpu,
)
from ignite.metrics import MeanSquaredError
def _test_create_supervised_trainer(
model_device: Optional[str] = None,
trainer_device: Optional[str] = None,
trace: bool = False,
amp_mode: str = None,
scaler: Union[bool, "torch.cuda.amp.GradScaler"] = False,
):
model = Linear(1, 1)
if model_device:
model.to(model_device)
model.weight.data.zero_()
model.bias.data.zero_()
optimizer = SGD(model.parameters(), 0.1)
if trace:
example_input = torch.randn(1, 1)
model = torch.jit.trace(model, example_input)
if amp_mode == "apex" and model_device == trainer_device == "cuda":
from apex import amp
model, optimizer = amp.initialize(model, optimizer, opt_level="O2")
trainer = create_supervised_trainer(
model,
optimizer,
mse_loss,
device=trainer_device,
output_transform=lambda x, y, y_pred, loss: (y_pred, loss.item()),
amp_mode=amp_mode,
scaler=scaler,
)
x = torch.tensor([[0.1], [0.2]])
y = torch.tensor([[0.3], [0.5]])
data = [(x, y)]
assert model.weight.data[0, 0].item() == approx(0.0)
assert model.bias.item() == approx(0.0)
if model_device == trainer_device or ((model_device == "cpu") ^ (trainer_device == "cpu")):
state = trainer.run(data)
assert state.output[-1] == approx(0.17), state.output[-1]
assert round(model.weight.data[0, 0].item(), 3) == approx(0.013), model.weight.item()
assert round(model.bias.item(), 3) == approx(0.08), model.bias.item()
if amp_mode == "amp":
assert state.output[0].dtype is torch.half
if scaler and isinstance(scaler, bool):
assert hasattr(state, "scaler")
else:
assert not hasattr(state, "scaler")
else:
if LooseVersion(torch.__version__) >= LooseVersion("1.7.0"):
# This is broken in 1.6.0 but will be probably fixed with 1.7.0
with pytest.raises(RuntimeError, match=r"is on CPU, but expected them to be on GPU"):
trainer.run(data)
def _test_create_supervised_evaluator(
model_device: Optional[str] = None,
evaluator_device: Optional[str] = None,
trace: bool = False,
amp_mode: str = None,
):
model = Linear(1, 1)
if model_device:
model.to(model_device)
model.weight.data.zero_()
model.bias.data.zero_()
if trace:
example_input = torch.randn(1, 1)
model = torch.jit.trace(model, example_input)
evaluator = create_supervised_evaluator(model, device=evaluator_device, amp_mode=amp_mode)
x = torch.tensor([[1.0], [2.0]])
y = torch.tensor([[3.0], [5.0]])
data = [(x, y)]
if model_device == evaluator_device or ((model_device == "cpu") ^ (evaluator_device == "cpu")):
state = evaluator.run(data)
y_pred, y = state.output
assert y_pred[0, 0].item() == approx(0.0)
assert y_pred[1, 0].item() == approx(0.0)
assert y[0, 0].item() == approx(3.0)
assert y[1, 0].item() == approx(5.0)
assert model.weight.data[0, 0].item() == approx(0.0)
assert model.bias.item() == approx(0.0)
else:
if LooseVersion(torch.__version__) >= LooseVersion("1.7.0"):
            # This is broken in 1.6.0 but will probably be fixed with 1.7.0
with pytest.raises(RuntimeError, match=r"is on CPU, but expected them to be on GPU"):
evaluator.run(data)
def test_create_supervised_trainer():
_test_create_supervised_trainer()
def test_create_supervised_trainer_with_cpu():
_test_create_supervised_trainer(trainer_device="cpu")
def test_create_supervised_trainer_traced_with_cpu():
_test_create_supervised_trainer(trainer_device="cpu", trace=True)
@pytest.mark.skipif(find_spec("apex"), reason="Skip if APEX")
def test_create_supervised_trainer_apex_error():
with pytest.raises(
ModuleNotFoundError, match="Please install apex from https://github.com/nvidia/apex to use amp_mode='apex'."
):
_test_create_supervised_trainer(amp_mode="apex")
@pytest.fixture
def mock_torch_cuda_amp_module():
with patch.dict(
"sys.modules",
{"torch.cuda.amp": None, "torch.cuda.amp.grad_scaler": None, "torch.cuda.amp.autocast_mode": None},
):
yield torch
def test_create_supervised_trainer_amp_error(mock_torch_cuda_amp_module):
with pytest.raises(ImportError, match="Please install torch>=1.6.0 to use amp_mode='amp'."):
_test_create_supervised_trainer(amp_mode="amp")
with pytest.raises(ImportError, match="Please install torch>=1.6.0 to use scaler argument."):
_test_create_supervised_trainer(amp_mode="amp", scaler=True)
@pytest.mark.skipif(LooseVersion(torch.__version__) < LooseVersion("1.6.0"), reason="Skip if < 1.6.0")
def test_create_supervised_trainer_scaler_not_amp():
scaler = torch.cuda.amp.GradScaler(enabled=torch.cuda.is_available())
with pytest.raises(ValueError, match=f"scaler argument is {scaler}, but amp_mode is None."):
_test_create_supervised_trainer(amp_mode=None, scaler=scaler)
with pytest.raises(ValueError, match="scaler argument is True, but amp_mode is None."):
_test_create_supervised_trainer(amp_mode=None, scaler=True)
with pytest.raises(ValueError, match="scaler argument is True, but amp_mode is apex."):
_test_create_supervised_trainer(amp_mode="apex", scaler=True)
with pytest.raises(ValueError, match=f"scaler argument is {scaler}, but amp_mode is apex."):
_test_create_supervised_trainer(amp_mode="apex", scaler=scaler)
@pytest.mark.skipif(not torch.cuda.is_available(), reason="Skip if no GPU")
def test_create_supervised_trainer_on_cuda():
model_device = trainer_device = "cuda"
_test_create_supervised_trainer(model_device=model_device, trainer_device=trainer_device)
@pytest.mark.skipif(LooseVersion(torch.__version__) < LooseVersion("1.6.0"), reason="Skip if < 1.6.0")
@pytest.mark.skipif(not torch.cuda.is_available(), reason="Skip if no GPU")
def test_create_supervised_trainer_on_cuda_amp():
model_device = trainer_device = "cuda"
_test_create_supervised_trainer(model_device=model_device, trainer_device=trainer_device, amp_mode="amp")
@pytest.mark.skipif(LooseVersion(torch.__version__) < LooseVersion("1.6.0"), reason="Skip if < 1.6.0")
@pytest.mark.skipif(not torch.cuda.is_available(), reason="Skip if no GPU")
def test_create_supervised_trainer_on_cuda_amp_scaler():
model_device = trainer_device = "cuda"
_test_create_supervised_trainer(
model_device=model_device, trainer_device=trainer_device, amp_mode="amp", scaler=True
)
scaler = torch.cuda.amp.GradScaler(enabled=torch.cuda.is_available())
_test_create_supervised_trainer(
model_device=model_device, trainer_device=trainer_device, amp_mode="amp", scaler=scaler
)
@pytest.mark.skipif(not torch.cuda.is_available(), reason="Skip if no GPU")
@pytest.mark.skipif(not find_spec("apex"), reason="Skip if no APEX")
def test_create_supervised_trainer_on_cuda_apex():
model_device = trainer_device = "cuda"
_test_create_supervised_trainer(model_device=model_device, trainer_device=trainer_device, amp_mode="apex")
@pytest.mark.skipif(idist.has_xla_support, reason="Skip if has PyTorch XLA package")
def test_supervised_training_step_tpu_no_xla():
with pytest.raises(ModuleNotFoundError, match="torch_xla cannot be imported, please install PyTorch XLA."):
supervised_training_step_tpu(model=None, optimizer=None, loss_fn=None)
@pytest.mark.skipif(idist.has_xla_support, reason="Skip if has PyTorch XLA package")
def test_create_supervised_trainer_on_tpu_no_xla():
model_device = "cpu"
trainer_device = "xla"
with pytest.raises(RuntimeError, match=r"In order to run on TPU, please install PyTorch XLA"):
_test_create_supervised_trainer(model_device=model_device, trainer_device=trainer_device)
@pytest.mark.tpu
@pytest.mark.skipif("NUM_TPU_WORKERS" in os.environ, reason="Skip if no NUM_TPU_WORKERS in env vars")
@pytest.mark.skipif(not idist.has_xla_support, reason="Skip if no PyTorch XLA package")
def test_create_supervised_trainer_on_tpu():
model_device = trainer_device = "xla"
_test_create_supervised_trainer(model_device=model_device, trainer_device=trainer_device)
@pytest.mark.tpu
@pytest.mark.skipif(not idist.has_xla_support, reason="Skip if no PyTorch XLA package")
def test_create_supervised_trainer_on_tpu_amp():
model_device = trainer_device = "xla"
with pytest.raises(ValueError, match="amp_mode cannot be used with xla device."):
_test_create_supervised_trainer(model_device=model_device, trainer_device=trainer_device, amp_mode="amp")
@pytest.mark.skipif(not torch.cuda.is_available(), reason="Skip if no GPU")
def test_create_supervised_trainer_on_cuda_with_model_on_cpu():
_test_create_supervised_trainer(trainer_device="cuda")
def test_create_supervised_evaluator():
_test_create_supervised_evaluator()
def test_create_supervised_evaluator_on_cpu():
_test_create_supervised_evaluator(evaluator_device="cpu")
def test_create_supervised_evaluator_traced_on_cpu():
_test_create_supervised_evaluator(evaluator_device="cpu", trace=True)
@pytest.mark.skipif(not torch.cuda.is_available(), reason="Skip if no GPU")
def test_create_supervised_evaluator_on_cuda():
model_device = evaluator_device = "cuda"
_test_create_supervised_evaluator(model_device=model_device, evaluator_device=evaluator_device)
@pytest.mark.skipif(not torch.cuda.is_available(), reason="Skip if no GPU")
def test_create_supervised_evaluator_on_cuda_with_model_on_cpu():
_test_create_supervised_evaluator(evaluator_device="cuda")
@pytest.mark.skipif(LooseVersion(torch.__version__) < LooseVersion("1.6.0"), reason="Skip if < 1.6.0")
@pytest.mark.skipif(not torch.cuda.is_available(), reason="Skip if no GPU")
def test_create_supervised_evaluator_on_cuda_amp():
model_device = evaluator_device = "cuda"
_test_create_supervised_evaluator(model_device=model_device, evaluator_device=evaluator_device, amp_mode="amp")
def test_create_supervised_evaluator_amp_error(mock_torch_cuda_amp_module):
with pytest.raises(ImportError, match="Please install torch>=1.6.0 to use amp_mode='amp'."):
_test_create_supervised_evaluator(amp_mode="amp")
def test_create_supervised_evaluator_with_metrics():
model = Linear(1, 1)
model.weight.data.zero_()
model.bias.data.zero_()
evaluator = create_supervised_evaluator(model, metrics={"mse": MeanSquaredError()})
x = torch.tensor([[1.0], [2.0]])
y = torch.tensor([[3.0], [4.0]])
data = [(x, y)]
state = evaluator.run(data)
assert state.metrics["mse"] == 12.5
| 38.307432 | 116 | 0.723697 |
794065530fce3e787336c2f6b73bf1b196bc5b33 | 1,795 | py | Python | Encoder/encoder.py | mivallion/Encoder | 8887e741e7efdf03eebc04c01c1ae4f0e2e19809 | [
"MIT"
] | 6 | 2020-04-06T19:43:31.000Z | 2022-03-16T13:15:37.000Z | Encoder/encoder.py | mivallion/Encoder | 8887e741e7efdf03eebc04c01c1ae4f0e2e19809 | [
"MIT"
] | 2 | 2020-05-14T18:04:53.000Z | 2022-02-12T06:22:56.000Z | Encoder/encoder.py | mivallion/Encoder | 8887e741e7efdf03eebc04c01c1ae4f0e2e19809 | [
"MIT"
] | 3 | 2020-11-08T04:14:43.000Z | 2021-09-04T16:48:54.000Z | """
Encoder library for Raspberry Pi for measuring quadrature encoded signals.
created by Mivallion <[email protected]>
Version 1.0 - 01 april 2020 - inital release
"""
import RPi.GPIO as GPIO
class Encoder(object):
"""
    Encoder class for reading a quadrature rotary encoder
    connected via two pins, A and B.
    It works purely on interrupts, which all RPi GPIO pins support.
This library is a simple port of the Arduino Encoder library
(https://github.com/PaulStoffregen/Encoder)
"""
def __init__(self, A, B):
GPIO.setmode(GPIO.BCM)
GPIO.setup(A, GPIO.IN)
GPIO.setup(B, GPIO.IN)
self.A = A
self.B = B
self.pos = 0
self.state = 0
if GPIO.input(A):
self.state |= 1
if GPIO.input(B):
self.state |= 2
GPIO.add_event_detect(A, GPIO.BOTH, callback=self.__update)
GPIO.add_event_detect(B, GPIO.BOTH, callback=self.__update)
"""
    __update() is called every time the value on pin A or B changes.
It updates the current position based on previous and current states
of the rotary encoder.
"""
def __update(self, channel):
state = self.state & 3
if GPIO.input(self.A):
state |= 4
if GPIO.input(self.B):
state |= 8
self.state = state >> 2
if state == 1 or state == 7 or state == 8 or state == 14:
self.pos += 1
elif state == 2 or state == 4 or state == 11 or state == 13:
self.pos -= 1
elif state == 3 or state == 12:
self.pos += 2
elif state == 6 or state == 9:
self.pos -= 2
"""
read() simply returns the current position of the rotary encoder.
"""
def read(self):
return self.pos
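# Minimal usage sketch (BCM pin numbers 17 and 18 are placeholders; wire them to
# the encoder's A and B channels):
#
#   import time
#   enc = Encoder(17, 18)
#   while True:
#       print(enc.read())
#       time.sleep(0.1)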
| 29.42623 | 74 | 0.581616 |
794065ca09a2bcb8887f0109f03afec58298f31f | 21,006 | py | Python | tests/components/google/test_calendar.py | liangleslie/core | cc807b4d597daaaadc92df4a93c6e30da4f570c6 | [
"Apache-2.0"
] | 1,635 | 2015-01-01T14:59:18.000Z | 2016-04-13T02:36:16.000Z | tests/components/google/test_calendar.py | liangleslie/core | cc807b4d597daaaadc92df4a93c6e30da4f570c6 | [
"Apache-2.0"
] | 1,463 | 2015-01-06T06:18:07.000Z | 2016-04-12T22:30:37.000Z | tests/components/google/test_calendar.py | liangleslie/core | cc807b4d597daaaadc92df4a93c6e30da4f570c6 | [
"Apache-2.0"
] | 659 | 2015-01-05T14:02:23.000Z | 2016-04-12T23:39:31.000Z | """The tests for the google calendar platform."""
from __future__ import annotations
import copy
import datetime
from http import HTTPStatus
from typing import Any
from unittest.mock import patch
import urllib
from aiohttp.client_exceptions import ClientError
from gcal_sync.auth import API_BASE_URL
import pytest
from homeassistant.const import STATE_OFF, STATE_ON
from homeassistant.helpers.template import DATE_STR_FORMAT
import homeassistant.util.dt as dt_util
from .conftest import CALENDAR_ID, TEST_YAML_ENTITY, TEST_YAML_ENTITY_NAME
from tests.common import async_fire_time_changed
from tests.test_util.aiohttp import AiohttpClientMockResponse
TEST_ENTITY = TEST_YAML_ENTITY
TEST_ENTITY_NAME = TEST_YAML_ENTITY_NAME
TEST_EVENT = {
"summary": "Test All Day Event",
"start": {},
"end": {},
"location": "Test Cases",
"description": "test event",
"kind": "calendar#event",
"created": "2016-06-23T16:37:57.000Z",
"transparency": "transparent",
"updated": "2016-06-24T01:57:21.045Z",
"reminders": {"useDefault": True},
"organizer": {
"email": "[email protected]",
"displayName": "Organizer Name",
"self": True,
},
"sequence": 0,
"creator": {
"email": "[email protected]",
"displayName": "Organizer Name",
"self": True,
},
"id": "_c8rinwq863h45qnucyoi43ny8",
"etag": '"2933466882090000"',
"htmlLink": "https://www.google.com/calendar/event?eid=*******",
"iCalUID": "[email protected]",
"status": "confirmed",
}
@pytest.fixture(autouse=True)
def mock_test_setup(
hass,
mock_calendars_yaml,
test_api_calendar,
mock_calendars_list,
config_entry,
):
"""Fixture that pulls in the default fixtures for tests in this file."""
mock_calendars_list({"items": [test_api_calendar]})
config_entry.add_to_hass(hass)
return
def upcoming() -> dict[str, Any]:
"""Create a test event with an arbitrary start/end time fetched from the api url."""
now = dt_util.now()
return {
"start": {"dateTime": now.isoformat()},
"end": {"dateTime": (now + datetime.timedelta(minutes=5)).isoformat()},
}
def upcoming_date() -> dict[str, Any]:
"""Create a test event with an arbitrary start/end date fetched from the api url."""
now = dt_util.now()
return {
"start": {"date": now.date().isoformat()},
"end": {"date": now.date().isoformat()},
}
def upcoming_event_url() -> str:
"""Return a calendar API to return events created by upcoming()."""
now = dt_util.now()
start = (now - datetime.timedelta(minutes=60)).isoformat()
end = (now + datetime.timedelta(minutes=60)).isoformat()
return f"/api/calendars/{TEST_ENTITY}?start={urllib.parse.quote(start)}&end={urllib.parse.quote(end)}"
async def test_all_day_event(
hass, mock_events_list_items, mock_token_read, component_setup
):
"""Test that we can create an event trigger on device."""
week_from_today = dt_util.now().date() + datetime.timedelta(days=7)
end_event = week_from_today + datetime.timedelta(days=1)
event = {
**TEST_EVENT,
"start": {"date": week_from_today.isoformat()},
"end": {"date": end_event.isoformat()},
}
mock_events_list_items([event])
assert await component_setup()
state = hass.states.get(TEST_ENTITY)
assert state.name == TEST_ENTITY_NAME
assert state.state == STATE_OFF
assert dict(state.attributes) == {
"friendly_name": TEST_ENTITY_NAME,
"message": event["summary"],
"all_day": True,
"offset_reached": False,
"start_time": week_from_today.strftime(DATE_STR_FORMAT),
"end_time": end_event.strftime(DATE_STR_FORMAT),
"location": event["location"],
"description": event["description"],
}
async def test_future_event(hass, mock_events_list_items, component_setup):
"""Test that we can create an event trigger on device."""
one_hour_from_now = dt_util.now() + datetime.timedelta(minutes=30)
end_event = one_hour_from_now + datetime.timedelta(minutes=60)
event = {
**TEST_EVENT,
"start": {"dateTime": one_hour_from_now.isoformat()},
"end": {"dateTime": end_event.isoformat()},
}
mock_events_list_items([event])
assert await component_setup()
state = hass.states.get(TEST_ENTITY)
assert state.name == TEST_ENTITY_NAME
assert state.state == STATE_OFF
assert dict(state.attributes) == {
"friendly_name": TEST_ENTITY_NAME,
"message": event["summary"],
"all_day": False,
"offset_reached": False,
"start_time": one_hour_from_now.strftime(DATE_STR_FORMAT),
"end_time": end_event.strftime(DATE_STR_FORMAT),
"location": event["location"],
"description": event["description"],
}
async def test_in_progress_event(hass, mock_events_list_items, component_setup):
"""Test that we can create an event trigger on device."""
middle_of_event = dt_util.now() - datetime.timedelta(minutes=30)
end_event = middle_of_event + datetime.timedelta(minutes=60)
event = {
**TEST_EVENT,
"start": {"dateTime": middle_of_event.isoformat()},
"end": {"dateTime": end_event.isoformat()},
}
mock_events_list_items([event])
assert await component_setup()
state = hass.states.get(TEST_ENTITY)
assert state.name == TEST_ENTITY_NAME
assert state.state == STATE_ON
assert dict(state.attributes) == {
"friendly_name": TEST_ENTITY_NAME,
"message": event["summary"],
"all_day": False,
"offset_reached": False,
"start_time": middle_of_event.strftime(DATE_STR_FORMAT),
"end_time": end_event.strftime(DATE_STR_FORMAT),
"location": event["location"],
"description": event["description"],
}
async def test_offset_in_progress_event(hass, mock_events_list_items, component_setup):
"""Test that we can create an event trigger on device."""
middle_of_event = dt_util.now() + datetime.timedelta(minutes=14)
end_event = middle_of_event + datetime.timedelta(minutes=60)
event_summary = "Test Event in Progress"
event = {
**TEST_EVENT,
"start": {"dateTime": middle_of_event.isoformat()},
"end": {"dateTime": end_event.isoformat()},
"summary": f"{event_summary} !!-15",
}
mock_events_list_items([event])
assert await component_setup()
state = hass.states.get(TEST_ENTITY)
assert state.name == TEST_ENTITY_NAME
assert state.state == STATE_OFF
assert dict(state.attributes) == {
"friendly_name": TEST_ENTITY_NAME,
"message": event_summary,
"all_day": False,
"offset_reached": True,
"start_time": middle_of_event.strftime(DATE_STR_FORMAT),
"end_time": end_event.strftime(DATE_STR_FORMAT),
"location": event["location"],
"description": event["description"],
}
async def test_all_day_offset_in_progress_event(
hass, mock_events_list_items, component_setup
):
"""Test that we can create an event trigger on device."""
tomorrow = dt_util.now().date() + datetime.timedelta(days=1)
end_event = tomorrow + datetime.timedelta(days=1)
event_summary = "Test All Day Event Offset In Progress"
event = {
**TEST_EVENT,
"start": {"date": tomorrow.isoformat()},
"end": {"date": end_event.isoformat()},
"summary": f"{event_summary} !!-25:0",
}
mock_events_list_items([event])
assert await component_setup()
state = hass.states.get(TEST_ENTITY)
assert state.name == TEST_ENTITY_NAME
assert state.state == STATE_OFF
assert dict(state.attributes) == {
"friendly_name": TEST_ENTITY_NAME,
"message": event_summary,
"all_day": True,
"offset_reached": True,
"start_time": tomorrow.strftime(DATE_STR_FORMAT),
"end_time": end_event.strftime(DATE_STR_FORMAT),
"location": event["location"],
"description": event["description"],
}
async def test_all_day_offset_event(hass, mock_events_list_items, component_setup):
"""Test that we can create an event trigger on device."""
now = dt_util.now()
day_after_tomorrow = now.date() + datetime.timedelta(days=2)
end_event = day_after_tomorrow + datetime.timedelta(days=1)
offset_hours = 1 + now.hour
event_summary = "Test All Day Event Offset"
event = {
**TEST_EVENT,
"start": {"date": day_after_tomorrow.isoformat()},
"end": {"date": end_event.isoformat()},
"summary": f"{event_summary} !!-{offset_hours}:0",
}
mock_events_list_items([event])
assert await component_setup()
state = hass.states.get(TEST_ENTITY)
assert state.name == TEST_ENTITY_NAME
assert state.state == STATE_OFF
assert dict(state.attributes) == {
"friendly_name": TEST_ENTITY_NAME,
"message": event_summary,
"all_day": True,
"offset_reached": False,
"start_time": day_after_tomorrow.strftime(DATE_STR_FORMAT),
"end_time": end_event.strftime(DATE_STR_FORMAT),
"location": event["location"],
"description": event["description"],
}
async def test_missing_summary(hass, mock_events_list_items, component_setup):
"""Test that we can create an event trigger on device."""
start_event = dt_util.now() + datetime.timedelta(minutes=14)
end_event = start_event + datetime.timedelta(minutes=60)
event = {
**TEST_EVENT,
"start": {"dateTime": start_event.isoformat()},
"end": {"dateTime": end_event.isoformat()},
}
del event["summary"]
mock_events_list_items([event])
assert await component_setup()
state = hass.states.get(TEST_ENTITY)
assert state.name == TEST_ENTITY_NAME
assert state.state == STATE_OFF
assert dict(state.attributes) == {
"friendly_name": TEST_ENTITY_NAME,
"message": "",
"all_day": False,
"offset_reached": False,
"start_time": start_event.strftime(DATE_STR_FORMAT),
"end_time": end_event.strftime(DATE_STR_FORMAT),
"location": event["location"],
"description": event["description"],
}
async def test_update_error(
hass,
component_setup,
mock_calendars_list,
mock_events_list,
test_api_calendar,
aioclient_mock,
):
"""Test that the calendar update handles a server error."""
now = dt_util.now()
mock_calendars_list({"items": [test_api_calendar]})
mock_events_list(
{
"items": [
{
**TEST_EVENT,
"start": {
"dateTime": (now + datetime.timedelta(minutes=-30)).isoformat()
},
"end": {
"dateTime": (now + datetime.timedelta(minutes=30)).isoformat()
},
}
]
}
)
assert await component_setup()
state = hass.states.get(TEST_ENTITY)
assert state.name == TEST_ENTITY_NAME
assert state.state == "on"
# Advance time to avoid throttling
now += datetime.timedelta(minutes=30)
aioclient_mock.clear_requests()
mock_events_list({}, exc=ClientError())
with patch("homeassistant.util.utcnow", return_value=now):
async_fire_time_changed(hass, now)
await hass.async_block_till_done()
# No change
state = hass.states.get(TEST_ENTITY)
assert state.name == TEST_ENTITY_NAME
assert state.state == "on"
# Advance time beyond update/throttle point
now += datetime.timedelta(minutes=30)
aioclient_mock.clear_requests()
mock_events_list(
{
"items": [
{
**TEST_EVENT,
"start": {
"dateTime": (now + datetime.timedelta(minutes=30)).isoformat()
},
"end": {
"dateTime": (now + datetime.timedelta(minutes=60)).isoformat()
},
}
]
}
)
with patch("homeassistant.util.utcnow", return_value=now):
async_fire_time_changed(hass, now)
await hass.async_block_till_done()
# State updated
state = hass.states.get(TEST_ENTITY)
assert state.name == TEST_ENTITY_NAME
assert state.state == "off"
async def test_calendars_api(
hass, hass_client, component_setup, mock_events_list_items
):
"""Test the Rest API returns the calendar."""
mock_events_list_items([])
assert await component_setup()
client = await hass_client()
response = await client.get("/api/calendars")
assert response.status == HTTPStatus.OK
data = await response.json()
assert data == [
{
"entity_id": TEST_ENTITY,
"name": TEST_ENTITY_NAME,
}
]
async def test_http_event_api_failure(
hass,
hass_client,
component_setup,
mock_calendars_list,
mock_events_list,
aioclient_mock,
):
"""Test the Rest API response during a calendar failure."""
mock_events_list({})
assert await component_setup()
client = await hass_client()
aioclient_mock.clear_requests()
mock_events_list({}, exc=ClientError())
response = await client.get(upcoming_event_url())
assert response.status == HTTPStatus.OK
# A failure to talk to the server results in an empty list of events
events = await response.json()
assert events == []
@pytest.mark.freeze_time("2022-03-27 12:05:00+00:00")
async def test_http_api_event(
hass, hass_client, mock_events_list_items, component_setup
):
"""Test querying the API and fetching events from the server."""
hass.config.set_time_zone("Asia/Baghdad")
event = {
**TEST_EVENT,
**upcoming(),
}
mock_events_list_items([event])
assert await component_setup()
client = await hass_client()
response = await client.get(upcoming_event_url())
assert response.status == HTTPStatus.OK
events = await response.json()
assert len(events) == 1
assert {k: events[0].get(k) for k in ["summary", "start", "end"]} == {
"summary": TEST_EVENT["summary"],
"start": {"dateTime": "2022-03-27T15:05:00+03:00"},
"end": {"dateTime": "2022-03-27T15:10:00+03:00"},
}
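# The dateTime values asserted above follow from plain fixed-offset arithmetic: the frozen
# instant 2022-03-27 12:05:00+00:00 rendered in the configured Asia/Baghdad zone (UTC+3,
# no DST transition on that date) is 15:05:00+03:00, and the asserted end time shifts the
# same way. A standalone check of that conversion, assuming only the standard library
# (zoneinfo requires Python 3.9+):
def _baghdad_conversion_sketch():
    """Show the UTC -> Asia/Baghdad rendering relied on by the assertion above."""
    from datetime import datetime, timezone
    from zoneinfo import ZoneInfo
    frozen = datetime(2022, 3, 27, 12, 5, tzinfo=timezone.utc)
    local = frozen.astimezone(ZoneInfo("Asia/Baghdad"))
    assert local.isoformat() == "2022-03-27T15:05:00+03:00"
    return local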
@pytest.mark.freeze_time("2022-03-27 12:05:00+00:00")
async def test_http_api_all_day_event(
hass, hass_client, mock_events_list_items, component_setup
):
"""Test querying the API and fetching events from the server."""
event = {
**TEST_EVENT,
**upcoming_date(),
}
mock_events_list_items([event])
assert await component_setup()
client = await hass_client()
response = await client.get(upcoming_event_url())
assert response.status == HTTPStatus.OK
events = await response.json()
assert len(events) == 1
assert {k: events[0].get(k) for k in ["summary", "start", "end"]} == {
"summary": TEST_EVENT["summary"],
"start": {"date": "2022-03-27"},
"end": {"date": "2022-03-27"},
}
@pytest.mark.freeze_time("2022-03-27 12:05:00+00:00")
async def test_http_api_event_paging(
hass, hass_client, aioclient_mock, component_setup
):
"""Test paging through results from the server."""
hass.config.set_time_zone("Asia/Baghdad")
responses = [
{
"nextPageToken": "page-token",
"items": [
{
**TEST_EVENT,
"summary": "event 1",
**upcoming(),
}
],
},
{
"items": [
{
**TEST_EVENT,
"summary": "event 2",
**upcoming(),
}
],
},
]
def next_response(response_list):
results = copy.copy(response_list)
async def get(method, url, data):
return AiohttpClientMockResponse(method, url, json=results.pop(0))
return get
# Setup response for initial entity load
aioclient_mock.get(
f"{API_BASE_URL}/calendars/{CALENDAR_ID}/events",
side_effect=next_response(responses),
)
assert await component_setup()
# Setup response for API request
aioclient_mock.clear_requests()
aioclient_mock.get(
f"{API_BASE_URL}/calendars/{CALENDAR_ID}/events",
side_effect=next_response(responses),
)
client = await hass_client()
response = await client.get(upcoming_event_url())
assert response.status == HTTPStatus.OK
events = await response.json()
assert len(events) == 2
assert events[0]["summary"] == "event 1"
assert events[1]["summary"] == "event 2"
@pytest.mark.parametrize(
"calendars_config_ignore_availability,transparency,expect_visible_event",
[
        # Use event transparency to decide whether the event is reported
(False, "opaque", True),
(False, "transparent", False),
        # Ignore availability and always report the event
(True, "opaque", True),
(True, "transparency", True),
# Default to ignore availability
(None, "opaque", True),
(None, "transparency", True),
],
)
async def test_opaque_event(
hass,
hass_client,
mock_events_list_items,
component_setup,
transparency,
expect_visible_event,
):
"""Test querying the API and fetching events from the server."""
event = {
**TEST_EVENT,
**upcoming(),
"transparency": transparency,
}
mock_events_list_items([event])
assert await component_setup()
client = await hass_client()
response = await client.get(upcoming_event_url())
assert response.status == HTTPStatus.OK
events = await response.json()
assert (len(events) > 0) == expect_visible_event
@pytest.mark.parametrize("mock_test_setup", [None])
async def test_scan_calendar_error(
hass,
component_setup,
test_api_calendar,
mock_calendars_list,
config_entry,
):
"""Test that the calendar update handles a server error."""
config_entry.add_to_hass(hass)
mock_calendars_list({}, exc=ClientError())
assert await component_setup()
assert not hass.states.get(TEST_ENTITY)
async def test_future_event_update_behavior(
hass, mock_events_list_items, component_setup
):
"""Test an future event that becomes active."""
now = dt_util.now()
now_utc = dt_util.utcnow()
one_hour_from_now = now + datetime.timedelta(minutes=60)
end_event = one_hour_from_now + datetime.timedelta(minutes=90)
event = {
**TEST_EVENT,
"start": {"dateTime": one_hour_from_now.isoformat()},
"end": {"dateTime": end_event.isoformat()},
}
mock_events_list_items([event])
assert await component_setup()
# Event has not started yet
state = hass.states.get(TEST_ENTITY)
assert state.name == TEST_ENTITY_NAME
assert state.state == STATE_OFF
# Advance time until event has started
now += datetime.timedelta(minutes=60)
now_utc += datetime.timedelta(minutes=30)
with patch("homeassistant.util.dt.utcnow", return_value=now_utc), patch(
"homeassistant.util.dt.now", return_value=now
):
async_fire_time_changed(hass, now)
await hass.async_block_till_done()
# Event has started
state = hass.states.get(TEST_ENTITY)
assert state.state == STATE_ON
async def test_future_event_offset_update_behavior(
hass, mock_events_list_items, component_setup
):
"""Test an future event that becomes active."""
now = dt_util.now()
now_utc = dt_util.utcnow()
one_hour_from_now = now + datetime.timedelta(minutes=60)
end_event = one_hour_from_now + datetime.timedelta(minutes=90)
event_summary = "Test Event in Progress"
event = {
**TEST_EVENT,
"start": {"dateTime": one_hour_from_now.isoformat()},
"end": {"dateTime": end_event.isoformat()},
"summary": f"{event_summary} !!-15",
}
mock_events_list_items([event])
assert await component_setup()
# Event has not started yet
state = hass.states.get(TEST_ENTITY)
assert state.name == TEST_ENTITY_NAME
assert state.state == STATE_OFF
assert not state.attributes["offset_reached"]
# Advance time until event has started
now += datetime.timedelta(minutes=45)
now_utc += datetime.timedelta(minutes=45)
with patch("homeassistant.util.dt.utcnow", return_value=now_utc), patch(
"homeassistant.util.dt.now", return_value=now
):
async_fire_time_changed(hass, now)
await hass.async_block_till_done()
# Event has not started, but the offset was reached
state = hass.states.get(TEST_ENTITY)
assert state.state == STATE_OFF
assert state.attributes["offset_reached"]
| 31.683258 | 106 | 0.643054 |
794065e41b382fbb3f0d9667ca0b2741ce5c963c | 5,786 | py | Python | google/ads/google_ads/v6/proto/services/currency_constant_service_pb2.py | arammaliachi/google-ads-python | a4fe89567bd43eb784410523a6306b5d1dd9ee67 | [
"Apache-2.0"
] | null | null | null | google/ads/google_ads/v6/proto/services/currency_constant_service_pb2.py | arammaliachi/google-ads-python | a4fe89567bd43eb784410523a6306b5d1dd9ee67 | [
"Apache-2.0"
] | null | null | null | google/ads/google_ads/v6/proto/services/currency_constant_service_pb2.py | arammaliachi/google-ads-python | a4fe89567bd43eb784410523a6306b5d1dd9ee67 | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: google/ads/googleads/v6/services/currency_constant_service.proto
"""Generated protocol buffer code."""
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
from google.ads.google_ads.v6.proto.resources import currency_constant_pb2 as google_dot_ads_dot_googleads_dot_v6_dot_resources_dot_currency__constant__pb2
from google.api import annotations_pb2 as google_dot_api_dot_annotations__pb2
from google.api import client_pb2 as google_dot_api_dot_client__pb2
from google.api import field_behavior_pb2 as google_dot_api_dot_field__behavior__pb2
from google.api import resource_pb2 as google_dot_api_dot_resource__pb2
DESCRIPTOR = _descriptor.FileDescriptor(
name='google/ads/googleads/v6/services/currency_constant_service.proto',
package='google.ads.googleads.v6.services',
syntax='proto3',
serialized_options=b'\n$com.google.ads.googleads.v6.servicesB\034CurrencyConstantServiceProtoP\001ZHgoogle.golang.org/genproto/googleapis/ads/googleads/v6/services;services\242\002\003GAA\252\002 Google.Ads.GoogleAds.V6.Services\312\002 Google\\Ads\\GoogleAds\\V6\\Services\352\002$Google::Ads::GoogleAds::V6::Services',
create_key=_descriptor._internal_create_key,
serialized_pb=b'\n@google/ads/googleads/v6/services/currency_constant_service.proto\x12 google.ads.googleads.v6.services\x1a\x39google/ads/googleads/v6/resources/currency_constant.proto\x1a\x1cgoogle/api/annotations.proto\x1a\x17google/api/client.proto\x1a\x1fgoogle/api/field_behavior.proto\x1a\x19google/api/resource.proto\"f\n\x1aGetCurrencyConstantRequest\x12H\n\rresource_name\x18\x01 \x01(\tB1\xe0\x41\x02\xfa\x41+\n)googleads.googleapis.com/CurrencyConstant2\xac\x02\n\x17\x43urrencyConstantService\x12\xc9\x01\n\x13GetCurrencyConstant\x12<.google.ads.googleads.v6.services.GetCurrencyConstantRequest\x1a\x33.google.ads.googleads.v6.resources.CurrencyConstant\"?\x82\xd3\xe4\x93\x02)\x12\'/v6/{resource_name=currencyConstants/*}\xda\x41\rresource_name\x1a\x45\xca\x41\x18googleads.googleapis.com\xd2\x41\'https://www.googleapis.com/auth/adwordsB\x83\x02\n$com.google.ads.googleads.v6.servicesB\x1c\x43urrencyConstantServiceProtoP\x01ZHgoogle.golang.org/genproto/googleapis/ads/googleads/v6/services;services\xa2\x02\x03GAA\xaa\x02 Google.Ads.GoogleAds.V6.Services\xca\x02 Google\\Ads\\GoogleAds\\V6\\Services\xea\x02$Google::Ads::GoogleAds::V6::Servicesb\x06proto3'
,
dependencies=[google_dot_ads_dot_googleads_dot_v6_dot_resources_dot_currency__constant__pb2.DESCRIPTOR,google_dot_api_dot_annotations__pb2.DESCRIPTOR,google_dot_api_dot_client__pb2.DESCRIPTOR,google_dot_api_dot_field__behavior__pb2.DESCRIPTOR,google_dot_api_dot_resource__pb2.DESCRIPTOR,])
_GETCURRENCYCONSTANTREQUEST = _descriptor.Descriptor(
name='GetCurrencyConstantRequest',
full_name='google.ads.googleads.v6.services.GetCurrencyConstantRequest',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='resource_name', full_name='google.ads.googleads.v6.services.GetCurrencyConstantRequest.resource_name', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=b'\340A\002\372A+\n)googleads.googleapis.com/CurrencyConstant', file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=276,
serialized_end=378,
)
DESCRIPTOR.message_types_by_name['GetCurrencyConstantRequest'] = _GETCURRENCYCONSTANTREQUEST
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
GetCurrencyConstantRequest = _reflection.GeneratedProtocolMessageType('GetCurrencyConstantRequest', (_message.Message,), {
'DESCRIPTOR' : _GETCURRENCYCONSTANTREQUEST,
'__module__' : 'google.ads.googleads.v6.services.currency_constant_service_pb2'
# @@protoc_insertion_point(class_scope:google.ads.googleads.v6.services.GetCurrencyConstantRequest)
})
_sym_db.RegisterMessage(GetCurrencyConstantRequest)
DESCRIPTOR._options = None
_GETCURRENCYCONSTANTREQUEST.fields_by_name['resource_name']._options = None
_CURRENCYCONSTANTSERVICE = _descriptor.ServiceDescriptor(
name='CurrencyConstantService',
full_name='google.ads.googleads.v6.services.CurrencyConstantService',
file=DESCRIPTOR,
index=0,
serialized_options=b'\312A\030googleads.googleapis.com\322A\'https://www.googleapis.com/auth/adwords',
create_key=_descriptor._internal_create_key,
serialized_start=381,
serialized_end=681,
methods=[
_descriptor.MethodDescriptor(
name='GetCurrencyConstant',
full_name='google.ads.googleads.v6.services.CurrencyConstantService.GetCurrencyConstant',
index=0,
containing_service=None,
input_type=_GETCURRENCYCONSTANTREQUEST,
output_type=google_dot_ads_dot_googleads_dot_v6_dot_resources_dot_currency__constant__pb2._CURRENCYCONSTANT,
serialized_options=b'\202\323\344\223\002)\022\'/v6/{resource_name=currencyConstants/*}\332A\rresource_name',
create_key=_descriptor._internal_create_key,
),
])
_sym_db.RegisterServiceDescriptor(_CURRENCYCONSTANTSERVICE)
DESCRIPTOR.services_by_name['CurrencyConstantService'] = _CURRENCYCONSTANTSERVICE
# @@protoc_insertion_point(module_scope)
| 55.104762 | 1,174 | 0.826132 |
794066193012be50d5678d01b5b1258cd575c17f | 3,879 | py | Python | data_ingest/utils.py | 18F/data-federation-ingest | a896ef2da1faf3966f018366b26a338bb66cc717 | [
"CC0-1.0"
] | 18 | 2019-07-26T13:43:01.000Z | 2022-01-15T14:57:52.000Z | data_ingest/utils.py | 18F/data-federation-ingest | a896ef2da1faf3966f018366b26a338bb66cc717 | [
"CC0-1.0"
] | 96 | 2019-06-14T18:30:54.000Z | 2021-08-03T09:25:02.000Z | data_ingest/utils.py | 18F/data-federation-ingest | a896ef2da1faf3966f018366b26a338bb66cc717 | [
"CC0-1.0"
] | 3 | 2020-01-23T04:48:18.000Z | 2021-01-12T09:31:20.000Z | import csv
import json
import io
import logging
from collections import OrderedDict
from django.utils.module_loading import import_string
from .ingest_settings import UPLOAD_SETTINGS
logger = logging.getLogger('ReVAL')
def get_schema_headers():
ordered_header = []
good_table_validator = 'data_ingest.ingestors.GoodtablesValidator'
schema = [loc for loc, val_type in UPLOAD_SETTINGS['VALIDATORS'].items()
if val_type == good_table_validator and loc is not None]
if schema:
validator = import_string(good_table_validator)(name=good_table_validator, filename=schema[0])
contents = validator.get_validator_contents()
ordered_header = [field['name'] for field in contents.get('fields', [])]
return ordered_header
def get_ordered_headers(headers):
if isinstance(UPLOAD_SETTINGS['STREAM_ARGS']['headers'], list):
return UPLOAD_SETTINGS['STREAM_ARGS']['headers']
correct_headers = get_schema_headers()
if correct_headers == headers:
return headers
working_headers = headers.copy()
o_headers = []
for h in correct_headers:
if h in working_headers:
o_headers.append(h)
working_headers.remove(h)
# add back header that didn't exist in the schema but in headers
o_headers.extend(working_headers)
return o_headers
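# get_ordered_headers() keeps the schema-defined column order first and then appends any
# unknown columns in the order they were observed. A minimal standalone sketch of that
# ordering rule with plain lists (no UPLOAD_SETTINGS or schema lookup involved; the column
# names are made up for the example):
def _ordering_rule_sketch():
    schema_order = ["name", "email", "amount"]
    observed = ["amount", "nickname", "name"]
    remaining = observed.copy()
    ordered = []
    for header in schema_order:
        if header in remaining:
            ordered.append(header)
            remaining.remove(header)
    ordered.extend(remaining)  # unknown columns keep their observed order
    assert ordered == ["name", "amount", "nickname"]
    return ordered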
def to_tabular(incoming):
"""Coerce incoming json to tabular structure for tabulator
[
[All observed keys(headers)],
[values],
[values],
...
]
First list contains all observed `columns`, following lists
are data rows containing data values in the order of headers defined
in the first row.
"""
if incoming.get('source') is None:
return incoming
data = incoming.copy()
# if we are going through the API, the JSONDecoder already
# converts the source JSON to a python dictionary for us.
jsonbuffer = None
try:
jsonbuffer = json.loads(data["source"].decode())
except (TypeError, KeyError, AttributeError):
jsonbuffer = data['source']
headers = set()
for row in jsonbuffer:
for header in row.keys():
headers.add(header)
headers = list(headers)
o_headers = get_ordered_headers(headers)
output = [o_headers]
for row in jsonbuffer:
row_data = []
for header in o_headers:
logger.debug(f"Fetching: {header}")
val = row.get(header, None)
row_data.append(val)
logger.debug(f'Set to: {val}')
output.append(row_data)
return output
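# The docstring of to_tabular() above describes the target layout: one header row holding
# every observed key, then one value row per record, with missing keys filled as None.
# A minimal standalone sketch of that shape, using a hard-coded header order instead of
# the settings-driven get_ordered_headers() call:
def _tabular_shape_sketch():
    records = [{"name": "Ada", "amount": 3}, {"name": "Grace"}]
    headers = ["name", "amount"]
    table = [headers]
    for record in records:
        table.append([record.get(header) for header in headers])
    assert table == [["name", "amount"], ["Ada", 3], ["Grace", None]]
    return table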
def reorder_csv(incoming):
if incoming.get('source') is None:
return incoming
data = incoming.copy()
csvbuffer = io.StringIO(data['source'].decode('UTF-8'))
output = io.StringIO()
headers = []
header_mapping = {}
writer = None
# This will make sure empty lines are not deleted
lines = (',' if line.isspace() else line for line in csvbuffer)
for row in csv.DictReader(lines):
if not headers:
# write headers first
headers = get_ordered_headers(list(row.keys()))
writer = csv.DictWriter(output, fieldnames=headers, extrasaction='ignore', lineterminator='\n')
writer.writeheader()
if (isinstance(UPLOAD_SETTINGS['STREAM_ARGS']['headers'], list)):
header_mapping = dict(zip(row.keys(), headers))
# If there's extra item in the row
if row.get(None):
vals = [row.get(header, '') for header in headers]
vals.extend(row.get(None))
write_row = ",".join(vals)
output.write(write_row + '\n')
else:
writer.writerow(OrderedDict([(header_mapping.get(k, k), v) for k, v in row.items()]))
data['source'] = output.getvalue().encode('UTF-8')
return data
| 30.785714 | 107 | 0.642691 |
7940662f4859044199cd656698fa207ea21c5792 | 65,618 | py | Python | mindspore/ops/operations/_grad_ops.py | kanchenhao/mindspore | 020662240545603fb53407c81f53f2541710e85d | [
"Apache-2.0"
] | null | null | null | mindspore/ops/operations/_grad_ops.py | kanchenhao/mindspore | 020662240545603fb53407c81f53f2541710e85d | [
"Apache-2.0"
] | null | null | null | mindspore/ops/operations/_grad_ops.py | kanchenhao/mindspore | 020662240545603fb53407c81f53f2541710e85d | [
"Apache-2.0"
] | null | null | null | # Copyright 2020 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Operators for gradients."""
from ..._c_expression import signature_rw as sig_rw
from ..._c_expression import signature_kind as sig_kind
from ..primitive import Primitive, PrimitiveWithInfer, prim_attr_register
from ..._checkparam import Validator as validator, Rel
from .._utils import get_concat_offset
from ...common import dtype as mstype
from .. import functional as F
class AbsGrad(PrimitiveWithInfer):
"""Computes gradients for abs operation."""
@prim_attr_register
def __init__(self):
"""init AbsGrad"""
def infer_shape(self, y, dy):
return y
def infer_dtype(self, y, dy):
return y
class ACosGrad(PrimitiveWithInfer):
"""
Computes ACosGrad of input element-wise.
Returns:
Tensor, has the same type as input.
"""
@prim_attr_register
def __init__(self):
"""init ACosGrad"""
def infer_shape(self, x, dout):
validator.check("x shape", x, "dout shape", dout, Rel.EQ, self.name)
return x
def infer_dtype(self, x, dout):
args = {"x": x, "dout": dout}
validator.check_tensor_type_same(args, mstype.number_type, self.name)
return x
class AcoshGrad(PrimitiveWithInfer):
"""Performs grad of Acosh operation."""
@prim_attr_register
def __init__(self):
"""init AcoshGrad"""
def infer_shape(self, x, dout):
validator.check("x shape", x, "dout shape", dout, Rel.EQ, self.name)
return x
def infer_dtype(self, x, dout):
args = {"x": x, "dout": dout}
validator.check_tensor_type_same(args, mstype.number_type, self.name)
return x
class AsinGrad(PrimitiveWithInfer):
"""
Computes AsinGrad of input element-wise.
Returns:
Tensor, has the same type as input.
"""
@prim_attr_register
def __init__(self):
"""Init AsinGrad"""
def infer_shape(self, x, dout):
validator.check("x shape", x, "dout shape", dout, Rel.EQ, self.name)
return x
def infer_dtype(self, x, dout):
args = {"x": x, "dout": dout}
validator.check_tensor_type_same(args, mstype.number_type, self.name)
return x
class AsinhGrad(PrimitiveWithInfer):
"""Performs grad of Asinh operation."""
@prim_attr_register
def __init__(self):
"""init AsinhGrad"""
def infer_shape(self, x, dout):
validator.check("x shape", x, "dout shape", dout, Rel.EQ, self.name)
return x
def infer_dtype(self, x, dout):
args = {"x": x, "dout": dout}
validator.check_tensor_type_same(args, mstype.number_type, self.name)
return x
class ReciprocalGrad(PrimitiveWithInfer):
"""Performs grad of Reciprocal operation."""
@prim_attr_register
def __init__(self):
"""init ReciprocalGrad"""
def infer_shape(self, x_shape, dout_shape):
validator.check("x shape", x_shape, "dout shape", dout_shape, Rel.EQ, self.name)
return x_shape
def infer_dtype(self, x_dtype, dout_dtype):
args = {"x": x_dtype, "dout": dout_dtype}
validator.check_tensor_type_same(args, [mstype.float16, mstype.float32], self.name)
return x_dtype
class RsqrtGrad(PrimitiveWithInfer):
"""Performs grad of Rsqrt operation."""
@prim_attr_register
def __init__(self):
"""init RsqrtGrad"""
def infer_shape(self, x_shape, dout_shape):
validator.check("x shape", x_shape, "dout shape", dout_shape, Rel.EQ, self.name)
return x_shape
def infer_dtype(self, x_dtype, dout_dtype):
args = {"x": x_dtype, "dout": dout_dtype}
validator.check_tensor_type_same(args, [mstype.float16, mstype.float32, mstype.int32, mstype.int8], self.name)
return x_dtype
class SoftmaxGrad(PrimitiveWithInfer):
"""Performs grad of Softmax operation."""
@prim_attr_register
def __init__(self):
"""init SoftmaxGrad"""
def infer_shape(self, x_shape, dout_shape):
validator.check("x shape", x_shape, "dout shape", dout_shape, Rel.EQ, self.name)
return x_shape
def infer_dtype(self, x_dtype, dout_dtype):
args = {"x": x_dtype, "dout": dout_dtype}
validator.check_tensor_type_same(args, [mstype.float16, mstype.float32], self.name)
return x_dtype
class SqrtGrad(PrimitiveWithInfer):
"""Performs grad of Sqrt operation."""
@prim_attr_register
def __init__(self):
"""init SqrtGrad"""
def infer_shape(self, x_shape, dout_shape):
validator.check("x shape", x_shape, "dout shape", dout_shape, Rel.EQ, self.name)
return x_shape
def infer_dtype(self, x_dtype, dout_dtype):
args = {"x": x_dtype, "dout": dout_dtype}
validator.check_tensor_type_same(args, [mstype.float16, mstype.float32], self.name)
return x_dtype
class BatchNormGrad(PrimitiveWithInfer):
"""Performs grad of BatchNorm operation."""
@prim_attr_register
def __init__(self, is_training=False, epsilon=1e-5):
self.is_training = validator.check_value_type('is_training', is_training, (bool,), self.name)
self.epsilon = validator.check_number_range('epsilon', epsilon, 0, 1, Rel.INC_RIGHT, self.name)
self.add_prim_attr('data_format', "NCHW")
def infer_shape(self, y_backprop_shape, x_shape, scale_shape, reserve_1_shape, reserve_2_shape):
validator.check("BatchNorm y_backprop_shape", y_backprop_shape, "BatchNorm x_shape", x_shape)
return (x_shape, scale_shape, scale_shape, reserve_1_shape, reserve_2_shape)
def infer_dtype(self, y_backprop_type, x_type, scale_type, reserve_1_type, reserve_2_type):
return (x_type, scale_type, scale_type, reserve_1_type, reserve_2_type)
class BiasAddGrad(Primitive):
"""Computes gradients of BiasAdd."""
@prim_attr_register
def __init__(self):
self.init_prim_io_names(inputs=['dout'], outputs=['output'])
self.add_prim_attr('data_format', 'NCHW')
def __call__(self, d_output):
raise NotImplementedError
class KLDivLossGrad(PrimitiveWithInfer):
"""Computes gradients for `KLDivLoss` operation."""
@prim_attr_register
def __init__(self, reduction='mean'):
self.reduction = validator.check_string('reduction', reduction, ['none', 'mean', 'sum'], self.name)
def infer_shape(self, x_shape, y_shape, doutput_shape):
validator.check('x_shape', x_shape, 'y_shape', y_shape, Rel.EQ, self.name)
return x_shape, y_shape
def infer_dtype(self, x_type, y_type, doutput_type):
args = {'x_type': x_type, 'y_type': y_type, 'doutput_type': doutput_type}
validator.check_tensor_type_same(args, (mstype.float16, mstype.float32), self.name)
return x_type, y_type
class BinaryCrossEntropyGrad(PrimitiveWithInfer):
"""Computes gradients for `BinaryCrossEntropy` operation."""
@prim_attr_register
def __init__(self, reduction='mean'):
self.reduction = validator.check_string('reduction', reduction, ['none', 'mean', 'sum'], self.name)
def infer_shape(self, x_shape, y_shape, doutput_shape, weight_shape):
validator.check('x_shape', x_shape, 'y_shape', y_shape, Rel.EQ, self.name)
if weight_shape:
validator.check('y_shape', y_shape, 'weight_shape', weight_shape, Rel.EQ, self.name)
return x_shape
def infer_dtype(self, x_type, y_type, doutput_type, weight_type):
args = {'x_type': x_type, 'y_type': y_type, 'doutput_type': doutput_type}
validator.check_tensor_type_same(args, (mstype.float16, mstype.float32), self.name)
if weight_type:
validator.check('x_type', x_type, 'weight_type', weight_type, Rel.EQ, TypeError)
return x_type
class ConcatOffset(PrimitiveWithInfer):
"""primitive for computing Concat's gradient."""
@prim_attr_register
def __init__(self, N=2, axis=0):
"""init ConcatOffset"""
def __infer__(self, input_x):
axis = self.axis
x_shp = input_x['shape']
x_type = input_x['dtype']
offset, _, axis = get_concat_offset(x_shp, x_type, axis, self.name)
self.add_prim_attr('T', x_type[0].element_type())
offset_values = []
for i in range(len(x_shp)):
values = []
for j in range(len(x_shp[0])):
value = 0
if j == axis:
value = offset[i]
values.append(value)
offset_values.append(tuple(values))
out = {'shape': None,
'dtype': None,
'value': tuple(offset_values)}
return out
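# ConcatOffset returns, for every input of a Concat, a tuple that is zero everywhere except
# at the concat axis, where it holds that input's starting offset (the cumulative size of
# the preceding inputs along that axis). A pure-Python sketch of that value for two example
# shapes, assuming get_concat_offset yields cumulative sizes along the axis:
def _concat_offset_sketch():
    shapes = [(2, 3, 4), (2, 5, 4)]
    axis = 1
    offsets, running = [], 0
    for shape in shapes:
        offsets.append(tuple(running if dim == axis else 0 for dim in range(len(shape))))
        running += shape[axis]
    assert offsets == [(0, 0, 0), (0, 3, 0)]
    return tuple(offsets)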
class Conv2DBackpropFilter(PrimitiveWithInfer):
"""
Computes the gradients of convolution with respect to the filter.
Args:
out_channel (int): The dimensionality of the output space.
kernel_size (Union[int, tuple[int]]): The size of the convolution window.
pad_mode (str): "valid", "same", "pad" the mode to fill padding. Default: "valid".
pad (int): The pad value to fill. Default: 0.
mode (int): 0 Math convolutiuon, 1 cross-correlation convolution ,
2 deconvolution, 3 depthwise convolution. Default: 1.
stride (tuple): The stride to apply conv filter. Default: (1, 1).
dilation (tuple): Specifies the dilation rate to use for dilated convolution. Default: (1, 1, 1, 1).
group (int): Splits input into groups. Default: 1.
Returns:
Tensor, the gradients of convolution.
"""
@prim_attr_register
def __init__(self,
out_channel,
kernel_size,
pad_mode="valid",
pad=0,
pad_list=(0, 0, 0, 0),
mode=1,
stride=(1, 1),
dilation=(1, 1, 1, 1),
group=1):
"""init Convolution"""
self.init_prim_io_names(inputs=['out_backprop', 'input', 'filter_sizes'], outputs=['output'])
self.out_channel = out_channel
self.kernel_size = kernel_size
self.mode = mode
pad_mode = pad_mode.upper()
self.add_prim_attr('pad_mode', pad_mode)
self.pad = pad
if isinstance(stride, tuple) and len(stride) == 4:
self.stride = (stride[2], stride[3])
self.add_prim_attr('stride', self.stride)
self.dilation = dilation
self.group = group
self.add_prim_attr('groups', group)
self.add_prim_attr('data_format', "NCHW")
def __infer__(self, doutput, x, w_size):
w_size_v = w_size['value']
validator.check_value_type('w_size', w_size_v, [tuple], self.name)
for i, dim_len in enumerate(w_size_v):
validator.check_value_type("w_size[%d]" % i, dim_len, [int], self.name)
args = {"x": x['dtype'], "doutput": doutput['dtype']}
validator.check_tensor_type_same(args, [mstype.int8, mstype.int32, mstype.float16, mstype.float32], self.name)
out = {
'value': None,
'shape': w_size_v,
'dtype': doutput['dtype'],
}
return out
class DepthwiseConv2dNativeBackpropFilter(PrimitiveWithInfer):
"""
Returns the gradient of filter for DepthwiseConv2dNative.
Applies depthwise conv2d for the input, which will generate more channels with channel_multiplier.
Refer to class DepthwiseConv2dNative for more details.
Args:
        channel_multiplier (int): The multiplier for the original output conv.
kernel_size (int or tuple): The size of the conv kernel.
        mode (int): 0 Math convolution, 1 cross-correlation convolution,
            2 deconvolution, 3 depthwise convolution. Default: 3.
pad_mode (str): The mode to fill padding which can be: "valid", "same" or "pad". Default: "valid".
pad (int): The pad value to fill. Default: 0.
pads (tuple): The pad list like (top, bottom, left, right). Default: (0, 0, 0, 0).
stride (int): The stride to apply conv filter. Default: 1.
dilation (int): Specifies the space to use between kernel elements. Default: 1.
group (int): Splits input into groups. Default: 1.
Returns:
Tensor, the value is the gradient of filter for DepthwiseConv2dNative.
"""
@prim_attr_register
def __init__(self,
channel_multiplier,
kernel_size,
pad_mode="valid",
pad=0,
pads=(0, 0, 0, 0),
mode=3,
stride=1,
dilation=1,
group=1):
"""init Convolution"""
self.init_prim_io_names(inputs=['input', 'filter_size', 'dout'], outputs=['output'])
self.channel_multiplier = channel_multiplier
self.kernel_size = kernel_size
self.mode = mode
self.pad_mode = pad_mode
self.pad = pad
self.pads = pads
self.stride = stride
self.dilation = dilation
self.group = group
self.add_prim_attr('data_format', "NCHW")
def __call__(self, x, w_size, dout):
raise NotImplementedError
def __infer__(self, x, w_size, dout):
w_size_v = w_size['value']
args = {'x': x['dtype'], 'dout': dout['dtype']}
validator.check_tensor_type_same(args, mstype.number_type, self.name)
out = {
'value': None,
'shape': w_size_v,
'dtype': dout['dtype'],
}
return out
class DepthwiseConv2dNativeBackpropInput(PrimitiveWithInfer):
"""
Returns the gradient of input for DepthwiseConv2dNative.
Applies depthwise conv2d for the input, which will generate more channels with channel_multiplier.
Args:
        channel_multiplier (int): The multiplier for the original output conv.
kernel_size (int or tuple): The size of the conv kernel.
        mode (int): 0 Math convolution, 1 cross-correlation convolution,
            2 deconvolution, 3 depthwise convolution. Default: 3.
        pad_mode (str): The mode to fill padding, which can be "valid", "same" or "pad". Default: "valid".
        pad (int): The pad value to fill. Default: 0.
        pads (tuple): The pad list like (top, bottom, left, right). Default: (0, 0, 0, 0).
        stride (int): The stride to apply conv filter. Default: 1.
dilation (int): Specifies the space to use between kernel elements. Default: 1.
group (int): Splits input into groups. Default: 1.
Returns:
Tensor, the value is the gradient of input for DepthwiseConv2dNative.
"""
@prim_attr_register
def __init__(self,
channel_multiplier,
kernel_size,
pad_mode="valid",
pad=0,
pads=(0, 0, 0, 0),
mode=3,
stride=1,
dilation=1,
group=1):
"""init Convolution"""
self.init_prim_io_names(inputs=['input_size', 'filter', 'dout'], outputs=['output'])
self.channel_multiplier = channel_multiplier
self.kernel_size = kernel_size
self.mode = mode
self.pad_mode = pad_mode
self.pad = pad
self.pads = pads
self.stride = stride
self.dilation = dilation
self.group = group
self.add_prim_attr('data_format', "NCHW")
def __call__(self, x_size, w, dout):
raise NotImplementedError
def __infer__(self, x_size, w, dout):
args = {'w': w['dtype'], 'dout': dout['dtype']}
validator.check_tensor_type_same(args, mstype.number_type, self.name)
x_size_v = x_size['value']
out = {
'value': None,
'shape': x_size_v,
'dtype': dout['dtype'],
}
return out
class FlattenGrad(PrimitiveWithInfer):
"""Performs gradients of Flatten."""
@prim_attr_register
def __init__(self):
self.init_prim_io_names(inputs=['x', 'shape'], outputs=['output'])
def __infer__(self, *args):
out = {
'value': None,
'shape': args[1]['value'],
'dtype': args[0]['dtype'],
}
return out
class FusedBatchNormGrad(Primitive):
"""Gradients of FusedBatchNorm operation."""
@prim_attr_register
def __init__(self, epsilon=0.0, momentum=0.1):
self.init_prim_io_names(inputs=['dy', 'x', 'scale', 'save_mean', 'save_inv_variance'],
outputs=['dx', 'bn_scale', 'bn_bias'])
def __call__(self, dy, x, scale, save_mean, save_inv_variance):
raise NotImplementedError
class UniqueGrad(Primitive):
"""Gradients of Unique operation."""
@prim_attr_register
def __init__(self):
self.init_prim_io_names(inputs=['dy', 'y'], outputs=['dx'])
def __call__(self, dy, x, scale, save_mean, save_inv_variance):
raise NotImplementedError
class BNTrainingReduceGrad(PrimitiveWithInfer):
"""Gradients of FusedBatchNorm operation."""
@prim_attr_register
def __init__(self, epsilon=0.0001):
_inputs = ['grads', 'x', 'diff_scale', 'diff_offset', 'scale', 'batch_mean', 'batch_variance']
self.init_prim_io_names(inputs=_inputs, outputs=['y'])
def infer_shape(self, grads, x, diff_scale, diff_offset, scale, batch_mean, batch_variance):
return grads
def infer_dtype(self, grads, x, diff_scale, diff_offset, scale, batch_mean, batch_variance):
return grads
class BNTrainingUpdateGrad(PrimitiveWithInfer):
"""Gradients of FusedBatchNorm operation."""
@prim_attr_register
def __init__(self, epsilon=0.0001):
self.init_prim_io_names(inputs=['grads', 'x', 'batch_mean', 'batch_variance'],
outputs=['diff_scale', 'diff_offset'])
def infer_shape(self, grads, x, batch_mean, batch_variance):
return (batch_mean, batch_variance)
def infer_dtype(self, grads, x, batch_mean, batch_variance):
return (batch_mean, batch_variance)
class GeluGrad(PrimitiveWithInfer):
"""Gradients of Gelu operation."""
@prim_attr_register
def __init__(self):
"""init GeluGrad"""
def infer_shape(self, y_backprop_shape, x_shape, y_shape):
return x_shape
def infer_dtype(self, y_backprop_dtype, x_dtype, y_dtype):
validator.check_tensor_type_same({"y_backprop": y_backprop_dtype}, (mstype.float16, mstype.float32), self.name)
validator.check_tensor_type_same({"x": x_dtype}, (mstype.float16, mstype.float32), self.name)
validator.check_tensor_type_same({"y": y_dtype}, (mstype.float16, mstype.float32), self.name)
return x_dtype
class _PoolGrad(PrimitiveWithInfer):
"""Gradients of the max/avg pool operation."""
@prim_attr_register
def __init__(self, ksize, strides, padding="VALID"):
self.init_prim_io_names(inputs=['x_origin', 'out_origin', 'grad'], outputs=['output'])
validator.check_value_type('ksize', ksize, [int, tuple], self.name)
validator.check_value_type('strides', strides, [int, tuple], self.name)
self.padding = validator.check_string('padding', padding.upper(), ['VALID', 'SAME'], self.name)
self.add_prim_attr("padding", self.padding)
self.is_maxpoolgradwithargmax = (self.name == "MaxPoolGradWithArgmax")
if not self.is_maxpoolgradwithargmax:
self.add_prim_attr('data_format', "NCHW")
def _grad_check_int_or_tuple(arg_name, arg_val, is_argmax):
validator.check_value_type(arg_name, arg_val, (int, tuple), self.name)
error_msg = ValueError(f"For '{self.name}' the '{arg_name}' should be an positive int number "
f"or a tuple of two or four positive int numbers, but got {arg_val}")
if isinstance(arg_val, int):
ret = (1, arg_val, arg_val, 1) if is_argmax else (1, 1, arg_val, arg_val)
elif len(arg_val) == 2:
ret = (1, arg_val[0], arg_val[1], 1) if is_argmax else (1, 1, arg_val[0], arg_val[1])
elif len(arg_val) == 4:
ret = arg_val
else:
raise error_msg
# whether all elements of tuple are positive integers
for item in ret:
if not isinstance(item, int) or item <= 0:
raise error_msg
return ret
self.ksize = _grad_check_int_or_tuple("ksize", ksize, self.is_maxpoolgradwithargmax)
self.add_prim_attr("ksize", self.ksize)
self.strides = _grad_check_int_or_tuple("strides", strides, self.is_maxpoolgradwithargmax)
self.add_prim_attr("strides", self.strides)
class AvgPoolGrad(_PoolGrad):
"""Gradients of the avg pool operation for ge."""
@prim_attr_register
def __init__(self, ksize=1, strides=1, padding="VALID"):
super(AvgPoolGrad, self).__init__(ksize, strides, padding)
def __infer__(self, origin_input, dout):
out = {
'value': None,
'shape': tuple(origin_input['value']),
'dtype': dout['dtype'],
}
return out
class AvgPoolGradVm(_PoolGrad):
"""Gradients of the avg pool operation for vm."""
@prim_attr_register
def __init__(self, ksize=1, strides=1, padding="VALID"):
super(AvgPoolGradVm, self).__init__(ksize, strides, padding)
self.init_prim_io_names(inputs=['x_origin', 'grad', 'mean_matrix', 'kernel_matrix'], outputs=['output'])
def __infer__(self, origin_input, dout, mean_matrix, kernel_matrix):
out = {
'value': None,
'shape': tuple(origin_input['value']),
'dtype': dout['dtype'],
}
return out
class AvgPoolGradGpu(_PoolGrad):
"""Gradients of the avg pool operation for gpu."""
@prim_attr_register
def __init__(self, ksize=1, strides=1, padding="VALID"):
super(AvgPoolGradGpu, self).__init__(ksize, strides, padding)
def infer_shape(self, x1_shape, x2_shape, grad_shape):
return x1_shape
def infer_dtype(self, x1_dtype, x2_dtype, grad_dtype):
return x1_dtype
class MaxPoolGrad(_PoolGrad):
"""Performs gradients of the max pool operation."""
@prim_attr_register
def __init__(self, ksize=1, strides=1, padding="VALID"):
super(MaxPoolGrad, self).__init__(ksize, strides, padding)
def infer_shape(self, x1_shape, x2_shape, grad_shape):
return x1_shape
def infer_dtype(self, x1_dtype, x2_dtype, grad_dtype):
return x1_dtype
class MaxPoolGradGrad(_PoolGrad):
r"""
Performs gradients of the MaxPoolGrad operation.
Args:
ksize (Union[int, tuple[int]]): The size of kernel used to take the maximum value,
is an int number that represents height and width are both ksize, or a tuple
of two int numbers that represent height and width respectively. Default: 1.
strides (Union[int, tuple[int]]): The distance of kernel moving, an int number that represents
the height and width of movement are both strides, or a tuple of two int numbers that
represent height and width of movement respectively. Default: 1.
padding (str): The optional values for pad mode, is "same" or "valid", not case sensitive.
Default: "valid".
- same: Adopts the way of completion. Output height and width will be the same as
the input. Total number of padding will be calculated for horizontal and vertical
direction and evenly distributed to top and bottom, left and right if possible.
Otherwise, the last extra padding will be done from the bottom and the right side.
- valid: Adopts the way of discarding. The possibly largest height and width of output
              will be returned without padding. Extra pixels will be discarded.
Inputs:
- **origin_input** (Tensor) - Tensor with data format "NCHW", data type should be float16.
- **origin_output** (Tensor) - Data type same as `origin_input`.
- **grad** (Tensor) - Data type same as `origin_input`.
Outputs:
Tensor, With data type same as `origin_input`.
"""
@prim_attr_register
def __init__(self, ksize=1, strides=1, padding="VALID"):
super(MaxPoolGradGrad, self).__init__(ksize, strides, padding)
def infer_shape(self, x1_shape, x2_shape, grad_shape):
return x1_shape
def infer_dtype(self, x1_dtype, x2_dtype, grad_dtype):
args = {'x1_dtype': x1_dtype, 'x2_dtype': x2_dtype, 'grad_dtype': grad_dtype}
validator.check_tensor_type_same(args, [mstype.float16], self.name)
return x1_dtype
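# The "same"/"valid" padding description in the MaxPoolGradGrad docstring corresponds to the
# usual output-size arithmetic for those modes. The formulas below are the standard
# TensorFlow-style rules, stated here as an assumption for illustration rather than code
# taken from the operator itself:
def _pool_output_size_sketch(in_size, kernel, stride, padding):
    import math
    if padding == "SAME":
        return math.ceil(in_size / stride)
    # VALID: the largest output that fits without padding
    return math.floor((in_size - kernel) / stride) + 1
# For example, in_size=7, kernel=3, stride=2 gives 4 with SAME and 3 with VALID.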
class MaximumGrad(Primitive):
"""Grad for maximum."""
@prim_attr_register
def __init__(self, grad_x=True, grad_y=True):
"""Init MaximumGrad"""
def __call__(self, x, y, dout):
raise NotImplementedError
class MaxPoolGradWithArgmax(_PoolGrad):
"""Computes the gradients of MaxPoolWithArgmax."""
@prim_attr_register
def __init__(self, ksize=1, strides=1, padding="VALID",):
self.init_prim_io_names(inputs=['x', 'grad', 'argmax'], outputs=['output'])
super(MaxPoolGradWithArgmax, self).__init__(ksize, strides, padding)
def infer_shape(self, x_shape, grad_shape, argmax_shape):
if not grad_shape:
raise TypeError("The dout of MaxPoolGradWithArgmax should be a Tensor.")
return x_shape
def infer_dtype(self, x_dtype, grad_dtype, argmax_dtype):
return grad_dtype
class MaxPoolGradGradWithArgmax(_PoolGrad):
r"""
Computes the gradients of MaxPoolGradWithArgmax.
Args:
ksize (Union[int, tuple[int]]): The size of kernel used to take the maximum value,
is an int number that represents height and width are both ksize, or a tuple
of two int numbers that represent height and width respectively. Default: 1.
strides (Union[int, tuple[int]]): The distance of kernel moving, an int number that represents
the height and width of movement are both strides, or a tuple of two int numbers that
represent height and width of movement respectively. Default: 1.
padding (str): The optional values for pad mode, is "same" or "valid", not case sensitive.
Default: "valid".
- same: Adopts the way of completion. Output height and width will be the same as
the input. Total number of padding will be calculated for horizontal and vertical
direction and evenly distributed to top and bottom, left and right if possible.
Otherwise, the last extra padding will be done from the bottom and the right side.
- valid: Adopts the way of discarding. The possibly largest height and width of output
              will be returned without padding. Extra pixels will be discarded.
Inputs:
- **x** (Tensor) - Tensor with data format "NCHW", data type should be float16.
- **grad** (Tensor) - Data type same as `x`.
- **argmax** (Tensor) - Data type should be uint16 or int64.
Outputs:
Tensor, With data type same as `x`.
"""
@prim_attr_register
def __init__(self, ksize=1, strides=1, padding="VALID"):
self.init_prim_io_names(inputs=['x', 'grad', 'argmax'], outputs=['output'])
super(MaxPoolGradGradWithArgmax, self).__init__(ksize, strides, padding)
def infer_shape(self, x_shape, grad_shape, argmax_shape):
if not grad_shape:
raise TypeError("The dout of MaxPoolGradGradWithArgmax should be a Tensor.")
return x_shape
def infer_dtype(self, x_dtype, grad_dtype, argmax_dtype):
args = {'x_dtype': x_dtype, 'grad_dtype': grad_dtype}
validator.check_tensor_type_same(args, [mstype.float16], self.name)
return grad_dtype
class MinimumGrad(Primitive):
"""Grad for minimum."""
@prim_attr_register
def __init__(self, grad_x=True, grad_y=True):
"""Init MinimumGrad"""
def __call__(self, x, y, dout):
raise NotImplementedError
class L2NormalizeGrad(PrimitiveWithInfer):
r"""
Gradients of L2 normalize.
Args:
axis (int): The begin axis for the input to apply L2 normalize. Default: 0.
epsilon (float): A small value added for numerical stability. Default: 1e-4.
Inputs:
- **input_x** (Tensor) - Should be the input `weight` of forward operator L2Normalize.
- **out** (Tensor) - Should be the output of forward operator L2Normalize.
- **dout** (Tensor) - The backprop of the next layer.
Outputs:
Tensor, gradients of L2Normalize `input_x`.
"""
@prim_attr_register
def __init__(self, axis=0, epsilon=1e-4):
validator.check_value_type('axis', axis, [int], self.name)
validator.check_value_type('epsilon', epsilon, [int, float], self.name)
def infer_shape(self, input_x, out, dout):
validator.check('input_x shape', input_x, 'out shape', out, Rel.EQ, self.name)
validator.check('input_x shape', input_x, 'dout shape', dout, Rel.EQ, self.name)
return input_x
def infer_dtype(self, input_x, out, dout):
args = {'input_x': input_x, 'out': out, 'dout': dout}
validator.check_tensor_type_same(args, mstype.number_type, self.name)
return input_x
class LayerNormGrad(Primitive):
"""
Applies the layer normalization to the input array.
This operator will calculate the input gradients of layernorm.
Args:
begin_norm_axis (int): The begin axis for the input to apply layernorm. Default: 1.
begin_params_axis (int): The begin axis for the parameter input to apply layernorm. Default: 1.
Returns:
tuple[int], tuple of 3 values (the gradients of layernorm input, gamma, beta).
"""
@prim_attr_register
def __init__(self, begin_norm_axis=1, begin_params_axis=1):
"""init"""
self.begin_norm_axis = validator.check_value_type('begin_norm_axis', begin_norm_axis, [int], self.name)
self.begin_params_axis = validator.check_value_type('begin_params_axis', begin_params_axis, [int], self.name)
def __call__(self, x, dy, variance, mean, gamma):
raise NotImplementedError
class LogSoftmaxGrad(PrimitiveWithInfer):
"""Computes gradient for the Log Softmax activation."""
@prim_attr_register
def __init__(self, axis=-1):
"""init LogSoftmaxGrad"""
validator.check_value_type("axis", axis, [int], self.name)
def infer_shape(self, dout, logits):
rank = len(logits)
validator.check_int_range('axis', self.axis, -rank - 1, rank, Rel.INC_BOTH, self.name)
return logits
def infer_dtype(self, dout, logits):
validator.check_subclass("logits", logits, mstype.tensor, self.name)
return logits
class LSTMGradData(PrimitiveWithInfer):
"""Computes the data gradients of LSTM."""
@prim_attr_register
def __init__(self, input_size, hidden_size, num_layers, has_bias, bidirectional, dropout):
self.input_size = validator.check_integer('input_size', input_size, 0, Rel.GT, self.name)
self.hidden_size = validator.check_integer('hidden_size', hidden_size, 0, Rel.GT, self.name)
self.num_layers = validator.check_integer('num_layers', num_layers, 0, Rel.GT, self.name)
self.has_bias = validator.check_value_type('has_bias', has_bias, (bool,), self.name)
self.bidirectional = validator.check_value_type('bidirectional', bidirectional, (bool,), self.name)
self.dropout = validator.check_value_type("dropout", dropout, [float], self.name)
self.dropout = validator.check_number_range('dropout', dropout, 0, 1, Rel.INC_BOTH, self.name)
if bidirectional:
self.num_directions = 2
else:
self.num_directions = 1
def infer_shape(self, y_shape, dy_shape, dhy_shape, dcy_shape, w_shape,
hx_shape, cx_shape, reserve_shape, state_shape):
# dhy and dcy should be same shape
validator.check_integer("h_shape", len(dhy_shape), 3, Rel.EQ, self.name)
validator.check_integer("h_shape", len(dhy_shape), len(dcy_shape), Rel.EQ, self.name)
validator.check_integer("h_shape[0]", dhy_shape[0], dcy_shape[0], Rel.EQ, self.name)
validator.check_integer("h_shape[1]", dhy_shape[1], dcy_shape[1], Rel.EQ, self.name)
validator.check_integer("h_shape[2]", dhy_shape[2], dcy_shape[2], Rel.EQ, self.name)
validator.check_integer("h_shape[0]", dhy_shape[0], self.num_layers * self.num_directions, Rel.EQ, self.name)
validator.check_integer("h_shape[2]", dhy_shape[2], self.hidden_size, Rel.EQ, self.name)
# dy: (seq_len, batch_size, hidden_size * num_directions)
validator.check_integer("dy_shape", len(dy_shape), 3, Rel.EQ, self.name)
validator.check_integer("dy[1]", dy_shape[1], dhy_shape[1], Rel.EQ, self.name)
validator.check_integer("dy[2]", dy_shape[2], self.hidden_size * self.num_directions, Rel.EQ, self.name)
# (seq_len, batch_size, input_size)
dx_shape = (y_shape[0], y_shape[1], self.input_size)
dhx_shape = dhy_shape
dcx_shape = dcy_shape
return (dx_shape, dhx_shape, dcx_shape)
def infer_dtype(self, y_dtype, dy_dtype, dhy_dtype, dcy_dtype, w_dtype,
hx_dtype, cx_dtype, reserve_dtype, state_dtype):
args = {"dy": dy_dtype, "dhy": dhy_dtype, "dcy": dcy_dtype}
validator.check_tensor_type_same(args, (mstype.float32, mstype.float16), self.name)
return (dy_dtype, dy_dtype, dy_dtype)
class LSTMGradWeight(PrimitiveWithInfer):
"""Computes the weight gradients of LSTM."""
@prim_attr_register
def __init__(self, input_size, hidden_size, num_layers, has_bias, bidirectional, dropout):
self.input_size = validator.check_integer('input_size', input_size, 0, Rel.GT, self.name)
self.hidden_size = validator.check_integer('hidden_size', hidden_size, 0, Rel.GT, self.name)
self.num_layers = validator.check_integer('num_layers', num_layers, 0, Rel.GT, self.name)
self.has_bias = validator.check_value_type('has_bias', has_bias, (bool,), self.name)
self.bidirectional = validator.check_value_type('bidirectional', bidirectional, (bool,), self.name)
self.dropout = validator.check_value_type("dropout", dropout, [float], self.name)
self.dropout = validator.check_number_range('dropout', dropout, 0, 1, Rel.INC_BOTH, self.name)
if bidirectional:
self.num_directions = 2
else:
self.num_directions = 1
def infer_shape(self, x_shape, hx_shape, y_shape, reserve_shape, state_shape):
weight_size = 0
gate_size = 4 * self.hidden_size
for layer in range(self.num_layers):
for _ in range(self.num_directions):
input_layer_size = self.input_size if layer == 0 else self.hidden_size * self.num_directions
weight_size += gate_size * input_layer_size
weight_size += gate_size * self.hidden_size
if self.has_bias:
weight_size += 2 * gate_size
return (weight_size, 1, 1)
def infer_dtype(self, x_dtype, hx_dtype, y_dtype, reserve_dtype, state_dtype):
return hx_dtype
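# LSTMGradWeight.infer_shape accumulates a flat weight size: for every layer and direction it
# adds gate_size * input_layer_size for the input-hidden weights, gate_size * hidden_size for
# the hidden-hidden weights, and 2 * gate_size for the two bias vectors, where
# gate_size = 4 * hidden_size. A pure-Python sketch of the same arithmetic for one sample
# configuration (the hyperparameter values are made up for the example):
def _lstm_weight_size_sketch(input_size=10, hidden_size=16, num_layers=2,
                             bidirectional=True, has_bias=True):
    num_directions = 2 if bidirectional else 1
    gate_size = 4 * hidden_size
    total = 0
    for layer in range(num_layers):
        for _ in range(num_directions):
            input_layer_size = input_size if layer == 0 else hidden_size * num_directions
            total += gate_size * (input_layer_size + hidden_size)
            if has_bias:
                total += 2 * gate_size
    return total
# _lstm_weight_size_sketch() evaluates to 9984 for the sample values above.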
class LSTMGrad(PrimitiveWithInfer):
"""Computes the data and weight gradients of LSTM."""
@prim_attr_register
def __init__(self, input_size, hidden_size, num_layers, has_bias, bidirectional, dropout):
self.input_size = validator.check_integer('input_size', input_size, 0, Rel.GT, self.name)
self.hidden_size = validator.check_integer('hidden_size', hidden_size, 0, Rel.GT, self.name)
self.num_layers = validator.check_integer('num_layers', num_layers, 0, Rel.GT, self.name)
self.has_bias = validator.check_value_type('has_bias', has_bias, (bool,), self.name)
self.bidirectional = validator.check_value_type('bidirectional', bidirectional, (bool,), self.name)
self.dropout = validator.check_value_type("dropout", dropout, [float], self.name)
self.dropout = validator.check_number_range('dropout', dropout, 0, 1, Rel.INC_BOTH, self.name)
if bidirectional:
self.num_directions = 2
else:
self.num_directions = 1
def infer_shape(self, x_shape, hx_shape, cx_shape, w_shape, y_shape, hy_shape, cy_shape, dy_shape, dhy_shape,
dcy_shape, reserve_shape):
# dhy and dcy should be same shape
validator.check_integer("h_shape", len(dhy_shape), 3, Rel.EQ, self.name)
validator.check_integer("h_shape", len(dhy_shape), len(dcy_shape), Rel.EQ, self.name)
validator.check_integer("h_shape[0]", dhy_shape[0], dcy_shape[0], Rel.EQ, self.name)
validator.check_integer("h_shape[1]", dhy_shape[1], dcy_shape[1], Rel.EQ, self.name)
validator.check_integer("h_shape[2]", dhy_shape[2], dcy_shape[2], Rel.EQ, self.name)
validator.check_integer("h_shape[0]", dhy_shape[0], self.num_layers * self.num_directions, Rel.EQ, self.name)
validator.check_integer("h_shape[2]", dhy_shape[2], self.hidden_size, Rel.EQ, self.name)
# dy: (seq_len, batch_size, hidden_size * num_directions)
validator.check_integer("dy_shape", len(dy_shape), 3, Rel.EQ, self.name)
validator.check_integer("dy[1]", dy_shape[1], dhy_shape[1], Rel.EQ, self.name)
validator.check_integer("dy[2]", dy_shape[2], self.hidden_size * self.num_directions, Rel.EQ, self.name)
# (seq_len, batch_size, input_size)
dx_shape = (y_shape[0], y_shape[1], self.input_size)
dhx_shape = dhy_shape
dcx_shape = dcy_shape
weight_size = 0
gate_size = 4 * self.hidden_size
for layer in range(self.num_layers):
for _ in range(self.num_directions):
input_layer_size = self.input_size if layer == 0 else self.hidden_size * self.num_directions
weight_size += gate_size * input_layer_size
weight_size += gate_size * self.hidden_size
if self.has_bias:
weight_size += gate_size
return (dx_shape, dhx_shape, dcx_shape, (weight_size, 1, 1))
def infer_dtype(self, x_dtype, hx_dtype, cx_dtype, w_dtype, y_dtype, hy_dtype, cy_dtype, dy_dtype, dhy_dtype,
dcy_dtype, reserve_dtype):
return (dy_dtype, dy_dtype, dy_dtype, hx_dtype)
class PReLUGrad(PrimitiveWithInfer):
r"""
Gradients of PReLU operation.
Note:
1-dimensional input_x is not supported.
Inputs:
- **y_backprop** (Tensor) - Representing the backprop of the next layer.
- **input_x** (Tensor) - Should be the input `input_x` of forward operator PRelu.
- **weight** (Tensor) - Float Tensor, w > 0, should be the input `weight` of forward operator PRelu.
Outputs:
Tensor, with the same type as `input_x`.
"""
@prim_attr_register
def __init__(self):
pass
def infer_shape(self, y_backprop_shape, A_shape, w_shape):
if len(A_shape) == 1:
raise ValueError(f'For \'{self.name}\' input_x rank 1 is not supported.')
return y_backprop_shape, w_shape
def infer_dtype(self, y_backprop_dtype, A_dtype, w_dtype):
valid_types = (mstype.float16, mstype.float32)
validator.check_tensor_type_same({"y_backprop": y_backprop_dtype}, valid_types, self.name)
validator.check_tensor_type_same({"A_dtype": A_dtype}, valid_types, self.name)
validator.check_tensor_type_same({"w_dtype": w_dtype}, valid_types, self.name)
return y_backprop_dtype, w_dtype
class ReluGrad(Primitive):
"""Performs grad of Relu operation."""
@prim_attr_register
def __init__(self):
"""init ReluGrad"""
self.init_prim_io_names(inputs=['y_backprop', 'x'], outputs=['output'])
def __call__(self, y_backprop, x):
raise NotImplementedError
class ReLU6Grad(PrimitiveWithInfer):
"""Performs grad of ReLU6 operation."""
@prim_attr_register
def __init__(self):
self.init_prim_io_names(inputs=['y_grad', 'x'], outputs=['output'])
def __call__(self, y_grad, x):
raise NotImplementedError
def infer_shape(self, y_grad_shape, x_shape):
return x_shape
def infer_dtype(self, y_grad_dtype, x_dtype):
validator.check_tensor_type_same({"y_grad": y_grad_dtype}, (mstype.float16, mstype.float32), self.name)
validator.check_tensor_type_same({"x": x_dtype}, (mstype.float16, mstype.float32), self.name)
return x_dtype
class ReluGradV2(PrimitiveWithInfer):
"""Performs grad of ReLUV2 operation."""
@prim_attr_register
def __init__(self):
self.init_prim_io_names(inputs=['gradients', 'mask'], outputs=['output'])
def __call__(self, gradients, mask):
raise NotImplementedError
def infer_shape(self, gradients_shape, mask_shape):
return gradients_shape
def infer_dtype(self, gradients_dtype, mask_dtype):
validator.check_tensor_type_same({'gradients': gradients_dtype}, mstype.number_type, self.name)
validator.check_tensor_type_same({'mask': mask_dtype}, (mstype.uint8,), self.name)
return gradients_dtype
class EluGrad(PrimitiveWithInfer):
"""Performs grad of Elu operation."""
@prim_attr_register
def __init__(self):
"""Init EluGrad"""
def infer_shape(self, y_grad_shape, x_shape):
return x_shape
def infer_dtype(self, y_grad_dtype, x_dtype):
args = {'y_grad': y_grad_dtype, 'x': x_dtype}
validator.check_tensor_type_same(args, mstype.float_type, self.name)
return x_dtype
class ResizeBilinearGrad(PrimitiveWithInfer):
"""Performs grad of ResizeBilinear operation."""
@prim_attr_register
def __init__(self, align_corners=False):
"""init"""
def infer_shape(self, dout_shape, orig_shape):
return orig_shape
def infer_dtype(self, dout_dtype, orig_type):
return dout_dtype
class ResizeNearestNeighborGrad(PrimitiveWithInfer):
"""
Compute gradient of `ResizeNearestNeighbor` operator.
Note:
The shape of input parameter `size` must be (height, width).
Args:
align_corners (bool): Whether the centers of the 4 corner pixels of the input
and output tensors are aligned. Default: False.
"""
@prim_attr_register
def __init__(self, align_corners=False):
"""Init ResizeNearestNeighborGrad"""
self.init_prim_io_names(inputs=['grads', 'size'], outputs=['y'])
def __infer__(self, grads, size):
shp = (grads['shape'][0],) + (grads['shape'][1],) + size['value']
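        # Shape bookkeeping example (illustrative): grads of shape (N, C, h, w) with
        # size == (H, W) yields an output shape of (N, C, H, W), i.e. the spatial
        # dimensions are replaced by the requested size.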
return {'shape': shp,
'dtype': grads['dtype'],
'value': None}
class ROIAlignGrad(PrimitiveWithInfer):
"""
ROIAlignGrad operator.
Args:
pooled_height (int): The output feature height.
pooled_width (int): The output feature width.
spatial_scale (float): The feature stride.
sample_num (int): Number of sampling points. Default: 2.
"""
@prim_attr_register
def __init__(self, xdiff_shape, pooled_height, pooled_width, spatial_scale, sample_num=2):
"""init ROIAlignGrad"""
validator.check_value_type("pooled_height", pooled_height, [int], self.name)
validator.check_value_type("pooled_width", pooled_width, [int], self.name)
validator.check_value_type("spatial_scale", spatial_scale, [float], self.name)
validator.check_value_type("sample_num", sample_num, [int], self.name)
validator.check_value_type("xdiff_shape", xdiff_shape, [tuple], self.name)
self.xdiff_shape = xdiff_shape
self.pooled_height = pooled_height
self.pooled_width = pooled_width
self.spatial_scale = spatial_scale
self.sample_num = sample_num
def infer_shape(self, ydiff_shape, rois_shape):
return self.xdiff_shape
def infer_dtype(self, ydiff_type, rois_type):
return ydiff_type
class SigmoidGrad(PrimitiveWithInfer):
"""Gets the gradient of Sigmoid operation."""
@prim_attr_register
def __init__(self):
pass
def infer_shape(self, out, dout):
return out
def infer_dtype(self, out, dout):
args = {'out': out, 'dout': dout}
validator.check_tensor_type_same(args, mstype.number_type, self.name)
return out
class HSigmoidGrad(PrimitiveWithInfer):
"""Gets the gradient of HSigmoid operation."""
@prim_attr_register
def __init__(self):
self.init_prim_io_names(inputs=['y_grad', 'x'], outputs=['output'])
def infer_shape(self, y_grad_shape, x_shape):
return x_shape
def infer_dtype(self, y_grad_dtype, x_dtype):
validator.check_tensor_type_same({"y_grad": y_grad_dtype}, (mstype.float16, mstype.float32), self.name)
validator.check_tensor_type_same({"x": x_dtype}, (mstype.float16, mstype.float32), self.name)
return x_dtype
class HSwishGrad(PrimitiveWithInfer):
"""Gets the gradient of HSwish operation."""
@prim_attr_register
def __init__(self):
self.init_prim_io_names(inputs=['y_grad', 'x'], outputs=['output'])
def infer_shape(self, y_grad_shape, x_shape):
return x_shape
def infer_dtype(self, y_grad_dtype, x_dtype):
validator.check_tensor_type_same({"y_grad": y_grad_dtype}, (mstype.float16, mstype.float32), self.name)
validator.check_tensor_type_same({"x": x_dtype}, (mstype.float16, mstype.float32), self.name)
return x_dtype
class SigmoidCrossEntropyWithLogitsGrad(PrimitiveWithInfer):
"""Computes the gradients of `SigmoidCrossEntropyWithLogits`."""
@prim_attr_register
def __init__(self):
"""Init SigmoidCrossEntropyWithLogitsGrad"""
self.init_prim_io_names(inputs=['x', 'y', 'dout'], outputs=['x_grad'])
def infer_shape(self, x_shape, y_shape, dout_shape):
validator.check("x_shape", x_shape, "y_shape", y_shape, Rel.EQ, self.name)
validator.check("x_shape", x_shape, "dout_shape", dout_shape, Rel.EQ, self.name)
return x_shape
def infer_dtype(self, x_dtype, y_dtype, dout_dtype):
args = {"x_dtype": x_dtype, "y_dtype": y_dtype, 'dout_dtype': dout_dtype}
validator.check_tensor_type_same(args, mstype.number_type, self.name)
return dout_dtype
class SliceGrad(PrimitiveWithInfer):
"""Reverse of slice."""
@prim_attr_register
def __init__(self):
"""init SliceGrad"""
self.init_prim_io_names(inputs=['dy', 'x', 'begin', 'size'], outputs=['dx'])
def __infer__(self, dy, x, begin, size):
dy_shape, x_shape, size_value = dy['shape'], x['shape'], size['value']
dy_shape_len = len(dy_shape)
for i in range(dy_shape_len):
validator.check(f'dy_shape[{i}]', dy_shape[i], f'x_shape[{i}]', x_shape[i], Rel.LE, self.name)
validator.check(f'dy_shape[{i}]', dy_shape[i], f'size_shape[{i}]', size_value[i], Rel.EQ, self.name)
return {'shape': x_shape,
'dtype': x['dtype'],
'value': None}
class SmoothL1LossGrad(PrimitiveWithInfer):
"""Computes gradient for prediction on SmoothL1Loss."""
@prim_attr_register
def __init__(self, sigma=1.0):
pass
def infer_shape(self, prediction, target, dloss):
validator.check('prediction shape', prediction, 'target shape', target, Rel.EQ, self.name)
validator.check('prediction shape', prediction, 'dloss shape', dloss, Rel.EQ, self.name)
return prediction
def infer_dtype(self, prediction, target, dloss):
args = {"prediction": prediction, "target": target, 'dloss': dloss}
validator.check_tensor_type_same(args, mstype.number_type, self.name)
return dloss
class StridedSliceGrad(PrimitiveWithInfer):
"""
Performs grad of StridedSlice operation.
Args:
begin_mask (int): Start indexing the slice. Default: 0.
end_mask (int): End indexing the slice. Default: 0.
ellipsis_mask (int): An int32 mask. Default: 0.
new_axis_mask (int): An int32 mask. Default: 0.
shrink_axis_mask (int): An int32 mask. Default: 0.
Returns:
Tensor, has the same shape of input.
"""
@prim_attr_register
def __init__(self,
begin_mask=0,
end_mask=0,
ellipsis_mask=0,
new_axis_mask=0,
shrink_axis_mask=0):
"""init StrideSliceGrad"""
validator.check_value_type('begin_mask', begin_mask, [int], self.name)
validator.check_value_type('end_mask', end_mask, [int], self.name)
validator.check_value_type('ellipsis_mask', ellipsis_mask, [int], self.name)
validator.check_value_type('new_axis_mask', new_axis_mask, [int], self.name)
validator.check_value_type('shrink_axis_mask', shrink_axis_mask, [int], self.name)
self.init_prim_io_names(inputs=['dy', 'shapex', 'begin', 'end', 'strides'], outputs=['output'])
def __infer__(self, dy, shapex, begin, end, strides):
args = {"dy": dy['dtype']}
validator.check_tensor_type_same(args, mstype.number_type, self.name)
for idx, item in enumerate(shapex['value']):
validator.check_value_type("shapex[%d]" % idx, item, [int], self.name)
for idx, item in enumerate(begin['value']):
validator.check_value_type("begin[%d]" % idx, item, [int], self.name)
for idx, item in enumerate(end['value']):
validator.check_value_type("end[%d]" % idx, item, [int], self.name)
for idx, item in enumerate(strides['value']):
validator.check_value_type("strides[%d]" % idx, item, [int], self.name)
return {'shape': shapex['value'],
'dtype': dy['dtype'],
'value': None}
class StridedSliceGradAICPU(PrimitiveWithInfer):
"""
Performs grad of StridedSlice operation.
Args:
begin_mask (int): Start indexing the slice. Default: 0.
end_mask (int): End indexing the slice. Default: 0.
ellipsis_mask (int): An int32 mask. Default: 0.
new_axis_mask (int): An int32 mask. Default: 0.
shrink_axis_mask (int): An int32 mask. Default: 0.
Returns:
Tensor, has the same shape of input.
"""
@prim_attr_register
def __init__(self,
begin_mask=0,
end_mask=0,
ellipsis_mask=0,
new_axis_mask=0,
shrink_axis_mask=0):
"""init StrideSliceGrad"""
validator.check_value_type('begin_mask', begin_mask, [int], self.name)
validator.check_value_type('end_mask', end_mask, [int], self.name)
validator.check_value_type('ellipsis_mask', ellipsis_mask, [int], self.name)
validator.check_value_type('new_axis_mask', new_axis_mask, [int], self.name)
validator.check_value_type('shrink_axis_mask', shrink_axis_mask, [int], self.name)
self.init_prim_io_names(inputs=['dy', 'shapex', 'begin', 'end', 'strides'], outputs=['output'])
def __infer__(self, dy, shapex, begin, end, strides):
args = {"dy": dy['dtype']}
validator.check_tensor_type_same(args, mstype.number_type, self.name)
for idx, item in enumerate(shapex['value']):
validator.check_value_type("shapex[%d]" % idx, item, [int], self.name)
for idx, item in enumerate(begin['value']):
validator.check_value_type("begin[%d]" % idx, item, [int], self.name)
for idx, item in enumerate(end['value']):
validator.check_value_type("end[%d]" % idx, item, [int], self.name)
for idx, item in enumerate(strides['value']):
validator.check_value_type("strides[%d]" % idx, item, [int], self.name)
return {'shape': shapex['value'],
'dtype': dy['dtype'],
'value': None}
class SoftplusGrad(PrimitiveWithInfer):
"""Computes gradient for the Log Softmax activation."""
@prim_attr_register
def __init__(self):
self.init_prim_io_names(inputs=['dout', 'x'], outputs=['output'])
def infer_shape(self, dout_shape, x_shape):
validator.check("x_shape", x_shape, "dout_shape", dout_shape, Rel.EQ, self.name)
return x_shape
def infer_dtype(self, dout_dtype, x_dtype):
args = {"x_dtype": x_dtype, "dout_dtype": dout_dtype}
validator.check_tensor_type_same(args, mstype.float_type, self.name)
return x_dtype
class TanhGrad(PrimitiveWithInfer):
"""Computes gradient of hyperbolic tangent of input element-wise."""
@prim_attr_register
def __init__(self):
pass
def infer_shape(self, out, dout):
return out
def infer_dtype(self, out, dout):
args = {"out": out, "dout": dout}
validator.check_tensor_type_same(args, mstype.number_type, self.name)
return out
class MirrorPadGrad(PrimitiveWithInfer):
"""Gradients of MirrorPad operation."""
@prim_attr_register
def __init__(self, mode="REFLECT"):
"""init MirrorPad"""
validator.check_string('mode', mode, ['REFLECT', 'SYMMETRIC'], self.name)
self.mode = mode
def __infer__(self, dout, paddings):
validator.check_subclass("dout", dout['dtype'], mstype.tensor, self.name)
validator.check_subclass("paddings", paddings['dtype'], mstype.tensor, self.name)
validator.check("paddings rank", len(paddings['shape']), "expected", 2, Rel.EQ, self.name)
validator.check("paddings dim_1", paddings['shape'][1], "expected", 2, Rel.EQ, self.name)
if paddings['value'] is None:
raise ValueError(f"For {self.name}, paddings must be const.")
paddings_value = paddings['value'].asnumpy()
y_shape = ()
dout_shape = dout['shape']
for i, val in enumerate(dout_shape):
y_shape += (val - paddings_value[i][0] - paddings_value[i][1],)
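        # Worked example (illustrative): dout_shape = (1, 1, 6, 6) with
        # paddings = [[0, 0], [0, 0], [1, 1], [1, 1]] gives y_shape = (1, 1, 4, 4),
        # i.e. each padded dimension shrinks by its leading + trailing pad.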
return {'shape': y_shape,
'dtype': dout['dtype'],
'value': None}
class EmbeddingLookupCommGrad(PrimitiveWithInfer):
"""
Perform the gradient for the communication part of EmbeddingLookup operator.
This works ONLY when 'reduce_scatter_flag' is True in 'EmbeddingLookup'. Roughly speaking,
this primitive is implemented by StridedSlice --> _HostAllGather --> Concat. This primitive runs on host.
"""
@prim_attr_register
def __init__(self):
self.init_prim_io_names(inputs=['dy', 'split_num'], outputs=['output'])
self.add_prim_attr('primitive_target', 'CPU')
def __infer__(self, dy, split_num):
"""
This primitive is implemented by three steps:
1) Split the 'dy' along dimension 0 into 'split_num' parts.
2) For each part, perform _HostAllGather((0, 1, 2, 3, 4, 5, 6, 7)) on the host.
3) After _HostAllGather, there are still 'split_num' parts in each process. Then, perform Concat on them
along dimension 0.
The output shape of this primitive: shape(output)[0] == shape(dy)[0] * 8
"""
dy_shape = tuple(dy['shape'])
split_num_value = split_num['value']
validator.check_value_type("split_num_value", split_num_value, [int], self.name)
dy_shape_all = F.tuple_setitem(dy_shape, 0, dy_shape[0] * 8)
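        # Example of the shape rule documented above (illustrative): a 'dy' of shape
        # (m, d) becomes (m * 8, d) after the split / _HostAllGather / concat
        # sequence, since the all-gather runs over the fixed 8-process group.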
return {'shape': dy_shape_all,
'dtype': dy['dtype'],
'value': None}
class RefToEmbed(Primitive):
r"""
Make a key from Ref.
    The key is a symbolic_key, an embedding on a Parameter, which is used as the key of the variable in env_type;
    items are retrieved by the operation `env_get_item` with the symbolic_key instance. The `Parameter` is a ref.
Inputs:
- **input** (Ref) - Target ref, ref is short for reference. The value of a Parameter is a ref.
Outputs:
symbolic_key, made from the Ref.
Examples:
>>> class Net(nn.Cell):
>>> def __init__(self):
>>> super(Net, self).__init__()
>>> self.weight = mindspore.Parameter(1.0, name='weight')
>>>
>>> def construct(self):
>>> key = RefToEmbed()(self.weight)
>>> return key, self.weight
"""
__mindspore_signature__ = (
('variable', sig_rw.RW_REF, sig_kind.KIND_POSITIONAL_KEYWORD),
)
@prim_attr_register
def __init__(self):
pass
class AtanGrad(PrimitiveWithInfer):
"""
Computes AtanGrad of input element-wise.
Returns:
Tensor, has the same type as input.
"""
@prim_attr_register
def __init__(self):
"""init AtanGrad"""
def infer_shape(self, x, dout):
validator.check("x shape", x, "dout shape", dout, Rel.EQ, self.name)
return x
def infer_dtype(self, x, dout):
args = {"x": x, "dout": dout}
validator.check_tensor_type_same(args, mstype.number_type, self.name)
return x
class BasicLSTMCellCStateGrad(PrimitiveWithInfer):
"""Computes the state gradients of BasicLSTMCell."""
@prim_attr_register
def __init__(self, forget_bias, activation):
self.forget_bias = validator.check_value_type("forget_bias", forget_bias, [float], self.name)
self.activation = validator.check_string("activation", activation, ['tanh'], self.name)
def infer_shape(self, c_shape, dht_shape, dct_shape, it_shape, jt_shape, ft_shape, ot_shape, tanhct_shape):
# dhy and dcy should be same shape
validator.check_integer("c rank", len(c_shape), 2, Rel.EQ, self.name)
validator.check("dht rank", len(dht_shape), "c rank", len(c_shape), Rel.EQ, self.name)
validator.check("dct rank", len(dct_shape), "c rank", len(c_shape), Rel.EQ, self.name)
validator.check("it rank", len(it_shape), "c rank", len(c_shape), Rel.EQ, self.name)
validator.check("jt rank", len(jt_shape), "c rank", len(c_shape), Rel.EQ, self.name)
validator.check("ft rank", len(ft_shape), "c rank", len(c_shape), Rel.EQ, self.name)
validator.check("ot rank", len(ot_shape), "c rank", len(c_shape), Rel.EQ, self.name)
validator.check("tanhct rank", len(tanhct_shape), "c rank", len(c_shape), Rel.EQ, self.name)
validator.check("dht shape", dht_shape, "c shape", c_shape, Rel.EQ, self.name)
validator.check("dct shape", dct_shape, "c shape", c_shape, Rel.EQ, self.name)
validator.check("it shape", it_shape, "c shape", c_shape, Rel.EQ, self.name)
validator.check("jt shape", jt_shape, "c shape", c_shape, Rel.EQ, self.name)
validator.check("ft shape", ft_shape, "c shape", c_shape, Rel.EQ, self.name)
validator.check("ot shape", ot_shape, "c shape", c_shape, Rel.EQ, self.name)
validator.check("tanhct shape", tanhct_shape, "c shape", c_shape, Rel.EQ, self.name)
dgate_shape = (c_shape[0], 4 * c_shape[1])
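        # Shape example (illustrative): with c_shape = (batch, hidden) = (16, 32),
        # dgate_shape is (16, 128) (the four gates i, j, f, o stacked) and
        # dct_1_shape stays (16, 32).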
dct_1_shape = c_shape
return (dgate_shape, dct_1_shape)
def infer_dtype(self, c_dtype, dht_dtype, dct_dtype, it_dtype, jt_dtype, ft_dtype, ot_dtype, tanhct_dtype):
validator.check_subclass("c", c_dtype, [mstype.tensor], self.name)
validator.check_subclass("dht", dht_dtype, [mstype.tensor], self.name)
validator.check_subclass("dct", dct_dtype, [mstype.tensor], self.name)
validator.check_subclass("it", it_dtype, [mstype.tensor], self.name)
validator.check_subclass("jt", jt_dtype, [mstype.tensor], self.name)
validator.check_subclass("ft", ft_dtype, [mstype.tensor], self.name)
validator.check_subclass("ot", ot_dtype, [mstype.tensor], self.name)
validator.check_subclass("tanhct", tanhct_dtype, [mstype.tensor], self.name)
validator.check_type_name("c", c_dtype, [mstype.float16, mstype.float32], self.name)
validator.check_type_name("dht", dht_dtype, [mstype.float16, mstype.float32], self.name)
validator.check_type_name("dct", dct_dtype, [mstype.float16, mstype.float32], self.name)
validator.check_type_name("it", it_dtype, [mstype.float16, mstype.float32], self.name)
validator.check_type_name("jt", jt_dtype, [mstype.float16, mstype.float32], self.name)
validator.check_type_name("ft", ft_dtype, [mstype.float16, mstype.float32], self.name)
validator.check_type_name("ot", ot_dtype, [mstype.float16, mstype.float32], self.name)
validator.check_type_name("tanhct", tanhct_dtype, [mstype.float16, mstype.float32], self.name)
return (c_dtype, c_dtype)
class BasicLSTMCellWeightGrad(PrimitiveWithInfer):
"""Computes the weight gradients of BasicLSTM."""
@prim_attr_register
def __init__(self):
pass
def infer_shape(self, x_shape, h_shape, dgate_shape):
validator.check_integer("x rank", len(x_shape), 2, Rel.EQ, self.name)
validator.check("h rank", len(h_shape), " x rank", len(x_shape), Rel.EQ, self.name)
validator.check("dgate rank", len(dgate_shape), "x rank", len(x_shape), Rel.EQ, self.name)
validator.check("h_shape[0]", h_shape[0], "x_shape[0]", x_shape[0], Rel.EQ, self.name)
validator.check("dgate_shape[0]", dgate_shape[0], "h_shape[0]", h_shape[0], Rel.EQ, self.name)
validator.check("dgate_shape[1]", dgate_shape[1], "4*h_shape[1]", 4 * h_shape[1], Rel.EQ, self.name)
dw_shape = (dgate_shape[1], x_shape[1] + h_shape[1], 1, 1)
db_shape = (dgate_shape[1], 1, 1, 1)
return (dw_shape, db_shape)
def infer_dtype(self, x_dtype, h_dtype, dgate_dtype):
validator.check_subclass("x", x_dtype, mstype.tensor, self.name)
validator.check_subclass("h", h_dtype, mstype.tensor, self.name)
validator.check_subclass("dgate", dgate_dtype, mstype.tensor, self.name)
validator.check_type_name("x", x_dtype, [mstype.float16, mstype.float32], self.name)
validator.check_type_name("h", h_dtype, [mstype.float16, mstype.float32], self.name)
validator.check_type_name("dgate", dgate_dtype, [mstype.float16, mstype.float32], self.name)
return (x_dtype, x_dtype)
class BasicLSTMCellInputGrad(PrimitiveWithInfer):
"""Computes the input gradients of BasicLSTM."""
@prim_attr_register
def __init__(self, keep_prob):
self.keep_prob = validator.check_value_type("keep_prob", keep_prob, [float], self.name)
self.keep_prob = validator.check_number_range("keep_prob", keep_prob, 0.0, 1.0, Rel.INC_BOTH, self.name)
def infer_shape(self, dgate_shape, w_shape):
validator.check_integer("dgate rank", len(dgate_shape), 2, Rel.EQ, self.name)
validator.check_integer("w rank", len(w_shape), 4, Rel.EQ, self.name)
validator.check("dgate_shape[1]", dgate_shape[1], "w_shape[0]", w_shape[0], Rel.EQ, self.name)
dxt_shape = (dgate_shape[0], w_shape[1] - w_shape[0] // 4)
dht_shape = (dgate_shape[0], dgate_shape[1] // 4)
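        # Shape example (illustrative): dgate_shape = (16, 128) and
        # w_shape = (128, 42, 1, 1) give dxt_shape = (16, 42 - 128 // 4) = (16, 10)
        # and dht_shape = (16, 128 // 4) = (16, 32).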
return (dxt_shape, dht_shape)
def infer_dtype(self, dgate_dtype, w_dtype):
validator.check_subclass("dgate", dgate_dtype, mstype.tensor, self.name)
validator.check_subclass("w", w_dtype, mstype.tensor, self.name)
validator.check_type_name("dgate", dgate_dtype, [mstype.float16, mstype.float32], self.name)
validator.check_type_name("w", w_dtype, [mstype.float16, mstype.float32], self.name)
return (dgate_dtype, dgate_dtype)
class InvGrad(PrimitiveWithInfer):
"""Computes gradients for inv operation."""
@prim_attr_register
def __init__(self):
pass
def infer_shape(self, x, grad):
validator.check("x_shape", x, "grad_shape", grad, Rel.EQ, self.name)
return x
def infer_dtype(self, x, grad):
validator.check_type_name("dgate", x, [mstype.float16, mstype.float32, mstype.int32, mstype.int8], self.name)
validator.check_type_name("grad", grad, [mstype.float16, mstype.float32, mstype.int32, mstype.int8], self.name)
return x
class LRNGrad(PrimitiveWithInfer):
"""Computes gradients for LRN operation."""
@prim_attr_register
def __init__(self, depth_radius=5, bias=1.0, alpha=1.0, beta=0.5):
self.init_prim_io_names(inputs=['grads', 'x', 'y'], outputs=['z'])
validator.check_value_type("depth_radius", depth_radius, [int], self.name)
validator.check_value_type("bias", bias, [float], self.name)
validator.check_value_type("alpha", alpha, [float], self.name)
validator.check_value_type("beta", beta, [float], self.name)
def infer_dtype(self, grads, x, y):
args = {"grads": grads, "x": x, "y": y}
validator.check_tensor_type_same(args, (mstype.float16, mstype.float32,), self.name)
return x
def infer_shape(self, grads, x, y):
return x
| 39.648338 | 119 | 0.651574 |
79406707be2da045747fa6070187c44ef0a6f386 | 3,312 | py | Python | HomeLab/polls/views.py | amalik18/HomeLab | b0d71f379e8628948ceb15bd776386a81a468558 | [
"MIT"
] | null | null | null | HomeLab/polls/views.py | amalik18/HomeLab | b0d71f379e8628948ceb15bd776386a81a468558 | [
"MIT"
] | null | null | null | HomeLab/polls/views.py | amalik18/HomeLab | b0d71f379e8628948ceb15bd776386a81a468558 | [
"MIT"
] | null | null | null | from django.shortcuts import render, get_object_or_404
from django.http import HttpResponse, HttpResponseRedirect
from django.template import loader
from django.shortcuts import render
from django.urls import reverse
from django.http import Http404
from django.utils import timezone
from django.views import generic
# Create your views here.
from .models import Question, Choice
# def index(request):
# latest_question_list = Question.objects.order_by('-pub_date')[:5]
# template = loader.get_template('polls/index.html')
# context = {
# 'latest_question_list': latest_question_list,
# }
# # output = ', '.join([q.question_text for q in latest_question_list])
# return render(request=request, template_name='polls/index.html', context=context)
#
#
# def detail(request, question_id):
# # try:
# # question = Question.objects.get(pk=question_id)
# # except Question.DoesNotExist:
# # raise Http404("The Question does not exist")
# question = get_object_or_404(Question, pk=question_id)
# context = {
# 'question': question
# }
# return render(request=request, template_name='polls/detail.html', context=context)
#
#
# def results(request, question_id):
# question = get_object_or_404(Question, pk=question_id)
# context = {
# 'question': question,
# }
# return render(request=request, template_name='polls/results.html', context=context)
class IndexView(generic.ListView):
template_name = 'polls/index.html'
context_object_name = 'latest_question_list'
def get_queryset(self):
"""Return the last five published questions"""
return Question.objects.filter(pub_date__lte=timezone.now(), choice__isnull=False).order_by('-pub_date')[:5]
class DetailView(generic.DetailView):
model = Question
template_name = 'polls/detail.html'
def get_queryset(self):
"""
        Excludes any questions that aren't published yet or that have no choices.
:return:
"""
return Question.objects.filter(pub_date__lte=timezone.now(), choice__isnull=False)
class ResultsView(generic.DetailView):
model = Question
template_name = 'polls/results.html'
def get_queryset(self):
"""
Excludes any questions that aren't published yet and any questions that do not have a choice.
:return:
"""
return Question.objects.filter(pub_date__lte=timezone.now(), choice__isnull=False)
def vote(request, question_id):
question = get_object_or_404(Question, pk=question_id)
try:
selected_choice = question.choice_set.get(pk=request.POST['choice'])
except (KeyError, Choice.DoesNotExist):
# re-display the question voting form
context = {
'question': question,
'error_message': "You didn't select a choice.",
}
return render(request=request, template_name='polls/detail.html', context=context)
else:
selected_choice.votes += 1
selected_choice.save()
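        # Side note (not part of the original view code): this read-modify-write can
        # race under concurrent requests. A common alternative is an atomic update
        # with django.db.models.F, e.g. selected_choice.votes = F('votes') + 1
        # followed by save(), and refresh_from_db() if the value is reused afterwards.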
'''
Always return an HttpResponseRedirect after a succesful POST operation.
This prevents data from being posted twice if a user hits the back button in their browser.
'''
return HttpResponseRedirect(reverse(viewname='polls:results', args=(question_id,)))
| 34.5 | 116 | 0.688406 |
794068983f05db8990a9af23cebbdd282c752a1f | 159 | py | Python | tests/model_control/detailed/transf_RelativeDifference/model_control_one_enabled_RelativeDifference_Lag1Trend_NoCycle_MLP.py | shaido987/pyaf | b9afd089557bed6b90b246d3712c481ae26a1957 | [
"BSD-3-Clause"
] | 377 | 2016-10-13T20:52:44.000Z | 2022-03-29T18:04:14.000Z | tests/model_control/detailed/transf_RelativeDifference/model_control_one_enabled_RelativeDifference_Lag1Trend_NoCycle_MLP.py | ysdede/pyaf | b5541b8249d5a1cfdc01f27fdfd99b6580ed680b | [
"BSD-3-Clause"
] | 160 | 2016-10-13T16:11:53.000Z | 2022-03-28T04:21:34.000Z | tests/model_control/detailed/transf_RelativeDifference/model_control_one_enabled_RelativeDifference_Lag1Trend_NoCycle_MLP.py | ysdede/pyaf | b5541b8249d5a1cfdc01f27fdfd99b6580ed680b | [
"BSD-3-Clause"
] | 63 | 2017-03-09T14:51:18.000Z | 2022-03-27T20:52:57.000Z | import tests.model_control.test_ozone_custom_models_enabled as testmod
testmod.build_model( ['RelativeDifference'] , ['Lag1Trend'] , ['NoCycle'] , ['MLP'] ); | 39.75 | 86 | 0.754717 |
79406a1e9454beef765d23ef6a4aad365ba03fff | 174,468 | py | Python | geostatspy/geostats.py | jessepisel/GeostatsPy | 239a2a04b67991393892f5c010635b75e2eac062 | [
"MIT"
] | null | null | null | geostatspy/geostats.py | jessepisel/GeostatsPy | 239a2a04b67991393892f5c010635b75e2eac062 | [
"MIT"
] | null | null | null | geostatspy/geostats.py | jessepisel/GeostatsPy | 239a2a04b67991393892f5c010635b75e2eac062 | [
"MIT"
] | 6 | 2020-03-27T01:57:58.000Z | 2020-04-07T13:20:18.000Z | """
This file includes the reimplementations of GSLIB functionality in Python. While
this code will not be as well-tested and robust as the original GSLIB, it does
provide the opportunity to build 2D spatial modeling projects in Python without
the need to rely on compiled Fortran code from GSLIB. If you want to use the
GSLIB compiled code called from Python workflows use the functions available
with geostatspy.GSLIB.
This file includes the (1) GSLIB subroutines (converted to Python), followed by
the (2) functions: declus, gam, gamv, nscore, kb2d (more added all the time)
Note: some GSLIB subroutines are not included as they were replaced by available
NumPy and SciPy functionality or they were not required as we don't have to deal
with graphics and files in the same manner as GSLIB.
The original GSLIB code is from GSLIB: Geostatistical Library by Deutsch and
Journel, 1998. The reimplementation is by Michael Pyrcz, Associate Professor,
the University of Texas at Austin.
"""
import math # for trig functions etc.
from bisect import bisect # for maintaining array elements sorted
import numpy as np # for ndarrays
import numpy.linalg as linalg # for linear algebra
import scipy.spatial as sp # for fast nearest neighbor search
from numba import jit # for numerical speed up
from statsmodels.stats.weightstats import DescrStatsW
def backtr(df,vcol,vr,vrg,zmin,zmax,ltail,ltpar,utail,utpar):
"""Back transform an entire DataFrame column with a provided transformation table and tail extrapolation.
:param df: the source DataFrame
    :param vcol: the column with the variable to transform
    :param vr: the transformation table, 1D ndarray with the original values
    :param vrg: the transformation table, 1D ndarray with the transformed variable
:param zmin: lower trimming limits
:param zmax: upper trimming limits
:param ltail: lower tail value
:param ltpar: lower tail extrapolation parameter
:param utail: upper tail value
:param utpar: upper tail extrapolation parameter
:return: backtr: the DataFrame column, which has been back transformed using
the provided transformation table and tail extrapolation.
"""
EPSLON=1.0e-20
nd = len(df); nt = len(vr) # number of data to transform and number of data in table
backtr = np.zeros(nd)
vrgs = df[vcol].values
# Value in the lower tail? 1=linear, 2=power, (3 and 4 are invalid):
for id in range(0,nd):
if vrgs[id] <= vrg[0]:
backtr[id] = vr[0]
cdflo = gcum(vrg[0])
cdfbt = gcum(vrgs[id])
if ltail == 1:
backtr[id] = powint(0.0,cdflo,zmin,vr[0],cdfbt,1.0)
elif ltail == 2:
cpow = 1.0 / ltpar
backtr[id] = powint(0.0,cdflo,zmin,vr[0],cdfbt,cpow)
# Value in the upper tail? 1=linear, 2=power, 4=hyperbolic:
elif vrgs[id] >= vrg[nt-1]:
backtr[id] = vr[nt-1]
cdfhi = gcum(vrg[nt-1])
cdfbt = gcum(vrgs[id])
if utail == 1:
backtr[id] = powint(cdfhi,1.0,vr[nt-1],zmax,cdfbt,1.0)
elif utail == 2:
cpow = 1.0 / utpar
backtr[id] = powint(cdfhi,1.0,vr[nt-1],zmax,cdfbt,cpow)
elif utail == 4:
plambda = (vr[nt-1]**utpar)*(1.0-gcum(vrg[nt-1]))
                backtr[id] = (plambda/(1.0-gcum(vrgs[id])))**(1.0/utpar)
else:
# Value within the transformation table:
j = locate(vrg,1,nt,vrgs[id])
j = max(min((nt-2),j),1)
backtr[id] = powint(vrg[j],vrg[j+1],vr[j],vr[j+1],vrgs[id],1.0)
return backtr
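# Hedged usage sketch for backtr (illustrative only; 'df', 'NPor', 'table_vr' and
# 'table_vrg' are assumed names, not defined in this module). The transformation
# table arrays must be sorted and consistent, e.g. as produced by a normal score
# transform.
# por = backtr(df, 'NPor', table_vr, table_vrg, zmin=0.05, zmax=0.25,
#              ltail=1, ltpar=1.0, utail=1, utpar=1.0)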
def backtr_value(vrgs,vr,vrg,zmin,zmax,ltail,ltpar,utail,utpar):
"""Back transform a single value with a provided transformation table and tail extrapolation.
:param vrgs: value to transform
:param vr: the transformation table, 1D ndarray with the original values
    :param vrg: the transformation table, 1D ndarray with the transformed variable
:param zmin: lower trimming limits
:param zmax: upper trimming limits
:param ltail: lower tail value
:param ltpar: lower tail extrapolation parameter
:param utail: upper tail value
:param utpar: upper tail extrapolation parameter
    :return: the back transformed value
"""
EPSLON=1.0e-20
nt = len(vr) # number of data to transform
# Value in the lower tail? 1=linear, 2=power, (3 and 4 are invalid):
if vrgs <= vrg[0]:
backtr = vr[0]
cdflo = gcum(vrg[0])
cdfbt = gcum(vrgs)
if ltail == 1:
backtr = dpowint(0.0,cdflo,zmin,vr[0],cdfbt,1.0)
elif ltail == 2:
cpow = 1.0 / ltpar
backtr = dpowint(0.0,cdflo,zmin,vr[0],cdfbt,cpow)
# Value in the upper tail? 1=linear, 2=power, 4=hyperbolic:
elif vrgs >= vrg[nt-1]:
backtr = vr[nt-1]
cdfhi = gcum(vrg[nt-1])
cdfbt = gcum(vrgs)
if utail == 1:
backtr = dpowint(cdfhi,1.0,vr[nt-1],zmax,cdfbt,1.0)
elif utail == 2:
cpow = 1.0 / utpar
backtr = dpowint(cdfhi,1.0,vr[nt-1],zmax,cdfbt,cpow)
elif utail == 4:
plambda = (vr[nt-1]**utpar)*(1.0-gcum(vrg[nt-1]))
backtr = (plambda/(1.0-gcum(vrgs)))**(1.0/utpar)
else:
# Value within the transformation table:
j = dlocate(vrg,1,nt,vrgs)
j = max(min((nt-2),j),1)
backtr = dpowint(vrg[j],vrg[j+1],vr[j],vr[j+1],vrgs,1.0)
return backtr
def gcum(x):
"""Calculate the cumulative probability of the standard normal distribution.
:param x: the value from the standard normal distribution
:type x: float
    :return: cumulative probability that a standard normal variate is less than or equal to x
:type: float
"""
z = x
if z < 0:
z = -z
t = 1./(1.+ 0.2316419*z)
gcum = t*(0.31938153 + t*(-0.356563782 + t*(1.781477937 + t*(-1.821255978 + t*1.330274429))))
e2 = 0.
# 6 standard deviations out gets treated as infinity:
if z <= 6.:
e2 = np.exp(-z*z/2.)*0.3989422803
gcum = 1.0- e2 * gcum
if x >= 0:
return gcum
gcum = 1.0 - gcum
return gcum
def locate(xx,iis,iie,x):
"""Return value `j` such that `x` is between `xx[j]` and `xx[j+1]`, where
`xx` is an array of length `n`, and `x` is a given value. `xx` must be
monotonic, either increasing or decreasing (GSLIB version).
:param xx: monotonic array to be searched
:type: array
:param iis: start point
:type: integer
:param iie: end point
:type: integer
:param x: given value
:type: integer
:return: location (index) in array
:type: int
"""
n = len(xx)
    # Initialize the lower and upper limits:
if iis <= 0:
iis = 0
if iie >= n:
iie = n-1
jl = iis-1
ju = iie
if xx[n-1] <= x:
j = iie
return j
# If we are not done then compute a midpoint:
while (ju-jl) > 1:
jm = int((ju+jl)/2)
# Replace the lower or upper limit with the midpoint:
if (xx[iie] > xx[iis]) == (x > xx[jm]):
jl = jm
else:
ju = jm
# Return with the array index:
j = jl
return j
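# Worked example for locate (illustrative): with xx = np.array([1., 2., 3., 4., 5.]),
# locate(xx, 1, 5, 3.5) returns 2, i.e. 3.5 lies between xx[2] and xx[3].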
def dlocate(xx, iis, iie, x):
"""Return value `j` such that `x` is between `xx[j]` and `xx[j+1]`, where
`xx` is an array of length `n`, and `x` is a given value. `xx` must be
monotonic, either increasing or decreasing (updated with Python bisect)
:param xx: array
:param iis: start point
:param iie: end point
:param x: given value
:return: TODO
"""
n = len(xx)
if iie <= iis:
iis = 0
iie = n - 1
    array = xx[iis: iie - 1]  # this is accounting for switch to 0,...,n-1 index
j = bisect(array, x)
return j
def powint(xlow,xhigh,ylow,yhigh,xval,power):
"""Power-based interpolator
:param xlow: x lower interval
:param xhigh: x upper interval
:param ylow: y lower interval
:param yhigh: y upper interval
:param xval: value on x
:param power: power for interpolation
:return: TODO
"""
EPSLON=1.0e-20
if (xhigh-xlow) < EPSLON:
powint = (yhigh+ylow)/2.0
else:
powint = ylow + (yhigh-ylow)*(((xval-xlow)/(xhigh-xlow))**power)
return powint
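# Worked example for powint (illustrative): powint(0.0, 1.0, 0.0, 10.0, 0.5, 1.0)
# returns 5.0 (linear interpolation), while a power of 2.0 returns 10.0 * 0.5**2 = 2.5.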
def dsortem(ib, ie, a, iperm, b=0, c=0, d=0, e=0, f=0, g=0, h=0):
"""Sort array in ascending order.
:param ib: start index
:param ie: end index
:param a: array
:param iperm: 0 no other array is permuted.
1 array b is permuted according to array a.
2 arrays b, c are permuted.
3 arrays b, c, d are permuted.
4 arrays b, c, d, e are permuted.
5 arrays b, c, d, e, f are permuted.
6 arrays b, c, d, e, f, g are permuted.
7 arrays b, c, d, e, f, g, h are permuted.
>7 no other array is permuted.
:param b: array to be permuted according to array a.
:param c: array to be permuted according to array a.
:param d: array to be permuted according to array a.
:param e: array to be permuted according to array a.
:param f: array to be permuted according to array a.
:param g: array to be permuted according to array a.
:param h: array to be permuted according to array a.
:return: a: the array, a portion of which has been sorted.
b, c, d, e, f, g, h: arrays permuted according to array a (see
iperm)
"""
a = a[ib:ie]
inds = a.argsort()
a = np.copy(a[inds]) # deepcopy forces pass to outside scope
if iperm == 1:
return a
b_slice = b[ib:ie]
b = b_slice[inds]
if iperm == 2:
return a, b
c_slice = c[ib:ie]
c = c_slice[inds]
if iperm == 3:
return a, b, c
d_slice = d[ib:ie]
d = d_slice[inds]
if iperm == 4:
return a, b, c, d
e_slice = e[ib:ie]
e = e_slice[inds]
if iperm == 5:
return a, b, c, d, e
f_slice = f[ib:ie]
f = f_slice[inds]
if iperm == 6:
return a, b, c, d, e, f
g_slice = g[ib:ie]
g = g_slice[inds]
if iperm == 7:
return a, b, c, d, e, f, g # TODO: changed from 'a, b, c, d, e, f, h'
h_slice = h[ib:ie]
h = h_slice[inds]
return a, b, c, d, e, f, g, h # TODO: changed from 'a, b, c, d, e, f, h'
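# Hedged usage sketch for dsortem (illustrative arrays): sort a and carry b along.
# a = np.array([3.0, 1.0, 2.0]); b = np.array([30.0, 10.0, 20.0])
# a_sorted, b_sorted = dsortem(0, 3, a, 2, b=b)  # -> [1., 2., 3.] and [10., 20., 30.]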
def gauinv(p):
"""Compute the inverse of the standard normal cumulative distribution
function.
:param p: cumulative probability value
:type: float
:return: inverse of the normal cdf, evaluated at probability p
:type: float
"""
lim = 1.0e-10
p0 = -0.322_232_431_088
p1 = -1.0
p2 = -0.342_242_088_547
p3 = -0.020_423_121_024_5
p4 = -0.000_045_364_221_014_8
q0 = 0.099_348_462_606_0
q1 = 0.588_581_570_495
q2 = 0.531_103_462_366
q3 = 0.103_537_752_850
q4 = 0.003_856_070_063_4
# Check for an error situation
if p < lim:
xp = -1.0e10
return xp
if p > (1.0 - lim):
xp = 1.0e10
return xp
# Get k for an error situation
pp = p
if p > 0.5:
pp = 1 - pp
xp = 0.0
if p == 0.5:
return xp
# Approximate the function
y = np.sqrt(np.log(1.0 / (pp * pp)))
xp = float(
y
+ ((((y * p4 + p3) * y + p2) * y + p1) * y + p0)
/ ((((y * q4 + q3) * y + q2) * y + q1) * y + q0)
)
if float(p) == float(pp):
xp = -xp
return xp
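# Worked example for gauinv (illustrative): gauinv(0.5) returns 0.0 and
# gauinv(0.975) returns approximately 1.96, the familiar two-sided 95% quantile.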
def gcum(x):
"""Evaluate the standard normal cdf given a normal deviate `x`. `gcum` is
the area under a unit normal curve to the left of `x`. The results are
accurate only to about 5 decimal places.
:param x: normal deviate
:type: float
:return: proportion of area left of x
:type: float
"""
z = x
if z < 0:
z = -z
t = 1.0 / (1.0 + 0.231_641_9 * z)
gcum_ = t * (
0.319_381_53
+ t
* (
-0.356_563_782
+ t * (1.781_477_937 + t * (-1.821_255_978 + t * 1.330_274_429))
)
)
e2 = 0.0
# Standard deviations out gets treated as infinity
if z <= 6:
e2 = np.exp(-z * z / 2.0) * 0.398_942_280_3
gcum_ = 1.0 - e2 * gcum_
if x >= 0.0:
return gcum_
gcum_ = 1.0 - gcum_
return gcum_
def dpowint(xlow, xhigh, ylow, yhigh, xval, pwr):
"""Power interpolate the value of `y` between (`xlow`, `ylow`) and
(`xhigh`, `yhigh`) for a value of `x` and a power `pwr`.
:param xlow: minimum x-value for the interpolation
:type xlow: float
:param xhigh: int: maximum x-value for the interpolation
:type xhigh: float
:param ylow: minimum y-value for the interpolation
:type ylow: float
:param yhigh: maximum y-value for the interpolation
:type yhigh: float
:param xval: x-values used to calculate the interpolation value
:type xval: float or array
:param pwr: power value used in the interpolation calculation
:type pwr: float
:return: interpolated y-values
"""
EPSLON = 1.0e-20
if (xhigh - xlow) < EPSLON:
dpowint_ = (yhigh + ylow) / 2.0
else:
dpowint_ = ylow + (yhigh - ylow) * (
((xval - xlow) / (xhigh - xlow)) ** pwr
)
return dpowint_
#@jit(nopython=True) # all NumPy array operations included in this function for precompile with NumBa
def setup_rotmat2(c0,nst,it,cc,ang):
DTOR=3.14159265/180.0; EPSLON=0.000000; PI=3.141593
# The first time around, re-initialize the cosine matrix for the
# variogram structures:
rotmat = np.zeros((4,nst))
maxcov = c0
for js in range(0,nst):
azmuth = (90.0-ang[js])*DTOR
rotmat[0,js] = math.cos(azmuth)
rotmat[1,js] = math.sin(azmuth)
rotmat[2,js] = -1*math.sin(azmuth)
rotmat[3,js] = math.cos(azmuth)
if it[js] == 4:
maxcov = maxcov + 9999.9
else:
maxcov = maxcov + cc[js]
return rotmat, maxcov
@jit(nopython=True)
def setup_rotmat(c0, nst, it, cc, ang, pmx):
"""Setup rotation matrix.
:param c0: nugget constant (isotropic)
:param nst: number of nested structures (max. 4)
:param it: TODO
:param cc: multiplicative factor of each nested structure
:param ang: TODO
:param pmx: TODO
:return: TODO
"""
PI = 3.141_592_65
DTOR = PI / 180.0
# The first time around, re-initialize the cosine matrix for the variogram
# structures
rotmat = np.zeros((4, nst))
maxcov = c0
for js in range(0, nst):
azmuth = (90.0 - ang[js]) * DTOR
rotmat[0, js] = math.cos(azmuth)
rotmat[1, js] = math.sin(azmuth)
rotmat[2, js] = -1 * math.sin(azmuth)
rotmat[3, js] = math.cos(azmuth)
if it[js] == 4:
maxcov = maxcov + pmx
else:
maxcov = maxcov + cc[js]
return rotmat, maxcov
@jit(nopython=True)
def cova2(x1, y1, x2, y2, nst, c0, pmx, cc, aa, it, ang, anis, rotmat, maxcov):
"""Calculate the covariance associated with a variogram model specified by
a nugget effect and nested variogram structures.
:param x1: x coordinate of first point
:type x1: float
:param y1: y coordinate of first point
:type y1: float
:param x2: x coordinate of second point
:type x2: float
:param y2: y coordinate of second point
:type y2: float
:param nst: number of nested structures (maximum of 4)
:type nst: int
:param c0: isotropic nugget constant (TODO: not used)
:type c0: float
:param pmx: Maximum variogram value needed for kriging when using power
model. pmx is a unique value used for all nested structures
that use the power model, so pmx should be chosen to account
for the largest structure that uses the power model.
:type pmx: float
:param cc: multiplicative factor of each nested structure
:type cc: array
:param aa: parameter `a` of each nested structure
:type aa: array
:param it: Integer value indicating type of variogram model
for values 0,1,2,..., nst
it[value] == 1: Spherical model
(aa[value] == `a` is the range, cc[value] is the contribution)
it[value] == 2: Exponential model
(aa[value] == `a`, 3a is the practical range,
cc[value] is the contribution)
it[value] == 3: Gaussian model
(aa[value] == `a`, a*sqrt(3) is the practical range),
cc[value] is the contribution)
it[value] == 4: Power model
(aa[value] == `a` is the power such that 0 < a < 2,
if linear, then a == 1, and cc[value] is the slope)
:type it: array
:param ang: azimuth angle measured in degrees clockwise from positive
y-diretion for each variogram structure: not used
(accounted for in anis)
:type ang: array
:param anis: Anistropy factors that apply after rotations
:type anis: array
:param rotmat: rotation matrices
:type rotmat: array
:param maxcov: maximum covariance value
:type maxcov: float
:return: covariance of a nested variogram model described by the inputs
:type return: float
"""
EPSLON = 0.000001
# Check for very small distance
dx = x2 - x1
dy = y2 - y1
if (dx * dx + dy * dy) < EPSLON:
cova2_ = maxcov
return cova2_
# Non-zero distance, loop over all the structures
cova2_ = 0.0
for js in range(0, nst):
# Compute the appropriate structural distance
dx1 = dx * rotmat[0, js] + dy * rotmat[1, js]
dy1 = (dx * rotmat[2, js] + dy * rotmat[3, js]) / anis[js]
h = math.sqrt(max((dx1 * dx1 + dy1 * dy1), 0.0))
if it[js] == 1:
# Spherical model
hr = h / aa[js]
if hr < 1.0:
                cova2_ = cova2_ + cc[js] * (1.0 - hr * (1.5 - 0.5 * hr * hr))
elif it[js] == 2:
# Exponential model
cova2_ = cova2_ + cc[js] * np.exp(-3.0 * h / aa[js])
elif it[js] == 3:
# Gaussian model
hh = -3.0 * (h * h) / (aa[js] * aa[js])
cova2_ = cova2_ + cc[js] * np.exp(hh)
elif it[js] == 4:
# Power model
cov1 = pmx - cc[js] * (h ** aa[js])
cova2_ = cova2_ + cov1
return cova2_
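# Hedged usage sketch for cova2 (illustrative parameters): a single spherical
# structure with no nugget, unit sill, range 100 and no anisotropy.
# nst = 1; c0 = 0.0
# it = np.array([1]); cc = np.array([1.0]); aa = np.array([100.0])
# ang = np.array([0.0]); anis = np.array([1.0])
# rotmat, maxcov = setup_rotmat(c0, nst, it, cc, ang, 9999.9)
# cova2(0.0, 0.0, 0.0, 0.0, nst, c0, 9999.9, cc, aa, it, ang, anis, rotmat, maxcov)    # -> 1.0
# cova2(0.0, 0.0, 100.0, 0.0, nst, c0, 9999.9, cc, aa, it, ang, anis, rotmat, maxcov)  # -> 0.0 at the range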
def sqdist(x1,y1,z1,x2,y2,z2,ind,rotmat):
# Compute component distance vectors and the squared distance:
dx = x1 - x2
dy = y1 - y2
dz = 0.0
sqdist = 0.0
    for i in range(1,3):  # rows 1 and 2 of rotmat hold the 2D rotation (see setrot)
cont = rotmat[ind,i,1] * dx + rotmat[ind,i,2] * dy
sqdist = sqdist + cont * cont
return sqdist
def sqdist2(x1,y1,x2,y2,ist,rotmat,anis):
"""Calculate the 2D square distance based on geometric ani
:param x1: x coordinate of first point
:param y1: y coordinate of first point
:param x2: x coordinate of second point
:param y2: y coordinate of second point
:param ist: structure index
:param rotmat: 2d rotation matrix
:param anis: 2D anisotropy ratio
:return: TODO
"""
# Compute component distance vectors and the squared distance:
dx = x1 - x2
dy = y1 - y2
dx1 = (dx*rotmat[0,ist] + dy*rotmat[1,ist])
dy1 = (dx*rotmat[2,ist] + dy*rotmat[3,ist])/anis[ist]
sqdist_ = (dx1*dx1+dy1*dy1)
return sqdist_
def setrot(ang1,ang2,sang1,anis1,anis2,sanis1,nst,MAXROT):
"""GSLIB's SETROT subroutine (Deutsch and Journel, 1998) converted from the
original Fortran to Python by Michael Pyrcz, the University of Texas at
Austin (March, 2019).
Note this was simplified to 2D only.
"""
DEG2RAD = 3.141592654/180.0; EPSLON=1.e-20
rotmat = np.zeros((MAXROT+1,3,3))
if ang1 >= 0.0 and ang1 < 270.0:
alpha = (90.0 - ang1) * DEG2RAD
else:
alpha = (450.0 - ang1) * DEG2RAD
# Get the required sines and cosines:
sina = math.sin(alpha)
cosa = math.cos(alpha)
# Construct the rotation matrix in the required memory:
afac1 = 1.0 / (max(anis1,EPSLON))
rotmat[0,1,1] = cosa
rotmat[0,1,2] = sina
rotmat[0,2,1] = afac1*(-sina)
rotmat[0,2,2] = afac1*(cosa)
# 2nd structure if present
if nst > 1:
if ang2 >= 0.0 and ang2 < 270.0:
alpha = (90.0 - ang2) * DEG2RAD
else:
alpha = (450.0 - ang2) * DEG2RAD
# Get the required sines and cosines:
sina = math.sin(alpha)
cosa = math.cos(alpha)
# Construct the rotation matrix in the required memory:
afac2 = 1.0 / (max(anis2,EPSLON))
rotmat[1,1,1] = cosa
rotmat[1,1,2] = sina
        rotmat[1,2,1] = afac2*(-sina)
        rotmat[1,2,2] = afac2*(cosa)
# search rotation
if sang1 >= 0.0 and sang1 < 270.0:
alpha = (90.0 - sang1) * DEG2RAD
else:
alpha = (450.0 - sang1) * DEG2RAD
# Get the required sines and cosines:
sina = math.sin(alpha)
cosa = math.cos(alpha)
# Construct the rotation matrix in the required memory:
afac1 = 1.0 / (max(sanis1,EPSLON))
rotmat[MAXROT,1,1] = cosa
rotmat[MAXROT,1,2] = sina
rotmat[MAXROT,2,1] = afac1*(-sina)
rotmat[MAXROT,2,2] = afac1*(cosa)
# Return to calling program:
return rotmat
def ksol_numpy(neq, a, r):
"""Find solution of a system of linear equations.
:param neq: number of equations
:param a: upper triangular left hand side matrix
:param r: right hand side matrix
:return: solution array, same dimension as `r`
"""
a = a[0: neq * neq] # trim the array
a = np.reshape(a, (neq, neq)) # reshape to 2D
ainv = linalg.inv(a) # invert matrix
r = r[0: neq] # trim the array
s = np.matmul(ainv, r) # matrix multiplication
return s
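# Worked example for ksol_numpy (illustrative): a 2 x 2 kriging-style system stored
# row-wise in a flat array.
# a = np.array([1.0, 0.2, 0.2, 1.0]); r = np.array([0.8, 0.4])
# ksol_numpy(2, a, r)  # -> array([0.75, 0.25]), same result as np.linalg.solve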
def ctable(MAXNOD,MAXCXY,MAXCTX,MAXCTY,MAXXYZ,xsiz,ysiz,isrot,nx,ny,nst,c0,cc,aa,it,ang,anis,global_rotmat,radsqd):
"""GSLIB's CTABLE subroutine (Deutsch and Journel, 1998) converted from the
original Fortran to Python by Michael Pyrcz, the University of Texas at
Austin (March, 2019).
Note this was simplified to 2D only, WARNING: only spiral search setup works currently.
"""
# Declare constants
TINY = 1.0e-10
PMX = 9999.9
MAXROT=2
# Size of the look-up table:
tmp = np.zeros(MAXXYZ)
MAXORD = MAXXYZ
if (nx*ny) < MAXCXY:
MAXORD = MAXCXY
order = np.zeros(MAXORD)
nctx = int(min(((MAXCTX-1)/2),(nx-1)))
ncty = int(min(((MAXCTY-1)/2),(ny-1)))
# print('CTable check')
# print('nctx ' + str(nctx) + ', ncty ' + str(ncty))
ixnode = np.zeros(MAXXYZ)
iynode = np.zeros(MAXXYZ)
covtab = np.zeros((MAXCTX,MAXCTY))
# Initialize the covariance subroutine and cbb at the same time:
rotmat, maxcov = setup_rotmat2(c0,nst,it,cc,ang)
cbb = cova2(0.0,0.0,0.0,0.0,nst,c0,PMX,cc,aa,it,ang,anis,rotmat,maxcov)
# Now, set up the table and keep track of the node offsets that are
# within the search radius:
nlooku = -1 # adjusted for 0 origin
for i in range(-nctx,nctx+1): # cover entire range
xx = i * xsiz
ic = nctx + i
for j in range(-ncty,ncty+1): # cover entire range
yy = j * ysiz
jc = ncty + j
covtab[ic,jc] = cova2(0.0,0.0,xx,yy,nst,c0,PMX,cc,aa,it,ang,anis,rotmat,maxcov)
# print('cov table offset'); print(xx,yy); print(covtab[ic,jc])
hsqd = sqdist(0.0,0.0,0.0,xx,yy,0.0,MAXROT,global_rotmat)
if hsqd <= radsqd:
nlooku = nlooku + 1
# We want to search by closest variogram distance (and use the
# anisotropic Euclidean distance to break ties:
tmp[nlooku] = - (covtab[ic,jc] - TINY*hsqd)
order[nlooku] = (jc)*MAXCTX+ic
# print('populated presort'); print(tmp,order)
# Finished setting up the look-up table, now order the nodes such
# that the closest ones, according to variogram distance, are searched
# first. Note: the "loc" array is used because I didn't want to make
# special allowance for 2 byte integers in the sorting subroutine:
nlooku = nlooku + 1
# print('nlooku' + str(nlooku)); print('MAXCTX' + str(MAXCTX))
tmp, order = dsortem(0,nlooku,tmp,2,b=order)
# print('populated postsort'); print(tmp,order)
for il in range(0,nlooku):
loc = int(order[il])
iy = int((loc-0)/MAXCTX)
ix = loc - (iy-0)*MAXCTX
iynode[il] = int(iy)
ixnode[il] = int(ix)
# print('populated ix, iy node list'); print(ixnode, iynode)
return covtab,tmp,order,ixnode,iynode,nlooku,nctx,ncty
def srchnd(ix,iy,nx,ny,xmn,ymn,xsiz,ysiz,sim,noct,nodmax,ixnode,iynode,nlooku,nctx,ncty,UNEST):
"""GSLIB's SRCHND subroutine (Deutsch and Journel, 1998) converted from the
original Fortran to Python by Michael Pyrcz, the University of Texas at
Austin (March, 2019).
Note this was simplified to 2D only.
"""
# Consider all the nearby nodes until enough have been found:
ncnode = 0;
icnode = np.zeros(nodmax,dtype=int); icnode.fill(-1)
cnodev = np.zeros(nodmax);cnodex = np.zeros(nodmax); cnodey = np.zeros(nodmax);
# print('Node search at '); print(ix,iy)
# print('nlooku'); print(nlooku)
if noct > 0:
ninoct = np.zeros(8)
for il in range(0,nlooku):
if ncnode == nodmax: return ncnode, icnode, cnodev, cnodex, cnodey
i = ix + (int(ixnode[il])-nctx)
j = iy + (int(iynode[il])-ncty)
# print('i,j'); print(i,j)
if i < 0 or j < 0: continue
if i >= nx or j >= ny: continue
ind = i + (j)*nx
if sim[ind] > UNEST:
icnode[ncnode] = il
cnodex[ncnode] = xmn + (i)*xsiz # adjust for 0 origin
cnodey[ncnode] = ymn + (j)*ysiz
cnodev[ncnode] = sim[ind]
# print('srchnd found at index - ' +str(ind) + ' at x and y ' + str(cnodex[ncnode]) + ',' + str(cnodey[ncnode]))
# print(' ix = ' + str(i) + ' and iy = ' + str(j))
# print(' value = ' + str(sim[ind]))
ncnode = ncnode + 1 # moved to account for origin 0
return ncnode, icnode, cnodev, cnodex, cnodey
def beyond(ivtype,nccut,ccut,ccdf,ncut,cut,cdf,zmin,zmax,ltail,ltpar,middle,mpar,utail,utpar,zval,cdfval):
"""GSLIB's BEYOND subroutine (Deutsch and Journel, 1998) converted from the
original Fortran to Python by Michael Pyrcz, the University of Texas at
Austin (March, 2019).
Note this was simplified to 2D only.
"""
EPSLON = 1.0e-20; UNEST=-1.0
# Check for both "zval" and "cdfval" defined or undefined:
ierr = 1;
    if zval > UNEST and cdfval > UNEST:
        return -1
    if zval <= UNEST and cdfval <= UNEST:
        return -1
# Handle the case of a categorical variable:
if ivtype == 0:
cum = 0
for i in range(0,nccut):
cum = cum + ccdf[i]
if cdfval <= cum:
zval = ccut[i]
return zval
return zval
# Figure out what part of distribution: ipart = 0 - lower tail
# ipart = 1 - middle
# ipart = 2 - upper tail
ierr = 0
ipart = 1
    if zval > UNEST:
if zval <= ccut[0]:
ipart = 0
if zval >= ccut[nccut-1]:
ipart = 2
else:
if cdfval <= ccdf[0]:
ipart = 0
if cdfval >= ccdf[nccut-1]:
ipart = 2
# ARE WE IN THE LOWER TAIL?
if ipart == 0:
if ltail ==1:
# Straight Linear Interpolation:
powr = 1.0
if zval > UNEST:
cdfval = powint(zmin,ccut[0],0.0,ccdf[0],zval,powr)
else:
zval = powint(0.0,ccdf[0],zmin,ccut[0],cdfval,powr)
elif ltail == 2:
# Power Model interpolation to lower limit "zmin"?
if zval > UNEST:
cdfval = powint(zmin,ccut[0],0.0,ccdf[0],zval,ltpar)
else:
powr = 1.0 / ltpar
zval = powint(0.0,ccdf[0],zmin,ccut[0],cdfval,powr)
# Linear interpolation between the rescaled global cdf?
elif ltail == 3:
if zval > UNEST:
# Computing the cdf value. Locate the point and the class bound:
idat = locate(cut,1,ncut,zval)
                iupp = locate(cut,1,ncut,ccut[0])
# Straight linear interpolation if no data; otherwise, linear:
                if idat <= -1 or idat >= ncut-1 or iupp <= -1 or iupp >= ncut-1: # modify for 0 index
cdfval = powint(zmin,cut[0],0.0,cdf[0],zval,1.)
else:
temp = powint(cut[idat],cut[idat+1],cdf[idat],cdf[idat+1],zval,1.)
cdfval = temp*ccdf[0]/cdf[iupp]
else:
# Computing Z value: Are there any data out in the tail?
                iupp = locate(cut,1,ncut,ccut[0])
# Straight linear interpolation if no data; otherwise, local linear
# interpolation:
if iupp <= 0 or iupp >= ncut:
zval = powint(0.0,cdf[0],zmin,cut[0],cdfval,1.)
else:
                    temp = cdfval*cdf[iupp]/ccdf[0]
                    idat = locate(cdf,1,ncut,temp)
if idat <= -1 or idat >= ncut-1: # adjusted for 0 origin
zval = powint(0.0,cdf[0],zmin,cut[0],cdfval,1.)
else:
                        zval = powint(cdf[idat],cdf[idat+1],cut[idat],cut[idat+1],temp,1.)
else:
# Error situation - unacceptable option:
ierr = 2
return -1
# FINISHED THE LOWER TAIL, ARE WE IN THE MIDDLE?
if ipart == 1:
# Establish the lower and upper limits:
if zval > UNEST:
cclow = locate(ccut,1,nccut,zval)
else:
cclow = locate(ccdf,1,nccut,cdfval)
cchigh = cclow + 1
if middle == 1:
# Straight Linear Interpolation:
powr = 1.0
if zval > UNEST:
cdfval = powint(ccut[cclow],ccut[cchigh],ccdf[cclow],ccdf[cchigh],zval,powr)
else:
zval = powint(ccdf[cclow],ccdf[cchigh],ccut[cclow],ccut[cchigh],cdfval,powr)
# Power interpolation between class bounds?
elif middle == 2:
if zval > UNEST:
cdfval = powint(ccut[cclow],ccut[cchigh],ccdf[cclow],ccdf[cchigh],zval,mpar)
else:
powr = 1.0 / mpar
zval = powint(ccdf[cclow],ccdf[cchigh],ccut[cclow],ccut[cchigh],cdfval,powr)
# Linear interpolation between the rescaled global cdf?
elif middle == 3:
            ilow = locate(cut,1,ncut,ccut[cclow])
            iupp = locate(cut,1,ncut,ccut[cchigh])
if cut[ilow] < ccut[cclow]:
ilow = ilow + 1
if cut[iupp] > ccut[cchigh]:
iupp = iupp - 1
if zval > UNEST:
idat = locate(cut,1,ncut,zval)
# Straight linear interpolation if no data; otherwise, local linear
# interpolation:
if idat <= -1 or idat >= ncut-1 or ilow <= -1 or ilow >= ncut-1 or iupp <= -1 or iupp >= ncut-1 or iupp <= ilow:
cdfval=powint(ccut[cclow],ccut[cchigh],ccdf[cclow],ccdf[cchigh],zval,1.)
else:
temp = powint(cut[idat],cut[idat+1],cdf[idat],cdf[idat+1],zval,1.)
cdfval=powint(cdf[ilow],cdf[iupp],ccdf[cclow],ccdf[cchigh],temp,1.)
else:
# Straight linear interpolation if no data; otherwise, local linear
# interpolation:
                if ilow <= -1 or ilow >= ncut-1 or iupp <= -1 or iupp >= ncut-1 or iupp < ilow:
zval=powint(ccdf[cclow],ccdf[cchigh],ccut[cclow],ccut[cchigh],cdfval,1.)
else:
temp=powint(ccdf[cclow],ccdf[cchigh],cdf[ilow],cdf[iupp],cdfval,1.)
idat = locate(cdf,1,ncut,temp)
if cut[idat] < ccut[cclow]:
idat=idat+1
                    if idat <= -1 or idat >= ncut-1 or cut[idat+1] > ccut[cchigh]:
                        zval = powint(ccdf[cclow],ccdf[cchigh],ccut[cclow],ccut[cchigh],cdfval,1.)
                    else:
                        zval = powint(cdf[idat],cdf[idat+1],cut[idat],cut[idat+1],temp,1.)
else:
# Error situation - unacceptable option:
ierr = 2
return -1
# FINISHED THE MIDDLE, ARE WE IN THE UPPER TAIL?
if ipart == 2:
if utail == 1:
powr = 1.0
if zval > UNEST:
                cdfval = powint(ccut[nccut-1],zmax,ccdf[nccut-1],1.0,zval,powr)
            else:
                zval = powint(ccdf[nccut-1],1.0,ccut[nccut-1],zmax,cdfval,powr)
        elif utail == 2:
            # Power interpolation to upper limit "utpar"?
            if zval > UNEST:
                cdfval = powint(ccut[nccut-1],zmax,ccdf[nccut-1],1.0,zval,utpar)
            else:
                powr = 1.0 / utpar
                zval = powint(ccdf[nccut-1],1.0,ccut[nccut-1],zmax,cdfval,powr)
# Linear interpolation between the rescaled global cdf?
elif utail == 3:
if zval > UNEST:
# Approximately Locate the point and the class bound:
                idat = locate(cut,1,ncut,zval)
                ilow = locate(cut,1,ncut,ccut[nccut-1])
if cut[idat] < zval:
idat = idat + 1
if cut[ilow] < ccut[nccut-1]:
ilow = ilow + 1
# Straight linear interpolation if no data; otherwise, local linear
# interpolation:
if idat < -1 or idat >= ncut-1 or ilow <= -1 or ilow >= ncut-1:
                    cdfval = powint(ccut[nccut-1],zmax,ccdf[nccut-1],1.0,zval,1.)
                else:
                    temp = powint(cut[idat],cut[idat+1],cdf[idat],cdf[idat+1],zval,1.)
                    cdfval = powint(cdf[ilow],1.0,ccdf[nccut-1],1.0,temp,1.)
else:
# Computing Z value: Are there any data out in the tail?
                ilow = locate(cut,1,ncut,ccut[nccut-1])
if cut[ilow] < ccut[nccut-1]:
ilow = ilow + 1
# Straight linear interpolation if no data; otherwise, local linear
# interpolation:
if ilow <= -1 or ilow >= ncut-1:
                    zval = powint(ccdf[nccut-1],1.0,ccut[nccut-1],zmax,cdfval,1.)
                else:
                    temp = powint(ccdf[nccut-1],1.0,cdf[ilow],1.0,cdfval,1.)
                    idat = locate(cdf,1,ncut,temp)
if cut[idat] < ccut[nccut-1]:
idat=idat+1
if idat >= ncut-1:
zval = powint(ccdf[nccut-1],1.0,ccut[nccut-1],zmax,cdfval,1.)
else:
zval = powint(cdf[idat],cdf[idat+1],cut[idat],cut[idat+1],temp,1.)
# Fit a Hyperbolic Distribution?
elif utail == 4:
# Figure out "lambda" and required info:
            lambd = math.pow(ccut[nccut-1],utpar)*(1.0-ccdf[nccut-1])
if zval > UNEST:
cdfval = 1.0 - (lambd/(math.pow(zval,utpar)))
else:
                zval = math.pow(lambd/(1.0-cdfval),(1.0/utpar))
else:
# Error situation - unacceptable option:
ierr = 2
return -1
if zval < zmin:
zval = zmin
if zval > zmax:
zval = zmax
# All finished - return:
return zval
def krige(ix,iy,nx,ny,xx,yy,lktype,x,y,vr,sec,colocorr,lvm,close,covtab,nctx,ncty,icnode,ixnode,iynode,cnodev,cnodex,cnodey,nst,c0,PMX,cc,aa,it,ang,anis,rotmat,maxcov,MAXCTX,MAXCTY,MAXKR1,MAXKR2):
"""GSLIB's KRIGE subroutine (Deutsch and Journel, 1998) converted from the
original Fortran to Python by Michael Pyrcz, the University of Texas at
Austin (March, 2019).
Note this was simplified to 2D only.
"""
EPSLON = 1.0e-20
cur_index = ix + (iy)*nx
# print('krige at grid '); print(ix,iy)
# print('krige at node '); print(xx,yy)
# print('grid index = '); print(cur_index)
# print('Check ixnode '); print(ixnode); print(iynode)
nclose = len(close)
ncnode = (icnode >= 0).sum()
# print('In kriging, maxcov = ' + str(maxcov))
# print('kriging')
# print('nclose ' + str(nclose) + ', ncnode ' + str(ncnode))
# print('MAXKR1'); print(MAXKR1)
vra = np.zeros(MAXKR1); vrea = np.zeros(MAXKR1)
r = np.zeros(MAXKR1); rr = np.zeros(MAXKR1); s = np.zeros(MAXKR1); a = np.zeros(MAXKR2)
cbb = cova2(0,0,0,0,nst,c0,9999.9,cc,aa,it,ang,anis,rotmat,maxcov)
# print(r.shape)
# Local mean
if lktype == 2:
gmean = lvm[cur_index]
else:
gmean = 0.0
# Size of the kriging system:
first = False
na = nclose + ncnode
# print('lktype' + str(lktype))
if lktype == 0: neq = na
if lktype == 1:
# print('ordinary kriging')
neq = na + 1
if lktype == 2: neq = na
if lktype == 3: neq = na + 2
if lktype == 4: neq = na + 1
# print('prior matrix build neq'); print(neq)
# print('na'); print(na)
# Set up kriging matrices:
iin=-1 # acocunting for 0 origin
# print('krige na' + str(na))
for j in range(0,na):
# Sort out the actual location of point "j"
if j < nclose: # adjusted for 0 index origin
index = int(close[j])
x1 = x[index]
y1 = y[index]
vra[j] = vr[index]
# print('data: index = ' + str(index) + ', x,y ' + str(x1) + ',' + str(y1) + ', value = ' + str(vra[j]))
if sec.shape[0] > 1:
vrea[j]= sec[index];
else:
vrea[j] = 0.0 # added this - no effect
if lktype == 2: vra[j] = vra[j] - vrea[j]
else:
# It is a previously simulated node (keep index for table look-up):
# print(j)
index = j-(nclose) # adjust for 0 index
x1 = cnodex[index]
y1 = cnodey[index]
vra[j] = cnodev[index]
ind = icnode[index]
# print('prev node: index = ' + str(index) + ', x,y ' + str(x1) + ',' + str(y1) + ', value = ' + str(vra[j]))
ix1 = ix + (int(ixnode[ind])-nctx-1)
iy1 = iy + (int(iynode[ind])-ncty-1)
# print('ix1, iy1 = '); print(ix1,iy1)
index = ix1 + (iy1-1)*nx
if lktype == 2:
vrea[j]= lvm[index]
vra[j] = vra[j] - vrea[j]
for i in range(0,na): # we need the full matrix
# print('kriging indice populated' + str(j) + ',' + str(i))
# Sort out the actual location of point "i"
if i < nclose:
index = int(close[i]) # adjust for 0 index
x2 = x[index]
y2 = y[index]
else:
# It is a previously simulated node (keep index for table look-up):
#print('i = ' + str(i) + ',nclose = ' + str(nclose) + ', na = ' + str(na))
index = i-(nclose)
x2 = cnodex[index]
y2 = cnodey[index]
ind = icnode[index]
# print('previous node index' + str(ind))
ix2 = ix + (int(ixnode[ind])-nctx-1)
iy2 = iy + (int(iynode[ind])-ncty-1)
# Now, get the covariance value:
iin = iin + 1
# print('kriging data location = '); print(x2,y2)
# Decide whether or not to use the covariance look-up table:
if j <= nclose or i <= nclose:
cov = cova2(x1,y1,x2,y2,nst,c0,9999.9,cc,aa,it,ang,anis,rotmat,maxcov)
a[iin] = cov
else:
# Try to use the covariance look-up (if the distance is in range):
# ii = nctx + 1 + (ix1 - ix2)
# jj = ncty + 1 + (iy1 - iy2)
cov = cova2(x1,y1,x2,y2,nst,c0,9999.9,cc,aa,it,ang,anis,rotmat,maxcov)
# if ii < 0 or ii >= MAXCTX or jj < 0 or jj >= MAXCTY:
# cov = cova2(x1,y1,x2,y2,nst,c0,9999.9,cc,aa,it,ang,anis,rotmat,maxcov)
# else:
# cov = covtab[ii,jj]
# print(x1,y1,x2,y2,cov)
a[iin] = cov
# Get the RHS value (possibly with covariance look-up table):
if j <= nclose:
# print(cc,aa,it,ang,anis,rotmat,maxcov)
cov = cova2(xx,yy,x1,y1,nst,c0,9999.9,cc,aa,it,ang,anis,rotmat,maxcov)
# if cov >= 1.0:
# print('cov of 1.0 RHS for data ')
# print('ix,iy ='); print(xx,xx)
# print('ix1,iy1'); print(x1,y1)
r[j] = cov
else:
# Try to use the covariance look-up (if the distance is in range):
# ii = nctx + 1 + (ix - ix1)
# jj = ncty + 1 + (iy - iy1)
# print('RHS ctable coord' + str(ii) + ',' + str(jj))
# print('ix,iy ='); print(ix,iy)
# print('ix1,iy1'); print(ix1,iy1)
# if ii < 0 or ii >= MAXCTX or jj < 0 or jj >= MAXCTY: # adjusted for origin 0
# print('Not using covariance table')
# cov = cova2(xx,yy,x1,y1,nst,c0,9999.9,cc,aa,it,ang,anis,rotmat,maxcov)
# else:
# cov = covtab[ii,jj]
cov = cova2(xx,yy,x1,y1,nst,c0,9999.9,cc,aa,it,ang,anis,rotmat,maxcov)
# if cov >= 1.0:
# print('cov of 1.0 RHS for node ' + str(j))
# print('ix,iy ='); print(xx,xx)
# print('ix1,iy1'); print(x1,y1)
r[j] = cov
# print('kriging, writing RHS '+ str(j) + ',' + str(cov) + 'loc_est' + str(xx) + ',' + str(yy) + 'data' + str(x1) + ',' + str(y1))
rr[j] = r[j]
if lktype == 1: # we need the full array
iin = iin + 1
a[iin] = 1.0
if lktype == 4: # we need the full array
iin = iin + 1
a[iin] = colocorr*r[j]
# Addition of OK constraint:
if lktype == 1 or lktype == 3:
for i in range(0,na):
iin = iin + 1
a[iin] = 1.0
iin = iin + 1
a[iin] = 0.0
r[na] = 1.0
rr[na] = 1.0
# Addition of the External Drift Constraint:
if lktype == 3:
edmin = 999999.
edmax = -999999.
for i in range(0,na):
iin = iin + 1
            a[iin] = vrea[i]
            if a[iin] < edmin: edmin = a[iin]
            if a[iin] > edmax: edmax = a[iin]
iin = iin + 1
a[iin] = 0.0
iin = iin + 1
a[iin] = 0.0
ind = ix + (iy-1)*nx
r[na+1] = lvm[ind]
rr[na+1] = r[na+1]
if (edmax-edmin) < EPSLON: neq = neq - 1
# Addition of Collocated Cosimulation Constraint:
if lktype == 4:
colc = True
sfmin = 1.0e21
sfmax = -1.0e21
for i in range(0,na):
iin = iin + 1
a[iin] = colocorr*r[i]
if a[iin] < sfmin: sfmin = a[iin]
if a[iin] > sfmax: sfmax = a[iin]
iin = iin + 1
a[iin] = 1.0
ii = na
r[ii] = colocorr
rr[ii] = r[ii]
# if (sfmax-sfmin) < EPSLON:
# neq = neq - 1
# colc = False
# Solve the Kriging System:
# print('neq = ' + str(neq));
# print('a'); print(a)
# print('r'); print(r)
# print('data'); print(vra)
if neq == 1 and lktype != 3:
# print('neq = 1 '); print(a,r)
s[0] = r[0] / a[0]
else:
# print('neq prior ksol' + str(neq))
s = ksol_numpy(neq,a,r)
# print('neq post ksol' + str(neq))
# if s.shape[0]< neq:
# print('s shape'); print(s.shape)
# print('a'); print(a)
# print('r'); print(r)
ising = 0 # need to figure this out
# print('s'); print(s)
# Compute the estimate and kriging variance. Recall that kriging type
# 0 = Simple Kriging:
# 1 = Ordinary Kriging:
# 2 = Locally Varying Mean:
# 3 = External Drift:
# 4 = Collocated Cosimulation:
# print('kriging weights'); print(s)
cmean = 0.0
# print('cbb = ' + str(cbb))
cstdev = cbb
sumwts = 0.0
for i in range(0,na):
cmean = cmean + s[i]*vra[i]
cstdev = cstdev - s[i]*rr[i]
sumwts = sumwts + s[i]
if lktype == 1:
cstdev = cstdev - s[na]
# print('Ordinary Weight' + str(s[na]))
if lktype == 2: cmean = cmean + gmean
if lktype == 4 and colc == True: # we may drop colocated if low covariance dispersion
ind = ix + (iy-1)*nx
# print(ind)
# print('neq'); print(neq)
# print('s'); print(s.shape)
# print('lvm'); print(lvm.shape)
# print('colc wt = ' + str(s[na]) + ' for ' + str(lvm[cur_index]) + ' at index ' + str(cur_index))
cmean = cmean + s[na]*lvm[cur_index]
cstdev = cstdev - s[na] *rr[na]
# Error message if negative variance:
if cstdev < 0.0:
# print('ERROR: Negative Variance: ' + str(cstdev))
cstdev = 0.0
cstdev = math.sqrt(max(cstdev,0.0))
# print('kriging estimate and variance' + str(cmean) + ', ' + str(cstdev))
return cmean, cstdev
def ikrige(ix,iy,nx,ny,xx,yy,lktype,x,y,vr,sec,colocorr,gmean,lvm,close,covtab,nctx,ncty,icnode,ixnode,iynode,cnodev,cnodex,cnodey,nst,c0,PMX,cc,aa,it,ang,anis,rotmat,maxcov,MAXCTX,MAXCTY,MAXKR1,MAXKR2):
"""GSLIB's KRIGE subroutine (Deutsch and Journel, 1998) converted from the
original Fortran to Python and modified for indicator kriging by Michael Pyrcz, the University of Texas at
Austin (March, 2019).
Note this was simplified to 2D only. WARNING: tested only for ktype 0,1,2 (2 is local proportion model / local mean provided, not residual approach)
"""
EPSLON = 1.0e-20
cur_index = ix + (iy)*nx
# print('krige at grid '); print(ix,iy)
# print('krige at node '); print(xx,yy)
# print('grid index = '); print(cur_index)
# print('Check ixnode '); print(ixnode); print(iynode)
nclose = len(close)
ncnode = (icnode >= 0).sum()
# print('In kriging, maxcov = ' + str(maxcov))
# print('kriging')
# print('nclose ' + str(nclose) + ', ncnode ' + str(ncnode))
# print('MAXKR1'); print(MAXKR1)
vra = np.zeros(MAXKR1); vrea = np.zeros(MAXKR1)
r = np.zeros(MAXKR1); rr = np.zeros(MAXKR1); s = np.zeros(MAXKR1); a = np.zeros(MAXKR2)
cbb = cova2(0,0,0,0,nst,c0,9999.9,cc,aa,it,ang,anis,rotmat,maxcov)
# print(r.shape)
# Local mean # just pass the local probability as gmean
# if lktype == 2:
# gmean = lvm[cur_index]
# keep input gmean otherwise
# Size of the kriging system:
first = False
na = nclose + ncnode
# print('lktype' + str(lktype))
if lktype == 0: neq = na
if lktype == 1:
# print('ordinary kriging')
neq = na + 1
if lktype == 2: neq = na
if lktype == 3: neq = na + 2
if lktype == 4: neq = na + 1
# print('prior matrix build neq'); print(neq)
# print('na'); print(na)
# print('kriging data close'); print(close)
# print('kriging node close'); print(icnode)
# Set up kriging matrices:
    iin = -1 # accounting for 0 origin
# print('krige na' + str(na))
for j in range(0,na):
# Sort out the actual location of point "j"
if j < nclose: # adjusted for 0 index origin
index = int(close[j])
x1 = x[index]
y1 = y[index]
vra[j] = vr[index]
# print('data: index = ' + str(index) + ', x,y ' + str(x1) + ',' + str(y1) + ', value = ' + str(vra[j]))
# if lvm.shape[0] > 1:
# vrea[j]= sec[index];
# else:
vrea[j] = 0.0 # added this - no effect
# if lktype == 2: vra[j] = vra[j] - vrea[j] # just using local variable mean not full residual approach
else:
# It is a previously simulated node (keep index for table look-up):
# print(j)
index = j-(nclose) # adjust for 0 index
x1 = cnodex[index]
y1 = cnodey[index]
vra[j] = cnodev[index]
ind = icnode[index]
# print('prev node: index = ' + str(index) + ', x,y ' + str(x1) + ',' + str(y1) + ', value = ' + str(vra[j]))
ix1 = ix + (int(ixnode[ind])-nctx-1)
iy1 = iy + (int(iynode[ind])-ncty-1)
# print('ix1, iy1 = '); print(ix1,iy1)
index = ix1 + (iy1-1)*nx
# if lktype == 2:
# vrea[j]= lvm[index]
# vra[j] = vra[j] - vrea[j]
for i in range(0,na): # we need the full matrix
# print('kriging indice populated' + str(j) + ',' + str(i))
# Sort out the actual location of point "i"
if i < nclose:
index = int(close[i]) # adjust for 0 index
x2 = x[index]
y2 = y[index]
else:
# It is a previously simulated node (keep index for table look-up):
#print('i = ' + str(i) + ',nclose = ' + str(nclose) + ', na = ' + str(na))
index = i-(nclose)
x2 = cnodex[index]
y2 = cnodey[index]
ind = icnode[index]
# print('previous node index' + str(ind))
ix2 = ix + (int(ixnode[ind])-nctx-1)
iy2 = iy + (int(iynode[ind])-ncty-1)
# Now, get the covariance value:
iin = iin + 1
# print('kriging data location = '); print(x2,y2)
# Decide whether or not to use the covariance look-up table:
if j <= nclose or i <= nclose:
# print('x1,y1,x2,y2,nst,c0,9999.9,cc,aa,it,ang,anis,rotmat,maxcov')
# print(x1,y1,x2,y2,nst,c0,9999.9,cc,aa,it,ang,anis,rotmat,maxcov)
cov = cova2(x1,y1,x2,y2,nst,c0,9999.9,cc,aa,it,ang,anis,rotmat,maxcov)
# print('cov'); print(cov)
a[iin] = cov
else:
# Try to use the covariance look-up (if the distance is in range):
# ii = nctx + 1 + (ix1 - ix2)
# jj = ncty + 1 + (iy1 - iy2)
cov = cova2(x1,y1,x2,y2,nst,c0,9999.9,cc,aa,it,ang,anis,rotmat,maxcov)
# if ii < 0 or ii >= MAXCTX or jj < 0 or jj >= MAXCTY:
# cov = cova2(x1,y1,x2,y2,nst,c0,9999.9,cc,aa,it,ang,anis,rotmat,maxcov)
# else:
# cov = covtab[ii,jj]
# print(x1,y1,x2,y2,cov)
a[iin] = cov
# Get the RHS value (possibly with covariance look-up table):
if j <= nclose:
# print(cc,aa,it,ang,anis,rotmat,maxcov)
cov = cova2(xx,yy,x1,y1,nst,c0,9999.9,cc,aa,it,ang,anis,rotmat,maxcov)
# if cov >= 1.0:
# print('cov of 1.0 RHS for data ')
# print('ix,iy ='); print(xx,xx)
# print('ix1,iy1'); print(x1,y1)
r[j] = cov
else:
# Try to use the covariance look-up (if the distance is in range):
# ii = nctx + 1 + (ix - ix1)
# jj = ncty + 1 + (iy - iy1)
# print('RHS ctable coord' + str(ii) + ',' + str(jj))
# print('ix,iy ='); print(ix,iy)
# print('ix1,iy1'); print(ix1,iy1)
# if ii < 0 or ii >= MAXCTX or jj < 0 or jj >= MAXCTY: # adjusted for origin 0
# print('Not using covariance table')
# cov = cova2(xx,yy,x1,y1,nst,c0,9999.9,cc,aa,it,ang,anis,rotmat,maxcov)
# else:
# cov = covtab[ii,jj]
cov = cova2(xx,yy,x1,y1,nst,c0,9999.9,cc,aa,it,ang,anis,rotmat,maxcov)
# if cov >= 1.0:
# print('cov of 1.0 RHS for node ' + str(j))
# print('ix,iy ='); print(xx,xx)
# print('ix1,iy1'); print(x1,y1)
r[j] = cov
# print('kriging, writing RHS '+ str(j) + ',' + str(cov) + 'loc_est' + str(xx) + ',' + str(yy) + 'data' + str(x1) + ',' + str(y1))
rr[j] = r[j]
if lktype == 1: # we need the full array
iin = iin + 1
a[iin] = 1.0
if lktype == 4: # we need the full array
iin = iin + 1
a[iin] = colocorr*r[j]
# Addition of OK constraint:
if lktype == 1 or lktype == 3:
for i in range(0,na):
iin = iin + 1
a[iin] = 1.0
iin = iin + 1
a[iin] = 0.0
r[na] = 1.0
rr[na] = 1.0
# Addition of the External Drift Constraint:
if lktype == 3:
edmin = 999999.
edmax = -999999.
for i in range(0,na):
iin = iin + 1
            a[iin] = vrea[i]
            if a[iin] < edmin: edmin = a[iin]
            if a[iin] > edmax: edmax = a[iin]
iin = iin + 1
a[iin] = 0.0
iin = iin + 1
a[iin] = 0.0
ind = ix + (iy-1)*nx
r[na+1] = lvm[ind]
rr[na+1] = r[na+1]
if (edmax-edmin) < EPSLON: neq = neq - 1
# Addition of Collocated Cosimulation Constraint:
if lktype == 4:
colc = True
sfmin = 1.0e21
sfmax = -1.0e21
for i in range(0,na):
iin = iin + 1
a[iin] = colocorr*r[i]
if a[iin] < sfmin: sfmin = a[iin]
if a[iin] > sfmax: sfmax = a[iin]
iin = iin + 1
a[iin] = 1.0
ii = na
r[ii] = colocorr
rr[ii] = r[ii]
# if (sfmax-sfmin) < EPSLON:
# neq = neq - 1
# colc = False
# Solve the Kriging System:
# print('Kriging equations neq = ' + str(neq));
# print('a'); print(a)
# print('r'); print(r)
# print('data'); print(vra)
if neq == 1 and lktype != 3:
# print('neq = 1 '); print(a,r)
s[0] = r[0] / a[0]
else:
# print('neq prior ksol' + str(neq))
s = ksol_numpy(neq,a,r)
# print('neq post ksol' + str(neq))
# if s.shape[0]< neq:
# print('s shape'); print(s.shape)
# print('a'); print(a)
# print('r'); print(r)
ising = 0 # need to figure this out
# print('s'); print(s)
# Compute the estimate and kriging variance. Recall that kriging type
# 0 = Simple Kriging:
# 1 = Ordinary Kriging:
# 2 = Locally Varying Mean:
# 3 = External Drift:
# 4 = Collocated Cosimulation:
# print('kriging weights'); print(s)
cmean = 0.0
# print('cbb = ' + str(cbb))
cstdev = cbb
sumwts = 0.0
for i in range(0,na):
cmean = cmean + s[i]*vra[i]
cstdev = cstdev - s[i]*rr[i]
sumwts = sumwts + s[i]
if lktype == 1:
cstdev = cstdev - s[na]
# print('Ordinary Weight' + str(s[na]))
# if lktype == 2: cmean = cmean + gmean
if lktype == 4 and colc == True: # we may drop colocated if low covariance dispersion
ind = ix + (iy-1)*nx
# print(ind)
# print('neq'); print(neq)
# print('s'); print(s.shape)
# print('lvm'); print(lvm.shape)
# print('colc wt = ' + str(s[na]) + ' for ' + str(lvm[cur_index]) + ' at index ' + str(cur_index))
cmean = cmean + s[na]*lvm[cur_index]
cstdev = cstdev - s[na] *rr[na]
if lktype == 0 or lktype == 2:
cmean = cmean + (1.0-sumwts)*gmean
# print('cmean'); print(cmean)
# Error message if negative variance:
if cstdev < 0.0:
# print('ERROR: Negative Variance: ' + str(cstdev))
cstdev = 0.0
cstdev = math.sqrt(max(cstdev,0.0))
# print('kriging estimate and variance' + str(cmean) + ', ' + str(cstdev))
return cmean, cstdev
def getindex(nc,cmn,csiz,loc):
ic = min(int((loc - cmn) / csiz), nc - 1)
return ic
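# Hypothetical usage sketch for getindex (values below are illustrative only):
#   ic = getindex(nc=100, cmn=5.0, csiz=10.0, loc=47.3)   # int((47.3-5.0)/10.0) = 4
# The zero-based cell index is clamped to nc-1 for locations beyond the last cell.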
def correct_trend(trend):
"""Correct a indicator based trend model for closure (probabilities sum to 1.0).
:param trend: ndarray [ny,nx,ncut]
    :return: ndarray [ny,nx,ncut] corrected for closure
"""
ny = trend.shape[0]
nx = trend.shape[1]
ncut = trend.shape[2]
for iy in range(0,ny):
for ix in range(0,nx):
sum = 0.0
for ic in range(0,ncut):
sum = sum + trend[iy,ix,ic]
if sum > 0.0:
                for icut in range(0,ncut):
                    trend[iy,ix,icut] = trend[iy,ix,icut] / sum
return trend
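# Hypothetical usage sketch for correct_trend (array shape and values are illustrative only):
#   trend = np.ones((20, 20, 3)) * np.array([0.2, 0.3, 0.6])   # proportions sum to 1.1 at each cell
#   trend = correct_trend(trend)                               # each trend[iy, ix, :] now sums to 1.0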
def ordrel(ivtype,ncut,ccdf):
"""Correct a indicator based CDF for order relations.
:param ivtype: variable type, 0 - categorical and 1 - continuous
:param ncut: number of categories or thresholds
:param ccdf: input cumulative distribution function
    :return: cumulative distribution function corrected for order relations
"""
# print('input ordering relations'); print(ccdf)
ccdfo = np.zeros(ncut)
ccdf1 = np.zeros(ncut)
ccdf2 = np.zeros(ncut) # do we need MAXCUT = 100 for these 2?
# Make sure conditional cdf is within [0,1]:
for i in range(0,ncut):
if ccdf[i] < 0.0:
ccdf1[i] = 0.0
ccdf2[i] = 0.0
elif ccdf[i] > 1.0:
ccdf1[i] = 1.0
ccdf2[i] = 1.0
else:
ccdf1[i] = ccdf[i]
ccdf2[i] = ccdf[i]
# print('ordering relations'); print(ccdf1,ccdf2)
# Correct sequentially up, then down, and then average:
if ivtype == 0:
sumcdf = 0.0
for i in range(0,ncut):
sumcdf = sumcdf + ccdf1[i]
if sumcdf <= 0.0: sumcdf = 1.0
for i in range(0,ncut):
ccdfo[i] = ccdf1[i] / sumcdf
else:
for i in range(1,ncut):
if ccdf1[i] < ccdf1[i-1]: ccdf1[i] = ccdf1[i-1]
        for i in range(ncut-2,-1,-1):
if ccdf2[i] > ccdf2[i+1]: ccdf2[i] = ccdf2[i+1]
for i in range(0,ncut):
ccdfo[i] = 0.5*(ccdf1[i]+ccdf2[i])
# Return with corrected CDF:
return ccdfo
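# Hypothetical usage sketch for ordrel (values are illustrative only):
#   ccdf = np.array([0.15, 0.10, 0.55, 1.02])     # kriged probabilities, non-monotonic / outside [0,1]
#   ccdfo = ordrel(ivtype=1, ncut=4, ccdf=ccdf)   # continuous: clipped to [0,1] and made non-decreasing
#   probs = ordrel(ivtype=0, ncut=4, ccdf=ccdf)   # categorical: rescaled so the probabilities sum to 1.0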
def declus(df, xcol, ycol, vcol, iminmax, noff, ncell, cmin, cmax):
"""GSLIB's DECLUS program (Deutsch and Journel, 1998) converted from the
original Fortran to Python by Michael Pyrcz, the University of Texas at
Austin (Jan, 2019).
Note this was simplified to 2D only.
:param df: pandas DataFrame with the spatial data
:param xcol: name of the x coordinate column
:param ycol: name of the y coordinate column
:param vcol: name of the property column
:param iminmax: 1 / True: for use cell size with max decluster mean
0 / False: for declustered mean minimizing cell size
:param noff: number of offsets
:param ncell: number of cell sizes
:param cmin: min cell size
:param cmax: max cell size
:return: TODO
"""
# Load data and set up arrays
nd = len(df)
x = df[xcol].values
y = df[ycol].values
v = df[vcol].values
wt = np.zeros(nd)
wtopt = np.ones(nd)
index = np.zeros(nd, np.int32)
xcs_mat = np.zeros(ncell + 2) # we use 1,...,n for this array
vrcr_mat = np.zeros(ncell + 2) # we use 1,...,n for this array
anisy = 1.0 # hard code the cells to 2D isotropic
roff = float(noff)
# Calculate extents
xmin = np.min(x)
xmax = np.max(x)
ymin = np.min(y)
ymax = np.max(y)
# Calculate summary statistics
vmean = np.mean(v)
vstdev = np.std(v)
vmin = np.min(v)
vmax = np.max(v)
xcs_mat[0] = 0.0
vrcr_mat[0] = vmean
vrop = vmean # include the naive case
print(f"There are {nd} data with:")
print(f" mean of {vmean} ")
print(f" min and max {vmin} and {vmax}")
print(f" standard dev {vstdev} ")
# Define a "lower" origin to use for the cell sizes
xo1 = xmin - 0.01
yo1 = ymin - 0.01
# Define the increment for the cell size
xinc = (cmax - cmin) / ncell
yinc = xinc
# Loop over "ncell+1" cell sizes in the grid network
ncellx = int((xmax - (xo1 - cmin)) / cmin) + 1
ncelly = int((ymax - (yo1 - cmin * anisy)) / cmin) + 1
ncellt = ncellx * ncelly
cellwt = np.zeros(ncellt)
xcs = cmin - xinc
ycs = (cmin * anisy) - yinc
# Main loop over cell sizes
# 0 index is the 0.0 cell, note n + 1 in Fortran
for lp in range(1, ncell + 2):
xcs = xcs + xinc
ycs = ycs + yinc
# Initialize the weights to zero
wt.fill(0.0)
# Determine the maximum number of grid cells in the network
ncellx = int((xmax - (xo1 - xcs)) / xcs) + 1
ncelly = int((ymax - (yo1 - ycs)) / ycs) + 1
ncellt = float(ncellx * ncelly) # TODO: not used
# Loop over all the origin offsets selected
xfac = min((xcs / roff), (0.5 * (xmax - xmin)))
yfac = min((ycs / roff), (0.5 * (ymax - ymin)))
for kp in range(1, noff + 1):
xo = xo1 - (float(kp) - 1.0) * xfac
yo = yo1 - (float(kp) - 1.0) * yfac
# Initialize the cumulative weight indicators
cellwt.fill(0.0)
# Determine which cell each datum is in
for i in range(0, nd):
icellx = int((x[i] - xo) / xcs) + 1
icelly = int((y[i] - yo) / ycs) + 1
icell = icellx + (icelly - 1) * ncellx
index[i] = icell
cellwt[icell] = cellwt[icell] + 1.0
# The weight assigned to each datum is inversely proportional to the
# number of data in the cell. We first need to get the sum of
# weights so that we can normalize the weights to sum to one
sumw = 0.0
for i in range(0, nd):
ipoint = index[i]
sumw = sumw + (1.0 / cellwt[ipoint])
sumw = 1.0 / sumw
# Accumulate the array of weights (that now sum to one)
for i in range(0, nd):
ipoint = index[i]
wt[i] = wt[i] + (1.0 / cellwt[ipoint]) * sumw
# End loop over all offsets
# Compute the weighted average for this cell size
sumw = 0.0
sumwg = 0.0
for i in range(0, nd):
sumw = sumw + wt[i]
sumwg = sumwg + wt[i] * v[i]
vrcr = sumwg / sumw
vrcr_mat[lp] = vrcr
xcs_mat[lp] = xcs
# See if this weighting is optimal
if iminmax and vrcr < vrop or not iminmax and vrcr > vrop or ncell == 1:
best = xcs # TODO: not used
vrop = vrcr
wtopt = wt.copy() # deep copy
# End main loop over all cell sizes
# Get the optimal weights
sumw = 0.0
for i in range(0, nd):
sumw = sumw + wtopt[i]
wtmin = np.min(wtopt) # TODO: not used
wtmax = np.max(wtopt) # TODO: not used
facto = float(nd) / sumw
wtopt = wtopt * facto
return wtopt, xcs_mat, vrcr_mat
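# Hypothetical usage sketch for declus (DataFrame and column names are assumptions):
#   wts, cell_sizes, declus_means = declus(df, 'X', 'Y', 'Porosity', iminmax=1, noff=10,
#                                          ncell=100, cmin=10.0, cmax=2000.0)
#   df['Wts'] = wts                                                   # weights that average to 1.0
#   declus_mean = np.average(df['Porosity'].values, weights=wts)      # cell-declustered mean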
def gam(array, tmin, tmax, xsiz, ysiz, ixd, iyd, nlag, isill):
"""GSLIB's GAM program (Deutsch and Journel, 1998) converted from the
original Fortran to Python by Michael Pyrcz, the University of Texas at
Austin (Jan, 2019).
:param array: 2D gridded data / model
:param tmin: property trimming limit
:param tmax: property trimming limit
:param xsiz: grid cell extents in x direction
:param ysiz: grid cell extents in y direction
:param ixd: lag offset in grid cells
:param iyd: lag offset in grid cells
:param nlag: number of lags to calculate
:param isill: 1 for standardize sill
:return: TODO
"""
if array.ndim == 2:
ny, nx = array.shape
elif array.ndim == 1:
ny, nx = 1, len(array)
nvarg = 1 # for multiple variograms repeat the program
nxy = nx * ny # TODO: not used
mxdlv = nlag
# Allocate the needed memory
lag = np.zeros(mxdlv)
vario = np.zeros(mxdlv)
hm = np.zeros(mxdlv)
tm = np.zeros(mxdlv)
hv = np.zeros(mxdlv) # TODO: not used
npp = np.zeros(mxdlv)
ivtail = np.zeros(nvarg + 2)
ivhead = np.zeros(nvarg + 2)
ivtype = np.zeros(nvarg + 2)
ivtail[0] = 0
ivhead[0] = 0
ivtype[0] = 0
# Summary statistics for the data after trimming
inside = (array > tmin) & (array < tmax)
avg = array[(array > tmin) & (array < tmax)].mean() # TODO: not used
stdev = array[(array > tmin) & (array < tmax)].std()
var = stdev ** 2.0
vrmin = array[(array > tmin) & (array < tmax)].min() # TODO: not used
vrmax = array[(array > tmin) & (array < tmax)].max() # TODO: not used
num = ((array > tmin) & (array < tmax)).sum() # TODO: not used
# For the fixed seed point, loop through all directions
for iy in range(0, ny):
for ix in range(0, nx):
if inside[iy, ix]:
vrt = array[iy, ix]
ixinc = ixd
iyinc = iyd
ix1 = ix
iy1 = iy
for il in range(0, nlag):
ix1 = ix1 + ixinc
if 0 <= ix1 < nx:
iy1 = iy1 + iyinc
                        if 0 <= iy1 < ny:
if inside[iy1, ix1]:
vrh = array[iy1, ix1]
npp[il] = npp[il] + 1
tm[il] = tm[il] + vrt
hm[il] = hm[il] + vrh
vario[il] = vario[il] + ((vrh - vrt) ** 2.0)
# Get average values for gam, hm, tm, hv, and tv, then compute the correct
# "variogram" measure
for il in range(0, nlag):
if npp[il] > 0:
rnum = npp[il]
lag[il] = np.sqrt((ixd * xsiz * il) ** 2 + (iyd * ysiz * il) ** 2)
vario[il] = vario[il] / float(rnum)
hm[il] = hm[il] / float(rnum)
tm[il] = tm[il] / float(rnum)
# Standardize by the sill
if isill == 1:
vario[il] = vario[il] / var
# Semivariogram
vario[il] = 0.5 * vario[il]
return lag, vario, npp
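# Hypothetical usage sketch for gam on a 2D gridded model (array and parameters are assumptions):
#   lags, gamma, pairs = gam(model, tmin=-9999., tmax=9999., xsiz=10.0, ysiz=10.0,
#                            ixd=1, iyd=0, nlag=20, isill=1)   # x-direction, one-cell lag offset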
def gamv(
df,
xcol,
ycol,
vcol,
tmin,
tmax,
xlag,
xltol,
nlag,
azm,
atol,
bandwh,
isill,
):
"""GSLIB's GAMV program (Deutsch and Journel, 1998) converted from the
original Fortran to Python by Michael Pyrcz, the University of Texas at
Austin (Jan, 2019).
Note simplified for 2D, semivariogram only and one direction at a time.
:param df: pandas DataFrame with the spatial data
:param xcol: name of the x coordinate column
:param ycol: name of the y coordinate column
:param vcol: name of the property column
:param tmin: property trimming limit
:param tmax: property trimming limit
:param xlag: lag distance
:param xltol: lag distance tolerance
:param nlag: number of lags to calculate
:param azm: azimuth
:param atol: azimuth tolerance
:param bandwh: horizontal bandwidth / maximum distance offset orthogonal to
azimuth
:param isill: 1 for standardize sill
:return: TODO
"""
# Load the data
# Trim values outside tmin and tmax
df_extract = df.loc[(df[vcol] >= tmin) & (df[vcol] <= tmax)]
nd = len(df_extract) # TODO: not used
x = df_extract[xcol].values
y = df_extract[ycol].values
vr = df_extract[vcol].values
# Summary statistics for the data after trimming
avg = vr.mean() # TODO: not used
stdev = vr.std()
sills = stdev ** 2.0
ssq = sills # TODO: not used
vrmin = vr.min() # TODO: not used
vrmax = vr.max() # TODO: not used
# Define the distance tolerance if it isn't already
if xltol < 0.0:
xltol = 0.5 * xlag
# Loop over combinatorial of data pairs to calculate the variogram
dis, vario, npp = variogram_loop(
x, y, vr, xlag, xltol, nlag, azm, atol, bandwh
)
# Standardize sill to one by dividing all variogram values by the variance
for il in range(0, nlag + 2):
if isill == 1:
vario[il] = vario[il] / sills
# Apply 1/2 factor to go from variogram to semivariogram
vario[il] = 0.5 * vario[il]
return dis, vario, npp
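# Hypothetical usage sketch for gamv (DataFrame, column names and lag settings are assumptions):
#   lags, gamma, pairs = gamv(df, 'X', 'Y', 'Porosity', tmin=-9999., tmax=9999., xlag=50.0,
#                             xltol=25.0, nlag=20, azm=45.0, atol=22.5, bandwh=9999.9, isill=1)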
@jit(nopython=True)
def variogram_loop(x, y, vr, xlag, xltol, nlag, azm, atol, bandwh):
"""Calculate the variogram by looping over combinatorial of data pairs.
:param x: x values
:param y: y values
:param vr: property values
:param xlag: lag distance
:param xltol: lag distance tolerance
:param nlag: number of lags to calculate
:param azm: azimuth
:param atol: azimuth tolerance
:param bandwh: horizontal bandwidth / maximum distance offset orthogonal to
azimuth
:return: TODO
"""
# Allocate the needed memory
nvarg = 1
mxdlv = nlag + 2 # in gamv the npp etc. arrays go to nlag + 2
dis = np.zeros(mxdlv)
lag = np.zeros(mxdlv) # TODO: not used
vario = np.zeros(mxdlv)
hm = np.zeros(mxdlv)
tm = np.zeros(mxdlv)
hv = np.zeros(mxdlv) # TODO: not used
npp = np.zeros(mxdlv)
ivtail = np.zeros(nvarg + 2)
ivhead = np.zeros(nvarg + 2)
ivtype = np.ones(nvarg + 2)
ivtail[0] = 0
ivhead[0] = 0
ivtype[0] = 0
EPSLON = 1.0e-20
nd = len(x)
# The mathematical azimuth is measured counterclockwise from EW and
# not clockwise from NS as the conventional azimuth is
azmuth = (90.0 - azm) * math.pi / 180.0
uvxazm = math.cos(azmuth)
uvyazm = math.sin(azmuth)
if atol <= 0.0:
csatol = math.cos(45.0 * math.pi / 180.0)
else:
csatol = math.cos(atol * math.pi / 180.0)
# Initialize the arrays for each direction, variogram, and lag
nsiz = nlag + 2 # TODO: not used
dismxs = ((float(nlag) + 0.5 - EPSLON) * xlag) ** 2
# Main loop over all pairs
for i in range(0, nd):
for j in range(0, nd):
# Definition of the lag corresponding to the current pair
dx = x[j] - x[i]
dy = y[j] - y[i]
dxs = dx * dx
dys = dy * dy
hs = dxs + dys
if hs <= dismxs:
if hs < 0.0:
hs = 0.0
h = np.sqrt(hs)
# Determine which lag this is and skip if outside the defined
# distance tolerance
if h <= EPSLON:
lagbeg = 0
lagend = 0
else:
lagbeg = -1
lagend = -1
for ilag in range(1, nlag + 1):
# reduced to -1
if (
(xlag * float(ilag - 1) - xltol)
<= h
<= (xlag * float(ilag - 1) + xltol)
):
if lagbeg < 0:
lagbeg = ilag
lagend = ilag
if lagend >= 0:
# Definition of the direction corresponding to the current
# pair. All directions are considered (overlapping of
# direction tolerance cones is allowed)
# Check for an acceptable azimuth angle
dxy = np.sqrt(max((dxs + dys), 0.0))
if dxy < EPSLON:
dcazm = 1.0
else:
dcazm = (dx * uvxazm + dy * uvyazm) / dxy
# Check the horizontal bandwidth criteria (maximum deviation
# perpendicular to the specified direction azimuth)
band = uvxazm * dy - uvyazm * dx
# Apply all the previous checks at once to avoid a lot of
# nested if statements
if (abs(dcazm) >= csatol) and (abs(band) <= bandwh):
# Check whether or not an omni-directional variogram is
# being computed
omni = False
if atol >= 90.0:
omni = True
# For this variogram, sort out which is the tail and
# the head value
iv = 0 # hardcoded just one variogram
it = ivtype[iv] # TODO: not used
if dcazm >= 0.0:
vrh = vr[i]
vrt = vr[j]
if omni:
vrtpr = vr[i]
vrhpr = vr[j]
else:
vrh = vr[j]
vrt = vr[i]
if omni:
vrtpr = vr[j]
vrhpr = vr[i]
# Reject this pair on the basis of missing values
# Data was trimmed at the beginning
# The Semivariogram (all other types of measures are
# removed for now)
for il in range(lagbeg, lagend + 1):
npp[il] = npp[il] + 1
dis[il] = dis[il] + h
tm[il] = tm[il] + vrt
hm[il] = hm[il] + vrh
vario[il] = vario[il] + ((vrh - vrt) * (vrh - vrt))
if omni:
npp[il] = npp[il] + 1.0
dis[il] = dis[il] + h
tm[il] = tm[il] + vrtpr
hm[il] = hm[il] + vrhpr
vario[il] = vario[il] + (
(vrhpr - vrtpr) * (vrhpr - vrtpr)
)
# Get average values for gam, hm, tm, hv, and tv, then compute the correct
# "variogram" measure
for il in range(0, nlag + 2):
i = il
if npp[i] > 0:
rnum = npp[i]
dis[i] = dis[i] / rnum
vario[i] = vario[i] / rnum
hm[i] = hm[i] / rnum
tm[i] = tm[i] / rnum
return dis, vario, npp
def varmapv(df,xcol,ycol,vcol,tmin,tmax,nxlag,nylag,dxlag,dylag,minnp,isill):
"""Calculate the variogram map from irregularly spaced data.
:param df: DataFrame with the spatial data, xcol, ycol, vcol coordinates and property columns
:param xcol: DataFrame column with x coordinate
:param ycol: DataFrame column with y coordinate
:param vcol: DataFrame column with value of interest
:param tmin: lower trimming limit
:param tmax: upper trimming limit
:param nxlag: number of lags in the x direction
    :param nylag: number of lags in the y direction
:param dxlag: size of the lags in the x direction
:param dylag: size of the lags in the y direction
:param minnp: minimum number of pairs to calculate a variogram value
:param isill: standardize sill to be 1.0
:return: TODO
"""
# Load the data
df_extract = df.loc[(df[vcol] >= tmin) & (df[vcol] <= tmax)] # trim values outside tmin and tmax
nd = len(df_extract)
x = df_extract[xcol].values
y = df_extract[ycol].values
vr = df_extract[vcol].values
# Summary statistics for the data after trimming
avg = vr.mean()
stdev = vr.std()
sills = stdev**2.0
ssq = sills
vrmin = vr.min()
vrmax = vr.max()
# Initialize the summation arrays
npp = np.zeros((nylag*2+1,nxlag*2+1))
gam = np.zeros((nylag*2+1,nxlag*2+1))
nppf = np.zeros((nylag*2+1,nxlag*2+1))
gamf = np.zeros((nylag*2+1,nxlag*2+1))
hm = np.zeros((nylag*2+1,nxlag*2+1))
tm = np.zeros((nylag*2+1,nxlag*2+1))
hv = np.zeros((nylag*2+1,nxlag*2+1))
tv = np.zeros((nylag*2+1,nxlag*2+1))
# First fix the location of a seed point:
for i in range(0,nd):
# Second loop over the data:
for j in range(0,nd):
# The lag:
ydis = y[j] - y[i]
iyl = nylag + int(ydis/dylag)
            if iyl < 0 or iyl > nylag*2: # accounting for 0,...,n-1 array indexing
continue
xdis = x[j] - x[i]
ixl = nxlag + int(xdis/dxlag)
            if ixl < 0 or ixl > nxlag*2: # accounting for 0,...,n-1 array indexing
continue
# We have an acceptable pair, therefore accumulate all the statistics
# that are required for the variogram:
npp[iyl,ixl] = npp[iyl,ixl] + 1 # our ndarrays read from the base to top, so we flip
tm[iyl,ixl] = tm[iyl,ixl] + vr[i]
hm[iyl,ixl] = hm[iyl,ixl] + vr[j]
            tv[iyl,ixl] = tv[iyl,ixl] + vr[i]*vr[i]
            hv[iyl,ixl] = hv[iyl,ixl] + vr[j]*vr[j]
gam[iyl,ixl] = gam[iyl,ixl] + ((vr[i]-vr[j])*(vr[i]-vr[j]))
# Get average values for gam, hm, tm, hv, and tv, then compute
# the correct "variogram" measure:
for iy in range(0,nylag*2+1):
for ix in range(0,nxlag*2+1):
if npp[iy,ix] <= minnp:
gam[iy,ix] = -999.
hm[iy,ix] = -999.
tm[iy,ix] = -999.
hv[iy,ix] = -999.
tv[iy,ix] = -999.
else:
rnum = npp[iy,ix]
gam[iy,ix] = gam[iy,ix] / (2*rnum) # semivariogram
hm[iy,ix] = hm[iy,ix] / rnum
tm[iy,ix] = tm[iy,ix] / rnum
hv[iy,ix] = hv[iy,ix] / rnum - hm[iy,ix]*hm[iy,ix]
tv[iy,ix] = tv[iy,ix] / rnum - tm[iy,ix]*tm[iy,ix]
# Attempt to standardize:
                if isill > 0:
                    gam[iy,ix] = gam[iy,ix]/sills
for iy in range(0,nylag*2+1):
for ix in range(0,nxlag*2+1):
gamf[iy,ix] = gam[nylag*2-iy,ix]
nppf[iy,ix] = npp[nylag*2-iy,ix]
return gamf, nppf
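# Hypothetical usage sketch for varmapv (DataFrame and lag settings are assumptions):
#   vmap, npmap = varmapv(df, 'X', 'Y', 'Porosity', tmin=-9999., tmax=9999., nxlag=10, nylag=10,
#                         dxlag=50.0, dylag=50.0, minnp=5, isill=1)   # (2*nylag+1, 2*nxlag+1) maps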
def vmodel(
nlag,
xlag,
azm,
vario
):
"""GSLIB's VMODEL program (Deutsch and Journel, 1998) converted from the
original Fortran to Python by Michael Pyrcz, the University of Texas at
Austin (Mar, 2019).
:param nlag: number of variogram lags
:param xlag: size of the lags
    :param azm: direction by 2D azimuth, 000 is y positive, 090 is x positive
:param vario: dictionary with the variogram parameters
    :return: lag index, lag distance, variogram, covariance and correlogram arrays
"""
# Parameters
MAXNST=4
DEG2RAD=3.14159265/180.0
MAXROT=MAXNST+1
EPSLON = 1.0e-20
VERSION= 1.01
# Declare arrays
index = np.zeros(nlag+1)
h = np.zeros(nlag+1)
gam = np.zeros(nlag+1)
cov = np.zeros(nlag+1)
ro = np.zeros(nlag+1)
# Load the variogram
nst = vario["nst"]
cc = np.zeros(nst)
aa = np.zeros(nst)
it = np.zeros(nst)
ang = np.zeros(nst)
anis = np.zeros(nst)
c0 = vario["nug"]
cc[0] = vario["cc1"]
it[0] = vario["it1"]
ang[0] = vario["azi1"]
aa[0] = vario["hmaj1"]
anis[0] = vario["hmin1"] / vario["hmaj1"]
if nst == 2:
cc[1] = vario["cc2"]
it[1] = vario["it2"]
ang[1] = vario["azi2"]
aa[1] = vario["hmaj2"]
anis[1] = vario["hmin2"] / vario["hmaj2"]
xoff = math.sin(DEG2RAD*azm)*xlag
yoff = math.cos(DEG2RAD*azm)*xlag
    print(' x,y offsets = ' + str(xoff) + ',' + str(yoff))
rotmat, maxcov = setup_rotmat(c0, nst, it, cc, ang, 99999.9)
xx = 0.0; yy = 0.0
for il in range(0,nlag+1):
index[il] = il
cov[il] = cova2(0.0,0.0,xx,yy,nst,c0,9999.9,cc,aa,it,ang,anis,rotmat,maxcov)
gam[il] = maxcov - cov[il]
ro[il] = cov[il]/maxcov
h[il] = math.sqrt(max((xx*xx+yy*yy),0.0))
xx = xx + xoff
yy = yy + yoff
# finished
return index,h,gam,cov,ro
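# Hypothetical usage sketch for vmodel (the variogram dictionary keys follow the ones read above):
#   vario = {'nug':0.0, 'nst':1, 'it1':1, 'cc1':1.0, 'azi1':45.0, 'hmaj1':500.0, 'hmin1':200.0}
#   index, h, gam_model, cov, ro = vmodel(nlag=100, xlag=10.0, azm=45.0, vario=vario)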
def nscore(
df, vcol, wcol=None, ismooth=False, dfsmooth=None, smcol=0, smwcol=0
):
"""GSLIB's NSCORE program (Deutsch and Journel, 1998) converted from the
original Fortran to Python by Michael Pyrcz, the University of Texas at
Austin (Jan, 2019).
:param df: pandas DataFrame with the spatial data
:param vcol: name of the variable column
:param wcol: name of the weight column, if None assumes equal weighting
:param ismooth: if True then use a reference distribution
:param dfsmooth: pandas DataFrame required if reference distribution is used
:param smcol: reference distribution property (required if reference
distribution is used)
:param smwcol: reference distribution weight (required if reference
distribution is used)
:return: TODO
"""
# Set constants
np.random.seed(73073)
pwr = 1.0 # interpolation power, hard coded to 1.0 in GSLIB
EPSILON = 1.0e-20
# Decide which file to use for establishing the transformation table
if ismooth:
nd = len(dfsmooth)
vr = dfsmooth[smcol].values
wt_ns = np.ones(nd)
if smwcol != 0:
wt_ns = dfsmooth[smwcol].values
else:
nd = len(df)
vr = df[vcol].values
wt_ns = np.ones(nd)
if wcol is not None:
wt_ns = df[wcol].values
twt = np.sum(wt_ns)
# Sort data by value
istart = 0
iend = nd
vr, wt_ns = dsortem(istart, iend, vr, 2, wt_ns)
# Compute the cumulative probabilities and write transformation table
wtfac = 1.0 / twt
oldcp = 0.0
cp = 0.0
for j in range(istart, iend):
w = wtfac * wt_ns[j]
cp = cp + w
wt_ns[j] = (cp + oldcp) / 2.0
vrrg = gauinv(wt_ns[j])
vrg = float(vrrg)
oldcp = cp
# Now, reset the weight to the normal scores value
wt_ns[j] = vrg
# Normal scores transform
nd_trans = len(df)
ns = np.zeros(nd_trans)
val = df[vcol].values
for i in range(0, nd_trans):
vrr = val[i] + np.random.rand() * EPSILON
# Now, get the normal scores value for "vrr"
j = dlocate(vr, 1, nd, vrr)
j = min(max(1, j), (nd - 1))
ns[i] = dpowint(vr[j], vr[j + 1], wt_ns[j], wt_ns[j + 1], vrr, pwr)
return ns, vr, wt_ns
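# Hypothetical usage sketch for nscore (DataFrame and column names are assumptions):
#   ns, vr_table, ns_table = nscore(df, 'Porosity', wcol='Wts')   # declustered transform table
#   df['NPorosity'] = ns                                          # Gaussian transformed values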
def kb2d(
df,
xcol,
ycol,
vcol,
tmin,
tmax,
nx,
xmn,
xsiz,
ny,
ymn,
ysiz,
nxdis,
nydis,
ndmin,
ndmax,
radius,
ktype,
skmean,
vario,
):
"""GSLIB's KB2D program (Deutsch and Journel, 1998) converted from the
original Fortran to Python by Michael Pyrcz, the University of Texas at
Austin (Jan, 2019).
:param df: pandas DataFrame with the spatial data
:param xcol: name of the x coordinate column
:param ycol: name of the y coordinate column
:param vcol: name of the property column
:param tmin: property trimming limit
:param tmax: property trimming limit
:param nx: definition of the grid system (x axis)
:param xmn: definition of the grid system (x axis)
:param xsiz: definition of the grid system (x axis)
:param ny: definition of the grid system (y axis)
:param ymn: definition of the grid system (y axis)
:param ysiz: definition of the grid system (y axis)
:param nxdis: number of discretization points for a block
:param nydis: number of discretization points for a block
:param ndmin: minimum number of data points to use for kriging a block
:param ndmax: maximum number of data points to use for kriging a block
:param radius: maximum isotropic search radius
    :param ktype: kriging type, 0 - simple kriging and 1 - ordinary kriging
    :param skmean: simple kriging mean (only used when ktype = 0)
    :param vario: variogram dictionary with nug, nst, it1, cc1, azi1, hmaj1, hmin1 (and 2nd structure) keys
    :return: 2D ndarrays with the kriging estimate and kriging variance maps
"""
# Constants
UNEST = -999.
EPSLON = 1.0e-10
VERSION = 2.907
first = True
PMX = 9999.0
MAXSAM = ndmax + 1
MAXDIS = nxdis * nydis
MAXKD = MAXSAM + 1
MAXKRG = MAXKD * MAXKD
# load the variogram
nst = vario['nst']
cc = np.zeros(nst); aa = np.zeros(nst); it = np.zeros(nst)
ang = np.zeros(nst); anis = np.zeros(nst)
c0 = vario['nug'];
cc[0] = vario['cc1']; it[0] = vario['it1']; ang[0] = vario['azi1'];
aa[0] = vario['hmaj1']; anis[0] = vario['hmin1']/vario['hmaj1'];
if nst == 2:
cc[1] = vario['cc2']; it[1] = vario['it2']; ang[1] = vario['azi2'];
aa[1] = vario['hmaj2']; anis[1] = vario['hmin2']/vario['hmaj2'];
# Allocate the needed memory:
xdb = np.zeros(MAXDIS)
ydb = np.zeros(MAXDIS)
xa = np.zeros(MAXSAM)
ya = np.zeros(MAXSAM)
vra = np.zeros(MAXSAM)
dist = np.zeros(MAXSAM)
nums = np.zeros(MAXSAM)
r = np.zeros(MAXKD)
rr = np.zeros(MAXKD)
s = np.zeros(MAXKD)
a = np.zeros(MAXKRG)
kmap = np.zeros((nx,ny))
vmap = np.zeros((nx,ny))
# Load the data
df_extract = df.loc[(df[vcol] >= tmin) & (df[vcol] <= tmax)] # trim values outside tmin and tmax
nd = len(df_extract)
ndmax = min(ndmax,nd)
x = df_extract[xcol].values
y = df_extract[ycol].values
vr = df_extract[vcol].values
# Make a KDTree for fast search of nearest neighbours
dp = list((y[i], x[i]) for i in range(0,nd))
data_locs = np.column_stack((y,x))
tree = sp.cKDTree(data_locs, leafsize=16, compact_nodes=True, copy_data=False, balanced_tree=True)
# Summary statistics for the data after trimming
avg = vr.mean()
stdev = vr.std()
ss = stdev**2.0
vrmin = vr.min()
vrmax = vr.max()
# Set up the discretization points per block. Figure out how many
# are needed, the spacing, and fill the xdb and ydb arrays with the
# offsets relative to the block center (this only gets done once):
ndb = nxdis * nydis
if ndb > MAXDIS:
print('ERROR KB2D: Too many discretization points ')
print(' Increase MAXDIS or lower n[xy]dis')
return kmap
xdis = xsiz / max(float(nxdis),1.0)
ydis = ysiz / max(float(nydis),1.0)
xloc = -0.5*(xsiz+xdis)
i = -1 # accounting for 0 as lowest index
for ix in range(0,nxdis):
xloc = xloc + xdis
yloc = -0.5*(ysiz+ydis)
for iy in range(0,nydis):
yloc = yloc + ydis
i = i+1
xdb[i] = xloc
ydb[i] = yloc
# Initialize accumulators:
cbb = 0.0
rad2 = radius*radius
# Calculate Block Covariance. Check for point kriging.
rotmat, maxcov = setup_rotmat(c0,nst,it,cc,ang,PMX)
cov = cova2(xdb[0],ydb[0],xdb[0],ydb[0],nst,c0,PMX,cc,aa,it,ang,anis,rotmat,maxcov)
# Keep this value to use for the unbiasedness constraint:
unbias = cov
first = False
if ndb <= 1:
cbb = cov
else:
for i in range(0,ndb):
for j in range(0,ndb):
cov = cova2(xdb[i],ydb[i],xdb[j],ydb[j],nst,c0,PMX,cc,aa,it,ang,anis,rotmat,maxcov)
if i == j:
cov = cov - c0
cbb = cbb + cov
        cbb = cbb/float(ndb*ndb)
# MAIN LOOP OVER ALL THE BLOCKS IN THE GRID:
nk = 0
ak = 0.0
vk = 0.0
for iy in range(0,ny):
yloc = ymn + (iy-0)*ysiz
for ix in range(0,nx):
xloc = xmn + (ix-0)*xsiz
current_node = (yloc,xloc)
# Find the nearest samples within each octant: First initialize
# the counter arrays:
na = -1 # accounting for 0 as first index
dist.fill(1.0e+20)
nums.fill(-1)
dist, nums = tree.query(current_node,ndmax) # use kd tree for fast nearest data search
# remove any data outside search radius
na = len(dist)
nums = nums[dist<radius]
dist = dist[dist<radius]
na = len(dist)
# Is there enough samples?
if na + 1 < ndmin: # accounting for min index of 0
est = UNEST
estv = UNEST
print('UNEST at ' + str(ix) + ',' + str(iy))
else:
# Put coordinates and values of neighborhood samples into xa,ya,vra:
for ia in range(0,na):
jj = int(nums[ia])
xa[ia] = x[jj]
ya[ia] = y[jj]
vra[ia] = vr[jj]
# Handle the situation of only one sample:
if na == 0: # accounting for min index of 0 - one sample case na = 0
cb1 = cova2(xa[0],ya[0],xa[0],ya[0],nst,c0,PMX,cc,aa,it,ang,anis,rotmat,maxcov)
xx = xa[0] - xloc
yy = ya[0] - yloc
# Establish Right Hand Side Covariance:
if ndb <= 1:
cb = cova2(xx,yy,xdb[0],ydb[0],nst,c0,PMX,cc,aa,it,ang,anis,rotmat,maxcov)
else:
cb = 0.0
for i in range(0,ndb):
cb = cb + cova2(xx,yy,xdb[i],ydb[i],nst,c0,PMX,cc,aa,it,ang,anis,rotmat,maxcov)
                            dx = xx - xdb[i]
                            dy = yy - ydb[i]
if (dx*dx+dy*dy) < EPSLON:
cb = cb - c0
                        cb = cb / float(ndb)
if ktype == 0:
s[0] = cb/cbb
est = s[0]*vra[0] + (1.0-s[0])*skmean
estv = cbb - s[0] * cb
else:
est = vra[0]
estv = cbb - 2.0*cb + cb1
else:
# Solve the Kriging System with more than one sample:
neq = na + ktype # accounting for first index of 0
# print('NEQ' + str(neq))
nn = (neq + 1)*neq/2
# Set up kriging matrices:
iin=-1 # accounting for first index of 0
for j in range(0,na):
# Establish Left Hand Side Covariance Matrix:
for i in range(0,na): # was j - want full matrix
iin = iin + 1
a[iin] = cova2(xa[i],ya[i],xa[j],ya[j],nst,c0,PMX,cc,aa,it,ang,anis,rotmat,maxcov)
if ktype == 1:
iin = iin + 1
a[iin] = unbias
xx = xa[j] - xloc
yy = ya[j] - yloc
# Establish Right Hand Side Covariance:
if ndb <= 1:
cb = cova2(xx,yy,xdb[0],ydb[0],nst,c0,PMX,cc,aa,it,ang,anis,rotmat,maxcov)
else:
cb = 0.0
for j1 in range(0,ndb):
cb = cb + cova2(xx,yy,xdb[j1],ydb[j1],nst,c0,PMX,cc,aa,it,ang,anis,rotmat,maxcov)
dx = xx - xdb[j1]
dy = yy - ydb[j1]
if (dx*dx+dy*dy) < EPSLON:
cb = cb - c0
                            cb = cb / float(ndb)
r[j] = cb
rr[j] = r[j]
# Set the unbiasedness constraint:
if ktype == 1:
for i in range(0,na):
iin = iin + 1
a[iin] = unbias
iin = iin + 1
a[iin] = 0.0
r[neq-1] = unbias
                        rr[neq-1] = r[neq-1]
# Solve the Kriging System:
# print('NDB' + str(ndb))
# print('NEQ' + str(neq) + ' Left' + str(a) + ' Right' + str(r))
# stop
s = ksol_numpy(neq,a,r)
ising = 0 # need to figure this out
# print('weights' + str(s))
# stop
# Write a warning if the matrix is singular:
if ising != 0:
print('WARNING KB2D: singular matrix')
print(' for block' + str(ix) + ',' + str(iy)+ ' ')
est = UNEST
estv = UNEST
else:
# Compute the estimate and the kriging variance:
est = 0.0
estv = cbb
sumw = 0.0
if ktype == 1:
estv = estv - (s[na])*unbias
for i in range(0,na):
sumw = sumw + s[i]
est = est + s[i]*vra[i]
estv = estv - s[i]*rr[i]
if ktype == 0:
est = est + (1.0-sumw)*skmean
kmap[ny-iy-1,ix] = est
vmap[ny-iy-1,ix] = estv
if est > UNEST:
nk = nk + 1
ak = ak + est
vk = vk + est*est
# END OF MAIN LOOP OVER ALL THE BLOCKS:
if nk >= 1:
ak = ak / float(nk)
vk = vk/float(nk) - ak*ak
print(' Estimated ' + str(nk) + ' blocks ')
print(' average ' + str(ak) + ' variance ' + str(vk))
return kmap, vmap
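# Hypothetical usage sketch for kb2d (grid, search and variogram settings are assumptions):
#   vario = {'nug':0.0, 'nst':1, 'it1':1, 'cc1':1.0, 'azi1':45.0, 'hmaj1':500.0, 'hmin1':200.0}
#   kmap, vmap = kb2d(df, 'X', 'Y', 'Porosity', tmin=-9999., tmax=9999., nx=100, xmn=5.0, xsiz=10.0,
#                     ny=100, ymn=5.0, ysiz=10.0, nxdis=1, nydis=1, ndmin=0, ndmax=10,
#                     radius=500.0, ktype=1, skmean=0.0, vario=vario)   # ordinary kriging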
def ik2d(df,xcol,ycol,vcol,ivtype,koption,ncut,thresh,gcdf,trend,tmin,tmax,nx,xmn,xsiz,ny,ymn,ysiz,ndmin,ndmax,radius,ktype,vario):
"""A 2D version of GSLIB's IK3D Indicator Kriging program (Deutsch and Journel, 1998) converted from the
original Fortran to Python by Michael Pyrcz, the University of Texas at
Austin (March, 2019).
:param df: pandas DataFrame with the spatial data
:param xcol: name of the x coordinate column
:param ycol: name of the y coordinate column
    :param vcol: name of the property column (categorical or continuous - note continuous is untested)
:param ivtype: variable type, 0 - categorical, 1 - continuous
:param koption: kriging option, 0 - estimation, 1 - cross validation (under construction)
:param ncut: number of categories or continuous thresholds
:param thresh: an ndarray with the category labels or continuous thresholds
:param gcdf: global CDF, not used if trend is present
    :param trend: an ndarray [ny,nx,ncut] with the local trend proportions or cumulative CDF values
:param tmin: property trimming limit
:param tmax: property trimming limit
:param nx: definition of the grid system (x axis)
:param xmn: definition of the grid system (x axis)
:param xsiz: definition of the grid system (x axis)
:param ny: definition of the grid system (y axis)
:param ymn: definition of the grid system (y axis)
:param ysiz: definition of the grid system (y axis)
:param ndmin: minimum number of data points to use for kriging a block
:param ndmax: maximum number of data points to use for kriging a block
:param radius: maximum isotropic search radius
:param ktype: kriging type, 0 - simple kriging and 1 - ordinary kriging
:param vario: list with all of the indicator variograms (sill of 1.0) in consistent order with above parameters
    :return: ndarray [ny,nx,ncut] with the kriged and order-relations-corrected CCDF at each cell
"""
    # Find the needed parameters:
    UNEST = -999.
    EPSLON = 1.0e-20
    PMX = 9999.9
    MAXSAM = ndmax + 1
    MAXEQ = MAXSAM + 1
mik = 0 # full indicator kriging
use_trend = False
    if trend.shape[0] == ny and trend.shape[1] == nx and trend.shape[2] == ncut: use_trend = True
# load the variogram
MAXNST = 2
nst = np.zeros(ncut,dtype=int); c0 = np.zeros(ncut); cc = np.zeros((MAXNST,ncut))
aa = np.zeros((MAXNST,ncut),dtype=int); it = np.zeros((MAXNST,ncut),dtype=int)
ang = np.zeros((MAXNST,ncut)); anis = np.zeros((MAXNST,ncut))
for icut in range(0,ncut):
nst[icut] = int(vario[icut]['nst'])
c0[icut] = vario[icut]['nug']; cc[0,icut] = vario[icut]['cc1']; it[0,icut] = vario[icut]['it1'];
ang[0,icut] = vario[icut]['azi1'];
aa[0,icut] = vario[icut]['hmaj1']; anis[0,icut] = vario[icut]['hmin1']/vario[icut]['hmaj1'];
if nst[icut] == 2:
cc[1,icut] = vario[icut]['cc2']; it[1,icut] = vario[icut]['it2']; ang[1,icut] = vario[icut]['azi2'];
aa[1,icut] = vario[icut]['hmaj2']; anis[1,icut] = vario[icut]['hmin2']/vario[icut]['hmaj2'];
# Load the data
df_extract = df.loc[(df[vcol] >= tmin) & (df[vcol] <= tmax)] # trim values outside tmin and tmax
MAXDAT = len(df_extract)
MAXCUT = ncut
MAXNST = 2
MAXROT = MAXNST*MAXCUT+ 1
ikout = np.zeros((nx,ny,ncut))
maxcov = np.zeros(ncut)
# Allocate the needed memory:
xa = np.zeros(MAXSAM)
ya = np.zeros(MAXSAM)
vra = np.zeros(MAXSAM)
dist = np.zeros(MAXSAM)
nums = np.zeros(MAXSAM)
r = np.zeros(MAXEQ)
rr = np.zeros(MAXEQ)
s = np.zeros(MAXEQ)
a = np.zeros(MAXEQ*MAXEQ)
ikmap = np.zeros((nx,ny,ncut))
vr = np.zeros((MAXDAT,MAXCUT+1))
nviol = np.zeros(MAXCUT)
aviol = np.zeros(MAXCUT)
xviol = np.zeros(MAXCUT)
ccdf = np.zeros(ncut)
ccdfo = np.zeros(ncut)
ikout = np.zeros((nx,ny,ncut))
x = df_extract[xcol].values
y = df_extract[ycol].values
v = df_extract[vcol].values
# The indicator data are constructed knowing the thresholds and the
# data value.
if ivtype == 0:
for icut in range(0,ncut):
vr[:,icut] = np.where((v <= thresh[icut] + 0.5) & (v > thresh[icut] - 0.5), '1', '0')
else:
for icut in range(0,ncut):
vr[:,icut] = np.where(v <= thresh[icut], '1', '0')
vr[:,ncut] = v
# Make a KDTree for fast search of nearest neighbours
dp = list((y[i], x[i]) for i in range(0,MAXDAT))
data_locs = np.column_stack((y,x))
tree = sp.cKDTree(data_locs, leafsize=16, compact_nodes=True, copy_data=False, balanced_tree=True)
# Summary statistics of the input data
avg = vr[:,ncut].mean()
stdev = vr[:,ncut].std()
ss = stdev**2.0
vrmin = vr[:,ncut].min()
vrmax = vr[:,ncut].max()
print('Data for IK3D: Variable column ' + str(vcol))
print(' Number = ' + str(MAXDAT))
ndh = MAXDAT
actloc = np.zeros(MAXDAT, dtype = int)
for i in range(1,MAXDAT):
actloc[i] = i
# Set up the rotation/anisotropy matrices that are needed for the
# variogram and search:
print('Setting up rotation matrices for variogram and search')
radsqd = radius * radius
rotmat = []
for ic in range(0,ncut):
rotmat_temp, maxcov[ic] = setup_rotmat(c0[ic],int(nst[ic]),it[:,ic],cc[:,ic],ang[:,ic],9999.9)
rotmat.append(rotmat_temp)
# Initialize accumulators: # not setup yet
nk = 0
xk = 0.0
vk = 0.0
for icut in range (0,ncut):
nviol[icut] = 0
aviol[icut] = 0.0
xviol[icut] = -1.0
nxy = nx*ny
print('Working on the kriging')
# Report on progress from time to time:
if koption == 0:
nxy = nx*ny
nloop = nxy
irepo = max(1,min((nxy/10),10000))
else:
nloop = 10000000
        irepo = max(1,min((MAXDAT/10),10000))
ddh = 0.0
# MAIN LOOP OVER ALL THE BLOCKS IN THE GRID:
for index in range(0,nloop):
if (int(index/irepo)*irepo) == index: print(' currently on estimate ' + str(index))
if koption == 0:
iy = int((index)/nx)
ix = index - (iy)*nx
xloc = xmn + (ix)*xsiz
yloc = ymn + (iy)*ysiz
else:
ddh = 0.0
# TODO: pass the cross validation value
# Find the nearest samples within each octant: First initialize the counter arrays:
na = -1 # accounting for 0 as first index
dist.fill(1.0e+20)
nums.fill(-1)
current_node = (yloc,xloc)
dist, close = tree.query(current_node,ndmax) # use kd tree for fast nearest data search
# remove any data outside search radius
close = close[dist<radius]
dist = dist[dist<radius]
nclose = len(dist)
# Is there enough samples?
if nclose < ndmin: # accounting for min index of 0
for i in range(0,ncut):
ccdfo[i] = UNEST
print('UNEST at ' + str(ix) + ',' + str(iy))
else:
# Loop over all the thresholds/categories:
for ic in range(0,ncut):
krig = True
if mik == 1 and ic >= 1: krig = False
# Identify the close data (there may be a different number of data at
# each threshold because of constraint intervals); however, if
# there are no constraint intervals then this step can be avoided.
nca = -1
for ia in range(0,nclose):
j = int(close[ia]+0.5)
ii = actloc[j]
accept = True
                    if koption != 0 and (abs(x[j]-xloc) + abs(y[j]-yloc)) < EPSLON: accept = False
if accept:
nca = nca + 1
vra[nca] = vr[ii,ic]
xa[nca] = x[j]
ya[nca] = y[j]
# If there are no samples at this threshold then use the global cdf:
if nca == -1:
if use_trend:
ccdf[ic] = trend[ny-iy-1,ix,ic]
else:
ccdf[ic] = gcdf[ic]
else:
# Now, only load the variogram, build the matrix,... if kriging:
neq = nclose + ktype
na = nclose
# Set up kriging matrices:
iin=-1 # accounting for first index of 0
for j in range(0,na):
# Establish Left Hand Side Covariance Matrix:
for i in range(0,na): # was j - want full matrix
iin = iin + 1
a[iin] = cova2(xa[i],ya[i],xa[j],ya[j],nst[ic],c0[ic],PMX,cc[:,ic],aa[:,ic],it[:,ic],ang[:,ic],anis[:,ic],rotmat[ic],maxcov[ic])
if ktype == 1:
iin = iin + 1
a[iin] = maxcov[ic]
r[j] = cova2(xloc,yloc,xa[j],ya[j],nst[ic],c0[ic],PMX,cc[:,ic],aa[:,ic],it[:,ic],ang[:,ic],anis[:,ic],rotmat[ic],maxcov[ic])
# Set the unbiasedness constraint:
if ktype == 1:
for i in range(0,na):
iin = iin + 1
a[iin] = maxcov[ic]
iin = iin + 1
a[iin] = 0.0
r[neq-1] = maxcov[ic]
                        rr[neq-1] = r[neq-1]
# Solve the system:
if neq == 1:
ising = 0.0
s[0] = r[0] / a[0]
else:
s = ksol_numpy(neq,a,r)
# Finished kriging (if it was necessary):
# Compute Kriged estimate of cumulative probability:
sumwts = 0.0
ccdf[ic] = 0.0
for i in range(0,nclose):
ccdf[ic] = ccdf[ic] + vra[i]*s[i]
sumwts = sumwts + s[i]
if ktype == 0:
if use_trend == True:
ccdf[ic] = ccdf[ic] + (1.0-sumwts)*trend[ny-iy-1,ix,ic]
else:
ccdf[ic] = ccdf[ic] + (1.0-sumwts)*gcdf[ic]
# Keep looping until all the thresholds are estimated:
# Correct and write the distribution to the output file:
nk = nk + 1
ccdfo = ordrel(ivtype,ncut,ccdf)
# Write the IK CCDF for this grid node:
if koption == 0:
ikout[ny-iy-1,ix,:] = ccdfo
else:
print('TBD')
return ikout
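# Hypothetical usage sketch for ik2d with two facies categories (all names and settings are assumptions;
# vario_facies0 and vario_facies1 are assumed to be indicator variogram dictionaries built elsewhere):
#   varios = [vario_facies0, vario_facies1]            # one indicator variogram (sill 1.0) per category
#   ik = ik2d(df, 'X', 'Y', 'Facies', ivtype=0, koption=0, ncut=2, thresh=[0, 1], gcdf=[0.6, 0.4],
#             trend=np.zeros((1, 1, 1)), tmin=-9999., tmax=9999., nx=100, xmn=5.0, xsiz=10.0,
#             ny=100, ymn=5.0, ysiz=10.0, ndmin=0, ndmax=10, radius=500.0, ktype=1, vario=varios)
#   # ik[:, :, ic] holds the kriged probability (CCDF) for category ic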
def sgsim(df,xcol,ycol,vcol,wcol,scol,tmin,tmax,itrans,ismooth,dftrans,tcol,twtcol,zmin,zmax,ltail,ltpar,utail,utpar,nsim,
nx,xmn,xsiz,ny,ymn,ysiz,seed,ndmin,ndmax,nodmax,mults,nmult,noct,radius,radius1,sang1,
mxctx,mxcty,ktype,colocorr,sec_map,vario):
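    """A 2D version of GSLIB's SGSIM Sequential Gaussian Simulation program (Deutsch and Journel, 1998)
    converted from the original Fortran to Python. Note this was simplified to 2D only.
    ktype: 0 - simple kriging, 1 - ordinary kriging, 2 - locally varying mean, 3 - external drift,
    4 - collocated cosimulation (sec_map is used for ktype >= 2; colocorr for ktype 4).
    """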
# Parameters from sgsim.inc
MAXNST=2; MAXROT=2; UNEST=-99.0; EPSLON=1.0e-20; VERSION=2.907
KORDEI=12; MAXOP1=KORDEI+1; MAXINT=2**30
# Set other parameters
np.random.seed(seed)
nxy = nx*ny
sstrat = 0 # search data and nodes by default, turned off if unconditional
radsqd = radius * radius
sanis1 = radius1/radius
if ktype == 4: varred = 1.0
# load the variogram
nst = int(vario['nst'])
cc = np.zeros(nst); aa = np.zeros(nst); it = np.zeros(nst,dtype=int)
ang = np.zeros(nst); anis = np.zeros(nst)
c0 = vario['nug'];
cc[0] = vario['cc1']; it[0] = vario['it1']; ang[0] = vario['azi1'];
aa[0] = vario['hmaj1']; anis[0] = vario['hmin1']/vario['hmaj1'];
if nst == 2:
cc[1] = vario['cc2']; it[1] = vario['it2']; ang[1] = vario['azi2'];
aa[1] = vario['hmaj2']; anis[1] = vario['hmin2']/vario['hmaj2'];
# Set the constants
MAXCTX = mxctx
MAXCTY = mxcty
MAXCXY = MAXCTX * MAXCTY
MAXX = nx
MAXY = ny
MAXZ = 1 # assuming 2D for now
MXY = MAXX * MAXY
if MXY < 100: MXY = 100
MAXNOD = nodmax
MAXSAM = ndmax
MAXKR1 = MAXNOD + MAXSAM + 1
# print('MAXKR1'); print(MAXKR1)
MAXKR2 = MAXKR1 * MAXKR1
MAXSBX = 1
if nx > 1:
MAXSBX = int(nx/2)
if MAXSBX > 50: MAXSBX=50
MAXSBY = 1
if ny > 1:
MAXSBY = int(ny/2)
if MAXSBY > 50: MAXSBY=50
MAXSBZ = 1
MAXSB = MAXSBX*MAXSBY*MAXSBZ
# Declare arrays
dist = np.zeros(ndmax)
nums = np.zeros(ndmax,dtype = int)
# Perform some quick checks
    if nx > MAXX or ny > MAXY:
        print('ERROR: available grid size: ' + str(MAXX) + ',' + str(MAXY) + ',' + str(MAXZ) +'.')
        print('       you have asked for : ' + str(nx) + ',' + str(ny) + '.')
        return None
    if ltail != 1 and ltail != 2:
        print('ERROR invalid lower tail option ' + str(ltail))
        print('          only allow 1 or 2 - see GSLIB manual ')
        return None
    if utail != 1 and utail != 2 and utail != 4:
        print('ERROR invalid upper tail option ' + str(utail))
        print('          only allow 1,2 or 4 - see GSLIB manual ')
        return None
    if utail == 4 and utpar < 1.0:
        print('ERROR invalid power for hyperbolic tail' + str(utpar))
        print('          must be greater than 1.0!')
        return None
    if ltail == 2 and ltpar < 0.0:
        print('ERROR invalid power for power model' + str(ltpar))
        print('          must be greater than 0.0!')
        return None
    if utail == 2 and utpar < 0.0:
        print('ERROR invalid power for power model' + str(utpar))
        print('          must be greater than 0.0!')
        return None
# Load the data
df_extract = df.loc[(df[vcol] >= tmin) & (df[vcol] <= tmax)] # trim values outside tmin and tmax
nd = len(df_extract)
ndmax = min(ndmax,nd)
x = df_extract[xcol].values
y = df_extract[ycol].values
vr = df_extract[vcol].values
vr_orig = np.copy(vr)
# print('size of data extract'); print(len(vr))
wt = []; wt = np.array(wt)
if wcol > -1:
wt = df_extract[wcol].values
else:
wt = np.ones(nd)
sec = []; sec = np.array(sec)
if scol > -1:
sec = df_extract[scol].values
if itrans == 1:
if ismooth == 1:
dftrans_extract = dftrans.loc[(dftrans[tcol] >= tmin) & (dftrans[tcol] <= tmax)]
ntr = len(dftrans_extract)
            vrtr = dftrans_extract[tcol].values
            if twtcol > -1:
                vrgtr = dftrans_extract[twtcol].values
else:
vrgtr = np.ones(ntr)
else:
vrtr = df_extract[vcol].values
ntr = len(df_extract)
vrgtr = np.copy(wt)
twt = np.sum(vrgtr)
# sort
vrtr,vrgtr = dsortem(0,ntr,vrtr,2,b=vrgtr)
# Compute the cumulative probabilities and write transformation table
twt = max(twt,EPSLON)
oldcp = 0.0
cp = 0.0
# print('ntr'); print(ntr)
for j in range(0,ntr):
cp = cp + vrgtr[j]/twt
w = (cp + oldcp)*0.5
vrg = gauinv(w)
oldcp = cp
# Now, reset the weight to the normal scores value:
vrgtr[j] = vrg
twt = np.sum(wt)
# Normal scores transform the data
for id in range(0,nd):
if itrans == 1:
vrr = vr[id]
            j = dlocate(vrtr,1,ntr,vrr)
            j = min(max(0,j),(ntr-2))
            vrg = dpowint(vrtr[j],vrtr[j+1],vrgtr[j],vrgtr[j+1],vrr,1.0)
            if vrg < vrgtr[0]: vrg = vrgtr[0]
            if vrg > vrgtr[ntr-1]: vrg = vrgtr[ntr-1]
vr[id] = vrg
weighted_stats_orig = DescrStatsW(vr_orig,weights=wt)
orig_av = weighted_stats_orig.mean
orig_ss = weighted_stats_orig.var
weighted_stats = DescrStatsW(vr,weights=wt)
av = weighted_stats.mean
ss = weighted_stats.var
print('\n Data for SGSIM: Number of acceptable data = ' + str(nd))
print(' Number trimmed = ' + str(len(df)- nd))
print(' Weighted Average = ' + str(round(orig_av,4)))
print(' Weighted Variance = ' + str(round(orig_ss,4)))
print(' Weighted Transformed Average = ' + str(round(av,4)))
print(' Weighted Transformed Variance = ' + str(round(ss,4)))
# Read in secondary data
sim = np.random.rand(nx*ny)
index = 0
for ixy in range(0,nxy):
sim[index] = index
lvm = []; lvm = np.array(lvm)
if ktype >= 2:
#lvm = np.copy(sec_map.flatten())
ind = 0
lvm = np.zeros(nxy)
for iy in range(0,ny):
for ix in range(0,nx):
lvm[ind] = sec_map[ny-iy-1,ix]
ind = ind + 1
if ktype == 2 and itrans == 1:
for ixy in range(0,nxy):
                # Do we need to transform the secondary variable for a local mean?
                vrr = lvm[ixy]
                j = dlocate(vrtr,1,ntr,vrr)
                j = min(max(0,j),(ntr-2))
                vrg = dpowint(vrtr[j],vrtr[j+1],vrgtr[j],vrgtr[j+1],vrr,1.0)
                if vrg < vrgtr[0]: vrg = vrgtr[0]
                if vrg > vrgtr[ntr-1]: vrg = vrgtr[ntr-1]
lvm[ixy] = vrg
av = np.average(lvm)
ss = np.var(lvm)
print(' Secondary Data: Number of data = ' + str(nx*ny))
print(' Equal Weighted Average = ' + str(round(av,4)))
print(' Equal Weighted Variance = ' + str(round(ss,4)))
# Do we need to work with data residuals? (Locally Varying Mean)
if ktype == 2:
sec = np.zeros(nd)
for idd in range(0,nd):
ix = getindex(nx,xmn,xsiz,x[idd])
iy = getindex(ny,ymn,ysiz,y[idd])
index = ix + (iy-1)*nx
sec[idd] = lvm[index]
# Calculation of residual moved to krige subroutine: vr(i)=vr(i)-sec(i)
# Do we need to get an external drift attribute for the data?
if ktype == 3:
for idd in range(0,nd):
            if sec[idd] != UNEST:
                ix = getindex(nx,xmn,xsiz,x[idd])
                iy = getindex(ny,ymn,ysiz,y[idd])
ind = ix + (iy)*nx
sec[ind] = lvm[ind]
# Transform the secondary attribute to normal scores?
if ktype == 4:
order_sec = np.zeros(nxy)
ind = 0
for ixy in range(0,nxy):
order_sec[ixy] = ind
ind = ind + 1
print(' Transforming Secondary Data with')
print(' variance reduction of ' + str(varred))
lvm,order_sec = dsortem(0,nxy,lvm,2,b=order_sec)
oldcp = 0.0
cp = 0.0
for i in range(0,nxy):
cp = cp + (1.0/(nxy))
w = (cp + oldcp)/2.0
lvm[i] = gauinv(w)
lvm[i] = lvm[i] * varred
oldcp = cp
order_sec,lvm = dsortem(0,nxy,order_sec,2,b=lvm)
# return np.reshape(lvm,(ny,nx)) # check the transform
# Set up the rotation/anisotropy matrices that are needed for the
# variogram and search.
print('Setting up rotation matrices for variogram and search')
if nst == 1:
rotmat = setrot(ang[0],ang[0],sang1,anis[0],anis[0],sanis1,nst,MAXROT=2)
else:
rotmat = setrot(ang[0],ang[1],sang1,anis[0],anis[1],sanis1,nst,MAXROT=2)
isrot = 2 # search rotation is appended as 3rd
rotmat_2d, maxcov = setup_rotmat2(c0,nst,it,cc,ang) # will use one in the future
# print('MaxCov = ' + str(maxcov))
# Make a KDTree for fast search of nearest neighbours
dp = list((y[i], x[i]) for i in range(0,nd))
data_locs = np.column_stack((y,x))
tree = sp.cKDTree(data_locs, leafsize=16, compact_nodes=True, copy_data=False, balanced_tree=True)
# Set up the covariance table and the spiral search:
cov_table,tmp,order,ixnode,iynode,nlooku,nctx,ncty = ctable(MAXNOD,MAXCXY,MAXCTX,MAXCTY,MXY,
xsiz,ysiz,isrot,nx,ny,nst,c0,cc,aa,it,ang,anis,rotmat,radsqd)
# print('Covariance Table'); print(cov_table)
    # MAIN LOOP OVER ALL THE SIMULATIONS:
for isim in range(0,nsim):
# Work out a random path for this realization:
sim = np.random.rand(nx*ny)
order = np.zeros(nxy)
ind = 0
for ixy in range(0,nxy):
order[ixy] = ind
ind = ind + 1
# The multiple grid search works with multiples of 4 (yes, that is
# somewhat arbitrary):
if mults == 1:
for imult in range(0,nmult):
nny = int(max(1,ny/((imult+1)*4)))
nnx = int(max(1,nx/((imult+1)*4)))
# print('multi grid - nnx, nny'); print(nnx,nny)
jy = 1
jx = 1
for iy in range(0,nny):
if nny > 0: jy = iy*(imult+1)*4
for ix in range(0,nnx):
if nnx > 0: jx = ix*(imult+1)*4
index = jx + (jy-1)*nx
sim[index] = sim[index] - (imult+1)
# Initialize the simulation:
sim, order = dsortem(0,nxy,sim,2,b=order)
sim.fill(UNEST)
print('Working on realization number ' + str(isim))
# Assign the data to the closest grid node:
TINY = 0.0001
for idd in range(0,nd):
# print('data'); print(x[idd],y[idd])
ix = getindex(nx,xmn,xsiz,x[idd])
iy = getindex(ny,ymn,ysiz,y[idd])
ind = ix + (iy-1)*nx
xx = xmn + (ix)*xsiz
yy = ymn + (iy)*ysiz
# print('xx, yy' + str(xx) + ',' + str(yy))
test = abs(xx-x[idd]) + abs(yy-y[idd])
# Assign this data to the node (unless there is a closer data):
if sstrat == 1:
if sim[ind] > 0.0:
id2 = int(sim[ind]+0.5)
                test2 = abs(xx-x[id2]) + abs(yy-y[id2])
if test <= test2:
sim[ind] = idd
else:
sim[ind] = id2
# Assign a flag so that this node does not get simulated:
if sstrat == 0 and test <= TINY: sim[ind]=10.0*UNEST
# Now, enter data values into the simulated grid:
for ind in range(0,nxy):
idd = int(sim[ind]+0.5)
            if idd > 0: sim[ind] = vr[idd]
irepo = max(1,min((nxy/10),10000))
# MAIN LOOP OVER ALL THE NODES:
for ind in range(0,nxy):
if (int(ind/irepo)*irepo) == ind:
print(' currently on node ' + str(ind))
# Figure out the location of this point and make sure it has
# not been assigned a value already:
index = int(order[ind]+0.5)
if (sim[index] > (UNEST+EPSLON)) or (sim[index] < (UNEST*2.0)): continue
iy = int((index)/nx)
ix = index - (iy)*nx
xx = xmn + (ix)*xsiz
yy = ymn + (iy)*ysiz
current_node = (yy,xx)
# print('Current_node'); print(current_node)
# Now, we'll simulate the point ix,iy,iz. First, get the close data
# and make sure that there are enough to actually simulate a value,
# we'll only keep the closest "ndmax" data, and look for previously
# simulated grid nodes:
if sstrat == 0:
# print('searching for nearest data')
na = -1 # accounting for 0 as first index
if ndmax == 1:
dist = np.zeros(1); nums = np.zeros(1)
dist[0], nums[0] = tree.query(current_node,ndmax) # use kd tree for fast nearest data search
else:
dist, nums = tree.query(current_node,ndmax)
# remove any data outside search radius
# print('nums'); print(nums)
# print('dist'); print(dist)
na = len(dist)
nums = nums[dist<radius]
dist = dist[dist<radius]
na = len(dist)
if na < ndmin: continue # bail if not enough data
# print('Found ' + str(na) + 'neighbouring data')
# print('node search inputs')
# print('nodmax ' + str(nodmax))
# print('ixnode'); print(ixnode)
ncnode, icnode, cnodev, cnodex, cnodey = srchnd(ix,iy,nx,ny,xmn,ymn,xsiz,ysiz,sim,noct,nodmax,ixnode,iynode,nlooku,nctx,ncty,UNEST)
# print('srchnd'); print(ncnode,icnode,cnodev,cnodex,cnodey)
# print('Result of srchnd, cnodex = '); print(cnodex)
nclose = na
# print('srch node, nclose ' + str(nclose) + ', ncnode ' + str(ncnode))
# print('nums'); print(nums)
# Calculate the conditional mean and standard deviation. This will be
# done with kriging if there are data, otherwise, the global mean and
# standard deviation will be used:
if ktype == 2:
gmean = lvm[index]
else:
gmean = 0.0
if nclose+ncnode < 1:
cmean = gmean
cstdev = 1.0
#Perform the kriging. Note that if there are fewer than four data
            # then simple kriging is preferred so that the variance of the
# realization does not become artificially inflated:
else:
lktype = ktype
if ktype == 1 and (nclose+ncnode) < 4: lktype=0
cmean, cstdev = krige(ix,iy,nx,ny,xx,yy,lktype,x,y,vr,sec,colocorr,lvm,nums,cov_table,nctx,ncty,icnode,ixnode,iynode,cnodev,cnodex,cnodey,
nst,c0,9999.9,cc,aa,it,ang,anis,rotmat_2d,maxcov,MAXCTX,MAXCTY,MAXKR1,
MAXKR2)
# Draw a random number and assign a value to this node:
p = np.random.rand()
xp = gauinv(p)
sim[index] = xp * cstdev + cmean
# print('simulated value = ' + str(sim[index]))
# Quick check for far out results:
if abs(cmean) > 5.0 or abs(cstdev) > 5.0 or abs(sim[index]) > 6.0:
print('WARNING: grid node location: ' + str(ix) + ',' + str(iy))
print(' conditional mean and stdev: ' + str(cmean) + ',' + str(cstdev))
print(' simulated value: ' + str(sim[index]))
# Do we need to reassign the data to the grid nodes?
if sstrat == 0:
print('Reassigning data to nodes')
for iid in range(0,nd):
ix = getindex(nx,xmn,xsiz,x[iid])
iy = getindex(ny,ymn,ysiz,y[iid])
xx = xmn + (ix)*xsiz
yy = ymn + (iy)*ysiz
ind = ix + (iy-1)*nx
test=abs(xx-x[iid])+abs(yy-y[iid])
if test <= TINY: sim[ind] = vr[iid]
# Back transform each value and write results:
ne = 0
av = 0.0
ss = 0.0
for ind in range(0,nxy):
            iy = int(ind/nx)
            ix = ind - iy*nx
simval = sim[ind]
if simval > -9.0 and simval < 9.0:
ne = ne + 1
av = av + simval
ss = ss + simval*simval
if itrans == 1 and simval > (UNEST+EPSLON):
simval = backtr_value(simval,vrtr,vrgtr,zmin,zmax,ltail,ltpar,utail,utpar)
if simval < zmin: simval = zmin
if simval > zmax: simval = zmax
sim[ind] = simval
# print('simulated value = ' + str(sim[ind]) + ' at location index = ' + str(ind))
av = av / max(ne,1.0)
ss =(ss / max(ne,1.0)) - av * av
print('\n Realization ' + str(isim) + ': number = ' + str(ne))
print(' mean = ' + str(round(av,4)) + ' (close to 0.0?)')
print(' variance = ' + str(round(ss,4)) + ' (close to gammabar(V,V)? approx. 1.0)')
# END MAIN LOOP OVER SIMULATIONS:
sim_out = np.zeros((ny,nx))
for ind in range(0,nxy):
iy = int((ind)/nx)
ix = ind - (iy)*nx
sim_out[ny-iy-1,ix] = sim[ind]
return sim_out
def sisim(df,xcol,ycol,vcol,ivtype,koption,ncut,thresh,gcdf,trend,tmin,tmax,zmin,zmax,ltail,ltpar,middle,mpar,utail,utpar,nx,xmn,xsiz,ny,ymn,ysiz,seed,ndmin,
ndmax,nodmax,mults,nmult,noct,radius,ktype,vario):
"""A 2D version of GSLIB's SISIM Indicator Simulation program (Deutsch and Journel, 1998) converted from the
original Fortran to Python by Michael Pyrcz, the University of Texas at
    Austin (March, 2019). WARNING: only tested for categorical ktype 0, 1 and 2 (locally variable proportion).
:param df: pandas DataFrame with the spatial data
:param xcol: name of the x coordinate column
:param ycol: name of the y coordinate column
    :param vcol: name of the property column (categorical or continuous - note continuous is untested)
:param ivtype: variable type, 0 - categorical, 1 - continuous
:param koption: kriging option, 0 - estimation, 1 - cross validation (under construction)
:param ncut: number of categories or continuous thresholds
:param thresh: an ndarray with the category labels or continuous thresholds
:param gcdf: global CDF, not used if trend is present
    :param trend: an ndarray [ny,nx,ncut] with the local trend proportions or cumulative CDF values
:param tmin: property trimming limit
:param tmax: property trimming limit
:param nx: definition of the grid system (x axis)
:param xmn: definition of the grid system (x axis)
:param xsiz: definition of the grid system (x axis)
:param ny: definition of the grid system (y axis)
:param ymn: definition of the grid system (y axis)
:param ysiz: definition of the grid system (y axis)
:param nxdis: number of discretization points for a block
:param nydis: number of discretization points for a block
:param ndmin: minimum number of data points to use for kriging a block
:param ndmax: maximum number of data points to use for kriging a block
:param radius: maximum isotropic search radius
:param ktype: kriging type, 0 - simple kriging and 1 - ordinary kriging
:param vario: list with all of the indicator variograms (sill of 1.0) in consistent order with above parameters
:return:
"""
# Checks
if utail == 3 or ltail == 3 or middle == 3:
print('ERROR - distribution extrapolation option 3 with table is not available')
        return None
if xcol == "" or ycol == "":
print('ERROR - must have x and y column in the DataFrame')
        return None
# Set parameters from the include
UNEST = -99.0
EPSLON = 1.0e-20
VERSION = 0.001
np.random.seed(seed)
colocorr = 0.0 # no collocated cokriging
lvm = 0 # no kriging with a locally variable mean
sec = []; sec = np.array(sec) # no secondary data
ng = 0 # no tabulated values
# Find the needed paramters:
PMX = 9999.9
MAXSAM = ndmax + 1
MAXEQ = MAXSAM + 1
nxy = nx*ny
mik = 0 # full indicator kriging
use_trend = False
trend1d = np.zeros((nxy,1)) # no trend make a dummy trend
    if trend.shape[0] == ny and trend.shape[1] == nx and trend.shape[2] == ncut:
trend1d = np.zeros((nxy,ncut))
use_trend = True
index = 0
for iy in range(0,ny):
for ix in range(0,nx):
for ic in range(0,ncut):
trend1d[index,ic] = trend[ny-iy-1,ix,ic] # copy trend
index = index + 1
MAXORD = nxy
MAXNOD = nodmax
cnodeiv = np.zeros((ncut+1,MAXNOD))
tmp = np.zeros(MAXORD)
sstrat = 0 # search data and nodes by default, turned off if unconditional
sang1 = 0 # using isotropic search now
sanis1 = 1.0
# No covariance lookup table
    mxctx = int(radius/xsiz)*2+1; mxcty = int(radius/ysiz)*2+1
# print('cov table / spiral search nx, ny '); print(mxctx); print(mxcty)
MAXCTX = mxctx
MAXCTY = mxcty
MAXCXY = MAXCTX * MAXCTY
# Grid extents
MAXX = nx
MAXY = ny
MXY = MAXX * MAXY
# Kriging system
MAXKR1 = 2 * MAXNOD + 2 * MAXSAM + 1
MAXKR2 = MAXKR1 * MAXKR1
MAXSBX = 1
if nx > 1:
MAXSBX = int(nx/2)
if MAXSBX > 50:
MAXSBX=50
MAXSBY = 1
if ny > 1:
MAXSBY = int(ny/2)
if MAXSBY > 50:
MAXSBY=50
# print('ncut'); print(ncut)
# load the variogram
MAXNST = 2
nst = np.zeros(ncut,dtype=int); c0 = np.zeros(ncut); cc = np.zeros((ncut,MAXNST))
aa = np.zeros((ncut,MAXNST),dtype=int); it = np.zeros((ncut,MAXNST),dtype=int)
ang = np.zeros((ncut,MAXNST)); anis = np.zeros((ncut,MAXNST))
# print('varios - 1 vario'); print(vario[1])
for icut in range(0,ncut):
# print('icut'); print(icut)
nst[icut] = int(vario[icut]['nst'])
c0[icut] = vario[icut]['nug']; cc[icut,0] = vario[icut]['cc1']; it[icut,0] = vario[icut]['it1'];
ang[icut,0] = vario[icut]['azi1'];
aa[icut,0] = vario[icut]['hmaj1']; anis[icut,0] = vario[icut]['hmin1']/vario[icut]['hmaj1'];
if nst[icut] == 2:
cc[icut,1] = vario[icut]['cc2']; it[icut,1] = vario[icut]['it2']; ang[icut,1] = vario[icut]['azi2'];
aa[icut,1] = vario[icut]['hmaj2']; anis[icut,1] = vario[icut]['hmin2']/vario[icut]['hmaj2'];
# print('check loaded cov model- icut '); print(icut)
# print(cc[icut],aa[icut],it[icut],ang[icut],anis[icut])
# Load the data
df_extract = df.loc[(df[vcol] >= tmin) & (df[vcol] <= tmax)] # trim values outside tmin and tmax
MAXDAT = len(df_extract)
nd = MAXDAT
MAXCUT = ncut
MAXNST = 2
MAXROT = MAXNST*MAXCUT+ 1
ikout = np.zeros((nx,ny,ncut))
maxcov = np.zeros(ncut)
# Allocate the needed memory:
xa = np.zeros(MAXSAM)
ya = np.zeros(MAXSAM)
vra = np.zeros(MAXSAM)
dist = np.zeros(MAXSAM)
nums = np.zeros(MAXSAM)
r = np.zeros(MAXEQ)
rr = np.zeros(MAXEQ)
s = np.zeros(MAXEQ)
a = np.zeros(MAXEQ*MAXEQ)
ikmap = np.zeros((nx,ny,ncut))
vr = np.zeros((MAXDAT,MAXCUT+1))
nviol = np.zeros(MAXCUT)
aviol = np.zeros(MAXCUT)
xviol = np.zeros(MAXCUT)
ccdf = np.zeros(ncut)
ccdfo = np.zeros(ncut)
ikout = np.zeros((nx,ny,ncut))
x = df_extract[xcol].values
y = df_extract[ycol].values
v = df_extract[vcol].values
MAXTAB = MAXDAT + MAXCUT # tabulated probabilities not used
gcut = np.zeros(MAXTAB)
# The indicator data are constructed knowing the thresholds and the
# data value.
# print('ncut'); print(ncut)
if ivtype == 0:
for icut in range(0,ncut):
vr[:,icut] = np.where((v <= thresh[icut] + 0.5) & (v > thresh[icut] - 0.5), '1', '0')
else:
for icut in range(0,ncut):
vr[:,icut] = np.where(v <= thresh[icut], '1', '0')
vr[:,ncut] = v
# print('loaded data '); print(vr)
# Make a KDTree for fast search of nearest neighbours
dp = list((y[i], x[i]) for i in range(0,MAXDAT))
data_locs = np.column_stack((y,x))
tree = sp.cKDTree(data_locs, leafsize=16, compact_nodes=True, copy_data=False, balanced_tree=True)
# Summary statistics of the input data
avg = vr[:,ncut].mean()
stdev = vr[:,ncut].std()
ss = stdev**2.0
vrmin = vr[:,ncut].min()
vrmax = vr[:,ncut].max()
print('Data for IK3D: Variable column ' + str(vcol))
print(' Number = ' + str(MAXDAT))
ndh = MAXDAT
actloc = np.zeros(MAXDAT, dtype = int) # need to set up data at node locations
for i in range(1,MAXDAT):
actloc[i] = i
# Set up the rotation/anisotropy matrices that are needed for the
# variogram and search:
print('Setting up rotation matrices for variogram and search')
radsqd = radius * radius
rotmat = []
for ic in range(0,ncut):
rotmat_temp, maxcov[ic] = setup_rotmat(c0[ic],int(nst[ic]),it[ic],cc[ic],ang[ic],9999.9)
rotmat.append(rotmat_temp)
#return rotmat
# Set up the covariance table and the spiral search based just on the first variogram
# This is ok as we are not using the covariance look up table, just spiral search for previous nodes
isrot = MAXNST*MAXCUT + 1 # note I removed anisotropic search here
# print('ang[0]'); print(ang[0])
if nst[0] == 1:
global_rotmat = setrot(ang[0,0],ang[0,0],sang1,anis[0,0],anis[0,0],sanis1,nst[0],MAXROT=2)
else:
        global_rotmat = setrot(ang[0,0],ang[0,1],sang1,anis[0,0],anis[0,1],sanis1,nst[0],MAXROT=2)
cov_table,tmp2,order,ixnode,iynode,nlooku,nctx,ncty = ctable(MAXNOD,MAXCXY,MAXCTX,MAXCTY,MXY,
xsiz,ysiz,isrot,nx,ny,nst[0],c0[0],cc[0],aa[0],it[0],ang[0],anis[0],global_rotmat,radsqd)
# print('spiral search number nodes '); print(nlooku)
# print('ixnode,iynode'); print(ixnode,iynode)
# Initialize accumulators: # not setup yet
nk = 0
xk = 0.0
vk = 0.0
for icut in range (0,ncut):
nviol[icut] = 0
aviol[icut] = 0.0
xviol[icut] = -1.0
# print('Working on the kriging')
# Report on progress from time to time:
if koption == 0:
nxy = nx*ny
nloop = nxy
irepo = max(1,min((nxy/10),10000))
else:
nloop = 10000000
irepo = max(1,min((nd/10),10000))
ddh = 0.0
    # MAIN LOOP OVER ALL THE SIMULATIONS:
# for isim in range(0,nsim): # will add multiple realizations soon
# Work out a random path for this realization:
sim = np.random.rand(nx*ny)
order = np.zeros(nxy)
ind = 0
for ixy in range(0,nxy):
order[ixy] = ind
ind = ind + 1
# Multiple grid search works with multiples of 4 (yes, that is
    # somewhat arbitrary):
if mults == 1:
for imult in range(0,nmult):
nny = int(max(1,ny/((imult+1)*4)))
nnx = int(max(1,nx/((imult+1)*4)))
# print('multi grid - nnx, nny'); print(nnx,nny)
jy = 1
jx = 1
for iy in range(0,nny):
if nny > 0: jy = iy*(imult+1)*4
for ix in range(0,nnx):
if nnx > 0: jx = ix*(imult+1)*4
index = jx + (jy-1)*nx
sim[index] = sim[index] - (imult+1)
    # Initialize the simulation:
sim, order = dsortem(0,nxy,sim,2,b=order)
sim.fill(UNEST)
tmp.fill(0.0)
print('Working on a single realization, seed ' + str(seed))
# print('Random Path'); print(order)
    # Assign the data to the closest grid node:
TINY = 0.0001
for idd in range(0,nd):
# print('data'); print(x[idd],y[idd])
ix = getindex(nx,xmn,xsiz,x[idd])
iy = getindex(ny,ymn,ysiz,y[idd])
ind = ix + (iy-1)*nx
xx = xmn + (ix)*xsiz
yy = ymn + (iy)*ysiz
# print('xx, yy' + str(xx) + ',' + str(yy))
test = abs(xx-x[idd]) + abs(yy-y[idd])
        # Assign this data to the node (unless there is a closer data):
if sstrat == 1 or (sstrat == 0 and test <= TINY):
if sim[ind] > UNEST:
id2 = int(sim[ind]+0.5)
test2 = abs(xx-x[id2]) + abs(yy-y[id2])
if test <= test2:
sim[ind] = idd
else:
sim[ind] = idd
        # Assign a flag so that this node does not get simulated:
    # Now, enter data values into the simulated grid:
for ind in range(0,nxy):
idd = int(sim[ind]+0.5)
if idd > 0:
            sim[ind] = vr[idd,ncut] # assign the original data value stored in the last column of vr
else:
tmp[ind] = sim[ind]
sim[ind] = UNEST
irepo = max(1,min((nxy/10),10000))
# LOOP OVER ALL THE NODES:
for ind in range(0,nxy):
if (int(ind/irepo)*irepo) == ind:
print(' currently on node ' + str(ind))
# Find the index on the random path, check if assigned data and get location
index = int(order[ind]+0.5)
if (sim[index] > (UNEST+EPSLON)) or (sim[index] < (UNEST*2.0)): continue
iy = int((index)/nx)
ix = index - (iy)*nx
xx = xmn + (ix)*xsiz
yy = ymn + (iy)*ysiz
current_node = (yy,xx)
# print('Current_node'); print(current_node)
# Now we'll simulate the point ix,iy,iz. First, get the close data
# and make sure that there are enough to actually simulate a value,
# we'll only keep the closest "ndmax" data, and look for previously
# simulated grid nodes:
if sstrat == 0:
# print('searching for nearest data')
na = -1 # accounting for 0 as first index
if ndmax == 1:
dist = np.zeros(1); nums = np.zeros(1)
dist[0], nums[0] = tree.query(current_node,ndmax) # use kd tree for fast nearest data search
else:
dist, nums = tree.query(current_node,ndmax)
# remove any data outside search radius
# print('nums'); print(nums)
# print('dist'); print(dist)
na = len(dist)
nums = nums[dist<radius]
dist = dist[dist<radius]
na = len(dist)
if na < ndmin: continue # bail if not enough data
# print('Found ' + str(na) + 'neighbouring data')
# print('node search inputs')
# print('nodmax ' + str(nodmax))
# print('ixnode'); print(ixnode)
# Indicator transform the nearest node data
# print('start node search')
ncnode, icnode, cnodev, cnodex, cnodey = srchnd(ix,iy,nx,ny,xmn,ymn,xsiz,ysiz,sim,noct,nodmax,ixnode,iynode,nlooku,nctx,ncty,UNEST)
        if ivtype == 0:
for icut in range(0,ncut):
cnodeiv[icut,:] = np.where((cnodev <= thresh[icut] + 0.5) & (cnodev > thresh[icut] - 0.5), '1', '0')
else:
for icut in range(0,ncut):
cnodeiv[icut,:] = np.where(cnodev <= thresh[icut], '1', '0')
cnodeiv[ncut,:] = cnodev
# print('indicator transformed nearest nodes'); print(cnodeiv)
# print('srchnd'); print(ncnode,icnode,cnodev,cnodex,cnodey)
# print('Result of srchnd, cnodex = '); print(cnodex)
nclose = na
# print('*****srch node, nclose ' + str(nclose) + ', ncnode ' + str(ncnode))
# print('near data'); print(nums)
# print('near data distance'); print(dist)
# print('nums'); print(nums)
# What cdf value are we looking for?
zval = UNEST
cdfval = np.random.rand()
# Use the global distribution?
# check inputs
# print('nst'); print(nst)
if nclose + ncnode <= 0:
# print('nclose & ncnode'); print(nclose, ncnode)
zval = beyond(ivtype,ncut,thresh,gcdf,ng,gcut,gcdf,zmin,zmax,ltail,ltpar,middle,mpar,utail,utpar,zval,cdfval)
else:
# print('kriging')
# Estimate the local distribution by indicator kriging:
# print('maxcov'); print(maxcov)
for ic in range(0,ncut):
# print('check kriging cov model- icut '); print(ic)
# print('node data values for kriging'); print(cnodev)
# print(cc[ic],aa[ic],it[ic],ang[ic],anis[ic],rotmat[ic],maxcov[ic])
#ccdf([ic] = krige(ix,iy,iz,xx,yy,zz,ic,cdf(ic),MAXCTX,MAXCTY,MAXCTZ,MAXKR1,ccdf(ic),MAXROT)
if ktype == 0:
gmean = gcdf[ic]
elif ktype == 2:
gmean = trend1d[index,ic]
else:
gmean = 0 # if locally variable mean it is set from trend in ikrige, otherwise not used
# print('gmean'); print(gmean)
ccdf[ic], cstdev = ikrige(ix,iy,nx,ny,xx,yy,ktype,x,y,vr[:,ic],sec,colocorr,gmean,trend[:,ic],nums,cov_table,nctx,ncty,
icnode,ixnode,iynode,cnodeiv[ic],cnodex,cnodey,nst[ic],c0[ic],9999.9,cc[ic],aa[ic],it[ic],ang[ic],anis[ic],
rotmat[ic],maxcov[ic],MAXCTX,MAXCTY,MAXKR1,MAXKR2)
# print('ccdf'); print(ccdf)
# Correct order relations:
ccdfo = ordrel(ivtype,ncut,ccdf)
# Draw from the local distribution:
zval = beyond(ivtype,ncut,thresh,ccdfo,ng,gcut,gcdf,zmin,zmax,ltail,ltpar,middle,mpar,utail,utpar,zval,cdfval)
sim[index] = zval
# print('zval'); print(zval)
# END MAIN LOOP OVER SIMULATIONS:
sim_out = np.zeros((ny,nx))
for ind in range(0,nxy):
iy = int((ind)/nx)
ix = ind - (iy)*nx
sim_out[ny-iy-1,ix] = sim[ind]
return sim_out
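# A minimal usage sketch for sisim (hypothetical values, shown as comments and not executed here).
# The DataFrame, thresholds, proportions and the plain variogram dictionaries below are illustrative
# assumptions only; the dictionary keys are exactly the ones sisim reads above.
#   import pandas as pd
#   df_facies = pd.DataFrame({'X': [150.0, 850.0], 'Y': [150.0, 850.0], 'Facies': [0, 1]})
#   iso_vario = dict(nug=0.0, nst=1, it1=1, cc1=1.0, azi1=0.0, hmaj1=300.0, hmin1=300.0)
#   trend_prop = np.full((100, 100, 2), 0.5)               # (ny, nx, ncut) local proportions
#   facies_real = sisim(df=df_facies, xcol='X', ycol='Y', vcol='Facies', ivtype=0, koption=0,
#                       ncut=2, thresh=[0, 1], gcdf=[0.5, 0.5], trend=trend_prop,
#                       tmin=-9999, tmax=9999, zmin=0.0, zmax=1.0, ltail=1, ltpar=1.0,
#                       middle=1, mpar=1.0, utail=1, utpar=1.0, nx=100, xmn=5.0, xsiz=10.0,
#                       ny=100, ymn=5.0, ysiz=10.0, seed=73073, ndmin=0, ndmax=10, nodmax=10,
#                       mults=1, nmult=2, noct=-1, radius=300.0, ktype=0,
#                       vario=[iso_vario, iso_vario])
#   # facies_real is a (ny, nx) array of simulated categories, flipped to map orientation.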
def kb2d_locations(
df,
xcol,
ycol,
vcol,
tmin,
tmax,
df_loc,
xcol_loc,
ycol_loc,
ndmin,
ndmax,
radius,
ktype,
skmean,
vario,
):
"""GSLIB's KB2D program (Deutsch and Journel, 1998) converted from the
original Fortran to Python by Michael Pyrcz, the University of Texas at
Austin (Jan, 2019). Version for kriging at a set of spatial locations.
:param df: pandas DataFrame with the spatial data
:param xcol: name of the x coordinate column
:param ycol: name of the y coordinate column
:param vcol: name of the property column
:param tmin: property trimming limit
:param tmax: property trimming limit
:param df_loc: pandas DataFrame with the locations to krige
    :param xcol_loc: name of the x coordinate column for locations to krige
    :param ycol_loc: name of the y coordinate column for locations to krige
:param ndmin: minimum number of data points to use for kriging a block
:param ndmax: maximum number of data points to use for kriging a block
:param radius: maximum isotropic search radius
    :param ktype: kriging type, 0 - simple kriging and 1 - ordinary kriging
    :param skmean: simple kriging mean (used if ktype = 0)
    :param vario: dictionary with the variogram parameters
    :return: kriging estimates and kriging variances, one value per location in df_loc
"""
# Constants
UNEST = -999.
EPSLON = 1.0e-10
VERSION = 2.907
first = True
PMX = 9999.0
MAXSAM = ndmax + 1
MAXKD = MAXSAM + 1
MAXKRG = MAXKD * MAXKD
# load the variogram
nst = vario['nst']
cc = np.zeros(nst); aa = np.zeros(nst); it = np.zeros(nst)
ang = np.zeros(nst); anis = np.zeros(nst)
c0 = vario['nug'];
cc[0] = vario['cc1']; it[0] = vario['it1']; ang[0] = vario['azi1'];
aa[0] = vario['hmaj1']; anis[0] = vario['hmin1']/vario['hmaj1'];
if nst == 2:
cc[1] = vario['cc2']; it[1] = vario['it2']; ang[1] = vario['azi2'];
aa[1] = vario['hmaj2']; anis[1] = vario['hmin2']/vario['hmaj2'];
# Allocate the needed memory:
xa = np.zeros(MAXSAM)
ya = np.zeros(MAXSAM)
vra = np.zeros(MAXSAM)
dist = np.zeros(MAXSAM)
nums = np.zeros(MAXSAM)
r = np.zeros(MAXKD)
rr = np.zeros(MAXKD)
s = np.zeros(MAXKD)
a = np.zeros(MAXKRG)
klist = np.zeros(len(df_loc)) # list of kriged estimates
vlist = np.zeros(len(df_loc))
# Load the data
df_extract = df.loc[(df[vcol] >= tmin) & (df[vcol] <= tmax)] # trim values outside tmin and tmax
nd = len(df_extract)
ndmax = min(ndmax,nd)
x = df_extract[xcol].values
y = df_extract[ycol].values
vr = df_extract[vcol].values
    # Load the estimation locations
nd_loc = len(df_loc)
    x_loc = df_loc[xcol_loc].values
    y_loc = df_loc[ycol_loc].values
vr_loc = df_loc[vcol].values
# Make a KDTree for fast search of nearest neighbours
dp = list((y[i], x[i]) for i in range(0,nd))
data_locs = np.column_stack((y,x))
tree = sp.cKDTree(data_locs, leafsize=16, compact_nodes=True, copy_data=False, balanced_tree=True)
# Summary statistics for the data after trimming
avg = vr.mean()
stdev = vr.std()
ss = stdev**2.0
vrmin = vr.min()
vrmax = vr.max()
# Initialize accumulators:
cbb = 0.0
rad2 = radius*radius
# Calculate Block Covariance. Check for point kriging.
rotmat, maxcov = setup_rotmat(c0,nst,it,cc,ang,PMX)
cov = cova2(0.0,0.0,0.0,0.0,nst,c0,PMX,cc,aa,it,ang,anis,rotmat,maxcov)
# Keep this value to use for the unbiasedness constraint:
unbias = cov
cbb = cov
first = False
# MAIN LOOP OVER ALL THE BLOCKS IN THE GRID:
nk = 0
ak = 0.0
vk = 0.0
for idata in range(len(df_loc)):
print('Working on location ' + str(idata))
xloc = x_loc[idata]
yloc = y_loc[idata]
current_node = (yloc,xloc)
# Find the nearest samples within each octant: First initialize
# the counter arrays:
na = -1 # accounting for 0 as first index
dist.fill(1.0e+20)
nums.fill(-1)
dist, nums = tree.query(current_node,ndmax) # use kd tree for fast nearest data search
# remove any data outside search radius
na = len(dist)
nums = nums[dist<radius]
dist = dist[dist<radius]
na = len(dist)
# Is there enough samples?
if na + 1 < ndmin: # accounting for min index of 0
est = UNEST
estv = UNEST
print('UNEST for Data ' + str(idata) + ', at ' + str(xloc) + ',' + str(yloc))
else:
# Put coordinates and values of neighborhood samples into xa,ya,vra:
for ia in range(0,na):
jj = int(nums[ia])
xa[ia] = x[jj]
ya[ia] = y[jj]
vra[ia] = vr[jj]
# Handle the situation of only one sample:
if na == 0: # accounting for min index of 0 - one sample case na = 0
cb1 = cova2(xa[0],ya[0],xa[0],ya[0],nst,c0,PMX,cc,aa,it,ang,anis,rotmat,maxcov)
xx = xa[0] - xloc
yy = ya[0] - yloc
# Establish Right Hand Side Covariance:
cb = cova2(xx,yy,0.0,0.0,nst,c0,PMX,cc,aa,it,ang,anis,rotmat,maxcov)
if ktype == 0:
s[0] = cb/cbb
est = s[0]*vra[0] + (1.0-s[0])*skmean
estv = cbb - s[0] * cb
else:
est = vra[0]
estv = cbb - 2.0*cb + cb1
else:
# Solve the Kriging System with more than one sample:
neq = na + ktype # accounting for first index of 0
# print('NEQ' + str(neq))
nn = (neq + 1)*neq/2
# Set up kriging matrices:
iin=-1 # accounting for first index of 0
for j in range(0,na):
# Establish Left Hand Side Covariance Matrix:
for i in range(0,na): # was j - want full matrix
iin = iin + 1
a[iin] = cova2(xa[i],ya[i],xa[j],ya[j],nst,c0,PMX,cc,aa,it,ang,anis,rotmat,maxcov)
if ktype == 1:
iin = iin + 1
a[iin] = unbias
xx = xa[j] - xloc
yy = ya[j] - yloc
# Establish Right Hand Side Covariance:
cb = cova2(xx,yy,0.0,0.0,nst,c0,PMX,cc,aa,it,ang,anis,rotmat,maxcov)
r[j] = cb
rr[j] = r[j]
# Set the unbiasedness constraint:
if ktype == 1:
for i in range(0,na):
iin = iin + 1
a[iin] = unbias
iin = iin + 1
a[iin] = 0.0
r[neq-1] = unbias
                    rr[neq-1] = r[neq-1]
# Solve the Kriging System:
# print('NDB' + str(ndb))
# print('NEQ' + str(neq) + ' Left' + str(a) + ' Right' + str(r))
# stop
s = ksol_numpy(neq,a,r)
ising = 0 # need to figure this out
# print('weights' + str(s))
# stop
# Write a warning if the matrix is singular:
if ising != 0:
print('WARNING KB2D: singular matrix')
                    print('              for location ' + str(xloc) + ',' + str(yloc))
est = UNEST
estv = UNEST
else:
# Compute the estimate and the kriging variance:
est = 0.0
estv = cbb
sumw = 0.0
if ktype == 1:
estv = estv - (s[na])*unbias
for i in range(0,na):
sumw = sumw + s[i]
est = est + s[i]*vra[i]
estv = estv - s[i]*rr[i]
if ktype == 0:
est = est + (1.0-sumw)*skmean
klist[idata] = est
vlist[idata] = estv
if est > UNEST:
nk = nk + 1
ak = ak + est
vk = vk + est*est
# END OF MAIN LOOP OVER ALL THE BLOCKS:
if nk >= 1:
ak = ak / float(nk)
vk = vk/float(nk) - ak*ak
print(' Estimated ' + str(nk) + ' blocks ')
print(' average ' + str(ak) + ' variance ' + str(vk))
return klist, vlist
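# A minimal usage sketch for kb2d_locations (hypothetical values, shown as comments and not
# executed here). The DataFrames and the plain variogram dictionary are illustrative assumptions;
# the dictionary keys are the ones read at the top of kb2d_locations.
#   import pandas as pd
#   df_data = pd.DataFrame({'X': [100.0, 500.0, 900.0], 'Y': [100.0, 500.0, 900.0],
#                           'Por': [0.10, 0.14, 0.18]})
#   df_targets = pd.DataFrame({'X': [300.0, 700.0], 'Y': [300.0, 700.0], 'Por': [0.0, 0.0]})
#   iso_vario = dict(nug=0.0, nst=1, it1=1, cc1=1.0, azi1=0.0, hmaj1=500.0, hmin1=500.0)
#   est, var = kb2d_locations(df_data, 'X', 'Y', 'Por', tmin=-9999, tmax=9999,
#                             df_loc=df_targets, xcol_loc='X', ycol_loc='Y',
#                             ndmin=0, ndmax=10, radius=600.0, ktype=1, skmean=0.14,
#                             vario=iso_vario)
#   # est and var hold the kriging estimate and variance for each row of df_targets.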
#Partial Correlation in Python (clone of Matlab's partialcorr)
#This uses the linear regression approach to compute the partial correlation
#(might be slow for a huge number of variables). The algorithm is detailed here:
# http://en.wikipedia.org/wiki/Partial_correlation#Using_linear_regression
#Taking X and Y two variables of interest and Z the matrix with all the variable minus {X, Y},
#the algorithm can be summarized as
# 1) perform a normal linear least-squares regression with X as the target and Z as the predictor
# 2) calculate the residuals in Step #1
# 3) perform a normal linear least-squares regression with Y as the target and Z as the predictor
# 4) calculate the residuals in Step #3
# 5) calculate the correlation coefficient between the residuals from Steps #2 and #4;
# The result is the partial correlation between X and Y while controlling for the effect of Z
#Date: Nov 2014
#Author: Fabian Pedregosa-Izquierdo, [email protected]
#Testing: Valentina Borghesani, [email protected]
def partial_corr(C):
# Returns the sample linear partial correlation coefficients between pairs of variables in C, controlling
# for the remaining variables in C.
# Parameters
# C : array-like, shape (n, p)
# Array with the different variables. Each column of C is taken as a variable
# Returns
# P : array-like, shape (p, p)
# P[i, j] contains the partial correlation of C[:, i] and C[:, j] controlling
# for the remaining variables in C.
C = np.asarray(C)
p = C.shape[1]
    P_corr = np.zeros((p, p), dtype=float)
for i in range(p):
P_corr[i, i] = 1
for j in range(i+1, p):
            idx = np.ones(p, dtype=bool)
idx[i] = False
idx[j] = False
beta_i = linalg.lstsq(C[:, idx], C[:, j])[0]
beta_j = linalg.lstsq(C[:, idx], C[:, i])[0]
res_j = C[:, j] - C[:, idx].dot( beta_i)
res_i = C[:, i] - C[:, idx].dot(beta_j)
corr = stats.pearsonr(res_i, res_j)[0]
P_corr[i, j] = corr
P_corr[j, i] = corr
return P_corr
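# A small usage sketch (synthetic data, shown as comments): each column of C is a variable and
# P[i, j] is the correlation of columns i and j after removing the influence of the others.
#   rng = np.random.default_rng(73073)
#   z = rng.normal(size=500)
#   x = z + 0.1 * rng.normal(size=500)
#   y = z + 0.1 * rng.normal(size=500)
#   P = partial_corr(np.column_stack((x, y, z)))
#   # P[0, 1] is corr(x, y | z); it should be much smaller than the raw corr(x, y) here.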
def semipartial_corr(C): # Michael Pyrcz modified the function above by Fabian Pedregosa-Izquierdo, [email protected] for semipartial correlation
C = np.asarray(C)
p = C.shape[1]
    P_corr = np.zeros((p, p), dtype=float)
for i in range(p):
P_corr[i, i] = 1
for j in range(i+1, p):
            idx = np.ones(p, dtype=bool)
idx[i] = False
idx[j] = False
beta_i = linalg.lstsq(C[:, idx], C[:, j])[0]
res_j = C[:, j] - C[:, idx].dot( beta_i)
res_i = C[:, i] # just use the value, not a residual
corr = stats.pearsonr(res_i, res_j)[0]
P_corr[i, j] = corr
P_corr[j, i] = corr
return P_corr
def sqdist3(x1,y1,z1,x2,y2,z2,ind,rotmat):
"""Squared Anisotropic Distance Calculation Given Matrix Indicator - 3D
This routine calculates the anisotropic distance between two points
given the coordinates of each point and a definition of the
anisotropy.
Converted from original fortran GSLIB (Deutsch and Journel, 1998) to Python by Wendi Liu, University of Texas at Austin
INPUT VARIABLES:
x1,y1,z1 Coordinates of first point
x2,y2,z2 Coordinates of second point
ind The rotation matrix to use
rotmat The rotation matrices"""
dx = x1 - x2
dy = y1 - y2
dz = z1 - z2
sqdist = 0.0
for i in range(3):
cont = rotmat[ind, i, 0] * dx + rotmat[ind, i, 1] * dy + rotmat[ind, i, 2] * dz
sqdist += cont**2
return sqdist
def setrot3(ang1,ang2,ang3,anis1,anis2,ind,rotmat):
"""Sets up an Anisotropic Rotation Matrix - 3D
Sets up the matrix to transform cartesian coordinates to coordinates
accounting for angles and anisotropy
Converted from original fortran GSLIB (Deutsch and Journel, 1998) to Python by Wendi Liu, University of Texas at Austin
INPUT PARAMETERS:
ang1 Azimuth angle for principal direction
ang2 Dip angle for principal direction
ang3 Third rotation angle
anis1 First anisotropy ratio
anis2 Second anisotropy ratio
ind matrix indicator to initialize
rotmat rotation matrices
Converts the input angles to three angles which make more mathematical sense:
alpha angle between the major axis of anisotropy and the
E-W axis. Note: Counter clockwise is positive.
beta angle between major axis and the horizontal plane.
(The dip of the ellipsoid measured positive down)
theta Angle of rotation of minor axis about the major axis
of the ellipsoid."""
DEG2RAD=np.pi/180.0; EPSLON=1e-20
if (ang1 >= 0.0)&(ang1<270.0):
alpha = (90.0 - ang1) * DEG2RAD
else:
alpha = (450.0 - ang1) * DEG2RAD
beta = -1.0 * ang2 *DEG2RAD
theta = ang3 * DEG2RAD
sina = np.sin(alpha)
sinb = np.sin(beta)
sint = np.sin(theta)
cosa = np.cos(alpha)
cosb = np.cos(beta)
cost = np.cos(theta)
### Construct the rotation matrix in the required memory
afac1 = 1.0/max(anis1, EPSLON)
afac2 = 1.0/max(anis2, EPSLON)
rotmat[ind,0,0] = cosb * cosa
rotmat[ind,0,1] = cosb * sina
rotmat[ind,0,2] = -sinb
rotmat[ind,1,0] = afac1*(-cost*sina + sint*sinb*cosa)
rotmat[ind,1,1] = afac1*(cost*cosa + sint*sinb*sina)
rotmat[ind,1,2] = afac1*( sint * cosb)
rotmat[ind,2,0] = afac2*(sint*sina + cost*sinb*cosa)
rotmat[ind,2,1] = afac2*(-sint*cosa + cost*sinb*sina)
rotmat[ind,2,2] = afac2*(cost * cosb)
return rotmat
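# A usage sketch combining setrot3 and sqdist3 (illustrative angles and ratios): fill one slot of
# a (5, 3, 3) rotation-matrix stack, then measure the squared anisotropic distance between two
# points with that slot.
#   rotmat3 = np.zeros((5, 3, 3))
#   rotmat3 = setrot3(45.0, 0.0, 0.0, 0.5, 0.25, 0, rotmat3)   # azimuth 045, anis ratios 0.5 / 0.25
#   d2 = sqdist3(0.0, 0.0, 0.0, 100.0, 100.0, 10.0, 0, rotmat3)
#   # d2 grows faster for offsets along the minor and vertical axes, scaled by 1/anis1 and 1/anis2.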
def gammabar(xsiz, ysiz, zsiz,nst,c0,it,cc,hmaj,hmin,hvert):
"""This program calculates the gammabar value from a 3D semivariogram model"""
"""Converted from original fortran GSLIB (Deutsch and Journel, 1998) to Python by Wendi Liu, University of Texas at Austin"""
###Initialization
rotmat = np.zeros((5, 3, 3))
EPSLON = 1.0e-20
MAXNST=4
maxcov=1.0
cmax = c0
nx = 3
ny = 3
nz = 6
ang1 = np.zeros((MAXNST,)) #azimuth
ang2 = np.ones((MAXNST,))*90.0 #dip
ang3 = np.zeros((MAXNST,)) #plenge
anis1 = np.zeros((MAXNST,))
anis2 = np.zeros((MAXNST,))
for i in range(nst):
anis1[i] = hmin[i]/max(hmaj[i],EPSLON)
anis2[i] = hvert[i]/max(hmaj[i],EPSLON)
rotmat = setrot3(ang1[i],ang2[i],ang3[i],anis1[i],anis2[i],i,rotmat)
cmax,cov = cova3(0.0,0.0,0.0,0.0,0.0,0.0,nst,c0,it,cc,hmaj,rotmat,cmax)
##Discretization parameters
xsz = xsiz/nx
xmn = xsz/2.0
xzero = xsz * 0.0001
ysz = ysiz/ny
ymn = ysz/2.0
yzero = ysz * 0.0001
zsz = zsiz/nz
zmn = zsz/2.0
zzero = zsz * 0.0001
##Calculate Gammabar
gb = 0.0
for ix in range(nx):
xxi = xmn +(ix-1)*xsz+xzero
for jx in range(nx):
xxj = xmn +(jx-1)*xsz
for iy in range(ny):
yyi = ymn +(iy-1)*ysz+yzero
for jy in range(ny):
yyj = ymn +(jy-1)*ysz
for iz in range(nz):
zzi = zmn +(iz-1)*zsz+zzero
for jz in range(nz):
zzj = zmn +(jz-1)*zsz
cmax,cov = cova3(xxi,yyi,zzi,xxj,yyj,zzj,nst,c0,it,cc,hmaj,rotmat,cmax)
gb += maxcov-cov
gb = gb/((nx*ny*nz)**2)
return gb
def gam_3D(array, tmin, tmax, xsiz, ysiz, zsiz, ixd, iyd, izd, nlag, isill):
"""GSLIB's GAM program (Deutsch and Journel, 1998) converted from the
original Fortran to Python by Michael Pyrcz, the University of Texas at
Austin (Nov, 2019).
    :param array: 3D gridded data / model
:param tmin: property trimming limit
:param tmax: property trimming limit
:param xsiz: grid cell extents in x direction
:param ysiz: grid cell extents in y direction
:param zsiz: grid cell extents in z direction
:param ixd: lag offset in grid cells
:param iyd: lag offset in grid cells
:param izd: lag offset in grid cells
:param nlag: number of lags to calculate
:param isill: 1 for standardize sill
:return: TODO
"""
    if array.ndim == 3:
        nz, ny, nx = array.shape
    elif array.ndim == 2:
        ny, nx = array.shape
        nz = 1
    elif array.ndim == 1:
        ny, nx = 1, len(array)
        nz = 1
nvarg = 1 # for multiple variograms repeat the program
nxyz = nx * ny * nz # TODO: not used
mxdlv = nlag
# Allocate the needed memory
lag = np.zeros(mxdlv)
vario = np.zeros(mxdlv)
hm = np.zeros(mxdlv)
tm = np.zeros(mxdlv)
hv = np.zeros(mxdlv) # TODO: not used
npp = np.zeros(mxdlv)
ivtail = np.zeros(nvarg + 2)
ivhead = np.zeros(nvarg + 2)
ivtype = np.zeros(nvarg + 2)
ivtail[0] = 0
ivhead[0] = 0
ivtype[0] = 0
# Summary statistics for the data after trimming
inside = (array > tmin) & (array < tmax)
avg = array[(array > tmin) & (array < tmax)].mean() # TODO: not used
stdev = array[(array > tmin) & (array < tmax)].std()
var = stdev ** 2.0
vrmin = array[(array > tmin) & (array < tmax)].min() # TODO: not used
vrmax = array[(array > tmin) & (array < tmax)].max() # TODO: not used
num = ((array > tmin) & (array < tmax)).sum() # TODO: not used
# For the fixed seed point, loop through all directions
for iz in range(0, nz):
for iy in range(0, ny):
for ix in range(0, nx):
if inside[iz, iy, ix]:
vrt = array[iz, iy, ix]
ixinc = ixd
iyinc = iyd
izinc = izd
ix1 = ix
iy1 = iy
iz1 = iz
                    for il in range(0, nlag):
                        ix1 = ix1 + ixinc
                        iy1 = iy1 + iyinc
                        iz1 = iz1 + izinc
                        if 0 <= ix1 < nx and 0 <= iy1 < ny and 0 <= iz1 < nz:
                            if inside[iz1, iy1, ix1]:
                                vrh = array[iz1, iy1, ix1]
                                npp[il] = npp[il] + 1
                                tm[il] = tm[il] + vrt
                                hm[il] = hm[il] + vrh
                                vario[il] = vario[il] + ((vrh - vrt) ** 2.0)
# Get average values for gam, hm, tm, hv, and tv, then compute the correct
# "variogram" measure
for il in range(0, nlag):
if npp[il] > 0:
rnum = npp[il]
lag[il] = np.sqrt((ixd * xsiz * il) ** 2 + (iyd * ysiz * il) ** 2 + (izd * zsiz * il) ** 2)
vario[il] = vario[il] / float(rnum)
hm[il] = hm[il] / float(rnum)
tm[il] = tm[il] / float(rnum)
# Standardize by the sill
if isill == 1:
vario[il] = vario[il] / var
# Semivariogram
vario[il] = 0.5 * vario[il]
return lag, vario, npp
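# A usage sketch for gam_3D on a synthetic model (illustrative sizes, shown as comments):
#   model = np.random.default_rng(73073).normal(size=(10, 50, 50))   # (nz, ny, nx)
#   lags, gamma, pairs = gam_3D(model, tmin=-9999, tmax=9999, xsiz=10.0, ysiz=10.0, zsiz=1.0,
#                               ixd=1, iyd=0, izd=0, nlag=20, isill=1)
#   # ixd/iyd/izd give the lag offset in grid cells (here the +x direction); gamma holds the
#   # standardized semivariogram and pairs the number of pairs contributing to each lag.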
def make_variogram_3D(
nug,
nst,
it1,
cc1,
azi1,
dip1,
hmax1,
hmed1,
hmin1,
it2=1,
cc2=0,
azi2=0,
dip2=0,
hmax2=0,
hmed2=0,
hmin2=0,
):
"""Make a dictionary of variogram parameters for application with spatial
estimation and simulation.
:param nug: Nugget constant (isotropic)
:param nst: Number of structures (up to 2)
:param it1: Structure of 1st variogram (1: Spherical, 2: Exponential, 3: Gaussian)
    :param cc1: Contribution of 1st variogram
:param azi1: Azimuth of 1st variogram
:param dip1: Dip of 1st variogram
:param hmax1: Range in major direction (Horizontal)
:param hmed1: Range in minor direction (Horizontal)
:param hmin1: Range in vertical direction
:param it2: Structure of 2nd variogram (1: Spherical, 2: Exponential, 3: Gaussian)
:param cc2: Contribution of 2nd variogram
:param azi2: Azimuth of 2nd variogram
    :param dip2: Dip of 2nd variogram
:param hmax2: Range in major direction (Horizontal)
:param hmed2: Range in minor direction (Horizontal)
:param hmin2: Range in vertical direction
:return: TODO
"""
if cc2 == 0:
nst = 1
var = dict(
[
("nug", nug),
("nst", nst),
("it1", it1),
("cc1", cc1),
("azi1", azi1),
("dip1", dip1),
("hmax1", hmax1),
("hmed1", hmed1),
("hmin1", hmin1),
("it2", it2),
("cc2", cc2),
("azi2", azi2),
("dip2", dip2),
("hmax2", hmax2),
("hmed2", hmed2),
("hmin2", hmin2),
]
)
if nug + cc1 + cc2 != 1:
print(
"\x1b[0;30;41m make_variogram Warning: "
"sill does not sum to 1.0, do not use in simulation \x1b[0m"
)
if (
cc1 < 0
or cc2 < 0
or nug < 0
or hmax1 < 0
or hmax2 < 0
or hmin1 < 0
or hmin2 < 0
):
print(
"\x1b[0;30;41m make_variogram Warning: "
"contributions and ranges must be all positive \x1b[0m"
)
if hmax1 < hmed1 or hmax2 < hmed2:
print(
"\x1b[0;30;41m make_variogram Warning: "
"major range should be greater than minor range \x1b[0m"
)
return var
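# A usage sketch (illustrative parameter values): one spherical structure with geometric
# anisotropy - horizontal major range 500, horizontal minor range 250, vertical range 10.
#   vario3d = make_variogram_3D(nug=0.0, nst=1, it1=1, cc1=1.0, azi1=45.0, dip1=0.0,
#                               hmax1=500.0, hmed1=250.0, hmin1=10.0)
#   # The returned dictionary uses the keys read by vmodel_3D and the other 3D routines below.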
def vmodel_3D(
nlag,
xlag,
azm,
dip,
vario
):
"""GSLIB's VMODEL program (Deutsch and Journel, 1998) converted from the
original Fortran to Python by Michael Pyrcz, the University of Texas at
Austin (Nov, 2019).
:param nlag: number of variogram lags
:param xlag: size of the lags
    :param azm: direction by 3D azimuth, 000 is y positive, 090 is x positive
:param dip: direction by 3D dip, 000 is horizontal to x-y plane, 090 is perpendicular to x-y plane
:param vario: dictionary with the variogram parameters
:return:
"""
# Parameters
MAXNST=4
DEG2RAD=3.14159265/180.0
MAXROT=MAXNST+1
EPSLON = 1.0e-20
VERSION= 1.01
# Declare arrays
index = np.zeros(nlag+1)
h = np.zeros(nlag+1)
gam = np.zeros(nlag+1)
cov = np.zeros(nlag+1)
ro = np.zeros(nlag+1)
# Load the variogram
nst = vario["nst"]
cc = np.zeros(nst)
aa = np.zeros(nst)
it = np.zeros(nst)
ang_azi = np.zeros(nst)
ang_dip = np.zeros(nst)
anis = np.zeros(nst)
anis_v = np.zeros(nst)
c0 = vario["nug"]
cc[0] = vario["cc1"]
it[0] = vario["it1"]
ang_azi[0] = vario["azi1"]
ang_dip[0] = vario["dip1"]
aa[0] = vario["hmax1"]
anis[0] = vario["hmed1"] / vario["hmax1"]
anis_v[0] = vario["hmin1"] / vario["hmax1"]
if nst == 2:
cc[1] = vario["cc2"]
it[1] = vario["it2"]
ang_azi[1] = vario["azi2"]
ang_dip[1] = vario["dip2"]
aa[1] = vario["hmax2"]
anis[1] = vario["hmed2"] / vario["hmax2"]
anis_v[1] = vario["hmin2"] / vario["hmax2"]
xoff = math.sin(DEG2RAD*azm)*math.cos(DEG2RAD*dip)*xlag
yoff = math.cos(DEG2RAD*azm)*math.cos(DEG2RAD*dip)*xlag
zoff = math.sin(DEG2RAD*dip)*xlag
print(' x,y,z offsets = ' + str(xoff) + ',' + str(yoff) + ',' + str(zoff))
rotmat, maxcov = setup_rotmat_3D(c0, nst, it, cc, ang_azi, ang_dip, 99999.9)
xx = 0.0; yy = 0.0; zz = 0.0;
for il in range(0,nlag+1):
index[il] = il
cov[il] = cova3(0.0,0.0,0.0,xx,yy,zz,nst,c0,9999.9,cc,aa,it,anis, anis_v, rotmat, maxcov)
gam[il] = maxcov - cov[il]
ro[il] = cov[il]/maxcov
h[il] = math.sqrt(max((xx*xx + yy*yy + zz*zz),0.0))
xx = xx + xoff
yy = yy + yoff
zz = zz + zoff
# finished
return index,h,gam,cov,ro
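# A usage sketch (hypothetical values): evaluate the model held in a make_variogram_3D dictionary
# (vario3d, as sketched above) along azimuth 045 in the horizontal plane.
#   index, h, gam, cov, ro = vmodel_3D(nlag=50, xlag=10.0, azm=45.0, dip=0.0, vario=vario3d)
#   # h holds the lag distances, gam the model semivariogram, cov the covariance and ro the
#   # correlogram along that direction.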
@jit(nopython=True)
def setup_rotmat_3D(c0, nst, it, cc, ang_azi, ang_dip, pmx):
"""Setup rotation matrix.
:param c0: nugget constant (isotropic)
:param nst: number of nested structures (max. 4)
:param it: Variogram shapes (i.e., Gaussian, Exponential, Spherical) of each nested structure
:param cc: multiplicative factor of each nested structure
:param ang_azi: azimuths of each nested structure
:param ang_dip: dips of each nested structure
:param pmx: constant 9999.0
:return: TODO
"""
PI = 3.141_592_65
DTOR = PI / 180.0
# The first time around, re-initialize the cosine matrix for the variogram
# structures
rotmat = np.zeros((9, nst))
maxcov = c0
for js in range(0, nst):
azmuth = (90.0 + ang_azi[js]) * DTOR
dip = (ang_dip[js]) * DTOR
rotmat[0, js] = math.cos(azmuth)
rotmat[1, js] = -1 * math.sin(azmuth)
rotmat[2, js] = 0
rotmat[3, js] = math.cos(dip) * math.sin(azmuth)
rotmat[4, js] = math.cos(dip) * math.cos(azmuth)
rotmat[5, js] = -1 * math.sin(dip)
rotmat[6, js] = math.sin(dip) * math.sin(azmuth)
rotmat[7, js] = math.sin(dip) * math.cos(azmuth)
rotmat[8, js] = math.cos(dip)
if it[js] == 4:
maxcov = maxcov + pmx
else:
maxcov = maxcov + cc[js]
return rotmat, maxcov
@jit(nopython=True)
def cova3(x1, y1, z1, x2, y2, z2, nst, c0, pmx, cc, aa, it, anis, anis_v, rotmat, maxcov):
"""Calculate the covariance associated with a variogram model specified by a
nugget effect and nested variogram structures.
:param x1: x coordinate of first point
:type x1: float
:param y1: y coordinate of first point
:type y1: float
:param z1: z coordinate of first point
:type z1: float
:param x2: x coordinate of second point
:type x2: float
:param y2: y coordinate of second point
:type y2: float
:param z2: z coordinate of second point
:type z2: float
:param nst: number of nested structures (maximum of 4)
:type nst: int
:param c0: isotropic nugget constant (TODO: not used)
:type c0: float
:param pmx: Maximum variogram value needed for kriging when using Power
model. Each nested structure that uses the power model uses a
unique value of PMX. Therefore, PMX needs to be large enough
to account for the singly largest structure that uses the Power
model.
:type pmx: float
:param cc: multiplicative factor of each nested structure
:type cc: an array
:param aa: parameter `a` of each nested structure
:type aa: an array
:param it: Type of each nested structure:
1: spherical model of range `a`
2: exponential model of param `a` (practical range is 3`a`)
3: gaussian model of param `a` (practical range is `a`*sqrt(3))
4: power model of power `a` (a must be 0 < `a` < 2). if linear
model: a = 1; c = slope
:type it: an array
    :param ang: Azimuth angle for the principal direction of continuity
(measured clockwise in degrees from y); TODO: not used
:type ang: an array
:param anis: Horizontal aspect ratio
:type anis: an array
:param anis_v: Vertical aspect ratio
:type anis_v: an array
:param rotmat: rotation matrices
:type rotmat: an array
:param maxcov: maximum covariance value
:type maxcov: float
:return: returns the covariance obtained from the variagram model
:type return: float
"""
""" Revised from Wendi Liu's code """
EPSLON = 0.000001
# Check for very small distance
dx = x2 - x1
dy = y2 - y1
dz = z2 - z1
if (dx * dx + dy * dy + dz * dz) < EPSLON:
cova3_ = maxcov
return cova3_
# Non-zero distance, loop over all the structures
cova3_ = 0.0
for js in range(0, nst):
# Compute the appropriate structural distance
dx1 = dx * rotmat[0, js] + dy * rotmat[1, js] + dz * rotmat[2, js]
dy1 = (dx * rotmat[3, js] + dy * rotmat[4, js] + dz * rotmat[5, js] ) / anis[js]
dz1 = (dx * rotmat[6, js] + dy * rotmat[7, js] + dz * rotmat[8, js] ) / anis_v[js]
h = math.sqrt(max((dx1 * dx1 + dy1 * dy1 + dz1 * dz1 ), 0.0))
if it[js] == 1:
# Spherical model
hr = h / aa[js]
if hr < 1.0:
cova3_ = cova3_ + cc[js] * (1.0 - hr * (1.5 - 0.5 * hr * hr))
elif it[js] == 2:
# Exponential model
cova3_ = cova3_ + cc[js] * np.exp(-3.0 * h / aa[js])
elif it[js] == 3:
# Gaussian model
hh = -3.0 * (h * h) / (aa[js] * aa[js])
cova3_ = cova3_ + cc[js] * np.exp(hh)
elif it[js] == 4:
# Power model
cov1 = pmx - cc[js] * (h ** aa[js])
cova3_ = cova3_ + cov1
return cova3_
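# A usage sketch (illustrative values, shown as comments): covariance between two points for a
# single spherical structure, using the rotation matrix produced by setup_rotmat_3D above.
#   it_ex = np.array([1]); cc_ex = np.array([1.0]); aa_ex = np.array([500.0])
#   azi_ex = np.array([45.0]); dip_ex = np.array([0.0])
#   anis_ex = np.array([0.5]); anis_v_ex = np.array([0.02])
#   rotmat_ex, maxcov_ex = setup_rotmat_3D(0.0, 1, it_ex, cc_ex, azi_ex, dip_ex, 9999.9)
#   c = cova3(0.0, 0.0, 0.0, 100.0, 100.0, 5.0, 1, 0.0, 9999.9, cc_ex, aa_ex, it_ex,
#             anis_ex, anis_v_ex, rotmat_ex, maxcov_ex)
#   # c decays from maxcov_ex toward zero as the anisotropic separation approaches the range.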
def gamv_3D(
df,
xcol,
ycol,
zcol,
vcol,
tmin,
tmax,
xlag,
xltol,
nlag,
azm,
dip,
atol,
dtol,
bandwh,
isill,
):
"""GSLIB's GAMV program (Deutsch and Journel, 1998) converted from the
original Fortran to Python by Michael Pyrcz, the University of Texas at
Austin (Nov, 2019).
    Note simplified for 3D, semivariogram only and one direction at a time.
:param df: pandas DataFrame with the spatial data
:param xcol: name of the x coordinate column
:param ycol: name of the y coordinate column
:param zcol: name of the z coordinate column
:param vcol: name of the property column
:param tmin: property trimming limit
:param tmax: property trimming limit
:param xlag: lag distance
:param xltol: lag distance tolerance
:param nlag: number of lags to calculate
:param azm: azimuth
:param dip: dip
:param atol: azimuth tolerance
:param dtol: dip tolerance
:param bandwh: horizontal bandwidth / maximum distance offset orthogonal to
azimuth
:param isill: 1 for standardize sill
:return: TODO
"""
# Load the data
# Trim values outside tmin and tmax
df_extract = df.loc[(df[vcol] >= tmin) & (df[vcol] <= tmax)]
nd = len(df_extract) # TODO: not used
x = df_extract[xcol].values
y = df_extract[ycol].values
z = df_extract[zcol].values
vr = df_extract[vcol].values
# Summary statistics for the data after trimming
avg = vr.mean() # TODO: not used
stdev = vr.std()
sills = stdev ** 2.0
ssq = sills # TODO: not used
vrmin = vr.min() # TODO: not used
vrmax = vr.max() # TODO: not used
# Define the distance tolerance if it isn't already
if xltol < 0.0:
xltol = 0.5 * xlag
# Loop over combinatorial of data pairs to calculate the variogram
dis, vario, npp = variogram_loop_3D(
x, y, z, vr, xlag, xltol, nlag, azm, dip, atol, dtol, bandwh
)
# Standardize sill to one by dividing all variogram values by the variance
for il in range(0, nlag + 2):
if isill == 1:
vario[il] = vario[il] / sills
# Apply 1/2 factor to go from variogram to semivariogram
vario[il] = 0.5 * vario[il]
return dis, vario, npp
def variogram_loop_3D(x, y, z, vr, xlag, xltol, nlag, azm, dip, atol, dtol, bandwh):
"""Calculate the variogram by looping over combinatorial of data pairs.
:param x: x values
:param y: y values
:param z: z values
:param vr: property values
:param xlag: lag distance
:param xltol: lag distance tolerance
:param nlag: number of lags to calculate
:param azm: azimuth
:param dip: dip
:param atol: azimuth tolerance
:param dtol: dip tolerance
:param bandwh: horizontal bandwidth / maximum distance offset orthogonal to
azimuth
:return: TODO
"""
# Allocate the needed memory
nvarg = 1
mxdlv = nlag + 2 # in gamv the npp etc. arrays go to nlag + 2
dis = np.zeros(mxdlv)
lag = np.zeros(mxdlv) # TODO: not used
vario = np.zeros(mxdlv)
hm = np.zeros(mxdlv)
tm = np.zeros(mxdlv)
hv = np.zeros(mxdlv) # TODO: not used
npp = np.zeros(mxdlv)
#ivtail = np.zeros(nvarg + 2)
#ivhead = np.zeros(nvarg + 2)
#ivtype = np.ones(nvarg + 2)
#ivtail[0] = 0
#ivhead[0] = 0
#ivtype[0] = 0
EPSLON = 1.0e-20
nd = len(x)
# The mathematical azimuth is measured counterclockwise from EW and
# not clockwise from NS as the conventional azimuth is
azmuth = (90.0 - azm) * math.pi / 180.0
dip = (dip) * math.pi / 180.0
uvxazm = math.cos(azmuth) * math.cos(dip)
uvyazm = math.sin(azmuth) * math.cos(dip)
uvzdip = math.sin(dip)
if atol <= 0.0:
csatol = math.cos(45.0 * math.pi / 180.0)
else:
csatol = math.cos(atol * math.pi / 180.0)
if dtol <= 0.0:
csdtol = math.cos(30.0 * math.pi / 180.0)
else:
csdtol = math.cos(dtol * math.pi / 180.0)
# Initialize the arrays for each direction, variogram, and lag
nsiz = nlag + 2 # TODO: not used
dismxs = ((float(nlag) + 0.5 - EPSLON) * xlag) ** 2
# Main loop over all pairs
for i in range(0, nd):
for j in range(0, nd):
# Definition of the lag corresponding to the current pair
dx = x[j] - x[i]
dy = y[j] - y[i]
dz = z[j] - z[i]
dxs = dx * dx
dys = dy * dy
dzs = dz * dz
hs = dxs + dys + dzs
if hs <= dismxs:
if hs < 0.0:
hs = 0.0
h = np.sqrt(hs)
# Determine which lag this is and skip if outside the defined
# distance tolerance
if h <= EPSLON:
lagbeg = 0
lagend = 0
else:
lagbeg = -1
lagend = -1
for ilag in range(1, nlag + 1):
# reduced to -1
if (
(xlag * float(ilag - 1) - xltol)
<= h
<= (xlag * float(ilag - 1) + xltol)
):
if lagbeg < 0:
lagbeg = ilag
lagend = ilag
if lagend >= 0:
# Definition of the direction corresponding to the current
# pair. All directions are considered (overlapping of
# direction tolerance cones is allowed)
# Check for an acceptable azimuth angle
dxy = np.sqrt(max((dxs + dys), 0.0))
dxyz = np.sqrt(max((dxs + dys + dzs), 0.0))
if dxy < EPSLON:
dcazm = 1.0
else:
dcazm = (dx * uvxazm + dy * uvyazm) / dxy
if dxyz < EPSLON:
dcdip = 1.0
else:
dcdip = (dx * uvxazm + dy * uvyazm + dz * uvzdip) / dxyz
# Check the horizontal bandwidth criteria (maximum deviation
# perpendicular to the specified direction azimuth)
band = np.cross([dx,dy,dz], [uvxazm, uvyazm, uvzdip])
band = np.sqrt(band.dot(band))
# Apply all the previous checks at once to avoid a lot of
# nested if statements
if (abs(dcazm) >= csatol) and (abs(dcdip) >= csdtol) and (abs(band) <= bandwh):
# Check whether or not an omni-directional variogram is
# being computed
omni = False
if atol >= 90.0:
omni = True
# For this variogram, sort out which is the tail and
# the head value
# iv = 0 # hardcoded just one variogram
# it = ivtype[iv] # TODO: not used
if dcazm >= 0.0:
vrh = vr[i]
vrt = vr[j]
if omni:
vrtpr = vr[i]
vrhpr = vr[j]
else:
vrh = vr[j]
vrt = vr[i]
if omni:
vrtpr = vr[j]
vrhpr = vr[i]
# Reject this pair on the basis of missing values
# Data was trimmed at the beginning
# The Semivariogram (all other types of measures are
# removed for now)
for il in range(lagbeg, lagend + 1):
npp[il] = npp[il] + 1
dis[il] = dis[il] + h
tm[il] = tm[il] + vrt
hm[il] = hm[il] + vrh
vario[il] = vario[il] + ((vrh - vrt) * (vrh - vrt))
if omni:
npp[il] = npp[il] + 1.0
dis[il] = dis[il] + h
tm[il] = tm[il] + vrtpr
hm[il] = hm[il] + vrhpr
vario[il] = vario[il] + (
(vrhpr - vrtpr) * (vrhpr - vrtpr)
)
# Get average values for gam, hm, tm, hv, and tv, then compute the correct
# "variogram" measure
for il in range(0, nlag + 2):
i = il
if npp[i] > 0:
rnum = npp[i]
dis[i] = dis[i] / rnum
vario[i] = vario[i] / rnum
hm[i] = hm[i] / rnum
tm[i] = tm[i] / rnum
return dis, vario, npp
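# A usage sketch for gamv_3D with scattered 3D data (hypothetical column names and values, and
# assuming pandas is available as pd, as the DataFrame-based routines in this module imply):
#   rng = np.random.default_rng(73073)
#   df_wells = pd.DataFrame({'X': rng.uniform(0, 1000, 200), 'Y': rng.uniform(0, 1000, 200),
#                            'Z': rng.uniform(0, 50, 200), 'Por': rng.normal(0.15, 0.03, 200)})
#   lag, gamma, pairs = gamv_3D(df_wells, 'X', 'Y', 'Z', 'Por', tmin=-9999, tmax=9999,
#                               xlag=50.0, xltol=25.0, nlag=20, azm=0.0, dip=0.0,
#                               atol=22.5, dtol=10.0, bandwh=9999.9, isill=1)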
| 36.29457 | 203 | 0.537044 |
79406b7cda8ca3fd218970e93ffa1d81f600b07e | 6,473 | py | Python | burplist/settings.py | ngshiheng/burplist | 87c4340e63fde476b25c3e7674b9486e8225f87f | [
"MIT"
] | 7 | 2021-11-02T03:19:23.000Z | 2022-01-27T08:05:51.000Z | burplist/settings.py | ngshiheng/burplist | 87c4340e63fde476b25c3e7674b9486e8225f87f | [
"MIT"
] | 10 | 2021-11-10T08:20:50.000Z | 2022-03-04T15:23:25.000Z | burplist/settings.py | ngshiheng/burplist | 87c4340e63fde476b25c3e7674b9486e8225f87f | [
"MIT"
] | 2 | 2021-11-05T04:27:02.000Z | 2022-01-27T08:05:53.000Z | import copy
import os
import scrapy.utils.log
from colorlog import ColoredFormatter
# Logging
color_formatter = ColoredFormatter(
(
'%(log_color)s%(levelname)-5s%(reset)s '
'%(yellow)s[%(asctime)s]%(reset)s'
'%(white)s %(name)s %(funcName)s %(bold_purple)s:%(lineno)d%(reset)s '
'%(log_color)s%(message)s%(reset)s'
),
datefmt='%d-%B-%y %H:%M:%S',
log_colors={
'DEBUG': 'blue',
'INFO': 'bold_cyan',
'WARNING': 'red',
'ERROR': 'bg_bold_red',
'CRITICAL': 'red,bg_white',
},
)
_get_handler = copy.copy(scrapy.utils.log._get_handler)
def _get_handler_custom(*args, **kwargs):
handler = _get_handler(*args, **kwargs)
handler.setFormatter(color_formatter)
return handler
scrapy.utils.log._get_handler = _get_handler_custom
# Burplist
BOT_NAME = 'burplist'
ENVIRONMENT = os.environ.get('ENVIRONMENT', 'development')
SPIDER_MODULES = ['burplist.spiders']
NEWSPIDER_MODULE = 'burplist.spiders'
# Scraper API
SCRAPER_API_KEY = os.environ.get('SCRAPER_API_KEY')
# Management Commands
COMMANDS_MODULE = 'burplist.commands'
# Crawl responsibly by identifying yourself (and your website) on the user-agent
# USER_AGENT = 'burplist (+http://www.yourdomain.com)'
# Obey robots.txt rules
ROBOTSTXT_OBEY = os.environ.get('ROBOTSTXT_OBEY', True)
# Configure maximum concurrent requests performed by Scrapy (default: 16)
# Currently `CONCURRENT_REQUESTS` is set to 5 as we are using the Scraper API free tier (https://www.scraperapi.com/pricing)
CONCURRENT_REQUESTS = 5 if SCRAPER_API_KEY is not None else 16
RETRY_TIMES = os.environ.get('RETRY_TIMES', 10)
# Configure a delay for requests for the same website (default: 0)
# See https://docs.scrapy.org/en/latest/topics/settings.html#download-delay
# See also autothrottle settings and docs
DOWNLOAD_DELAY = 0 if SCRAPER_API_KEY is not None else 2
# The download delay setting will honor only one of:
# CONCURRENT_REQUESTS_PER_DOMAIN = 16
# CONCURRENT_REQUESTS_PER_IP = 16
# Disable cookies (enabled by default)
# COOKIES_ENABLED = False
# Disable Telnet Console (enabled by default)
# TELNETCONSOLE_ENABLED = False
# Override the default request headers:
# DEFAULT_REQUEST_HEADERS = {
# 'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
# 'Accept-Language': 'en',
# }
# Enable or disable spider middlewares
# See https://docs.scrapy.org/en/latest/topics/spider-middleware.html
# SPIDER_MIDDLEWARES = {
# 'burplist.middlewares.BurplistSpiderMiddleware': 543,
# 'scrapy_splash.SplashDeduplicateArgsMiddleware': 100,
# }
# Enable or disable downloader middlewares
# See https://docs.scrapy.org/en/latest/topics/downloader-middleware.html
DOWNLOADER_MIDDLEWARES = {
# 'burplist.middlewares.BurplistDownloaderMiddleware': 543,
'scrapy.downloadermiddlewares.useragent.UserAgentMiddleware': None,
'scrapy.downloadermiddlewares.retry.RetryMiddleware': None,
'scrapy_fake_useragent.middleware.RandomUserAgentMiddleware': 400,
'scrapy_fake_useragent.middleware.RetryUserAgentMiddleware': 401,
# 'scrapy_splash.SplashCookiesMiddleware': 723,
# 'scrapy_splash.SplashMiddleware': 725,
'scrapy.downloadermiddlewares.httpcompression.HttpCompressionMiddleware': 810,
}
# scrapy-fake-useragent
# https://github.com/alecxe/scrapy-fake-useragent
FAKEUSERAGENT_PROVIDERS = [
'scrapy_fake_useragent.providers.FakeUserAgentProvider', # This is the first provider we'll try
'scrapy_fake_useragent.providers.FakerProvider', # If FakeUserAgentProvider fails, we'll use faker to generate a user-agent string for us
'scrapy_fake_useragent.providers.FixedUserAgentProvider', # Fall back to USER_AGENT value
]
USER_AGENT = 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/90.0.4430.93 Safari/537.36'
# Enable or disable extensions
# See https://docs.scrapy.org/en/latest/topics/extensions.html
EXTENSIONS = {
'burplist.extensions.SentryLogging': -1,
# 'scrapy.extensions.telnet.TelnetConsole': None,
}
# if ENVIRONMENT == 'main':
# EXTENSIONS['scrapy.extensions.statsmailer.StatsMailer'] = 500
# Configure item pipelines
# See https://docs.scrapy.org/en/latest/topics/item-pipeline.html
ITEM_PIPELINES = {
'burplist.pipelines.ExistingProductPricePipeline': 300,
'burplist.pipelines.NewProductPricePipeline': 400,
}
# Enable and configure the AutoThrottle extension (disabled by default)
# See https://docs.scrapy.org/en/latest/topics/autothrottle.html
AUTOTHROTTLE_ENABLED = True
# The initial download delay
AUTOTHROTTLE_START_DELAY = 5
# The maximum download delay to be set in case of high latencies
AUTOTHROTTLE_MAX_DELAY = 60
# The average number of requests Scrapy should be sending in parallel to
# each remote server
AUTOTHROTTLE_TARGET_CONCURRENCY = 1.0
# Enable showing throttling stats for every response received:
# AUTOTHROTTLE_DEBUG = False
# Enable and configure HTTP caching (disabled by default)
# See https://docs.scrapy.org/en/latest/topics/downloader-middleware.html#httpcache-middleware-settings
HTTPCACHE_ENABLED = True
HTTPCACHE_EXPIRATION_SECS = os.environ.get('HTTPCACHE_EXPIRATION_SECS', 0)
# HTTPCACHE_DIR = 'httpcache'
# HTTPCACHE_IGNORE_HTTP_CODES = []
# HTTPCACHE_STORAGE = 'scrapy.extensions.httpcache.FilesystemCacheStorage'
# Splash
# SPLASH_URL = 'http://localhost:8050'
# DUPEFILTER_CLASS = 'scrapy_splash.SplashAwareDupeFilter'
# HTTPCACHE_STORAGE = 'scrapy_splash.SplashAwareFSCacheStorage'
# Sentry
# https://stackoverflow.com/questions/25262765/handle-all-exception-in-scrapy-with-sentry
SENTRY_DSN = os.environ.get('SENTRY_DSN')
# PostgreSQL
DATABASE_CONNECTION_STRING = '{drivername}://{user}:{password}@{host}:{port}/{db_name}'.format(
drivername='postgresql',
user=os.environ.get('PG_USERNAME', 'postgres'),
password=os.environ.get('PG_PASSWORD', 'developmentpassword'),
host=os.environ.get('PG_HOST', 'localhost'),
port=os.environ.get('PG_PORT', '5432'),
db_name=os.environ.get('PG_DATABASE', 'burplist'),
)
# Email Settings
MAIL_FROM = os.environ.get('MAIL_FROM')
MAIL_HOST = os.environ.get('MAIL_HOST')
MAIL_PORT = os.environ.get('MAIL_PORT')
MAIL_USER = os.environ.get('MAIL_USER')
MAIL_PASS = os.environ.get('MAIL_PASS')
MAIL_TLS = os.environ.get('MAIL_TLS', True)
MAIL_SSL = os.environ.get('MAIL_SSL', True)
STATSMAILER_RCPTS = [os.environ.get('STATSMAILER_RCPTS')] # TODO: Get a list from environment variable
| 35.961111 | 142 | 0.753283 |
79406d8a9ec8f1bc3d6fb34d910f38ae9615b95c | 11,044 | py | Python | src/cone/app/tests/test_browser_sharing.py | lenadax/cone.app | b25c55aedb85e45a962003d2767a22a927cc61c0 | [
"BSD-3-Clause"
] | 1 | 2022-03-13T17:51:09.000Z | 2022-03-13T17:51:09.000Z | src/cone/app/tests/test_browser_sharing.py | lenadax/cone.app | b25c55aedb85e45a962003d2767a22a927cc61c0 | [
"BSD-3-Clause"
] | 1 | 2021-08-06T08:12:00.000Z | 2021-08-06T08:12:00.000Z | src/cone/app/tests/test_browser_sharing.py | lenadax/cone.app | b25c55aedb85e45a962003d2767a22a927cc61c0 | [
"BSD-3-Clause"
] | null | null | null | from cone.app import testing
from cone.app.browser.ajax import ajax_tile
from cone.app.browser.sharing import sharing
from cone.app.model import BaseNode
from cone.app.testing.mock import SharingNode
from cone.tile import render_tile
from cone.tile.tests import TileTestCase
from pyramid.exceptions import HTTPForbidden
class TestBrowserSharing(TileTestCase):
layer = testing.security
def test_render_sharing_view(self):
model = SharingNode(name='root')
request = self.layer.new_request()
err = self.expectError(
HTTPForbidden,
sharing,
model,
request
)
self.checkOutput("""
...Unauthorized: tile <cone.app.browser.sharing.SharingTile object at ...>
failed permission check...
""", str(err))
with self.layer.authenticated('manager'):
res = sharing(model, request)
self.assertTrue(res.text.find('<!DOCTYPE html>') > -1)
def test_render_sharing_tile(self):
root = SharingNode(name='root')
request = self.layer.new_request()
# Render sharing tile
with self.layer.authenticated('manager'):
res = render_tile(root, request, 'sharing')
self.checkOutput("""
...<table class="table table-striped table-condensed"
id="localacltable_table">...
""", res)
def test_search_principal(self):
root = SharingNode(name='root')
request = self.layer.new_request()
# Render sharing tile with search term
with self.layer.authenticated('manager'):
res = render_tile(root, request, 'sharing')
self.assertFalse(res.find('Manager User') > -1)
request.params['term'] = 'manager'
with self.layer.authenticated('manager'):
res = render_tile(root, request, 'sharing')
self.assertTrue(res.find('Manager User') > -1)
request.params['term'] = 'group1'
with self.layer.authenticated('manager'):
res = render_tile(root, request, 'sharing')
self.assertTrue(res.find('Group 1') > -1)
# Existing principal roles are not rendered if term found on request
root.principal_roles['viewer'] = ['editor']
with self.layer.authenticated('manager'):
res = render_tile(root, request, 'sharing')
expected = (
'<input checked="checked" '
'class="add_remove_role_for_principal" '
'id="input-viewer" name="viewer" type="checkbox" value="editor" />'
)
self.assertFalse(res.find(expected) > -1)
# Existing principal roles are rendered if no term found
del request.params['term']
with self.layer.authenticated('manager'):
res = render_tile(root, request, 'sharing')
self.assertTrue(res.find(expected) > -1)
def test_inherited_principal_roles(self):
root = SharingNode(name='root')
root.principal_roles['viewer'] = ['editor']
child = root['child'] = SharingNode()
child.role_inheritance = True
child.principal_roles['viewer'] = ['admin']
request = self.layer.new_request()
with self.layer.authenticated('manager'):
res = render_tile(child, request, 'sharing')
expected = (
'<input checked="checked" '
'class="add_remove_role_for_principal" disabled="disabled" '
'id="input-viewer" name="viewer" type="checkbox" value="editor" />'
)
self.assertTrue(res.find(expected) > -1)
expected = (
'<input checked="checked" '
'class="add_remove_role_for_principal" id="input-viewer" '
'name="viewer" type="checkbox" value="admin" />'
)
self.assertTrue(res.find(expected) > -1)
expected = (
'<input class="add_remove_role_for_principal" '
'id="input-viewer" name="viewer" type="checkbox" '
'value="manager" />'
)
self.assertTrue(res.find(expected) > -1)
def test_table_sorting(self):
root = SharingNode(name='root')
child = root['child'] = SharingNode()
child.principal_roles['viewer'] = ['admin']
child.principal_roles['editor'] = ['admin']
request = self.layer.new_request()
# Sharing table sorting
with self.layer.authenticated('manager'):
res = render_tile(child, request, 'sharing')
self.assertTrue(res.find('Editor User') > -1)
self.assertTrue(res.find('Viewer User') > -1)
self.assertTrue(res.find('Editor User') < res.find('Viewer User'))
request.params['order'] = 'desc'
with self.layer.authenticated('manager'):
res = render_tile(child, request, 'sharing')
self.assertTrue(res.find('Editor User') > -1)
self.assertTrue(res.find('Viewer User') > -1)
self.assertFalse(res.find('Editor User') < res.find('Viewer User'))
del request.params['order']
def test_skip_inexistent(self):
root = SharingNode(name='root')
child = root['child'] = SharingNode()
        # Users defined in ``principal_roles`` that do not exist in the ugm
        # are skipped. This can happen if a user was deleted but its
        # principal roles were not.
child.principal_roles['inexistent'] = ['viewer']
request = self.layer.new_request()
with self.layer.authenticated('manager'):
res = render_tile(child, request, 'sharing')
self.assertFalse(res.find('name="inexistent"') > -1)
def test_add_role(self):
root = SharingNode(name='root')
child = root['child'] = SharingNode()
child.principal_roles['viewer'] = ['admin']
child.principal_roles['editor'] = ['admin']
# Add role for user
request = self.layer.new_request()
request.params['id'] = 'viewer'
request.params['role'] = 'manager'
request.params['bdajax.action'] = 'add_principal_role'
request.params['bdajax.mode'] = 'NONE'
request.params['bdajax.selector'] = 'NONE'
# Nothing happens if success
with self.layer.authenticated('manager'):
res = ajax_tile(child, request)
self.assertEqual(res, {
'continuation': False,
'payload': u'',
'mode': 'NONE',
'selector': 'NONE'
})
# Principal roles have changed
self.assertEqual(len(child.principal_roles), 2)
self.assertEqual(
sorted(child.principal_roles['viewer']),
['admin', 'manager']
)
self.assertEqual(child.principal_roles['editor'], ['admin'])
# Add role for user not added yet
request.params['id'] = 'otheruser'
request.params['role'] = 'manager'
with self.layer.authenticated('manager'):
res = ajax_tile(child, request)
self.assertEqual(res, {
'continuation': False,
'payload': u'',
'mode': 'NONE',
'selector': 'NONE'
})
self.assertEqual(len(child.principal_roles), 3)
self.assertEqual(
sorted(child.principal_roles['viewer']),
['admin', 'manager']
)
self.assertEqual(child.principal_roles['editor'], ['admin'])
self.assertEqual(child.principal_roles['otheruser'], ['manager'])
# If an error occurs, a message gets displayed
invalid_node = BaseNode()
request.params['id'] = 'viewer'
with self.layer.authenticated('manager'):
res = ajax_tile(invalid_node, request)
self.assertEqual(res, {
'continuation': [{
'flavor': 'error',
'type': 'message',
'payload': u"Can not add role 'manager' for principal 'viewer'",
'selector': None
}],
'payload': u'',
'mode': 'NONE',
'selector': 'NONE'
})
def test_remove_role(self):
root = SharingNode(name='root')
child = root['child'] = SharingNode()
child.principal_roles['viewer'] = ['admin', 'manager']
child.principal_roles['editor'] = ['admin']
child.principal_roles['otheruser'] = ['manager']
# Remove role for user
request = self.layer.new_request()
request.params['id'] = 'viewer'
request.params['role'] = 'manager'
request.params['bdajax.action'] = 'remove_principal_role'
request.params['bdajax.mode'] = 'NONE'
request.params['bdajax.selector'] = 'NONE'
# Nothing happens if success
with self.layer.authenticated('manager'):
res = ajax_tile(child, request)
self.assertEqual(res, {
'continuation': False,
'payload': u'',
'mode': 'NONE',
'selector': 'NONE'
})
        # Principal roles have changed
self.assertEqual(child.principal_roles, {
'viewer': ['admin'],
'editor': ['admin'],
'otheruser': ['manager']
})
# Principal id gets removed if no more roles left
request.params['id'] = 'otheruser'
request.params['role'] = 'manager'
with self.layer.authenticated('manager'):
res = ajax_tile(child, request)
self.assertEqual(res, {
'continuation': False,
'payload': u'',
'mode': 'NONE',
'selector': 'NONE'
})
self.assertEqual(child.principal_roles, {
'viewer': ['admin'],
'editor': ['admin']
})
# If an error occurs, a message gets displayed.
# Inexistent role
request.params['id'] = 'viewer'
request.params['role'] = 'inexistent'
with self.layer.authenticated('manager'):
res = ajax_tile(child, request)
self.assertEqual(res, {
'continuation': [{
'flavor': 'error',
'type': 'message',
'payload': u"Can not remove role 'inexistent' for principal 'viewer'",
'selector': None
}],
'payload': u'',
'mode': 'NONE',
'selector': 'NONE'
})
# Inexistent userid
request = self.layer.new_request()
request.params['id'] = 'foo'
request.params['role'] = 'manager'
request.params['bdajax.action'] = 'remove_principal_role'
request.params['bdajax.mode'] = 'NONE'
request.params['bdajax.selector'] = 'NONE'
with self.layer.authenticated('manager'):
res = ajax_tile(child, request)
self.assertEqual(res, {
'continuation': [{
'flavor': 'error',
'type': 'message',
'payload': u"Can not remove role 'manager' for principal 'foo'",
'selector': None
}],
'payload': u'',
'mode': 'NONE',
'selector': 'NONE'
})
| 35.625806 | 86 | 0.566099 |
79406dd14bfbd45c61739c40ecb835536a0f6344 | 498 | py | Python | env/lib/python3.8/site-packages/plotly/validators/layout/geo/_countrywidth.py | acrucetta/Chicago_COVI_WebApp | a37c9f492a20dcd625f8647067394617988de913 | [
"MIT",
"Unlicense"
] | 76 | 2020-07-06T14:44:05.000Z | 2022-02-14T15:30:21.000Z | env/lib/python3.8/site-packages/plotly/validators/layout/geo/_countrywidth.py | acrucetta/Chicago_COVI_WebApp | a37c9f492a20dcd625f8647067394617988de913 | [
"MIT",
"Unlicense"
] | 11 | 2020-08-09T02:30:14.000Z | 2022-03-12T00:50:14.000Z | env/lib/python3.8/site-packages/plotly/validators/layout/geo/_countrywidth.py | acrucetta/Chicago_COVI_WebApp | a37c9f492a20dcd625f8647067394617988de913 | [
"MIT",
"Unlicense"
] | 11 | 2020-07-12T16:18:07.000Z | 2022-02-05T16:48:35.000Z | import _plotly_utils.basevalidators
class CountrywidthValidator(_plotly_utils.basevalidators.NumberValidator):
def __init__(self, plotly_name="countrywidth", parent_name="layout.geo", **kwargs):
super(CountrywidthValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
edit_type=kwargs.pop("edit_type", "plot"),
min=kwargs.pop("min", 0),
role=kwargs.pop("role", "style"),
**kwargs
)
| 35.571429 | 87 | 0.646586 |
79406e5aa1041afe982444b0c01b398016e8c98c | 1,626 | py | Python | venv/lib/python3.6/site-packages/phonenumbers/shortdata/region_US.py | exdeam/opencrm | dfdcfdf99f0b42eb3959171927cb6574583f5ee0 | [
"MIT"
] | null | null | null | venv/lib/python3.6/site-packages/phonenumbers/shortdata/region_US.py | exdeam/opencrm | dfdcfdf99f0b42eb3959171927cb6574583f5ee0 | [
"MIT"
] | null | null | null | venv/lib/python3.6/site-packages/phonenumbers/shortdata/region_US.py | exdeam/opencrm | dfdcfdf99f0b42eb3959171927cb6574583f5ee0 | [
"MIT"
] | null | null | null | """Auto-generated file, do not edit by hand. US metadata"""
from ..phonemetadata import NumberFormat, PhoneNumberDesc, PhoneMetadata
PHONE_METADATA_US = PhoneMetadata(id='US', country_code=None, international_prefix=None,
general_desc=PhoneNumberDesc(national_number_pattern='[1-9]\\d{2,5}', possible_length=(3, 4, 5, 6)),
toll_free=PhoneNumberDesc(national_number_pattern='112|[69]11', example_number='112', possible_length=(3,)),
premium_rate=PhoneNumberDesc(national_number_pattern='2(?:4280|5209|7(?:449|663))|3(?:2340|3786|5564|8(?:135|254))|4(?:1(?:366|463)|3355|6(?:15|32)7|7553|82(?:21|77))|5(?:2944|4892|5928|9(?:187|342))|69388|7(?:20(?:78|87)|3(?:288|909)|6426)|8(?:6234|9616)|9(?:5297|6(?:040|835)|7(?:294|688)|9(?:689|796))', example_number='24280', possible_length=(5,)),
emergency=PhoneNumberDesc(national_number_pattern='112|911', example_number='112', possible_length=(3,)),
short_code=PhoneNumberDesc(national_number_pattern='11(?:2|5[1-47]|[68]\\d|7[0-57]|98)|[2-9]\\d{3,5}|[2-9]11', example_number='112', possible_length=(3, 4, 5, 6)),
standard_rate=PhoneNumberDesc(national_number_pattern='2(?:3333|(?:4224|7562|900)2|56447|6688)|3(?:1010|2665|7404)|40404|560560|6(?:0060|22639|5246|7622)|7(?:0701|3822|4666)|8(?:(?:3825|7226)5|4816)|99099', example_number='23333', possible_length=(5, 6)),
carrier_specific=PhoneNumberDesc(national_number_pattern='336\\d\\d|[2-9]\\d{3}|[2356]11', example_number='211', possible_length=(3, 4, 5)),
sms_services=PhoneNumberDesc(national_number_pattern='[2-9]\\d{4,5}', example_number='20000', possible_length=(5, 6)),
short_data=True)
| 116.142857 | 357 | 0.712177 |
79406f3d5b496f317119afa17d667f503dfcf6e7 | 480 | py | Python | demo/exclusive.py | abbudao/slides-pulsar | 178bc49a9a73efc55147a7143c39f2438981da2f | [
"MIT"
] | null | null | null | demo/exclusive.py | abbudao/slides-pulsar | 178bc49a9a73efc55147a7143c39f2438981da2f | [
"MIT"
] | null | null | null | demo/exclusive.py | abbudao/slides-pulsar | 178bc49a9a73efc55147a7143c39f2438981da2f | [
"MIT"
] | null | null | null | from pulsar import Client, ConsumerType
client = Client('pulsar://localhost:6650')
consumer = client.subscribe(
'persistent://public/default/tick',
'i-am-exclusive',
consumer_type=ConsumerType.Exclusive
)
while True:
msg = consumer.receive()
try:
data = msg.data().decode('utf-8')
print(f"Received message {data} id={msg.message_id()}")
consumer.acknowledge(msg)
    except Exception:
consumer.negative_acknowledge(msg)
client.close()
| 24 | 63 | 0.670833 |
79406f497f2feb85887ffda612ec55e2d7bd746c | 934 | py | Python | python-katas/tests/test_console.py | rtimmons/katas | 70aab07385c80125209de4f6ee6021a6b8bbf521 | [
"MIT"
] | null | null | null | python-katas/tests/test_console.py | rtimmons/katas | 70aab07385c80125209de4f6ee6021a6b8bbf521 | [
"MIT"
] | null | null | null | python-katas/tests/test_console.py | rtimmons/katas | 70aab07385c80125209de4f6ee6021a6b8bbf521 | [
"MIT"
] | null | null | null | """Test cases for the console module."""
from unittest.mock import Mock
from click.testing import CliRunner
import pytest
from pytest_mock import MockFixture
import requests
from katas import console
@pytest.fixture
def runner() -> CliRunner:
"""Fixture for invoking command-line interfaces."""
return CliRunner()
@pytest.fixture
def mock_wikipedia_random_page(mocker: MockFixture) -> Mock:
"""Fixture for mocking wikipedia.random_page."""
return mocker.patch("katas.wikipedia.random_page")
def test_main_succeeds(runner: CliRunner, mock_requests_get: Mock) -> None:
"""It exits with a status code of zero."""
result = runner.invoke(console.main)
assert result.exit_code == 0
@pytest.mark.e2e
def test_main_succeeds_in_production_env(runner: CliRunner) -> None:
"""It exits with a status code of zero (end-to-end)."""
result = runner.invoke(console.main)
assert result.exit_code == 0
| 26.685714 | 75 | 0.738758 |
79406fa62ec7b5536430f4eab04190ea7a4c5862 | 326 | py | Python | prediction_flow/transformers/column/tests/test_log_transformer.py | dydcfg/prediction-flow | 332068f521bba51acc8600fe72e36e92c331bef1 | [
"MIT"
] | 211 | 2019-08-02T23:04:40.000Z | 2022-03-18T06:36:25.000Z | prediction_flow/transformers/column/tests/test_log_transformer.py | dydcfg/prediction-flow | 332068f521bba51acc8600fe72e36e92c331bef1 | [
"MIT"
] | 18 | 2019-08-10T07:13:05.000Z | 2022-03-17T10:45:30.000Z | prediction_flow/transformers/column/tests/test_log_transformer.py | dydcfg/prediction-flow | 332068f521bba51acc8600fe72e36e92c331bef1 | [
"MIT"
] | 51 | 2019-08-02T23:04:41.000Z | 2021-12-24T02:48:58.000Z | import numpy as np
from prediction_flow.transformers.column import LogTransformer
def test_normal():
log_transformer = LogTransformer()
x = np.array([100, 10, 32])
log_transformer.fit(x)
np.testing.assert_array_almost_equal(
log_transformer.transform(x), np.array([4.615121, 2.397895, 3.496508]))
| 23.285714 | 79 | 0.723926 |
7940708b7c973ed49bebcb5ac4f45d85504d0a00 | 1,782 | py | Python | main.py | EH30/To-Do-List | 0a58c247bbd15f6828bcc8f72c334f67cc9f2b58 | [
"Unlicense"
] | null | null | null | main.py | EH30/To-Do-List | 0a58c247bbd15f6828bcc8f72c334f67cc9f2b58 | [
"Unlicense"
] | 1 | 2021-10-01T08:51:40.000Z | 2021-10-01T08:51:40.000Z | main.py | EH30/To-Do-List | 0a58c247bbd15f6828bcc8f72c334f67cc9f2b58 | [
"Unlicense"
] | 1 | 2021-10-01T08:10:21.000Z | 2021-10-01T08:10:21.000Z | import platform
import sys
import json
import todo
import tkinter
from tkinter.constants import RIGHT, BOTTOM
# Created By EH
# -----------------------
# this script was tested on python 3.9
if len(sys.argv) > 1:
if type(sys.argv[1]) == str and sys.argv[1] == "--reset":
with open("data.json", "w") as opn:
opn.seek(0)
json.dump({"data":[]}, opn, indent=4)
opn.truncate()
exit(0)
if __name__ == "__main__":
root = tkinter.Tk()
mframe = tkinter.Frame(root)
root.geometry("800x500")
root.title("To-Do-List")
if platform.system() == "Windows":
root.iconbitmap("icon.ico")
ver = tkinter.Label(root, text="v1.2")
lbl_ent = tkinter.Label(mframe, text="Enter:")
lbl_lbox = tkinter.Label(mframe, text="ListBox: ")
lbl = tkinter.Label(root, text="To-Do-List", font=("", 20))
ver.place(x=0, y=0)
lbl_lbox.place(x=0, y=0)
lbl_ent.place(x=0, y=203)
ent = todo.AppEntry(mframe)
l_box = todo.AppBox(mframe)
app = todo.App(mframe, l_box, ent)
app.load_data()
root.bind("<Return>", app.add)
root.bind("<Delete>", app.delete)
lbl.pack()
mframe.pack(ipadx=55, ipady=70)
l_box.vsbar.pack(side=RIGHT, fill="y")
l_box.hsbar.pack(side=BOTTOM, fill="x")
l_box.lb.pack()
l_box.tframe.pack()
app.errtxt.pack()
app.count.place(x=333, y=0)
ent.ent.place(width=200, x=58, y=203)
app.btn.b_add.place(width=55,height=45, x=30, y=250)
app.btn.b_delete.place(width=55,height=45, x=103, y=250)
app.btn.b_check.place(width=55,height=45, x=175, y=250)
app.btn.b_uncheck.place(width=55,height=45, x=250, y=250)
root.mainloop()
| 27.415385 | 64 | 0.578002 |
7940709b9e9412b00c2c6325745e1775e6fb3b0f | 4,384 | py | Python | benchmark/startQiskit_noisy1591.py | UCLA-SEAL/QDiff | d968cbc47fe926b7f88b4adf10490f1edd6f8819 | [
"BSD-3-Clause"
] | null | null | null | benchmark/startQiskit_noisy1591.py | UCLA-SEAL/QDiff | d968cbc47fe926b7f88b4adf10490f1edd6f8819 | [
"BSD-3-Clause"
] | null | null | null | benchmark/startQiskit_noisy1591.py | UCLA-SEAL/QDiff | d968cbc47fe926b7f88b4adf10490f1edd6f8819 | [
"BSD-3-Clause"
] | null | null | null | # qubit number=5
# total number=50
import cirq
import qiskit
from qiskit.providers.aer import QasmSimulator
from qiskit.test.mock import FakeVigo
from qiskit import QuantumCircuit, QuantumRegister, ClassicalRegister
from qiskit import BasicAer, execute, transpile
from pprint import pprint
from qiskit.test.mock import FakeVigo
from math import log2,floor, sqrt, pi
import numpy as np
import networkx as nx
def build_oracle(n: int, f) -> QuantumCircuit:
# implement the oracle O_f^\pm
# NOTE: use U1 gate (P gate) with \lambda = 180 ==> CZ gate
# or multi_control_Z_gate (issue #127)
controls = QuantumRegister(n, "ofc")
oracle = QuantumCircuit(controls, name="Zf")
for i in range(2 ** n):
rep = np.binary_repr(i, n)
if f(rep) == "1":
for j in range(n):
if rep[j] == "0":
oracle.x(controls[j])
# oracle.h(controls[n])
if n >= 2:
oracle.mcu1(pi, controls[1:], controls[0])
for j in range(n):
if rep[j] == "0":
oracle.x(controls[j])
# oracle.barrier()
return oracle
def make_circuit(n:int,f) -> QuantumCircuit:
# circuit begin
input_qubit = QuantumRegister(n,"qc")
classical = ClassicalRegister(n, "qm")
prog = QuantumCircuit(input_qubit, classical)
prog.h(input_qubit[0]) # number=3
prog.h(input_qubit[1]) # number=4
prog.h(input_qubit[2]) # number=5
prog.h(input_qubit[3]) # number=6
prog.h(input_qubit[4]) # number=21
prog.h(input_qubit[0]) # number=44
prog.cz(input_qubit[3],input_qubit[0]) # number=45
prog.h(input_qubit[0]) # number=46
prog.z(input_qubit[3]) # number=33
prog.cx(input_qubit[3],input_qubit[0]) # number=34
prog.rx(0.11938052083641225,input_qubit[1]) # number=36
Zf = build_oracle(n, f)
repeat = floor(sqrt(2 ** n) * pi / 4)
for i in range(repeat):
prog.append(Zf.to_gate(), [input_qubit[i] for i in range(n)])
prog.h(input_qubit[0]) # number=1
prog.rx(1.4765485471872026,input_qubit[2]) # number=35
prog.h(input_qubit[1]) # number=2
prog.h(input_qubit[2]) # number=7
prog.h(input_qubit[3]) # number=8
prog.cx(input_qubit[1],input_qubit[0]) # number=41
prog.x(input_qubit[0]) # number=42
prog.cx(input_qubit[1],input_qubit[0]) # number=43
prog.x(input_qubit[4]) # number=30
prog.cx(input_qubit[0],input_qubit[1]) # number=47
prog.x(input_qubit[1]) # number=48
prog.cx(input_qubit[0],input_qubit[1]) # number=49
prog.x(input_qubit[2]) # number=11
prog.rx(0.45238934211692994,input_qubit[3]) # number=38
prog.y(input_qubit[1]) # number=39
prog.rx(-2.5258404934861938,input_qubit[1]) # number=25
prog.h(input_qubit[3]) # number=29
prog.cx(input_qubit[0],input_qubit[3]) # number=22
prog.x(input_qubit[3]) # number=23
prog.cx(input_qubit[0],input_qubit[3]) # number=24
if n>=2:
prog.mcu1(pi,input_qubit[1:],input_qubit[0])
prog.x(input_qubit[0]) # number=13
prog.rx(-0.0722566310325653,input_qubit[4]) # number=37
prog.x(input_qubit[1]) # number=14
prog.cx(input_qubit[0],input_qubit[2]) # number=26
prog.x(input_qubit[2]) # number=27
prog.h(input_qubit[4]) # number=40
prog.cx(input_qubit[0],input_qubit[2]) # number=28
prog.x(input_qubit[3]) # number=16
prog.h(input_qubit[0]) # number=17
prog.h(input_qubit[1]) # number=18
prog.h(input_qubit[2]) # number=19
prog.h(input_qubit[3]) # number=20
# circuit end
for i in range(n):
prog.measure(input_qubit[i], classical[i])
return prog
if __name__ == '__main__':
key = "00000"
f = lambda rep: str(int(rep == key))
prog = make_circuit(5,f)
backend = FakeVigo()
sample_shot =7924
info = execute(prog, backend=backend, shots=sample_shot).result().get_counts()
backend = FakeVigo()
circuit1 = transpile(prog,backend,optimization_level=2)
writefile = open("../data/startQiskit_noisy1591.csv","w")
print(info,file=writefile)
print("results end", file=writefile)
print(circuit1.depth(),file=writefile)
print(circuit1,file=writefile)
writefile.close()
| 32.474074 | 82 | 0.619982 |
794070fb918a6fbe5d674ef6743f655648872977 | 2,688 | py | Python | lib/aws_utils.py | murkyl/ecs_meta_search | 0c02a5c1feece877c441569cc199424102ceca01 | [
"MIT"
] | 2 | 2019-06-05T04:43:54.000Z | 2020-07-15T05:49:57.000Z | lib/aws_utils.py | murkyl/ecs_meta_search | 0c02a5c1feece877c441569cc199424102ceca01 | [
"MIT"
] | null | null | null | lib/aws_utils.py | murkyl/ecs_meta_search | 0c02a5c1feece877c441569cc199424102ceca01 | [
"MIT"
] | 5 | 2020-02-21T22:47:35.000Z | 2022-02-03T15:21:39.000Z | import time
import datetime
import base64
import hmac
import hashlib
try:
from urllib import quote_plus, quote
except:
from urllib.parse import quote_plus, quote
S3_SIGNED_URL_FORMAT = '%(prot)s://%(bucket)s/%(endpoint)s/%(obj)s?AWSAccessKeyId=%(id)s&Expires=%(expires)d&Signature=%(sig)s'
ECS_SIGNED_URL_FORMAT = '%(prot)s://%(endpoint)s/%(bucket)s/%(obj)s?AWSAccessKeyId=%(id)s&Expires=%(expires)d&Signature=%(sig)s'
def get_signed_url(endpoint, bucket, obj, id, key, type='s3', expire=3600):
"""
Get a signed URL for an object
Parameters:
endpoint (string): Base URL to access the resource.
e.g. https://object.ecstestdrive.com or https://s3.amazonaws.com
bucket (string): Name of the bucket containing the object
obj (string): ID for the object. e.g. Something/File.ext
id (string): User ID credentials
    key (string): The secret key for the User ID credentials
type (string): Optional - Either 's3' (default) or 'ecs'. This determines
the URL style that will be returned.
's3' style uses bucket.endpoint/object
'ecs' style uses endpoint/bucket/object
expire (int): Optional - Number of seconds from now that the URL will be
valid. Default 3600 seconds. You can also pass in a datetime object.
When this is done, the expiration time will be based on the datetime
object only.
Returns:
Signed URL string value that can be used to access the object resource
"""
# Parse out the http:// or https:// portion of the URL
if endpoint[0:8] == 'https://':
prot = 'https'
endpoint = endpoint[8:]
elif endpoint [0:7] == 'http://':
prot = 'http'
endpoint = endpoint[7:]
if not (type == 's3' or type == 'ecs'):
type = 's3'
url_format = S3_SIGNED_URL_FORMAT
if type == 'ecs':
url_format = ECS_SIGNED_URL_FORMAT
if isinstance(expire, datetime.datetime):
expiry_ts = int(expire.strftime('%s'))
else:
expiry_ts = int(time.time()) + expire
h = hmac.new(
bytes(key.encode('utf-8')),
("GET\n\n\n%d\n/%s/%s"%(expiry_ts, bucket, obj)).encode('utf-8'),
hashlib.sha1)
# Signature
sig = quote_plus(base64.encodestring(h.digest()).strip().decode('utf-8'))
# S3 and ECS require different URL encoding for the object name
# S3 uses the standard quote_plus while ECS allows for extra characters
if type == 's3':
obj = quote_plus(obj)
else:
        obj = quote(obj, '/')
# Create full signed URL
signed_url = url_format%{
'prot': prot,
'endpoint': endpoint,
'bucket': bucket,
'obj': obj,
'id': id,
'key': key,
'expires': expiry_ts,
'sig': sig
}
return signed_url
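# Illustrative usage only -- the endpoint and object below are the examples
# given in the docstring, while the bucket and credentials are made-up
# placeholders, not values shipped with this module:
#
#   url = get_signed_url(
#       endpoint='https://object.ecstestdrive.com',
#       bucket='some-bucket',
#       obj='Something/File.ext',
#       id='ACCESS_KEY_ID',
#       key='SECRET_KEY',
#       type='ecs',
#       expire=900)
#
# The returned URL can then be handed to any HTTP client for a time-limited GET.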
| 34.909091 | 128 | 0.65439 |
794071014fb04ef3d875d2960ac65e8c93ccfdc7 | 1,519 | py | Python | cifar10/trainer/discriminator.py | rahhul/GANs | cec9e2f81528099407b8a9d3dce2f1cf85e449be | [
"MIT"
] | null | null | null | cifar10/trainer/discriminator.py | rahhul/GANs | cec9e2f81528099407b8a9d3dce2f1cf85e449be | [
"MIT"
] | null | null | null | cifar10/trainer/discriminator.py | rahhul/GANs | cec9e2f81528099407b8a9d3dce2f1cf85e449be | [
"MIT"
] | null | null | null | # python3
import util
import tensorflow as tf
from tensorflow import keras
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense, Conv2D, Dropout, Flatten, LeakyReLU
from tensorflow.keras.layers import BatchNormalization
from tensorflow.keras.optimizers import Adam
from tensorflow.keras.initializers import RandomNormal, glorot_normal, VarianceScaling
# Discriminator model
def discriminator_model(in_shape=(32, 32, 3)):
init = glorot_normal()
model = Sequential()
model.add(Conv2D(64, (3,3), padding='same', kernel_initializer=init, input_shape=in_shape))
# model.add(BatchNormalization())
model.add(LeakyReLU(alpha=0.2))
# downsample
model.add(Conv2D(128, (3,3), strides=(2,2), kernel_initializer=init, padding='same'))
# model.add(BatchNormalization())
model.add(LeakyReLU(alpha=0.2))
# downsample
model.add(Conv2D(128, (3,3), strides=(2,2), kernel_initializer=init, padding='same'))
# model.add(BatchNormalization())
model.add(LeakyReLU(alpha=0.2))
# downsample
model.add(Conv2D(256, (3,3), strides=(2,2), kernel_initializer=init, padding='same'))
# model.add(BatchNormalization())
model.add(LeakyReLU(alpha=0.2))
# classifier
model.add(Flatten())
model.add(Dropout(0.4))
model.add(Dense(1, activation='sigmoid'))
# compile model
model.compile(loss='binary_crossentropy',
optimizer=Adam(lr=2e-4, beta_1=0.5),
metrics=['accuracy'])
return model
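# Minimal usage sketch (illustrative only, not part of the training pipeline):
#
#   model = discriminator_model()
#   model.summary()  # (32, 32, 3) input reduced to a single sigmoid output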
| 37.04878 | 95 | 0.706386 |
7940710a28ff00624d3b55dca33112c911249d56 | 8,654 | py | Python | manilaclient/common/cliutils.py | SolKuczala/python-manilaclient | 9613c7fd2652dc3c7b8793c9af2b6357f42a4757 | [
"CNRI-Python",
"Apache-1.1"
] | null | null | null | manilaclient/common/cliutils.py | SolKuczala/python-manilaclient | 9613c7fd2652dc3c7b8793c9af2b6357f42a4757 | [
"CNRI-Python",
"Apache-1.1"
] | null | null | null | manilaclient/common/cliutils.py | SolKuczala/python-manilaclient | 9613c7fd2652dc3c7b8793c9af2b6357f42a4757 | [
"CNRI-Python",
"Apache-1.1"
] | null | null | null | # Copyright 2012 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
# W0603: Using the global statement
# W0621: Redefining name %s from outer scope
# pylint: disable=W0603,W0621
import getpass
import inspect
import os
import sys
import textwrap
from oslo_utils import encodeutils
from oslo_utils import strutils
import prettytable
import six
from six import moves
from manilaclient.common._i18n import _
class MissingArgs(Exception):
"""Supplied arguments are not sufficient for calling a function."""
def __init__(self, missing):
self.missing = missing
msg = _("Missing arguments: %s") % ", ".join(missing)
super(MissingArgs, self).__init__(msg)
def validate_args(fn, *args, **kwargs):
"""Check that the supplied args are sufficient for calling a function.
>>> validate_args(lambda a: None)
Traceback (most recent call last):
...
    MissingArgs: Missing arguments: a
>>> validate_args(lambda a, b, c, d: None, 0, c=1)
Traceback (most recent call last):
...
    MissingArgs: Missing arguments: b, d
:param fn: the function to check
    :param args: the positional arguments supplied
:param kwargs: the keyword arguments supplied
"""
argspec = inspect.getargspec(fn)
num_defaults = len(argspec.defaults or [])
required_args = argspec.args[:len(argspec.args) - num_defaults]
def isbound(method):
return getattr(method, '__self__', None) is not None
if isbound(fn):
required_args.pop(0)
missing = [arg for arg in required_args if arg not in kwargs]
missing = missing[len(args):]
if missing:
raise MissingArgs(missing)
def arg(*args, **kwargs):
"""Decorator for CLI args.
Example:
>>> @arg("name", help="Name of the new entity")
... def entity_create(args):
... pass
"""
def _decorator(func):
add_arg(func, *args, **kwargs)
return func
return _decorator
def env(*args, **kwargs):
"""Returns the first environment variable set.
If all are empty, defaults to '' or keyword arg `default`.
"""
for arg in args:
value = os.environ.get(arg)
if value:
return value
return kwargs.get('default', '')
def add_arg(func, *args, **kwargs):
"""Bind CLI arguments to a shell.py `do_foo` function."""
if not hasattr(func, 'arguments'):
func.arguments = []
# NOTE(sirp): avoid dups that can occur when the module is shared across
# tests.
if (args, kwargs) not in func.arguments:
# Because of the semantics of decorator composition if we just append
# to the options list positional options will appear to be backwards.
func.arguments.insert(0, (args, kwargs))
def unauthenticated(func):
"""Adds 'unauthenticated' attribute to decorated function.
Usage:
>>> @unauthenticated
... def mymethod(f):
... pass
"""
func.unauthenticated = True
return func
def isunauthenticated(func):
"""Checks if the function does not require authentication.
Mark such functions with the `@unauthenticated` decorator.
:returns: bool
"""
return getattr(func, 'unauthenticated', False)
def print_list(objs, fields, formatters=None, sortby_index=0,
mixed_case_fields=None, field_labels=None):
"""Print a list or objects as a table, one row per object.
:param objs: iterable of :class:`Resource`
:param fields: attributes that correspond to columns, in order
:param formatters: `dict` of callables for field formatting
:param sortby_index: index of the field for sorting table rows
:param mixed_case_fields: fields corresponding to object attributes that
have mixed case names (e.g., 'serverId')
:param field_labels: Labels to use in the heading of the table, default to
fields.
"""
formatters = formatters or {}
mixed_case_fields = mixed_case_fields or []
field_labels = field_labels or fields
if len(field_labels) != len(fields):
        raise ValueError(_("Field labels list %(labels)s has different number "
                           "of elements than fields list %(fields)s")
                         % {'labels': field_labels, 'fields': fields})
if sortby_index is None:
kwargs = {}
else:
kwargs = {'sortby': field_labels[sortby_index]}
pt = prettytable.PrettyTable(field_labels)
pt.align = 'l'
for o in objs:
row = []
for field in fields:
if field in formatters:
row.append(formatters[field](o))
else:
if field in mixed_case_fields:
field_name = field.replace(' ', '_')
else:
field_name = field.lower().replace(' ', '_')
data = getattr(o, field_name, '')
row.append(data)
pt.add_row(row)
if six.PY3:
print(encodeutils.safe_encode(pt.get_string(**kwargs)).decode())
else:
print(encodeutils.safe_encode(pt.get_string(**kwargs)))
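# Illustrative example (not part of the library); any objects exposing the
# matching lower-cased attributes work, e.g.:
#
#   class FakeShare(object):
#       def __init__(self, name, size):
#           self.name = name
#           self.size = size
#
#   print_list([FakeShare('share1', 10), FakeShare('share2', 5)],
#              ['Name', 'Size'], sortby_index=1)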
def print_dict(dct, dict_property="Property", wrap=0):
"""Print a `dict` as a table of two columns.
:param dct: `dict` to print
:param dict_property: name of the first column
:param wrap: wrapping for the second column
"""
pt = prettytable.PrettyTable([dict_property, 'Value'])
pt.align = 'l'
for k, v in dct.items():
# convert dict to str to check length
if isinstance(v, dict):
v = six.text_type(v)
if wrap > 0:
v = textwrap.fill(six.text_type(v), wrap)
# if value has a newline, add in multiple rows
# e.g. fault with stacktrace
if v and isinstance(v, six.string_types) and r'\n' in v:
lines = v.strip().split(r'\n')
col1 = k
for line in lines:
pt.add_row([col1, line])
col1 = ''
else:
pt.add_row([k, v])
if six.PY3:
print(encodeutils.safe_encode(pt.get_string()).decode())
else:
print(encodeutils.safe_encode(pt.get_string()))
def get_password(max_password_prompts=3):
"""Read password from TTY."""
verify = strutils.bool_from_string(env("OS_VERIFY_PASSWORD"))
pw = None
if hasattr(sys.stdin, "isatty") and sys.stdin.isatty():
# Check for Ctrl-D
try:
for __ in moves.range(max_password_prompts):
pw1 = getpass.getpass("OS Password: ")
if verify:
pw2 = getpass.getpass("Please verify: ")
else:
pw2 = pw1
if pw1 == pw2 and pw1:
pw = pw1
break
except EOFError:
pass
return pw
def service_type(stype):
"""Adds 'service_type' attribute to decorated function.
Usage:
.. code-block:: python
@service_type('volume')
def mymethod(f):
...
"""
def inner(f):
f.service_type = stype
return f
return inner
def get_service_type(f):
"""Retrieves service type from function."""
return getattr(f, 'service_type', None)
def pretty_choice_list(l):
return ', '.join("'%s'" % i for i in l)
def exit(msg=''):
if msg:
print(msg, file=sys.stderr)
sys.exit(1)
def transform_export_locations_to_string_view(export_locations):
export_locations_string_view = ''
replica_export_location_ignored_keys = (
'replica_state', 'availability_zone', 'share_replica_id')
for el in export_locations:
if hasattr(el, '_info'):
export_locations_dict = el._info
else:
export_locations_dict = el
for k, v in export_locations_dict.items():
# NOTE(gouthamr): We don't want to show replica related info
# twice in the output, so ignore those.
if k not in replica_export_location_ignored_keys:
export_locations_string_view += '\n%(k)s = %(v)s' % {
'k': k, 'v': v}
return export_locations_string_view
| 30.048611 | 79 | 0.619598 |