content stringlengths 7-928k | avg_line_length float64 3.5-33.8k | max_line_length int64 6-139k | alphanum_fraction float64 0.08-0.96 | licenses sequence | repository_name stringlengths 7-104 | path stringlengths 4-230 | size int64 7-928k | lang stringclasses 1 value |
---|---|---|---|---|---|---|---|---|
import pymongo
def connect_to_mongo(username="", password="", host="localhost", port=27017):
credentials = ""
if username and password:
credentials = f"{username}:{password}@"
connection_url = f"mongodb://{credentials}{host}:{port}"
return pymongo.MongoClient(connection_url)
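# A minimal usage sketch (hypothetical host and credentials, not part of the
# original module): build a client and ping the server to verify connectivity.
#   client = connect_to_mongo(username="reader", password="secret", host="db.example.com")
#   client.admin.command("ping")  # raises if the server is unreachable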
| 30.2 | 77 | 0.692053 | ["MIT"] | stump-vote/stump-data-pipeline | src/stump_data_pipeline/mongo_client.py | 302 | Python |
from __future__ import division
import numpy as np
import sys
import os
import shutil
import vtk
from vtk.util.numpy_support import vtk_to_numpy
import matplotlib
import matplotlib.pyplot as plt
import matplotlib.image as mpimg
import matplotlib.animation as animation
import matplotlib.colors as mcolors
import argparse
import paraview.simple as parasim
import multiprocessing as mp
import copy
sys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname( __file__ ), '..', 'model')))
from geometry_generation import *
matplotlib.rcParams['font.size'] = 6
import scipy.interpolate as interpsci
import seaborn as sns
def truncate_colormap(cmap, minval=0.0, maxval=1.0, n=-1):
if n == -1:
n = cmap.N
new_cmap = mcolors.LinearSegmentedColormap.from_list(
'trunc({name},{a:.2f},{b:.2f})'.format(name=cmap.name, a=minval, b=maxval),
cmap(np.linspace(minval, maxval, n)))
return new_cmap
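# A minimal usage sketch (not part of the original script): truncate_colormap
# keeps only a sub-range of an existing colormap, e.g. the darker 75% of "BuPu":
#   dark_bupu = truncate_colormap(plt.get_cmap("BuPu"), minval=0.25, maxval=1.0)
#   plt.scatter(range(10), range(10), c=range(10), cmap=dark_bupu)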
def makeImage(snapshots, geometryX, geometryY, data_names, folder_name, times):
fig = plt.figure(figsize=(7,4.72441/4*3))
ax1 = fig.add_subplot(231)
ax2 = fig.add_subplot(234)
ax3 = fig.add_subplot(232)
ax4 = fig.add_subplot(235)
ax5 = fig.add_subplot(233)
ax6 = fig.add_subplot(236)
axes = [[ax1, ax2], [ax3, ax4], [ax5, ax6]]
all_axes = [ax1, ax2, ax3, ax4, ax5, ax6]
for k in range(len(snapshots[0])):
axes_current = axes[k]
values = [[],[]]
for j in range(len(data_names)):
i = snapshots[j][k]
reader = vtk.vtkXMLUnstructuredGridReader()
reader.SetFileName(data_names[j] + '{0:06d}.vtu'.format(i))
reader.Update()
data = reader.GetOutput()
points = data.GetPoints()
npts = points.GetNumberOfPoints()
x = vtk_to_numpy(points.GetData())[:, 0]
y = vtk_to_numpy(points.GetData())[:, 1]
f = vtk_to_numpy(data.GetPointData().GetArray(0))
triangles = vtk_to_numpy(data.GetCells().GetData())
ntri = triangles.size//4
tri = np.take(triangles,[m for m in range(triangles.size) if m%4 != 0]).reshape(ntri, 3)
waterX = np.linspace(0, 60000, 100)
waterY = np.zeros(100)
values[j].append(x)
values[j].append(y)
values[j].append(tri)
values[j].append(f)
levels = np.linspace(0, 1.0, 100, endpoint=True)
cmap_new = truncate_colormap(plt.get_cmap("BuPu"), 0.25, 1.0)
maxL = 51100
bed_interpolator = interpsci.interp1d(geometryX, geometryY, fill_value='extrapolate')
geometryX = np.linspace(0, 60000, 1000)
geometryY = bed_interpolator(geometryX)
axes_current[0].fill_between(waterX, -200, 0, color='#94aec4ff', zorder=-21)
axes_current[1].fill_between(waterX, -200, 0, color='#94aec4ff', zorder=-21)
axes_current[0].fill_between(geometryX, -200, geometryY, color='#c69d6eff', zorder=-18)
axes_current[1].fill_between(geometryX, -200, geometryY, color='#c69d6eff', zorder=-18)
cnt1 = axes_current[0].tricontourf(values[0][0], values[0][1], values[0][2], values[0][3]*100, 100, cmap=cmap_new, levels=levels, extend='both', zorder=-20)
cnt2 = axes_current[1].tricontourf(values[1][0], values[1][1], values[1][2], values[1][3]*100, 100, cmap=cmap_new, levels=levels, extend='both', zorder=-20)
for cnt in [cnt1, cnt2]:
for c in cnt.collections:
c.set_edgecolor("face")
axes_current[0].set_title("t = %.1f years" % (times[k]-0.5))
print("Processed file number " + str(i) + ".")
labels = ['a', 'd', 'b', 'e', 'c', 'f']
for ax in all_axes:
ax.set_xlim([49400,maxL])
ax.set_ylim([-200,100])
ax.set_rasterization_zorder(-10)
for j in range(len(all_axes)):
all_axes[j].text(0.025, 0.97, labels[j], transform=all_axes[j].transAxes, va='top', fontsize=8, weight='bold')
for ax in [ax3, ax4, ax5, ax6]:
plt.sca(ax)
ylims = plt.yticks()
print(ylims)
locs = ylims[0][1:-1]
labels = []
for j in range(len(locs)):
labels.append('%.2f'%(locs[j]))
plt.sca(ax)
plt.yticks(locs, [" "]*len(locs))
for ax in [ax1, ax3, ax5]:
plt.sca(ax)
xlims = plt.xticks()
print(xlims)
locs = xlims[0][1:-1]
labels = []
for j in range(len(locs)):
labels.append('%.2f'%(locs[j]))
plt.sca(ax)
plt.xticks(locs, [" "]*len(locs))
for ax in [ax2, ax4, ax6]:
plt.sca(ax)
labelsx = [num/1000 for num in locs]
plt.xticks(locs, labelsx)
for ax in [ax2, ax4, ax6]:
ax.set_xlabel('Distance (km)')
for ax in [ax1, ax2]:
ax.set_ylabel('Height (m)')
ax1.text(-0.5, 0.5, 'No Advection', transform=ax1.transAxes, va='center', fontsize=12, rotation='vertical')
ax2.text(-0.5, 0.5, 'Advection', transform=ax2.transAxes, va='center', fontsize=12, rotation='vertical')
plt.tight_layout(pad=1.0,h_pad=-1.0,w_pad=0.0)
fig.savefig(folder_name + "/" + "thumbnails_warm.eps", transparent=False)
plt.close(fig)
if __name__ == "__main__":
sns.set(palette='colorblind')
sns.set(font_scale=0.8)
sns.set_style(style='ticks')
starting_directory = os.getcwd()
os.chdir(os.path.abspath(os.path.join(os.path.dirname( __file__ ), '..', 'tests')))
main_directory = os.getcwd()
geometryX, geometryY, xz_boundary = make_geometry_grounded(-0.01, 50000, -150, 50, 100, 10)
times = [0.5, 8.0, 15.5]
directories = ['warm_noad', 'warm']
dataName = 'width_timeseries'
data_names = [os.path.join(directory, dataName) for directory in directories]
snapshots = [[], []]
for i in range(len(directories)):
for j in range(len(times)):
if times[j] == int(0):
snapshots[i].append(int(0))
else:
os.chdir(directories[i])
reader_paraview = parasim.PVDReader(FileName=dataName + '.pvd')
times_imported = reader_paraview.GetPropertyValue('TimestepValues')
times_temp = 0.0
for k in range(len(times_imported)):
if times_imported[k] >= times[j] and times_temp <= times[j]:
snapshots[i].append(int(k))
break
else:
times_temp = times_imported[k]
os.chdir(main_directory)
os.chdir(main_directory)
print(snapshots)
makeImage(snapshots, geometryX, geometryY, data_names, starting_directory, times)
| 37.066667 | 164 | 0.601169 | ["MIT"] | brberg/stokes-crevasse-advection | plotting/thumbnails_warm.py | 6,672 | Python |
from PyQt5.QtGui import *
from PyQt5.QtCore import *
from PyQt5.QtWidgets import QColorDialog, QDialogButtonBox
BB = QDialogButtonBox
class ColorDialog(QColorDialog):
def __init__(self, parent=None):
super(ColorDialog, self).__init__(parent)
self.setOption(QColorDialog.ShowAlphaChannel)
# The Mac native dialog does not support our restore button.
self.setOption(QColorDialog.DontUseNativeDialog)
# Add a restore defaults button.
# The default is set at invocation time, so that it
# works across dialogs for different elements.
self.default = None
self.bb = self.layout().itemAt(1).widget()
self.bb.addButton(BB.RestoreDefaults)
self.bb.clicked.connect(self.checkRestore)
def getColor(self, value=None, title=None, default=None):
self.default = default
if title:
self.setWindowTitle(title)
if value:
self.setCurrentColor(value)
return self.currentColor() if self.exec_() else None
def checkRestore(self, button):
if self.bb.buttonRole(button) & BB.ResetRole and self.default:
self.setCurrentColor(self.default)
| 35.147059 | 70 | 0.680335 | ["Apache-2.0"] | 7eta/udk_labeler | view/libs/colorDialog.py | 1,195 | Python |
# Copyright (C) 2019 Google Inc.
# Licensed under http://www.apache.org/licenses/LICENSE-2.0 <see LICENSE file>
"""Provides an HTML cleaner function with sqalchemy compatible API"""
import re
import HTMLParser
import bleach
# Set up custom tags/attributes for bleach
BLEACH_TAGS = [
'caption', 'strong', 'em', 'b', 'i', 'p', 'code', 'pre', 'tt', 'samp',
'kbd', 'var', 'sub', 'sup', 'dfn', 'cite', 'big', 'small', 'address',
'hr', 'br', 'div', 'span', 'h1', 'h2', 'h3', 'h4', 'h5', 'h6', 'ul',
'ol', 'li', 'dl', 'dt', 'dd', 'abbr', 'acronym', 'a', 'img',
'blockquote', 'del', 'ins', 'table', 'tbody', 'tr', 'td', 'th',
] + bleach.ALLOWED_TAGS
BLEACH_ATTRS = {}
ATTRS = [
'href', 'src', 'width', 'height', 'alt', 'cite', 'datetime',
'title', 'class', 'name', 'xml:lang', 'abbr'
]
BUGGY_STRINGS_PATTERN = "&.{2,3};"
for tag in BLEACH_TAGS:
BLEACH_ATTRS[tag] = ATTRS
CLEANER = bleach.sanitizer.Cleaner(
tags=BLEACH_TAGS, attributes=BLEACH_ATTRS, strip=True
)
PARSER = HTMLParser.HTMLParser()
def cleaner(dummy, value, *_):
"""Cleans out unsafe HTML tags.
Uses bleach and unescape until it reaches a fix point.
Args:
dummy: unused, SQLAlchemy will pass in the model class
value: html (string) to be cleaned
Returns:
Html (string) without unsafe tags.
"""
if value is None:
# No point in sanitizing None values
return value
if not isinstance(value, basestring):
# No point in sanitizing non-strings
return value
value = unicode(value)
buggy_strings = re.finditer(BUGGY_STRINGS_PATTERN, PARSER.unescape(value))
while True:
lastvalue = value
value = PARSER.unescape(CLEANER.clean(value))
if value == lastvalue:
break
# For some reason the clean() function converts strings like "&*!;" to "&*;;".
# If we have such strings, we replace the new incorrect values with the old ones.
if buggy_strings:
backup_value = value
updated_buggy_strings = re.finditer(BUGGY_STRINGS_PATTERN, value)
for match in updated_buggy_strings:
try:
old_value = buggy_strings.next().group()
start, finish = match.span()
value = value[:start] + old_value + value[finish:]
except StopIteration:
# If we have different number of string after clean function
# we should skip replacing
return backup_value
return value
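# A minimal usage sketch (hypothetical input, not part of the original module).
# The first argument is unused (SQLAlchemy passes in the model class), so None works:
#   safe_html = cleaner(None, u'<p onclick="x()">hi <script>alert(1)</script></p>')
#   # disallowed tags/attributes (script, onclick) are stripped; <p> is kept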
| 27.55814 | 78 | 0.646835 | ["ECL-2.0", "Apache-2.0"] | VRolich/ggrc-core | src/ggrc/utils/html_cleaner.py | 2,370 | Python |
'''
This script includes:
1. ClassifierOfflineTrain
This is for offline training. The input data are the processed features.
2. class ClassifierOnlineTest(object)
This is for online testing. The input data are the raw skeletons.
It uses FeatureGenerator to extract features,
and then uses ClassifierOfflineTrain to recognize the action.
Note that this model only recognizes the action of one person.
TODO: Add more comments to this function.
'''
import numpy as np
import sys
import os
import pickle
import matplotlib.pyplot as plt
from matplotlib.colors import ListedColormap
from collections import deque
import cv2
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import StandardScaler
from sklearn.datasets import make_moons, make_circles, make_classification
from sklearn.neural_network import MLPClassifier
from sklearn.neighbors import KNeighborsClassifier
from sklearn.svm import SVC
from sklearn.gaussian_process import GaussianProcessClassifier
from sklearn.gaussian_process.kernels import RBF
from sklearn.tree import DecisionTreeClassifier
from sklearn.ensemble import RandomForestClassifier, AdaBoostClassifier
from sklearn.naive_bayes import GaussianNB
from sklearn.discriminant_analysis import QuadraticDiscriminantAnalysis
from sklearn.decomposition import PCA
if True:
import sys
import os
ROOT = os.path.dirname(os.path.abspath(__file__))+"/../"
sys.path.append(ROOT)
from utils.lib_feature_proc import FeatureGenerator
# -- Settings
NUM_FEATURES_FROM_PCA = 50
# -- Classes
class ClassifierOfflineTrain(object):
''' The classifier for offline training.
The input features to this classifier are already
processed by `class FeatureGenerator`.
'''
def __init__(self):
self._init_all_models()
# self.clf = self._choose_model("Nearest Neighbors")
# self.clf = self._choose_model("Linear SVM")
# self.clf = self._choose_model("RBF SVM")
# self.clf = self._choose_model("Gaussian Process")
# self.clf = self._choose_model("Decision Tree")
# self.clf = self._choose_model("Random Forest")
self.clf = self._choose_model("Neural Net")
def predict(self, X):
''' Predict the class index of the feature X '''
Y_predict = self.clf.predict(self.pca.transform(X))
return Y_predict
def predict_and_evaluate(self, te_X, te_Y):
''' Test model on test set and obtain accuracy '''
te_Y_predict = self.predict(te_X)
N = len(te_Y)
n = sum(te_Y_predict == te_Y)
accu = n / N
return accu, te_Y_predict
def train(self, X, Y):
''' Train model. The result is saved into self.clf '''
n_components = min(NUM_FEATURES_FROM_PCA, X.shape[1])
self.pca = PCA(n_components=n_components, whiten=True)
self.pca.fit(X)
# print("Sum eig values:", np.sum(self.pca.singular_values_))
print("Sum eig values:", np.sum(self.pca.explained_variance_ratio_))
X_new = self.pca.transform(X)
print("After PCA, X.shape = ", X_new.shape)
self.clf.fit(X_new, Y)
def _choose_model(self, name):
self.model_name = name
idx = self.names.index(name)
return self.classifiers[idx]
def _init_all_models(self):
self.names = ["Nearest Neighbors", "Linear SVM", "RBF SVM", "Gaussian Process",
"Decision Tree", "Random Forest", "Neural Net", "AdaBoost",
"Naive Bayes", "QDA"]
self.model_name = None
self.classifiers = [
KNeighborsClassifier(5),
SVC(kernel="linear", C=10.0),
SVC(gamma=0.01, C=1.0, verbose=True),
GaussianProcessClassifier(1.0 * RBF(1.0)),
DecisionTreeClassifier(max_depth=5),
RandomForestClassifier(
max_depth=30, n_estimators=100, max_features="auto"),
MLPClassifier((20, 30, 40)), # Neural Net
AdaBoostClassifier(),
GaussianNB(),
QuadraticDiscriminantAnalysis()]
def _predict_proba(self, X):
''' Predict the probability of feature X belonging to each of the class Y[i] '''
Y_probs = self.clf.predict_proba(self.pca.transform(X))
return Y_probs # np.array with a length of len(classes)
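# A minimal offline-training sketch (synthetic data, not part of the original
# module): fit PCA plus the chosen classifier, then evaluate on a held-out split.
#   X = np.random.rand(200, 100)            # 200 samples, 100 features
#   Y = np.random.randint(0, 3, size=200)   # 3 action classes
#   tr_X, te_X, tr_Y, te_Y = train_test_split(X, Y, test_size=0.3)
#   model = ClassifierOfflineTrain()
#   model.train(tr_X, tr_Y)
#   accu, te_Y_predict = model.predict_and_evaluate(te_X, te_Y)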
class ClassifierOnlineTest(object):
''' Classifier for online inference.
The input data to this classifier is the raw skeleton data, so they
are processed by `class FeatureGenerator` before sending to the
self.model trained by `class ClassifierOfflineTrain`.
'''
def __init__(self, model_path, action_labels, window_size, human_id=0):
# -- Settings
self.human_id = human_id
with open(model_path, 'rb') as f:
self.model = pickle.load(f)
if self.model is None:
print("my Error: failed to load model")
assert False
self.action_labels = action_labels
self.THRESHOLD_SCORE_FOR_DISP = 0.5
# -- Time serials storage
self.feature_generator = FeatureGenerator(window_size)
self.reset()
def reset(self):
self.feature_generator.reset()
self.scores_hist = deque()
self.scores = None
def predict(self, skeleton):
''' Predict the class (string) of the input raw skeleton '''
LABEL_UNKNOWN = ""
is_features_good, features = self.feature_generator.add_cur_skeleton(
skeleton)
if is_features_good:
# convert to 2d array
features = features.reshape(-1, features.shape[0])
curr_scores = self.model._predict_proba(features)[0]
self.scores = self.smooth_scores(curr_scores)
if self.scores.max() < self.THRESHOLD_SCORE_FOR_DISP: # If lower than threshold, bad
prediced_label = LABEL_UNKNOWN
else:
predicted_idx = self.scores.argmax()
prediced_label = self.action_labels[predicted_idx]
else:
prediced_label = LABEL_UNKNOWN
return prediced_label
def smooth_scores(self, curr_scores):
''' Smooth the current prediction score
by taking the average with previous scores
'''
self.scores_hist.append(curr_scores)
DEQUE_MAX_SIZE = 2
if len(self.scores_hist) > DEQUE_MAX_SIZE:
self.scores_hist.popleft()
if 1: # Use sum
score_sums = np.zeros((len(self.action_labels),))
for score in self.scores_hist:
score_sums += score
score_sums /= len(self.scores_hist)
print("\nMean score:\n", score_sums)
return score_sums
else: # Use multiply
score_mul = np.ones((len(self.action_labels),))
for score in self.scores_hist:
score_mul *= score
return score_mul
def draw_scores_onto_image(self, img_disp):
if self.scores is None:
return
for i in range(len(self.action_labels)):
FONT_SIZE = 0.6
TXT_X = 20
TXT_Y = 150 + i*30
COLOR_INTENSITY = 255
#if i == -1:
# s = "{}".format(self.human_id)
#else:
#label = self.action_labels[i]
#s = "{:<5}: {:.2f}".format(label, self.scores[i])
#COLOR_INTENSITY *= (0.0 + 1.0 * self.scores[i])**0.5
if i!=-1:
label = self.action_labels[i]
s = "{:<5}: {:.2f}".format(label, self.scores[i])
COLOR_INTENSITY *= (0.0 + 1.0 * self.scores[i])**0.5
cv2.putText(img_disp, text=s, org=(TXT_X, TXT_Y),
fontFace=cv2.FONT_HERSHEY_SIMPLEX, fontScale=FONT_SIZE,
color=(0, 0, int(COLOR_INTENSITY)), thickness=2)
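# A minimal online-inference sketch (hypothetical model path and labels, not
# part of the original module): load a pickled ClassifierOfflineTrain model and
# feed it raw skeletons frame by frame.
#   classifier = ClassifierOnlineTest("model/trained_classifier.pickle",
#                                     action_labels=["stand", "walk", "wave"],
#                                     window_size=5)
#   label = classifier.predict(skeleton)  # skeleton: flat array of joint coords
#   classifier.draw_scores_onto_image(img_disp)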
| 35.855856 | 97 | 0.629899 | ["MIT"] | eddylamhw/trAIner24 | utils/lib_classifier.py | 7,960 | Python |
#!/usr/bin/python
import time
import re
import os
class ScavUtility:
def __init__(self):
pass
def check(self, email):
regex = '^(?=.{1,64}@)[A-Za-z0-9_-]+(\\.[A-Za-z0-9_-]+)*@[^-][A-Za-z0-9-]+(\\.[A-Za-z0-9-]+)*(\\.[A-Za-z]{2,})$'
if (re.search(regex, email)):
return 1
else:
return 0
def loadSearchTerms(self):
searchterms = set()
f = open("configs/searchterms.txt", "r")
tmpcontent = f.readlines()
f.close()
for tmpline in tmpcontent:
tmpline = tmpline.strip()
searchterms.add(tmpline)
return searchterms
def archivepastes(self, directory):
pastecount = len([name for name in os.listdir(directory) if os.path.isfile(os.path.join(directory, name))])
if pastecount > 48000:
archivefilename = str(time.time()) + ".zip"
os.system("zip -r pastebin_" + archivefilename + " " + directory)
os.system("mv pastebin_" + archivefilename + " archive/.")
os.system("rm " + directory + "/*")
| 29.567568 | 120 | 0.54479 | ["Apache-2.0"] | SCR-Hy3n4/Scavenger | classes/utility.py | 1,094 | Python |
#------------------------------------------------------------------------------
# Copyright (c) 2013, Nucleic Development Team.
#
# Distributed under the terms of the Modified BSD License.
#
# The full license is in the file COPYING.txt, distributed with this software.
#------------------------------------------------------------------------------
from atom.api import Typed, ForwardTyped
from .bounded_date import BoundedDate, ProxyBoundedDate
class ProxyCalendar(ProxyBoundedDate):
""" The abstract defintion of a proxy Calendar object.
"""
#: A reference to the Calendar declaration.
declaration = ForwardTyped(lambda: Calendar)
class Calendar(BoundedDate):
""" A bounded date control which edits a Python datetime.date using
a widget which resembles a calendar.
"""
#: A reference to the ProxyCalendar object.
proxy = Typed(ProxyCalendar)
| 31.714286 | 79 | 0.606982 | ["BSD-3-Clause"] | ContinuumIO/ashiba | enaml/enaml/widgets/calendar.py | 888 | Python |
# -*- coding: utf-8 -*-
"""
WOLFEYES'S FRAMEWORK
Python 3 / OpenCV 3
This file describes some TypeCheking decorators.
Might be overkill, but it allows for fairly precise type checking,
especially on keyword arguments, which might help.
"""
# 'args' captures the expected types and returns the actual decorator
def args(*types, **ktypes):
"""Allow testing of input types:
argkey=(types) or argkey=type"""
# The decorator modifies the function
def decorator(func):
def modified(*args, **kargs):
# The modified fonction takes some args and kargs,
# Which we need to check before passing it to func.
# Works much like a proxy/firewall.
# Check args:
position = 1
for arg, T in zip(args, types):
if not isinstance(arg, T):
raise TypeError("Positional arg (%d) should be of type(s) %s, got %s" % (position, T, type(arg)))
position += 1
# Check kargs:
for key, arg in kargs.items():
if key in ktypes:
T = ktypes[key]
if not isinstance(arg, T):
raise TypeError("Keyworded arg '%s' should be of type(s) %s, got %s" % (key, T, type(arg)))
# Actual result after check
return func(*args, **kargs)
# The decorator has decorated 'func' in 'modified'
return modified
# We pass the actual decorator right after getting the kargs
return decorator
# 'ret' gets the possible output types
def ret(*types, **kargs):
# Safety check: catch the common mistake of writing '@ret' instead of '@ret(...)'
if len(types) == 1 and not isinstance(types[0], type) and callable(types[0]):
raise ValueError("You should not pass a function to TypeChecker.ret, maybe you did not write it as '@TypeChecker.ret()...'")
# This decorator will modify the function 'func'
def decorator(func):
def modified(*args, **kargs):
# This is the modified function, 'modified' Works
# like a proxy, just passes the arguments and
# checks the type of the return's value
ret = func(*args, **kargs)
if not isinstance(ret, types):
raise TypeError("The function %s is returning an abnormal value, expected %s but got %s" % (func, types, type(ret)))
# func's return
return ret
# Modified function
return modified
# The actual decorator
return decorator
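# A minimal usage sketch (hypothetical function, not part of the original
# module), assuming this file is imported as `TypeChecker`:
#   @TypeChecker.args(int, int, scale=float)
#   @TypeChecker.ret(float)
#   def scaled_sum(a, b, scale=1.0):
#       return (a + b) * scale
#   scaled_sum(1, 2, scale=0.5)  # passes both checks
#   scaled_sum(1, "2")           # raises TypeError from the args decorator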
| 33.405405 | 132 | 0.588592 | ["BSD-3-Clause"] | TBIproject/WolfEye | WolfEyes/Utils/TypeChecker.py | 2,472 | Python |
import requests
def run(event, context):
#return event.get('url') + 'aaa'
r = requests.get(event.get('url'))
return r.text
#return '...**^^.This is a request test for url: {0}'.format(event.get('url'))
| 27.375 | 82 | 0.611872 | ["Apache-2.0"] | owasp-sbot/pbx-gs-python-utils | _from_pydot/dev/request_test.py | 219 | Python |
import insightconnect_plugin_runtime
from .schema import StartPatchScanInput, StartPatchScanOutput, Input, Output, Component
# Custom imports below
from insightconnect_plugin_runtime.exceptions import PluginException
import polling2
class StartPatchScan(insightconnect_plugin_runtime.Action):
def __init__(self):
super(self.__class__, self).__init__(
name='start_patch_scan',
description=Component.DESCRIPTION,
input=StartPatchScanInput(),
output=StartPatchScanOutput())
def run(self, params={}):
endpoint_names = params.get(Input.HOSTNAMES, [])
machine_group_ids = params.get(Input.MACHINE_GROUP_IDS, [])
use_machine_credential = params.get(Input.USE_MACHINE_CREDENTIAL, False)
max_poll_time = params.get(Input.MAX_POLL_TIME)
if not endpoint_names and not machine_group_ids:
raise PluginException(cause='No hostnames or machine group IDs specified.',
assistance='Either hostnames or machine group IDs must be specified.'
)
if use_machine_credential:
if not endpoint_names:
raise PluginException(cause='Machine credentials can only be set to true if hostname is specified.',
assistance='Either provide a valid hostname or set machine credentials to False.')
payload = {
"credentialId": params.get(Input.CREDENTIAL_ID),
"diagnosticTraceEnabled": params.get(Input.DIAGNOSTIC_TRACE_ENABLED),
"endpointNames": endpoint_names,
"machinegroupIds": machine_group_ids,
"name": params.get(Input.NAME),
"runAsCredentialId": params.get(Input.RUN_AS_CREDENTIAL_ID),
"templateId": params.get(Input.TEMPLATE_ID),
"useMachineCredential": use_machine_credential
}
self.connection.ivanti_api.create_session_credential()
scan = self.connection.ivanti_api.start_patch_scan(payload)
try:
operation_location_url = scan.headers.get("Operation-Location")
polling2.poll(lambda: self.connection.ivanti_api.get_operation_location(operation_location_url)
.get("percentComplete") == 100, step=10, timeout=max_poll_time)
except KeyError as e:
raise PluginException(
cause=f'{e} not found within the header.',
assistance=f'If the issue persists please contact support.')
except polling2.TimeoutException as e:
raise PluginException(
cause='Action timeout.',
assistance=f'This scan has exceeded the maximum poll time of {max_poll_time}.')
operation_location = self.connection.ivanti_api.get_operation_location(operation_location_url)
scan_details = scan.json()
scan_details['isComplete'] = True
scan_details['updatedOn'] = operation_location['lastAction']
return {
Output.SCAN_DETAILS: scan_details
}
| 46.223881 | 120 | 0.656765 | ["MIT"] | hashtagcyber/insightconnect-plugins | ivanti_security_controls/icon_ivanti_security_controls/actions/start_patch_scan/action.py | 3,097 | Python |
import configparser
import os
import re
import subprocess
import sys
import time
import utilities_common.cli as clicommon
from urllib.request import urlopen, urlretrieve
import click
from sonic_py_common import logger
from swsscommon.swsscommon import SonicV2Connector
from .bootloader import get_bootloader
from .common import (
run_command, run_command_or_raise,
IMAGE_PREFIX,
UPPERDIR_NAME,
WORKDIR_NAME,
DOCKERDIR_NAME,
)
from .exception import SonicRuntimeException
SYSLOG_IDENTIFIER = "sonic-installer"
LOG_ERR = logger.Logger.LOG_PRIORITY_ERROR
LOG_WARN = logger.Logger.LOG_PRIORITY_WARNING
LOG_NOTICE = logger.Logger.LOG_PRIORITY_NOTICE
# Global Config object
_config = None
# Global logger instance
log = logger.Logger(SYSLOG_IDENTIFIER)
# This is from the aliases example:
# https://github.com/pallets/click/blob/57c6f09611fc47ca80db0bd010f05998b3c0aa95/examples/aliases/aliases.py
class Config(object):
"""Object to hold CLI config"""
def __init__(self):
self.path = os.getcwd()
self.aliases = {}
def read_config(self, filename):
parser = configparser.RawConfigParser()
parser.read([filename])
try:
self.aliases.update(parser.items('aliases'))
except configparser.NoSectionError:
pass
class AliasedGroup(click.Group):
"""This subclass of click.Group supports abbreviations and
looking up aliases in a config file with a bit of magic.
"""
def get_command(self, ctx, cmd_name):
global _config
# If we haven't instantiated our global config, do it now and load current config
if _config is None:
_config = Config()
# Load our config file
cfg_file = os.path.join(os.path.dirname(__file__), 'aliases.ini')
_config.read_config(cfg_file)
# Try to get builtin commands as normal
rv = click.Group.get_command(self, ctx, cmd_name)
if rv is not None:
return rv
# No builtin found. Look up an explicit command alias in the config
if cmd_name in _config.aliases:
actual_cmd = _config.aliases[cmd_name]
return click.Group.get_command(self, ctx, actual_cmd)
# Alternative option: if we did not find an explicit alias we
# allow automatic abbreviation of the command. "status" for
# instance will match "st". We only allow that however if
# there is only one command.
matches = [x for x in self.list_commands(ctx)
if x.lower().startswith(cmd_name.lower())]
if not matches:
return None
elif len(matches) == 1:
return click.Group.get_command(self, ctx, matches[0])
ctx.fail('Too many matches: %s' % ', '.join(sorted(matches)))
#
# Helper functions
#
_start_time = None
_last_time = None
def reporthook(count, block_size, total_size):
global _start_time, _last_time
cur_time = int(time.time())
if count == 0:
_start_time = cur_time
_last_time = cur_time
return
if cur_time == _last_time:
return
_last_time = cur_time
duration = cur_time - _start_time
progress_size = int(count * block_size)
speed = int(progress_size / (1024 * duration))
percent = int(count * block_size * 100 / total_size)
time_left = (total_size - progress_size) / speed / 1024
sys.stdout.write("\r...%d%%, %d MB, %d KB/s, %d seconds left... " %
(percent, progress_size / (1024 * 1024), speed, time_left))
sys.stdout.flush()
# TODO: Embed tag name info into docker image meta data at build time,
# and extract tag name from docker image file.
def get_docker_tag_name(image):
# Try to get tag name from label metadata
cmd = "docker inspect --format '{{.ContainerConfig.Labels.Tag}}' " + image
proc = subprocess.Popen(cmd, stdout=subprocess.PIPE, shell=True, text=True)
(out, _) = proc.communicate()
if proc.returncode != 0:
return "unknown"
tag = out.rstrip()
if tag == "<no value>":
return "unknown"
return tag
def echo_and_log(msg, priority=LOG_NOTICE, fg=None):
if priority >= LOG_ERR:
# Print to stderr if priority is error
click.secho(msg, fg=fg, err=True)
else:
click.secho(msg, fg=fg)
log.log(priority, msg, False)
# Function which validates whether a given URL specifies an existent file
# on a reachable remote machine. Will abort the current operation if not
def validate_url_or_abort(url):
# Attempt to retrieve HTTP response code
try:
urlfile = urlopen(url)
response_code = urlfile.getcode()
urlfile.close()
except IOError:
response_code = None
if not response_code:
echo_and_log("Did not receive a response from remote machine. Aborting...", LOG_ERR)
raise click.Abort()
else:
# Check for a 4xx response code which indicates a nonexistent URL
if response_code / 100 == 4:
echo_and_log("Image file not found on remote machine. Aborting...", LOG_ERR)
raise click.Abort()
# Callback for confirmation prompt. Aborts if user enters "n"
def abort_if_false(ctx, param, value):
if not value:
ctx.abort()
def get_container_image_name(container_name):
# example image: docker-lldp-sv2:latest
cmd = "docker inspect --format '{{.Config.Image}}' " + container_name
proc = subprocess.Popen(cmd, stdout=subprocess.PIPE, shell=True, text=True)
(out, _) = proc.communicate()
if proc.returncode != 0:
sys.exit(proc.returncode)
image_latest = out.rstrip()
# example image_name: docker-lldp-sv2
cmd = "echo " + image_latest + " | cut -d ':' -f 1"
proc = subprocess.Popen(cmd, stdout=subprocess.PIPE, shell=True, text=True)
image_name = proc.stdout.read().rstrip()
return image_name
def get_container_image_id(image_tag):
# TODO: extract commond docker info fetching functions
# this is image_id for image with tag, like 'docker-teamd:latest'
cmd = "docker images --format '{{.ID}}' " + image_tag
proc = subprocess.Popen(cmd, stdout=subprocess.PIPE, shell=True, text=True)
image_id = proc.stdout.read().rstrip()
return image_id
def get_container_image_id_all(image_name):
# All images id under the image name like 'docker-teamd'
cmd = "docker images --format '{{.ID}}' " + image_name
proc = subprocess.Popen(cmd, stdout=subprocess.PIPE, shell=True, text=True)
image_id_all = proc.stdout.read()
image_id_all = image_id_all.splitlines()
image_id_all = set(image_id_all)
return image_id_all
def hget_warm_restart_table(db_name, table_name, warm_app_name, key):
db = SonicV2Connector()
db.connect(db_name, False)
_hash = table_name + db.get_db_separator(db_name) + warm_app_name
client = db.get_redis_client(db_name)
return client.hget(_hash, key)
def hdel_warm_restart_table(db_name, table_name, warm_app_name, key):
db = SonicV2Connector()
db.connect(db_name, False)
_hash = table_name + db.get_db_separator(db_name) + warm_app_name
client = db.get_redis_client(db_name)
return client.hdel(_hash, key)
def print_deprecation_warning(deprecated_cmd_or_subcmd, new_cmd_or_subcmd):
click.secho("Warning: '{}' {}command is deprecated and will be removed in the future"
.format(deprecated_cmd_or_subcmd, "" if deprecated_cmd_or_subcmd == "sonic_installer" else "sub"),
fg="red", err=True)
click.secho("Please use '{}' instead".format(new_cmd_or_subcmd), fg="red", err=True)
def mount_squash_fs(squashfs_path, mount_point):
run_command_or_raise(["mkdir", "-p", mount_point])
run_command_or_raise(["mount", "-t", "squashfs", squashfs_path, mount_point])
def umount(mount_point, read_only=True, recursive=False, force=True, remove_dir=True, raise_exception=True):
flags = []
if read_only:
flags.append("-r")
if force:
flags.append("-f")
if recursive:
flags.append("-R")
run_command_or_raise(["umount", *flags, mount_point], raise_exception=raise_exception)
if remove_dir:
run_command_or_raise(["rm", "-rf", mount_point], raise_exception=raise_exception)
def mount_overlay_fs(lowerdir, upperdir, workdir, mount_point):
run_command_or_raise(["mkdir", "-p", mount_point])
overlay_options = "rw,relatime,lowerdir={},upperdir={},workdir={}".format(lowerdir, upperdir, workdir)
run_command_or_raise(["mount", "overlay", "-t", "overlay", "-o", overlay_options, mount_point])
def mount_bind(source, mount_point):
run_command_or_raise(["mkdir", "-p", mount_point])
run_command_or_raise(["mount", "--bind", source, mount_point])
def mount_procfs_chroot(root):
run_command_or_raise(["chroot", root, "mount", "proc", "/proc", "-t", "proc"])
def mount_sysfs_chroot(root):
run_command_or_raise(["chroot", root, "mount", "sysfs", "/sys", "-t", "sysfs"])
def update_sonic_environment(bootloader, binary_image_version):
"""Prepare sonic environment variable using incoming image template file. If incoming image template does not exist
use current image template file.
"""
SONIC_ENV_TEMPLATE_FILE = os.path.join("usr", "share", "sonic", "templates", "sonic-environment.j2")
SONIC_VERSION_YML_FILE = os.path.join("etc", "sonic", "sonic_version.yml")
sonic_version = re.sub(IMAGE_PREFIX, '', binary_image_version)
new_image_dir = bootloader.get_image_path(binary_image_version)
new_image_mount = os.path.join('/', "tmp", "image-{0}-fs".format(sonic_version))
env_dir = os.path.join(new_image_dir, "sonic-config")
env_file = os.path.join(env_dir, "sonic-environment")
with bootloader.get_rootfs_path(new_image_dir) as new_image_squashfs_path:
try:
mount_squash_fs(new_image_squashfs_path, new_image_mount)
next_sonic_env_template_file = os.path.join(new_image_mount, SONIC_ENV_TEMPLATE_FILE)
next_sonic_version_yml_file = os.path.join(new_image_mount, SONIC_VERSION_YML_FILE)
sonic_env = run_command_or_raise([
"sonic-cfggen",
"-d",
"-y",
next_sonic_version_yml_file,
"-t",
next_sonic_env_template_file,
])
os.mkdir(env_dir, 0o755)
with open(env_file, "w+") as ef:
print(sonic_env, file=ef)
os.chmod(env_file, 0o644)
except SonicRuntimeException as ex:
echo_and_log("Warning: SONiC environment variables are not supported for this image: {0}".format(str(ex)), LOG_ERR, fg="red")
if os.path.exists(env_file):
os.remove(env_file)
os.rmdir(env_dir)
finally:
umount(new_image_mount)
def migrate_sonic_packages(bootloader, binary_image_version):
""" Migrate SONiC packages to new SONiC image. """
SONIC_PACKAGE_MANAGER = "sonic-package-manager"
PACKAGE_MANAGER_DIR = "/var/lib/sonic-package-manager/"
DOCKER_CTL_SCRIPT = "/usr/lib/docker/docker.sh"
DOCKERD_SOCK = "docker.sock"
VAR_RUN_PATH = "/var/run/"
tmp_dir = "tmp"
packages_file = "packages.json"
packages_path = os.path.join(PACKAGE_MANAGER_DIR, packages_file)
sonic_version = re.sub(IMAGE_PREFIX, '', binary_image_version)
new_image_dir = bootloader.get_image_path(binary_image_version)
new_image_upper_dir = os.path.join(new_image_dir, UPPERDIR_NAME)
new_image_work_dir = os.path.join(new_image_dir, WORKDIR_NAME)
new_image_docker_dir = os.path.join(new_image_dir, DOCKERDIR_NAME)
new_image_mount = os.path.join("/", tmp_dir, "image-{0}-fs".format(sonic_version))
new_image_docker_mount = os.path.join(new_image_mount, "var", "lib", "docker")
if not os.path.isdir(new_image_docker_dir):
# NOTE: This codepath can be reached if the installation process did not
# extract the default dockerfs. This can happen with docker_inram, although
# the bootloader class should have disabled package migration, which is why
# this is a non-fatal error message.
echo_and_log("Error: SONiC package migration cannot proceed due to missing docker folder", LOG_ERR, fg="red")
return
docker_started = False
with bootloader.get_rootfs_path(new_image_dir) as new_image_squashfs_path:
try:
mount_squash_fs(new_image_squashfs_path, new_image_mount)
# make sure upper dir and work dir exist
run_command_or_raise(["mkdir", "-p", new_image_upper_dir])
run_command_or_raise(["mkdir", "-p", new_image_work_dir])
mount_overlay_fs(new_image_mount, new_image_upper_dir, new_image_work_dir, new_image_mount)
mount_bind(new_image_docker_dir, new_image_docker_mount)
mount_procfs_chroot(new_image_mount)
mount_sysfs_chroot(new_image_mount)
# Assume if docker.sh script exists we are installing Application Extension compatible image.
if not os.path.exists(os.path.join(new_image_mount, os.path.relpath(DOCKER_CTL_SCRIPT, os.path.abspath(os.sep)))):
echo_and_log("Warning: SONiC Application Extension is not supported in this image", LOG_WARN, fg="yellow")
return
run_command_or_raise(["chroot", new_image_mount, DOCKER_CTL_SCRIPT, "start"])
docker_started = True
run_command_or_raise(["cp", packages_path, os.path.join(new_image_mount, tmp_dir, packages_file)])
run_command_or_raise(["touch", os.path.join(new_image_mount, "tmp", DOCKERD_SOCK)])
run_command_or_raise(["mount", "--bind",
os.path.join(VAR_RUN_PATH, DOCKERD_SOCK),
os.path.join(new_image_mount, "tmp", DOCKERD_SOCK)])
run_command_or_raise(["chroot", new_image_mount, "sh", "-c", "command -v {}".format(SONIC_PACKAGE_MANAGER)])
run_command_or_raise(["chroot", new_image_mount, SONIC_PACKAGE_MANAGER, "migrate",
os.path.join("/", tmp_dir, packages_file),
"--dockerd-socket", os.path.join("/", tmp_dir, DOCKERD_SOCK),
"-y"])
finally:
if docker_started:
run_command_or_raise(["chroot", new_image_mount, DOCKER_CTL_SCRIPT, "stop"], raise_exception=False)
umount(new_image_mount, recursive=True, read_only=False, remove_dir=False, raise_exception=False)
umount(new_image_mount, raise_exception=False)
class SWAPAllocator(object):
"""Context class to allocate SWAP memory."""
SWAP_MEM_SIZE = 1024
DISK_FREESPACE_THRESHOLD = 4 * 1024
TOTAL_MEM_THRESHOLD = 2048
AVAILABLE_MEM_THRESHOLD = 1200
SWAP_FILE_PATH = '/host/swapfile'
KiB_TO_BYTES_FACTOR = 1024
MiB_TO_BYTES_FACTOR = 1024 * 1024
def __init__(self, allocate, swap_mem_size=None, total_mem_threshold=None, available_mem_threshold=None):
"""
Initialize the SWAP memory allocator.
The allocator will try to set up SWAP memory only if all of the conditions below are met:
- allocate evaluates to True
- the disk has enough free space (> DISK_FREESPACE_THRESHOLD)
- either system total memory < total_mem_threshold or system available memory < available_mem_threshold
@param allocate: True to allocate SWAP memory if necessary
@param swap_mem_size: the size of SWAP memory to allocate (in MiB)
@param total_mem_threshold: the system total memory threshold (in MiB)
@param available_mem_threshold: the system available memory threshold (in MiB)
"""
self.allocate = allocate
self.swap_mem_size = SWAPAllocator.SWAP_MEM_SIZE if swap_mem_size is None else swap_mem_size
self.total_mem_threshold = SWAPAllocator.TOTAL_MEM_THRESHOLD if total_mem_threshold is None else total_mem_threshold
self.available_mem_threshold = SWAPAllocator.AVAILABLE_MEM_THRESHOLD if available_mem_threshold is None else available_mem_threshold
self.is_allocated = False
@staticmethod
def get_disk_freespace(path):
"""Return free disk space in bytes."""
fs_stats = os.statvfs(path)
return fs_stats.f_bsize * fs_stats.f_bavail
@staticmethod
def read_from_meminfo():
"""Read information from /proc/meminfo."""
meminfo = {}
with open("/proc/meminfo") as fd:
for line in fd.readlines():
if line:
fields = line.split()
if len(fields) >= 2 and fields[1].isdigit():
meminfo[fields[0].rstrip(":")] = int(fields[1])
return meminfo
def setup_swapmem(self):
swapfile = SWAPAllocator.SWAP_FILE_PATH
with open(swapfile, 'wb') as fd:
os.posix_fallocate(fd.fileno(), 0, self.swap_mem_size * SWAPAllocator.MiB_TO_BYTES_FACTOR)
os.chmod(swapfile, 0o600)
run_command(f'mkswap {swapfile}; swapon {swapfile}')
def remove_swapmem(self):
swapfile = SWAPAllocator.SWAP_FILE_PATH
run_command_or_raise(['swapoff', swapfile], raise_exception=False)
try:
os.unlink(swapfile)
finally:
pass
def __enter__(self):
if self.allocate:
if self.get_disk_freespace('/host') < max(SWAPAllocator.DISK_FREESPACE_THRESHOLD, self.swap_mem_size) * SWAPAllocator.MiB_TO_BYTES_FACTOR:
echo_and_log("Failed to setup SWAP memory due to insufficient disk free space...", LOG_ERR)
return
meminfo = self.read_from_meminfo()
mem_total_in_bytes = meminfo["MemTotal"] * SWAPAllocator.KiB_TO_BYTES_FACTOR
mem_avail_in_bytes = meminfo["MemAvailable"] * SWAPAllocator.KiB_TO_BYTES_FACTOR
if (mem_total_in_bytes < self.total_mem_threshold * SWAPAllocator.MiB_TO_BYTES_FACTOR
or mem_avail_in_bytes < self.available_mem_threshold * SWAPAllocator.MiB_TO_BYTES_FACTOR):
echo_and_log("Setup SWAP memory")
swapfile = SWAPAllocator.SWAP_FILE_PATH
if os.path.exists(swapfile):
self.remove_swapmem()
try:
self.setup_swapmem()
except Exception:
self.remove_swapmem()
raise
self.is_allocated = True
def __exit__(self, *exc_info):
if self.is_allocated:
self.remove_swapmem()
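# A minimal usage sketch (not part of the original module): wrap a memory-hungry
# step in the allocator; SWAP is only created when the thresholds above are met
# and is always removed on exit.
#   with SWAPAllocator(allocate=True, swap_mem_size=1024):
#       bootloader.install_image(image_path)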
def validate_positive_int(ctx, param, value):
"""Callback to validate param passed is a positive integer."""
if isinstance(value, int) and value > 0:
return value
raise click.BadParameter("Must be a positive integer")
# Main entrypoint
@click.group(cls=AliasedGroup)
def sonic_installer():
""" SONiC image installation manager """
if os.geteuid() != 0:
exit("Root privileges required for this operation")
# Warn the user if they are calling the deprecated version of the command (with an underscore instead of a hyphen)
if os.path.basename(sys.argv[0]) == "sonic_installer":
print_deprecation_warning("sonic_installer", "sonic-installer")
# Install image
@sonic_installer.command('install')
@click.option('-y', '--yes', is_flag=True, callback=abort_if_false,
expose_value=False, prompt='New image will be installed, continue?')
@click.option('-f', '--force', is_flag=True,
help="Force installation of an image of a type which differs from that of the current running image")
@click.option('--skip_migration', is_flag=True,
help="Do not migrate current configuration to the newly installed image")
@click.option('--skip-package-migration', is_flag=True,
help="Do not migrate current packages to the newly installed image")
@click.option('--skip-setup-swap', is_flag=True,
help='Skip setup temporary SWAP memory used for installation')
@click.option('--swap-mem-size', default=1024, type=int, show_default='1024 MiB',
help='SWAP memory space size', callback=validate_positive_int,
cls=clicommon.MutuallyExclusiveOption, mutually_exclusive=['skip_setup_swap'])
@click.option('--total-mem-threshold', default=2048, type=int, show_default='2048 MiB',
help='If system total memory is lower than threshold, setup SWAP memory',
cls=clicommon.MutuallyExclusiveOption, mutually_exclusive=['skip_setup_swap'],
callback=validate_positive_int)
@click.option('--available-mem-threshold', default=1200, type=int, show_default='1200 MiB',
help='If system available memory is lower than threshold, setup SWAP memory',
cls=clicommon.MutuallyExclusiveOption, mutually_exclusive=['skip_setup_swap'],
callback=validate_positive_int)
@click.argument('url')
def install(url, force, skip_migration=False, skip_package_migration=False,
skip_setup_swap=False, swap_mem_size=None, total_mem_threshold=None, available_mem_threshold=None):
""" Install image from local binary or URL"""
bootloader = get_bootloader()
if url.startswith('http://') or url.startswith('https://'):
echo_and_log('Downloading image...')
validate_url_or_abort(url)
try:
urlretrieve(url, bootloader.DEFAULT_IMAGE_PATH, reporthook)
click.echo('')
except Exception as e:
echo_and_log("Download error", e)
raise click.Abort()
image_path = bootloader.DEFAULT_IMAGE_PATH
else:
image_path = os.path.join("./", url)
binary_image_version = bootloader.get_binary_image_version(image_path)
if not binary_image_version:
echo_and_log("Image file does not exist or is not a valid SONiC image file", LOG_ERR)
raise click.Abort()
# Is this version already installed?
if binary_image_version in bootloader.get_installed_images():
echo_and_log("Image {} is already installed. Setting it as default...".format(binary_image_version))
if not bootloader.set_default_image(binary_image_version):
echo_and_log('Error: Failed to set image as default', LOG_ERR)
raise click.Abort()
else:
# Verify that the binary image is of the same type as the running image
if not bootloader.verify_binary_image(image_path) and not force:
echo_and_log("Image file '{}' is of a different type than running image.\n".format(url) +
"If you are sure you want to install this image, use -f|--force.\n" +
"Aborting...", LOG_ERR)
raise click.Abort()
echo_and_log("Installing image {} and setting it as default...".format(binary_image_version))
with SWAPAllocator(not skip_setup_swap, swap_mem_size, total_mem_threshold, available_mem_threshold):
bootloader.install_image(image_path)
# Take a backup of current configuration
if skip_migration:
echo_and_log("Skipping configuration migration as requested in the command option.")
else:
run_command('config-setup backup')
update_sonic_environment(bootloader, binary_image_version)
if not bootloader.supports_package_migration(binary_image_version) and not skip_package_migration:
echo_and_log("Warning: SONiC package migration is not supported for this bootloader/image", fg="yellow")
skip_package_migration = True
if not skip_package_migration:
migrate_sonic_packages(bootloader, binary_image_version)
# Finally, sync filesystem
run_command("sync;sync;sync")
run_command("sleep 3") # wait 3 seconds after sync
echo_and_log('Done')
# List installed images
@sonic_installer.command('list')
def list_command():
""" Print installed images """
bootloader = get_bootloader()
images = bootloader.get_installed_images()
curimage = bootloader.get_current_image()
nextimage = bootloader.get_next_image()
click.echo("Current: " + curimage)
click.echo("Next: " + nextimage)
click.echo("Available: ")
for image in images:
click.echo(image)
# Set default image for boot
@sonic_installer.command('set-default')
@click.argument('image')
def set_default(image):
""" Choose image to boot from by default """
# Warn the user if they are calling the deprecated version of the subcommand (with an underscore instead of a hyphen)
if "set_default" in sys.argv:
print_deprecation_warning("set_default", "set-default")
bootloader = get_bootloader()
if image not in bootloader.get_installed_images():
echo_and_log('Error: Image does not exist', LOG_ERR)
raise click.Abort()
bootloader.set_default_image(image)
# Set image for next boot
@sonic_installer.command('set-next-boot')
@click.argument('image')
def set_next_boot(image):
""" Choose image for next reboot (one time action) """
# Warn the user if they are calling the deprecated version of the subcommand (with underscores instead of hyphens)
if "set_next_boot" in sys.argv:
print_deprecation_warning("set_next_boot", "set-next-boot")
bootloader = get_bootloader()
if image not in bootloader.get_installed_images():
echo_and_log('Error: Image does not exist', LOG_ERR)
sys.exit(1)
bootloader.set_next_image(image)
# Uninstall image
@sonic_installer.command('remove')
@click.option('-y', '--yes', is_flag=True, callback=abort_if_false,
expose_value=False, prompt='Image will be removed, continue?')
@click.argument('image')
def remove(image):
""" Uninstall image """
bootloader = get_bootloader()
images = bootloader.get_installed_images()
current = bootloader.get_current_image()
if image not in images:
echo_and_log('Image does not exist', LOG_ERR)
sys.exit(1)
if image == current:
echo_and_log('Cannot remove current image', LOG_ERR)
sys.exit(1)
# TODO: check if image is next boot or default boot and fix these
bootloader.remove_image(image)
# Retrieve version from binary image file and print to screen
@sonic_installer.command('binary-version')
@click.argument('binary_image_path')
def binary_version(binary_image_path):
""" Get version from local binary image file """
# Warn the user if they are calling the deprecated version of the subcommand (with an underscore instead of a hyphen)
if "binary_version" in sys.argv:
print_deprecation_warning("binary_version", "binary-version")
bootloader = get_bootloader()
version = bootloader.get_binary_image_version(binary_image_path)
if not version:
click.echo("Image file does not exist or is not a valid SONiC image file")
sys.exit(1)
else:
click.echo(version)
# Remove installed images which are not current and next
@sonic_installer.command('cleanup')
@click.option('-y', '--yes', is_flag=True, callback=abort_if_false,
expose_value=False, prompt='Remove images which are not current and next, continue?')
def cleanup():
""" Remove installed images which are not current and next """
bootloader = get_bootloader()
images = bootloader.get_installed_images()
curimage = bootloader.get_current_image()
nextimage = bootloader.get_next_image()
image_removed = 0
for image in images:
if image != curimage and image != nextimage:
echo_and_log("Removing image %s" % image)
bootloader.remove_image(image)
image_removed += 1
if image_removed == 0:
echo_and_log("No image(s) to remove")
DOCKER_CONTAINER_LIST = [
"bgp",
"dhcp_relay",
"lldp",
"macsec",
"nat",
"pmon",
"radv",
"restapi",
"sflow",
"snmp",
"swss",
"syncd",
"teamd",
"telemetry"
]
# Upgrade docker image
@sonic_installer.command('upgrade-docker')
@click.option('-y', '--yes', is_flag=True, callback=abort_if_false,
expose_value=False, prompt='New docker image will be installed, continue?')
@click.option('--cleanup_image', is_flag=True, help="Clean up old docker image")
@click.option('--skip_check', is_flag=True, help="Skip task check for docker upgrade")
@click.option('--tag', type=str, help="Tag for the new docker image")
@click.option('--warm', is_flag=True, help="Perform warm upgrade")
@click.argument('container_name', metavar='<container_name>', required=True,
type=click.Choice(DOCKER_CONTAINER_LIST))
@click.argument('url')
def upgrade_docker(container_name, url, cleanup_image, skip_check, tag, warm):
""" Upgrade docker image from local binary or URL"""
# Warn the user if they are calling the deprecated version of the subcommand (with an underscore instead of a hyphen)
if "upgrade_docker" in sys.argv:
print_deprecation_warning("upgrade_docker", "upgrade-docker")
image_name = get_container_image_name(container_name)
image_latest = image_name + ":latest"
image_id_previous = get_container_image_id(image_latest)
DEFAULT_IMAGE_PATH = os.path.join("/tmp/", image_name)
if url.startswith('http://') or url.startswith('https://'):
echo_and_log('Downloading image...')
validate_url_or_abort(url)
try:
urlretrieve(url, DEFAULT_IMAGE_PATH, reporthook)
except Exception as e:
echo_and_log("Download error: {}".format(e), LOG_ERR)
raise click.Abort()
image_path = DEFAULT_IMAGE_PATH
else:
image_path = os.path.join("./", url)
# Verify that the local file exists and is a regular file
# TODO: Verify the file is a *proper Docker image file*
if not os.path.isfile(image_path):
echo_and_log("Image file '{}' does not exist or is not a regular file. Aborting...".format(image_path), LOG_ERR)
raise click.Abort()
warm_configured = False
# warm restart enable/disable config is put in stateDB, not persistent across cold reboot, not saved to config_DB.json file
state_db = SonicV2Connector(host='127.0.0.1')
state_db.connect(state_db.STATE_DB, False)
TABLE_NAME_SEPARATOR = '|'
prefix = 'WARM_RESTART_ENABLE_TABLE' + TABLE_NAME_SEPARATOR
_hash = '{}{}'.format(prefix, container_name)
if state_db.get(state_db.STATE_DB, _hash, "enable") == "true":
warm_configured = True
state_db.close(state_db.STATE_DB)
if container_name == "swss" or container_name == "bgp" or container_name == "teamd":
if warm_configured is False and warm:
run_command("config warm_restart enable %s" % container_name)
# Fetch tag of current running image
tag_previous = get_docker_tag_name(image_latest)
# Load the new image beforehand to shorten disruption time
run_command("docker load < %s" % image_path)
warm_app_names = []
# warm restart specific processing for swss, bgp and teamd dockers.
if warm_configured is True or warm:
# make sure orchagent is in clean state if swss is to be upgraded
if container_name == "swss":
skipPendingTaskCheck = ""
if skip_check:
skipPendingTaskCheck = " -s"
cmd = "docker exec -i swss orchagent_restart_check -w 2000 -r 5 " + skipPendingTaskCheck
proc = subprocess.Popen(cmd, stdout=subprocess.PIPE, shell=True, text=True)
(out, err) = proc.communicate()
if proc.returncode != 0:
if not skip_check:
echo_and_log("Orchagent is not in clean state, RESTARTCHECK failed", LOG_ERR)
# Restore original config before exit
if warm_configured is False and warm:
run_command("config warm_restart disable %s" % container_name)
# Clean the image loaded earlier
image_id_latest = get_container_image_id(image_latest)
run_command("docker rmi -f %s" % image_id_latest)
# Re-point latest tag to previous tag
run_command("docker tag %s:%s %s" % (image_name, tag_previous, image_latest))
sys.exit(proc.returncode)
else:
echo_and_log("Orchagent is not in clean state, upgrading it anyway")
else:
echo_and_log("Orchagent is in clean state and frozen for warm upgrade")
warm_app_names = ["orchagent", "neighsyncd"]
elif container_name == "bgp":
# Kill bgpd to restart the bgp graceful restart procedure
echo_and_log("Stopping bgp ...")
run_command("docker exec -i bgp pkill -9 zebra")
run_command("docker exec -i bgp pkill -9 bgpd")
warm_app_names = ["bgp"]
echo_and_log("Stopped bgp ...")
elif container_name == "teamd":
echo_and_log("Stopping teamd ...")
# Send USR1 signal to all teamd instances to stop them
# It will prepare teamd for warm-reboot
run_command("docker exec -i teamd pkill -USR1 teamd > /dev/null")
warm_app_names = ["teamsyncd"]
echo_and_log("Stopped teamd ...")
# clean app reconcilation state from last warm start if exists
for warm_app_name in warm_app_names:
hdel_warm_restart_table("STATE_DB", "WARM_RESTART_TABLE", warm_app_name, "state")
run_command("docker kill %s > /dev/null" % container_name)
run_command("docker rm %s " % container_name)
if tag is None:
# example image: docker-lldp-sv2:latest
tag = get_docker_tag_name(image_latest)
run_command("docker tag %s:latest %s:%s" % (image_name, image_name, tag))
run_command("systemctl restart %s" % container_name)
# All images id under the image name
image_id_all = get_container_image_id_all(image_name)
# this is image_id for image with "latest" tag
image_id_latest = get_container_image_id(image_latest)
for id in image_id_all:
if id != image_id_latest:
# Unless requested, the previous docker image will be preserved
if not cleanup_image and id == image_id_previous:
continue
run_command("docker rmi -f %s" % id)
exp_state = "reconciled"
state = ""
# post warm restart specific processing for swss, bgp and teamd dockers, wait for reconciliation state.
if warm_configured is True or warm:
count = 0
for warm_app_name in warm_app_names:
state = ""
# Wait up to 180 seconds for reconciled state
while state != exp_state and count < 90:
sys.stdout.write("\r {}: ".format(warm_app_name))
sys.stdout.write("[%-s" % ('='*count))
sys.stdout.flush()
count += 1
time.sleep(2)
state = hget_warm_restart_table("STATE_DB", "WARM_RESTART_TABLE", warm_app_name, "state")
log.log_notice("%s reached %s state" % (warm_app_name, state))
sys.stdout.write("]\n\r")
if state != exp_state:
echo_and_log("%s failed to reach %s state" % (warm_app_name, exp_state), LOG_ERR)
else:
exp_state = "" # this is cold upgrade
# Restore to previous cold restart setting
if warm_configured is False and warm:
if container_name == "swss" or container_name == "bgp" or container_name == "teamd":
run_command("config warm_restart disable %s" % container_name)
if state == exp_state:
echo_and_log('Done')
else:
echo_and_log('Failed', LOG_ERR)
sys.exit(1)
# rollback docker image
@sonic_installer.command('rollback-docker')
@click.option('-y', '--yes', is_flag=True, callback=abort_if_false,
expose_value=False, prompt='Docker image will be rolled back, continue?')
@click.argument('container_name', metavar='<container_name>', required=True,
type=click.Choice(DOCKER_CONTAINER_LIST))
def rollback_docker(container_name):
""" Rollback docker image to previous version"""
# Warn the user if they are calling the deprecated version of the subcommand (with an underscore instead of a hyphen)
if "rollback_docker" in sys.argv:
print_deprecation_warning("rollback_docker", "rollback-docker")
image_name = get_container_image_name(container_name)
# All images id under the image name
image_id_all = get_container_image_id_all(image_name)
if len(image_id_all) != 2:
echo_and_log("Two images required, but there are '{}' images for '{}'. Aborting...".format(len(image_id_all), image_name), LOG_ERR)
raise click.Abort()
image_latest = image_name + ":latest"
image_id_previous = get_container_image_id(image_latest)
version_tag = ""
for id in image_id_all:
if id != image_id_previous:
version_tag = get_docker_tag_name(id)
# make previous image as latest
run_command("docker tag %s:%s %s:latest" % (image_name, version_tag, image_name))
if container_name == "swss" or container_name == "bgp" or container_name == "teamd":
echo_and_log("Cold reboot is required to restore system state after '{}' rollback !!".format(container_name), LOG_ERR)
else:
run_command("systemctl restart %s" % container_name)
echo_and_log('Done')
# verify the next image
@sonic_installer.command('verify-next-image')
def verify_next_image():
""" Verify the next image for reboot"""
bootloader = get_bootloader()
if not bootloader.verify_next_image():
echo_and_log('Image verification failed', LOG_ERR)
sys.exit(1)
click.echo('Image successfully verified')
if __name__ == '__main__':
sonic_installer()
| 42.141907 | 150 | 0.669631 | ["Apache-2.0"] | Cosmin-Jinga-MS/sonic-utilities | sonic_installer/main.py | 38,012 | Python |
#!/usr/bin/python3
# encoding: utf-8
# Setup file for dulwich
# Copyright (C) 2008-2016 Jelmer Vernooij <[email protected]>
try:
from setuptools import setup, Extension
except ImportError:
from distutils.core import setup, Extension
has_setuptools = False
else:
has_setuptools = True
from distutils.core import Distribution
import io
import os
import sys
from typing import Dict, Any
if sys.version_info < (3, 6):
raise Exception(
'Dulwich only supports Python 3.6 and later. '
'For 2.7 support, please install a version prior to 0.20')
dulwich_version_string = '0.20.32'
class DulwichDistribution(Distribution):
def is_pure(self):
if self.pure:
return True
def has_ext_modules(self):
return not self.pure
global_options = Distribution.global_options + [
('pure', None, "use pure Python code instead of C "
"extensions (slower on CPython)")]
pure = False
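# Illustrative sketch (never executed during installation): how the custom
# distribution class above reacts to the --pure global option, e.g.
# `python setup.py --pure build`. With pure set, setuptools/distutils treat the
# package as pure Python and skip building the C extensions.
def _example_pure_flag():
    dist = DulwichDistribution()
    dist.pure = True
    assert dist.is_pure() and not dist.has_ext_modules()
    dist.pure = False
    assert dist.has_ext_modules()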
if sys.platform == 'darwin' and os.path.exists('/usr/bin/xcodebuild'):
# XCode 4.0 dropped support for ppc architecture, which is hardcoded in
# distutils.sysconfig
import subprocess
p = subprocess.Popen(
['/usr/bin/xcodebuild', '-version'], stdout=subprocess.PIPE,
stderr=subprocess.PIPE, env={})
out, err = p.communicate()
for line in out.splitlines():
line = line.decode("utf8")
# Also parse only first digit, because 3.2.1 can't be parsed nicely
if (line.startswith('Xcode') and
int(line.split()[1].split('.')[0]) >= 4):
os.environ['ARCHFLAGS'] = ''
tests_require = ['fastimport']
if '__pypy__' not in sys.modules and sys.platform != 'win32':
tests_require.extend([
'gevent', 'geventhttpclient', 'setuptools>=17.1'])
ext_modules = [
Extension('dulwich._objects', ['dulwich/_objects.c']),
Extension('dulwich._pack', ['dulwich/_pack.c']),
Extension('dulwich._diff_tree', ['dulwich/_diff_tree.c']),
]
setup_kwargs = {} # type: Dict[str, Any]
scripts = ['bin/dul-receive-pack', 'bin/dul-upload-pack']
if has_setuptools:
setup_kwargs['extras_require'] = {
'fastimport': ['fastimport'],
'https': ['urllib3[secure]>=1.24.1'],
'pgp': ['gpg'],
'watch': ['pyinotify'],
}
setup_kwargs['install_requires'] = ['urllib3>=1.24.1', 'certifi']
setup_kwargs['include_package_data'] = True
setup_kwargs['test_suite'] = 'dulwich.tests.test_suite'
setup_kwargs['tests_require'] = tests_require
setup_kwargs['entry_points'] = {
"console_scripts": [
"dulwich=dulwich.cli:main",
]}
setup_kwargs['python_requires'] = '>=3.6'
else:
scripts.append('bin/dulwich')
with io.open(os.path.join(os.path.dirname(__file__), "README.rst"),
encoding="utf-8") as f:
description = f.read()
setup(name='dulwich',
author="Jelmer Vernooij",
author_email="[email protected]",
url="https://www.dulwich.io/",
long_description=description,
description="Python Git Library",
version=dulwich_version_string,
license='Apachev2 or later or GPLv2',
project_urls={
"Bug Tracker": "https://github.com/dulwich/dulwich/issues",
"Repository": "https://www.dulwich.io/code/",
"GitHub": "https://github.com/dulwich/dulwich",
},
keywords="git vcs",
packages=['dulwich', 'dulwich.tests', 'dulwich.tests.compat',
'dulwich.contrib'],
package_data={'': ['../docs/tutorial/*.txt', 'py.typed']},
scripts=scripts,
ext_modules=ext_modules,
zip_safe=False,
distclass=DulwichDistribution,
classifiers=[
'Development Status :: 4 - Beta',
'License :: OSI Approved :: Apache Software License',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
'Programming Language :: Python :: 3.8',
'Programming Language :: Python :: 3.9',
'Programming Language :: Python :: 3.10',
'Programming Language :: Python :: Implementation :: CPython',
'Programming Language :: Python :: Implementation :: PyPy',
'Operating System :: POSIX',
'Operating System :: Microsoft :: Windows',
'Topic :: Software Development :: Version Control',
],
**setup_kwargs
)
| 32.153285 | 75 | 0.620204 | [
"Apache-2.0"
] | epopcop/dulwich | setup.py | 4,406 | Python |
import ctypes
import logging
import threading
import time
from contextlib import contextmanager
from queue import Queue
from typing import Any, Callable, Dict, Generator, Iterable, List, Optional, Tuple, Union, cast
import attr
import hypothesis
import hypothesis.errors
import requests
from _pytest.logging import LogCaptureHandler, catching_logs
from requests.auth import HTTPDigestAuth, _basic_auth_str
from .._hypothesis import make_test_or_exception
from ..checks import DEFAULT_CHECKS
from ..constants import USER_AGENT
from ..exceptions import InvalidSchema
from ..loaders import from_uri
from ..models import Case, Endpoint, Status, TestResult, TestResultSet
from ..schemas import BaseSchema
from ..utils import WSGIResponse, capture_hypothesis_output, get_base_url
from . import events
DEFAULT_DEADLINE = 500 # pragma: no mutate
RawAuth = Tuple[str, str] # pragma: no mutate
def get_hypothesis_settings(hypothesis_options: Optional[Dict[str, Any]] = None) -> hypothesis.settings:
# Default settings, used as a parent settings object below
settings = hypothesis.settings(deadline=DEFAULT_DEADLINE)
if hypothesis_options is not None:
settings = hypothesis.settings(settings, **hypothesis_options)
return settings
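# Illustrative sketch (not called anywhere): user-supplied options are layered
# on top of the default deadline by passing the defaults as the parent settings
# object. The max_examples value below is an arbitrary example, not a default
# used by this module.
def _example_hypothesis_settings() -> None:
    merged = get_hypothesis_settings({"max_examples": 5})
    assert merged.max_examples == 5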
# pylint: disable=too-many-instance-attributes
@attr.s
class BaseRunner:
schema: BaseSchema = attr.ib()
checks: Iterable[Callable] = attr.ib()
hypothesis_settings: hypothesis.settings = attr.ib(converter=get_hypothesis_settings)
auth: Optional[RawAuth] = attr.ib(default=None)
auth_type: Optional[str] = attr.ib(default=None)
headers: Optional[Dict[str, Any]] = attr.ib(default=None)
request_timeout: Optional[int] = attr.ib(default=None)
seed: Optional[int] = attr.ib(default=None)
def execute(self,) -> Generator[events.ExecutionEvent, None, None]:
"""Common logic for all runners."""
results = TestResultSet()
initialized = events.Initialized(
results=results, schema=self.schema, checks=self.checks, hypothesis_settings=self.hypothesis_settings
)
yield initialized
yield from self._execute(results)
yield events.Finished(results=results, schema=self.schema, running_time=time.time() - initialized.start_time)
def _execute(self, results: TestResultSet) -> Generator[events.ExecutionEvent, None, None]:
raise NotImplementedError
@attr.s(slots=True)
class SingleThreadRunner(BaseRunner):
"""Fast runner that runs tests sequentially in the main thread."""
def _execute(self, results: TestResultSet) -> Generator[events.ExecutionEvent, None, None]:
auth = get_requests_auth(self.auth, self.auth_type)
with get_session(auth, self.headers) as session:
for endpoint, test in self.schema.get_all_tests(network_test, self.hypothesis_settings, self.seed):
for event in run_test(
self.schema,
endpoint,
test,
self.checks,
results,
session=session,
request_timeout=self.request_timeout,
):
yield event
if isinstance(event, events.Interrupted):
return
@attr.s(slots=True)
class SingleThreadWSGIRunner(SingleThreadRunner):
def _execute(self, results: TestResultSet) -> Generator[events.ExecutionEvent, None, None]:
for endpoint, test in self.schema.get_all_tests(wsgi_test, self.hypothesis_settings, self.seed):
for event in run_test(
self.schema,
endpoint,
test,
self.checks,
results,
auth=self.auth,
auth_type=self.auth_type,
headers=self.headers,
):
yield event
if isinstance(event, events.Interrupted):
return
def _run_task(
test_template: Callable,
tasks_queue: Queue,
events_queue: Queue,
schema: BaseSchema,
checks: Iterable[Callable],
settings: hypothesis.settings,
seed: Optional[int],
results: TestResultSet,
**kwargs: Any,
) -> None:
# pylint: disable=too-many-arguments
with capture_hypothesis_output():
while not tasks_queue.empty():
endpoint = tasks_queue.get()
test = make_test_or_exception(endpoint, test_template, settings, seed)
for event in run_test(schema, endpoint, test, checks, results, **kwargs):
events_queue.put(event)
def thread_task(
tasks_queue: Queue,
events_queue: Queue,
schema: BaseSchema,
checks: Iterable[Callable],
settings: hypothesis.settings,
auth: Optional[RawAuth],
auth_type: Optional[str],
headers: Optional[Dict[str, Any]],
seed: Optional[int],
results: TestResultSet,
kwargs: Any,
) -> None:
"""A single task, that threads do.
Pretty similar to the default one-thread flow, but includes communication with the main thread via the events queue.
"""
# pylint: disable=too-many-arguments
prepared_auth = get_requests_auth(auth, auth_type)
with get_session(prepared_auth, headers) as session:
_run_task(
network_test, tasks_queue, events_queue, schema, checks, settings, seed, results, session=session, **kwargs
)
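# Illustrative sketch (independent of schemathesis internals, never called):
# the tasks-queue-in / events-queue-out shape used by the worker functions
# above. Workers drain a shared task queue and report back to the main thread
# through a second queue; the payloads here are plain strings, not real events.
def _example_queue_roundtrip() -> List[str]:
    tasks: Queue = Queue()
    results: Queue = Queue()
    for name in ("GET /users", "POST /users"):
        tasks.put(name)

    def worker() -> None:
        while not tasks.empty():
            results.put("finished " + tasks.get())

    thread = threading.Thread(target=worker)
    thread.start()
    thread.join()
    return [results.get() for _ in range(results.qsize())]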
def wsgi_thread_task(
tasks_queue: Queue,
events_queue: Queue,
schema: BaseSchema,
checks: Iterable[Callable],
settings: hypothesis.settings,
seed: Optional[int],
results: TestResultSet,
kwargs: Any,
) -> None:
# pylint: disable=too-many-arguments
_run_task(wsgi_test, tasks_queue, events_queue, schema, checks, settings, seed, results, **kwargs)
def stop_worker(thread_id: int) -> None:
"""Raise an error in a thread so it is possible to asynchronously stop thread execution."""
ctypes.pythonapi.PyThreadState_SetAsyncExc(ctypes.c_long(thread_id), ctypes.py_object(SystemExit))
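# Illustrative sketch (never called): stop_worker relies on CPython's
# PyThreadState_SetAsyncExc to schedule SystemExit inside another thread. The
# exception is only delivered once that thread re-enters the bytecode
# interpreter, so a worker blocked in C code may not stop immediately. The
# sleepy loop below is a toy target, not part of the runner.
def _example_stop_worker() -> None:
    def sleepy() -> None:
        try:
            while True:
                time.sleep(0.05)
        except SystemExit:
            pass

    worker = threading.Thread(target=sleepy)
    worker.start()
    stop_worker(cast(int, worker.ident))
    worker.join(timeout=5)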
class ThreadInterrupted(Exception):
"""Special exception when worker thread received SIGINT."""
@attr.s(slots=True)
class ThreadPoolRunner(BaseRunner):
"""Spread different tests among multiple worker threads."""
workers_num: int = attr.ib(default=2)
def _execute(self, results: TestResultSet) -> Generator[events.ExecutionEvent, None, None]:
"""All events come from a queue where different workers push their events."""
tasks_queue = self._get_tasks_queue()
# Events are pushed by workers via a separate queue
events_queue: Queue = Queue()
workers = self._init_workers(tasks_queue, events_queue, results)
def stop_workers() -> None:
for worker in workers:
# workers are initialized at this point and `worker.ident` is set with an integer value
ident = cast(int, worker.ident)
stop_worker(ident)
worker.join()
is_finished = False
try:
while not is_finished:
# Sleep is needed for performance reasons
# each call to `is_alive` of an alive worker waits for a lock
                # iterations without waiting are too frequent and a lot of time will be spent waiting for these locks
time.sleep(0.001)
is_finished = all(not worker.is_alive() for worker in workers)
while not events_queue.empty():
event = events_queue.get()
yield event
if isinstance(event, events.Interrupted):
# Thread received SIGINT
# We could still have events in the queue, but ignore them to keep the logic simple
# for now, could be improved in the future to show more info in such corner cases
raise ThreadInterrupted
except ThreadInterrupted:
stop_workers()
except KeyboardInterrupt:
stop_workers()
yield events.Interrupted(results=results, schema=self.schema)
def _get_tasks_queue(self) -> Queue:
"""All endpoints are distributed among all workers via a queue."""
tasks_queue: Queue = Queue()
tasks_queue.queue.extend(self.schema.get_all_endpoints())
return tasks_queue
def _init_workers(self, tasks_queue: Queue, events_queue: Queue, results: TestResultSet) -> List[threading.Thread]:
"""Initialize & start workers that will execute tests."""
workers = [
threading.Thread(
target=self._get_task(), kwargs=self._get_worker_kwargs(tasks_queue, events_queue, results)
)
for _ in range(self.workers_num)
]
for worker in workers:
worker.start()
return workers
def _get_task(self) -> Callable:
return thread_task
def _get_worker_kwargs(self, tasks_queue: Queue, events_queue: Queue, results: TestResultSet) -> Dict[str, Any]:
return {
"tasks_queue": tasks_queue,
"events_queue": events_queue,
"schema": self.schema,
"checks": self.checks,
"settings": self.hypothesis_settings,
"auth": self.auth,
"auth_type": self.auth_type,
"headers": self.headers,
"seed": self.seed,
"results": results,
"kwargs": {"request_timeout": self.request_timeout},
}
class ThreadPoolWSGIRunner(ThreadPoolRunner):
def _get_task(self) -> Callable:
return wsgi_thread_task
def _get_worker_kwargs(self, tasks_queue: Queue, events_queue: Queue, results: TestResultSet) -> Dict[str, Any]:
return {
"tasks_queue": tasks_queue,
"events_queue": events_queue,
"schema": self.schema,
"checks": self.checks,
"settings": self.hypothesis_settings,
"seed": self.seed,
"results": results,
"kwargs": {"auth": self.auth, "auth_type": self.auth_type, "headers": self.headers},
}
def execute_from_schema(
schema: BaseSchema,
checks: Iterable[Callable],
*,
workers_num: int = 1,
hypothesis_options: Optional[Dict[str, Any]] = None,
auth: Optional[RawAuth] = None,
auth_type: Optional[str] = None,
headers: Optional[Dict[str, Any]] = None,
request_timeout: Optional[int] = None,
seed: Optional[int] = None,
) -> Generator[events.ExecutionEvent, None, None]:
"""Execute tests for the given schema.
Provides the main testing loop and preparation step.
"""
runner: BaseRunner
if workers_num > 1:
if schema.app:
runner = ThreadPoolWSGIRunner(
schema=schema,
checks=checks,
hypothesis_settings=hypothesis_options,
auth=auth,
auth_type=auth_type,
headers=headers,
seed=seed,
workers_num=workers_num,
)
else:
runner = ThreadPoolRunner(
schema=schema,
checks=checks,
hypothesis_settings=hypothesis_options,
auth=auth,
auth_type=auth_type,
headers=headers,
seed=seed,
request_timeout=request_timeout,
)
else:
if schema.app:
runner = SingleThreadWSGIRunner(
schema=schema,
checks=checks,
hypothesis_settings=hypothesis_options,
auth=auth,
auth_type=auth_type,
headers=headers,
seed=seed,
)
else:
runner = SingleThreadRunner(
schema=schema,
checks=checks,
hypothesis_settings=hypothesis_options,
auth=auth,
auth_type=auth_type,
headers=headers,
seed=seed,
request_timeout=request_timeout,
)
yield from runner.execute()
def run_test(
schema: BaseSchema,
endpoint: Endpoint,
test: Union[Callable, InvalidSchema],
checks: Iterable[Callable],
results: TestResultSet,
**kwargs: Any,
) -> Generator[events.ExecutionEvent, None, None]:
"""A single test run with all error handling needed."""
# pylint: disable=too-many-arguments
result = TestResult(endpoint=endpoint)
yield events.BeforeExecution(results=results, schema=schema, endpoint=endpoint)
hypothesis_output: List[str] = []
try:
if isinstance(test, InvalidSchema):
status = Status.error
result.add_error(test)
else:
with capture_hypothesis_output() as hypothesis_output:
test(checks, result, **kwargs)
status = Status.success
except AssertionError:
status = Status.failure
except hypothesis.errors.Flaky:
status = Status.error
result.mark_errored()
# Sometimes Hypothesis detects inconsistent test results and checks are not available
if result.checks:
flaky_example = result.checks[-1].example
else:
flaky_example = None
result.add_error(
hypothesis.errors.Flaky(
"Tests on this endpoint produce unreliable results: \n"
"Falsified on the first call but did not on a subsequent one"
),
flaky_example,
)
except hypothesis.errors.Unsatisfiable:
        # We need a clearer error message here
status = Status.error
result.add_error(hypothesis.errors.Unsatisfiable("Unable to satisfy schema parameters for this endpoint"))
except KeyboardInterrupt:
yield events.Interrupted(results=results, schema=schema)
return
except Exception as error:
status = Status.error
result.add_error(error)
# Fetch seed value, hypothesis generates it during test execution
result.seed = getattr(test, "_hypothesis_internal_use_seed", None) or getattr(
test, "_hypothesis_internal_use_generated_seed", None
)
results.append(result)
yield events.AfterExecution(
results=results, schema=schema, endpoint=endpoint, status=status, hypothesis_output=hypothesis_output
)
def execute( # pylint: disable=too-many-arguments
schema_uri: str,
checks: Iterable[Callable] = DEFAULT_CHECKS,
api_options: Optional[Dict[str, Any]] = None,
loader_options: Optional[Dict[str, Any]] = None,
hypothesis_options: Optional[Dict[str, Any]] = None,
loader: Callable = from_uri,
) -> TestResultSet:
generator = prepare(
schema_uri=schema_uri,
checks=checks,
api_options=api_options,
loader_options=loader_options,
hypothesis_options=hypothesis_options,
loader=loader,
)
all_events = list(generator)
finished = all_events[-1]
return finished.results
def prepare( # pylint: disable=too-many-arguments
schema_uri: str,
checks: Iterable[Callable] = DEFAULT_CHECKS,
workers_num: int = 1,
api_options: Optional[Dict[str, Any]] = None,
loader_options: Optional[Dict[str, Any]] = None,
hypothesis_options: Optional[Dict[str, Any]] = None,
loader: Callable = from_uri,
seed: Optional[int] = None,
) -> Generator[events.ExecutionEvent, None, None]:
"""Prepare a generator that will run test cases against the given API definition."""
api_options = api_options or {}
loader_options = loader_options or {}
if "base_url" not in loader_options:
loader_options["base_url"] = get_base_url(schema_uri)
schema = loader(schema_uri, **loader_options)
return execute_from_schema(
schema, checks, hypothesis_options=hypothesis_options, seed=seed, workers_num=workers_num, **api_options
)
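# Illustrative sketch (never called): how the event stream produced by
# `prepare` is meant to be consumed. The schema URL is a hypothetical
# placeholder; a real caller would react to every event (progress reporting,
# collecting failures) instead of only keeping the final Finished event.
def _example_consume_events(schema_uri: str = "http://example.com/swagger.json"):
    finished = None
    for event in prepare(schema_uri):
        if isinstance(event, events.Finished):
            finished = event
    return finished.results if finished else None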
def network_test(
case: Case,
checks: Iterable[Callable],
result: TestResult,
session: requests.Session,
request_timeout: Optional[int],
) -> None:
"""A single test body that will be executed against the target."""
# pylint: disable=too-many-arguments
timeout = prepare_timeout(request_timeout)
response = case.call(session=session, timeout=timeout)
_run_checks(case, checks, result, response)
def wsgi_test(
case: Case,
checks: Iterable[Callable],
result: TestResult,
auth: Optional[RawAuth],
auth_type: Optional[str],
headers: Optional[Dict[str, Any]],
) -> None:
# pylint: disable=too-many-arguments
headers = _prepare_wsgi_headers(headers, auth, auth_type)
with catching_logs(LogCaptureHandler(), level=logging.DEBUG) as recorded:
response = case.call_wsgi(headers=headers)
result.logs.extend(recorded.records)
_run_checks(case, checks, result, response)
def _prepare_wsgi_headers(
headers: Optional[Dict[str, Any]], auth: Optional[RawAuth], auth_type: Optional[str]
) -> Dict[str, Any]:
headers = headers or {}
headers.setdefault("User-agent", USER_AGENT)
wsgi_auth = get_wsgi_auth(auth, auth_type)
if wsgi_auth:
headers["Authorization"] = wsgi_auth
return headers
def _run_checks(
case: Case, checks: Iterable[Callable], result: TestResult, response: Union[requests.Response, WSGIResponse]
) -> None:
errors = None
for check in checks:
check_name = check.__name__
try:
check(response, result)
result.add_success(check_name, case)
except AssertionError as exc:
errors = True # pragma: no mutate
result.add_failure(check_name, case, str(exc))
if errors is not None:
# An exception needed to trigger Hypothesis shrinking & flaky tests detection logic
# The message doesn't matter
raise AssertionError
def prepare_timeout(timeout: Optional[int]) -> Optional[float]:
"""Request timeout is in milliseconds, but `requests` uses seconds"""
output: Optional[Union[int, float]] = timeout
if timeout is not None:
output = timeout / 1000
return output
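# Usage note (illustrative, never called): prepare_timeout(500) -> 0.5 seconds,
# prepare_timeout(None) -> None (no timeout is passed to `requests`).
def _example_prepare_timeout() -> None:
    assert prepare_timeout(500) == 0.5
    assert prepare_timeout(None) is None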
@contextmanager
def get_session(
auth: Optional[Union[HTTPDigestAuth, RawAuth]] = None, headers: Optional[Dict[str, Any]] = None
) -> Generator[requests.Session, None, None]:
with requests.Session() as session:
if auth is not None:
session.auth = auth
session.headers["User-agent"] = USER_AGENT
if headers is not None:
session.headers.update(**headers)
yield session
def get_requests_auth(auth: Optional[RawAuth], auth_type: Optional[str]) -> Optional[Union[HTTPDigestAuth, RawAuth]]:
if auth and auth_type == "digest":
return HTTPDigestAuth(*auth)
return auth
def get_wsgi_auth(auth: Optional[RawAuth], auth_type: Optional[str]) -> Optional[str]:
if auth:
if auth_type == "digest":
raise ValueError("Digest auth is not supported for WSGI apps")
return _basic_auth_str(*auth)
return None
| 35.88743 | 120 | 0.644657 | [
"MIT"
] | hlobit/schemathesis | src/schemathesis/runner/__init__.py | 19,128 | Python |
#!/usr/bin/env python
import os
import sys
if __name__ == '__main__':
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'rateMyProf.settings')
try:
from django.core.management import execute_from_command_line
except ImportError as exc:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
) from exc
execute_from_command_line(sys.argv)
| 33.875 | 74 | 0.688192 | [
"MIT"
] | DefCon-007/rateMyProfessor | rateMyProf/manage.py | 542 | Python |
# coding: utf-8
from __future__ import unicode_literals
import json
import re
import socket
from .common import InfoExtractor
from ..compat import (
compat_etree_fromstring,
compat_http_client,
compat_str,
compat_urllib_error,
compat_urllib_parse_unquote,
compat_urllib_parse_unquote_plus,
)
from ..utils import (
clean_html,
error_to_compat_str,
ExtractorError,
float_or_none,
get_element_by_id,
int_or_none,
js_to_json,
limit_length,
parse_count,
qualities,
sanitized_Request,
try_get,
urlencode_postdata,
urljoin,
)
class FacebookIE(InfoExtractor):
_VALID_URL = r'''(?x)
(?:
https?://
(?:[\w-]+\.)?(?:facebook\.com|facebookcorewwwi\.onion)/
(?:[^#]*?\#!/)?
(?:
(?:
video/video\.php|
photo\.php|
video\.php|
video/embed|
story\.php|
watch(?:/live)?/?
)\?(?:.*?)(?:v|video_id|story_fbid)=|
[^/]+/videos/(?:[^/]+/)?|
[^/]+/posts/|
groups/[^/]+/permalink/|
watchparty/
)|
facebook:
)
(?P<id>[0-9]+)
'''
_LOGIN_URL = 'https://www.facebook.com/login.php?next=http%3A%2F%2Ffacebook.com%2Fhome.php&login_attempt=1'
_CHECKPOINT_URL = 'https://www.facebook.com/checkpoint/?next=http%3A%2F%2Ffacebook.com%2Fhome.php&_fb_noscript=1'
_NETRC_MACHINE = 'facebook'
IE_NAME = 'facebook'
_VIDEO_PAGE_TEMPLATE = 'https://www.facebook.com/video/video.php?v=%s'
_VIDEO_PAGE_TAHOE_TEMPLATE = 'https://www.facebook.com/video/tahoe/async/%s/?chain=true&isvideo=true&payloadtype=primary'
_TESTS = [{
'url': 'https://www.facebook.com/video.php?v=637842556329505&fref=nf',
'md5': '6a40d33c0eccbb1af76cf0485a052659',
'info_dict': {
'id': '637842556329505',
'ext': 'mp4',
'title': 're:Did you know Kei Nishikori is the first Asian man to ever reach a Grand Slam',
'uploader': 'Tennis on Facebook',
'upload_date': '20140908',
'timestamp': 1410199200,
},
'skip': 'Requires logging in',
}, {
# data.video
'url': 'https://www.facebook.com/video.php?v=274175099429670',
'info_dict': {
'id': '274175099429670',
'ext': 'mp4',
'title': 're:^Asif Nawab Butt posted a video',
'uploader': 'Asif Nawab Butt',
'upload_date': '20140506',
'timestamp': 1399398998,
'thumbnail': r're:^https?://.*',
},
'expected_warnings': [
'title'
]
}, {
'note': 'Video with DASH manifest',
'url': 'https://www.facebook.com/video.php?v=957955867617029',
'md5': 'b2c28d528273b323abe5c6ab59f0f030',
'info_dict': {
'id': '957955867617029',
'ext': 'mp4',
'title': 'When you post epic content on instagram.com/433 8 million followers, this is ...',
'uploader': 'Demy de Zeeuw',
'upload_date': '20160110',
'timestamp': 1452431627,
},
'skip': 'Requires logging in',
}, {
'url': 'https://www.facebook.com/maxlayn/posts/10153807558977570',
'md5': '037b1fa7f3c2d02b7a0d7bc16031ecc6',
'info_dict': {
'id': '544765982287235',
'ext': 'mp4',
'title': '"What are you doing running in the snow?"',
'uploader': 'FailArmy',
},
'skip': 'Video gone',
}, {
'url': 'https://m.facebook.com/story.php?story_fbid=1035862816472149&id=116132035111903',
'md5': '1deb90b6ac27f7efcf6d747c8a27f5e3',
'info_dict': {
'id': '1035862816472149',
'ext': 'mp4',
'title': 'What the Flock Is Going On In New Zealand Credit: ViralHog',
'uploader': 'S. Saint',
},
'skip': 'Video gone',
}, {
'note': 'swf params escaped',
'url': 'https://www.facebook.com/barackobama/posts/10153664894881749',
'md5': '97ba073838964d12c70566e0085c2b91',
'info_dict': {
'id': '10153664894881749',
'ext': 'mp4',
'title': 'Average time to confirm recent Supreme Court nominees: 67 days Longest it\'s t...',
'thumbnail': r're:^https?://.*',
'timestamp': 1456259628,
'upload_date': '20160223',
'uploader': 'Barack Obama',
},
}, {
# have 1080P, but only up to 720p in swf params
# data.video.story.attachments[].media
'url': 'https://www.facebook.com/cnn/videos/10155529876156509/',
'md5': '9571fae53d4165bbbadb17a94651dcdc',
'info_dict': {
'id': '10155529876156509',
'ext': 'mp4',
'title': 'She survived the holocaust — and years later, she’s getting her citizenship s...',
'timestamp': 1477818095,
'upload_date': '20161030',
'uploader': 'CNN',
'thumbnail': r're:^https?://.*',
'view_count': int,
},
}, {
# bigPipe.onPageletArrive ... onPageletArrive pagelet_group_mall
# data.node.comet_sections.content.story.attachments[].style_type_renderer.attachment.media
'url': 'https://www.facebook.com/yaroslav.korpan/videos/1417995061575415/',
'info_dict': {
'id': '1417995061575415',
'ext': 'mp4',
'title': 'md5:1db063d6a8c13faa8da727817339c857',
'timestamp': 1486648217,
'upload_date': '20170209',
'uploader': 'Yaroslav Korpan',
},
'params': {
'skip_download': True,
},
}, {
'url': 'https://www.facebook.com/LaGuiaDelVaron/posts/1072691702860471',
'info_dict': {
'id': '1072691702860471',
'ext': 'mp4',
'title': 'md5:ae2d22a93fbb12dad20dc393a869739d',
'timestamp': 1477305000,
'upload_date': '20161024',
'uploader': 'La Guía Del Varón',
'thumbnail': r're:^https?://.*',
},
'params': {
'skip_download': True,
},
}, {
# data.node.comet_sections.content.story.attachments[].style_type_renderer.attachment.media
'url': 'https://www.facebook.com/groups/1024490957622648/permalink/1396382447100162/',
'info_dict': {
'id': '1396382447100162',
'ext': 'mp4',
'title': 'md5:19a428bbde91364e3de815383b54a235',
'timestamp': 1486035494,
'upload_date': '20170202',
'uploader': 'Elisabeth Ahtn',
},
'params': {
'skip_download': True,
},
}, {
'url': 'https://www.facebook.com/video.php?v=10204634152394104',
'only_matching': True,
}, {
'url': 'https://www.facebook.com/amogood/videos/1618742068337349/?fref=nf',
'only_matching': True,
}, {
# data.mediaset.currMedia.edges
'url': 'https://www.facebook.com/ChristyClarkForBC/videos/vb.22819070941/10153870694020942/?type=2&theater',
'only_matching': True,
}, {
# data.video.story.attachments[].media
'url': 'facebook:544765982287235',
'only_matching': True,
}, {
# data.node.comet_sections.content.story.attachments[].style_type_renderer.attachment.media
'url': 'https://www.facebook.com/groups/164828000315060/permalink/764967300301124/',
'only_matching': True,
}, {
# data.video.creation_story.attachments[].media
'url': 'https://zh-hk.facebook.com/peoplespower/videos/1135894589806027/',
'only_matching': True,
}, {
# data.video
'url': 'https://www.facebookcorewwwi.onion/video.php?v=274175099429670',
'only_matching': True,
}, {
# no title
'url': 'https://www.facebook.com/onlycleverentertainment/videos/1947995502095005/',
'only_matching': True,
}, {
# data.video
'url': 'https://www.facebook.com/WatchESLOne/videos/359649331226507/',
'info_dict': {
'id': '359649331226507',
'ext': 'mp4',
'title': '#ESLOne VoD - Birmingham Finals Day#1 Fnatic vs. @Evil Geniuses',
'uploader': 'ESL One Dota 2',
},
'params': {
'skip_download': True,
},
}, {
# data.node.comet_sections.content.story.attachments[].style_type_renderer.attachment.all_subattachments.nodes[].media
'url': 'https://www.facebook.com/100033620354545/videos/106560053808006/',
'info_dict': {
'id': '106560053808006',
},
'playlist_count': 2,
}, {
# data.video.story.attachments[].media
'url': 'https://www.facebook.com/watch/?v=647537299265662',
'only_matching': True,
}, {
# data.node.comet_sections.content.story.attachments[].style_type_renderer.attachment.all_subattachments.nodes[].media
'url': 'https://www.facebook.com/PankajShahLondon/posts/10157667649866271',
'info_dict': {
'id': '10157667649866271',
},
'playlist_count': 3,
}, {
# data.nodes[].comet_sections.content.story.attachments[].style_type_renderer.attachment.media
'url': 'https://m.facebook.com/Alliance.Police.Department/posts/4048563708499330',
'info_dict': {
'id': '117576630041613',
'ext': 'mp4',
# TODO: title can be extracted from video page
'title': 'Facebook video #117576630041613',
'uploader_id': '189393014416438',
'upload_date': '20201123',
'timestamp': 1606162592,
},
'skip': 'Requires logging in',
}, {
# node.comet_sections.content.story.attached_story.attachments.style_type_renderer.attachment.media
'url': 'https://www.facebook.com/groups/ateistiskselskab/permalink/10154930137678856/',
'info_dict': {
'id': '211567722618337',
'ext': 'mp4',
'title': 'Facebook video #211567722618337',
'uploader_id': '127875227654254',
'upload_date': '20161122',
'timestamp': 1479793574,
},
}, {
# data.video.creation_story.attachments[].media
'url': 'https://www.facebook.com/watch/live/?v=1823658634322275',
'only_matching': True,
}, {
'url': 'https://www.facebook.com/watchparty/211641140192478',
'info_dict': {
'id': '211641140192478',
},
'playlist_count': 1,
'skip': 'Requires logging in',
}]
_SUPPORTED_PAGLETS_REGEX = r'(?:pagelet_group_mall|permalink_video_pagelet|hyperfeed_story_id_[0-9a-f]+)'
_api_config = {
'graphURI': '/api/graphql/'
}
@staticmethod
def _extract_urls(webpage):
urls = []
for mobj in re.finditer(
r'<iframe[^>]+?src=(["\'])(?P<url>https?://www\.facebook\.com/(?:video/embed|plugins/video\.php).+?)\1',
webpage):
urls.append(mobj.group('url'))
# Facebook API embed
# see https://developers.facebook.com/docs/plugins/embedded-video-player
for mobj in re.finditer(r'''(?x)<div[^>]+
class=(?P<q1>[\'"])[^\'"]*\bfb-(?:video|post)\b[^\'"]*(?P=q1)[^>]+
data-href=(?P<q2>[\'"])(?P<url>(?:https?:)?//(?:www\.)?facebook.com/.+?)(?P=q2)''', webpage):
urls.append(mobj.group('url'))
return urls
def _login(self):
useremail, password = self._get_login_info()
if useremail is None:
return
login_page_req = sanitized_Request(self._LOGIN_URL)
self._set_cookie('facebook.com', 'locale', 'en_US')
login_page = self._download_webpage(login_page_req, None,
note='Downloading login page',
errnote='Unable to download login page')
lsd = self._search_regex(
r'<input type="hidden" name="lsd" value="([^"]*)"',
login_page, 'lsd')
lgnrnd = self._search_regex(r'name="lgnrnd" value="([^"]*?)"', login_page, 'lgnrnd')
login_form = {
'email': useremail,
'pass': password,
'lsd': lsd,
'lgnrnd': lgnrnd,
'next': 'http://facebook.com/home.php',
'default_persistent': '0',
'legacy_return': '1',
'timezone': '-60',
'trynum': '1',
}
request = sanitized_Request(self._LOGIN_URL, urlencode_postdata(login_form))
request.add_header('Content-Type', 'application/x-www-form-urlencoded')
try:
login_results = self._download_webpage(request, None,
note='Logging in', errnote='unable to fetch login page')
if re.search(r'<form(.*)name="login"(.*)</form>', login_results) is not None:
error = self._html_search_regex(
r'(?s)<div[^>]+class=(["\']).*?login_error_box.*?\1[^>]*><div[^>]*>.*?</div><div[^>]*>(?P<error>.+?)</div>',
login_results, 'login error', default=None, group='error')
if error:
raise ExtractorError('Unable to login: %s' % error, expected=True)
self._downloader.report_warning('unable to log in: bad username/password, or exceeded login rate limit (~3/min). Check credentials or wait.')
return
fb_dtsg = self._search_regex(
r'name="fb_dtsg" value="(.+?)"', login_results, 'fb_dtsg', default=None)
h = self._search_regex(
r'name="h"\s+(?:\w+="[^"]+"\s+)*?value="([^"]+)"', login_results, 'h', default=None)
if not fb_dtsg or not h:
return
check_form = {
'fb_dtsg': fb_dtsg,
'h': h,
'name_action_selected': 'dont_save',
}
check_req = sanitized_Request(self._CHECKPOINT_URL, urlencode_postdata(check_form))
check_req.add_header('Content-Type', 'application/x-www-form-urlencoded')
check_response = self._download_webpage(check_req, None,
note='Confirming login')
if re.search(r'id="checkpointSubmitButton"', check_response) is not None:
self._downloader.report_warning('Unable to confirm login, you have to login in your browser and authorize the login.')
except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err:
self._downloader.report_warning('unable to log in: %s' % error_to_compat_str(err))
return
def _real_initialize(self):
self._login()
def _extract_from_url(self, url, video_id):
webpage = self._download_webpage(
url.replace('://m.facebook.com/', '://www.facebook.com/'), video_id)
video_data = None
def extract_video_data(instances):
video_data = []
for item in instances:
if try_get(item, lambda x: x[1][0]) == 'VideoConfig':
video_item = item[2][0]
if video_item.get('video_id'):
video_data.append(video_item['videoData'])
return video_data
server_js_data = self._parse_json(self._search_regex(
[r'handleServerJS\(({.+})(?:\);|,")', r'\bs\.handle\(({.+?})\);'],
webpage, 'server js data', default='{}'), video_id, fatal=False)
if server_js_data:
video_data = extract_video_data(server_js_data.get('instances', []))
def extract_from_jsmods_instances(js_data):
if js_data:
return extract_video_data(try_get(
js_data, lambda x: x['jsmods']['instances'], list) or [])
def extract_dash_manifest(video, formats):
dash_manifest = video.get('dash_manifest')
if dash_manifest:
formats.extend(self._parse_mpd_formats(
compat_etree_fromstring(compat_urllib_parse_unquote_plus(dash_manifest))))
def process_formats(formats):
            # Downloads with a browser User-Agent are rate limited. Work around
            # this with a non-browser User-Agent.
for f in formats:
f.setdefault('http_headers', {})['User-Agent'] = 'facebookexternalhit/1.1'
self._sort_formats(formats)
def extract_relay_data(_filter):
return self._parse_json(self._search_regex(
r'handleWithCustomApplyEach\([^,]+,\s*({.*?%s.*?})\);' % _filter,
                webpage, 'relay data', default='{}'), video_id, fatal=False) or {}
def extract_relay_prefetched_data(_filter):
replay_data = extract_relay_data(_filter)
for require in (replay_data.get('require') or []):
if require[0] == 'RelayPrefetchedStreamCache':
return try_get(require, lambda x: x[3][1]['__bbox']['result']['data'], dict) or {}
if not video_data:
server_js_data = self._parse_json(self._search_regex([
r'bigPipe\.onPageletArrive\(({.+?})\)\s*;\s*}\s*\)\s*,\s*["\']onPageletArrive\s+' + self._SUPPORTED_PAGLETS_REGEX,
r'bigPipe\.onPageletArrive\(({.*?id\s*:\s*"%s".*?})\);' % self._SUPPORTED_PAGLETS_REGEX
], webpage, 'js data', default='{}'), video_id, js_to_json, False)
video_data = extract_from_jsmods_instances(server_js_data)
if not video_data:
data = extract_relay_prefetched_data(
r'"(?:dash_manifest|playable_url(?:_quality_hd)?)"\s*:\s*"[^"]+"')
if data:
entries = []
def parse_graphql_video(video):
formats = []
q = qualities(['sd', 'hd'])
for (suffix, format_id) in [('', 'sd'), ('_quality_hd', 'hd')]:
playable_url = video.get('playable_url' + suffix)
if not playable_url:
continue
formats.append({
'format_id': format_id,
'quality': q(format_id),
'url': playable_url,
})
extract_dash_manifest(video, formats)
process_formats(formats)
v_id = video.get('videoId') or video.get('id') or video_id
info = {
'id': v_id,
'formats': formats,
'thumbnail': try_get(video, lambda x: x['thumbnailImage']['uri']),
'uploader_id': try_get(video, lambda x: x['owner']['id']),
'timestamp': int_or_none(video.get('publish_time')),
'duration': float_or_none(video.get('playable_duration_in_ms'), 1000),
}
description = try_get(video, lambda x: x['savable_description']['text'])
title = video.get('name')
if title:
info.update({
'title': title,
'description': description,
})
else:
info['title'] = description or 'Facebook video #%s' % v_id
entries.append(info)
def parse_attachment(attachment, key='media'):
media = attachment.get(key) or {}
if media.get('__typename') == 'Video':
return parse_graphql_video(media)
nodes = data.get('nodes') or []
node = data.get('node') or {}
if not nodes and node:
nodes.append(node)
for node in nodes:
story = try_get(node, lambda x: x['comet_sections']['content']['story'], dict) or {}
attachments = try_get(story, [
lambda x: x['attached_story']['attachments'],
lambda x: x['attachments']
], list) or []
for attachment in attachments:
attachment = try_get(attachment, lambda x: x['style_type_renderer']['attachment'], dict)
ns = try_get(attachment, lambda x: x['all_subattachments']['nodes'], list) or []
for n in ns:
parse_attachment(n)
parse_attachment(attachment)
edges = try_get(data, lambda x: x['mediaset']['currMedia']['edges'], list) or []
for edge in edges:
parse_attachment(edge, key='node')
video = data.get('video') or {}
if video:
attachments = try_get(video, [
lambda x: x['story']['attachments'],
lambda x: x['creation_story']['attachments']
], list) or []
for attachment in attachments:
parse_attachment(attachment)
if not entries:
parse_graphql_video(video)
return self.playlist_result(entries, video_id)
if not video_data:
m_msg = re.search(r'class="[^"]*uiInterstitialContent[^"]*"><div>(.*?)</div>', webpage)
if m_msg is not None:
raise ExtractorError(
'The video is not available, Facebook said: "%s"' % m_msg.group(1),
expected=True)
elif '>You must log in to continue' in webpage:
self.raise_login_required()
if not video_data and '/watchparty/' in url:
post_data = {
'doc_id': 3731964053542869,
'variables': json.dumps({
'livingRoomID': video_id,
}),
}
prefetched_data = extract_relay_prefetched_data(r'"login_data"\s*:\s*{')
if prefetched_data:
lsd = try_get(prefetched_data, lambda x: x['login_data']['lsd'], dict)
if lsd:
post_data[lsd['name']] = lsd['value']
relay_data = extract_relay_data(r'\[\s*"RelayAPIConfigDefaults"\s*,')
for define in (relay_data.get('define') or []):
if define[0] == 'RelayAPIConfigDefaults':
self._api_config = define[2]
living_room = self._download_json(
urljoin(url, self._api_config['graphURI']), video_id,
data=urlencode_postdata(post_data))['data']['living_room']
entries = []
for edge in (try_get(living_room, lambda x: x['recap']['watched_content']['edges']) or []):
video = try_get(edge, lambda x: x['node']['video']) or {}
v_id = video.get('id')
if not v_id:
continue
v_id = compat_str(v_id)
entries.append(self.url_result(
self._VIDEO_PAGE_TEMPLATE % v_id,
self.ie_key(), v_id, video.get('name')))
return self.playlist_result(entries, video_id)
if not video_data:
# Video info not in first request, do a secondary request using
# tahoe player specific URL
tahoe_data = self._download_webpage(
self._VIDEO_PAGE_TAHOE_TEMPLATE % video_id, video_id,
data=urlencode_postdata({
'__a': 1,
'__pc': self._search_regex(
r'pkg_cohort["\']\s*:\s*["\'](.+?)["\']', webpage,
'pkg cohort', default='PHASED:DEFAULT'),
'__rev': self._search_regex(
r'client_revision["\']\s*:\s*(\d+),', webpage,
'client revision', default='3944515'),
'fb_dtsg': self._search_regex(
r'"DTSGInitialData"\s*,\s*\[\]\s*,\s*{\s*"token"\s*:\s*"([^"]+)"',
webpage, 'dtsg token', default=''),
}),
headers={
'Content-Type': 'application/x-www-form-urlencoded',
})
tahoe_js_data = self._parse_json(
self._search_regex(
r'for\s+\(\s*;\s*;\s*\)\s*;(.+)', tahoe_data,
'tahoe js data', default='{}'),
video_id, fatal=False)
video_data = extract_from_jsmods_instances(tahoe_js_data)
if not video_data:
raise ExtractorError('Cannot parse data')
if len(video_data) > 1:
entries = []
for v in video_data:
video_url = v[0].get('video_url')
if not video_url:
continue
entries.append(self.url_result(urljoin(
url, video_url), self.ie_key(), v[0].get('video_id')))
return self.playlist_result(entries, video_id)
video_data = video_data[0]
formats = []
subtitles = {}
for f in video_data:
format_id = f['stream_type']
if f and isinstance(f, dict):
f = [f]
if not f or not isinstance(f, list):
continue
for quality in ('sd', 'hd'):
for src_type in ('src', 'src_no_ratelimit'):
src = f[0].get('%s_%s' % (quality, src_type))
if src:
preference = -10 if format_id == 'progressive' else 0
if quality == 'hd':
preference += 5
formats.append({
'format_id': '%s_%s_%s' % (format_id, quality, src_type),
'url': src,
'quality': preference,
})
extract_dash_manifest(f[0], formats)
subtitles_src = f[0].get('subtitles_src')
if subtitles_src:
subtitles.setdefault('en', []).append({'url': subtitles_src})
if not formats:
raise ExtractorError('Cannot find video formats')
process_formats(formats)
video_title = self._html_search_regex(
r'<h2\s+[^>]*class="uiHeaderTitle"[^>]*>([^<]*)</h2>', webpage,
'title', default=None)
if not video_title:
video_title = self._html_search_regex(
r'(?s)<span class="fbPhotosPhotoCaption".*?id="fbPhotoPageCaption"><span class="hasCaption">(.*?)</span>',
webpage, 'alternative title', default=None)
if not video_title:
video_title = self._html_search_meta(
'description', webpage, 'title', default=None)
if video_title:
video_title = limit_length(video_title, 80)
else:
video_title = 'Facebook video #%s' % video_id
uploader = clean_html(get_element_by_id(
'fbPhotoPageAuthorName', webpage)) or self._search_regex(
r'ownerName\s*:\s*"([^"]+)"', webpage, 'uploader',
default=None) or self._og_search_title(webpage, fatal=False)
timestamp = int_or_none(self._search_regex(
r'<abbr[^>]+data-utime=["\'](\d+)', webpage,
'timestamp', default=None))
thumbnail = self._html_search_meta(['og:image', 'twitter:image'], webpage)
view_count = parse_count(self._search_regex(
r'\bviewCount\s*:\s*["\']([\d,.]+)', webpage, 'view count',
default=None))
info_dict = {
'id': video_id,
'title': video_title,
'formats': formats,
'uploader': uploader,
'timestamp': timestamp,
'thumbnail': thumbnail,
'view_count': view_count,
'subtitles': subtitles,
}
return info_dict
def _real_extract(self, url):
video_id = self._match_id(url)
real_url = self._VIDEO_PAGE_TEMPLATE % video_id if url.startswith('facebook:') else url
return self._extract_from_url(real_url, video_id)
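# Illustrative sketch (not used by the extractor): the two embed shapes that
# FacebookIE._extract_urls recognises -- a plugins/video.php iframe and an
# fb-video <div> carrying data-href. The HTML fragment below is made up for
# demonstration only.
def _example_extract_embed_urls():
    sample_page = (
        '<iframe src="https://www.facebook.com/plugins/video.php'
        '?href=https%3A%2F%2Fwww.facebook.com%2Fvideo.php%3Fv%3D274175099429670"></iframe>'
        '<div class="fb-video" data-href="https://www.facebook.com/gov.sg/videos/10154383743583686/"></div>'
    )
    return FacebookIE._extract_urls(sample_page)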
class FacebookPluginsVideoIE(InfoExtractor):
_VALID_URL = r'https?://(?:[\w-]+\.)?facebook\.com/plugins/video\.php\?.*?\bhref=(?P<id>https.+)'
_TESTS = [{
'url': 'https://www.facebook.com/plugins/video.php?href=https%3A%2F%2Fwww.facebook.com%2Fgov.sg%2Fvideos%2F10154383743583686%2F&show_text=0&width=560',
'md5': '5954e92cdfe51fe5782ae9bda7058a07',
'info_dict': {
'id': '10154383743583686',
'ext': 'mp4',
'title': 'What to do during the haze?',
'uploader': 'Gov.sg',
'upload_date': '20160826',
'timestamp': 1472184808,
},
'add_ie': [FacebookIE.ie_key()],
}, {
'url': 'https://www.facebook.com/plugins/video.php?href=https%3A%2F%2Fwww.facebook.com%2Fvideo.php%3Fv%3D10204634152394104',
'only_matching': True,
}, {
'url': 'https://www.facebook.com/plugins/video.php?href=https://www.facebook.com/gov.sg/videos/10154383743583686/&show_text=0&width=560',
'only_matching': True,
}]
def _real_extract(self, url):
return self.url_result(
compat_urllib_parse_unquote(self._match_id(url)),
FacebookIE.ie_key())
| 42.512023 | 159 | 0.523756 | [
"Unlicense"
] | 2ShedsJackson/yt-dlp | yt_dlp/extractor/facebook.py | 30,062 | Python |
"""
The patch module allows for a grid to be created and for data to be
defined on that grid.
Typical usage:
-- create the grid
   grid = Grid1d(nx)
-- create the data that lives on that grid
   data = CellCenterData1d(grid)
   bcObj = BCObject(xlb="reflect", xrb="reflect")
   data.register_var("density", bcObj)
   ...
   data.create()
-- initialize some data
   dens = data.get_var("density")
   dens[:] = ...
-- fill the ghost cells
   data.fill_BC("density")
"""
from __future__ import print_function
import sys
import numpy
valid = ["outflow", "periodic",
"reflect", "reflect-even", "reflect-odd",
"dirichlet", "neumann"]
class BCObject(object):
"""
Boundary condition container -- hold the BCs on each boundary
for a single variable
"""
def __init__(self,
xlb="outflow", xrb="outflow",
odd_reflect_dir=""):
# note: "reflect" is ambiguous and will be converted into
# either reflect-even (the default) or reflect-odd
if xlb not in valid or xrb not in valid:
sys.exit("ERROR: invalid BC")
# -x boundary
self.xlb = xlb
if self.xlb == "reflect":
self.xlb = numpy.where(odd_reflect_dir == "x",
"reflect-odd", "reflect-even")
# +x boundary
self.xrb = xrb
if self.xrb == "reflect":
self.xrb = numpy.where(odd_reflect_dir == "x",
"reflect-odd", "reflect-even")
# periodic checks
if ((xlb == "periodic" and xrb != "periodic") or
(xrb == "periodic" and xlb != "periodic")):
sys.exit("ERROR: both xlb and xrb must be periodic")
def __str__(self):
""" print out some basic information about the BC object """
string = "BCs: -x: %s +x: %s " % \
(self.xlb, self.xrb)
return string
class Grid1d(object):
"""
the 1-d grid class. The grid object will contain the coordinate
information (at various centerings).
A basic (1-d) representation of the layout is:
| | | X | | | | X | | |
+--*--+- // -+--*--X--*--+--*--+- // -+--*--+--*--X--*--+- // -+--*--+
0 ng-1 ng ng+1 ... ng+nx-1 ng+nx 2ng+nx-1
ilo ihi
|<- ng ghostcells->|<---- nx interior zones ----->|<- ng ghostcells->|
The '*' marks the data locations.
"""
def __init__(self, nx, ng=1, xmin=0.0, xmax=1.0):
"""
The class constructor function.
The only data that we require is the number of points that
make up the mesh.
We optionally take the extrema of the domain, number of ghost
cells (assume 1)
"""
# size of grid
self.nx = nx
self.ng = ng
self.qx = 2*ng+nx
# domain extrema
self.xmin = xmin
self.xmax = xmax
# compute the indices of the block interior (excluding guardcells)
self.ilo = ng
self.ihi = ng+nx-1
# define the coordinate information at the left, center, and right
# zone coordinates
self.dx = (xmax - xmin)/nx
self.xl = (numpy.arange(nx+2*ng) - ng)*self.dx + xmin
self.xr = (numpy.arange(nx+2*ng) + 1.0 - ng)*self.dx + xmin
self.x = 0.5*(self.xl + self.xr)
def scratch_array(self):
return numpy.zeros((self.qx), dtype=numpy.float64)
def __str__(self):
""" print out some basic information about the grid object """
return "1-d grid: nx = {}, ng = {}".format(self.nx, self.ng)
class CellCenterData1d(object):
"""
the cell-centered data that lives on a grid.
a CellCenterData1d object is built in a multi-step process before it can
be used. We pass in a grid object to describe where the data
lives:
my_data = patch.CellCenterData1d(myGrid)
register any variables that we expect to live on this patch. Here
bcObject describes the boundary conditions for that variable.
my_data.registerVar('density', bcObject)
my_data.registerVar('x-momentum', bcObject)
...
finally, finish the initialization of the patch
my_data.create()
This last step actually allocates the storage for the state
variables. Once this is done, the patch is considered to be
locked. New variables cannot be added.
"""
def __init__(self, grid, dtype=numpy.float64):
self.grid = grid
self.dtype = dtype
self.data = None
self.vars = []
self.nvar = 0
self.BCs = {}
# time
self.t = -1
self.initialized = 0
def register_var(self, name, bc_object):
"""
register a variable with CellCenterData1d object. Here we pass in a
BCObject that describes the boundary conditions for that
variable.
"""
if self.initialized == 1:
sys.exit("ERROR: grid already initialized")
self.vars.append(name)
self.nvar += 1
self.BCs[name] = bc_object
def create(self):
"""
called after all the variables are registered and allocates
the storage for the state data
"""
if self.initialized == 1:
sys.exit("ERROR: grid already initialized")
self.data = numpy.zeros((self.nvar, self.grid.qx), dtype=self.dtype)
self.initialized = 1
def __str__(self):
""" print out some basic information about the ccData2d object """
if self.initialized == 0:
mystr = "CellCenterData1d object not yet initialized"
return mystr
mystr = "cc data: nx = {}, ng = {}\n".format(self.grid.nx, self.grid.ng) + \
" nvars = {}\n".format(self.nvar) + \
"variables: \n"
ilo = self.grid.ilo
ihi = self.grid.ihi
for n in range(self.nvar):
mystr += "%16s: min: %15.10f max: %15.10f\n" % \
(self.vars[n],
numpy.min(self.data[n, ilo:ihi+1]),
numpy.max(self.data[n, ilo:ihi+1]))
mystr += "%16s BCs: -x: %-12s +x: %-12s \n" %\
(" ", self.BCs[self.vars[n]].xlb,
self.BCs[self.vars[n]].xrb)
return mystr
def get_var(self, name):
"""
return a data array the variable described by name. Any changes
made to this are automatically reflected in the CellCenterData1d
object.
"""
n = self.vars.index(name)
return self.data[n, :]
def zero(self, name):
n = self.vars.index(name)
self.data[n, :] = 0.0
def fill_BC_all(self):
"""
fill boundary conditions on all variables
"""
for name in self.vars:
self.fill_BC(name)
def fill_BC(self, name):
"""
fill the boundary conditions. This operates on a single state
variable at a time, to allow for maximum flexibility
we do periodic, reflect-even, reflect-odd, and outflow
each variable name has a corresponding bc_object stored in the
        CellCenterData1d object -- we refer to this to figure out the action
to take at each boundary.
"""
# there is only a single grid, so every boundary is on
# a physical boundary (except if we are periodic)
# Note: we piggy-back on outflow and reflect-odd for
# Neumann and Dirichlet homogeneous BCs respectively, but
# this only works for a single ghost cell
n = self.vars.index(name)
# -x boundary
if self.BCs[name].xlb == "outflow" or self.BCs[name].xlb == "neumann":
for i in range(0, self.grid.ilo):
self.data[n, i] = self.data[n, self.grid.ilo]
elif self.BCs[name].xlb == "reflect-even":
for i in range(0, self.grid.ilo):
self.data[n, i] = self.data[n, 2*self.grid.ng-i-1]
elif self.BCs[name].xlb in ["reflect-odd", "dirichlet"]:
for i in range(0, self.grid.ilo):
self.data[n, i] = -self.data[n, 2*self.grid.ng-i-1]
elif self.BCs[name].xlb == "periodic":
for i in range(0, self.grid.ilo):
self.data[n, i] = self.data[n, self.grid.ihi-self.grid.ng+i+1]
# +x boundary
if self.BCs[name].xrb == "outflow" or self.BCs[name].xrb == "neumann":
for i in range(self.grid.ihi+1, self.grid.nx+2*self.grid.ng):
self.data[n, i] = self.data[n, self.grid.ihi]
elif self.BCs[name].xrb == "reflect-even":
for i in range(0, self.grid.ng):
i_bnd = self.grid.ihi+1+i
i_src = self.grid.ihi-i
self.data[n, i_bnd] = self.data[n, i_src]
elif self.BCs[name].xrb in ["reflect-odd", "dirichlet"]:
for i in range(0, self.grid.ng):
i_bnd = self.grid.ihi+1+i
i_src = self.grid.ihi-i
self.data[n, i_bnd] = -self.data[n, i_src]
elif self.BCs[name].xrb == "periodic":
for i in range(self.grid.ihi+1, 2*self.grid.ng + self.grid.nx):
self.data[n, i] = self.data[n, i-self.grid.ihi-1+self.grid.ng]
def restrict(self, varname):
"""
restrict the variable varname to a coarser grid (factor of 2
coarser) and return an array with the resulting data (and same
number of ghostcells)
"""
fG = self.grid
fData = self.get_var(varname)
# allocate an array for the coarsely gridded data
ng_c = fG.ng
nx_c = fG.nx//2
cData = numpy.zeros((2*ng_c+nx_c), dtype=self.dtype)
ilo_c = ng_c
ihi_c = ng_c+nx_c-1
# fill the coarse array with the restricted data -- just
# average the 2 fine cells into the corresponding coarse cell
# that encompasses them.
# This is done by shifting our view into the fData array and
# using a stride of 2 in the indexing.
cData[ilo_c:ihi_c+1] = \
0.5*(fData[fG.ilo :fG.ihi+1:2] + fData[fG.ilo+1:fG.ihi+1:2])
return cData
def prolong(self, varname):
"""
prolong the data in the current (coarse) grid to a finer
(factor of 2 finer) grid. Return an array with the resulting
data (and same number of ghostcells).
We will reconstruct the data in the zone from the
        zone-averaged variables using the centered-difference slope m_x:
            f(x) = m_x * x/dx + <f>
        When averaged over the parent cell, this reproduces <f>.
        Each zone's reconstruction will be averaged over 2 children.
| | | | |
| <f> | --> | | |
| | | 1 | 2 |
+-----------+ +-----+-----+
We will fill each of the finer resolution zones by filling all
the 1's together, using a stride 2 into the fine array. Then
the 2's, this allows us to operate in a vector
fashion. All operations will use the same slopes for their
respective parents.
"""
cG = self.grid
cData = self.get_var(varname)
        # allocate an array for the finely gridded data
ng_f = cG.ng
nx_f = cG.nx*2
fData = numpy.zeros((2*ng_f+nx_f), dtype=self.dtype)
ilo_f = ng_f
ihi_f = ng_f+nx_f-1
# slopes for the coarse data
m_x = cG.scratch_array()
m_x[cG.ilo:cG.ihi+1] = \
0.5*(cData[cG.ilo+1:cG.ihi+2] - cData[cG.ilo-1:cG.ihi])
# fill the '1' children
fData[ilo_f:ihi_f+1:2] = \
cData[cG.ilo:cG.ihi+1] - 0.25*m_x[cG.ilo:cG.ihi+1]
# fill the '2' children
fData[ilo_f+1:ihi_f+1:2] = \
cData[cG.ilo:cG.ihi+1] + 0.25*m_x[cG.ilo:cG.ihi+1]
return fData
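# Illustrative sketch (not called anywhere in this module) of the full patch
# workflow described in the docstrings above: build a grid, register a
# variable with boundary conditions, fill the ghost cells, then move the data
# between coarse and fine representations with restrict/prolong. The Gaussian
# profile is an arbitrary choice for demonstration.
def _example_patch_workflow():
    g = Grid1d(16, ng=1)
    d = CellCenterData1d(g)
    d.register_var("phi", BCObject(xlb="periodic", xrb="periodic"))
    d.create()
    phi = d.get_var("phi")
    phi[:] = numpy.exp(-(g.x - 0.5)**2/0.1**2)
    d.fill_BC("phi")
    coarse = d.restrict("phi")   # 8 interior zones; pairs of fine cells averaged
    fine = d.prolong("phi")      # 32 interior zones; linear reconstruction per parent
    return coarse, fine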
if __name__ == "__main__":
# illustrate basic mesh operations
myg = Grid1d(16, xmax=1.0)
mydata = CellCenterData1d(myg)
bc = BCObject()
mydata.register_var("a", bc)
mydata.create()
a = mydata.get_var("a")
a[:] = numpy.exp(-(myg.x - 0.5)**2/0.1**2)
print(mydata)
| 28.161364 | 84 | 0.548463 | [
"BSD-3-Clause"
] | python-hydro/hydro_examples | multigrid/patch1d.py | 12,391 | Python |
#!/usr/bin/python
# coding: utf-8
# This file is execfile()d with the current directory set to its containing
# dir. Note that not all possible configuration values are present in this
# autogenerated file. All configuration values have a default; values that are
# commented out serve to show the default.
import sys
import os
import sphinx_rtd_theme
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
sys.path.insert(0, os.path.abspath(os.path.join('..')))
import pageit # noqa
# -- General configuration ----------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['sphinx.ext.doctest', 'sphinx.ext.autodoc',
'sphinxcontrib.napoleon']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
# source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'pageit'
copyright = u'2013, Metaist'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = pageit.__version__
# The full version, including alpha/beta/rc tags.
release = version
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
# language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
# today = ''
# Else, today_fmt is used as the format for a strftime call.
# today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all
# documents.
# default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
# add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
# add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
# show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
# modindex_common_prefix = []
# -- Options for HTML output --------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
# html_theme = 'default'
html_theme = "sphinx_rtd_theme"
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
# html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
# html_theme_path = []
html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
# html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
# html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
# html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
# html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
# html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
# html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
# html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
# html_additional_pages = {}
# If false, no module index is generated.
# html_domain_indices = True
# If false, no index is generated.
# html_use_index = True
# If true, the index is split into individual pages for each letter.
# html_split_index = False
# If true, links to the reST sources are added to the pages.
# html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
# html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
# html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
# html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
# html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'pageitdoc'
# -- Options for LaTeX output -------------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
# 'preamble': '',
}
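# For example (illustrative only, mirroring the commented keys above), forcing A4
# paper and 11pt type would look like:
# latex_elements = {'papersize': 'a4paper', 'pointsize': '11pt'}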
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass
# [howto/manual]).
latex_documents = [
('index', 'pageit.tex', u'pageit Documentation',
u'The Metaist', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
# latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
# latex_use_parts = False
# If true, show page references after internal links.
# latex_show_pagerefs = False
# If true, show URL addresses after external links.
# latex_show_urls = False
# Documents to append as an appendix to all manuals.
# latex_appendices = []
# If false, no module index is generated.
# latex_domain_indices = True
# -- Options for manual page output -------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'pageit', u'pageit Documentation',
[u'The Metaist'], 1)
]
# If true, show URL addresses after external links.
# man_show_urls = False
# -- Options for Texinfo output -----------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'pageit', u'pageit Documentation',
u'The Metaist', 'pageit', pageit.__doc__.split('\n')[0],
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
# texinfo_appendices = []
# If false, no module index is generated.
# texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
# texinfo_show_urls = 'footnote'
| 32.072874 | 79 | 0.709038 | [
"MIT"
] | metaist/pageit | docs/conf.py | 7,922 | Python |
from chainerui.models.log import Log
def get_test_json():
return [
{
"loss": 100,
"epoch": 1,
},
{
"loss": 90,
"epoch": 2,
}
]
def test_log_serialize_numbers():
json_data = get_test_json()
logs = [Log(data) for data in json_data]
serialized_data = [log.serialize for log in logs]
assert serialized_data[0]['logDict']['epoch'] == 1
assert serialized_data[1]['logDict']['epoch'] == 2
def test_log_serialize_arbitrary_data():
json_data = get_test_json()
json_data.insert(
0,
{
"loss": 110,
"epoch": 0,
"model_files": ["Model", "model.py"]
}
)
logs = [Log(data) for data in json_data]
serialized_data = [log.serialize for log in logs]
assert serialized_data[0]['logDict']['epoch'] == 0
assert serialized_data[0]['logDict']['model_files'] is None
assert serialized_data[1]['logDict']['epoch'] == 1
assert serialized_data[2]['logDict']['epoch'] == 2
def test_log_serialize_nan_and_inf():
json_data = get_test_json()
json_data.insert(
0,
{
"loss": float('nan'),
"epoch": float('inf'),
"iteration": 0,
}
)
logs = [Log(data) for data in json_data]
serialized_data = [log.serialize for log in logs]
assert serialized_data[0]['logDict']['iteration'] == 0
assert serialized_data[0]['logDict']['epoch'] is None
assert serialized_data[0]['logDict']['loss'] is None
assert serialized_data[1]['logDict']['epoch'] == 1
assert serialized_data[2]['logDict']['epoch'] == 2
| 25.692308 | 63 | 0.573653 | [
"MIT"
] | chainer/chainerui | tests/models_tests/test_log.py | 1,670 | Python |
#!/usr/bin/python3
"""
An example script that cleans up failed experiments by moving them to the archive
"""
import argparse
from datetime import datetime
from clearml_agent import APIClient
parser = argparse.ArgumentParser(description=__doc__)
parser.add_argument("--project", "-P", help="Project ID. Only clean up experiments from this project")
parser.add_argument("--user", "-U", help="User ID. Only clean up experiments assigned to this user")
parser.add_argument(
"--status", "-S", default="failed",
help="Experiment status. Only clean up experiments with this status (default %(default)s)"
)
parser.add_argument(
"--iterations", "-I", type=int,
help="Number of iterations. Only clean up experiments with less or equal number of iterations"
)
parser.add_argument(
"--sec-from-start", "-T", type=int,
help="Seconds from start time. "
"Only clean up experiments if less or equal number of seconds have elapsed since started"
)
args = parser.parse_args()
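# Example invocation (the project ID below is a placeholder, not a real one):
#   python archive_experiments.py --project <project-id> --status failed --iterations 0
# i.e. archive failed experiments of that project that never reported an iteration.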
client = APIClient()
tasks = client.tasks.get_all(
project=[args.project] if args.project else None,
user=[args.user] if args.user else None,
status=[args.status] if args.status else None,
system_tags=["-archived"]
)
count = 0
for task in tasks:
if args.iterations and (task.last_iteration or 0) > args.iterations:
continue
if args.sec_from_start:
if not task.started:
continue
if (datetime.utcnow() - task.started.replace(tzinfo=None)).total_seconds() > args.sec_from_start:
continue
try:
client.tasks.edit(
task=task.id,
system_tags=(task.system_tags or []) + ["archived"],
force=True
)
count += 1
except Exception as ex:
print("Failed editing experiment: {}".format(ex))
print("Cleaned up {} experiments".format(count))
| 31.183333 | 105 | 0.676644 | [
"Apache-2.0"
] | AzaelCicero/clearml-agent | examples/archive_experiments.py | 1,871 | Python |
# Token type identifiers.
AUT_INVALID = 0x00
AUT_OTHER_FILE32 = 0x11
AUT_OHEADER = 0x12
AUT_TRAILER = 0x13
AUT_HEADER32 = 0x14
AUT_HEADER32_EX = 0x15
AUT_DATA = 0x21
AUT_IPC = 0x22
AUT_PATH = 0x23
AUT_SUBJECT32 = 0x24
AUT_XATPATH = 0x25
AUT_PROCESS32 = 0x26
AUT_RETURN32 = 0x27
AUT_TEXT = 0x28
AUT_OPAQUE = 0x29
AUT_IN_ADDR = 0x2A
AUT_IP = 0x2B
AUT_IPORT = 0x2C
AUT_ARG32 = 0x2D
AUT_SOCKET = 0x2E
AUT_SEQ = 0x2F
AUT_ACL = 0x30
AUT_ATTR = 0x31
AUT_IPC_PERM = 0x32
AUT_LABEL = 0x33
AUT_GROUPS = 0x34
AUT_ACE = 0x35
AUT_PRIV = 0x38
AUT_UPRIV = 0x39
AUT_LIAISON = 0x3A
AUT_NEWGROUPS = 0x3B
AUT_EXEC_ARGS = 0x3C
AUT_EXEC_ENV = 0x3D
AUT_ATTR32 = 0x3E
AUT_UNAUTH = 0x3F
AUT_XATOM = 0x40
AUT_XOBJ = 0x41
AUT_XPROTO = 0x42
AUT_XSELECT = 0x43
AUT_XCOLORMAP = 0x44
AUT_XCURSOR = 0x45
AUT_XFONT = 0x46
AUT_XGC = 0x47
AUT_XPIXMAP = 0x48
AUT_XPROPERTY = 0x49
AUT_XWINDOW = 0x4A
AUT_XCLIENT = 0x4B
AUT_CMD = 0x51
AUT_EXIT = 0x52
AUT_ZONENAME = 0x60
AUT_HOST = 0x70
AUT_ARG64 = 0x71
AUT_RETURN64 = 0x72
AUT_ATTR64 = 0x73
AUT_HEADER64 = 0x74
AUT_SUBJECT64 = 0x75
AUT_PROCESS64 = 0x77
AUT_OTHER_FILE64 = 0x78
AUT_HEADER64_EX = 0x79
AUT_SUBJECT32_EX = 0x7A
AUT_PROCESS32_EX = 0x7B
AUT_SUBJECT64_EX = 0x7C
AUT_PROCESS64_EX = 0x7D
AUT_IN_ADDR_EX = 0x7E
AUT_SOCKET_EX = 0x7F
#
# Pre-64-bit BSM, 32-bit tokens weren't explicitly named as '32'. We have
# compatibility defines.
AUT_HEADER = AUT_HEADER32
AUT_ARG = AUT_ARG32
AUT_RETURN = AUT_RETURN32
AUT_SUBJECT = AUT_SUBJECT32
AUT_PROCESS = AUT_PROCESS32
AUT_OTHER_FILE = AUT_OTHER_FILE32
#

# The values for the following token ids are not defined by BSM.
#
# XXXRW: Not sure how to handle these in OpenBSM yet, but I'll give them
# names more consistent with Sun's BSM. These originally came from Apple's
# BSM.
AUT_SOCKINET32 = 0x80 # XXX
AUT_SOCKINET128 = 0x81 # XXX
AUT_SOCKUNIX = 0x82 # XXX
_AUT_RIGHTS = 0x83 # XXX FreeBSD
AUT_ARG_UUID = 0x84 # UUID of argument object
AUT_RETURN_UUID = 0x85 # UUID of returned object
#
# Apple specific tokens
AUT_IDENTITY = 0xED
AUT_KRB5_PRINCIPA = 0xEE
AUT_CERT_HAHSH = 0xEF
# print values for the arbitrary token
AUP_BINARY = 0
AUP_OCTAL = 1
AUP_DECIMAL = 2
AUP_HEX = 3
AUP_STRING = 4
#
# data-types for the arbitrary token
AUR_BYTE = 0
AUR_CHAR = AUR_BYTE
AUR_SHORT = 1
AUR_INT32 = 2
AUR_INT = AUR_INT32
AUR_INT64 = 3
#
# ... and their sizes
AUR_BYTE_SIZE = 1 # sizeof(u_char)
AUR_CHAR_SIZE = AUR_BYTE_SIZE
AUR_SHORT_SIZE = 2 # sizeof(uint16_t)
AUR_INT32_SIZE = 4 # sizeof(uint32_t)
AUR_INT_SIZE = AUR_INT32_SIZE
AUR_INT64_SIZE = 8 # sizeof(uint64_t)
AUR_BYTE_FORMAT = "B"
AUR_CHAR_FORMAT = "c"
AUR_SHORT_FORMAT = "H"
AUR_INT32_FORMAT = "I"
AUR_INT_FORMAT = AUR_INT32_FORMAT
AUR_INT64_FORMAT = "Q"
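# Illustrative sketch (not part of the original API): the AUR_*_FORMAT strings are
# standard struct format characters, so an arbitrary-token value could be decoded
# roughly like this (big-endian assumed, since BSM records use network byte order):
#   import struct
#   assert struct.calcsize(AUR_INT32_FORMAT) == AUR_INT32_SIZE
#   (value,) = struct.unpack(">" + AUR_INT32_FORMAT, b"\x00\x00\x00\x2a")  # value == 42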
#
# Modifiers for the header token
PAD_NOTATTR = 0x4000 # nonattributable event
PAD_FAILURE = 0x8000 # fail audit event
#
AUDIT_MAX_GROUPS = 16
#
# A number of BSM versions are floating around and defined. Here are
# constants for them. OpenBSM uses the same token types, etc, used in the
# Solaris BSM version, but has a separate version number in order to
# identify a potentially different event identifier name space.
AUDIT_HEADER_VERSION_OLDDARWIN = 1 # In retrospect, a mistake.
AUDIT_HEADER_VERSION_SOLARIS = 2
AUDIT_HEADER_VERSION_TSOL25 = 3
AUDIT_HEADER_VERSION_TSOL = 4
AUDIT_HEADER_VERSION_OPENBSM10 = 10
AUDIT_HEADER_VERSION_OPENBSM11 = 11
AUDIT_HEADER_VERSION_OPENBSM = AUDIT_HEADER_VERSION_OPENBSM11
#
AUT_TRAILER_MAGIC = 0xB105
#
AUT_HEADERS = [AUT_HEADER32, AUT_HEADER32_EX, AUT_HEADER64, AUT_HEADER64_EX]
| 23.825503 | 76 | 0.773803 | [
"MIT"
] | haginara/openbsm-python | bsm/audit_record.py | 3,550 | Python |
#
# Copyright 2016 The BigDL Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from argparse import ArgumentParser
from pyspark.sql import SparkSession
from pyspark.sql.types import *
LABEL_COL = 0
INT_COLS = list(range(1, 14))
CAT_COLS = list(range(14, 40))
if __name__ == '__main__':
parser = ArgumentParser()
parser.add_argument('--input', type=str, required=True, help="The path to the csv file to be processed.")
parser.add_argument('--output', type=str, default=".", help="The path to the folder to save the parquet data.")
args = parser.parse_args()
spark = SparkSession.builder.getOrCreate()
input = args.input
output = args.output
label_fields = [StructField('_c%d' % LABEL_COL, IntegerType())]
int_fields = [StructField('_c%d' % i, IntegerType()) for i in INT_COLS]
str_fields = [StructField('_c%d' % i, StringType()) for i in CAT_COLS]
schema = StructType(label_fields + int_fields + str_fields)
df = spark.read.schema(schema).option('sep', '\t').csv(input)
df.write.parquet(output, mode="overwrite")
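# Example invocation (paths are placeholders): the input is expected to be a
# tab-separated file matching the schema above, e.g.
#   spark-submit csv_to_parquet.py --input /path/to/data.tsv --output /path/to/parquet_dir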
| 36.395349 | 114 | 0.723323 | [
"Apache-2.0"
] | ColossusChang/BigDL | python/friesian/example/dlrm/csv_to_parquet.py | 1,565 | Python |
from setuptools import setup
requirements = '''
flask
'''
name='imager'
console_scripts = f'''
pf-moves={name}.moves_gui:main
pf-flash={name}.flash:main
imager={name}.cli:main
'''
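# Note: setuptools expects entry points as a list of "name=module:function" strings;
# console_scripts.split() yields exactly that, e.g.
#   ['pf-moves=imager.moves_gui:main', 'pf-flash=imager.flash:main', 'imager=imager.cli:main']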
packages=f'''
{name}
{name}.utils
'''
setup(
name=name,
packages=packages.split(),
version='0.1',
description='IMX imaging using the PreciseFlex robot arm and LiCONiC fridge',
url='https://github.com/pharmbio/robot-imager',
author='Dan Rosén',
author_email='[email protected]',
python_requires='>=3.10',
license='MIT',
install_requires=requirements.split(),
entry_points={'console_scripts': console_scripts.split()}
)
| 20.515152 | 81 | 0.660266 | [
"MIT"
] | pharmbio/imx-pharmbio-automation | setup.py | 678 | Python |
import numpy as np
import scipy
from scipy.stats import qmc
from scipy.stats import special_ortho_group
import matplotlib.pyplot as plt
from scipy.optimize import minimize
import warnings
from .ssp import SSP
class SSPSpace:
def __init__(self, domain_dim: int, ssp_dim: int, axis_matrix=None, phase_matrix=None,
domain_bounds=None, length_scale=1):
self.sample_points = None
self.sample_ssps = None
self.domain_dim = domain_dim
self.ssp_dim = ssp_dim
        if not isinstance(length_scale, np.ndarray) or length_scale.size == 1:
            self.length_scale = length_scale * np.ones((self.domain_dim,))
        else:
            # a per-dimension length scale was supplied directly
            assert length_scale.size == self.domain_dim
            self.length_scale = length_scale
if domain_bounds is not None:
assert domain_bounds.shape[0] == domain_dim
self.domain_bounds = domain_bounds
if (axis_matrix is None) & (phase_matrix is None):
raise RuntimeError("SSP spaces must be defined by either a axis matrix or phase matrix. Use subclasses to construct spaces with predefined axes.")
elif (phase_matrix is None):
assert axis_matrix.shape[0] == ssp_dim, f'Expected ssp_dim {axis_matrix.shape[0]}, got {ssp_dim}.'
assert axis_matrix.shape[1] == domain_dim
self.axis_matrix = axis_matrix
self.phase_matrix = (-1.j*np.log(np.fft.fft(axis_matrix,axis=0))).real
elif (axis_matrix is None):
assert phase_matrix.shape[0] == ssp_dim
assert phase_matrix.shape[1] == domain_dim
self.phase_matrix = phase_matrix
self.axis_matrix = np.fft.ifft(np.exp(1.j*phase_matrix), axis=0).real
def update_lengthscale(self, scale):
if not isinstance(scale, np.ndarray) or scale.size == 1:
self.length_scale = scale * np.ones((self.domain_dim,))
else:
assert scale.size == self.domain_dim
self.length_scale = scale
assert self.length_scale.size == self.domain_dim
def encode(self,x):
assert x.shape[0] == self.domain_dim
ls_mat = np.atleast_2d(np.diag(1/self.length_scale.flatten()))
scaled_x = ls_mat @ x
data = np.fft.ifft( np.exp( 1.j * self.phase_matrix @ scaled_x ), axis=0 ).real
return data
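    # Illustrative usage (a sketch, not part of the original module; the space below
    # is hypothetical): encode expects points shaped (domain_dim, n_points) and
    # returns an array shaped (ssp_dim, n_points), e.g.
    #   space = HexagonalSSPSpace(domain_dim=2, ssp_dim=151)
    #   phi = space.encode(np.array([[0.5], [-1.0]]))  # phi.shape == (151, 1)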
def encode_and_deriv(self,x):
ls_mat = np.atleast_2d(np.diag(1 / self.length_scale))
scaled_x = x @ ls_mat
fdata = np.exp( 1.j * self.phase_matrix @ scaled_x.T )
data = np.fft.ifft( fdata, axis=0 ).real
ddata = np.fft.ifft( 1.j * np.stack([np.diag(fdata[:,j]) for j in range(x.shape[0])]) @ self.phase_matrix @ ls_mat, axis=0 ).real
return data.T, ddata.T
def encode_fourier(self,x):
assert x.shape[0] == self.domain_dim
ls_mat = np.atleast_2d(np.diag(1/self.length_scale.flatten()))
scaled_x = ls_mat @ x
data = np.exp( 1.j * self.phase_matrix @ scaled_x )
return data
def encode_as_SSP(self,x):
assert x.shape[0] == self.domain_dim
return SSP(self.encode(x),self)
def decode(self,ssp,method='from-set', num_sample_pts=10000,from_set_method='grid',num_init_pts =10):
if method=='least-squares':
            # problems due to complex log
x = np.linalg.lstsq(self.phase_matrix, (1.j*np.log(np.fft.fft(ssp,axis=0))).real)[0]
#raise NotImplementedError()
#fssp = np.fft.fft(ssp,axis=0)
#x = np.linalg.lstsq(np.tile(self.phase_matrix,(2,1)), np.hstack([np.arccos(fssp.real), np.arcsin(fssp.imag)]))
return x
elif method=='from-set':
sample_ssps, sample_points = self.get_sample_ssps(num_sample_pts,method=from_set_method)
sims = sample_ssps.T @ ssp
return sample_points[:,np.argmax(sims)]
elif method=='direct-optim':
x0 = self.decode(ssp, method='from-set',num_sample_pts=num_init_pts)
def min_func(x,target=ssp):
x_ssp = self.encode(np.atleast_2d(x))
return -np.inner(x_ssp, target).flatten()
soln = minimize(min_func, x0, method='L-BFGS-B')
return soln.x
elif method=='grad_descent':
x = self.decode(ssp, method='from-set',num_sample_pts=num_init_pts)
fssp = np.fft.fft(ssp,axis=0)
ls_mat = np.diag(1/self.length_scale.flatten())
for j in range(10):
scaled_x = ls_mat @ x
x_enc = np.exp(1.j * self.phase_matrix @ scaled_x)
grad_mat = (1.j * (self.phase_matrix @ ls_mat).T * x_enc)
grad = (grad_mat @ fssp.T).flatten()
x = x - 0.1*grad.real
return x
elif method=='nonlin-reg':
x = self.decode(ssp, method='from-set',num_sample_pts=num_init_pts)
fssp = np.fft.fft(ssp,axis=0)
dy = np.hstack([fssp.real, fssp.imag])
ls_mat = np.diag(1/self.length_scale.flatten())
for j in range(10):
J = np.vstack([self.phase_matrix * np.sin(self.phase_matrix @ x @ ls_mat).reshape(1,-1),
-self.phase_matrix * np.cos(self.phase_matrix @ x @ ls_mat).reshape(1,-1)])
soln = np.linalg.pinv(J.T @ J) @ J.T @ dy
x = x + soln
return x
else:
raise NotImplementedError()
def clean_up(self,ssp,**kwargs):
x = self.decode(ssp,**kwargs)
return self.encode(x)
def get_sample_points(self,num_points,method='grid'):
if self.domain_bounds is None:
bounds = np.vstack([-10*np.ones(self.domain_dim), 10*np.ones(self.domain_dim)]).T
else:
bounds = self.domain_bounds
if method=='grid':
n_per_dim = int(num_points**(1/self.domain_dim))
if n_per_dim**self.domain_dim != num_points:
warnings.warn((f'Evenly distributing points over a '
f'{self.domain_dim} grid requires numbers '
f'of samples to be powers of {self.domain_dim}.'
f'Requested {num_points} samples, returning '
f'{n_per_dim**self.domain_dim}'), RuntimeWarning)
### end if
xs = np.linspace(bounds[:,0],bounds[:,1],n_per_dim)
xxs = np.meshgrid(*[xs[:,i] for i in range(self.domain_dim)])
sample_points = np.array([x.reshape(-1) for x in xxs])
return sample_points
elif method=='sobol':
sampler = qmc.Sobol(d=self.domain_dim)
lbounds = bounds[:,0]
ubounds = bounds[:,1]
u_sample_points = sampler.random(num_points)
sample_points = qmc.scale(u_sample_points, lbounds, ubounds)
return sample_points.T
else:
raise NotImplementedError()
    def get_sample_ssps(self,num_points,**kwargs): # make a new set if num_points differs from what's stored?
sample_points = self.get_sample_points(num_points,**kwargs)
sample_ssps = self.encode(sample_points)
return sample_ssps, sample_points
def identity(self):
s = np.zeros(self.ssp_dim)
s[0] = 1
return s
def bind(self,a,b):
return np.fft.ifft(np.fft.fft(a) * np.fft.fft(b)).real
def invert(self,a):
return a[-np.arange(len(a))]
def normalize(self,ssp):
return ssp/np.max([1e-6,np.sqrt(np.sum(ssp**2))])
def unitary(self,ssp):
fssp = np.fft.fft(ssp)
fssp = fssp/np.sqrt(fssp.real**2 + fssp.imag**2)
return np.fft.ifft(fssp).real
def unitary_fourier(self,fssp):
fssp = fssp/np.sqrt(fssp.real**2 + fssp.imag**2)
return fssp
def decode_path(self, ssp_path, N_ma=None, n_samples = 10000):
sample_ssps, sample_points = self.get_sample_ssps(n_samples)
path = np.zeros((ssp_path.shape[0], self.domain_dim))
max_sims = np.zeros(ssp_path.shape[0])
for i in range(ssp_path.shape[0]):
sims = sample_ssps.T @ ssp_path[i,:]
max_sims[i] = np.max(sims)
path[i,:] = sample_points[:,np.argmax(sims)]
return path, max_sims
def similarity_plot(self,ssp,n_grid=100,plot_type='heatmap',cmap="YlGnBu",ax=None,**kwargs):
if ax is None:
fig = plt.figure()
ax = fig.add_subplot(111)
if self.domain_dim == 1:
xs = np.linspace(self.domain_bounds[0,0],self.domain_bounds[0,1], n_grid)
            im=ax.plot(xs, self.encode(xs.reshape(1,-1)).T @ ssp)
ax.set_xlim(self.domain_bounds[0,0],self.domain_bounds[0,1])
elif self.domain_dim == 2:
xs = np.linspace(self.domain_bounds[0,0],self.domain_bounds[0,1], n_grid)
ys = np.linspace(self.domain_bounds[1,0],self.domain_bounds[1,1], n_grid)
X,Y = np.meshgrid(xs,ys)
sims = self.encode(np.vstack([X.reshape(-1),Y.reshape(-1)])).T @ ssp
if plot_type=='heatmap':
im=ax.pcolormesh(X,Y,sims.reshape(X.shape),cmap=cmap,**kwargs)
elif plot_type=='contour':
im=ax.contour(X,Y,sims.reshape(X.shape),cmap=cmap,**kwargs)
elif plot_type=='contourf':
im=ax.contourf(X,Y,sims.reshape(X.shape),cmap=cmap,**kwargs)
ax.set_xlim(self.domain_bounds[0,0],self.domain_bounds[0,1])
ax.set_ylim(self.domain_bounds[1,0],self.domain_bounds[1,1])
else:
raise NotImplementedError()
return im
class RandomSSPSpace(SSPSpace):
def __init__(self, domain_dim: int, ssp_dim: int, domain_bounds=None, length_scale=1, rng=np.random.default_rng()):
        # np.random.default_rng() generators expose .random(), not .rand()
        partial_phases = rng.random((ssp_dim//2, domain_dim))*2*np.pi - np.pi
axis_matrix = _constructaxisfromphases(partial_phases)
super().__init__(domain_dim,ssp_dim,axis_matrix=axis_matrix,
domain_bounds=domain_bounds,length_scale=length_scale)
class HexagonalSSPSpace(SSPSpace):
def __init__(self, domain_dim:int,ssp_dim: int=151, n_rotates:int=5, n_scales:int=5,
scale_min=2*np.pi/np.sqrt(6) - 0.5, scale_max=2*np.pi/np.sqrt(6) + 0.5,
domain_bounds=None, length_scale=1):
if (n_rotates==5) & (n_scales==5) & (ssp_dim != 151):
n_rotates = int(np.max([1,np.sqrt((ssp_dim-1)/(2*(domain_dim+1)))]))
n_scales = n_rotates
phases_hex = np.hstack([np.sqrt(1+ 1/domain_dim)*np.identity(domain_dim) - (domain_dim**(-3/2))*(np.sqrt(domain_dim+1) + 1),
(domain_dim**(-1/2))*np.ones((domain_dim,1))]).T
self.grid_basis_dim = domain_dim + 1
self.num_grids = n_rotates*n_scales
scales = np.linspace(scale_min,scale_max,n_scales)
phases_scaled = np.vstack([phases_hex*i for i in scales])
if (n_rotates==1):
phases_scaled_rotated = phases_scaled
elif (domain_dim==1):
scales = np.linspace(scale_min,scale_max,n_scales+n_rotates)
phases_scaled_rotated = np.vstack([phases_hex*i for i in scales])
elif (domain_dim == 2):
angles = np.linspace(0,2*np.pi/3,n_rotates)
R_mats = np.stack([np.stack([np.cos(angles), -np.sin(angles)],axis=1),
np.stack([np.sin(angles), np.cos(angles)], axis=1)], axis=1)
phases_scaled_rotated = (R_mats @ phases_scaled.T).transpose(0,2,1).reshape(-1,domain_dim)
else:
R_mats = special_ortho_group.rvs(domain_dim, size=n_rotates)
phases_scaled_rotated = (R_mats @ phases_scaled.T).transpose(0,2,1).reshape(-1,domain_dim)
axis_matrix = _constructaxisfromphases(phases_scaled_rotated)
ssp_dim = axis_matrix.shape[0]
super().__init__(domain_dim,ssp_dim,axis_matrix=axis_matrix,
domain_bounds=domain_bounds,length_scale=length_scale)
def sample_grid_encoders(self, n):
sample_pts = self.get_sample_points(n,method='sobol')
N = self.num_grids
if N < n:
sorts = np.hstack([np.arange(N), np.random.randint(0, N - 1, size = n - N)])
else:
sorts = np.arange(n)
encoders = np.zeros((self.ssp_dim,n))
for i in range(n):
sub_mat = _get_sub_SSP(sorts[i],N,sublen=self.grid_basis_dim)
proj_mat = _proj_sub_SSP(sorts[i],N,sublen=self.grid_basis_dim)
sub_space = SSPSpace(self.domain_dim,2*self.grid_basis_dim + 1, axis_matrix= sub_mat @ self.axis_matrix)
encoders[:,i] = N * proj_mat @ sub_space.encode(sample_pts[:,i])
return encoders
def _constructaxisfromphases(K):
d = K.shape[0]
n = K.shape[1]
axes = np.ones((d*2 + 1,n))
for i in range(n):
F = np.ones((d*2 + 1,), dtype="complex")
F[0:d] = np.exp(1.j*K[:,i])
F[-d:] = np.flip(np.conj(F[0:d]))
F = np.fft.ifftshift(F)
axes[:,i] = np.fft.ifft(F).real
return axes
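# Illustrative check (a sketch, not part of the original module): the Fourier
# coefficients above are laid out conjugate-symmetrically before the inverse FFT,
# so each returned axis vector is real-valued and unitary (all FFT magnitudes are 1):
#   K = np.random.uniform(-np.pi, np.pi, size=(3, 2))
#   axes = _constructaxisfromphases(K)                    # shape (7, 2)
#   np.allclose(np.abs(np.fft.fft(axes, axis=0)), 1.0)    # -> True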
def _get_sub_FourierSSP(n, N, sublen=3):
# Return a matrix, \bar{A}_n
# Consider the multi scale representation (S_{total}) and sub vectors (S_n) described in the paper
# Then
# \bar{A}_n F{S_{total}} = F{S_n}
# i.e. pick out the sub vector in the Fourier domain
tot_len = 2*sublen*N + 1
FA = np.zeros((2*sublen + 1, tot_len))
FA[0:sublen, sublen*n:sublen*(n+1)] = np.eye(sublen)
FA[sublen, sublen*N] = 1
FA[sublen+1:, tot_len - np.arange(sublen*(n+1),sublen*n,-1)] = np.eye(sublen)
return FA
def _get_sub_SSP(n,N,sublen=3):
# Return a matrix, A_n
# Consider the multi scale representation (S_{total}) and sub vectors (S_n) described in the paper
# Then
# A_n S_{total} = S_n
# i.e. pick out the sub vector in the time domain
tot_len = 2*sublen*N + 1
FA = _get_sub_FourierSSP(n,N,sublen=sublen)
W = np.fft.fft(np.eye(tot_len))
invW = np.fft.ifft(np.eye(2*sublen + 1))
A = invW @ np.fft.ifftshift(FA) @ W
return A.real
def _proj_sub_FourierSSP(n,N,sublen=3):
# Return a matrix, \bar{B}_n
# Consider the multi scale representation (S_{total}) and sub vectors (S_n) described in the paper
# Then
# \sum_n \bar{B}_n F{S_{n}} = F{S_{total}}
# i.e. project the sub vector in the Fourier domain such that summing all such projections gives the full vector in Fourier domain
tot_len = 2*sublen*N + 1
FB = np.zeros((2*sublen + 1, tot_len))
FB[0:sublen, sublen*n:sublen*(n+1)] = np.eye(sublen)
FB[sublen, sublen*N] = 1/N # all sub vectors have a "1" zero freq term so scale it so full vector will have 1
FB[sublen+1:, tot_len - np.arange(sublen*(n+1),sublen*n,-1)] = np.eye(sublen)
return FB.T
def _proj_sub_SSP(n,N,sublen=3):
# Return a matrix, B_n
# Consider the multi scale representation (S_{total}) and sub vectors (S_n) described in the paper
# Then
# \sum_n B_n S_{n} = S_{total}
# i.e. project the sub vector in the time domain such that summing all such projections gives the full vector
tot_len = 2*sublen*N + 1
FB = _proj_sub_FourierSSP(n,N,sublen=sublen)
invW = np.fft.ifft(np.eye(tot_len))
W = np.fft.fft(np.eye(2*sublen + 1))
B = invW @ np.fft.ifftshift(FB) @ W
return B.real
| 44.44 | 158 | 0.594317 | [
"MIT"
] | nsdumont/SemanticMapping | semanticmapping/sspspace.py | 15,554 | Python |
# -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import warnings
from typing import Callable, Dict, Optional, Sequence, Tuple, Union
from google.api_core import grpc_helpers
from google.api_core import gapic_v1
import google.auth # type: ignore
from google.auth import credentials as ga_credentials # type: ignore
from google.auth.transport.grpc import SslCredentials # type: ignore
import grpc
from google.cloud.datastore_v1.types import datastore
from .base import DatastoreTransport, DEFAULT_CLIENT_INFO
class DatastoreGrpcTransport(DatastoreTransport):
"""gRPC backend transport for Datastore.
Each RPC normalizes the partition IDs of the keys in its
input entities, and always returns entities with keys with
normalized partition IDs. This applies to all keys and entities,
including those in values, except keys with both an empty path
and an empty or unset partition ID. Normalization of input keys
sets the project ID (if not already set) to the project ID from
the request.
This class defines the same methods as the primary client, so the
primary client can load the underlying transport implementation
and call it.
It sends protocol buffers over the wire using gRPC (which is built on
top of HTTP/2); the ``grpcio`` package must be installed.
"""
_stubs: Dict[str, Callable]
def __init__(
self,
*,
host: str = "datastore.googleapis.com",
credentials: ga_credentials.Credentials = None,
credentials_file: str = None,
scopes: Sequence[str] = None,
channel: grpc.Channel = None,
api_mtls_endpoint: str = None,
client_cert_source: Callable[[], Tuple[bytes, bytes]] = None,
ssl_channel_credentials: grpc.ChannelCredentials = None,
client_cert_source_for_mtls: Callable[[], Tuple[bytes, bytes]] = None,
quota_project_id: Optional[str] = None,
client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
always_use_jwt_access: Optional[bool] = False,
) -> None:
"""Instantiate the transport.
Args:
host (Optional[str]):
The hostname to connect to.
credentials (Optional[google.auth.credentials.Credentials]): The
authorization credentials to attach to requests. These
credentials identify the application to the service; if none
are specified, the client will attempt to ascertain the
credentials from the environment.
This argument is ignored if ``channel`` is provided.
credentials_file (Optional[str]): A file with credentials that can
be loaded with :func:`google.auth.load_credentials_from_file`.
This argument is ignored if ``channel`` is provided.
scopes (Optional(Sequence[str])): A list of scopes. This argument is
ignored if ``channel`` is provided.
channel (Optional[grpc.Channel]): A ``Channel`` instance through
which to make calls.
api_mtls_endpoint (Optional[str]): Deprecated. The mutual TLS endpoint.
If provided, it overrides the ``host`` argument and tries to create
a mutual TLS channel with client SSL credentials from
``client_cert_source`` or application default SSL credentials.
client_cert_source (Optional[Callable[[], Tuple[bytes, bytes]]]):
Deprecated. A callback to provide client SSL certificate bytes and
private key bytes, both in PEM format. It is ignored if
``api_mtls_endpoint`` is None.
ssl_channel_credentials (grpc.ChannelCredentials): SSL credentials
for the grpc channel. It is ignored if ``channel`` is provided.
client_cert_source_for_mtls (Optional[Callable[[], Tuple[bytes, bytes]]]):
A callback to provide client certificate bytes and private key bytes,
both in PEM format. It is used to configure a mutual TLS channel. It is
ignored if ``channel`` or ``ssl_channel_credentials`` is provided.
quota_project_id (Optional[str]): An optional project to use for billing
and quota.
client_info (google.api_core.gapic_v1.client_info.ClientInfo):
The client info used to send a user-agent string along with
API requests. If ``None``, then default info will be used.
Generally, you only need to set this if you're developing
your own client library.
always_use_jwt_access (Optional[bool]): Whether self signed JWT should
be used for service account credentials.
Raises:
google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport
creation failed for any reason.
google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials``
and ``credentials_file`` are passed.
"""
self._grpc_channel = None
self._ssl_channel_credentials = ssl_channel_credentials
self._stubs: Dict[str, Callable] = {}
if api_mtls_endpoint:
warnings.warn("api_mtls_endpoint is deprecated", DeprecationWarning)
if client_cert_source:
warnings.warn("client_cert_source is deprecated", DeprecationWarning)
if channel:
# Ignore credentials if a channel was passed.
credentials = False
# If a channel was explicitly provided, set it.
self._grpc_channel = channel
self._ssl_channel_credentials = None
else:
if api_mtls_endpoint:
host = api_mtls_endpoint
# Create SSL credentials with client_cert_source or application
# default SSL credentials.
if client_cert_source:
cert, key = client_cert_source()
self._ssl_channel_credentials = grpc.ssl_channel_credentials(
certificate_chain=cert, private_key=key
)
else:
self._ssl_channel_credentials = SslCredentials().ssl_credentials
else:
if client_cert_source_for_mtls and not ssl_channel_credentials:
cert, key = client_cert_source_for_mtls()
self._ssl_channel_credentials = grpc.ssl_channel_credentials(
certificate_chain=cert, private_key=key
)
# The base transport sets the host, credentials and scopes
super().__init__(
host=host,
credentials=credentials,
credentials_file=credentials_file,
scopes=scopes,
quota_project_id=quota_project_id,
client_info=client_info,
always_use_jwt_access=always_use_jwt_access,
)
if not self._grpc_channel:
self._grpc_channel = type(self).create_channel(
self._host,
credentials=self._credentials,
credentials_file=credentials_file,
scopes=self._scopes,
ssl_credentials=self._ssl_channel_credentials,
quota_project_id=quota_project_id,
options=[
("grpc.max_send_message_length", -1),
("grpc.max_receive_message_length", -1),
],
)
# Wrap messages. This must be done after self._grpc_channel exists
self._prep_wrapped_messages(client_info)
@classmethod
def create_channel(
cls,
host: str = "datastore.googleapis.com",
credentials: ga_credentials.Credentials = None,
credentials_file: str = None,
scopes: Optional[Sequence[str]] = None,
quota_project_id: Optional[str] = None,
**kwargs,
) -> grpc.Channel:
"""Create and return a gRPC channel object.
Args:
host (Optional[str]): The host for the channel to use.
credentials (Optional[~.Credentials]): The
authorization credentials to attach to requests. These
credentials identify this application to the service. If
none are specified, the client will attempt to ascertain
the credentials from the environment.
credentials_file (Optional[str]): A file with credentials that can
be loaded with :func:`google.auth.load_credentials_from_file`.
This argument is mutually exclusive with credentials.
            scopes (Optional[Sequence[str]]): An optional list of scopes needed for this
service. These are only used when credentials are not specified and
are passed to :func:`google.auth.default`.
quota_project_id (Optional[str]): An optional project to use for billing
and quota.
kwargs (Optional[dict]): Keyword arguments, which are passed to the
channel creation.
Returns:
grpc.Channel: A gRPC channel object.
Raises:
google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials``
and ``credentials_file`` are passed.
"""
return grpc_helpers.create_channel(
host,
credentials=credentials,
credentials_file=credentials_file,
quota_project_id=quota_project_id,
default_scopes=cls.AUTH_SCOPES,
scopes=scopes,
default_host=cls.DEFAULT_HOST,
**kwargs,
)
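    # Illustrative usage (a sketch; ``creds`` is an assumed credentials object, not
    # something defined in this module):
    #   channel = DatastoreGrpcTransport.create_channel(credentials=creds)
    #   transport = DatastoreGrpcTransport(channel=channel)
    #   # transport.lookup, transport.run_query, etc. are then ready to call.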
@property
def grpc_channel(self) -> grpc.Channel:
"""Return the channel designed to connect to this service.
"""
return self._grpc_channel
@property
def lookup(self) -> Callable[[datastore.LookupRequest], datastore.LookupResponse]:
r"""Return a callable for the lookup method over gRPC.
Looks up entities by key.
Returns:
Callable[[~.LookupRequest],
~.LookupResponse]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "lookup" not in self._stubs:
self._stubs["lookup"] = self.grpc_channel.unary_unary(
"/google.datastore.v1.Datastore/Lookup",
request_serializer=datastore.LookupRequest.serialize,
response_deserializer=datastore.LookupResponse.deserialize,
)
return self._stubs["lookup"]
@property
def run_query(
self,
) -> Callable[[datastore.RunQueryRequest], datastore.RunQueryResponse]:
r"""Return a callable for the run query method over gRPC.
Queries for entities.
Returns:
Callable[[~.RunQueryRequest],
~.RunQueryResponse]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "run_query" not in self._stubs:
self._stubs["run_query"] = self.grpc_channel.unary_unary(
"/google.datastore.v1.Datastore/RunQuery",
request_serializer=datastore.RunQueryRequest.serialize,
response_deserializer=datastore.RunQueryResponse.deserialize,
)
return self._stubs["run_query"]
@property
def begin_transaction(
self,
) -> Callable[
[datastore.BeginTransactionRequest], datastore.BeginTransactionResponse
]:
r"""Return a callable for the begin transaction method over gRPC.
Begins a new transaction.
Returns:
Callable[[~.BeginTransactionRequest],
~.BeginTransactionResponse]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "begin_transaction" not in self._stubs:
self._stubs["begin_transaction"] = self.grpc_channel.unary_unary(
"/google.datastore.v1.Datastore/BeginTransaction",
request_serializer=datastore.BeginTransactionRequest.serialize,
response_deserializer=datastore.BeginTransactionResponse.deserialize,
)
return self._stubs["begin_transaction"]
@property
def commit(self) -> Callable[[datastore.CommitRequest], datastore.CommitResponse]:
r"""Return a callable for the commit method over gRPC.
Commits a transaction, optionally creating, deleting
or modifying some entities.
Returns:
Callable[[~.CommitRequest],
~.CommitResponse]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "commit" not in self._stubs:
self._stubs["commit"] = self.grpc_channel.unary_unary(
"/google.datastore.v1.Datastore/Commit",
request_serializer=datastore.CommitRequest.serialize,
response_deserializer=datastore.CommitResponse.deserialize,
)
return self._stubs["commit"]
@property
def rollback(
self,
) -> Callable[[datastore.RollbackRequest], datastore.RollbackResponse]:
r"""Return a callable for the rollback method over gRPC.
Rolls back a transaction.
Returns:
Callable[[~.RollbackRequest],
~.RollbackResponse]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "rollback" not in self._stubs:
self._stubs["rollback"] = self.grpc_channel.unary_unary(
"/google.datastore.v1.Datastore/Rollback",
request_serializer=datastore.RollbackRequest.serialize,
response_deserializer=datastore.RollbackResponse.deserialize,
)
return self._stubs["rollback"]
@property
def allocate_ids(
self,
) -> Callable[[datastore.AllocateIdsRequest], datastore.AllocateIdsResponse]:
r"""Return a callable for the allocate ids method over gRPC.
Allocates IDs for the given keys, which is useful for
referencing an entity before it is inserted.
Returns:
Callable[[~.AllocateIdsRequest],
~.AllocateIdsResponse]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "allocate_ids" not in self._stubs:
self._stubs["allocate_ids"] = self.grpc_channel.unary_unary(
"/google.datastore.v1.Datastore/AllocateIds",
request_serializer=datastore.AllocateIdsRequest.serialize,
response_deserializer=datastore.AllocateIdsResponse.deserialize,
)
return self._stubs["allocate_ids"]
@property
def reserve_ids(
self,
) -> Callable[[datastore.ReserveIdsRequest], datastore.ReserveIdsResponse]:
r"""Return a callable for the reserve ids method over gRPC.
        Prevents the supplied keys' IDs from being
        auto-allocated by Cloud Datastore.
Returns:
Callable[[~.ReserveIdsRequest],
~.ReserveIdsResponse]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "reserve_ids" not in self._stubs:
self._stubs["reserve_ids"] = self.grpc_channel.unary_unary(
"/google.datastore.v1.Datastore/ReserveIds",
request_serializer=datastore.ReserveIdsRequest.serialize,
response_deserializer=datastore.ReserveIdsResponse.deserialize,
)
return self._stubs["reserve_ids"]
def close(self):
self.grpc_channel.close()
__all__ = ("DatastoreGrpcTransport",)
| 42.800948 | 87 | 0.62972 | [
"Apache-2.0"
] | LaudateCorpus1/python-datastore | google/cloud/datastore_v1/services/datastore/transports/grpc.py | 18,062 | Python |
from src.utils.config import config
import json
# import uuid
import requests
_NAMESPACE = "WS"
_VER_NAMESPACE = "WSVER"
_SAMPLE_NAMESPACE = "SMP"
# versioned and non-versioned index have same version
_SAMPLE_SET_INDEX_VERSION = 1
_SAMPLE_SET_INDEX_NAME = 'sample_set_' + str(_SAMPLE_SET_INDEX_VERSION)
_VER_SAMPLE_SET_INDEX_NAME = 'sample_set_version_' + str(_SAMPLE_SET_INDEX_VERSION)
# versioned and non-versioned index have same version
_SAMPLE_INDEX_VERSION = 1
_SAMPLE_INDEX_NAME = 'sample_' + str(_SAMPLE_INDEX_VERSION)
# _VER_SAMPLE_INDEX_NAME = 'sample_version_' + str(_SAMPLE_INDEX_VERSION)
def _get_sample(sample_info):
""" Get sample from SampleService
sample_info - dict containing 'id' and 'version' of a sample
"""
headers = {"Authorization": config()['ws_token']}
params = {
"id": sample_info['id']
}
if sample_info.get('version'):
params['version'] = sample_info['version']
payload = {
"method": "SampleService.get_sample",
"id": "", # str(uuid.uuid4()),
"params": [params],
"version": "1.1"
}
resp = requests.post(url=config()['sample_service_url'], headers=headers, data=json.dumps(payload))
if not resp.ok:
raise RuntimeError(f"Returned from sample service with status {resp.status_code} - {resp.text}")
resp_json = resp.json()
if resp_json.get('error'):
raise RuntimeError(f"Error from SampleService - {resp_json['error']}")
sample = resp_json['result'][0]
return sample
def _flatten_meta(meta, prefix=None):
""" Flattens metadata fields in a Sample object. Fields are concatenated into a
single string field to save into an Elasticsearch index
meta - Sample Metadata to be flattened
prefix - (optional) prefix for the metadata values. default=None
"""
new_meta = {}
for key in meta:
if prefix:
val = prefix + ":"
else:
val = ""
if "value" in meta[key]:
val += str(meta[key]['value'])
if "units" in meta[key]:
val += ";" + str(meta[key]['units'])
new_meta[key] = val
return new_meta
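# Illustrative example (hypothetical metadata, not taken from a real sample):
#   _flatten_meta({'temperature': {'value': 4, 'units': 'C'}}, prefix='ctrl')
#   returns {'temperature': 'ctrl:4;C'}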
def _combine_meta(meta, flattened_meta, idx):
""" Combine newly flattened metadata with existing metadata. This Function is designed to keep the indexing
of the different metadata fields consistent for each node within the sample node tree s.t. all the
fields in index (idx) 0 will be from item 0 in the node tree. Empty string ("") entries are Empty and
added simply so that the indexing of all fields line up.
meta - existing metadata.
flattened_meta - newly flattened metadata.
idx - current index of ndoe_tree.
"""
for key in flattened_meta:
if key in meta:
meta[key] += ["" for _ in range(idx - len(meta[key]))] + [flattened_meta[key]]
else:
meta[key] = ["" for _ in range(idx)] + [flattened_meta[key]]
return meta
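# Illustrative example (hypothetical values): combining a field at node index 2 that
# was last seen at node 0 pads the gap with empty strings so positions stay aligned:
#   _combine_meta({'depth': ['10;m']}, {'depth': '12;m'}, 2)
#   returns {'depth': ['10;m', '', '12;m']}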
def index_sample_set(obj_data, ws_info, obj_data_v1):
"""Indexer for KBaseSets.SampleSet object type"""
info = obj_data['info']
if not obj_data.get('data'):
raise Exception("no data in object")
data = obj_data['data']
workspace_id = info[6]
object_id = info[0]
version = info[4]
sample_set_id = f"{_NAMESPACE}::{workspace_id}:{object_id}"
ver_sample_set_id = f"{_VER_NAMESPACE}::{workspace_id}:{object_id}:{version}"
sample_set_index = {
"_action": "index",
"doc": {
"description": data["description"],
"sample_ids": [s['id'] for s in data['samples']],
"sample_names": [s['name'] for s in data['samples']],
"sample_versions": [s['version'] for s in data['samples']]
},
"index": _SAMPLE_SET_INDEX_NAME,
"id": sample_set_id
}
yield sample_set_index
ver_sample_set_index = dict(sample_set_index)
ver_sample_set_index['index'] = _VER_SAMPLE_SET_INDEX_NAME
ver_sample_set_index['id'] = ver_sample_set_id
yield ver_sample_set_index
for samp in data["samples"]:
# query the sample service for sample
sample = _get_sample(samp)
sample_id = f"{_SAMPLE_NAMESPACE}::{sample['id']}:{sample['version']}"
# not sure on how we need to handle more than 1 node.
if len(sample['node_tree']) == 1:
meta_controlled = _flatten_meta(
sample['node_tree'][0]['meta_controlled']
)
meta_user = _flatten_meta(
sample['node_tree'][0]['meta_user']
)
meta_controlled['node_id'] = sample['node_tree'][0]['id']
else:
meta_controlled, meta_user = {}, {}
for idx, node in enumerate(sample['node_tree']):
meta_controlled = _combine_meta(
meta_controlled,
_flatten_meta(
node['meta_controlled']
),
idx
)
meta_user = _combine_meta(
meta_user,
_flatten_meta(
node['meta_user']
),
idx
)
meta_controlled['node_id'] = node['id']
sample_index = {
"_action": "index",
"doc": {
"save_date": sample['save_date'],
"sample_version": sample['version'],
"name": sample['name'],
"parent_id": sample_set_id,
**meta_user,
**meta_controlled
},
"index": _SAMPLE_INDEX_NAME,
"id": sample_id
}
yield sample_index
| 36.417722 | 111 | 0.590024 | [
"MIT"
] | slebras/index_runner | src/index_runner/es_indexers/sample_set.py | 5,754 | Python |
from alipay import AliPay
from django.core.paginator import Paginator
from django.http import HttpResponseRedirect
from django.http import JsonResponse
from django.shortcuts import render
from django.utils.http import urlquote
from Qshop.settings import alipay_private_key_string, alipay_public_key_string
from Seller.views import getPassword
# Create your views here.
def loginValid(func):
"""
    :desc Closure decorator that checks whether the user is logged in
:param func:
:return:
"""
def inner(request, *args, **kwargs):
email = request.COOKIES.get("user")
s_email = request.session.get("user")
if email and s_email and email == s_email:
user = LoginUser.objects.filter(email=email).first()
if user:
return func(request, *args, **kwargs)
return HttpResponseRedirect("/Buyer/login/")
return inner
def login(request):
if request.method == "POST":
erremail = ""
email = request.POST.get("email")
pwd = request.POST.get("pwd")
user = LoginUser.objects.filter(email=email).first()
if user:
db_password = user.password
pwd = getPassword(pwd)
if db_password == pwd:
response = HttpResponseRedirect("/Buyer/index/", locals())
response.set_cookie("user", user.email)
response.set_cookie("user_id", user.id)
response.set_cookie("username", urlquote(user.username))
request.session["user"] = user.email
return response
else:
errpwd = "密码不匹配"
else:
erremail = "该邮箱未注册"
return render(request, "buyer/login.html", locals())
def register(request):
errmsg = ""
if request.method == "POST":
username = request.POST.get("user_name")
pwd = request.POST.get("pwd")
email = request.POST.get("email")
db_email = LoginUser.objects.filter(email=email).first()
db_username = LoginUser.objects.filter(username=username).first()
if not db_email:
if not db_username:
user = LoginUser()
user.username = username
user.password = getPassword(pwd)
user.email = email
user.save()
return HttpResponseRedirect("/Buyer/login/", locals())
else:
errmsg = "用户名已存在"
else:
errmsg = "邮箱已注册"
return render(request, "buyer/register.html", {"errmsg": errmsg})
def index(request):
types = GoodsType.objects.all()
goods_result = []
for type in types:
goods = type.goods_set.order_by("goods_pro_date")[0:4]
if len(goods) >= 4:
goods_result.append({type: goods})
return render(request, "buyer/index.html", locals())
def logout(request):
url = request.META.get("HTTP_REFERER", "/Buyer/index/")
response = HttpResponseRedirect(url)
cookies = request.COOKIES.keys()
for cookie in cookies:
response.delete_cookie(cookie)
if request.session.get("user"):
del request.session['user']
return response
@loginValid
def user_info(request):
id = request.COOKIES.get("user_id")
if id:
user = LoginUser.objects.filter(id=id).first()
return render(request, "buyer/user_center_info.html", locals())
@loginValid
def user_site(request):
id = request.COOKIES.get("user_id")
if id:
user = LoginUser.objects.filter(id=id).first()
return render(request, "buyer/user_center_site.html", locals())
@loginValid
def user_order(request):
id = request.COOKIES.get("user_id")
if id:
user = LoginUser.objects.filter(id=id).first()
order_lists = PayOrder.objects.filter(order_user=user).order_by("-order_date")
return render(request, "buyer/user_center_order.html", locals())
"""
def good_list(request):
type = request.GET.get("type")
keyword = request.GET.get("keyword")
goods_list = []
if type == 'byid':
if keyword:
types = GoodsType.objects.get(id=keyword)
goods_list = types.goods_set.order_by("goods_pro_date")
elif type == 'bykey':
if keyword:
goods_list = Goods.objects.filter(goods_name__contains=keyword).order_by("goods_pro_date")
if goods_list:
nums = goods_list.count()
nums = int(math.ceil(nums / 5))
recommon_list = goods_list[:nums]
return render(request, "buyer/goods_list.html", locals())
"""
def good_list(request, page):
page = int(page)
type = request.GET.get("type")
keyword = request.GET.get("keyword")
goods_list = []
    if type == 'byid': # query by goods type id
if keyword:
types = GoodsType.objects.get(id=int(keyword))
goods_list = types.goods_set.order_by("goods_pro_date")
    elif type == 'bykey': # query by goods name
if keyword:
goods_list = Goods.objects.filter(goods_name__contains=keyword).order_by("goods_pro_date")
if goods_list:
        # pagination
page_list = Paginator(goods_list, 15)
goods_list = page_list.page(page)
pages = page_list.page_range
        # recommended goods
nums = len(goods_list)
nums = int(math.ceil(nums / 5))
recommon_list = goods_list[:nums]
return render(request, "buyer/goods_list.html", locals())
def good_detail(request, id):
good = Goods.objects.filter(id=int(id)).first()
return render(request, "buyer/detail.html", locals())
import math
import time
import datetime
from Buyer.models import *
@loginValid
def pay_order(request):
"""
    GET request: buy a single item from the goods detail page; pass in the goods id and quantity.
    POST request: buy multiple items selected in the shopping cart.
"""
if request.method == "GET":
num = request.GET.get("num")
id = request.GET.get("id")
if num and id:
num = int(num)
id = int(id)
            order = PayOrder() # order
order.order_number = str(time.time()).replace(".", "")
order.order_date = datetime.datetime.now()
order.order_user = LoginUser.objects.get(id=int(request.COOKIES.get("user_id")))
order.save()
good = Goods.objects.get(id=id)
            order_info = OrderInfo() # order details
order_info.order_id = order
order_info.goods_id = good.id
order_info.goods_picture = good.goods_picture
order_info.goods_name = good.goods_name
order_info.goods_count = num
order_info.goods_price = good.goods_price
order_info.goods_total_price = round(good.goods_price * num, 3)
order_info.store_id = good.goods_store
            order_info.order_status = 0 # status
order_info.save()
order.order_total = order_info.goods_total_price
order.save()
elif request.method == "POST":
request_data = []
data = request.POST
data_item = request.POST.items()
for key, value in data_item:
if key.startswith("check_"):
id = int(key.split("_", 1)[1])
num = int(data.get("count_" + str(id)))
request_data.append((id, num))
if request_data:
            order = PayOrder() # create the order
order.order_number = str(time.time()).replace(".", "")
order.order_date = datetime.datetime.now()
order.order_user = LoginUser.objects.get(id=int(request.COOKIES.get("user_id")))
order.order_total = 0.0
order.goods_number = 0
order.save()
for id, num in request_data:
good = Goods.objects.get(id=id)
                order_info = OrderInfo() # order details
order_info.order_id = order
order_info.goods_id = good.id
order_info.goods_picture = good.goods_picture
order_info.goods_name = good.goods_name
order_info.goods_count = num
order_info.goods_price = good.goods_price
order_info.goods_total_price = round(good.goods_price * num, 3)
order_info.store_id = good.goods_store
order_info.order_status = 0
order_info.save()
                order.order_total += order_info.goods_total_price # accumulate the order total price
                order.goods_number += 1 # number of distinct goods in the order
order.save()
return render(request, "buyer/place_order.html", locals())
@loginValid
def alipayOrder(request):
"""
    Alipay payment: pass in the trade order number and the total amount.
"""
order_number = request.GET.get("order_number")
total = request.GET.get("total")
    # instantiate the Alipay client
alipay = AliPay(
appid="2016101200667714",
app_notify_url=None,
app_private_key_string=alipay_private_key_string,
alipay_public_key_string=alipay_public_key_string,
sign_type="RSA2"
)
order_string = alipay.api_alipay_trade_page_pay(
        out_trade_no=order_number, # order number
        total_amount=str(total), # amount, as a string
subject="生鲜交易",
        return_url="http://127.0.0.1:8000/Buyer/pay_result/", # page to redirect to after payment
notify_url="http://127.0.0.1:8000/Buyer/pay_result/",
)
result = "https://openapi.alipaydev.com/gateway.do?" + order_string
return HttpResponseRedirect(result)
@loginValid
def pay_result(request):
"""
    Payment result page.
    If out_trade_no is present the payment succeeded, so update the order status.
"""
out_trade_no = request.GET.get("out_trade_no")
if out_trade_no:
payorder = PayOrder.objects.get(order_number=out_trade_no)
payorder.orderinfo_set.all().update(order_status=1)
return render(request, "buyer/pay_result.html", locals())
@loginValid
def delgood(request):
sendData = {
"code": 200,
"data": ""
}
id = request.GET.get("id")
if id:
cart = Cart.objects.get(id=id)
cart.delete()
sendData["data"] = "删除编号%s成功"%id
return JsonResponse(sendData)
@loginValid
def add_cart(request):
"""
    Handle the ajax request that adds an item to the shopping cart and saves it to the database on success.
    Pass in the goods id and quantity.
"""
sendData = {
"code": 200,
"data": ""
}
if request.method == "POST":
id = int(request.POST.get("goods_id"))
count = int(request.POST.get("count", 1))
goods = Goods.objects.get(id=id)
cart = Cart()
cart.goods_name = goods.goods_name
cart.goods_num = count
cart.goods_price = goods.goods_price
cart.goods_picture = goods.goods_picture
cart.goods_total = round(goods.goods_price * count, 3)
cart.goods_id = goods.id
cart.cart_user = request.COOKIES.get("user_id")
cart.save()
sendData['data'] = "加入购物车成功"
else:
sendData["code"] = 500
sendData["data"] = "请求方式错误"
return JsonResponse(sendData)
@loginValid
def mycart(request):
id = request.COOKIES.get("user_id")
carts = Cart.objects.filter(cart_user=id).order_by("-id")
number = carts.count()
return render(request, "buyer/cart.html", locals())
| 29.986301 | 102 | 0.604568 | [
"MIT"
] | songdanlee/DjangoWorkSpace | Qshop/Buyer/views.py | 11,385 | Python |
import errno, os
from django.db import models
from django.http import Http404, HttpResponseServerError
from discodex.restapi.resource import Resource, Collection
from discodex.restapi.resource import (HttpResponseAccepted,
HttpResponseCreated,
HttpResponseNoContent,
HttpResponseServiceUnavailable)
from discodex import settings
from discodex.mapreduce import (Indexer,
DiscoDBIterator)
from discodex.objects import (DataSet,
IChunks,
Indices,
Index,
Results,
Dict)
from disco.core import Disco
from disco.ddfs import DDFS
from disco.error import DiscoError
from disco.util import flatten, parse_dir
discodex_settings = settings.DiscodexSettings()
disco_master_url = discodex_settings['DISCODEX_DISCO_MASTER']
disco_prefix = discodex_settings['DISCODEX_DISCO_PREFIX']
index_prefix = discodex_settings['DISCODEX_INDEX_PREFIX']
purge_file = discodex_settings['DISCODEX_PURGE_FILE']
disco_master = Disco(disco_master_url)
ddfs = DDFS(disco_master_url)
NOT_FOUND, OK, ACTIVE, DEAD = 'unknown job', 'ready', 'active', 'dead'
class IndexCollection(Collection):
allowed_methods = ('GET', 'POST')
def delegate(self, request, *args, **kwargs):
name = str(kwargs.pop('name'))
return IndexResource(name)(request, *args, **kwargs)
@property
def names(self):
return ddfs.list(index_prefix)
def __iter__(self):
for name in self.names:
yield IndexResource(name)
def create(self, request, *args, **kwargs):
dataset = DataSet.loads(request.raw_post_data)
prefix = '%s:discodb:' % disco_prefix
job = Indexer(disco_master, prefix, dataset)
try:
job.run()
except ImportError, e:
return HttpResponseServerError("Callable object not found: %s" % e)
except DiscoError, e:
return HttpResponseServerError("Failed to run indexing job: %s" % e)
return HttpResponseAccepted(job.name)
def read(self, request, *args, **kwargs):
return Indices(self.names).response(request)
class IndexResource(Collection):
allowed_methods = ('GET', 'POST', 'PUT', 'DELETE')
def __init__(self, name):
self.name = name
self.responses['POST'] = 'append'
def delegate(self, request, *args, **kwargs):
if self.status == NOT_FOUND:
raise Http404
return DiscoDBResource(self)(request, *args, **kwargs)
@property
def exists(self):
return ddfs.exists(self.tag)
@property
def isdisco(self):
return self.name.startswith(disco_prefix)
@property
def isindex(self):
return self.name.startswith(index_prefix)
@property
def jobname(self):
if self.isdisco:
return self.name
if self.isindex:
return self.name.replace(index_prefix, disco_prefix, 1)
return '%s:%s' % (disco_prefix, self.name)
@property
def tag(self):
return self.jobname.replace(disco_prefix, index_prefix, 1)
@property
@models.permalink
def url(self):
return 'index', (), {'name': self.name}
@property
def ichunks(self):
return ddfs.blobs(self.tag)
@property
def status(self):
if self.exists:
return OK
if self.isdisco:
status, results = disco_master.results(self.name)
if status == OK:
_prefix, type, id = self.name.split(':', 2)
ddfs.put(self.tag, [[url.replace('disco://', '%s://' % type, 1)
for url in urls]
for urls in ddfs.blobs(results)])
disco_master.purge(self.jobname)
return status
return NOT_FOUND
def read(self, request, *args, **kwargs):
status = self.status
if status == OK:
return Index(ddfs.get(self.tag)).response(request)
if status == ACTIVE:
return HttpResponseServiceUnavailable(2)
if status == DEAD:
return HttpResponseServerError("Indexing failed.")
raise Http404
def append(self, request, *args, **kwargs):
ddfs.tag(self.tag, [['tag://%s' % IndexResource(request.raw_post_data).tag]])
return HttpResponseCreated(self.url)
def update(self, request, *args, **kwargs):
ddfs.put(self.tag, IChunks.loads(request.raw_post_data))
return HttpResponseCreated(self.url)
def delete(self, request, *args, **kwargs):
ddfs.delete(self.tag)
ddfs.delete(ddfs.job_tag(self.jobname))
return HttpResponseNoContent()
class DiscoDBResource(Resource):
allowed_methods = ('GET', 'POST')
def __init__(self, index):
self.index = index
def read(self, request, *args, **kwargs):
from discodex.mapreduce.func import reify
method = str(kwargs.pop('method', None) or '')
arg = str(kwargs.pop('arg', None) or '')
streams = [reify(s) for s in kwargs.pop('streams').split('|') if s]
reduce = reify((kwargs.pop('reduce') or 'None').strip('}'))
try:
job = DiscoDBIterator(disco_master,
disco_prefix,
self.index,
method,
arg,
streams,
reduce,
**dict(request.GET.items())).run()
except DiscoError, e:
return HttpResponseServerError("Failed to run DiscoDB job: %s" % e)
try:
results = Results(job.results)
except DiscoError, e:
return HttpResponseServerError("DiscoDB job failed: %s" % e)
finally:
if os.path.exists(purge_file):
disco_master.purge(job.name)
return results.response(request)
def create(self, request, *args, **kwargs):
kwargs.update(Dict.loads(request.raw_post_data))
return self.read(request, *args, **kwargs)
| 33.182292 | 85 | 0.578402 | [
"BSD-3-Clause"
] | dimazest/disco | contrib/discodex/lib/discodex/models.py | 6,371 | Python |
#Import Library
import warnings
import numpy as np
import datetime
from extract_data import *
from word_encoder import *
from sklearn import svm
from sklearn.ensemble import RandomForestClassifier
from sklearn.naive_bayes import GaussianNB
from sklearn import tree
# send the extracted data available from extract_data to the encode function
# this function vectorizes the text based data into ASCII format for use by
# the algorithms
encoded_data = encode(data)
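# Illustrative note (hypothetical, since word_encoder is not shown in this file):
# "ASCII format" above means each text field ends up as a plain number derived from
# its character codes, which is why the hard-coded prediction calls further below
# pass lists of integers rather than raw review strings.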
scores = []
# convert the float scores to int. Multiplying by 10 helps us keep the decimal
# level precision which would otherwise be lost in typecasting
for raw_score in label:
    scores.append(int(float(raw_score) * 10))
# ignore deprecation warnings
def warn(*args, **kwargs):
pass
warnings.warn = warn
# SVM classifier
svm_clf = svm.SVC(kernel = 'linear')
#svm_clf.fit(encoded_data, scores)
# Gaussian Naive Bayes
gnb_clf = GaussianNB()
gnb_clf.fit(encoded_data, scores)
# Random Forest
rf_clf = RandomForestClassifier(n_estimators=10)
rf_clf.fit(encoded_data, scores)
# Decision Tree
dt_clf = tree.DecisionTreeClassifier()
dt_clf.fit(encoded_data, scores)
#print("SVM:")
#print(svm_clf.predict ([1403, 2752, 3263, 4200, 4309, 4417, 4518, 4675, 5909, 6102, 6500, 8459, 8672, 8882, 9712, 9810, 10524, 10757, 11096, 11299, 11461, 11617, 11775]))
print("Gaussian Naive Bayes:")
print(gnb_clf.predict ([1403, 2752, 3263, 4200, 4309, 4417, 4518, 4675, 5909, 6102, 6500, 8459, 8672, 8882, 9712, 9810, 10524, 10757, 11096, 11299, 11461, 11617, 11775]))
print("Random Forest:")
print(rf_clf.predict ([1403, 2752, 3263, 4200, 4309, 4417, 4518, 4675, 5909, 6102, 6500, 8459, 8672, 8882, 9712, 9810, 10524, 10757, 11096, 11299, 11461, 11617, 11775]))
print("Decision Tree:")
print(dt_clf.predict ([1403, 2752, 3263, 4200, 4309, 4417, 4518, 4675, 5909, 6102, 6500, 8459, 8672, 8882, 9712, 9810, 10524, 10757, 11096, 11299, 11461, 11617, 11775]))
print("End time: " + str(datetime.datetime.now()).split('.')[0])
| 30.8 | 171 | 0.732767 | [
"MIT"
] | aksh4y/IMDb-Rating-Prediction | Project/algorithm.old.py | 2,002 | Python |
import setuptools
with open("README.md", "r") as fh:
long_description = fh.read()
setuptools.setup(
name="py-mcc-f1",
version="0.1.0",
author="Arthur Colombini Gusmão",
description="MCC-F1 Curve",
long_description=long_description,
long_description_content_type="text/markdown",
url="https://github.com/arthurcgusmao/py-mcc-f1",
packages=setuptools.find_packages(),
install_requires=[
"numpy>=1.14.0",
"scikit-learn>=0.22"
],
classifiers=[
"Programming Language :: Python :: 3",
"License :: OSI Approved :: MIT License",
"Operating System :: OS Independent",
],
python_requires='>=3.6',
)
| 26.423077 | 53 | 0.630277 | [
"MIT"
] | arthurcgusmao/py-mcc-f1 | setup.py | 688 | Python |
from __future__ import division
import databench
import math
import random
class Dummypi(databench.Analysis):
"""A dummy analysis."""
@databench.on
def connected(self):
yield self.data.init({'samples': 100000})
@databench.on
def run(self):
"""Run when button is pressed."""
inside = 0
for draws in range(1, self.data['samples']):
# generate points and check whether they are inside the unit circle
r1 = random.random()
r2 = random.random()
if r1 ** 2 + r2 ** 2 < 1.0:
inside += 1
# every 1000 iterations, update status
if draws % 1000 != 0:
continue
# debug
yield self.emit('log', {'draws': draws, 'inside': inside})
# calculate pi and its uncertainty given the current draws
p = inside / draws
pi = {
'estimate': 4.0 * p,
'uncertainty': 4.0 * math.sqrt(draws * p * (1.0 - p)) / draws,
}
# send status to frontend
yield self.set_state(pi=pi)
yield self.emit('log', {'action': 'done'})
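    # Editorial note on the formulas used above: p = inside / draws estimates pi / 4,
    # so the point estimate is 4 * p, and the reported uncertainty is 4 times the
    # binomial standard error, 4 * sqrt(p * (1 - p) / draws), which is algebraically
    # equal to the expression assigned to pi['uncertainty'].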
@databench.on
def samples(self, value):
yield self.set_state(samples=value)
| 26.142857 | 79 | 0.527713 | [
"MIT"
] | phillipaug/Data-Analysis-General-repository | databench/analyses_packaged/dummypi/analysis.py | 1,281 | Python |
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
"""
This script will be executed whenever a change is pushed to the
master branch. It will schedule multiple child tasks that build
the app, run tests and execute code quality tools.
"""
import datetime
import json
import taskcluster
import os
from lib.tasks import schedule_task
TASK_ID = os.environ.get('TASK_ID')
REPO_URL = os.environ.get('MOBILE_HEAD_REPOSITORY')
BRANCH = os.environ.get('MOBILE_HEAD_BRANCH')
COMMIT = os.environ.get('MOBILE_HEAD_REV')
OWNER = "[email protected]"
SOURCE = "https://github.com/mozilla-mobile/focus-android/tree/master/tools/taskcluster"
def generate_build_task():
return taskcluster.slugId(), generate_task(
name="(Focus for Android) Build",
description="Build Focus/Klar for Android from source code.",
command=('echo "--" > .adjust_token'
' && python tools/l10n/check_translations.py'
' && ./gradlew --no-daemon clean assemble'))
def generate_unit_test_task(buildTaskId):
return taskcluster.slugId(), generate_task(
name="(Focus for Android) Unit tests",
description="Run unit tests for Focus/Klar for Android.",
command='echo "--" > .adjust_token && ./gradlew --no-daemon clean test',
dependencies=[buildTaskId])
def generate_code_quality_task(buildTaskId):
return taskcluster.slugId(), generate_task(
name="(Focus for Android) Code quality",
description="Run code quality tools on Focus/Klar for Android code base.",
command='echo "--" > .adjust_token && ./gradlew --no-daemon clean detektCheck ktlint lint pmd checkstyle spotbugs',
dependencies=[buildTaskId])
def generate_compare_locales_task():
return taskcluster.slugId(), generate_task(
name="(Focus for Android) String validation",
description="Check Focus/Klar for Android for errors in en-US and l10n.",
command=('pip install "compare-locales>=5.0.2,<6.0"'
' && mkdir -p /opt/focus-android/test_artifacts'
' && compare-locales --validate l10n.toml .'
' && compare-locales --json=/opt/focus-android/test_artifacts/data.json l10n.toml .'),
artifacts={
"public": {
"type": "directory",
"path": "/opt/focus-android/test_artifacts",
"expires": taskcluster.stringDate(taskcluster.fromNow('1 week'))
}
})
def generate_ui_test_task(dependencies, engine="Klar", device="ARM"):
'''
:param str engine: Klar, Webview
:param str device: ARM, X86
:return: uiWebviewARMTestTaskId, uiWebviewARMTestTask
'''
    if engine == "Klar":
engine = "geckoview"
assemble_engine = engine
    elif engine == "Webview":
engine = "webview"
assemble_engine = "Focus"
else:
raise Exception("ERROR: unknown engine type --> Aborting!")
task_name = "(Focus for Android) UI tests - {0} {1}".format(engine, device)
task_description = "Run UI tests for {0} {1} build for Android.".format(engine, device)
build_dir = "assemble{0}{1}Debug".format(assemble_engine, device.capitalize())
build_dir_test = "assemble{0}{1}DebugAndroidTest".format(assemble_engine, device.capitalize())
print('BUILD_DIR: {0}'.format(build_dir))
print('BUILD_DIR_TEST: {0}'.format(build_dir_test))
device = device.lower()
return taskcluster.slugId(), generate_task(
name=task_name,
description=task_description,
command=('echo "--" > .adjust_token'
' && ./gradlew --no-daemon clean ' + build_dir + ' ' + build_dir_test + ' '
' && ./tools/taskcluster/google-firebase-testlab-login.sh'
' && tools/taskcluster/execute-firebase-tests.sh ' + device + ' ' + engine),
dependencies=dependencies,
scopes=['secrets:get:project/focus/firebase'],
routes=['notify.irc-channel.#android-ci.on-any'],
artifacts={
"public": {
"type": "directory",
"path": "/opt/focus-android/test_artifacts",
"expires": taskcluster.stringDate(taskcluster.fromNow('1 week'))
}
})
# For GeckoView, upload nightly (it has release config) by default, all Release builds have WV
def upload_apk_nimbledroid_task(dependencies):
return taskcluster.slugId(), generate_task(
name="(Focus for Android) Upload Debug APK to Nimbledroid",
description="Upload APKs to Nimbledroid for performance measurement and tracking.",
command=('echo "--" > .adjust_token'
' && ./gradlew --no-daemon clean assembleKlarArmNightly'
' && python tools/taskcluster/upload_apk_nimbledroid.py'),
dependencies=dependencies,
scopes=['secrets:get:project/focus/nimbledroid'],
)
def generate_task(name, description, command, dependencies=[], artifacts={}, scopes=[], routes=[]):
created = datetime.datetime.now()
expires = taskcluster.fromNow('1 month')
deadline = taskcluster.fromNow('1 day')
return {
"workerType": "github-worker",
"taskGroupId": TASK_ID,
"expires": taskcluster.stringDate(expires),
"retries": 5,
"created": taskcluster.stringDate(created),
"tags": {},
"priority": "lowest",
"schedulerId": "taskcluster-github",
"deadline": taskcluster.stringDate(deadline),
"dependencies": [TASK_ID] + dependencies,
"routes": routes,
"scopes": scopes,
"requires": "all-completed",
"payload": {
"features": {
"taskclusterProxy": True
},
"maxRunTime": 7200,
"image": "mozillamobile/focus-android:1.4",
"command": [
"/bin/bash",
"--login",
"-c",
"git fetch %s %s && git config advice.detachedHead false && git checkout %s && %s" % (REPO_URL, BRANCH, COMMIT, command)
],
"artifacts": artifacts,
"deadline": taskcluster.stringDate(deadline)
},
"provisionerId": "aws-provisioner-v1",
"metadata": {
"name": name,
"description": description,
"owner": OWNER,
"source": SOURCE
}
}
if __name__ == "__main__":
queue = taskcluster.Queue({'baseUrl': 'http://taskcluster/queue/v1'})
buildTaskId, buildTask = generate_build_task()
schedule_task(queue, buildTaskId, buildTask)
unitTestTaskId, unitTestTask = generate_unit_test_task(buildTaskId)
schedule_task(queue, unitTestTaskId, unitTestTask)
codeQualityTaskId, codeQualityTask = generate_code_quality_task(buildTaskId)
schedule_task(queue, codeQualityTaskId, codeQualityTask)
clTaskId, clTask = generate_compare_locales_task()
schedule_task(queue, clTaskId, clTask)
uiWebviewARMTestTaskId, uiWebviewARMTestTask = generate_ui_test_task( [unitTestTaskId, codeQualityTaskId], "Webview", "ARM")
schedule_task(queue, uiWebviewARMTestTaskId, uiWebviewARMTestTask)
# uiWebviewX86TestTaskId, uiWebviewX86TestTask = generate_ui_test_task([unitTestTaskId, codeQualityTaskId], "Webview", "X86")
# schedule_task(queue, uiWebviewX86TestTaskId, uiWebviewX86TestTask)
uploadNDTaskId, uploadNDTask = upload_apk_nimbledroid_task([unitTestTaskId, codeQualityTaskId])
schedule_task(queue, uploadNDTaskId, uploadNDTask)
| 39.854167 | 136 | 0.644668 | [
"MPL-2.0"
] | kglazko/focus-android | tools/taskcluster/schedule-master-build.py | 7,652 | Python |
from . import (
admin,
ban,
close,
contact,
copyright,
donate,
download,
emoji,
help,
legacy,
noop,
roll,
search,
settings,
shortlink,
start,
stop,
submit,
top_missed,
view,
vote,
)
__all__ = ['admin', 'ban', 'contact', 'copyright', 'close', 'donate', 'download', 'emoji', 'help',
'legacy', 'noop', 'roll', 'search', 'settings',
'shortlink', 'start', 'stop', 'submit', 'top_missed', 'view', 'vote']
| 18.071429 | 98 | 0.51581 | [
"Unlicense"
] | RobbiNespu/hyperboria | nexus/bot/handlers/__init__.py | 506 | Python |
# Copyright 2018 Xanadu Quantum Technologies Inc.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests that exectation values are correctly computed in the plugin devices"""
import pytest
import numpy as np
import pennylane as qml
from contextlib import contextmanager
from conftest import U, U2, A, B
np.random.seed(42)
@contextmanager
def mimic_execution_for_expval(device):
device.reset()
with device.execution_context():
yield
if not device.shots is None:
device._samples = device.generate_samples()
@pytest.mark.parametrize("shots", [None, 8192])
class TestExpval:
"""Test expectation values"""
def test_identity_expectation(self, device, shots, tol):
"""Test that identity expectation value (i.e. the trace) is 1"""
theta = 0.432
phi = 0.123
dev = device(2)
with mimic_execution_for_expval(dev):
dev.apply(
[qml.RX(theta, wires=[0]), qml.RX(phi, wires=[1]), qml.CNOT(wires=[0, 1]),]
)
O = qml.Identity
name = "Identity"
dev._obs_queue = [O(wires=[0], do_queue=False), O(wires=[1], do_queue=False)]
res = np.array(
[dev.expval(O(wires=[0], do_queue=False)), dev.expval(O(wires=[1], do_queue=False)),]
)
assert np.allclose(res, np.array([1, 1]), **tol)
def test_pauliz_expectation(self, device, shots, tol):
"""Test that PauliZ expectation value is correct"""
theta = 0.432
phi = 0.123
dev = device(2)
with mimic_execution_for_expval(dev):
dev.apply(
[qml.RX(theta, wires=[0]), qml.RX(phi, wires=[1]), qml.CNOT(wires=[0, 1]),]
)
O = qml.PauliZ
name = "PauliZ"
dev._obs_queue = [O(wires=[0], do_queue=False), O(wires=[1], do_queue=False)]
res = np.array(
[dev.expval(O(wires=[0], do_queue=False)), dev.expval(O(wires=[1], do_queue=False)),]
)
assert np.allclose(res, np.array([np.cos(theta), np.cos(theta) * np.cos(phi)]), **tol)
def test_paulix_expectation(self, device, shots, tol):
"""Test that PauliX expectation value is correct"""
theta = 0.432
phi = 0.123
dev = device(2)
O = qml.PauliX
with mimic_execution_for_expval(dev):
dev.apply(
[qml.RY(theta, wires=[0]), qml.RY(phi, wires=[1]), qml.CNOT(wires=[0, 1]),],
rotations=O(wires=[0], do_queue=False).diagonalizing_gates()
+ O(wires=[1], do_queue=False).diagonalizing_gates(),
)
dev._obs_queue = [O(wires=[0], do_queue=False), O(wires=[1], do_queue=False)]
res = np.array(
[dev.expval(O(wires=[0], do_queue=False)), dev.expval(O(wires=[1], do_queue=False)),]
)
assert np.allclose(res, np.array([np.sin(theta) * np.sin(phi), np.sin(phi)]), **tol)
def test_pauliy_expectation(self, device, shots, tol):
"""Test that PauliY expectation value is correct"""
theta = 0.432
phi = 0.123
dev = device(2)
O = qml.PauliY
with mimic_execution_for_expval(dev):
dev.apply(
[qml.RX(theta, wires=[0]), qml.RX(phi, wires=[1]), qml.CNOT(wires=[0, 1]),],
rotations=O(wires=[0], do_queue=False).diagonalizing_gates()
+ O(wires=[1], do_queue=False).diagonalizing_gates(),
)
dev._obs_queue = [O(wires=[0], do_queue=False), O(wires=[1], do_queue=False)]
res = np.array(
[dev.expval(O(wires=[0], do_queue=False)), dev.expval(O(wires=[1], do_queue=False)),]
)
assert np.allclose(res, np.array([0, -(np.cos(theta)) * np.sin(phi)]), **tol)
def test_hadamard_expectation(self, device, shots, tol):
"""Test that Hadamard expectation value is correct"""
theta = 0.432
phi = 0.123
dev = device(2)
O = qml.Hadamard
with mimic_execution_for_expval(dev):
dev.apply(
[qml.RY(theta, wires=[0]), qml.RY(phi, wires=[1]), qml.CNOT(wires=[0, 1]),],
rotations=O(wires=[0], do_queue=False).diagonalizing_gates()
+ O(wires=[1], do_queue=False).diagonalizing_gates(),
)
dev._obs_queue = [O(wires=[0], do_queue=False), O(wires=[1], do_queue=False)]
res = np.array(
[dev.expval(O(wires=[0], do_queue=False)), dev.expval(O(wires=[1], do_queue=False)),]
)
expected = np.array(
[
np.sin(theta) * np.sin(phi) + np.cos(theta),
np.cos(theta) * np.cos(phi) + np.sin(phi),
]
) / np.sqrt(2)
assert np.allclose(res, expected, **tol)
def test_hermitian_expectation(self, device, shots, tol):
"""Test that arbitrary Hermitian expectation values are correct"""
theta = 0.432
phi = 0.123
dev = device(2)
O = qml.Hermitian
with mimic_execution_for_expval(dev):
dev.apply(
[qml.RY(theta, wires=[0]), qml.RY(phi, wires=[1]), qml.CNOT(wires=[0, 1]),],
rotations=O(A, wires=[0], do_queue=False).diagonalizing_gates()
+ O(A, wires=[1], do_queue=False).diagonalizing_gates(),
)
dev._obs_queue = [
O(A, wires=[0], do_queue=False),
O(A, wires=[1], do_queue=False),
]
res = np.array(
[
dev.expval(O(A, wires=[0], do_queue=False)),
dev.expval(O(A, wires=[1], do_queue=False)),
]
)
a = A[0, 0]
re_b = A[0, 1].real
d = A[1, 1]
ev1 = ((a - d) * np.cos(theta) + 2 * re_b * np.sin(theta) * np.sin(phi) + a + d) / 2
ev2 = ((a - d) * np.cos(theta) * np.cos(phi) + 2 * re_b * np.sin(phi) + a + d) / 2
expected = np.array([ev1, ev2])
assert np.allclose(res, expected, **tol)
def test_multi_mode_hermitian_expectation(self, device, shots, tol):
"""Test that arbitrary multi-mode Hermitian expectation values are correct"""
theta = 0.432
phi = 0.123
dev = device(2)
O = qml.Hermitian
with mimic_execution_for_expval(dev):
dev.apply(
[qml.RY(theta, wires=[0]), qml.RY(phi, wires=[1]), qml.CNOT(wires=[0, 1]),],
rotations=O(B, wires=[0, 1], do_queue=False).diagonalizing_gates(),
)
dev._obs_queue = [O(B, wires=[0, 1], do_queue=False)]
res = np.array([dev.expval(O(B, wires=[0, 1], do_queue=False))])
# below is the analytic expectation value for this circuit with arbitrary
# Hermitian observable B
expected = 0.5 * (
6 * np.cos(theta) * np.sin(phi)
- np.sin(theta) * (8 * np.sin(phi) + 7 * np.cos(phi) + 3)
- 2 * np.sin(phi)
- 6 * np.cos(phi)
- 6
)
assert np.allclose(res, expected, **tol)
@pytest.mark.parametrize("shots", [None, 8192])
class TestTensorExpval:
"""Test tensor expectation values"""
def test_paulix_pauliy(self, device, shots, tol):
"""Test that a tensor product involving PauliX and PauliY works correctly"""
theta = 0.432
phi = 0.123
varphi = -0.543
dev = device(3)
obs = qml.PauliX(wires=[0], do_queue=False) @ qml.PauliY(wires=[2], do_queue=False)
with mimic_execution_for_expval(dev):
dev.apply(
[
qml.RX(theta, wires=[0]),
qml.RX(phi, wires=[1]),
qml.RX(varphi, wires=[2]),
qml.CNOT(wires=[0, 1]),
qml.CNOT(wires=[1, 2]),
],
rotations=obs.diagonalizing_gates(),
)
res = dev.expval(obs)
expected = np.sin(theta) * np.sin(phi) * np.sin(varphi)
assert np.allclose(res, expected, **tol)
def test_pauliz_hadamard(self, device, shots, tol):
"""Test that a tensor product involving PauliZ and PauliY and hadamard works correctly"""
theta = 0.432
phi = 0.123
varphi = -0.543
dev = device(3)
obs = (
qml.PauliZ(wires=[0], do_queue=False)
@ qml.Hadamard(wires=[1], do_queue=False)
@ qml.PauliY(wires=[2], do_queue=False)
)
with mimic_execution_for_expval(dev):
dev.apply(
[
qml.RX(theta, wires=[0]),
qml.RX(phi, wires=[1]),
qml.RX(varphi, wires=[2]),
qml.CNOT(wires=[0, 1]),
qml.CNOT(wires=[1, 2]),
],
rotations=obs.diagonalizing_gates(),
)
res = dev.expval(obs)
expected = -(np.cos(varphi) * np.sin(phi) + np.sin(varphi) * np.cos(theta)) / np.sqrt(2)
assert np.allclose(res, expected, **tol)
def test_hermitian(self, device, shots, tol):
"""Test that a tensor product involving qml.Hermitian works correctly"""
theta = 0.432
phi = 0.123
varphi = -0.543
dev = device(3)
A = np.array(
[
[-6, 2 + 1j, -3, -5 + 2j],
[2 - 1j, 0, 2 - 1j, -5 + 4j],
[-3, 2 + 1j, 0, -4 + 3j],
[-5 - 2j, -5 - 4j, -4 - 3j, -6],
]
)
obs = qml.PauliZ(wires=[0], do_queue=False) @ qml.Hermitian(A, wires=[1, 2], do_queue=False)
with mimic_execution_for_expval(dev):
dev.apply(
[
qml.RX(theta, wires=[0]),
qml.RX(phi, wires=[1]),
qml.RX(varphi, wires=[2]),
qml.CNOT(wires=[0, 1]),
qml.CNOT(wires=[1, 2]),
],
rotations=obs.diagonalizing_gates(),
)
res = dev.expval(obs)
expected = 0.5 * (
-6 * np.cos(theta) * (np.cos(varphi) + 1)
- 2 * np.sin(varphi) * (np.cos(theta) + np.sin(phi) - 2 * np.cos(phi))
+ 3 * np.cos(varphi) * np.sin(phi)
+ np.sin(phi)
)
assert np.allclose(res, expected, **tol)
| 32.896657 | 100 | 0.530537 | [
"Apache-2.0"
] | wongwsvincent/pennylane-cirq | tests/test_expval.py | 10,823 | Python |
##
# @filename : epd4in2b.py
# @brief : Implements for Dual-color e-paper library
# @author : Yehui from Waveshare
#
# Copyright (C) Waveshare August 15 2017
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
#
from . import epdif
from PIL import Image
import RPi.GPIO as GPIO
# Display resolution
EPD_WIDTH = 400
EPD_HEIGHT = 300
# EPD4IN2B commands
PANEL_SETTING = 0x00
POWER_SETTING = 0x01
POWER_OFF = 0x02
POWER_OFF_SEQUENCE_SETTING = 0x03
POWER_ON = 0x04
POWER_ON_MEASURE = 0x05
BOOSTER_SOFT_START = 0x06
DEEP_SLEEP = 0x07
DATA_START_TRANSMISSION_1 = 0x10
DATA_STOP = 0x11
DISPLAY_REFRESH = 0x12
DATA_START_TRANSMISSION_2 = 0x13
VCOM_LUT = 0x20
W2W_LUT = 0x21
B2W_LUT = 0x22
W2B_LUT = 0x23
B2B_LUT = 0x24
PLL_CONTROL = 0x30
TEMPERATURE_SENSOR_CALIBRATION = 0x40
TEMPERATURE_SENSOR_SELECTION = 0x41
TEMPERATURE_SENSOR_WRITE = 0x42
TEMPERATURE_SENSOR_READ = 0x43
VCOM_AND_DATA_INTERVAL_SETTING = 0x50
LOW_POWER_DETECTION = 0x51
TCON_SETTING = 0x60
RESOLUTION_SETTING = 0x61
GSST_SETTING = 0x65
GET_STATUS = 0x71
AUTO_MEASURE_VCOM = 0x80
VCOM_VALUE = 0x81
VCM_DC_SETTING = 0x82
PARTIAL_WINDOW = 0x90
PARTIAL_IN = 0x91
PARTIAL_OUT = 0x92
PROGRAM_MODE = 0xA0
ACTIVE_PROGRAM = 0xA1
READ_OTP_DATA = 0xA2
POWER_SAVING = 0xE3
class EPD:
def __init__(self):
self.reset_pin = epdif.RST_PIN
self.dc_pin = epdif.DC_PIN
self.busy_pin = epdif.BUSY_PIN
self.width = EPD_WIDTH
self.height = EPD_HEIGHT
def digital_write(self, pin, value):
epdif.epd_digital_write(pin, value)
def digital_read(self, pin):
return epdif.epd_digital_read(pin)
def delay_ms(self, delaytime):
epdif.epd_delay_ms(delaytime)
def send_command(self, command):
self.digital_write(self.dc_pin, GPIO.LOW)
# the parameter type is list but not int
# so use [command] instead of command
epdif.spi_transfer([command])
def send_data(self, data):
self.digital_write(self.dc_pin, GPIO.HIGH)
# the parameter type is list but not int
# so use [data] instead of data
epdif.spi_transfer([data])
def init(self):
if (epdif.epd_init() != 0):
return -1
self.reset()
self.send_command(BOOSTER_SOFT_START)
self.send_data (0x17)
self.send_data (0x17)
self.send_data (0x17) # 07 0f 17 1f 27 2F 37 2f
self.send_command(POWER_ON)
self.wait_until_idle()
self.send_command(PANEL_SETTING)
self.send_data(0x0F) # LUT from OTP
def wait_until_idle(self):
while(self.digital_read(self.busy_pin) == 0): # 0: busy, 1: idle
self.delay_ms(100)
def reset(self):
self.digital_write(self.reset_pin, GPIO.LOW) # module reset
self.delay_ms(200)
self.digital_write(self.reset_pin, GPIO.HIGH)
self.delay_ms(200)
def get_frame_buffer(self, image):
buf = [0xFF] * int(self.width * self.height / 8)
# Set buffer to value of Python Imaging Library image.
# Image must be in mode 1.
image_monocolor = image.convert('1')
imwidth, imheight = image_monocolor.size
if imwidth != self.width or imheight != self.height:
raise ValueError('Image must be same dimensions as display \
({0}x{1}).' .format(self.width, self.height))
pixels = image_monocolor.load()
for y in range(self.height):
for x in range(self.width):
# Set the bits for the column of pixels at the current position.
if pixels[x, y] == 0:
buf[int((x + y * self.width) / 8)] &= ~(0x80 >> (x % 8))
return buf
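    # Editorial note: get_frame_buffer packs 8 horizontal pixels per byte, MSB first.
    # A black pixel at (x, y) clears bit (7 - x % 8) of byte (x + y * width) // 8;
    # for example, a black pixel at x=3, y=0 turns buf[0] from 0xFF into
    # 0xFF & ~(0x80 >> 3) == 0xEF.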
def display_frame(self, frame_buffer_black, frame_buffer_red):
if (frame_buffer_black != None):
self.send_command(DATA_START_TRANSMISSION_1)
self.delay_ms(2)
for i in range(0, int(self.width * self.height / 8)):
self.send_data(frame_buffer_black[i])
self.delay_ms(2)
if (frame_buffer_red != None):
self.send_command(DATA_START_TRANSMISSION_2)
self.delay_ms(2)
for i in range(0, int(self.width * self.height / 8)):
self.send_data(frame_buffer_red[i])
self.delay_ms(2)
self.send_command(DISPLAY_REFRESH)
self.wait_until_idle()
# after this, call epd.init() to awaken the module
def sleep(self):
self.send_command(VCOM_AND_DATA_INTERVAL_SETTING)
self.send_data(0xF7) # border floating
self.send_command(POWER_OFF)
self.wait_until_idle()
self.send_command(DEEP_SLEEP)
self.send_data(0xA5) # check code
### END OF FILE ###
| 35.75 | 81 | 0.543357 | [
"MIT"
] | Richard-Kirby/sema_clock | display/epd4in2b.py | 7,150 | Python |
# -*- coding: utf-8 -*-
"""This module is a stub for classes related to vulnerability exposure scores.
Copyright:
(c) 2022 Illumio
License:
Apache2, see LICENSE for more details.
"""
from dataclasses import dataclass
from illumio.util import MutableObject
@dataclass
class Vulnerability(MutableObject):
score: int = None
| 17.842105 | 78 | 0.731563 | [
"Apache-2.0"
] | dsommerville-illumio/illumio-py | illumio/vulnerabilities/vulnerability.py | 339 | Python |
import fileinput
from itertools import permutations
SEQ = [int(x) for x in fileinput.input()]
LEN = 25
for i in range(LEN, len(SEQ)):
for x, y in permutations(SEQ[i-LEN:i], 2):
if x + y == SEQ[i]:
break
else:
INVALID = SEQ[i]
print "Part 1:", INVALID
break
for n in range(2, len(SEQ)):
tot = 0
for i in range(len(SEQ)-n):
tot = sum(SEQ[i:i+n])
if tot == INVALID:
print "Part 2:", min(SEQ[i:i+n]) + max(SEQ[i:i+n])
| 22 | 62 | 0.529644 | [
"MIT"
] | iKevinY/advent | 2020/day09.py | 506 | Python |
import logging
import os
from pathlib import Path
import typing
from logging.handlers import RotatingFileHandler
from dotenv import load_dotenv
import services.pv_simulator.constants as constants
from services.pv_simulator.main_loop import MainLoop
from services.pv_simulator.mq_receiver import MQReceiver, MQReceiverFactory
from services.pv_simulator.pv_power_value_calculator import PVPowerValueCalculator
from services.pv_simulator.typing_custom_protocols import (
MQReceiverProtocol,
PVPowerValueCalculatorProtocol
)
from services.pv_simulator import utils
current_dir_path: Path = Path(__file__).parent.absolute()
load_dotenv(dotenv_path=f"{current_dir_path}/.env")
def get_test_modules_names() -> typing.List[str]:
from services.pv_simulator.tests.unit import constants_for_tests
return constants_for_tests.TESTS_MODULES
def get_mq_receiver(callback: typing.Callable[[], bool]) -> MQReceiverProtocol:
return MQReceiverFactory.get_mq_receiver(constants.MQ_RECEIVER_TYPE, check_if_must_exit=callback)
def get_pv_power_value_calculator() -> PVPowerValueCalculatorProtocol:
return PVPowerValueCalculator(constants.MINUTES_DATA_SET, constants.PV_POWER_VALUES_DATA_SET)
def main(sys_argv: typing.List[str]) -> None:
"""PV simulator execution entry point.
Parameters
----------
sys_argv : list
contains the list of arguments passed to the CLI during its execution. The first argument contains the
executed script name.
"""
main_logger: typing.Optional[logging.Logger] = None
try:
must_exit_after_24h = os.getenv("MUST_EXIT_AFTER_24H", "0")
must_exit_after_24h = \
True if must_exit_after_24h.isdecimal() and int(must_exit_after_24h) == 1 else False
main_logger = utils.initialize_loggers(current_dir_path)
main_loop: MainLoop = MainLoop(constants.LOGGER_NAME,
constants.RESULTS_LOGGER_NAME,
current_dir_path,
must_exit_after_24h,
get_mq_receiver,
get_pv_power_value_calculator,
tests_modules_names_provider=get_test_modules_names)
main_loop.handle_arguments(sys_argv)
except KeyboardInterrupt:
if main_logger is not None:
main_logger.exception("Required to abort:")
else:
import traceback
traceback.print_exc()
except Exception:
if main_logger is not None:
main_logger.exception("Error:")
else:
import traceback
traceback.print_exc()
| 36.675676 | 110 | 0.695652 | [
"MIT"
] | reynierg/pv_simulator_challenge | services/pv_simulator/main.py | 2,714 | Python |
from .tools import pair, check_sizes
from .dcn_v2 import deform_conv2d_jt
from .init import trunc_normal_ | 35 | 36 | 0.847619 | [
"MIT"
] | liuruiyang98/Jittor-MLP | models_jittor/utils/__init__.py | 105 | Python |
# -*- coding: utf-8 -*-
from __future__ import print_function
import argparse
import aerospike
from aerospike import exception as e
try:
from aerospike_helpers.operations import map_operations as mh
except:
pass # Needs Aerospike client >= 3.4.0
import datetime
import pprint
import random
import sys
import time
argparser = argparse.ArgumentParser(add_help=False)
argparser.add_argument(
"--help", dest="help", action="store_true", help="Displays this message."
)
argparser.add_argument(
"-U",
"--username",
dest="username",
metavar="<USERNAME>",
help="Username to connect to database.",
)
argparser.add_argument(
"-P",
"--password",
dest="password",
metavar="<PASSWORD>",
help="Password to connect to database.",
)
argparser.add_argument(
"-h",
"--host",
dest="host",
default="127.0.0.1",
metavar="<ADDRESS>",
help="Address of Aerospike server.",
)
argparser.add_argument(
"-p",
"--port",
dest="port",
type=int,
default=3000,
metavar="<PORT>",
help="Port of the Aerospike server.",
)
argparser.add_argument(
"-n",
"--namespace",
dest="namespace",
default="test",
metavar="<NS>",
help="Port of the Aerospike server.",
)
argparser.add_argument(
"-s",
"--set",
dest="set",
default="profiles",
metavar="<SET>",
help="Port of the Aerospike server.",
)
argparser.add_argument(
"-i",
"--interactive",
dest="interactive",
action="store_true",
help="Interactive Mode",
)
options = argparser.parse_args()
if options.help:
argparser.print_help()
print()
sys.exit(1)
def version_tuple(version):
return tuple(int(i) for i in version.split("."))
def pause():
input("Hit return to continue")
if options.namespace and options.namespace != "None":
namespace = options.namespace
else:
namespace = None
set = options.set if options.set and options.set != "None" else None
config = {"hosts": [(options.host, options.port)]}
try:
client = aerospike.client(config).connect(options.username, options.password)
policy = {"key": aerospike.POLICY_KEY_SEND}
except e.ClientError as exc:
    # the argument parser above defines no --quiet option, so report the error unconditionally
    print("Error: {0} [{1}]".format(exc.msg, exc.code))
sys.exit(2)
version = client.info_all("version")
release = list(version.values())[0][1].split(" ")[-1]
if version_tuple(aerospike.__version__) < version_tuple("3.4.0") or version_tuple(
release
) < version_tuple("4.6"):
print(
"\nPlease use Python client >= 3.4.0, ",
"Aerospike database >= 4.6 for this example.",
)
sys.exit(3)
pp = pprint.PrettyPrinter(indent=2)
spacer = "=" * 30
epoch = datetime.datetime(2019, 1, 1)
now = datetime.datetime.now()
try:
# Find all segments whose TTL is before this hour
key = (namespace, set, "u3")
current_hour = int((now - epoch).total_seconds() / 3600)
print("\nCurrent hour is {} hours since epoch".format(current_hour))
if options.interactive:
pause()
ops = [
mh.map_get_by_value_range(
"u",
[0, aerospike.null()],
[current_hour - 1, aerospike.null()],
aerospike.MAP_RETURN_KEY,
False,
),
mh.map_size("u"),
]
_, _, b = client.operate_ordered(key, ops)
stale_segments, total_segments = b
print("This user has a total of {} segments".format(total_segments[1]))
print(
"Of those, a total of {} segments should be cleaned".format(
len(stale_segments[1])
)
)
print("Show all segments with a segment TTL before the current hour:")
print(stale_segments)
print(spacer)
# Clean up the stale segments using a background scan with a transaction
# attached to it
print("Clean the stale segments from the entire namespace")
if options.interactive:
pause()
ops = [
mh.map_remove_by_value_range(
"u",
[0, aerospike.null()],
[current_hour - 1, aerospike.null()],
aerospike.MAP_RETURN_NONE,
False,
)
]
# _, _, _ = client.operate_ordered(key, ops)
scan = client.scan(namespace, set)
scan.add_ops(ops)
job_id = scan.execute_background()
# wait for job to finish
while True:
response = client.job_info(job_id, aerospike.JOB_SCAN)
if response["status"] != aerospike.JOB_STATUS_INPROGRESS:
break
time.sleep(0.25)
ops = [
mh.map_get_by_value_range(
"u",
[0, aerospike.null()],
[current_hour - 1, aerospike.null()],
aerospike.MAP_RETURN_KEY,
False,
),
mh.map_size("u"),
]
_, _, b = client.operate_ordered(key, ops)
stale_segments, total_segments = b
print("This user now has a total of {} segments".format(total_segments[1]))
print(
"Of those, a total of {} segments should be cleaned".format(
len(stale_segments[1])
)
)
print("Show all segments with a segment TTL before the current hour:")
print(stale_segments)
print(spacer)
except Exception as e:
print("Error: {0} [{1}]".format(e.msg, e.code))
client.close()
| 26.185 | 82 | 0.618675 | [
"MIT"
] | aerospike-examples/modeling-user-segmentation | trim_segments.py | 5,237 | Python |
import copy
from enum import Enum
from jinja2 import Template
from typing import List
from dispatch.conversation.enums import ConversationButtonActions
from dispatch.incident.enums import IncidentStatus
from .config import (
DISPATCH_UI_URL,
INCIDENT_RESOURCE_CONVERSATION_REFERENCE_DOCUMENT,
INCIDENT_RESOURCE_EXECUTIVE_REPORT_DOCUMENT,
INCIDENT_RESOURCE_INCIDENT_FAQ_DOCUMENT,
INCIDENT_RESOURCE_INCIDENT_REVIEW_DOCUMENT,
INCIDENT_RESOURCE_INVESTIGATION_DOCUMENT,
INCIDENT_RESOURCE_INVESTIGATION_SHEET,
)
class MessageType(str, Enum):
incident_daily_summary = "incident-daily-summary"
incident_daily_summary_no_incidents = "incident-daily-summary-no-incidents"
incident_executive_report = "incident-executive-report"
incident_notification = "incident-notification"
incident_participant_welcome = "incident-participant-welcome"
incident_resources_message = "incident-resources-message"
incident_tactical_report = "incident-tactical-report"
incident_task_list = "incident-task-list"
incident_task_reminder = "incident-task-reminder"
incident_status_reminder = "incident-status-reminder"
incident_participant_suggested_reading = "incident-participant-suggested-reading"
INCIDENT_STATUS_DESCRIPTIONS = {
IncidentStatus.active: "This incident is under active investigation.",
IncidentStatus.stable: "This incident is stable, the bulk of the investigation has been completed or most of the risk has been mitigated.",
IncidentStatus.closed: "This no longer requires additional involvement, long term incident action items have been assigned to their respective owners.",
}
INCIDENT_TASK_REMINDER_DESCRIPTION = """
You are assigned to the following incident tasks.
This is a reminder that these tasks have passed their due date.
Please review and update them as appropriate. Resolving them will stop the reminders.""".replace(
"\n", " "
).strip()
INCIDENT_TASK_LIST_DESCRIPTION = """The following are open incident tasks."""
INCIDENT_DAILY_SUMMARY_DESCRIPTION = """
Daily Incidents Summary""".replace(
"\n", " "
).strip()
INCIDENT_DAILY_SUMMARY_ACTIVE_INCIDENTS_DESCRIPTION = f"""
Active Incidents (<{DISPATCH_UI_URL}/incidents/status|Details>)""".replace(
"\n", " "
).strip()
INCIDENT_DAILY_SUMMARY_NO_ACTIVE_INCIDENTS_DESCRIPTION = """
There are no active incidents at this moment.""".replace(
"\n", " "
).strip()
INCIDENT_DAILY_SUMMARY_STABLE_CLOSED_INCIDENTS_DESCRIPTION = """
Stable or Closed Incidents (last 24 hours)""".replace(
"\n", " "
).strip()
INCIDENT_DAILY_SUMMARY_NO_STABLE_CLOSED_INCIDENTS_DESCRIPTION = """
There are no stable or closed incidents in the last 24 hours.""".replace(
"\n", " "
).strip()
INCIDENT_COMMANDER_DESCRIPTION = """
The Incident Commander (IC) is responsible for
knowing the full context of the incident.
Contact them about any questions or concerns.""".replace(
"\n", " "
).strip()
INCIDENT_COMMANDER_READDED_DESCRIPTION = """
{{ commander_fullname }} (Incident Commander) has been re-added to the conversation.
Please, handoff the Incident Commander role before leaving the conversation.""".replace(
"\n", " "
).strip()
INCIDENT_TICKET_DESCRIPTION = """
Ticket for tracking purposes. It contains a description of
the incident and links to resources.""".replace(
"\n", " "
).strip()
INCIDENT_CONVERSATION_DESCRIPTION = """
Private conversation for real-time discussion. All incident participants get added to it.
""".replace(
"\n", " "
).strip()
INCIDENT_CONVERSATION_REFERENCE_DOCUMENT_DESCRIPTION = """
Document containing the list of slash commands available to the Incident Commander (IC)
and participants in the incident conversation.""".replace(
"\n", " "
).strip()
INCIDENT_CONFERENCE_DESCRIPTION = """
Video conference and phone bridge to be used throughout the incident. Password: {{conference_challenge if conference_challenge else 'N/A'}}
""".replace(
"\n", ""
).strip()
INCIDENT_STORAGE_DESCRIPTION = """
Common storage for all incident artifacts and
documents. Add logs, screen captures, or any other data collected during the
investigation to this drive. It is shared with all incident participants.""".replace(
"\n", " "
).strip()
INCIDENT_INVESTIGATION_DOCUMENT_DESCRIPTION = """
This is a document for all incident facts and context. All
incident participants are expected to contribute to this document.
It is shared with all incident participants.""".replace(
"\n", " "
).strip()
INCIDENT_INVESTIGATION_SHEET_DESCRIPTION = """
This is a sheet for tracking impacted assets. All
incident participants are expected to contribute to this sheet.
It is shared with all incident participants.""".replace(
"\n", " "
).strip()
INCIDENT_FAQ_DOCUMENT_DESCRIPTION = """
First time responding to an information security incident? This
document answers common questions encountered when
helping us respond to an incident.""".replace(
"\n", " "
).strip()
INCIDENT_REVIEW_DOCUMENT_DESCRIPTION = """
This document will capture all lessons learned, questions, and action items raised during the incident.""".replace(
"\n", " "
).strip()
INCIDENT_EXECUTIVE_REPORT_DOCUMENT_DESCRIPTION = """
This is a document that contains an executive report about the incident.""".replace(
"\n", " "
).strip()
INCIDENT_DOCUMENT_DESCRIPTIONS = {
INCIDENT_RESOURCE_CONVERSATION_REFERENCE_DOCUMENT: INCIDENT_CONVERSATION_REFERENCE_DOCUMENT_DESCRIPTION,
INCIDENT_RESOURCE_EXECUTIVE_REPORT_DOCUMENT: INCIDENT_EXECUTIVE_REPORT_DOCUMENT_DESCRIPTION,
INCIDENT_RESOURCE_INCIDENT_FAQ_DOCUMENT: INCIDENT_FAQ_DOCUMENT_DESCRIPTION,
INCIDENT_RESOURCE_INCIDENT_REVIEW_DOCUMENT: INCIDENT_REVIEW_DOCUMENT_DESCRIPTION,
INCIDENT_RESOURCE_INVESTIGATION_DOCUMENT: INCIDENT_INVESTIGATION_DOCUMENT_DESCRIPTION,
INCIDENT_RESOURCE_INVESTIGATION_SHEET: INCIDENT_INVESTIGATION_SHEET_DESCRIPTION,
}
INCIDENT_PARTICIPANT_WELCOME_DESCRIPTION = """
You\'re being contacted because we think you may
be able to help us during this information security incident.
Please review the content below and join us in the
incident Slack channel.""".replace(
"\n", " "
).strip()
INCIDENT_WELCOME_CONVERSATION_COPY = """
This is the incident conversation. Please pull in any
individuals you feel may be able to help resolve this incident.""".replace(
"\n", " "
).strip()
INCIDENT_PARTICIPANT_SUGGESTED_READING_DESCRIPTION = """
Dispatch thinks the following documents might be
relevant to this incident.""".replace(
"\n", " "
).strip()
INCIDENT_NOTIFICATION_PURPOSES_FYI = """
This message is for notification purposes only.""".replace(
"\n", " "
).strip()
INCIDENT_CAN_REPORT_REMINDER = """
It's time to send a new CAN report. Go to the Demisto UI and run the
CAN Report playbook from the Playground Work Plan.""".replace(
"\n", " "
).strip()
INCIDENT_VULNERABILITY_DESCRIPTION = """
We are tracking the details of the vulnerability that led to this incident
in the VUL Jira issue linked above.""".replace(
"\n", " "
).strip()
INCIDENT_STABLE_DESCRIPTION = """
The risk has been contained and the incident marked as stable.""".replace(
"\n", " "
).strip()
INCIDENT_CLOSED_DESCRIPTION = """
The incident has been resolved and marked as closed.""".replace(
"\n", " "
).strip()
INCIDENT_TACTICAL_REPORT_DESCRIPTION = """
The following conditions, actions, and needs summarize the current status of the incident.""".replace(
"\n", " "
).strip()
INCIDENT_NEW_ROLE_DESCRIPTION = """
{{assigner_fullname}} has assigned the role of {{assignee_role}} to {{assignee_fullname}}.
Please, contact {{assignee_firstname}} about any questions or concerns.""".replace(
"\n", " "
).strip()
INCIDENT_REPORT_REMINDER_DESCRIPTION = """You have not provided a {{report_type}} for this incident recently.
You can use `{{command}}` in the conversation to assist you in writing one.""".replace(
"\n", " "
).strip()
INCIDENT_STATUS_REMINDER_DESCRIPTION = """You have not updated the status for this incident recently. If the incident has been resolved,
you can use `{{command}}` in the conversation to assist you in closing your incident.""".replace(
"\n", " "
).strip()
INCIDENT_TASK_NEW_DESCRIPTION = """
The following incident task has been created in the incident document.\n\n*Description:* {{task_description}}\n\n*Assignees:* {{task_assignees|join(',')}}"""
INCIDENT_TASK_RESOLVED_DESCRIPTION = """
The following incident task has been resolved in the incident document.\n\n*Description:* {{task_description}}\n\n*Assignees:* {{task_assignees|join(',')}}"""
INCIDENT_WORKFLOW_CREATED_DESCRIPTION = """
A new workflow instance has been created.
\n\n *Creator:* {{instance_creator_name}}
"""
INCIDENT_WORKFLOW_UPDATE_DESCRIPTION = """
This workflow's status has changed from *{{ instance_status_old }}* to *{{ instance_status_new }}*.
\n\n*Workflow Description*: {{workflow_description}}
\n\n *Creator:* {{instance_creator_name}}
"""
INCIDENT_WORKFLOW_COMPLETE_DESCRIPTION = """
This workflow's status has changed from *{{ instance_status_old }}* to *{{ instance_status_new }}*.
\n\n *Workflow Description:* {{workflow_description}}
\n\n *Creator:* {{instance_creator_name}}
{% if instance_artifacts %}
\n\n *Workflow Artifacts:*
\n\n {% for a in instance_artifacts %}- <{{a.weblink}}|{{a.name}}> \n\n{% endfor %}
{% endif %}
"""
INCIDENT_TYPE_CHANGE_DESCRIPTION = """
The incident type has been changed from *{{ incident_type_old }}* to *{{ incident_type_new }}*."""
INCIDENT_STATUS_CHANGE_DESCRIPTION = """
The incident status has been changed from *{{ incident_status_old }}* to *{{ incident_status_new }}*."""
INCIDENT_PRIORITY_CHANGE_DESCRIPTION = """
The incident priority has been changed from *{{ incident_priority_old }}* to *{{ incident_priority_new }}*."""
INCIDENT_NAME_WITH_ENGAGEMENT = {
"title": "{{name}} Incident Notification",
"title_link": "{{ticket_weblink}}",
"text": INCIDENT_NOTIFICATION_PURPOSES_FYI,
"button_text": "Join Incident",
"button_value": "{{incident_id}}",
"button_action": ConversationButtonActions.invite_user,
}
INCIDENT_NAME = {
"title": "{{name}} Incident Notification",
"title_link": "{{ticket_weblink}}",
"text": INCIDENT_NOTIFICATION_PURPOSES_FYI,
}
INCIDENT_TITLE = {"title": "Incident Title", "text": "{{title}}"}
INCIDENT_DESCRIPTION = {"title": "Incident Description", "text": "{{description}}"}
INCIDENT_STATUS = {
"title": "Incident Status - {{status}}",
"status_mapping": INCIDENT_STATUS_DESCRIPTIONS,
}
INCIDENT_TYPE = {"title": "Incident Type - {{type}}", "text": "{{type_description}}"}
INCIDENT_PRIORITY = {
"title": "Incident Priority - {{priority}}",
"text": "{{priority_description}}",
}
INCIDENT_PRIORITY_FYI = {
"title": "Incident Priority - {{priority}}",
"text": "{{priority_description}}",
}
INCIDENT_COMMANDER = {
"title": "Incident Commander - {{commander_fullname}}",
"title_link": "{{commander_weblink}}",
"text": INCIDENT_COMMANDER_DESCRIPTION,
}
INCIDENT_CONFERENCE = {
"title": "Incident Conference",
"title_link": "{{conference_weblink}}",
"text": INCIDENT_CONFERENCE_DESCRIPTION,
}
INCIDENT_STORAGE = {
"title": "Incident Storage",
"title_link": "{{storage_weblink}}",
"text": INCIDENT_STORAGE_DESCRIPTION,
}
INCIDENT_CONVERSATION_COMMANDS_REFERENCE_DOCUMENT = {
"title": "Incident Conversation Commands Reference Document",
"title_link": "{{conversation_commands_reference_document_weblink}}",
"text": INCIDENT_CONVERSATION_REFERENCE_DOCUMENT_DESCRIPTION,
}
INCIDENT_INVESTIGATION_DOCUMENT = {
"title": "Incident Investigation Document",
"title_link": "{{document_weblink}}",
"text": INCIDENT_INVESTIGATION_DOCUMENT_DESCRIPTION,
}
INCIDENT_INVESTIGATION_SHEET = {
"title": "Incident Investigation Sheet",
"title_link": "{{sheet_weblink}}",
"text": INCIDENT_INVESTIGATION_SHEET_DESCRIPTION,
}
INCIDENT_REVIEW_DOCUMENT = {
"title": "Incident Review Document",
"title_link": "{{review_document_weblink}}",
"text": INCIDENT_REVIEW_DOCUMENT_DESCRIPTION,
}
INCIDENT_FAQ_DOCUMENT = {
"title": "Incident FAQ Document",
"title_link": "{{faq_weblink}}",
"text": INCIDENT_FAQ_DOCUMENT_DESCRIPTION,
}
INCIDENT_TYPE_CHANGE = {"title": "Incident Type Change", "text": INCIDENT_TYPE_CHANGE_DESCRIPTION}
INCIDENT_STATUS_CHANGE = {
"title": "Incident Status Change",
"text": INCIDENT_STATUS_CHANGE_DESCRIPTION,
}
INCIDENT_PRIORITY_CHANGE = {
"title": "Incident Priority Change",
"text": INCIDENT_PRIORITY_CHANGE_DESCRIPTION,
}
INCIDENT_PARTICIPANT_SUGGESTED_READING_ITEM = {
"title": "{{name}}",
"title_link": "{{weblink}}",
"text": "{{description}}",
}
INCIDENT_PARTICIPANT_WELCOME = {
"title": "Welcome to {{name}}",
"title_link": "{{ticket_weblink}}",
"text": INCIDENT_PARTICIPANT_WELCOME_DESCRIPTION,
}
INCIDENT_PARTICIPANT_WELCOME_MESSAGE = [
INCIDENT_PARTICIPANT_WELCOME,
INCIDENT_TITLE,
INCIDENT_STATUS,
INCIDENT_TYPE,
INCIDENT_PRIORITY,
INCIDENT_COMMANDER,
INCIDENT_INVESTIGATION_DOCUMENT,
INCIDENT_STORAGE,
INCIDENT_CONFERENCE,
INCIDENT_CONVERSATION_COMMANDS_REFERENCE_DOCUMENT,
INCIDENT_FAQ_DOCUMENT,
]
INCIDENT_RESOURCES_MESSAGE = [
INCIDENT_COMMANDER,
INCIDENT_INVESTIGATION_DOCUMENT,
INCIDENT_REVIEW_DOCUMENT,
INCIDENT_STORAGE,
INCIDENT_CONFERENCE,
INCIDENT_CONVERSATION_COMMANDS_REFERENCE_DOCUMENT,
INCIDENT_FAQ_DOCUMENT,
]
INCIDENT_NOTIFICATION_COMMON = [INCIDENT_TITLE]
INCIDENT_NOTIFICATION = INCIDENT_NOTIFICATION_COMMON.copy()
INCIDENT_NOTIFICATION.extend(
[INCIDENT_STATUS, INCIDENT_TYPE, INCIDENT_PRIORITY_FYI, INCIDENT_COMMANDER]
)
INCIDENT_TACTICAL_REPORT = [
{"title": "Incident Tactical Report", "text": INCIDENT_TACTICAL_REPORT_DESCRIPTION},
{"title": "Conditions", "text": "{{conditions}}"},
{"title": "Actions", "text": "{{actions}}"},
{"title": "Needs", "text": "{{needs}}"},
]
INCIDENT_EXECUTIVE_REPORT = [
{"title": "Incident Title", "text": "{{title}}"},
{"title": "Current Status", "text": "{{current_status}}"},
{"title": "Overview", "text": "{{overview}}"},
{"title": "Next Steps", "text": "{{next_steps}}"},
]
INCIDENT_REPORT_REMINDER = [
{
"title": "{{name}} Incident - {{report_type}} Reminder",
"title_link": "{{ticket_weblink}}",
"text": INCIDENT_REPORT_REMINDER_DESCRIPTION,
},
INCIDENT_TITLE,
]
INCIDENT_STATUS_REMINDER = [
{
"title": "{{name}} Incident - Status Reminder",
"title_link": "{{ticket_weblink}}",
"text": INCIDENT_STATUS_REMINDER_DESCRIPTION,
},
INCIDENT_TITLE,
INCIDENT_STATUS,
]
INCIDENT_TASK_REMINDER = [
{"title": "Incident - {{ name }}", "text": "{{ title }}"},
{"title": "Creator", "text": "{{ creator }}"},
{"title": "Description", "text": "{{ description }}"},
{"title": "Priority", "text": "{{ priority }}"},
{"title": "Created At", "text": "", "datetime": "{{ created_at}}"},
{"title": "Resolve By", "text": "", "datetime": "{{ resolve_by }}"},
{"title": "Link", "text": "{{ weblink }}"},
]
INCIDENT_NEW_ROLE_NOTIFICATION = [
{
"title": "New {{assignee_role}} - {{assignee_fullname}}",
"title_link": "{{assignee_weblink}}",
"text": INCIDENT_NEW_ROLE_DESCRIPTION,
}
]
INCIDENT_TASK_NEW_NOTIFICATION = [
{
"title": "New Incident Task",
"title_link": "{{task_weblink}}",
"text": INCIDENT_TASK_NEW_DESCRIPTION,
}
]
INCIDENT_TASK_RESOLVED_NOTIFICATION = [
{
"title": "Resolved Incident Task",
"title_link": "{{task_weblink}}",
"text": INCIDENT_TASK_RESOLVED_DESCRIPTION,
}
]
INCIDENT_WORKFLOW_CREATED_NOTIFICATION = [
{
"title": "Workflow Created - {{workflow_name}}",
"text": INCIDENT_WORKFLOW_CREATED_DESCRIPTION,
}
]
INCIDENT_WORKFLOW_UPDATE_NOTIFICATION = [
{
"title": "Workflow Status Change - {{workflow_name}}",
"title_link": "{{instance_weblink}}",
"text": INCIDENT_WORKFLOW_UPDATE_DESCRIPTION,
}
]
INCIDENT_WORKFLOW_COMPLETE_NOTIFICATION = [
{
"title": "Workflow Completed - {{workflow_name}}",
"title_link": "{{instance_weblink}}",
"text": INCIDENT_WORKFLOW_COMPLETE_DESCRIPTION,
}
]
INCIDENT_COMMANDER_READDED_NOTIFICATION = [
{"title": "Incident Commander Re-Added", "text": INCIDENT_COMMANDER_READDED_DESCRIPTION}
]
def render_message_template(message_template: List[dict], **kwargs):
"""Renders the jinja data included in the template itself."""
data = []
new_copy = copy.deepcopy(message_template)
for d in new_copy:
if d.get("status_mapping"):
d["text"] = d["status_mapping"][kwargs["status"]]
if d.get("datetime"):
d["datetime"] = Template(d["datetime"]).render(**kwargs)
d["text"] = Template(d["text"]).render(**kwargs)
d["title"] = Template(d["title"]).render(**kwargs)
if d.get("title_link"):
d["title_link"] = Template(d["title_link"]).render(**kwargs)
if d["title_link"] == "None": # skip blocks with no content
continue
# skip blocks that do not have new links rendered, as no real value was provided
if not d["title_link"]:
continue
if d.get("button_text"):
d["button_text"] = Template(d["button_text"]).render(**kwargs)
if d.get("button_value"):
d["button_value"] = Template(d["button_value"]).render(**kwargs)
data.append(d)
return data
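# Example usage (illustrative sketch, not part of the original module; all values
# below are hypothetical):
#
#   blocks = render_message_template(
#       INCIDENT_NEW_ROLE_NOTIFICATION,
#       assigner_fullname="Ada Lovelace",
#       assignee_role="Incident Commander",
#       assignee_fullname="Grace Hopper",
#       assignee_firstname="Grace",
#       assignee_weblink="https://example.com/ghopper",
#   )
#   # blocks[0]["title"] == "New Incident Commander - Grace Hopper"
#   # blocks[0]["title_link"] == "https://example.com/ghopper"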
| 33.210526 | 158 | 0.712305 | [
"Apache-2.0"
] | oliverzgy/dispatch | src/dispatch/messaging.py | 17,668 | Python |
import random
import copy
from collections import defaultdict
from collections import deque
from collections import namedtuple
from matplotlib import pyplot as plt
import numpy as np
class Q():
def __init__(self, n_actions, observation_space, bin_size, low_bound=None, high_bound=None, initial_mean=0.0, initial_std=0.0):
self.n_actions = n_actions
self._observation_dimension = 1
for d in observation_space.shape:
self._observation_dimension *= d
self._bin_sizes = bin_size if isinstance(bin_size, list) else [bin_size] * self._observation_dimension
self._dimension_bins = []
for i, low, high in self._low_high_iter(observation_space, low_bound, high_bound):
b_size = self._bin_sizes[i]
bins = self._make_bins(low, high, b_size)
print(bins)
self._dimension_bins.append(bins)
# if we encounter the new observation, we initialize action evaluations
self.table = defaultdict(lambda: initial_std * np.random.randn(self.n_actions) + initial_mean)
@classmethod
def _make_bins(cls, low, high, bin_size):
bins = np.arange(low, high, (float(high) - float(low)) / (bin_size - 2)) # exclude both ends
if min(bins) < 0 and 0 not in bins:
bins = np.sort(np.append(bins, [0])) # 0 centric bins
return bins
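        # Worked example with assumed inputs (editorial, not from the original source):
        # _make_bins(-1.5, 1.5, 5) computes np.arange(-1.5, 1.5, 1.0) == [-1.5, -0.5, 0.5];
        # the range is partly negative and 0 is not yet an edge, so 0 is inserted,
        # giving the 0-centric bins [-1.5, -0.5, 0.0, 0.5].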
@classmethod
def _low_high_iter(cls, observation_space, low_bound, high_bound):
lows = observation_space.low
highs = observation_space.high
for i in range(len(lows)):
low = lows[i]
if low_bound is not None:
_low_bound = low_bound if not isinstance(low_bound, list) else low_bound[i]
low = low if _low_bound is None else max(low, _low_bound)
high = highs[i]
if high_bound is not None:
_high_bound = high_bound if not isinstance(high_bound, list) else high_bound[i]
high = high if _high_bound is None else min(high, _high_bound)
yield i, low, high
def observation_to_state(self, observation):
state = 0
# caution: bin_size over 10 will not work accurately
unit = max(self._bin_sizes)
for d, o in enumerate(observation.flatten()):
state = state + np.digitize(o, self._dimension_bins[d]) * pow(unit, d) # bin_size numeral system
return state
def values(self, observation):
state = self.observation_to_state(observation)
return self.table[state]
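# Editorial note: observation_to_state encodes the binned observation as a single
# integer in a positional number system whose base is max(self._bin_sizes). With,
# say, 4 bins per dimension, digitized indices (d0, d1, d2, d3) map to
# d0 + d1*4 + d2*16 + d3*64, so every distinct discretisation gets its own table key.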
class Agent():
def __init__(self, q, epsilon=0.05):
self.q = q
self.epsilon = epsilon
def act(self, observation):
action = -1
if np.random.random() < self.epsilon:
action = np.random.choice(self.q.n_actions)
else:
action = np.argmax(self.q.values(observation))
return action
class Trainer():
def __init__(self, agent, gamma=0.95, learning_rate=0.1, learning_rate_decay=None, epsilon=0.05, epsilon_decay=None, max_step=-1,target=500):
self.agent = agent
self.gamma = gamma
self.learning_rate = learning_rate
self.learning_rate_decay = learning_rate_decay
self.epsilon = epsilon
self.epsilon_decay = epsilon_decay
self.max_step = max_step
def train(self, env, episode_count, render=False):
        mean_step_all = []
        mean_q_all = []
        goal_time_all = []
        reward_all = []
self.agent.epsilon = self.epsilon
values = []
steps = deque(maxlen=100)
lr = self.learning_rate
for i in range(episode_count):
reward_total = 0
            goal_time = 0
obs = env.reset()
step = 0
done = False
while not done:
if render:
env.render()
action = self.agent.act(obs)
                next_obs, reward, done, step_goal_time = env.step(action)
                reward_total += reward
                goal_time += step_goal_time
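                # The next four lines are the standard tabular Q-learning update:
                # Q(s, a) += lr * (reward + gamma * max_a' Q(s', a') - Q(s, a)),
                # with the bootstrap term "future" set to 0 on terminal transitions.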
state = self.agent.q.observation_to_state(obs)
future = 0 if done else np.max(self.agent.q.values(next_obs))
value = self.agent.q.table[state][action]
self.agent.q.table[state][action] += lr * (reward + self.gamma * future - value)
obs = next_obs
values.append(value)
step += 1
if self.max_step > 0 and step > self.max_step:
done = True
else:
mean = np.mean(values)
steps.append(step)
mean_step = np.mean(steps)
print("Episode {}: {}steps(avg{}). epsilon={:.3f}, lr={:.3f}, mean q value={:.2f}".format(
i, step, mean_step, self.agent.epsilon, lr, mean)
)
mean_step_all.append(mean_step)
mean_q_all.append(mean)
                reward_all.append(reward_total)
                goal_time_all.append(goal_time)
if mean_step>1000:
render=True
if self.epsilon_decay is not None:
self.agent.epsilon = self.epsilon_decay(self.agent.epsilon, i)
if self.learning_rate_decay is not None:
lr = self.learning_rate_decay(lr, i)
        # plot in comparison
plt.xlabel('Episodes')
plt.ylabel('reward')
# plt.plot(mean_step_all, label='Q-learning', color='blue')
plt.plot(reward_all, label='Q-learning', color='yellow')
plt.plot(goal_time_all, label='Q-learning', color='green')
# plt.legend(['reward', 'Q-learning'], loc='upper right')
plt.title('reward/Episode')
plt.show()
        # plot in comparison
plt.xlabel('Episodes')
plt.ylabel('goal_time')
# plt.plot(mean_step_all, label='Q-learning', color='blue')
plt.plot(goal_time_all, label='Q-learning', color='green')
# plt.legend(['reward', 'Q-learning'], loc='upper right')
plt.title('goal/Episode')
plt.show()
| 36.664706 | 145 | 0.574683 | [
"MIT"
] | cisc474projectgroup/cartpole-q-learning | agent.py | 6,233 | Python |
# coding: utf-8
# /*##########################################################################
#
# Copyright (c) 2015-2018 European Synchrotron Radiation Facility
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
#
# ###########################################################################*/
"""
The silx package contains the following main sub-packages:
- silx.gui: Qt widgets for data visualization and data file browsing
- silx.image: Some processing functions for 2D images
- silx.io: Reading and writing data files (HDF5/NeXus, SPEC, ...)
- silx.math: Some processing functions for 1D, 2D, 3D, nD arrays
- silx.opencl: OpenCL-based data processing
- silx.sx: High-level silx functions suited for (I)Python console.
- silx.utils: Miscellaneous convenient functions
See silx documentation: http://www.silx.org/doc/silx/latest/
"""
__authors__ = ["Jérôme Kieffer"]
__license__ = "MIT"
__date__ = "31/08/2018"
import os as _os
import logging as _logging
_logging.getLogger(__name__).addHandler(_logging.NullHandler())
project = _os.path.basename(_os.path.dirname(_os.path.abspath(__file__)))
try:
from ._version import __date__ as date # noqa
from ._version import (
version,
version_info,
hexversion,
strictversion,
dated_version,
) # noqa
except ImportError:
raise RuntimeError(
"Do NOT use %s from its sources: build it and use the built version"
% project
)
| 37.830769 | 79 | 0.697031 | [
"MIT"
] | kif/freesas | freesas/__init__.py | 2,461 | Python |
import pytest
import activeLearning as tP
def test_sayHello():
assert tP.sayHello() == 'Hello World'
assert tP.sayHello('Sankha') == 'Hello Sankha'
assert tP.sayHello(-1) == 'Hello -1'
return
| 23.222222 | 50 | 0.669856 | [
"MIT"
] | sankhaMukherjee/activeLearning | tests/test_activeLearning.py | 209 | Python |
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
class SubCommand(object):
name = NotImplementedError("Please add 'name' member in your SubCommand")
help = NotImplementedError("Please add 'help' member in your SubCommand")
def addParser(self, parser):
raise NotImplementedError("Please implement 'addParser' method in your SubCommand")
def execute(self):
raise NotImplementedError("Please implement 'execute' method in your SubCommand")
| 35.533333 | 91 | 0.765478 | [
"BSD-3-Clause"
] | Stibbons/python-project-bootstrap | src/ppb/cli/sub_cmd/_sub_command.py | 533 | Python |
def encrypt():
message = raw_input("Enter the message you want to encrypt")
ascii_message = [ord(char)+3 for char in message]
encrypt_message = [ chr(char) for char in ascii_message]
print ''.join(encrypt_message)
def decrypt():
message = raw_input("Enter the message you want to decrypt")
ascii_message = [ord(char)-3 for char in message]
decrypt_message = [ chr(char) for char in ascii_message]
print ''.join(decrypt_message)
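# Example of the shift used above: encrypt() moves each character up by 3 code
# points ('abc' -> 'def'), and decrypt() moves it back down by 3 ('def' -> 'abc').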
flag = True
while flag == True:
choice = raw_input("What do you want to do? \n1. Encrypt a message 2. Decrypt a message \nEnter 'e' or 'd' respectively!")
if choice =='e' or choice=="1":
encrypt()
elif choice == 'd' or choice=="2":
decrypt()
else:
        play_again = raw_input("Do you want to try again or do you want to exit? (Y/N)")
if play_again == 'Y':
continue
elif play_again == 'N':
break | 35.88 | 124 | 0.653289 | [
"MIT"
] | Ena-Sharma/Meraki_Solution | Debugging-4/cipher2_0.py | 897 | Python |
with frontend.signin():
frontend.page("repositories", expect={ "document_title": testing.expect.document_title(u"Repositories"),
"content_title": testing.expect.paleyellow_title(0, u"Repositories"),
"pageheader_links": testing.expect.pageheader_links("authenticated",
"administrator"),
"script_user": testing.expect.script_user("admin") })
| 81.142857 | 112 | 0.473592 | [
"Apache-2.0"
] | SyuTingSong/critic | testing/tests/001-main/001-empty/002-authenticated/008-repositories.py | 568 | Python |
from jgsnippets.strings.encoding import clean_encoding
from jgsnippets.strings.format import jprint, pprint | 53.5 | 54 | 0.878505 | [
"MIT"
] | jgontrum/jgsnippets | jgsnippets/strings/__init__.py | 107 | Python |
"""Queuing Search Algorithm.
"""
import copy
import numpy as np
import opytimizer.math.random as r
import opytimizer.utils.constant as c
import opytimizer.utils.logging as l
from opytimizer.core import Optimizer
logger = l.get_logger(__name__)
class QSA(Optimizer):
"""A QSA class, inherited from Optimizer.
This is the designed class to define QSA-related
variables and methods.
References:
J. Zhang et al. Queuing search algorithm: A novel metaheuristic algorithm
for solving engineering optimization problems.
Applied Mathematical Modelling (2018).
"""
def __init__(self, params=None):
"""Initialization method.
Args:
params (dict): Contains key-value parameters to the meta-heuristics.
"""
logger.info('Overriding class: Optimizer -> QSA.')
# Overrides its parent class with the receiving params
super(QSA, self).__init__()
# Builds the class
self.build(params)
logger.info('Class overrided.')
def _calculate_queue(self, n_agents, t_1, t_2, t_3):
"""Calculates the number of agents that belongs to each queue.
Args:
n_agents (int): Number of agents.
t_1 (float): Fitness value of first agent in the population.
t_2 (float): Fitness value of second agent in the population.
t_3 (float): Fitness value of third agent in the population.
Returns:
The number of agents in first, second and third queues.
"""
# Checks if potential service time is bigger than `epsilon`
if t_1 > c.EPSILON:
# Calculates the proportion of agents in first, second and third queues
n_1 = (1 / t_1) / ((1 / t_1) + (1 / t_2) + (1 / t_3))
n_2 = (1 / t_2) / ((1 / t_1) + (1 / t_2) + (1 / t_3))
n_3 = (1 / t_3) / ((1 / t_1) + (1 / t_2) + (1 / t_3))
# If the potential service time is smaller than `epsilon`
else:
# Each queue will have 1/3 ratio
n_1 = 1 / 3
n_2 = 1 / 3
n_3 = 1 / 3
# Calculates the number of agents that belongs to each queue
q_1 = int(n_1 * n_agents)
q_2 = int(n_2 * n_agents)
q_3 = int(n_3 * n_agents)
return q_1, q_2, q_3
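    # Worked example for _calculate_queue above: with 10 agents and fitness
    # values t_1=1.0, t_2=2.0, t_3=4.0, the proportions are 1/1.75, 0.5/1.75
    # and 0.25/1.75, giving q_1 = int(5.71) = 5, q_2 = int(2.86) = 2 and
    # q_3 = int(1.43) = 1.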
def _business_one(self, agents, function, beta):
"""Performs the first business phase.
Args:
agents (list): List of agents.
function (Function): A Function object that will be used as the objective function.
beta (float): Range of fluctuation.
"""
# Sorts agents
agents.sort(key=lambda x: x.fit)
# Copies temporary agents to represent `A_1`, `A_2` and `A_3`
A_1, A_2, A_3 = copy.deepcopy(agents[0]), copy.deepcopy(agents[1]), copy.deepcopy(agents[2])
# Calculates the number of agents in each queue
q_1, q_2, _ = self._calculate_queue(len(agents), A_1.fit, A_2.fit, A_3.fit)
# Represents the update patterns by eq. 4 and eq. 5
case = None
# Iterates through all agents
for i, agent in enumerate(agents):
# Creates another temporary agent
a = copy.deepcopy(agent)
# If index is smaller than the number of agents in first queue
if i < q_1:
# If it is the first agent in first queue
if i == 0:
# Defines the case as one
case = 1
# `A` will receive a copy from `A_1`
A = copy.deepcopy(A_1)
# If index is between first and second queues
elif q_1 <= i < q_1 + q_2:
# If index is the first agent in second queue
if i == q_1:
# Defines the case as one
case = 1
# `A` will receive a copy from `A_2`
A = copy.deepcopy(A_2)
# If index is between second and third queues
else:
# If index is the first agent in third queue
if i == q_1 + q_2:
# Defines the case as one
case = 1
# `A` will receive a copy from `A_3`
A = copy.deepcopy(A_3)
# Generates a uniform random number
alpha = r.generate_uniform_random_number(-1, 1)
# Generates an Erlang distribution
E = r.generate_gamma_random_number(1, 0.5, (agent.n_variables, agent.n_dimensions))
# If case is defined as one
if case == 1:
# Generates an Erlang number
e = r.generate_gamma_random_number(1, 0.5, 1)
# Calculates the fluctuation (eq. 6)
F_1 = beta * alpha * (E * np.fabs(A.position - a.position)) + \
e * (A.position - a.position)
# Updates the temporary agent's position (eq. 4)
a.position = A.position + F_1
# Evaluates the agent
a.fit = function(a.position)
# If new fitness is better than current agent's fitness
if a.fit < agent.fit:
# Replaces the current agent's position and fitness
agent.position = copy.deepcopy(a.position)
agent.fit = copy.deepcopy(a.fit)
# Defines the case as one
case = 1
# If new fitness is worse than current agent's fitness
else:
# Defines the case as two
case = 2
# If case is defined as two
else:
# Calculates the fluctuation (eq. 7)
F_2 = beta * alpha * (E * np.fabs(A.position - a.position))
# Updates the temporary agent's position (eq. 5)
a.position += F_2
# Evaluates the agent
a.fit = function(a.position)
# If new fitness is better than current agent's fitness
if a.fit < agent.fit:
# Replaces the current agent's position and fitness
agent.position = copy.deepcopy(a.position)
agent.fit = copy.deepcopy(a.fit)
# Defines the case as two
case = 2
# If new fitness is worse than current agent's fitness
else:
# Defines the case as one
case = 1
def _business_two(self, agents, function):
"""Performs the second business phase.
Args:
agents (list): List of agents.
function (Function): A Function object that will be used as the objective function.
"""
# Sorts agents
agents.sort(key=lambda x: x.fit)
# Copies temporary agents to represent `A_1`, `A_2` and `A_3`
A_1, A_2, A_3 = copy.deepcopy(agents[0]), copy.deepcopy(agents[1]), copy.deepcopy(agents[2])
# Calculates the number of agents in each queue
q_1, q_2, _ = self._calculate_queue(len(agents), A_1.fit, A_2.fit, A_3.fit)
# Calculates the probability of handling the business
pr = [i / len(agents) for i in range(1, len(agents) + 1)]
# Calculates the confusion degree
cv = A_1.fit / (A_2.fit + A_3.fit + c.EPSILON)
# Iterates through all agents
for i, agent in enumerate(agents):
# Creates another temporary agent
a = copy.deepcopy(agent)
# If index is smaller than the number of agents in first queue
if i < q_1:
# `A` will receive a copy from `A_1`
A = copy.deepcopy(A_1)
# If index is between first and second queues
elif q_1 <= i < q_1 + q_2:
# `A` will receive a copy from `A_2`
A = copy.deepcopy(A_2)
# If index is between second and third queues
else:
# `A` will receive a copy from `A_3`
A = copy.deepcopy(A_3)
# Generates a uniform random number
r1 = r.generate_uniform_random_number()
# If random number is smaller than probability of handling the business
if r1 < pr[i]:
# Randomly selects two individuals
A_1, A_2 = np.random.choice(agents, 2, replace=False)
# Generates another uniform random number
r2 = r.generate_uniform_random_number()
# Generates an Erlang number
e = r.generate_gamma_random_number(1, 0.5, 1)
# If random number is smaller than confusion degree
if r2 < cv:
# Calculates the fluctuation (eq. 14)
F_1 = e * (A_1.position - A_2.position)
# Update agent's position (eq. 12)
a.position += F_1
# If random number is bigger than confusion degree
else:
# Calculates the fluctuation (eq. 15)
F_2 = e * (A.position - A_1.position)
# Update agent's position (eq. 13)
a.position += F_2
# Evaluates the agent
a.fit = function(a.position)
# If the new fitness is better than the current agent's fitness
if a.fit < agent.fit:
# Replaces the current agent's position and fitness
agent.position = copy.deepcopy(a.position)
agent.fit = copy.deepcopy(a.fit)
def _business_three(self, agents, function):
"""Performs the third business phase.
Args:
agents (list): List of agents.
function (Function): A Function object that will be used as the objective function.
"""
# Sorts agents
agents.sort(key=lambda x: x.fit)
# Calculates the probability of handling the business
pr = [i / len(agents) for i in range(1, len(agents) + 1)]
# Iterates through all agents
for i, agent in enumerate(agents):
# Creates another temporary agent
a = copy.deepcopy(agent)
# Iterates through all decision variables
for j in range(agent.n_variables):
# Generates a uniform random number
r1 = r.generate_uniform_random_number()
# If random number is smaller than probability of handling the business
if r1 < pr[i]:
# Randomly selects two individuals
A_1, A_2 = np.random.choice(agents, 2, replace=False)
# Generates an Erlang number
e = r.generate_gamma_random_number(1, 0.5, 1)
# Updates temporary agent's position (eq. 17)
a.position[j] = A_1.position[j] + e * (A_2.position[j] - a.position[j])
# Evaluates the agent
a.fit = function(a.position)
# If the new fitness is better than the current agent's fitness
if a.fit < agent.fit:
# Replaces the current agent's position and fitness
agent.position = copy.deepcopy(a.position)
agent.fit = copy.deepcopy(a.fit)
def update(self, space, function, iteration, n_iterations):
"""Wraps Queue Search Algorithm over all agents and variables.
Args:
space (Space): Space containing agents and update-related information.
function (Function): A Function object that will be used as the objective function.
iteration (int): Current iteration.
n_iterations (int): Maximum number of iterations.
"""
# Calculates the range of fluctuation.
beta = np.exp(np.log(1 / (iteration + c.EPSILON)) * np.sqrt(iteration / n_iterations))
# Performs the first business phase
self._business_one(space.agents, function, beta)
# Performs the second business phase
self._business_two(space.agents, function)
# Performs the third business phase
self._business_three(space.agents, function)
| 35.446991 | 100 | 0.546035 | [
"Apache-2.0"
] | anukaal/opytimizer | opytimizer/optimizers/social/qsa.py | 12,371 | Python |
# coding: utf-8
#
# Copyright 2022 :Barry-Thomas-Paul: Moss
#
# Licensed under the Apache License, Version 2.0 (the "License")
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http: // www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Struct Class
# this is a auto generated file generated by Cheetah
# Namespace: com.sun.star.ucb
# Libre Office Version: 7.3
from typing import TYPE_CHECKING
from ooo.oenv.env_const import UNO_ENVIRONMENT, UNO_RUNTIME, UNO_NONE
if (not TYPE_CHECKING) and UNO_RUNTIME and UNO_ENVIRONMENT:
import uno
def _get_class():
orig_init = None
ordered_keys = ('Source', 'Action', 'Content', 'Id')
def init(self, *args, **kwargs):
if len(kwargs) == 0 and len(args) == 1 and getattr(args[0], "__class__", None) == self.__class__:
orig_init(self, args[0])
return
kargs = kwargs.copy()
for i, arg in enumerate(args):
kargs[ordered_keys[i]] = arg
orig_init(self, **kargs)
type_name = 'com.sun.star.ucb.ContentEvent'
struct = uno.getClass(type_name)
struct.__ooo_ns__ = 'com.sun.star.ucb'
struct.__ooo_full_ns__= type_name
struct.__ooo_type_name__ = 'struct'
orig_init = struct.__init__
struct.__init__ = init
return struct
ContentEvent = _get_class()
else:
from ...lo.ucb.content_event import ContentEvent as ContentEvent
__all__ = ['ContentEvent']
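# Usage sketch (illustrative only; the dynamic branch above needs a live
# LibreOffice/UNO runtime, and the argument values here are placeholders):
# positional arguments are mapped onto the ordered keys
# ('Source', 'Action', 'Content', 'Id'), so
#   ContentEvent(src, action, content, ident)
# is equivalent to
#   ContentEvent(Source=src, Action=action, Content=content, Id=ident)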
| 34.259259 | 109 | 0.672973 | [
"Apache-2.0"
] | Amourspirit/ooo_uno_tmpl | ooobuild/dyn/ucb/content_event.py | 1,850 | Python |
# A lot of failures in these tests on Mac OS X.
# Byte order related?
import unittest
from ctypes import *
from ctypes.test import need_symbol
import _ctypes_test
class CFunctions(unittest.TestCase):
_dll = CDLL(_ctypes_test.__file__)
def S(self):
return c_longlong.in_dll(self._dll, "last_tf_arg_s").value
def U(self):
return c_ulonglong.in_dll(self._dll, "last_tf_arg_u").value
def test_byte(self):
self._dll.tf_b.restype = c_byte
self._dll.tf_b.argtypes = (c_byte,)
self.assertEqual(self._dll.tf_b(-126), -42)
self.assertEqual(self.S(), -126)
def test_byte_plus(self):
self._dll.tf_bb.restype = c_byte
self._dll.tf_bb.argtypes = (c_byte, c_byte)
self.assertEqual(self._dll.tf_bb(0, -126), -42)
self.assertEqual(self.S(), -126)
def test_ubyte(self):
self._dll.tf_B.restype = c_ubyte
self._dll.tf_B.argtypes = (c_ubyte,)
self.assertEqual(self._dll.tf_B(255), 85)
self.assertEqual(self.U(), 255)
def test_ubyte_plus(self):
self._dll.tf_bB.restype = c_ubyte
self._dll.tf_bB.argtypes = (c_byte, c_ubyte)
self.assertEqual(self._dll.tf_bB(0, 255), 85)
self.assertEqual(self.U(), 255)
def test_short(self):
self._dll.tf_h.restype = c_short
self._dll.tf_h.argtypes = (c_short,)
self.assertEqual(self._dll.tf_h(-32766), -10922)
self.assertEqual(self.S(), -32766)
def test_short_plus(self):
self._dll.tf_bh.restype = c_short
self._dll.tf_bh.argtypes = (c_byte, c_short)
self.assertEqual(self._dll.tf_bh(0, -32766), -10922)
self.assertEqual(self.S(), -32766)
def test_ushort(self):
self._dll.tf_H.restype = c_ushort
self._dll.tf_H.argtypes = (c_ushort,)
self.assertEqual(self._dll.tf_H(65535), 21845)
self.assertEqual(self.U(), 65535)
def test_ushort_plus(self):
self._dll.tf_bH.restype = c_ushort
self._dll.tf_bH.argtypes = (c_byte, c_ushort)
self.assertEqual(self._dll.tf_bH(0, 65535), 21845)
self.assertEqual(self.U(), 65535)
def test_int(self):
self._dll.tf_i.restype = c_int
self._dll.tf_i.argtypes = (c_int,)
self.assertEqual(self._dll.tf_i(-2147483646), -715827882)
self.assertEqual(self.S(), -2147483646)
def test_int_plus(self):
self._dll.tf_bi.restype = c_int
self._dll.tf_bi.argtypes = (c_byte, c_int)
self.assertEqual(self._dll.tf_bi(0, -2147483646), -715827882)
self.assertEqual(self.S(), -2147483646)
def test_uint(self):
self._dll.tf_I.restype = c_uint
self._dll.tf_I.argtypes = (c_uint,)
self.assertEqual(self._dll.tf_I(4294967295), 1431655765)
self.assertEqual(self.U(), 4294967295)
def test_uint_plus(self):
self._dll.tf_bI.restype = c_uint
self._dll.tf_bI.argtypes = (c_byte, c_uint)
self.assertEqual(self._dll.tf_bI(0, 4294967295), 1431655765)
self.assertEqual(self.U(), 4294967295)
def test_long(self):
self._dll.tf_l.restype = c_long
self._dll.tf_l.argtypes = (c_long,)
self.assertEqual(self._dll.tf_l(-2147483646), -715827882)
self.assertEqual(self.S(), -2147483646)
def test_long_plus(self):
self._dll.tf_bl.restype = c_long
self._dll.tf_bl.argtypes = (c_byte, c_long)
self.assertEqual(self._dll.tf_bl(0, -2147483646), -715827882)
self.assertEqual(self.S(), -2147483646)
def test_ulong(self):
self._dll.tf_L.restype = c_ulong
self._dll.tf_L.argtypes = (c_ulong,)
self.assertEqual(self._dll.tf_L(4294967295), 1431655765)
self.assertEqual(self.U(), 4294967295)
def test_ulong_plus(self):
self._dll.tf_bL.restype = c_ulong
self._dll.tf_bL.argtypes = (c_char, c_ulong)
self.assertEqual(self._dll.tf_bL(b' ', 4294967295), 1431655765)
self.assertEqual(self.U(), 4294967295)
def test_longlong(self):
self._dll.tf_q.restype = c_longlong
self._dll.tf_q.argtypes = (c_longlong, )
self.assertEqual(self._dll.tf_q(-9223372036854775806), -3074457345618258602)
self.assertEqual(self.S(), -9223372036854775806)
def test_longlong_plus(self):
self._dll.tf_bq.restype = c_longlong
self._dll.tf_bq.argtypes = (c_byte, c_longlong)
self.assertEqual(self._dll.tf_bq(0, -9223372036854775806), -3074457345618258602)
self.assertEqual(self.S(), -9223372036854775806)
def test_ulonglong(self):
self._dll.tf_Q.restype = c_ulonglong
self._dll.tf_Q.argtypes = (c_ulonglong, )
self.assertEqual(self._dll.tf_Q(18446744073709551615), 6148914691236517205)
self.assertEqual(self.U(), 18446744073709551615)
def test_ulonglong_plus(self):
self._dll.tf_bQ.restype = c_ulonglong
self._dll.tf_bQ.argtypes = (c_byte, c_ulonglong)
self.assertEqual(self._dll.tf_bQ(0, 18446744073709551615), 6148914691236517205)
self.assertEqual(self.U(), 18446744073709551615)
def test_float(self):
self._dll.tf_f.restype = c_float
self._dll.tf_f.argtypes = (c_float,)
self.assertEqual(self._dll.tf_f(-42.), -14.)
self.assertEqual(self.S(), -42)
def test_float_plus(self):
self._dll.tf_bf.restype = c_float
self._dll.tf_bf.argtypes = (c_byte, c_float)
self.assertEqual(self._dll.tf_bf(0, -42.), -14.)
self.assertEqual(self.S(), -42)
def test_double(self):
self._dll.tf_d.restype = c_double
self._dll.tf_d.argtypes = (c_double,)
self.assertEqual(self._dll.tf_d(42.), 14.)
self.assertEqual(self.S(), 42)
def test_double_plus(self):
self._dll.tf_bd.restype = c_double
self._dll.tf_bd.argtypes = (c_byte, c_double)
self.assertEqual(self._dll.tf_bd(0, 42.), 14.)
self.assertEqual(self.S(), 42)
def test_longdouble(self):
self._dll.tf_D.restype = c_longdouble
self._dll.tf_D.argtypes = (c_longdouble,)
self.assertEqual(self._dll.tf_D(42.), 14.)
self.assertEqual(self.S(), 42)
def test_longdouble_plus(self):
self._dll.tf_bD.restype = c_longdouble
self._dll.tf_bD.argtypes = (c_byte, c_longdouble)
self.assertEqual(self._dll.tf_bD(0, 42.), 14.)
self.assertEqual(self.S(), 42)
def test_callwithresult(self):
def process_result(result):
return result * 2
self._dll.tf_i.restype = process_result
self._dll.tf_i.argtypes = (c_int,)
self.assertEqual(self._dll.tf_i(42), 28)
self.assertEqual(self.S(), 42)
self.assertEqual(self._dll.tf_i(-42), -28)
self.assertEqual(self.S(), -42)
def test_void(self):
self._dll.tv_i.restype = None
self._dll.tv_i.argtypes = (c_int,)
self.assertEqual(self._dll.tv_i(42), None)
self.assertEqual(self.S(), 42)
self.assertEqual(self._dll.tv_i(-42), None)
self.assertEqual(self.S(), -42)
# The following repeats the above tests with stdcall functions (where
# they are available)
try:
WinDLL
except NameError:
def stdcall_dll(*_): pass
else:
class stdcall_dll(WinDLL):
def __getattr__(self, name):
if name[:2] == '__' and name[-2:] == '__':
raise AttributeError(name)
func = self._FuncPtr(("s_" + name, self))
setattr(self, name, func)
return func
@need_symbol('WinDLL')
class stdcallCFunctions(CFunctions):
_dll = stdcall_dll(_ctypes_test.__file__)
if __name__ == '__main__':
unittest.main()
| 37.051643 | 89 | 0.628865 | [
"Apache-2.0"
] | BirknerAlex/cloudbase-init-installer-1 | Python36_x64_Template/Lib/ctypes/test/test_cfuncs.py | 7,892 | Python |
# coding: utf-8
# Copyright (c) 2016, 2022, Oracle and/or its affiliates. All rights reserved.
# This software is dual-licensed to you under the Universal Permissive License (UPL) 1.0 as shown at https://oss.oracle.com/licenses/upl or Apache License 2.0 as shown at http://www.apache.org/licenses/LICENSE-2.0. You may choose either license.
from oci.util import formatted_flat_dict, NONE_SENTINEL, value_allowed_none_or_none_sentinel # noqa: F401
from oci.decorators import init_model_state_from_kwargs
@init_model_state_from_kwargs
class WorkRequestLogEntryCollection(object):
"""
Wrapper object for an array of WorkRequestLogEntry objects.
"""
def __init__(self, **kwargs):
"""
Initializes a new WorkRequestLogEntryCollection object with values from keyword arguments.
The following keyword arguments are supported (corresponding to the getters/setters of this class):
:param items:
The value to assign to the items property of this WorkRequestLogEntryCollection.
:type items: list[oci.network_load_balancer.models.WorkRequestLogEntry]
"""
self.swagger_types = {
'items': 'list[WorkRequestLogEntry]'
}
self.attribute_map = {
'items': 'items'
}
self._items = None
@property
def items(self):
"""
Gets the items of this WorkRequestLogEntryCollection.
An array of WorkRequestLogEntry objects.
:return: The items of this WorkRequestLogEntryCollection.
:rtype: list[oci.network_load_balancer.models.WorkRequestLogEntry]
"""
return self._items
@items.setter
def items(self, items):
"""
Sets the items of this WorkRequestLogEntryCollection.
An array of WorkRequestLogEntry objects.
:param items: The items of this WorkRequestLogEntryCollection.
:type: list[oci.network_load_balancer.models.WorkRequestLogEntry]
"""
self._items = items
def __repr__(self):
return formatted_flat_dict(self)
def __eq__(self, other):
if other is None:
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
return not self == other
| 32 | 245 | 0.680458 | [
"Apache-2.0",
"BSD-3-Clause"
] | LaudateCorpus1/oci-python-sdk | src/oci/network_load_balancer/models/work_request_log_entry_collection.py | 2,272 | Python |
class Solution:
def twoSum(self, nums, target):
"""
:type nums: List[int]
:type target: int
:rtype: List[int]
"""
        # map each value to its index, then pair each element with the index of
        # its complement; the default `i` in lookup.get(target-v, i) prevents an
        # element from being paired with itself (returned indices are 1-based)
        lookup = dict(((v, i) for i, v in enumerate(nums)))
return next(( (i+1, lookup.get(target-v)+1)
for i, v in enumerate(nums)
if lookup.get(target-v, i) != i), None)
a = Solution()
print(a.twoSum([2, 11, 7, 15],9))
# The simpler the problem, the more careful you have to be
"MIT"
] | mengyangbai/leetcode | array/twosum.py | 459 | Python |
#
# @lc app=leetcode id=102 lang=python3
#
# [102] Binary Tree Level Order Traversal
#
# @lc code=start
# Definition for a binary tree node.
class TreeNode:
def __init__(self, val=0, left=None, right=None):
self.val = val
self.left = left
self.right = right
from typing import List, Optional
class Solution:
def levelOrder(self, root: Optional[TreeNode]) -> List[List[int]]:
ans = []
        def _larger(n):
            # grow `ans` so that a bucket exists for depth `n`
            for i in range(len(ans), n + 1):
                ans.append([])
def _traversal(node, depth):
            if node is not None:
_larger(depth)
tmp = ans[depth]
tmp.append(node.val)
_traversal(node.left, depth + 1)
_traversal(node.right, depth + 1)
_traversal(root, 0)
return ans
# @lc code=end
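# Example: for the tree [3, 9, 20, None, None, 15, 7] the traversal returns
# [[3], [9, 20], [15, 7]].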
| 26.151515 | 70 | 0.543453 | [
"MIT"
] | ryderfang/LeetCode | Difficulty/Medium/102.binary-tree-level-order-traversal.py | 863 | Python |
# --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
from azure.cli.command_modules.monitor.util import get_operator_map, get_aggregation_map
from knack.log import get_logger
logger = get_logger(__name__)
def create_metric_alert(client, resource_group_name, rule_name, scopes, condition, disabled=False, description=None,
tags=None, actions=None, severity=2, window_size='5m', evaluation_frequency='1m',
auto_mitigate=None):
from azure.mgmt.monitor.models import (MetricAlertResource,
MetricAlertSingleResourceMultipleMetricCriteria,
MetricAlertMultipleResourceMultipleMetricCriteria)
# generate names for the conditions
for i, cond in enumerate(condition):
cond.name = 'cond{}'.format(i)
criteria = None
target_resource_type = None
target_resource_region = None
if len(scopes) == 1:
criteria = MetricAlertSingleResourceMultipleMetricCriteria(all_of=condition)
else:
criteria = MetricAlertMultipleResourceMultipleMetricCriteria(all_of=condition)
target_resource_type = _parse_resource_type(scopes)
target_resource_region = 'global'
kwargs = {
'description': description,
'severity': severity,
'enabled': not disabled,
'scopes': scopes,
'evaluation_frequency': evaluation_frequency,
'window_size': window_size,
'criteria': criteria,
'target_resource_type': target_resource_type,
'target_resource_region': target_resource_region,
'actions': actions,
'tags': tags,
'location': 'global',
'auto_mitigate': auto_mitigate
}
return client.create_or_update(resource_group_name, rule_name, MetricAlertResource(**kwargs))
def update_metric_alert(instance, scopes=None, description=None, enabled=None, tags=None,
severity=None, window_size=None, evaluation_frequency=None, auto_mitigate=None,
add_actions=None, remove_actions=None, add_conditions=None, remove_conditions=None):
if scopes is not None:
instance.scopes = scopes
if description is not None:
instance.description = description
if enabled is not None:
instance.enabled = enabled
if tags is not None:
instance.tags = tags
if severity is not None:
instance.severity = severity
if window_size is not None:
instance.window_size = window_size
if evaluation_frequency is not None:
instance.evaluation_frequency = evaluation_frequency
if auto_mitigate is not None:
instance.auto_mitigate = auto_mitigate
# process action removals
if remove_actions is not None:
instance.actions = [x for x in instance.actions if x.action_group_id.lower() not in remove_actions]
# process action additions
if add_actions is not None:
for action in add_actions:
match = next(
(x for x in instance.actions if action.action_group_id.lower() == x.action_group_id.lower()), None
)
if match:
match.webhook_properties = action.webhook_properties
else:
instance.actions.append(action)
# process condition removals
if remove_conditions is not None:
instance.criteria.all_of = [x for x in instance.criteria.all_of if x.name not in remove_conditions]
def _get_next_name():
i = 0
while True:
possible_name = 'cond{}'.format(i)
match = next((x for x in instance.criteria.all_of if x.name == possible_name), None)
if match:
i = i + 1
continue
return possible_name
# process condition additions
if add_conditions is not None:
for condition in add_conditions:
condition.name = _get_next_name()
instance.criteria.all_of.append(condition)
return instance
def list_metric_alerts(client, resource_group_name=None):
if resource_group_name:
return client.list_by_resource_group(resource_group_name)
return client.list_by_subscription()
def create_metric_rule(client, resource_group_name, rule_name, target, condition, description=None, disabled=False,
location=None, tags=None, email_service_owners=False, actions=None):
from azure.mgmt.monitor.models import AlertRuleResource, RuleEmailAction
condition.data_source.resource_uri = target
custom_emails, webhooks, _ = _parse_actions(actions)
actions = [
RuleEmailAction(send_to_service_owners=email_service_owners, custom_emails=custom_emails)
] + (webhooks or [])
rule = AlertRuleResource(
location=location, alert_rule_resource_name=rule_name, is_enabled=not disabled,
condition=condition, tags=tags, description=description, actions=actions)
return client.create_or_update(resource_group_name, rule_name, rule)
def update_metric_rule(instance, target=None, condition=None, description=None, enabled=None, metric=None,
operator=None, threshold=None, aggregation=None, period=None, tags=None,
email_service_owners=None, add_actions=None, remove_actions=None):
# Update general properties
if description is not None:
instance.description = description
if enabled is not None:
instance.is_enabled = enabled
if tags is not None:
instance.tags = tags
# Update conditions
if condition is not None:
target = target or instance.condition.data_source.resource_uri
instance.condition = condition
if metric is not None:
instance.condition.data_source.metric_name = metric
if operator is not None:
instance.condition.operator = get_operator_map()[operator]
if threshold is not None:
instance.condition.threshold = threshold
if aggregation is not None:
instance.condition.time_aggregation = get_aggregation_map()[aggregation]
if period is not None:
instance.condition.window_size = period
if target is not None:
instance.condition.data_source.resource_uri = target
# Update actions
emails, webhooks, curr_email_service_owners = _parse_actions(instance.actions)
# process removals
if remove_actions is not None:
removed_emails, removed_webhooks = _parse_action_removals(remove_actions)
emails = [x for x in emails if x not in removed_emails]
webhooks = [x for x in webhooks if x.service_uri not in removed_webhooks]
# process additions
if add_actions is not None:
added_emails, added_webhooks, _ = _parse_actions(add_actions)
emails = list(set(emails) | set(added_emails))
webhooks = webhooks + added_webhooks
# Replace the existing actions array. This potentially restructures rules that were created
# via other methods (Portal, ARM template). However, the functionality of these rules should
# be the same.
from azure.mgmt.monitor.models import RuleEmailAction
if email_service_owners is None:
email_service_owners = curr_email_service_owners
actions = [RuleEmailAction(send_to_service_owners=email_service_owners, custom_emails=emails)] + webhooks
instance.actions = actions
return instance
def _parse_actions(actions):
""" Actions come in as a combined list. This method separates the webhook actions into a
separate collection and combines any number of email actions into a single email collection
and a single value for `email_service_owners`. If any email action contains a True value
for `send_to_service_owners` then it is assumed the entire value should be True. """
from azure.mgmt.monitor.models import RuleEmailAction, RuleWebhookAction
actions = actions or []
email_service_owners = None
webhooks = [x for x in actions if isinstance(x, RuleWebhookAction)]
custom_emails = set()
for action in actions:
if isinstance(action, RuleEmailAction):
if action.send_to_service_owners:
email_service_owners = True
custom_emails = custom_emails | set(action.custom_emails)
return list(custom_emails), webhooks, email_service_owners
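# Illustrative example for _parse_actions above (hypothetical values): given one
# RuleEmailAction with send_to_service_owners=True and
# custom_emails=['ops@example.com'] plus one RuleWebhookAction, it returns
# (['ops@example.com'], [<the webhook action>], True).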
def _parse_action_removals(actions):
""" Separates the combined list of keys to remove into webhooks and emails. """
flattened = list({x for sublist in actions for x in sublist})
emails = []
webhooks = []
for item in flattened:
if item.startswith('http://') or item.startswith('https://'):
webhooks.append(item)
else:
emails.append(item)
return emails, webhooks
def _parse_resource_type(scopes):
from msrestazure.tools import parse_resource_id
from azure.cli.core import CLIError
namespace = None
resource_type = None
for item in scopes:
item_namespace = parse_resource_id(item)['namespace']
item_resource_type = parse_resource_id(item)['resource_type']
if namespace is None and resource_type is None:
namespace = item_namespace
resource_type = item_resource_type
else:
if namespace != item_namespace or resource_type != item_resource_type:
raise CLIError('Multiple scopes should be the same resource type.')
return namespace + '/' + resource_type
| 42.447826 | 116 | 0.679402 | [
"MIT"
] | 21m57/azure-cli | src/azure-cli/azure/cli/command_modules/monitor/operations/metric_alert.py | 9,763 | Python |
# -*- coding: utf-8 -*-
# cox regression
if __name__ == "__main__":
import pandas as pd
import time
import numpy as np
from lifelines import CoxPHFitter
from lifelines.datasets import load_rossi, load_regression_dataset
reps = 1
df = load_rossi()
df = pd.concat([df] * reps)
cp_breslow = CoxPHFitter(penalizer=0.01, l1_ratio=0.0, baseline_estimation_method="breslow")
start_time = time.time()
cp_breslow.fit(df, duration_col="week", event_col="arrest", show_progress=True)
print("--- %s seconds ---" % (time.time() - start_time))
cp_breslow.print_summary(2)
print(cp_breslow.score(df))
print(cp_breslow.score(df, scoring_method="concordance_index"))
| 31.043478 | 96 | 0.689076 | [
"MIT"
] | ManuelaS/lifelines | perf_tests/cp_perf_test.py | 714 | Python |
# Copyright 2010 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import datetime
import uuid
from oslo_serialization import jsonutils
from oslo_utils import netutils
from oslo_utils import timeutils
import routes
import six
from six.moves import range
import webob
import webob.dec
import webob.request
from nova.api import auth as api_auth
from nova.api import openstack as openstack_api
from nova.api.openstack import api_version_request as api_version
from nova.api.openstack import auth
from nova.api.openstack import compute
from nova.api.openstack.compute.legacy_v2 import limits
from nova.api.openstack.compute import versions
from nova.api.openstack import urlmap
from nova.api.openstack import wsgi as os_wsgi
from nova.compute import api as compute_api
from nova.compute import flavors
from nova.compute import vm_states
from nova import context
from nova.db.sqlalchemy import models
from nova import exception as exc
import nova.netconf
from nova.network import api as network_api
from nova import objects
from nova.objects import base
from nova import quota
from nova.tests.unit import fake_block_device
from nova.tests.unit import fake_network
from nova.tests.unit.objects import test_keypair
from nova import utils
from nova import wsgi
QUOTAS = quota.QUOTAS
FAKE_UUID = 'aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa'
FAKE_UUIDS = {}
class Context(object):
pass
class FakeRouter(wsgi.Router):
def __init__(self, ext_mgr=None):
pass
@webob.dec.wsgify
def __call__(self, req):
res = webob.Response()
res.status = '200'
res.headers['X-Test-Success'] = 'True'
return res
@webob.dec.wsgify
def fake_wsgi(self, req):
return self.application
def wsgi_app(inner_app_v2=None, fake_auth_context=None,
use_no_auth=False, ext_mgr=None, init_only=None):
if not inner_app_v2:
inner_app_v2 = compute.APIRouter(ext_mgr, init_only)
if use_no_auth:
api_v2 = openstack_api.FaultWrapper(auth.NoAuthMiddleware(
limits.RateLimitingMiddleware(inner_app_v2)))
else:
if fake_auth_context is not None:
ctxt = fake_auth_context
else:
ctxt = context.RequestContext('fake', 'fake', auth_token=True)
api_v2 = openstack_api.FaultWrapper(api_auth.InjectContext(ctxt,
limits.RateLimitingMiddleware(inner_app_v2)))
mapper = urlmap.URLMap()
mapper['/v2'] = api_v2
mapper['/v1.1'] = api_v2
mapper['/'] = openstack_api.FaultWrapper(versions.Versions())
return mapper
def wsgi_app_v21(inner_app_v21=None, fake_auth_context=None,
use_no_auth=False, ext_mgr=None, init_only=None, v2_compatible=False):
if not inner_app_v21:
inner_app_v21 = compute.APIRouterV21(init_only)
if v2_compatible:
inner_app_v21 = openstack_api.LegacyV2CompatibleWrapper(inner_app_v21)
if use_no_auth:
api_v21 = openstack_api.FaultWrapper(auth.NoAuthMiddlewareV3(
limits.RateLimitingMiddleware(inner_app_v21)))
else:
if fake_auth_context is not None:
ctxt = fake_auth_context
else:
ctxt = context.RequestContext('fake', 'fake', auth_token=True)
api_v21 = openstack_api.FaultWrapper(api_auth.InjectContext(ctxt,
limits.RateLimitingMiddleware(inner_app_v21)))
mapper = urlmap.URLMap()
mapper['/v2'] = api_v21
mapper['/v2.1'] = api_v21
mapper['/'] = openstack_api.FaultWrapper(versions.Versions())
return mapper
def stub_out_key_pair_funcs(stubs, have_key_pair=True, **kwargs):
def key_pair(context, user_id):
return [dict(test_keypair.fake_keypair,
name='key', public_key='public_key', **kwargs)]
def one_key_pair(context, user_id, name):
if name == 'key':
return dict(test_keypair.fake_keypair,
name='key', public_key='public_key', **kwargs)
else:
raise exc.KeypairNotFound(user_id=user_id, name=name)
def no_key_pair(context, user_id):
return []
if have_key_pair:
stubs.Set(nova.db, 'key_pair_get_all_by_user', key_pair)
stubs.Set(nova.db, 'key_pair_get', one_key_pair)
else:
stubs.Set(nova.db, 'key_pair_get_all_by_user', no_key_pair)
def stub_out_rate_limiting(stubs):
def fake_rate_init(self, app):
super(limits.RateLimitingMiddleware, self).__init__(app)
self.application = app
v2_limits = nova.api.openstack.compute.legacy_v2.limits
stubs.Set(v2_limits.RateLimitingMiddleware, '__init__', fake_rate_init)
stubs.Set(v2_limits.RateLimitingMiddleware, '__call__', fake_wsgi)
def stub_out_instance_quota(stubs, allowed, quota, resource='instances'):
def fake_reserve(context, **deltas):
requested = deltas.pop(resource, 0)
if requested > allowed:
quotas = dict(instances=1, cores=1, ram=1)
quotas[resource] = quota
usages = dict(instances=dict(in_use=0, reserved=0),
cores=dict(in_use=0, reserved=0),
ram=dict(in_use=0, reserved=0))
usages[resource]['in_use'] = (quotas[resource] * 0.9 -
allowed)
usages[resource]['reserved'] = quotas[resource] * 0.1
raise exc.OverQuota(overs=[resource], quotas=quotas,
usages=usages)
stubs.Set(QUOTAS, 'reserve', fake_reserve)
def stub_out_networking(stubs):
def get_my_ip():
return '127.0.0.1'
stubs.Set(netutils, 'get_my_ipv4', get_my_ip)
def stub_out_compute_api_snapshot(stubs):
def snapshot(self, context, instance, name, extra_properties=None):
# emulate glance rejecting image names which are too long
if len(name) > 256:
raise exc.Invalid
return dict(id='123', status='ACTIVE', name=name,
properties=extra_properties)
stubs.Set(compute_api.API, 'snapshot', snapshot)
class stub_out_compute_api_backup(object):
def __init__(self, stubs):
self.stubs = stubs
self.extra_props_last_call = None
stubs.Set(compute_api.API, 'backup', self.backup)
def backup(self, context, instance, name, backup_type, rotation,
extra_properties=None):
self.extra_props_last_call = extra_properties
props = dict(backup_type=backup_type,
rotation=rotation)
props.update(extra_properties or {})
return dict(id='123', status='ACTIVE', name=name, properties=props)
def stub_out_nw_api_get_instance_nw_info(stubs, num_networks=1, func=None):
fake_network.stub_out_nw_api_get_instance_nw_info(stubs)
def stub_out_nw_api(stubs, cls=None, private=None, publics=None):
if not private:
private = '192.168.0.3'
if not publics:
publics = ['1.2.3.4']
class Fake(object):
def __init__(self, skip_policy_check=False):
pass
def get_instance_nw_info(*args, **kwargs):
pass
def get_floating_ips_by_fixed_address(*args, **kwargs):
return publics
def validate_networks(self, context, networks, max_count):
return max_count
def create_pci_requests_for_sriov_ports(self, context,
system_metadata,
requested_networks):
pass
if cls is None:
cls = Fake
stubs.Set(network_api, 'API', cls)
fake_network.stub_out_nw_api_get_instance_nw_info(stubs)
class FakeToken(object):
id_count = 0
def __getitem__(self, key):
return getattr(self, key)
def __init__(self, **kwargs):
FakeToken.id_count += 1
self.id = FakeToken.id_count
for k, v in six.iteritems(kwargs):
setattr(self, k, v)
class FakeRequestContext(context.RequestContext):
def __init__(self, *args, **kwargs):
kwargs['auth_token'] = kwargs.get('auth_token', 'fake_auth_token')
super(FakeRequestContext, self).__init__(*args, **kwargs)
class HTTPRequest(os_wsgi.Request):
@staticmethod
def blank(*args, **kwargs):
kwargs['base_url'] = 'http://localhost/v2'
use_admin_context = kwargs.pop('use_admin_context', False)
version = kwargs.pop('version', os_wsgi.DEFAULT_API_VERSION)
out = os_wsgi.Request.blank(*args, **kwargs)
out.environ['nova.context'] = FakeRequestContext('fake_user', 'fake',
is_admin=use_admin_context)
out.api_version_request = api_version.APIVersionRequest(version)
return out
class HTTPRequestV21(os_wsgi.Request):
@staticmethod
def blank(*args, **kwargs):
kwargs['base_url'] = 'http://localhost/v3'
use_admin_context = kwargs.pop('use_admin_context', False)
version = kwargs.pop('version', os_wsgi.DEFAULT_API_VERSION)
out = os_wsgi.Request.blank(*args, **kwargs)
out.api_version_request = api_version.APIVersionRequest(version)
out.environ['nova.context'] = FakeRequestContext('fake_user', 'fake',
is_admin=use_admin_context)
return out
class TestRouter(wsgi.Router):
def __init__(self, controller, mapper=None):
if not mapper:
mapper = routes.Mapper()
mapper.resource("test", "tests",
controller=os_wsgi.Resource(controller))
super(TestRouter, self).__init__(mapper)
class TestRouterV21(wsgi.Router):
def __init__(self, controller, mapper=None):
if not mapper:
mapper = routes.Mapper()
mapper.resource("test", "tests",
controller=os_wsgi.ResourceV21(controller))
super(TestRouterV21, self).__init__(mapper)
class FakeAuthDatabase(object):
data = {}
@staticmethod
def auth_token_get(context, token_hash):
return FakeAuthDatabase.data.get(token_hash, None)
@staticmethod
def auth_token_create(context, token):
fake_token = FakeToken(created_at=timeutils.utcnow(), **token)
FakeAuthDatabase.data[fake_token.token_hash] = fake_token
FakeAuthDatabase.data['id_%i' % fake_token.id] = fake_token
return fake_token
@staticmethod
def auth_token_destroy(context, token_id):
token = FakeAuthDatabase.data.get('id_%i' % token_id)
if token and token.token_hash in FakeAuthDatabase.data:
del FakeAuthDatabase.data[token.token_hash]
del FakeAuthDatabase.data['id_%i' % token_id]
class FakeRateLimiter(object):
def __init__(self, application):
self.application = application
@webob.dec.wsgify
def __call__(self, req):
return self.application
def create_info_cache(nw_cache):
if nw_cache is None:
pub0 = ('192.168.1.100',)
pub1 = ('2001:db8:0:1::1',)
def _ip(ip):
return {'address': ip, 'type': 'fixed'}
nw_cache = [
{'address': 'aa:aa:aa:aa:aa:aa',
'id': 1,
'network': {'bridge': 'br0',
'id': 1,
'label': 'test1',
'subnets': [{'cidr': '192.168.1.0/24',
'ips': [_ip(ip) for ip in pub0]},
{'cidr': 'b33f::/64',
'ips': [_ip(ip) for ip in pub1]}]}}]
if not isinstance(nw_cache, six.string_types):
nw_cache = jsonutils.dumps(nw_cache)
return {
"info_cache": {
"network_info": nw_cache,
"deleted": False,
"created_at": None,
"deleted_at": None,
"updated_at": None,
}
}
def get_fake_uuid(token=0):
if token not in FAKE_UUIDS:
FAKE_UUIDS[token] = str(uuid.uuid4())
return FAKE_UUIDS[token]
def fake_instance_get(**kwargs):
def _return_server(context, uuid, columns_to_join=None, use_slave=False):
return stub_instance(1, **kwargs)
return _return_server
def fake_compute_get(**kwargs):
def _return_server_obj(context, uuid, want_objects=False,
expected_attrs=None):
return stub_instance_obj(context, **kwargs)
return _return_server_obj
def fake_actions_to_locked_server(self, context, instance, *args, **kwargs):
raise exc.InstanceIsLocked(instance_uuid=instance['uuid'])
def fake_instance_get_all_by_filters(num_servers=5, **kwargs):
def _return_servers(context, *args, **kwargs):
servers_list = []
marker = None
limit = None
found_marker = False
if "marker" in kwargs:
marker = kwargs["marker"]
if "limit" in kwargs:
limit = kwargs["limit"]
if 'columns_to_join' in kwargs:
kwargs.pop('columns_to_join')
if 'use_slave' in kwargs:
kwargs.pop('use_slave')
if 'sort_keys' in kwargs:
kwargs.pop('sort_keys')
if 'sort_dirs' in kwargs:
kwargs.pop('sort_dirs')
for i in range(num_servers):
uuid = get_fake_uuid(i)
server = stub_instance(id=i + 1, uuid=uuid,
**kwargs)
servers_list.append(server)
if marker is not None and uuid == marker:
found_marker = True
servers_list = []
if marker is not None and not found_marker:
raise exc.MarkerNotFound(marker=marker)
if limit is not None:
servers_list = servers_list[:limit]
return servers_list
return _return_servers
def fake_compute_get_all(num_servers=5, **kwargs):
def _return_servers_objs(context, search_opts=None, limit=None,
marker=None, want_objects=False,
expected_attrs=None, sort_keys=None,
sort_dirs=None):
db_insts = fake_instance_get_all_by_filters()(None,
limit=limit,
marker=marker)
expected = ['metadata', 'system_metadata', 'flavor',
'info_cache', 'security_groups']
return base.obj_make_list(context, objects.InstanceList(),
objects.Instance, db_insts,
expected_attrs=expected)
return _return_servers_objs
def stub_instance(id=1, user_id=None, project_id=None, host=None,
node=None, vm_state=None, task_state=None,
reservation_id="", uuid=FAKE_UUID, image_ref="10",
flavor_id="1", name=None, key_name='',
access_ipv4=None, access_ipv6=None, progress=0,
auto_disk_config=False, display_name=None,
include_fake_metadata=True, config_drive=None,
power_state=None, nw_cache=None, metadata=None,
security_groups=None, root_device_name=None,
limit=None, marker=None,
launched_at=timeutils.utcnow(),
terminated_at=timeutils.utcnow(),
availability_zone='', locked_by=None, cleaned=False,
memory_mb=0, vcpus=0, root_gb=0, ephemeral_gb=0,
instance_type=None, launch_index=0, kernel_id="",
ramdisk_id="", user_data=None):
if user_id is None:
user_id = 'fake_user'
if project_id is None:
project_id = 'fake_project'
if metadata:
metadata = [{'key': k, 'value': v} for k, v in metadata.items()]
elif include_fake_metadata:
metadata = [models.InstanceMetadata(key='seq', value=str(id))]
else:
metadata = []
inst_type = flavors.get_flavor_by_flavor_id(int(flavor_id))
sys_meta = flavors.save_flavor_info({}, inst_type)
if host is not None:
host = str(host)
if key_name:
key_data = 'FAKE'
else:
key_data = ''
if security_groups is None:
security_groups = [{"id": 1, "name": "test", "description": "Foo:",
"project_id": "project", "user_id": "user",
"created_at": None, "updated_at": None,
"deleted_at": None, "deleted": False}]
# ReservationID isn't sent back, hack it in there.
server_name = name or "server%s" % id
if reservation_id != "":
server_name = "reservation_%s" % (reservation_id, )
info_cache = create_info_cache(nw_cache)
if instance_type is None:
instance_type = flavors.get_default_flavor()
flavorinfo = jsonutils.dumps({
'cur': instance_type.obj_to_primitive(),
'old': None,
'new': None,
})
instance = {
"id": int(id),
"created_at": datetime.datetime(2010, 10, 10, 12, 0, 0),
"updated_at": datetime.datetime(2010, 11, 11, 11, 0, 0),
"deleted_at": datetime.datetime(2010, 12, 12, 10, 0, 0),
"deleted": None,
"user_id": user_id,
"project_id": project_id,
"image_ref": image_ref,
"kernel_id": kernel_id,
"ramdisk_id": ramdisk_id,
"launch_index": launch_index,
"key_name": key_name,
"key_data": key_data,
"config_drive": config_drive,
"vm_state": vm_state or vm_states.BUILDING,
"task_state": task_state,
"power_state": power_state,
"memory_mb": memory_mb,
"vcpus": vcpus,
"root_gb": root_gb,
"ephemeral_gb": ephemeral_gb,
"ephemeral_key_uuid": None,
"hostname": display_name or server_name,
"host": host,
"node": node,
"instance_type_id": 1,
"instance_type": inst_type,
"user_data": user_data,
"reservation_id": reservation_id,
"mac_address": "",
"launched_at": launched_at,
"terminated_at": terminated_at,
"availability_zone": availability_zone,
"display_name": display_name or server_name,
"display_description": "",
"locked": locked_by is not None,
"locked_by": locked_by,
"metadata": metadata,
"access_ip_v4": access_ipv4,
"access_ip_v6": access_ipv6,
"uuid": uuid,
"progress": progress,
"auto_disk_config": auto_disk_config,
"name": "instance-%s" % id,
"shutdown_terminate": True,
"disable_terminate": False,
"security_groups": security_groups,
"root_device_name": root_device_name,
"system_metadata": utils.dict_to_metadata(sys_meta),
"pci_devices": [],
"vm_mode": "",
"default_swap_device": "",
"default_ephemeral_device": "",
"launched_on": "",
"cell_name": "",
"architecture": "",
"os_type": "",
"extra": {"numa_topology": None,
"pci_requests": None,
"flavor": flavorinfo,
},
"cleaned": cleaned}
instance.update(info_cache)
instance['info_cache']['instance_uuid'] = instance['uuid']
return instance
def stub_instance_obj(ctxt, *args, **kwargs):
db_inst = stub_instance(*args, **kwargs)
expected = ['metadata', 'system_metadata', 'flavor',
'info_cache', 'security_groups']
inst = objects.Instance._from_db_object(ctxt, objects.Instance(),
db_inst,
expected_attrs=expected)
inst.fault = None
return inst
def stub_volume(id, **kwargs):
volume = {
'id': id,
'user_id': 'fakeuser',
'project_id': 'fakeproject',
'host': 'fakehost',
'size': 1,
'availability_zone': 'fakeaz',
'instance_uuid': 'fakeuuid',
'mountpoint': '/',
'status': 'fakestatus',
'attach_status': 'attached',
'name': 'vol name',
'display_name': 'displayname',
'display_description': 'displaydesc',
'created_at': datetime.datetime(1999, 1, 1, 1, 1, 1),
'snapshot_id': None,
'volume_type_id': 'fakevoltype',
'volume_metadata': [],
'volume_type': {'name': 'vol_type_name'}}
volume.update(kwargs)
return volume
def stub_volume_create(self, context, size, name, description, snapshot,
**param):
vol = stub_volume('1')
vol['size'] = size
vol['display_name'] = name
vol['display_description'] = description
try:
vol['snapshot_id'] = snapshot['id']
except (KeyError, TypeError):
vol['snapshot_id'] = None
vol['availability_zone'] = param.get('availability_zone', 'fakeaz')
return vol
def stub_volume_update(self, context, *args, **param):
pass
def stub_volume_delete(self, context, *args, **param):
pass
def stub_volume_get(self, context, volume_id):
return stub_volume(volume_id)
def stub_volume_notfound(self, context, volume_id):
raise exc.VolumeNotFound(volume_id=volume_id)
def stub_volume_get_all(context, search_opts=None):
return [stub_volume(100, project_id='fake'),
stub_volume(101, project_id='superfake'),
stub_volume(102, project_id='superduperfake')]
def stub_volume_check_attach(self, context, *args, **param):
pass
def stub_snapshot(id, **kwargs):
snapshot = {
'id': id,
'volume_id': 12,
'status': 'available',
'volume_size': 100,
'created_at': timeutils.utcnow(),
'display_name': 'Default name',
'display_description': 'Default description',
'project_id': 'fake'
}
snapshot.update(kwargs)
return snapshot
def stub_snapshot_create(self, context, volume_id, name, description):
return stub_snapshot(100, volume_id=volume_id, display_name=name,
display_description=description)
def stub_compute_volume_snapshot_create(self, context, volume_id, create_info):
return {'snapshot': {'id': 100, 'volumeId': volume_id}}
def stub_snapshot_delete(self, context, snapshot_id):
if snapshot_id == '-1':
raise exc.SnapshotNotFound(snapshot_id=snapshot_id)
def stub_compute_volume_snapshot_delete(self, context, volume_id, snapshot_id,
delete_info):
pass
def stub_snapshot_get(self, context, snapshot_id):
if snapshot_id == '-1':
raise exc.SnapshotNotFound(snapshot_id=snapshot_id)
return stub_snapshot(snapshot_id)
def stub_snapshot_get_all(self, context):
return [stub_snapshot(100, project_id='fake'),
stub_snapshot(101, project_id='superfake'),
stub_snapshot(102, project_id='superduperfake')]
def stub_bdm_get_all_by_instance(context, instance_uuid, use_slave=False):
return [fake_block_device.FakeDbBlockDeviceDict(
{'id': 1, 'source_type': 'volume', 'destination_type': 'volume',
'volume_id': 'volume_id1', 'instance_uuid': instance_uuid}),
fake_block_device.FakeDbBlockDeviceDict(
{'id': 2, 'source_type': 'volume', 'destination_type': 'volume',
'volume_id': 'volume_id2', 'instance_uuid': instance_uuid})]
def fake_get_available_languages():
existing_translations = ['en_GB', 'en_AU', 'de', 'zh_CN', 'en_US']
return existing_translations
def fake_not_implemented(*args, **kwargs):
raise NotImplementedError()
| 33.100551 | 79 | 0.624402 | [
"Apache-2.0"
] | isabella232/nova | nova/tests/unit/api/openstack/fakes.py | 24,031 | Python |
# made by @Eviral
from . import *
@bot.on(admin_cmd(pattern="indanime(.*)"))
async def xd(event):
    await event.edit("Wishing a Happy Independence Day to all 🇮🇳🇮🇳...")
event.pattern_match.group(1)
async for tele in borg.iter_dialogs():
if tele.is_group:
chat = tele.id
lol = 0
done = 0
try:
await bot.send_message(
chat,
'⣿⣿⣿⣿⣿⣍⠀⠉⠻⠟⠻⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿\n⣿⣿⣿⣿⣿⡇⠀⠀⠀⠀⣰⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿\n⣿⣿⣿⣿⣿⣿⠓⠀⠀⢒⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿\n⣿⣿⣿⣿⡿⠃⠀⠀⠀⠀⠈⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⡿⠿⣿\n⣿⡿⠋⠋⠀⠀⠀⠀⠀⠀⠈⠙⠻⢿⢿⣿⣿⡿⣿⣿⡟⠋⠀⢀⣩\n⣿⣿⡄⠀⠀⠀⠀⠀⠁⡀⠀⠀⠀⠀⠈⠉⠛⢷⣭⠉⠁⠀⠀⣿⣿\n⣇⣀. INDIA🇮🇳INDIA🇮🇳⠆⠠..⠘⢷⣿⣿⣛⠐⣶⣿⣿\n⣿⣄⠀⣰⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠐⢀⣠⣿⣿⣿⣾⣿⣿⣿\n⣿⣿⣿⣿⠀⠀⠀⠀⡠⠀⠀⠀⠀⠀⢀⣠⣿⣿⣿⣿⣿⣿⣿⣿⣿\n⣿⣿⣿⣿⠀⠀⠀⠀⠀⠀⠀⠄⠀⣤⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿\n⣿⣿⣿⣿⡄⠀⠀⠀⠀⠀⣠⣤⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿\n⣿⣿⣿⣿⣿⠀⠀⠂⠀⠀⢿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿\n⣿⣿⣿⣿⣿⣇⠀⠀⠀⢠⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿\n⣿⣿⣿⣿⣿⣿⡆⠀⢀⣼⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿\n⣿⣿⣿⣿⣿⣿⣿⣦⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿\n\n[нαρργ ιи∂ρєи∂єиϲє ∂αγ🇮🇳](https://t.me/FirexSupport)',
)
done += 1
            except Exception:
lol += 1
await event.reply(
            'Happy Independence Day 🇮🇳 from FIREX support\nThanks for using this plugin.'
)
CmdHelp("indanime").add_command("indanime", None, "Wish u happy indpendamce day").add()
| 38.225806 | 488 | 0.357806 | [
"MIT"
] | aksr-aashish/FIREXUSERBOT | userbot/plugins/indanime.py | 1,969 | Python |
"""Support for Aqualink temperature sensors."""
from __future__ import annotations
from openpeerpower.components.sensor import DOMAIN, SensorEntity
from openpeerpower.config_entries import ConfigEntry
from openpeerpower.const import DEVICE_CLASS_TEMPERATURE, TEMP_CELSIUS, TEMP_FAHRENHEIT
from openpeerpower.core import OpenPeerPower
from . import AqualinkEntity
from .const import DOMAIN as AQUALINK_DOMAIN
PARALLEL_UPDATES = 0
async def async_setup_entry(
opp: OpenPeerPower, config_entry: ConfigEntry, async_add_entities
) -> None:
"""Set up discovered sensors."""
devs = []
for dev in opp.data[AQUALINK_DOMAIN][DOMAIN]:
devs.append(OppAqualinkSensor(dev))
async_add_entities(devs, True)
class OppAqualinkSensor(AqualinkEntity, SensorEntity):
"""Representation of a sensor."""
@property
def name(self) -> str:
"""Return the name of the sensor."""
return self.dev.label
@property
def unit_of_measurement(self) -> str | None:
"""Return the measurement unit for the sensor."""
if self.dev.name.endswith("_temp"):
if self.dev.system.temp_unit == "F":
return TEMP_FAHRENHEIT
return TEMP_CELSIUS
return None
@property
def state(self) -> str | None:
"""Return the state of the sensor."""
if self.dev.state == "":
return None
try:
state = int(self.dev.state)
except ValueError:
state = float(self.dev.state)
return state
@property
def device_class(self) -> str | None:
"""Return the class of the sensor."""
if self.dev.name.endswith("_temp"):
return DEVICE_CLASS_TEMPERATURE
return None
| 29.166667 | 87 | 0.662286 | [
"Apache-2.0"
] | OpenPeerPower/core | openpeerpower/components/iaqualink/sensor.py | 1,750 | Python |
"""
Copyright (C) 2020 Piek Solutions LLC
Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met:
1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution.
3. Neither the name of the copyright holder nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""
import requests
import binascii
from codecs import getencoder
import time
def enforce_hex(addr):
    if type(addr) == int and addr < 256:
        return format(addr, 'x')
    elif type(addr) == str:
        # strip only an explicit '0x' prefix; lstrip('0x') would also remove
        # legitimate leading zeros (e.g. '0f' would become 'f')
        return addr[2:] if addr.startswith('0x') else addr
else:
raise ValueError('addr must be hex string or int < 256')
def scanI2c(ip):
"""
scans devices on i2c bus
:return: list of hex string addresses present on i2c bus
"""
try:
req_url = 'http://' + ip + '/i2c/scan'
resp = requests.get(url=req_url)
return resp.content.decode('utf-8')
except ValueError:
print("i2c failed scan")
class I2cHttpDevice:
def __init__(self, ip, dev_addr):
# device address should be hex string
self.url = 'http://' + ip + '/i2c/'
self.dev_addr = enforce_hex(dev_addr)
def read(self, reg_addr, len_read):
"""
read len_read bytes starting from register reg_addr
:param reg_addr: (str) register address to read in hex
:param len_read: (int) number of bytes to read
:return: bytestring of data
"""
assert len_read < 256, "num of bytes to read cannot exceed 255"
hex_reg_addr = enforce_hex(reg_addr)
try:
req_url = '%sread/%s/%s/%d' % (self.url, self.dev_addr, hex_reg_addr, len_read)
resp = requests.get(url=req_url)
return binascii.a2b_hex(resp.content)
except ValueError:
print("i2c failed read")
def write(self, reg_addr, data, len_data=0):
"""
:param reg_addr: (str) register address to write to in hex
:param data: (str or bytes) hex-encoded bytes, ie: '014ce8'
:param len_data: (optional int) dummy variable to support code portability
:return: None
"""
hex_reg_addr = enforce_hex(reg_addr)
if type(data) == bytes:
# to work across python 2+3:
# https://izziswift.com/whats-the-correct-way-to-convert-bytes-to-a-hex-string-in-python-3/
data = getencoder('hex')(data)[0].decode('ascii')
try:
req_url = '%swrite/%s/%s/%s' % (self.url, self.dev_addr, hex_reg_addr, data)
requests.get(url=req_url)
except ValueError:
print("i2c device 0x%s failed write" % self.dev_addr)
class BME280(I2cHttpDevice):
"""
Bosch BME280
https://www.bosch-sensortec.com/media/boschsensortec/downloads/datasheets/bst-bme280-ds002.pdf
code adapted from BME280.py, http://abyz.me.uk/rpi/pigpio/examples.html (2016-08-05)
This example shows that porting the original code to use the Wifi
Papaya Controller is straightforward and minimal
"""
_calib00 = 0x88
_T1 = 0x88 - _calib00
_T2 = 0x8A - _calib00
_T3 = 0x8C - _calib00
_P1 = 0x8E - _calib00
_P2 = 0x90 - _calib00
_P3 = 0x92 - _calib00
_P4 = 0x94 - _calib00
_P5 = 0x96 - _calib00
_P6 = 0x98 - _calib00
_P7 = 0x9A - _calib00
_P8 = 0x9C - _calib00
_P9 = 0x9E - _calib00
_H1 = 0xA1 - _calib00
_chip_id = 0xD0
_reset = 0xE0
_calib26 = 0xE1
_H2 = 0xE1 - _calib26
_H3 = 0xE3 - _calib26
_xE4 = 0xE4 - _calib26
_xE5 = 0xE5 - _calib26
_xE6 = 0xE6 - _calib26
_H6 = 0xE7 - _calib26
_ctrl_hum = 0xF2
_status = 0xF3
_ctrl_meas = 0xF4
_config = 0xF5
_rawdata = 0xF7
_press = 0xF7
_temp = 0xFA
_humid = 0xFD
_p_msb = 0xF7 - _rawdata
_p_lsb = 0xF8 - _rawdata
_p_xlsb = 0xF9 - _rawdata
_t_msb = 0xFA - _rawdata
_t_lsb = 0xFB - _rawdata
_t_xlsb = 0xFC - _rawdata
_h_msb = 0xFD - _rawdata
_h_lsb = 0xFE - _rawdata
_os_ms = [0, 1, 2, 4, 8, 16]
def __init__(self, i2c_conn, gpib_addr, sampling):
super().__init__(i2c_conn, gpib_addr)
# additional initialization procedure
self.sampling = sampling
self._load_calibration()
self.measure_delay = self._measurement_time(sampling, sampling, sampling)
self.t_fine = 0.0
def _s16(self, _calib, off):
v = self._u16(_calib, off)
if v > 32767:
v -= 65536
return v
def _u16(self, _calib, off):
return _calib[off] | (_calib[off + 1] << 8)
def _u8(self, _calib, off):
return _calib[off]
def _s8(self, _calib, off):
v = self._u8(_calib, off)
if v > 127:
v -= 256
return v
def _measurement_time(self, os_temp, os_press, os_hum):
ms = ((1.25 + 2.3 * self._os_ms[os_temp]) +
(0.575 + 2.3 * self._os_ms[os_press]) +
(0.575 + 2.3 * self._os_ms[os_hum]))
return ms / 1000.0
def _load_calibration(self):
d1 = self.read(self._calib00, 26)
self.T1 = self._u16(d1, self._T1)
self.T2 = self._s16(d1, self._T2)
self.T3 = self._s16(d1, self._T3)
self.P1 = self._u16(d1, self._P1)
self.P2 = self._s16(d1, self._P2)
self.P3 = self._s16(d1, self._P3)
self.P4 = self._s16(d1, self._P4)
self.P5 = self._s16(d1, self._P5)
self.P6 = self._s16(d1, self._P6)
self.P7 = self._s16(d1, self._P7)
self.P8 = self._s16(d1, self._P8)
self.P9 = self._s16(d1, self._P9)
self.H1 = self._u8(d1, self._H1)
d2 = self.read(self._calib26, 7)
self.H2 = self._s16(d2, self._H2)
self.H3 = self._u8(d2, self._H3)
t = self._u8(d2, self._xE5)
t_l = t & 15
t_h = (t >> 4) & 15
self.H4 = (self._u8(d2, self._xE4) << 4) | t_l
if self.H4 > 2047:
self.H4 -= 4096
self.H5 = (self._u8(d2, self._xE6) << 4) | t_h
if self.H5 > 2047:
self.H5 -= 4096
self.H6 = self._s8(d2, self._H6)
def _read_raw_data(self):
# write control bytes for oversampling config
self.write(self._ctrl_hum, bytes([self.sampling]), 1)
self.write(self._ctrl_meas, bytes([self.sampling << 5 | self.sampling << 2 | 1]), 1)
time.sleep(self.measure_delay)
# read 8 bytes starting from register self._rawdata
d = self.read(self._rawdata, 8)
# print(''.join(format(x, '02x') for x in d))
msb = d[self._t_msb]
lsb = d[self._t_lsb]
xlsb = d[self._t_xlsb]
raw_t = ((msb << 16) | (lsb << 8) | xlsb) >> 4
msb = d[self._p_msb]
lsb = d[self._p_lsb]
xlsb = d[self._p_xlsb]
raw_p = ((msb << 16) | (lsb << 8) | xlsb) >> 4
msb = d[self._h_msb]
lsb = d[self._h_lsb]
raw_h = (msb << 8) | lsb
return raw_t, raw_p, raw_h
def read_temp(self):
# write measurement control byte
self.write(self._ctrl_meas, bytes([self.sampling << 5 | self.sampling << 2 | 1]), 1)
time.sleep(self.measure_delay)
# read 3 bytes starting from register self._temp
d = self.read(self._temp, 3)
# print(''.join(format(x, '02x') for x in d))
msb, lsb, xlsb = d
raw_t = ((msb << 16) | (lsb << 8) | xlsb) >> 4
var1 = (raw_t / 16384.0 - (self.T1) / 1024.0) * float(self.T2)
var2 = (((raw_t) / 131072.0 - (self.T1) / 8192.0) *
((raw_t) / 131072.0 - (self.T1) / 8192.0)) * (self.T3)
self.t_fine = var1 + var2
t = (var1 + var2) / 5120.0
return t
def read_data(self):
raw_t, raw_p, raw_h = self._read_raw_data()
var1 = (raw_t / 16384.0 - (self.T1) / 1024.0) * float(self.T2)
var2 = (((raw_t) / 131072.0 - (self.T1) / 8192.0) *
((raw_t) / 131072.0 - (self.T1) / 8192.0)) * (self.T3)
self.t_fine = var1 + var2
t = (var1 + var2) / 5120.0
var1 = (self.t_fine / 2.0) - 64000.0
var2 = var1 * var1 * self.P6 / 32768.0
var2 = var2 + (var1 * self.P5 * 2.0)
var2 = (var2 / 4.0) + (self.P4 * 65536.0)
var1 = ((self.P3 * var1 * var1 / 524288.0) + (self.P2 * var1)) / 524288.0
var1 = (1.0 + var1 / 32768.0) * self.P1
if var1 != 0.0:
p = 1048576.0 - raw_p
p = (p - (var2 / 4096.0)) * 6250.0 / var1
var1 = self.P9 * p * p / 2147483648.0
var2 = p * self.P8 / 32768.0
p = p + (var1 + var2 + self.P7) / 16.0
else:
p = 0
h = self.t_fine - 76800.0
h = ((raw_h - ((self.H4) * 64.0 + (self.H5) / 16384.0 * h)) *
((self.H2) / 65536.0 * (1.0 + (self.H6) / 67108864.0 * h *
(1.0 + (self.H3) / 67108864.0 * h))))
h = h * (1.0 - self.H1 * h / 524288.0)
if h > 100.0:
h = 100.0
elif h < 0.0:
h = 0.0
return t, p, h
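# Illustrative usage sketch (not part of the original module).  The controller
# IP, the 0x76 device address and the oversampling value are assumptions made
# for this example only.
def _example_read_bme280():
    ip = "192.168.1.50"            # hypothetical Papaya controller address
    print(scanI2c(ip))             # list addresses present on the i2c bus
    sensor = BME280(ip, "76", 1)   # BME280 commonly answers at 0x76; 1 = 1x oversampling
    t, p, h = sensor.read_data()
    print("%.2f degC, %.1f Pa, %.1f %%RH" % (t, p, h))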
| 33.512739 | 756 | 0.566949 | [
"BSD-3-Clause"
] | papaya-iot/papaya-examples | python/papaya_i2chttpinst.py | 10,523 | Python |
# You should modify this for your own use.
# In particular, set the FQDN to your domain name, and
# pick and set a secure SECRET_KEY. If you are going
# to run HA, you will want to modify the SQLALCHEMY
# variables to point to your shared server rather than
# SQLite3.
import os
ENV = os.environ.get("ENV", "dev")
SECRET_KEY = 'top-secret'
SQLALCHEMY_DATABASE_URI = 'sqlite:///db.sqlite'
SQLALCHEMY_TRACK_MODIFICATIONS = False
PERMANENT_SESSION_LIFETIME = 60 * 60 * 20
BOOTSTRAP_CDN_FORCE_SSL = True
BOOTSTRAP_SERVE_LOCAL = True
SCHEME = "https"
FQDN = f'fed-{ENV}.bortels.us'
URL = f'{SCHEME}://{FQDN}'
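# Example (illustrative): with ENV unset, the defaults above resolve to
#   FQDN == "fed-dev.bortels.us" and URL == "https://fed-dev.bortels.us";
# running with ENV=prod would give "fed-prod.bortels.us" instead.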
| 30.3 | 54 | 0.745875 | [
"MIT"
] | bortels/awsfed | app/config.py | 606 | Python |
# Copyright 2009-2015 Eucalyptus Systems, Inc.
#
# Redistribution and use of this software in source and binary forms,
# with or without modification, are permitted provided that the following
# conditions are met:
#
# Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from requestbuilder import Arg
from euca2ools.commands.iam import IAMRequest, arg_account_name
class DeleteAccount(IAMRequest):
DESCRIPTION = '[Eucalyptus cloud admin only] Delete an account'
ARGS = [arg_account_name(
help='name or ID of the account to delete (required)'),
Arg('-r', '--recursive', dest='Recursive', action='store_const',
const='true', help='''delete all users, groups, and policies
associated with the account as well''')]
| 49.5 | 78 | 0.743222 | [
"MIT"
] | cirobessa/receitas-aws | paws/lib/python2.7/site-packages/euca2ools-3.4.1_2_g6b3f62f2-py2.7.egg/euca2ools/commands/iam/deleteaccount.py | 1,881 | Python |
# evaluate cnn for monthly car sales dataset
from math import sqrt
from numpy import array
from numpy import mean
from numpy import std
from pandas import DataFrame
from pandas import concat
from pandas import read_csv
from sklearn.metrics import mean_squared_error
from keras.models import Sequential
from keras.layers import Dense
from keras.layers import Flatten
from keras.layers.convolutional import Conv1D
from keras.layers.convolutional import MaxPooling1D
from matplotlib import pyplot
# split a univariate dataset into train/test sets
def train_test_split(data, n_test):
return data[:-n_test], data[-n_test:]
# transform list into supervised learning format
def series_to_supervised(data, n_in, n_out=1):
df = DataFrame(data)
cols = list()
# input sequence (t-n, ... t-1)
for i in range(n_in, 0, -1):
cols.append(df.shift(i))
# forecast sequence (t, t+1, ... t+n)
for i in range(0, n_out):
cols.append(df.shift(-i))
# put it all together
agg = concat(cols, axis=1)
# drop rows with NaN values
agg.dropna(inplace=True)
return agg.values
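# Worked example of series_to_supervised() (illustrative only, not used by the script):
# series_to_supervised([10, 20, 30, 40, 50], n_in=3) returns
#   array([[10., 20., 30., 40.],
#          [20., 30., 40., 50.]])
# i.e. each row is [t-3, t-2, t-1, t] and the last column is the value to predict.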
# root mean squared error or rmse
def measure_rmse(actual, predicted):
return sqrt(mean_squared_error(actual, predicted))
# fit a model
def model_fit(train, config):
# unpack config
n_input, n_filters, n_kernel, n_epochs, n_batch = config
# prepare data
data = series_to_supervised(train, n_input)
train_x, train_y = data[:, :-1], data[:, -1]
train_x = train_x.reshape((train_x.shape[0], train_x.shape[1], 1))
# define model
model = Sequential()
model.add(Conv1D(n_filters, n_kernel, activation='relu', input_shape=(n_input, 1)))
model.add(Conv1D(n_filters, n_kernel, activation='relu'))
model.add(MaxPooling1D())
model.add(Flatten())
model.add(Dense(1))
model.compile(loss='mse', optimizer='adam')
# fit
model.fit(train_x, train_y, epochs=n_epochs, batch_size=n_batch, verbose=0)
return model
# forecast with a pre-fit model
def model_predict(model, history, config):
# unpack config
n_input, _, _, _, _ = config
# prepare data
x_input = array(history[-n_input:]).reshape((1, n_input, 1))
# forecast
yhat = model.predict(x_input, verbose=0)
return yhat[0]
# walk-forward validation for univariate data
def walk_forward_validation(data, n_test, cfg):
predictions = list()
# split dataset
train, test = train_test_split(data, n_test)
# fit model
model = model_fit(train, cfg)
# seed history with training dataset
history = [x for x in train]
# step over each time-step in the test set
for i in range(len(test)):
# fit model and make forecast for history
yhat = model_predict(model, history, cfg)
# store forecast in list of predictions
predictions.append(yhat)
# add actual observation to history for the next loop
history.append(test[i])
# estimate prediction error
error = measure_rmse(test, predictions)
print(' > %.3f' % error)
return error
# repeat evaluation of a config
def repeat_evaluate(data, config, n_test, n_repeats=30):
# fit and evaluate the model n times
scores = [walk_forward_validation(data, n_test, config) for _ in range(n_repeats)]
return scores
# summarize model performance
def summarize_scores(name, scores):
# print a summary
scores_m, score_std = mean(scores), std(scores)
print('%s: %.3f RMSE (+/- %.3f)' % (name, scores_m, score_std))
# box and whisker plot
pyplot.boxplot(scores)
pyplot.show()
series = read_csv('monthly-car-sales.csv', header=0, index_col=0)
data = series.values
# data split
n_test = 12
# define config
config = [36, 256, 3, 100, 100]
# grid search
scores = repeat_evaluate(data, config, n_test)
# summarize scores
summarize_scores('cnn', scores) | 30.974359 | 84 | 0.74117 | [
"MIT"
] | gdepalma93/bright-athlete-academy | Resources/books/deep_learning_time_series_forecasting/code/chapter_14/03_cnn_forecast_model.py | 3,624 | Python |
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import cv2
import numpy as np
from progress.bar import Bar
import time
import torch
import os
try:
from external.nms import soft_nms
except:
print('NMS not imported! If you need it,'
' do \n cd $CenterNet_ROOT/src/lib/external \n make')
from models.decode import ctdet_decode
from models.utils import flip_tensor
from utils.image import get_affine_transform
from utils.post_process import ctdet_post_process
from utils.debugger import Debugger
from .base_detector import BaseDetector
class CtdetDetector(BaseDetector):
def __init__(self, opt):
super(CtdetDetector, self).__init__(opt)
def process(self, images, return_time=False):
with torch.no_grad():
output = self.model(images)[-1]
hm = output['hm'].sigmoid_()
wh = output['wh']
reg = output['reg'] if self.opt.reg_offset else None
if self.opt.flip_test:
hm = (hm[0:1] + flip_tensor(hm[1:2])) / 2
wh = (wh[0:1] + flip_tensor(wh[1:2])) / 2
reg = reg[0:1] if reg is not None else None
torch.cuda.synchronize()
forward_time = time.time()
dets = ctdet_decode(hm, wh, reg=reg, cat_spec_wh=self.opt.cat_spec_wh, K=self.opt.K)
if return_time:
return output, dets, forward_time
else:
return output, dets
def post_process(self, dets, meta, scale=1):
dets = dets.detach().cpu().numpy()
dets = dets.reshape(1, -1, dets.shape[2])
dets = ctdet_post_process(
dets.copy(), [meta['c']], [meta['s']],
meta['out_height'], meta['out_width'], self.opt.num_classes)
for j in range(1, self.num_classes + 1):
dets[0][j] = np.array(dets[0][j], dtype=np.float32).reshape(-1, 5)
dets[0][j][:, :4] /= scale
return dets[0]
def merge_outputs(self, detections):
results = {}
for j in range(1, self.num_classes + 1):
results[j] = np.concatenate(
[detection[j] for detection in detections], axis=0).astype(np.float32)
if len(self.scales) > 1 or self.opt.nms:
soft_nms(results[j], Nt=0.5, method=2)
scores = np.hstack(
[results[j][:, 4] for j in range(1, self.num_classes + 1)])
if len(scores) > self.max_per_image:
kth = len(scores) - self.max_per_image
thresh = np.partition(scores, kth)[kth]
for j in range(1, self.num_classes + 1):
keep_inds = (results[j][:, 4] >= thresh)
results[j] = results[j][keep_inds]
return results
def debug(self, debugger, images, dets, output, scale=1):
detection = dets.detach().cpu().numpy().copy()
detection[:, :, :4] *= self.opt.down_ratio
for i in range(1):
img = images[i].detach().cpu().numpy().transpose(1, 2, 0)
img = ((img * self.std + self.mean) * 255).astype(np.uint8)
pred = debugger.gen_colormap(output['hm'][i].detach().cpu().numpy())
debugger.add_blend_img(img, pred, 'pred_hm_{:.1f}'.format(scale))
debugger.add_img(img, img_id='out_pred_{:.1f}'.format(scale))
for k in range(len(dets[i])):
if detection[i, k, 4] > self.opt.center_thresh:
debugger.add_coco_bbox(detection[i, k, :4], detection[i, k, -1],
detection[i, k, 4],
img_id='out_pred_{:.1f}'.format(scale))
def show_results(self, debugger, image, results):
debugger.add_img(image, img_id='ctdet')
for j in range(1, self.num_classes + 1):
for bbox in results[j]:
if bbox[4] > self.opt.vis_thresh:
debugger.add_coco_bbox(bbox[:4], j - 1, bbox[4], img_id='ctdet')
debugger.show_all_imgs(pause=self.pause)
#prefix = image_name.split('.')[0]
#path = os.path.dirname(self.opt.det_output_path) + '/img'
#debugger.save_all_imgs(path, prefix)
def save_results_only(self, debugger, image, results, image_name):
debugger.add_img(image, img_id='ctdet')
for j in range(1, self.num_classes + 1):
for bbox in results[j]:
if bbox[4] > self.opt.vis_thresh:
debugger.add_coco_bbox(bbox[:4], j - 1, bbox[4], img_id='ctdet')
prefix = image_name.split('.')[0]
path = os.path.dirname(self.opt.det_output_path) + '/img'
debugger.save_all_imgs(path, prefix)
| 38.513514 | 90 | 0.638363 | [
"MIT"
] | Wastoon/XinTong_CenterNet | src/lib/detectors/ctdet.py | 4,275 | Python |
import abc
from abc import ABCMeta
from typing import Callable
from typing import Iterator
from typing import List
from typing import Optional
from xsdata.codegen.models import Attr
from xsdata.codegen.models import Class
from xsdata.models.config import GeneratorConfig
from xsdata.utils.constants import return_true
class ContainerInterface(abc.ABC):
"""Wrap a list of classes and expose a simple api for easy access and
process."""
__slots__ = ("config",)
def __init__(self, config: GeneratorConfig):
self.config = config
@abc.abstractmethod
def __iter__(self) -> Iterator[Class]:
"""Create an iterator for the class map values."""
@abc.abstractmethod
def find(self, qname: str, condition: Callable = return_true) -> Optional[Class]:
"""Search by qualified name for a specific class with an optional
condition callable."""
@abc.abstractmethod
def find_inner(self, source: Class, qname: str) -> Class:
"""Search by qualified name for a specific inner class or fail."""
@abc.abstractmethod
def add(self, item: Class):
"""Add class item to the container."""
@abc.abstractmethod
def extend(self, items: List[Class]):
"""Add a list of classes the container."""
@abc.abstractmethod
def reset(self, item: Class, qname: str):
"""Update the given class qualified name."""
class HandlerInterface(abc.ABC):
"""Class handler interface."""
__slots__ = ()
@abc.abstractmethod
def process(self, target: Class):
"""Process the given target class."""
class RelativeHandlerInterface(HandlerInterface, metaclass=ABCMeta):
"""Class handler interface with access to the complete classes
container."""
__slots__ = "container"
def __init__(self, container: ContainerInterface):
self.container = container
def base_attrs(self, target: Class) -> List[Attr]:
attrs: List[Attr] = []
for extension in target.extensions:
base = self.container.find(extension.type.qname)
assert base is not None
attrs.extend(base.attrs)
attrs.extend(self.base_attrs(base))
return attrs
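# Illustrative only: a minimal, hypothetical HandlerInterface implementation
# (not part of xsdata) showing how a handler receives one class at a time.
class ExampleStripUnderscoresHandler(HandlerInterface):
    """Toy handler that removes leading underscores from attribute names."""
    __slots__ = ()
    def process(self, target: Class):
        for attr in target.attrs:
            attr.name = attr.name.lstrip("_")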
class ContainerHandlerInterface(abc.ABC):
"""Class container."""
__slots__ = "container"
def __init__(self, container: ContainerInterface):
self.container = container
@abc.abstractmethod
def run(self):
"""Run the process for the whole container."""
| 27.25 | 85 | 0.67491 | [
"MIT"
] | amal-khailtash/xsdata | xsdata/codegen/mixins.py | 2,507 | Python |
#!/usr/bin/python
import gdbm
import sys
import os
db_filename = "aclhistory.db"
example_filename = "HOMEOFFICEROLL3_20180521.CSV"
example_status = "D"
if len(sys.argv) != 3:
scriptname = os.path.basename(str(sys.argv[0]))
print "usage:", scriptname, "<FILENAME>", "<STATUS>"
print "\t Pass in the filename and status to be set in the .db file(" + db_filename + ")"
print "\t Example: ", scriptname, example_filename, example_status
print "\t to set file", example_filename, "=", example_status, "in", db_filename
os._exit(1)
file_to_set = str(sys.argv[1])
status_to_set = str(sys.argv[2])
db_file = gdbm.open(db_filename,'c')
for f in db_file.keys():
if f == file_to_set:
print "Updating the key", f
db_file[f] = status_to_set
print "File", f, "State", db_file[f]
| 27 | 90 | 0.702427 | [
"MIT"
] | UKHomeOffice/dq-ssm_ingest | ADT/aclhistory-edit.py | 783 | Python |
from Tree import TreeNode
def zdir(root):
if root[0] == 0:
return 0 #left
elif root[1] ==0:
return 1 #right
else:
return 2 #nothing
def add_nodes(root, nodes):
for i in range(0,len(nodes)):
if nodes[i][0] == root.gfv():
root.addChild(add_nodes(TreeNode(nodes[i], 0), nodes[0:i] + nodes[i+1:]))
elif nodes[i][1] == root.gfv():
root.addChild(add_nodes(TreeNode(nodes[i], 1), nodes[0:i] + nodes[i+1:]))
return root
f = open("Day24DB")
cm = []
for l in f.readlines():
t = l.split('/')
cm.append((int(t[0]), int(t[1]), 0))
cm = sorted(cm, key=sum, reverse=True)
begs = filter(lambda x: x[0] == 0 or x[1] == 0 , cm)
cm = filter(lambda x: not(x[0] == 0 or x[1] == 0), cm)
rts = map(lambda x: TreeNode(x, zdir(x)), begs)
t = TreeNode((0,0),2)
for root in rts:
t.addChild(add_nodes(root, filter(lambda x: not(x[0] == root.l and x[1] == root.r), cm)))
print "Looking for max"
print t.gwgh()
print t.glen()
print t.glnw()
| 23.634146 | 90 | 0.593395 | [
"MIT"
] | JanStoltman/100DaysOfCode | AdventOfCode/Day24.py | 969 | Python |
"""
Django settings for project project.
Generated by 'django-admin startproject' using Django 1.8.1.
For more information on this file, see
https://docs.djangoproject.com/en/1.8/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.8/ref/settings/
"""
import django
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
import os
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.8/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'y*b^6p#z&cm2)8rzgbp2i4k*+rg2h%60l*bmf6hg&ro!z0-ael'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
# SECURITY WARNING: do not use '*' in production unless the app sits behind an HTTP(S) proxy
ALLOWED_HOSTS = ['*']
# Application definition
INSTALLED_APPS = (
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
)
# Django 1
if django.VERSION[0] == 1:
MIDDLEWARE_CLASSES = (
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
'django.middleware.security.SecurityMiddleware',
)
else:
# Django 2+
MIDDLEWARE = (
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
'django.middleware.security.SecurityMiddleware',
)
ROOT_URLCONF = 'project.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'project.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.8/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Internationalization
# https://docs.djangoproject.com/en/1.8/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.8/howto/static-files/
STATIC_URL = '/static/'
STATIC_ROOT = os.path.join(BASE_DIR, 'staticfiles')
| 28.016807 | 74 | 0.695861 | [
"Apache-2.0"
] | AlexMeier99/s2i-python-container | examples/django-different-port-test-app/project/settings.py | 3,334 | Python |
from flask import current_app, render_template
from flask_restful import Resource, reqparse
from flask_mail import Message
from utils.authorizations import admin_required
from models.user import UserModel
class Email(Resource):
NO_REPLY = "[email protected]" # Should this be dwellingly address?
parser = reqparse.RequestParser()
parser.add_argument("user_id", required=True)
parser.add_argument("subject", required=True)
parser.add_argument("body", required=True)
@admin_required
def post(self):
data = Email.parser.parse_args()
user = UserModel.find_by_id(data.user_id)
message = Message(data.subject, sender=Email.NO_REPLY, body=data.body)
message.recipients = [user.email]
current_app.mail.send(message)
return {"message": "Message sent"}
@staticmethod
def send_reset_password_msg(user):
token = user.reset_password_token()
msg = Message(
"Reset password for Dwellingly",
sender=Email.NO_REPLY,
recipients=[user.email],
)
msg.body = render_template("emails/reset_msg.txt", user=user, token=token)
msg.html = render_template("emails/reset_msg.html", user=user, token=token)
current_app.mail.send(msg)
@staticmethod
def send_user_invite_msg(user):
token = user.reset_password_token()
msg = Message(
"Create Your Dwellingly Account",
sender=Email.NO_REPLY,
recipients=[user.email],
)
msg.body = render_template("emails/invite_user_msg.txt", user=user, token=token)
msg.html = render_template(
"emails/invite_user_msg.html", user=user, token=token
)
current_app.mail.send(msg)
| 33.433962 | 88 | 0.667607 | [
"MIT"
] | donovan-PNW/dwellinglybackend | resources/email.py | 1,772 | Python |
# coding=utf-8
"""
API for dataset indexing, access and search.
"""
from __future__ import absolute_import
import logging
from cachetools.func import lru_cache
from datacube import compat
from datacube.model import Dataset, DatasetType, MetadataType
from datacube.utils import InvalidDocException, check_doc_unchanged, jsonify_document, get_doc_changes, contains
from . import fields
from .exceptions import DuplicateRecordError, UnknownFieldError
_LOG = logging.getLogger(__name__)
class MetadataTypeResource(object):
def __init__(self, db):
"""
:type db: datacube.index.postgres._api.PostgresDb
"""
self._db = db
def add(self, definition, allow_table_lock=False):
"""
:type definition: dict
:param allow_table_lock:
Allow an exclusive lock to be taken on the table while creating the indexes.
This will halt other user's requests until completed.
If false, creation will be slightly slower and cannot be done in a transaction.
:rtype: datacube.model.MetadataType
"""
# This column duplication is getting out of hand:
MetadataType.validate(definition)
name = definition['name']
existing = self._db.get_metadata_type_by_name(name)
if existing:
# They've passed us the same one again. Make sure it matches what is stored.
# TODO: Support for adding/updating search fields?
check_doc_unchanged(
existing.definition,
definition,
'Metadata Type {}'.format(name)
)
else:
self._db.add_metadata_type(
name=name,
definition=definition,
concurrently=not allow_table_lock
)
return self.get_by_name(name)
@lru_cache()
def get(self, id_):
"""
:rtype: datacube.model.MetadataType
"""
return self._make(self._db.get_metadata_type(id_))
@lru_cache()
def get_by_name(self, name):
"""
:rtype: datacube.model.MetadataType
"""
record = self._db.get_metadata_type_by_name(name)
if not record:
return None
return self._make(record)
def check_field_indexes(self, allow_table_lock=False, rebuild_all=False):
"""
Create or replace per-field indexes and views.
:param allow_table_lock:
Allow an exclusive lock to be taken on the table while creating the indexes.
This will halt other user's requests until completed.
If false, creation will be slightly slower and cannot be done in a transaction.
"""
self._db.check_dynamic_fields(concurrently=not allow_table_lock, rebuild_all=rebuild_all)
def _make_many(self, query_rows):
return (self._make(c) for c in query_rows)
def _make(self, query_row):
"""
:rtype list[datacube.model.MetadataType]
"""
definition = query_row['definition']
dataset_ = definition['dataset']
return MetadataType(
query_row['name'],
dataset_,
dataset_search_fields=self._db.get_dataset_fields(query_row),
id_=query_row['id']
)
class DatasetTypeResource(object):
"""
:type _db: datacube.index.postgres._api.PostgresDb
:type metadata_type_resource: MetadataTypeResource
"""
def __init__(self, db, metadata_type_resource):
"""
:type db: datacube.index.postgres._api.PostgresDb
:type metadata_type_resource: MetadataTypeResource
"""
self._db = db
self.metadata_type_resource = metadata_type_resource
def from_doc(self, definition):
"""
        Create a Product from its definition
:param dict definition: product definition document
:rtype: datacube.model.DatasetType
"""
# This column duplication is getting out of hand:
DatasetType.validate(definition)
metadata_type = definition['metadata_type']
# They either specified the name of a metadata type, or specified a metadata type.
# Is it a name?
if isinstance(metadata_type, compat.string_types):
metadata_type = self.metadata_type_resource.get_by_name(metadata_type)
else:
# Otherwise they embedded a document, add it if needed:
metadata_type = self.metadata_type_resource.add(metadata_type, allow_table_lock=False)
if not metadata_type:
raise InvalidDocException('Unknown metadata type: %r' % definition['metadata_type'])
return DatasetType(metadata_type, definition)
def add(self, type_):
"""
Add a Product
:param datacube.model.DatasetType type_: Product to add
:rtype: datacube.model.DatasetType
"""
DatasetType.validate(type_.definition)
existing = self._db.get_dataset_type_by_name(type_.name)
if existing:
# TODO: Support for adding/updating match rules?
# They've passed us the same collection again. Make sure it matches what is stored.
check_doc_unchanged(
existing.definition,
jsonify_document(type_.definition),
'Dataset type {}'.format(type_.name)
)
else:
self._db.add_dataset_type(
name=type_.name,
metadata=type_.metadata_doc,
metadata_type_id=type_.metadata_type.id,
definition=type_.definition
)
return self.get_by_name(type_.name)
def update(self, type_, allow_unsafe_updates=False):
"""
Update a product. Unsafe changes will throw a ValueError by default.
(An unsafe change is anything that may potentially make the product
incompatible with existing datasets of that type)
:param datacube.model.DatasetType type_: Product to add
:param allow_unsafe_updates bool: Allow unsafe changes. Use with caution.
:rtype: datacube.model.DatasetType
"""
DatasetType.validate(type_.definition)
existing = self._db.get_dataset_type_by_name(type_.name)
if not existing:
raise ValueError('Unknown product %s, cannot update – did you intend to add it?' % type_.name)
def handle_unsafe(msg):
if not allow_unsafe_updates:
raise ValueError(msg)
else:
_LOG.warning("Ignoring %s", msg)
# We'll probably want to use offsets in the future (ie. nested dicts), not just keys, but for now this suffices.
safe_keys_to_change = ('description', 'metadata')
doc_changes = get_doc_changes(existing.definition, jsonify_document(type_.definition))
for offset, old_value, new_value in doc_changes:
_LOG.info('Changing %s %s: %r -> %r', type_.name, '.'.join(offset), old_value, new_value)
key_name = offset[0]
if key_name not in safe_keys_to_change:
handle_unsafe('Potentially unsafe update: changing %r of product definition.' % key_name)
# You can safely make the match rules looser but not tighter.
if key_name == 'metadata':
# Tightening them could exclude datasets already matched to the product.
# (which would make search results wrong)
if not contains(old_value, new_value, case_sensitive=True):
handle_unsafe('Unsafe update: new product match rules are not a superset of old ones.')
if doc_changes:
_LOG.info("Updating product %s", type_.name)
self._db.update_dataset_type(
name=type_.name,
metadata=type_.metadata_doc,
metadata_type_id=type_.metadata_type.id,
definition=type_.definition
)
# Clear our local cache. Note that other users may still have
# cached copies for the duration of their connections.
self.get_by_name.cache_clear()
self.get.cache_clear()
else:
_LOG.info("No changes detected for product %s", type_.name)
def update_document(self, definition, allow_unsafe_update=False):
"""
        Update a Product using its definition
:param dict definition: product definition document
:rtype: datacube.model.DatasetType
"""
type_ = self.from_doc(definition)
return self.update(type_, allow_unsafe_updates=allow_unsafe_update)
def add_document(self, definition):
"""
        Add a Product using its definition
:param dict definition: product definition document
:rtype: datacube.model.DatasetType
"""
type_ = self.from_doc(definition)
return self.add(type_)
@lru_cache()
def get(self, id_):
"""
Retrieve Product by id
:param int id_: id of the Product
:rtype: datacube.model.DatasetType
"""
return self._make(self._db.get_dataset_type(id_))
@lru_cache()
def get_by_name(self, name):
"""
Retrieve Product by name
:param str name: name of the Product
:rtype: datacube.model.DatasetType
"""
result = self._db.get_dataset_type_by_name(name)
if not result:
return None
return self._make(result)
def get_with_fields(self, field_names):
"""
Return dataset types that have all the given fields.
:param tuple[str] field_names:
:rtype: __generator[DatasetType]
"""
for type_ in self.get_all():
for name in field_names:
if name not in type_.metadata_type.dataset_fields:
break
else:
yield type_
def search(self, **query):
"""
        Return dataset types that match all the given query terms.
:param dict query:
:rtype: __generator[DatasetType]
"""
for type_, q in self.search_robust(**query):
if not q:
yield type_
def search_robust(self, **query):
"""
        Return dataset types that match the match-able query fields, together with a dict of the remaining un-matchable fields.
:param dict query:
:rtype: __generator[(DatasetType, dict)]
"""
for type_ in self.get_all():
q = query.copy()
if q.pop('product', type_.name) != type_.name:
continue
if q.pop('metadata_type', type_.metadata_type.name) != type_.metadata_type.name:
continue
for key, value in list(q.items()):
try:
exprs = fields.to_expressions(type_.metadata_type.dataset_fields.get, **{key: value})
except UnknownFieldError as e:
break
try:
if all(expr.evaluate(type_.metadata_doc) for expr in exprs):
q.pop(key)
else:
break
except (AttributeError, KeyError, ValueError) as e:
continue
else:
yield type_, q
def get_all(self):
"""
Retrieve all Products
:rtype: iter[datacube.model.DatasetType]
"""
return (self._make(record) for record in self._db.get_all_dataset_types())
def _make_many(self, query_rows):
return (self._make(c) for c in query_rows)
def _make(self, query_row):
"""
:rtype datacube.model.DatasetType
"""
return DatasetType(
definition=query_row['definition'],
metadata_type=self.metadata_type_resource.get(query_row['metadata_type_ref']),
id_=query_row['id'],
)
class DatasetResource(object):
"""
:type _db: datacube.index.postgres._api.PostgresDb
:type types: datacube.index._datasets.DatasetTypeResource
"""
def __init__(self, db, dataset_type_resource):
"""
:type db: datacube.index.postgres._api.PostgresDb
:type dataset_type_resource: datacube.index._datasets.DatasetTypeResource
"""
self._db = db
self.types = dataset_type_resource
def get(self, id_, include_sources=False):
"""
Get dataset by id
:param uuid id_: id of the dataset to retrieve
:param bool include_sources: get the full provenance graph?
:rtype: datacube.model.Dataset
"""
if not include_sources:
return self._make(self._db.get_dataset(id_), full_info=True)
datasets = {result['id']: (self._make(result, full_info=True), result)
for result in self._db.get_dataset_sources(id_)}
for dataset, result in datasets.values():
dataset.metadata_doc['lineage']['source_datasets'] = {
classifier: datasets[str(source)][0].metadata_doc
for source, classifier in zip(result['sources'], result['classes']) if source
}
dataset.sources = {
classifier: datasets[str(source)][0]
for source, classifier in zip(result['sources'], result['classes']) if source
}
return datasets[id_][0]
def get_derived(self, id_):
"""
        Get derived datasets
:param uuid id_: dataset id
:rtype: list[datacube.model.Dataset]
"""
return [self._make(result) for result in self._db.get_derived_datasets(id_)]
def has(self, dataset):
"""
Have we already indexed this dataset?
:param datacube.model.Dataset dataset: dataset to check
:rtype: bool
"""
return self._db.contains_dataset(dataset.id)
def add(self, dataset, skip_sources=False):
"""
Ensure a dataset is in the index. Add it if not present.
:param datacube.model.Dataset dataset: dataset to add
:param bool skip_sources: don't attempt to index source (use when sources are already indexed)
:rtype: datacube.model.Dataset
"""
if not skip_sources:
for source in dataset.sources.values():
self.add(source)
was_inserted = False
sources_tmp = dataset.type.dataset_reader(dataset.metadata_doc).sources
dataset.type.dataset_reader(dataset.metadata_doc).sources = {}
try:
_LOG.info('Indexing %s', dataset.id)
with self._db.begin() as transaction:
try:
was_inserted = transaction.insert_dataset(dataset.metadata_doc, dataset.id, dataset.type.id)
for classifier, source_dataset in dataset.sources.items():
transaction.insert_dataset_source(classifier, dataset.id, source_dataset.id)
# try to update location in the same transaction as insertion.
# if insertion fails we'll try updating location later
# if insertion succeeds the location bit can't possibly fail
if dataset.local_uri:
transaction.ensure_dataset_location(dataset.id, dataset.local_uri)
except DuplicateRecordError as e:
_LOG.warning(str(e))
if not was_inserted:
existing = self.get(dataset.id)
if existing:
check_doc_unchanged(
existing.metadata_doc,
jsonify_document(dataset.metadata_doc),
'Dataset {}'.format(dataset.id)
)
# reinsert attempt? try updating the location
if dataset.local_uri:
try:
self._db.ensure_dataset_location(dataset.id, dataset.local_uri)
except DuplicateRecordError as e:
_LOG.warning(str(e))
finally:
dataset.type.dataset_reader(dataset.metadata_doc).sources = sources_tmp
return dataset
def archive(self, ids):
"""
Mark datasets as archived
:param list[uuid] ids: list of dataset ids to archive
"""
with self._db.begin() as transaction:
for id_ in ids:
transaction.archive_dataset(id_)
def restore(self, ids):
"""
Mark datasets as not archived
:param list[uuid] ids: list of dataset ids to restore
"""
with self._db.begin() as transaction:
for id_ in ids:
transaction.restore_dataset(id_)
def get_field_names(self, type_name=None):
"""
:param str type_name:
:rtype: __generator[str]
"""
if type_name is None:
types = self.types.get_all()
else:
types = [self.types.get_by_name(type_name)]
for type_ in types:
for name in type_.metadata_type.dataset_fields:
yield name
def get_locations(self, dataset):
"""
:param datacube.model.Dataset dataset: dataset
:rtype: list[str]
"""
return self._db.get_locations(dataset.id)
def _make(self, dataset_res, full_info=False):
"""
:rtype datacube.model.Dataset
:param bool full_info: Include all available fields
"""
return Dataset(
self.types.get(dataset_res.dataset_type_ref),
dataset_res.metadata,
dataset_res.local_uri,
indexed_by=dataset_res.added_by if full_info else None,
indexed_time=dataset_res.added if full_info else None
)
def _make_many(self, query_result):
"""
:rtype list[datacube.model.Dataset]
"""
return (self._make(dataset) for dataset in query_result)
def search_by_metadata(self, metadata):
"""
Perform a search using arbitrary metadata, returning results as Dataset objects.
Caution – slow! This will usually not use indexes.
:param dict metadata:
:rtype: list[datacube.model.Dataset]
"""
return self._make_many(self._db.search_datasets_by_metadata(metadata))
def search(self, **query):
"""
Perform a search, returning results as Dataset objects.
:param dict[str,str|float|datacube.model.Range] query:
:rtype: __generator[datacube.model.Dataset]
"""
for dataset_type, datasets in self._do_search_by_product(query):
for dataset in self._make_many(datasets):
yield dataset
def search_by_product(self, **query):
"""
Perform a search, returning datasets grouped by product type.
:param dict[str,str|float|datacube.model.Range] query:
:rtype: __generator[(datacube.model.DatasetType, __generator[datacube.model.Dataset])]]
"""
for dataset_type, datasets in self._do_search_by_product(query):
yield dataset_type, self._make_many(datasets)
def count(self, **query):
"""
Perform a search, returning count of results.
:param dict[str,str|float|datacube.model.Range] query:
:rtype: int
"""
# This may be optimised into one query in the future.
result = 0
for product_type, count in self._do_count_by_product(query):
result += count
return result
def count_by_product(self, **query):
"""
Perform a search, returning a count of for each matching product type.
:param dict[str,str|float|datacube.model.Range] query:
:returns: Sequence of (product, count)
:rtype: __generator[(datacube.model.DatasetType, int)]]
"""
return self._do_count_by_product(query)
def count_by_product_through_time(self, period, **query):
"""
Perform a search, returning counts for each product grouped in time slices
of the given period.
:param dict[str,str|float|datacube.model.Range] query:
:param str period: Time range for each slice: '1 month', '1 day' etc.
:returns: For each matching product type, a list of time ranges and their count.
:rtype: __generator[(datacube.model.DatasetType, list[(datetime.datetime, datetime.datetime), int)]]
"""
return self._do_time_count(period, query)
def count_product_through_time(self, period, **query):
"""
Perform a search, returning counts for a single product grouped in time slices
of the given period.
Will raise an error if the search terms match more than one product.
:param dict[str,str|float|datacube.model.Range] query:
:param str period: Time range for each slice: '1 month', '1 day' etc.
:returns: For each matching product type, a list of time ranges and their count.
:rtype: list[(str, list[(datetime.datetime, datetime.datetime), int)]]
"""
return next(self._do_time_count(period, query, ensure_single=True))[1]
def _get_dataset_types(self, q):
types = set()
if 'product' in q.keys():
types.add(self.types.get_by_name(q['product']))
else:
# Otherwise search any metadata type that has all the given search fields.
types = self.types.get_with_fields(tuple(q.keys()))
if not types:
            raise ValueError('No type of dataset has fields: %r' % (tuple(q.keys()),))
return types
def _get_product_queries(self, query):
for dataset_type, q in self.types.search_robust(**query):
q['dataset_type_id'] = dataset_type.id
yield q, dataset_type
def _do_search_by_product(self, query, return_fields=False, with_source_ids=False):
for q, dataset_type in self._get_product_queries(query):
dataset_fields = dataset_type.metadata_type.dataset_fields
query_exprs = tuple(fields.to_expressions(dataset_fields.get, **q))
select_fields = None
if return_fields:
select_fields = tuple(dataset_fields.values())
yield (dataset_type,
self._db.search_datasets(
query_exprs,
select_fields=select_fields,
with_source_ids=with_source_ids
))
def _do_count_by_product(self, query):
for q, dataset_type in self._get_product_queries(query):
dataset_fields = dataset_type.metadata_type.dataset_fields
query_exprs = tuple(fields.to_expressions(dataset_fields.get, **q))
count = self._db.count_datasets(query_exprs)
if count > 0:
yield dataset_type, count
def _do_time_count(self, period, query, ensure_single=False):
if 'time' not in query:
raise ValueError('Counting through time requires a "time" range query argument')
query = dict(query)
start, end = query['time']
del query['time']
product_quries = list(self._get_product_queries(query))
if ensure_single:
if len(product_quries) == 0:
raise ValueError('No products match search terms: %r' % query)
if len(product_quries) > 1:
raise ValueError('Multiple products match single query search: %r' %
([dt.name for q, dt in product_quries],))
for q, dataset_type in product_quries:
dataset_fields = dataset_type.metadata_type.dataset_fields
query_exprs = tuple(fields.to_expressions(dataset_fields.get, **q))
yield dataset_type, list(self._db.count_datasets_through_time(
start,
end,
period,
dataset_fields.get('time'),
query_exprs
))
def search_summaries(self, **query):
"""
Perform a search, returning just the search fields of each dataset.
:param dict[str,str|float|datacube.model.Range] query:
:rtype: dict
"""
for dataset_type, results in self._do_search_by_product(query, return_fields=True):
for columns in results:
yield dict(columns)
def search_eager(self, **query):
"""
Perform a search, returning results as Dataset objects.
:param dict[str,str|float|datacube.model.Range] query:
:rtype: list[datacube.model.Dataset]
"""
return list(self.search(**query))
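# Illustrative usage sketch (hypothetical helper, not part of this module),
# assuming an already-opened ``index`` object that exposes the resources above
# as ``index.datasets``.  The product name and date range are made-up examples.
def _example_queries(index):
    import datetime
    from datacube.model import Range
    time_range = Range(datetime.datetime(2016, 1, 1), datetime.datetime(2016, 2, 1))
    for dataset in index.datasets.search(product='example_product', time=time_range):
        print(dataset.local_uri)
    for product, count in index.datasets.count_by_product(time=time_range):
        print("%s: %d" % (product.name, count))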
| 36.196481 | 120 | 0.606741 | [
"Apache-2.0"
] | cronosnull/agdc-v2 | datacube/index/_datasets.py | 24,690 | Python |
import discord
import io
import aiohttp
from aiohttp import request, ClientSession
from src.embeds.image_embed import ImageEmbed
async def request_canvas_image(ctx, url, member: discord.Member = None, params={}, is_gif=False):
params_url = "&" + "&".join(["{}={}".format(k, v)
for k, v in params.items()]) if params != {} else ""
async with ClientSession() as wastedSession:
async with wastedSession.get(f'{url}?avatar={member.avatar_url_as(format="png", size=1024)}{params_url}') as wastedImage:
imageData = io.BytesIO(await wastedImage.read())
await wastedSession.close()
await ctx.send(file=discord.File(imageData, 'image.gif' if is_gif else 'image.png'))
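# Worked example of the params handling above (illustrative): params={"text": "hi", "size": 512}
# yields params_url == "&text=hi&size=512", which is appended after the avatar query string;
# an empty params dict yields "" and the request URL is left unchanged.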
async def request_image(ctx, url, params={}, key="link", title="Requested image", description="", footer=""):
params_url = "&" + "&".join(["{}={}".format(k, v)
for k, v in params.items()]) if params != {} else ""
async with request("GET", url + params_url) as response:
if response.status == 200:
data = await response.json()
if "caption" in data:
title = data["caption"]
embed = ImageEmbed(
ctx,
title,
description,
footer,
data[key]
)
await embed.send()
else:
await ctx.send(f"API returned a {response.status} status :((")
async def request_text(ctx, url, key, params={}, text_format="{}"):
params_url = "&" + "&".join(["{}={}".format(k, v)
for k, v in params.items()]) if params != {} else ""
print(params_url)
async with request("GET", url + params_url) as response:
if response.status == 200:
data = await response.json()
print(data)
await ctx.send(text_format.format(data[key]))
else:
await ctx.send(f"API returned a {response.status} status :((")
| 40.72 | 129 | 0.556483 | [
"MIT"
] | alejandrodlsp/grogu-bot | src/helpers/api_handler.py | 2,036 | Python |
"""Configuration file for the Sphinx documentation builder.
This file only contains a selection of the most common options. For a full
list see the documentation:
https://www.sphinx-doc.org/en/master/usage/configuration.html
"""
import configparser
# -- Path setup --------------------------------------------------------------
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
# import os
# import sys
# sys.path.insert(0, os.path.abspath('.'))
# Get the global config info as currently stated
# (we use the config file to avoid actually loading any python here)
config = configparser.ConfigParser()
config.read(["../../src/sqlfluff/config.ini"])
stable_version = config.get("sqlfluff", "stable_version")
# -- Project information -----------------------------------------------------
project = "SQLFluff"
copyright = "2019, Alan Cruickshank"
author = "Alan Cruickshank"
# The full version, including alpha/beta/rc tags
release = stable_version
# -- General configuration ---------------------------------------------------
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
# Autodocumentation from docstrings
"sphinx.ext.autodoc",
# Allow Google style docstrings
"sphinx.ext.napoleon",
# Documenting click commands
"sphinx_click.ext",
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ["_templates"]
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This pattern also affects html_static_path and html_extra_path.
exclude_patterns = []
# Master doc
master_doc = "index"
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
add_module_names = False
# -- Options for HTML output -------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = "alabaster"
html_favicon = "favicon-fluff.png"
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ["_static"]
# -- Options for Alabaster Theme ---------------------------------------------
html_theme_options = {
"logo": "images/sqlfluff-lrg.png",
# Icon for iOS shortcuts
"touch_icon": "images/sqlfluff-sm2-sq.png",
"github_user": "sqlfluff",
"github_repo": "sqlfluff",
# Github Fork button
"github_banner": True,
# Github link button
"github_button": True,
# Codecov button
"codecov_button": True,
}
def ultimate_replace(app, docname, source):
"""Replaces variables in docs, including code blocks.
From: https://github.com/sphinx-doc/sphinx/issues/4054#issuecomment-329097229
"""
result = source[0]
for key in app.config.ultimate_replacements:
result = result.replace(key, app.config.ultimate_replacements[key])
source[0] = result
ultimate_replacements = {"|release|": release}
def setup(app):
"""Configures the documentation app."""
app.add_config_value("ultimate_replacements", {}, True)
app.connect("source-read", ultimate_replace)
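# Illustrative effect of ultimate_replace (assumed example): with release "0.6.0",
# an RST source line such as
#     pip install sqlfluff==|release|
# becomes
#     pip install sqlfluff==0.6.0
# even inside literal blocks, which ordinary RST substitutions cannot reach.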
| 32.587719 | 82 | 0.655989 | [
"MIT"
] | Aeolun/sqlfluff | docs/source/conf.py | 3,715 | Python |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright 1999-2021 Alibaba Group Holding Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
from ... import opcodes as OperandDef
from ..utils import infer_dtype
from .core import TensorBinOp
from .utils import arithmetic_operand
@arithmetic_operand(sparse_mode='binary_and')
class TensorHypot(TensorBinOp):
_op_type_ = OperandDef.HYPOT
_func_name = 'hypot'
@infer_dtype(np.hypot)
def hypot(x1, x2, out=None, where=None, **kwargs):
"""
Given the "legs" of a right triangle, return its hypotenuse.
Equivalent to ``sqrt(x1**2 + x2**2)``, element-wise. If `x1` or
`x2` is scalar_like (i.e., unambiguously cast-able to a scalar type),
it is broadcast for use with each element of the other argument.
(See Examples)
Parameters
----------
x1, x2 : array_like
Leg of the triangle(s).
out : Tensor, None, or tuple of Tensor and None, optional
A location into which the result is stored. If provided, it must have
a shape that the inputs broadcast to. If not provided or `None`,
a freshly-allocated array is returned. A tuple (possible only as a
keyword argument) must have length equal to the number of outputs.
where : array_like, optional
Values of True indicate to calculate the ufunc at that position, values
of False indicate to leave the value in the output alone.
**kwargs
Returns
-------
z : Tensor
The hypotenuse of the triangle(s).
Examples
--------
>>> import mars.tensor as mt
>>> mt.hypot(3*mt.ones((3, 3)), 4*mt.ones((3, 3))).execute()
array([[ 5., 5., 5.],
[ 5., 5., 5.],
[ 5., 5., 5.]])
Example showing broadcast of scalar_like argument:
>>> mt.hypot(3*mt.ones((3, 3)), [4]).execute()
array([[ 5., 5., 5.],
[ 5., 5., 5.],
[ 5., 5., 5.]])
"""
op = TensorHypot(**kwargs)
return op(x1, x2, out=out, where=where)
| 32.461538 | 79 | 0.646919 | [
"Apache-2.0"
] | Alfa-Shashank/mars | mars/tensor/arithmetic/hypot.py | 2,532 | Python |
from enum import Enum
class IndexMethod(str, Enum):
"""
Used to specify the index method for a
:class:`Column <piccolo.columns.base.Column>`.
"""
btree = "btree"
hash = "hash"
gist = "gist"
gin = "gin"
def __str__(self):
return f"{self.__class__.__name__}.{self.name}"
def __repr__(self):
return self.__str__()
| 18.6 | 55 | 0.594086 | [
"MIT"
] | gmos/piccolo | piccolo/columns/indexes.py | 372 | Python |
# -*- coding: utf-8 -*-
#
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# DO NOT EDIT! This is a generated sample ("Request", "language_entities_gcs")
# To install the latest published package dependency, execute the following:
# pip install google-cloud-language
# sample-metadata
# title: Analyzing Entities (GCS)
# description: Analyzing Entities in text file stored in Cloud Storage
# usage: python3 samples/v1/language_entities_gcs.py [--gcs_content_uri "gs://cloud-samples-data/language/entity.txt"]
# [START language_entities_gcs]
from google.cloud import language_v1
from google.cloud.language_v1 import enums
def sample_analyze_entities(gcs_content_uri):
"""
Analyzing Entities in text file stored in Cloud Storage
Args:
gcs_content_uri Google Cloud Storage URI where the file content is located.
e.g. gs://[Your Bucket]/[Path to File]
"""
client = language_v1.LanguageServiceClient()
# gcs_content_uri = 'gs://cloud-samples-data/language/entity.txt'
# Available types: PLAIN_TEXT, HTML
type_ = enums.Document.Type.PLAIN_TEXT
# Optional. If not specified, the language is automatically detected.
# For list of supported languages:
# https://cloud.google.com/natural-language/docs/languages
language = "en"
document = {"gcs_content_uri": gcs_content_uri, "type": type_, "language": language}
# Available values: NONE, UTF8, UTF16, UTF32
encoding_type = enums.EncodingType.UTF8
response = client.analyze_entities(document, encoding_type=encoding_type)
# Loop through entitites returned from the API
for entity in response.entities:
print(u"Representative name for the entity: {}".format(entity.name))
# Get entity type, e.g. PERSON, LOCATION, ADDRESS, NUMBER, et al
print(u"Entity type: {}".format(enums.Entity.Type(entity.type).name))
# Get the salience score associated with the entity in the [0, 1.0] range
print(u"Salience score: {}".format(entity.salience))
# Loop over the metadata associated with entity. For many known entities,
# the metadata is a Wikipedia URL (wikipedia_url) and Knowledge Graph MID (mid).
# Some entity types may have additional metadata, e.g. ADDRESS entities
# may have metadata for the address street_name, postal_code, et al.
for metadata_name, metadata_value in entity.metadata.items():
print(u"{}: {}".format(metadata_name, metadata_value))
# Loop over the mentions of this entity in the input document.
# The API currently supports proper noun mentions.
for mention in entity.mentions:
print(u"Mention text: {}".format(mention.text.content))
# Get the mention type, e.g. PROPER for proper noun
print(
u"Mention type: {}".format(enums.EntityMention.Type(mention.type).name)
)
# Get the language of the text, which will be the same as
# the language specified in the request or, if not specified,
# the automatically-detected language.
print(u"Language of the text: {}".format(response.language))
# [END language_entities_gcs]
def main():
import argparse
parser = argparse.ArgumentParser()
parser.add_argument(
"--gcs_content_uri",
type=str,
default="gs://cloud-samples-data/language/entity.txt",
)
args = parser.parse_args()
sample_analyze_entities(args.gcs_content_uri)
if __name__ == "__main__":
main()
| 38.113208 | 120 | 0.70297 | [
"Apache-2.0"
] | MShaffar19/python-language | samples/v1/language_entities_gcs.py | 4,040 | Python |
# coding=utf-8
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import copy
import logging
import math
import torch
import torch.nn as nn
import torch.nn.functional as nnf
from torch.nn import Dropout, Softmax, Linear, Conv3d, LayerNorm
from torch.nn.modules.utils import _pair, _triple
import configs as configs
from torch.distributions.normal import Normal
logger = logging.getLogger(__name__)
ATTENTION_Q = "MultiHeadDotProductAttention_1/query"
ATTENTION_K = "MultiHeadDotProductAttention_1/key"
ATTENTION_V = "MultiHeadDotProductAttention_1/value"
ATTENTION_OUT = "MultiHeadDotProductAttention_1/out"
FC_0 = "MlpBlock_3/Dense_0"
FC_1 = "MlpBlock_3/Dense_1"
ATTENTION_NORM = "LayerNorm_0"
MLP_NORM = "LayerNorm_2"
def np2th(weights, conv=False):
"""Possibly convert HWIO to OIHW."""
if conv:
weights = weights.transpose([3, 2, 0, 1])
return torch.from_numpy(weights)
def swish(x):
return x * torch.sigmoid(x)
ACT2FN = {"gelu": torch.nn.functional.gelu, "relu": torch.nn.functional.relu, "swish": swish}
class Attention(nn.Module):
def __init__(self, config, vis):
super(Attention, self).__init__()
self.vis = vis
self.num_attention_heads = config.transformer["num_heads"]
self.attention_head_size = int(config.hidden_size / self.num_attention_heads)
self.all_head_size = self.num_attention_heads * self.attention_head_size
self.query = Linear(config.hidden_size, self.all_head_size)
self.key = Linear(config.hidden_size, self.all_head_size)
self.value = Linear(config.hidden_size, self.all_head_size)
self.out = Linear(config.hidden_size, config.hidden_size)
self.attn_dropout = Dropout(config.transformer["attention_dropout_rate"])
self.proj_dropout = Dropout(config.transformer["attention_dropout_rate"])
self.softmax = Softmax(dim=-1)
def transpose_for_scores(self, x):
new_x_shape = x.size()[:-1] + (self.num_attention_heads, self.attention_head_size)
x = x.view(*new_x_shape)
return x.permute(0, 2, 1, 3)
def forward(self, hidden_states):
mixed_query_layer = self.query(hidden_states)
mixed_key_layer = self.key(hidden_states)
mixed_value_layer = self.value(hidden_states)
query_layer = self.transpose_for_scores(mixed_query_layer)
key_layer = self.transpose_for_scores(mixed_key_layer)
value_layer = self.transpose_for_scores(mixed_value_layer)
attention_scores = torch.matmul(query_layer, key_layer.transpose(-1, -2))
attention_scores = attention_scores / math.sqrt(self.attention_head_size)
attention_probs = self.softmax(attention_scores)
weights = attention_probs if self.vis else None
attention_probs = self.attn_dropout(attention_probs)
context_layer = torch.matmul(attention_probs, value_layer)
context_layer = context_layer.permute(0, 2, 1, 3).contiguous()
new_context_layer_shape = context_layer.size()[:-2] + (self.all_head_size,)
context_layer = context_layer.view(*new_context_layer_shape)
attention_output = self.out(context_layer)
attention_output = self.proj_dropout(attention_output)
return attention_output, weights
class Mlp(nn.Module):
def __init__(self, config):
super(Mlp, self).__init__()
self.fc1 = Linear(config.hidden_size, config.transformer["mlp_dim"])
self.fc2 = Linear(config.transformer["mlp_dim"], config.hidden_size)
self.act_fn = ACT2FN["gelu"]
self.dropout = Dropout(config.transformer["dropout_rate"])
self._init_weights()
def _init_weights(self):
nn.init.xavier_uniform_(self.fc1.weight)
nn.init.xavier_uniform_(self.fc2.weight)
nn.init.normal_(self.fc1.bias, std=1e-6)
nn.init.normal_(self.fc2.bias, std=1e-6)
def forward(self, x):
x = self.fc1(x)
x = self.act_fn(x)
x = self.dropout(x)
x = self.fc2(x)
x = self.dropout(x)
return x
class Embeddings(nn.Module):
"""Construct the embeddings from patch, position embeddings.
"""
def __init__(self, config, img_size):
super(Embeddings, self).__init__()
self.config = config
down_factor = config.down_factor
patch_size = _triple(config.patches["size"])
n_patches = int((img_size[0]/2**down_factor// patch_size[0]) * (img_size[1]/2**down_factor// patch_size[1]) * (img_size[2]/2**down_factor// patch_size[2]))
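        # Number of transformer tokens: the CNN encoder downsamples each spatial dim by
        # 2**down_factor, then the Conv3d patch embedding tiles the result into patch_size blocks.
        # E.g. (hypothetical values) img_size=(64, 256, 256), down_factor=2, patch_size=(8, 8, 8)
        # gives n_patches = 2 * 8 * 8 = 128.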
self.hybrid_model = CNNEncoder(config, n_channels=2)
in_channels = config['encoder_channels'][-1]
self.patch_embeddings = Conv3d(in_channels=in_channels,
out_channels=config.hidden_size,
kernel_size=patch_size,
stride=patch_size)
self.position_embeddings = nn.Parameter(torch.zeros(1, n_patches, config.hidden_size))
self.dropout = Dropout(config.transformer["dropout_rate"])
def forward(self, x):
x, features = self.hybrid_model(x)
        x = self.patch_embeddings(x)  # (B, hidden, n_patches_l, n_patches_h, n_patches_w)
x = x.flatten(2)
x = x.transpose(-1, -2) # (B, n_patches, hidden)
embeddings = x + self.position_embeddings
embeddings = self.dropout(embeddings)
return embeddings, features
class Block(nn.Module):
def __init__(self, config, vis):
super(Block, self).__init__()
self.hidden_size = config.hidden_size
self.attention_norm = LayerNorm(config.hidden_size, eps=1e-6)
self.ffn_norm = LayerNorm(config.hidden_size, eps=1e-6)
self.ffn = Mlp(config)
self.attn = Attention(config, vis)
def forward(self, x):
h = x
x = self.attention_norm(x)
x, weights = self.attn(x)
x = x + h
h = x
x = self.ffn_norm(x)
x = self.ffn(x)
x = x + h
return x, weights
class Encoder(nn.Module):
def __init__(self, config, vis):
super(Encoder, self).__init__()
self.vis = vis
self.layer = nn.ModuleList()
self.encoder_norm = LayerNorm(config.hidden_size, eps=1e-6)
for _ in range(config.transformer["num_layers"]):
layer = Block(config, vis)
self.layer.append(copy.deepcopy(layer))
def forward(self, hidden_states):
attn_weights = []
for layer_block in self.layer:
hidden_states, weights = layer_block(hidden_states)
if self.vis:
attn_weights.append(weights)
encoded = self.encoder_norm(hidden_states)
return encoded, attn_weights
class Transformer(nn.Module):
def __init__(self, config, img_size, vis):
super(Transformer, self).__init__()
self.embeddings = Embeddings(config, img_size=img_size)
self.encoder = Encoder(config, vis)
def forward(self, input_ids):
embedding_output, features = self.embeddings(input_ids)
encoded, attn_weights = self.encoder(embedding_output) # (B, n_patch, hidden)
return encoded, attn_weights, features
class Conv3dReLU(nn.Sequential):
def __init__(
self,
in_channels,
out_channels,
kernel_size,
padding=0,
stride=1,
use_batchnorm=True,
):
conv = nn.Conv3d(
in_channels,
out_channels,
kernel_size,
stride=stride,
padding=padding,
bias=not (use_batchnorm),
)
relu = nn.ReLU(inplace=True)
bn = nn.BatchNorm3d(out_channels)
super(Conv3dReLU, self).__init__(conv, bn, relu)
class DecoderBlock(nn.Module):
def __init__(
self,
in_channels,
out_channels,
skip_channels=0,
use_batchnorm=True,
):
super().__init__()
self.conv1 = Conv3dReLU(
in_channels + skip_channels,
out_channels,
kernel_size=3,
padding=1,
use_batchnorm=use_batchnorm,
)
self.conv2 = Conv3dReLU(
out_channels,
out_channels,
kernel_size=3,
padding=1,
use_batchnorm=use_batchnorm,
)
self.up = nn.Upsample(scale_factor=2, mode='trilinear', align_corners=False)
def forward(self, x, skip=None):
x = self.up(x)
if skip is not None:
x = torch.cat([x, skip], dim=1)
x = self.conv1(x)
x = self.conv2(x)
return x
class DecoderCup(nn.Module):
def __init__(self, config, img_size):
super().__init__()
self.config = config
self.down_factor = config.down_factor
head_channels = config.conv_first_channel
self.img_size = img_size
self.conv_more = Conv3dReLU(
config.hidden_size,
head_channels,
kernel_size=3,
padding=1,
use_batchnorm=True,
)
decoder_channels = config.decoder_channels
in_channels = [head_channels] + list(decoder_channels[:-1])
out_channels = decoder_channels
self.patch_size = _triple(config.patches["size"])
skip_channels = self.config.skip_channels
blocks = [
DecoderBlock(in_ch, out_ch, sk_ch) for in_ch, out_ch, sk_ch in zip(in_channels, out_channels, skip_channels)
]
self.blocks = nn.ModuleList(blocks)
def forward(self, hidden_states, features=None):
        B, n_patch, hidden = hidden_states.size()  # reshape from (B, n_patch, hidden) to (B, hidden, l, h, w)
l, h, w = (self.img_size[0]//2**self.down_factor//self.patch_size[0]), (self.img_size[1]//2**self.down_factor//self.patch_size[1]), (self.img_size[2]//2**self.down_factor//self.patch_size[2])
x = hidden_states.permute(0, 2, 1)
x = x.contiguous().view(B, hidden, l, h, w)
x = self.conv_more(x)
for i, decoder_block in enumerate(self.blocks):
if features is not None:
skip = features[i] if (i < self.config.n_skip) else None
#print(skip.shape)
else:
skip = None
x = decoder_block(x, skip=skip)
return x
class SpatialTransformer(nn.Module):
"""
N-D Spatial Transformer
Obtained from https://github.com/voxelmorph/voxelmorph
"""
def __init__(self, size, mode='bilinear'):
super().__init__()
self.mode = mode
# create sampling grid
vectors = [torch.arange(0, s) for s in size]
grids = torch.meshgrid(vectors)
grid = torch.stack(grids)
grid = torch.unsqueeze(grid, 0)
grid = grid.type(torch.FloatTensor)
# registering the grid as a buffer cleanly moves it to the GPU, but it also
# adds it to the state dict. this is annoying since everything in the state dict
# is included when saving weights to disk, so the model files are way bigger
# than they need to be. so far, there does not appear to be an elegant solution.
# see: https://discuss.pytorch.org/t/how-to-register-buffer-without-polluting-state-dict
self.register_buffer('grid', grid)
def forward(self, src, flow):
# new locations
new_locs = self.grid + flow
shape = flow.shape[2:]
# need to normalize grid values to [-1, 1] for resampler
for i in range(len(shape)):
new_locs[:, i, ...] = 2 * (new_locs[:, i, ...] / (shape[i] - 1) - 0.5)
# move channels dim to last position
# also not sure why, but the channels need to be reversed
if len(shape) == 2:
new_locs = new_locs.permute(0, 2, 3, 1)
new_locs = new_locs[..., [1, 0]]
elif len(shape) == 3:
new_locs = new_locs.permute(0, 2, 3, 4, 1)
new_locs = new_locs[..., [2, 1, 0]]
return nnf.grid_sample(src, new_locs, align_corners=True, mode=self.mode)
class DoubleConv(nn.Module):
"""(convolution => [BN] => ReLU) * 2"""
def __init__(self, in_channels, out_channels, mid_channels=None):
super().__init__()
if not mid_channels:
mid_channels = out_channels
self.double_conv = nn.Sequential(
nn.Conv3d(in_channels, mid_channels, kernel_size=3, padding=1),
nn.ReLU(inplace=True),
nn.Conv3d(mid_channels, out_channels, kernel_size=3, padding=1),
nn.ReLU(inplace=True)
)
def forward(self, x):
return self.double_conv(x)
class Down(nn.Module):
"""Downscaling with maxpool then double conv"""
def __init__(self, in_channels, out_channels):
super().__init__()
self.maxpool_conv = nn.Sequential(
nn.MaxPool3d(2),
DoubleConv(in_channels, out_channels)
)
def forward(self, x):
return self.maxpool_conv(x)
class CNNEncoder(nn.Module):
def __init__(self, config, n_channels=2):
super(CNNEncoder, self).__init__()
self.n_channels = n_channels
decoder_channels = config.decoder_channels
encoder_channels = config.encoder_channels
self.down_num = config.down_num
self.inc = DoubleConv(n_channels, encoder_channels[0])
self.down1 = Down(encoder_channels[0], encoder_channels[1])
self.down2 = Down(encoder_channels[1], encoder_channels[2])
self.width = encoder_channels[-1]
def forward(self, x):
features = []
x1 = self.inc(x)
features.append(x1)
x2 = self.down1(x1)
features.append(x2)
feats = self.down2(x2)
features.append(feats)
feats_down = feats
for i in range(self.down_num):
feats_down = nn.MaxPool3d(2)(feats_down)
features.append(feats_down)
return feats, features[::-1]
class RegistrationHead(nn.Sequential):
def __init__(self, in_channels, out_channels, kernel_size=3, upsampling=1):
conv3d = nn.Conv3d(in_channels, out_channels, kernel_size=kernel_size, padding=kernel_size // 2)
conv3d.weight = nn.Parameter(Normal(0, 1e-5).sample(conv3d.weight.shape))
conv3d.bias = nn.Parameter(torch.zeros(conv3d.bias.shape))
super().__init__(conv3d)
class ViTVNet(nn.Module):
def __init__(self, config, img_size=(64, 256, 256), int_steps=7, vis=False):
super(ViTVNet, self).__init__()
self.transformer = Transformer(config, img_size, vis)
self.decoder = DecoderCup(config, img_size)
self.reg_head = RegistrationHead(
in_channels=config.decoder_channels[-1],
out_channels=config['n_dims'],
kernel_size=3,
)
self.spatial_trans = SpatialTransformer(img_size)
self.config = config
#self.integrate = VecInt(img_size, int_steps)
def forward(self, x):
        source = x[:, 0:1, :, :]  # first channel of the stacked input is the moving image
x, attn_weights, features = self.transformer(x) # (B, n_patch, hidden)
x = self.decoder(x, features)
flow = self.reg_head(x)
#flow = self.integrate(flow)
out = self.spatial_trans(source, flow)
return out, flow
class VecInt(nn.Module):
"""
Integrates a vector field via scaling and squaring.
Obtained from https://github.com/voxelmorph/voxelmorph
"""
def __init__(self, inshape, nsteps):
super().__init__()
assert nsteps >= 0, 'nsteps should be >= 0, found: %d' % nsteps
self.nsteps = nsteps
self.scale = 1.0 / (2 ** self.nsteps)
self.transformer = SpatialTransformer(inshape)
def forward(self, vec):
vec = vec * self.scale
for _ in range(self.nsteps):
vec = vec + self.transformer(vec, vec)
return vec
CONFIGS = {
'ViT-V-Net': configs.get_3DReg_config(),
}
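# Minimal smoke test -- a sketch, not part of the original module. It assumes the `configs`
# module above provides hyper-parameters consistent with the default img_size=(64, 256, 256)
# and that one random input pair fits in memory on the current device.
if __name__ == '__main__':
    config = CONFIGS['ViT-V-Net']
    model = ViTVNet(config, img_size=(64, 256, 256))
    # channel 0 = moving image, channel 1 = fixed image
    x = torch.randn(1, 2, 64, 256, 256)
    warped, flow = model(x)
    # roughly expected: warped (1, 1, 64, 256, 256), flow (1, n_dims, 64, 256, 256)
    print(warped.shape, flow.shape)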
| 36.311258 | 200 | 0.610615 | [
"MIT"
] | junyuchen245/ViT-V-Net_for_3D_Image_Registration | ViT-V-Net/models.py | 16,449 | Python |
import keras
from keras.datasets import mnist
# input image dimensions
img_rows, img_cols = 28, 28
input_shape = (img_rows, img_cols, 1)
num_classes = 10
def get_mnist_data():
# the data, shuffled and split between train and test sets
(x_train, y_train), (x_test, y_test) = mnist.load_data()
x_train = x_train.reshape(x_train.shape[0], img_rows, img_cols, 1)
x_test = x_test.reshape(x_test.shape[0], img_rows, img_cols, 1)
x_train = x_train.astype('float32')
x_test = x_test.astype('float32')
x_train /= 255
x_test /= 255
print('x_train shape:', x_train.shape)
print(x_train.shape[0], 'train samples')
print(x_test.shape[0], 'test samples')
# convert class vectors to binary class matrices
y_train = keras.utils.to_categorical(y_train, num_classes)
y_test = keras.utils.to_categorical(y_test, num_classes)
return x_train, y_train, x_test, y_test
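# Example usage (a sketch; downloads MNIST through Keras on the first call):
if __name__ == '__main__':
    x_train, y_train, x_test, y_test = get_mnist_data()
    # labels are one-hot encoded: (60000, 10) for training, (10000, 10) for test
    print(y_train.shape, y_test.shape)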
| 31.448276 | 70 | 0.710526 | [
"MIT"
] | Stochastic13/keract | examples/data.py | 912 | Python |
# -*- coding: utf-8 -*-
import random
import itertools
from collections import defaultdict
class Chat(object):
cache_size = 200
# user_num = list(range(1, 100))
# random.shuffle(user_num)
colors = ['赤', '青', '黄', '緑', '紫', '黒', '茶', '灰色', '金', '銀']
fruits = ['りんご', 'みかん', 'メロン', 'パイナップル', 'ぶどう', '梨', 'いちご', 'もも', 'さくらんぼ', 'バナナ']
fruits_with_color = itertools.product(colors, fruits)
user_name = list(map(lambda n: n[0]+n[1], fruits_with_color))
random.shuffle(user_name)
def __init__(self):
self.cache = defaultdict(list)
self.nickname_dic = defaultdict(dict)
def set_nickname(self, room_id, client_name):
self.nickname_dic[room_id].update({client_name:str(self.__get_random_name())})
def get_nickname(self, room_id, client_name):
return self.nickname_dic[room_id][client_name]
def update_cache(self, chat, room_id):
self.cache[room_id].append(chat)
if len(self.cache[room_id]) > self.cache_size:
            self.cache[room_id] = self.cache[room_id][-self.cache_size:]
def __get_random_name(self):
return self.user_name.pop()
def clear_caches(self, room_id):
del self.cache[room_id]
del self.nickname_dic[room_id]
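# Example usage (illustration only; the room and client identifiers are hypothetical):
# chat = Chat()
# chat.set_nickname('room-1', 'client-a') # assigns a random color+fruit name, e.g. '赤りんご'
# chat.get_nickname('room-1', 'client-a')
# chat.update_cache({'msg': 'hello'}, 'room-1') # keeps at most cache_size entries per room
# chat.clear_caches('room-1')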
| 31.923077 | 86 | 0.64257 | [
"MIT"
] | Hironsan/Brain_Hacker | handlers/brainstorming/chat.py | 1,331 | Python |
import imp
from os import path
from setuptools import setup
VERSION = imp.load_source(
'version',
path.join('.',
'zunzuncito',
'version.py'))
VERSION = VERSION.__version__
readme = open('README.rst', 'r')
setup(
name='zunzuncito',
version=VERSION,
author='Nicolas Embriz',
author_email='[email protected]',
description="A micro-framework for creating REST API's",
license='BSD',
keywords='wsgi web api framework rest http',
url='http://www.zunzun.io',
download_url='https://github.com/nbari/zunzuncito/tarball/master',
platforms="any",
packages=['zunzuncito'],
classifiers=[
'Development Status :: 4 - Beta',
'Environment :: Console',
'Environment :: Web Environment',
'Intended Audience :: Developers',
'Intended Audience :: System Administrators',
'License :: OSI Approved :: BSD License',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Programming Language :: Python :: 2.7',
'Topic :: Internet :: WWW/HTTP :: WSGI',
'Topic :: Software Development :: Libraries :: Application Frameworks'
],
long_description=readme.read()
)
| 29.404762 | 78 | 0.622672 | [
"BSD-3-Clause"
] | nbari/zunzuncito | setup.py | 1,235 | Python |
"""
Argo Server API
You can get examples of requests and responses by using the CLI with `--gloglevel=9`, e.g. `argo list --gloglevel=9` # noqa: E501
The version of the OpenAPI document: VERSION
Generated by: https://openapi-generator.tech
"""
import re # noqa: F401
import sys # noqa: F401
from openapi_client.model_utils import ( # noqa: F401
ApiTypeError,
ModelComposed,
ModelNormal,
ModelSimple,
cached_property,
change_keys_js_to_python,
convert_js_args_to_python_args,
date,
datetime,
file_type,
none_type,
validate_get_composed_info,
)
from ..model_utils import OpenApiModel
from openapi_client.exceptions import ApiAttributeError
class FCVolumeSource(ModelNormal):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
Attributes:
allowed_values (dict): The key is the tuple path to the attribute
and the for var_name this is (var_name,). The value is a dict
with a capitalized key describing the allowed value and an allowed
value. These dicts store the allowed enum values.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
discriminator_value_class_map (dict): A dict to go from the discriminator
variable value to the discriminator class name.
validations (dict): The key is the tuple path to the attribute
and the for var_name this is (var_name,). The value is a dict
that stores validations for max_length, min_length, max_items,
min_items, exclusive_maximum, inclusive_maximum, exclusive_minimum,
inclusive_minimum, and regex.
additional_properties_type (tuple): A tuple of classes accepted
as additional properties values.
"""
allowed_values = {
}
validations = {
}
@cached_property
def additional_properties_type():
"""
This must be a method because a model may have properties that are
of type self, this must run after the class is loaded
"""
return (bool, date, datetime, dict, float, int, list, str, none_type,) # noqa: E501
_nullable = False
@cached_property
def openapi_types():
"""
This must be a method because a model may have properties that are
of type self, this must run after the class is loaded
Returns
openapi_types (dict): The key is attribute name
and the value is attribute type.
"""
return {
'fs_type': (str,), # noqa: E501
'lun': (int,), # noqa: E501
'read_only': (bool,), # noqa: E501
'target_wwns': ([str],), # noqa: E501
'wwids': ([str],), # noqa: E501
}
@cached_property
def discriminator():
return None
attribute_map = {
'fs_type': 'fsType', # noqa: E501
'lun': 'lun', # noqa: E501
'read_only': 'readOnly', # noqa: E501
'target_wwns': 'targetWWNs', # noqa: E501
'wwids': 'wwids', # noqa: E501
}
read_only_vars = {
}
_composed_schemas = {}
@classmethod
@convert_js_args_to_python_args
def _from_openapi_data(cls, *args, **kwargs): # noqa: E501
"""FCVolumeSource - a model defined in OpenAPI
Keyword Args:
_check_type (bool): if True, values for parameters in openapi_types
will be type checked and a TypeError will be
raised if the wrong type is input.
Defaults to True
_path_to_item (tuple/list): This is a list of keys or values to
drill down to the model in received_data
when deserializing a response
_spec_property_naming (bool): True if the variable names in the input data
are serialized names, as specified in the OpenAPI document.
False if the variable names in the input data
are pythonic names, e.g. snake case (default)
_configuration (Configuration): the instance to use when
deserializing a file_type parameter.
If passed, type conversion is attempted
If omitted no type conversion is done.
_visited_composed_classes (tuple): This stores a tuple of
classes that we have traveled through so that
if we see that class again we will not use its
discriminator again.
When traveling through a discriminator, the
composed schema that is
is traveled through is added to this set.
For example if Animal has a discriminator
petType and we pass in "Dog", and the class Dog
allOf includes Animal, we move through Animal
once using the discriminator, and pick Dog.
Then in Dog, we will make an instance of the
Animal class but this time we won't travel
through its discriminator because we passed in
_visited_composed_classes = (Animal,)
fs_type (str): Filesystem type to mount. Must be a filesystem type supported by the host operating system. Ex. \"ext4\", \"xfs\", \"ntfs\". Implicitly inferred to be \"ext4\" if unspecified.. [optional] # noqa: E501
lun (int): Optional: FC target lun number. [optional] # noqa: E501
read_only (bool): Optional: Defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts.. [optional] # noqa: E501
target_wwns ([str]): Optional: FC target worldwide names (WWNs). [optional] # noqa: E501
wwids ([str]): Optional: FC volume world wide identifiers (wwids) Either wwids or combination of targetWWNs and lun must be set, but not both simultaneously.. [optional] # noqa: E501
"""
_check_type = kwargs.pop('_check_type', True)
_spec_property_naming = kwargs.pop('_spec_property_naming', False)
_path_to_item = kwargs.pop('_path_to_item', ())
_configuration = kwargs.pop('_configuration', None)
_visited_composed_classes = kwargs.pop('_visited_composed_classes', ())
self = super(OpenApiModel, cls).__new__(cls)
if args:
raise ApiTypeError(
"Invalid positional arguments=%s passed to %s. Remove those invalid positional arguments." % (
args,
self.__class__.__name__,
),
path_to_item=_path_to_item,
valid_classes=(self.__class__,),
)
self._data_store = {}
self._check_type = _check_type
self._spec_property_naming = _spec_property_naming
self._path_to_item = _path_to_item
self._configuration = _configuration
self._visited_composed_classes = _visited_composed_classes + (self.__class__,)
for var_name, var_value in kwargs.items():
if var_name not in self.attribute_map and \
self._configuration is not None and \
self._configuration.discard_unknown_keys and \
self.additional_properties_type is None:
# discard variable.
continue
setattr(self, var_name, var_value)
return self
required_properties = set([
'_data_store',
'_check_type',
'_spec_property_naming',
'_path_to_item',
'_configuration',
'_visited_composed_classes',
])
@convert_js_args_to_python_args
def __init__(self, *args, **kwargs): # noqa: E501
"""FCVolumeSource - a model defined in OpenAPI
Keyword Args:
_check_type (bool): if True, values for parameters in openapi_types
will be type checked and a TypeError will be
raised if the wrong type is input.
Defaults to True
_path_to_item (tuple/list): This is a list of keys or values to
drill down to the model in received_data
when deserializing a response
_spec_property_naming (bool): True if the variable names in the input data
are serialized names, as specified in the OpenAPI document.
False if the variable names in the input data
are pythonic names, e.g. snake case (default)
_configuration (Configuration): the instance to use when
deserializing a file_type parameter.
If passed, type conversion is attempted
If omitted no type conversion is done.
_visited_composed_classes (tuple): This stores a tuple of
classes that we have traveled through so that
if we see that class again we will not use its
discriminator again.
When traveling through a discriminator, the
composed schema that is
is traveled through is added to this set.
For example if Animal has a discriminator
petType and we pass in "Dog", and the class Dog
allOf includes Animal, we move through Animal
once using the discriminator, and pick Dog.
Then in Dog, we will make an instance of the
Animal class but this time we won't travel
through its discriminator because we passed in
_visited_composed_classes = (Animal,)
fs_type (str): Filesystem type to mount. Must be a filesystem type supported by the host operating system. Ex. \"ext4\", \"xfs\", \"ntfs\". Implicitly inferred to be \"ext4\" if unspecified.. [optional] # noqa: E501
lun (int): Optional: FC target lun number. [optional] # noqa: E501
read_only (bool): Optional: Defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts.. [optional] # noqa: E501
target_wwns ([str]): Optional: FC target worldwide names (WWNs). [optional] # noqa: E501
wwids ([str]): Optional: FC volume world wide identifiers (wwids) Either wwids or combination of targetWWNs and lun must be set, but not both simultaneously.. [optional] # noqa: E501
"""
_check_type = kwargs.pop('_check_type', True)
_spec_property_naming = kwargs.pop('_spec_property_naming', False)
_path_to_item = kwargs.pop('_path_to_item', ())
_configuration = kwargs.pop('_configuration', None)
_visited_composed_classes = kwargs.pop('_visited_composed_classes', ())
if args:
raise ApiTypeError(
"Invalid positional arguments=%s passed to %s. Remove those invalid positional arguments." % (
args,
self.__class__.__name__,
),
path_to_item=_path_to_item,
valid_classes=(self.__class__,),
)
self._data_store = {}
self._check_type = _check_type
self._spec_property_naming = _spec_property_naming
self._path_to_item = _path_to_item
self._configuration = _configuration
self._visited_composed_classes = _visited_composed_classes + (self.__class__,)
for var_name, var_value in kwargs.items():
if var_name not in self.attribute_map and \
self._configuration is not None and \
self._configuration.discard_unknown_keys and \
self.additional_properties_type is None:
# discard variable.
continue
setattr(self, var_name, var_value)
if var_name in self.read_only_vars:
raise ApiAttributeError(f"`{var_name}` is a read-only attribute. Use `from_openapi_data` to instantiate "
f"class with read only attributes.")
| 47.375 | 228 | 0.579621 | [
"Apache-2.0"
] | 2kindsofcs/argo-workflows | sdks/python/client/openapi_client/model/fc_volume_source.py | 12,886 | Python |
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""A module to blocker devices based on device blocklists."""
from __future__ import absolute_import
from __future__ import division
from __future__ import google_type_annotations
from __future__ import print_function
from tradefed_cluster import datastore_entities
def IsLabBlocked(lab_name):
"""Check if the lab is blocked.
Args:
lab_name: lab name
Returns:
true if the lab is blocked, otherwise false.
"""
device_blocklists = (
datastore_entities.DeviceBlocklist.query()
.filter(datastore_entities.DeviceBlocklist.lab_name == lab_name)
.fetch(1))
return bool(device_blocklists)
| 32.405405 | 74 | 0.764804 | [
"Apache-2.0"
] | maksonlee/tradefed_cluster | tradefed_cluster/device_blocker.py | 1,199 | Python |
# This code is part of Qiskit.
#
# (C) Copyright IBM 2017, 2020.
#
# This code is licensed under the Apache License, Version 2.0. You may
# obtain a copy of this license in the LICENSE.txt file in the root directory
# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.
#
# Any modifications or derivative works of this code must retain this
# copyright notice, and modified files need to carry a notice indicating
# that they have been altered from the originals.
"""The Grover operator."""
from typing import List, Optional, Union
import numpy
from qiskit.circuit import QuantumCircuit, QuantumRegister, AncillaRegister
# from qiskit.quantum_info import Statevector, Operator, DensityMatrix
from qiskit.quantum_info import Operator
from .standard_gates import MCXGate
class GroverOperator(QuantumCircuit):
r"""The Grover operator.
Grover's search algorithm [1, 2] consists of repeated applications of the so-called
Grover operator used to amplify the amplitudes of the desired output states.
This operator, :math:`\mathcal{Q}`, consists of the phase oracle, :math:`\mathcal{S}_f`,
zero phase-shift or zero reflection, :math:`\mathcal{S}_0`, and an
input state preparation :math:`\mathcal{A}`:
.. math::
\mathcal{Q} = \mathcal{A} \mathcal{S}_0 \mathcal{A}^\dagger \mathcal{S}_f
In the standard Grover search we have :math:`\mathcal{A} = H^{\otimes n}`:
.. math::
\mathcal{Q} = H^{\otimes n} \mathcal{S}_0 H^{\otimes n} \mathcal{S}_f
= D \mathcal{S_f}
The operation :math:`D = H^{\otimes n} \mathcal{S}_0 H^{\otimes n}` is also referred to as
diffusion operator. In this formulation we can see that Grover's operator consists of two
steps: first, the phase oracle multiplies the good states by -1 (with :math:`\mathcal{S}_f`)
and then the whole state is reflected around the mean (with :math:`D`).
This class allows setting a different state preparation, as in quantum amplitude
amplification (a generalization of Grover's algorithm), :math:`\mathcal{A}` might not be
a layer of Hardamard gates [3].
The action of the phase oracle :math:`\mathcal{S}_f` is defined as
.. math::
\mathcal{S}_f: |x\rangle \mapsto (-1)^{f(x)}|x\rangle
where :math:`f(x) = 1` if :math:`x` is a good state and 0 otherwise. To highlight the fact
that this oracle flips the phase of the good states and does not flip the state of a result
qubit, we call :math:`\mathcal{S}_f` a phase oracle.
Note that you can easily construct a phase oracle from a bitflip oracle by sandwiching the
controlled X gate on the result qubit by a X and H gate. For instance
.. parsed-literal::
Bitflip oracle Phaseflip oracle
q_0: ──■── q_0: ────────────■────────────
┌─┴─┐ ┌───┐┌───┐┌─┴─┐┌───┐┌───┐
out: ┤ X ├ out: ┤ X ├┤ H ├┤ X ├┤ H ├┤ X ├
└───┘ └───┘└───┘└───┘└───┘└───┘
There is some flexibility in defining the oracle and :math:`\mathcal{A}` operator. Before the
Grover operator is applied in Grover's algorithm, the qubits are first prepared with one
application of the :math:`\mathcal{A}` operator (or Hadamard gates in the standard formulation).
Thus, we always have operation of the form
:math:`\mathcal{A} \mathcal{S}_f \mathcal{A}^\dagger`. Therefore it is possible to move
bitflip logic into :math:`\mathcal{A}` and leaving the oracle only to do phaseflips via Z gates
based on the bitflips. One possible use-case for this are oracles that do not uncompute the
state qubits.
The zero reflection :math:`\mathcal{S}_0` is usually defined as
.. math::
\mathcal{S}_0 = 2 |0\rangle^{\otimes n} \langle 0|^{\otimes n} - \mathbb{I}_n
where :math:`\mathbb{I}_n` is the identity on :math:`n` qubits.
By default, this class implements the negative version
:math:`2 |0\rangle^{\otimes n} \langle 0|^{\otimes n} - \mathbb{I}_n`, since this can simply
be implemented with a multi-controlled Z sandwiched by X gates on the target qubit and the
introduced global phase does not matter for Grover's algorithm.
Examples:
>>> from qiskit.circuit import QuantumCircuit
>>> from qiskit.circuit.library import GroverOperator
>>> oracle = QuantumCircuit(2)
>>> oracle.z(0) # good state = first qubit is |1>
>>> grover_op = GroverOperator(oracle, insert_barriers=True)
>>> grover_op.draw()
┌───┐ ░ ┌───┐ ░ ┌───┐ ┌───┐ ░ ┌───┐
state_0: ┤ Z ├─░─┤ H ├─░─┤ X ├───────■──┤ X ├──────░─┤ H ├
└───┘ ░ ├───┤ ░ ├───┤┌───┐┌─┴─┐├───┤┌───┐ ░ ├───┤
state_1: ──────░─┤ H ├─░─┤ X ├┤ H ├┤ X ├┤ H ├┤ X ├─░─┤ H ├
░ └───┘ ░ └───┘└───┘└───┘└───┘└───┘ ░ └───┘
>>> oracle = QuantumCircuit(1)
>>> oracle.z(0) # the qubit state |1> is the good state
>>> state_preparation = QuantumCircuit(1)
>>> state_preparation.ry(0.2, 0) # non-uniform state preparation
>>> grover_op = GroverOperator(oracle, state_preparation)
>>> grover_op.draw()
┌───┐┌──────────┐┌───┐┌───┐┌───┐┌─────────┐
state_0: ┤ Z ├┤ RY(-0.2) ├┤ X ├┤ Z ├┤ X ├┤ RY(0.2) ├
└───┘└──────────┘└───┘└───┘└───┘└─────────┘
>>> oracle = QuantumCircuit(4)
>>> oracle.z(3)
>>> reflection_qubits = [0, 3]
>>> state_preparation = QuantumCircuit(4)
>>> state_preparation.cry(0.1, 0, 3)
>>> state_preparation.ry(0.5, 3)
>>> grover_op = GroverOperator(oracle, state_preparation,
... reflection_qubits=reflection_qubits)
>>> grover_op.draw()
┌───┐ ┌───┐
state_0: ──────────────────────■──────┤ X ├───────■──┤ X ├──────────■────────────────
│ └───┘ │ └───┘ │
state_1: ──────────────────────┼──────────────────┼─────────────────┼────────────────
│ │ │
state_2: ──────────────────────┼──────────────────┼─────────────────┼────────────────
┌───┐┌──────────┐┌────┴─────┐┌───┐┌───┐┌─┴─┐┌───┐┌───┐┌────┴────┐┌─────────┐
state_3: ┤ Z ├┤ RY(-0.5) ├┤ RY(-0.1) ├┤ X ├┤ H ├┤ X ├┤ H ├┤ X ├┤ RY(0.1) ├┤ RY(0.5) ├
└───┘└──────────┘└──────────┘└───┘└───┘└───┘└───┘└───┘└─────────┘└─────────┘
>>> mark_state = Statevector.from_label('011')
>>> diffuse_operator = 2 * DensityMatrix.from_label('000') - Operator.from_label('III')
>>> grover_op = GroverOperator(oracle=mark_state, zero_reflection=diffuse_operator)
>>> grover_op.draw(fold=70)
┌─────────────────┐ ┌───┐ »
state_0: ┤0 ├──────┤ H ├──────────────────────────»
│ │┌─────┴───┴─────┐ ┌───┐ »
state_1: ┤1 UCRZ(0,pi,0,0) ├┤0 ├─────┤ H ├──────────»
│ ││ UCRZ(pi/2,0) │┌────┴───┴────┐┌───┐»
state_2: ┤2 ├┤1 ├┤ UCRZ(-pi/4) ├┤ H ├»
└─────────────────┘└───────────────┘└─────────────┘└───┘»
« ┌─────────────────┐ ┌───┐
«state_0: ┤0 ├──────┤ H ├─────────────────────────
« │ │┌─────┴───┴─────┐ ┌───┐
«state_1: ┤1 UCRZ(pi,0,0,0) ├┤0 ├────┤ H ├──────────
« │ ││ UCRZ(pi/2,0) │┌───┴───┴────┐┌───┐
«state_2: ┤2 ├┤1 ├┤ UCRZ(pi/4) ├┤ H ├
« └─────────────────┘└───────────────┘└────────────┘└───┘
References:
[1]: L. K. Grover (1996), A fast quantum mechanical algorithm for database search,
`arXiv:quant-ph/9605043 <https://arxiv.org/abs/quant-ph/9605043>`_.
[2]: I. Chuang & M. Nielsen, Quantum Computation and Quantum Information,
Cambridge: Cambridge University Press, 2000. Chapter 6.1.2.
[3]: Brassard, G., Hoyer, P., Mosca, M., & Tapp, A. (2000).
Quantum Amplitude Amplification and Estimation.
`arXiv:quant-ph/0005055 <http://arxiv.org/abs/quant-ph/0005055>`_.
"""
def __init__(
self,
# oracle: Union[QuantumCircuit, Statevector],
oracle: QuantumCircuit,
state_preparation: Optional[QuantumCircuit] = None,
# zero_reflection: Optional[Union[QuantumCircuit, DensityMatrix, Operator]] = None,
zero_reflection: Optional[Union[QuantumCircuit, Operator]] = None,
reflection_qubits: Optional[List[int]] = None,
insert_barriers: bool = False,
mcx_mode: str = "noancilla",
name: str = "Q",
) -> None:
r"""
Args:
oracle: The phase oracle implementing a reflection about the bad state. Note that this
is not a bitflip oracle, see the docstring for more information.
state_preparation: The operator preparing the good and bad state.
For Grover's algorithm, this is a n-qubit Hadamard gate and for amplitude
amplification or estimation the operator :math:`\mathcal{A}`.
zero_reflection: The reflection about the zero state, :math:`\mathcal{S}_0`.
reflection_qubits: Qubits on which the zero reflection acts on.
insert_barriers: Whether barriers should be inserted between the reflections and A.
mcx_mode: The mode to use for building the default zero reflection.
name: The name of the circuit.
"""
super().__init__(name=name)
# store inputs
# if isinstance(oracle, Statevector):
# from qiskit.circuit.library import Diagonal # pylint: disable=cyclic-import
# oracle = Diagonal((-1) ** oracle.data)
self._oracle = oracle
# if isinstance(zero_reflection, (Operator, DensityMatrix)):
# from qiskit.circuit.library import Diagonal # pylint: disable=cyclic-import
# zero_reflection = Diagonal(zero_reflection.data.diagonal())
self._zero_reflection = zero_reflection
self._reflection_qubits = reflection_qubits
self._state_preparation = state_preparation
self._insert_barriers = insert_barriers
self._mcx_mode = mcx_mode
# build circuit
self._build()
@property
def reflection_qubits(self):
"""Reflection qubits, on which S0 is applied (if S0 is not user-specified)."""
if self._reflection_qubits is not None:
return self._reflection_qubits
num_state_qubits = self.oracle.num_qubits - self.oracle.num_ancillas
return list(range(num_state_qubits))
@property
def zero_reflection(self) -> QuantumCircuit:
"""The subcircuit implementing the reflection about 0."""
if self._zero_reflection is not None:
return self._zero_reflection
num_state_qubits = self.oracle.num_qubits - self.oracle.num_ancillas
return _zero_reflection(num_state_qubits, self.reflection_qubits, self._mcx_mode)
@property
def state_preparation(self) -> QuantumCircuit:
"""The subcircuit implementing the A operator or Hadamards."""
if self._state_preparation is not None:
return self._state_preparation
num_state_qubits = self.oracle.num_qubits - self.oracle.num_ancillas
hadamards = QuantumCircuit(num_state_qubits, name="H")
# apply Hadamards only on reflection qubits, rest will cancel out
hadamards.h(self.reflection_qubits)
return hadamards
@property
def oracle(self):
"""The oracle implementing a reflection about the bad state."""
return self._oracle
def _build(self):
num_state_qubits = self.oracle.num_qubits - self.oracle.num_ancillas
self.add_register(QuantumRegister(num_state_qubits, name="state"))
num_ancillas = numpy.max(
[
self.oracle.num_ancillas,
self.zero_reflection.num_ancillas,
self.state_preparation.num_ancillas,
]
)
if num_ancillas > 0:
self.add_register(AncillaRegister(num_ancillas, name="ancilla"))
self.compose(self.oracle, list(range(self.oracle.num_qubits)), inplace=True)
if self._insert_barriers:
self.barrier()
self.compose(
self.state_preparation.inverse(),
list(range(self.state_preparation.num_qubits)),
inplace=True,
)
if self._insert_barriers:
self.barrier()
self.compose(
self.zero_reflection, list(range(self.zero_reflection.num_qubits)), inplace=True
)
if self._insert_barriers:
self.barrier()
self.compose(
self.state_preparation, list(range(self.state_preparation.num_qubits)), inplace=True
)
# minus sign
self.global_phase = numpy.pi
# TODO use the oracle compiler or the bit string oracle
def _zero_reflection(
num_state_qubits: int, qubits: List[int], mcx_mode: Optional[str] = None
) -> QuantumCircuit:
qr_state = QuantumRegister(num_state_qubits, "state")
reflection = QuantumCircuit(qr_state, name="S_0")
num_ancillas = MCXGate.get_num_ancilla_qubits(len(qubits) - 1, mcx_mode)
if num_ancillas > 0:
qr_ancilla = AncillaRegister(num_ancillas, "ancilla")
reflection.add_register(qr_ancilla)
else:
qr_ancilla = []
reflection.x(qubits)
if len(qubits) == 1:
reflection.z(0) # MCX does not allow 0 control qubits, therefore this is separate
else:
reflection.h(qubits[-1])
reflection.mcx(qubits[:-1], qubits[-1], qr_ancilla[:], mode=mcx_mode)
reflection.h(qubits[-1])
reflection.x(qubits)
return reflection
| 47.233553 | 101 | 0.548088 | [
"Apache-2.0"
] | SpinQTech/SpinQKit | qiskit/circuit/library/grover_operator.py | 16,719 | Python |
import numpy as np
import copy, operator
from qtensor.optimisation.Optimizer import OrderingOptimizer
from qtensor import utils
from functools import reduce
import networkx as nx
import qtree
def reducelist(f, lst, x=0):
prev = x
for i in lst:
prev = f(prev, i)
yield prev
class RGreedyOptimizer(OrderingOptimizer):
"""
An orderer that greedy selects vertices
using boltzman probabilities.
"""
def __init__(self, *args, temp=0.002, repeats=10, **kwargs):
super().__init__(*args, **kwargs)
self.temp = temp
self.repeats = repeats
def _get_ordering(self, graph, **kwargs):
node_names = nx.get_node_attributes(graph, 'name')
node_sizes = nx.get_node_attributes(graph, 'size')
peo, path = self._get_ordering_ints(graph)
peo = [qtree.optimizer.Var(var, size=node_sizes[var],
name=node_names[var])
for var in peo]
#print('tw=', max(path))
return peo, path
def _get_ordering_ints(self, old_graph, free_vars=[]):
best_peo = None
best_width = np.inf
best_widths = None
for i in range(self.repeats):
graph = copy.deepcopy(old_graph)
peo = []
widths = []
while graph.number_of_nodes():
ngs = np.array(list(
map(len, map(operator.itemgetter(1), graph.adjacency()))
))
weights = np.exp(-(ngs - np.min(ngs))/self.temp)
#print(ngs)
#print(weights)
                # Boltzmann weights over the node degrees, e.g. weights 1, 3, 5, 2, 1
                distrib = np.array([0]+list(reducelist(lambda x, y:x+y, weights, 0)))
                #print(distrib)
                # cumulative weights, e.g. 0, 1, 4, 9, 11, 12
                rnd = np.random.random()*distrib[-1]
                # uniform draw between 0 and the total weight (12 in the example), say 5
                # find the bin containing rnd, i.e. the smallest cumulative value larger than rnd
bool_map = distrib < rnd
# True, True, True, False, False, False
select_map = bool_map[1:] ^ bool_map[:-1]
selected_elem = np.array(list(graph.nodes))[select_map]
assert len(selected_elem)==1, 'Error in algorithm, please submit an issue'
selected_node = selected_elem[0]
utils.eliminate_node_no_structure(graph, selected_node)
peo.append(int(selected_node))
widths.append(int(ngs[select_map][0]))
if max(widths) < best_width:
best_peo = peo
best_widths = widths
best_width = max(widths)
return best_peo, best_widths
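# Stand-alone illustration of the Boltzmann roulette-wheel selection used above
# (a sketch with made-up degrees; it does not touch any graph or qtree objects).
if __name__ == '__main__':
    np.random.seed(0)
    ngs = np.array([1, 3, 5, 2, 1])                  # hypothetical node degrees
    weights = np.exp(-(ngs - np.min(ngs)) / 0.002)   # temp=0.002, the class default
    distrib = np.array([0] + list(reducelist(lambda x, y: x + y, weights, 0)))
    rnd = np.random.random() * distrib[-1]
    bool_map = distrib < rnd
    select_map = bool_map[1:] ^ bool_map[:-1]
    # with such a low temperature this is nearly greedy: a minimum-degree node is picked
    print('selected index:', int(np.flatnonzero(select_map)[0]))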
| 34.205128 | 90 | 0.552099 | [
"BSD-3-Clause"
] | DaniloZZZ/QTensor | qtensor/optimisation/RGreedy.py | 2,668 | Python |
import os
import json
__author__ = 'Manfred Minimair <[email protected]>'
class JSONStorage:
"""
File storage for a dictionary.
"""
file = '' # file name of storage file
data = None # data dict
indent = ' ' # indent prefix for pretty printing json files
def __init__(self, path, name):
"""
        Initialize.
        :param path: directory of the storage file;
            empty means the current directory.
:param name: file name, json file; may include a path.
"""
if path:
os.makedirs(path, exist_ok=True)
self.file = os.path.normpath(os.path.join(path, name))
try:
with open(self.file) as data_file:
self.data = json.load(data_file)
except FileNotFoundError:
self.data = dict()
self.dump()
def dump(self):
"""
Dump data into storage file.
"""
with open(self.file, 'w') as out_file:
json.dump(self.data, out_file, indent=self.indent)
def get(self, item):
"""
Get stored item.
:param item: name, string, of item to get.
:return: stored item; raises a KeyError if item does not exist.
"""
return self.data[item]
def set(self, item, value):
"""
Set item's value; causes the data to be dumped into the storage file.
:param item: name, string of item to set.
:param value: value to set.
"""
self.data[item] = value
self.dump()
def __getattr__(self, item):
"""
Get stored item with .-notation if not defined as a class member.
:param item: name, string of item compatible
with Python class member name.
:return value of item.
"""
if item in self.data:
return self.data[item]
else:
raise AttributeError
| 28.029412 | 77 | 0.559811 | [
"ECL-2.0",
"Apache-2.0"
] | mincode/netdata | netdata/workers/json_storage.py | 1,906 | Python |
from __future__ import annotations
import asyncio
import logging
import uuid
from collections import defaultdict
from collections.abc import Hashable
from dask.utils import parse_timedelta
from distributed.client import Client
from distributed.utils import TimeoutError, log_errors
from distributed.worker import get_worker
logger = logging.getLogger(__name__)
class MultiLockExtension:
"""An extension for the scheduler to manage MultiLocks
This adds the following routes to the scheduler
* multi_lock_acquire
* multi_lock_release
The approach is to maintain `self.locks` that maps a lock (unique name given to
`MultiLock(names=, ...)` at creation) to a list of users (instances of `MultiLock`)
that "requests" the lock. Additionally, `self.requests` maps a user to its requested
locks and `self.requests_left` maps a user to the number of locks still need.
Every time a user `x` gets to the front in `self.locks[name] = [x, ...]` it means
that `x` now holds the lock `name` and when it holds all the requested locks
`acquire()` can return.
Finally, `self.events` contains all the events users are waiting on to finish.
"""
def __init__(self, scheduler):
self.scheduler = scheduler
self.locks = defaultdict(list) # lock -> users
self.requests = {} # user -> locks
self.requests_left = {} # user -> locks still needed
self.events = {}
self.scheduler.handlers.update(
{"multi_lock_acquire": self.acquire, "multi_lock_release": self.release}
)
def _request_locks(self, locks: list[str], id: Hashable, num_locks: int) -> bool:
"""Request locks
Parameters
----------
locks: List[str]
Names of the locks to request.
id: Hashable
Identifier of the `MultiLock` instance requesting the locks.
num_locks: int
Number of locks in `locks` requesting
Return
------
result: bool
Whether `num_locks` requested locks are free immediately or not.
"""
assert id not in self.requests
self.requests[id] = set(locks)
assert len(locks) >= num_locks and num_locks > 0
self.requests_left[id] = num_locks
locks = sorted(locks, key=lambda x: len(self.locks[x]))
for i, lock in enumerate(locks):
self.locks[lock].append(id)
if len(self.locks[lock]) == 1: # The lock was free
self.requests_left[id] -= 1
if self.requests_left[id] == 0: # Got all locks needed
# Since we got all locks need, we can remove the rest of the requests
self.requests[id] -= set(locks[i + 1 :])
return True
return False
def _refain_locks(self, locks, id):
"""Cancel/release previously requested/acquired locks
Parameters
----------
locks: List[str]
            Names of the locks to release.
        id: Hashable
            Identifier of the `MultiLock` instance releasing the locks.
"""
waiters_ready = set()
for lock in locks:
if self.locks[lock][0] == id:
self.locks[lock].pop(0)
if self.locks[lock]:
new_first = self.locks[lock][0]
self.requests_left[new_first] -= 1
if self.requests_left[new_first] <= 0:
# Notice, `self.requests_left[new_first]` might go below zero
# if more locks are freed than requested.
self.requests_left[new_first] = 0
waiters_ready.add(new_first)
else:
self.locks[lock].remove(id)
assert id not in self.locks[lock]
del self.requests[id]
del self.requests_left[id]
for waiter in waiters_ready:
self.scheduler.loop.add_callback(self.events[waiter].set)
async def acquire(self, locks=None, id=None, timeout=None, num_locks=None):
with log_errors():
if not self._request_locks(locks, id, num_locks):
assert id not in self.events
event = asyncio.Event()
self.events[id] = event
future = event.wait()
if timeout is not None:
future = asyncio.wait_for(future, timeout)
try:
await future
except TimeoutError:
self._refain_locks(locks, id)
return False
finally:
del self.events[id]
# At this point `id` acquired all `locks`
assert self.requests_left[id] == 0
return True
def release(self, id=None):
with log_errors():
self._refain_locks(self.requests[id], id)
class MultiLock:
"""Distributed Centralized Lock
Parameters
----------
names: List[str]
Names of the locks to acquire. Choosing the same name allows two
disconnected processes to coordinate a lock.
client: Client (optional)
Client to use for communication with the scheduler. If not given, the
default global client will be used.
Examples
--------
>>> lock = MultiLock(['x', 'y']) # doctest: +SKIP
>>> lock.acquire(timeout=1) # doctest: +SKIP
>>> # do things with protected resource 'x' and 'y'
>>> lock.release() # doctest: +SKIP
"""
def __init__(self, names=[], client=None):
try:
self.client = client or Client.current()
except ValueError:
# Initialise new client
self.client = get_worker().client
self.names = names
self.id = uuid.uuid4().hex
self._locked = False
def acquire(self, blocking=True, timeout=None, num_locks=None):
"""Acquire the lock
Parameters
----------
blocking : bool, optional
If false, don't wait on the lock in the scheduler at all.
timeout : string or number or timedelta, optional
Seconds to wait on the lock in the scheduler. This does not
include local coroutine time, network transfer time, etc..
It is forbidden to specify a timeout when blocking is false.
Instead of number of seconds, it is also possible to specify
a timedelta in string format, e.g. "200ms".
num_locks : int, optional
Number of locks needed. If None, all locks are needed
Examples
--------
>>> lock = MultiLock(['x', 'y']) # doctest: +SKIP
>>> lock.acquire(timeout="1s") # doctest: +SKIP
Returns
-------
True or False whether or not it successfully acquired the lock
"""
timeout = parse_timedelta(timeout)
if not blocking:
if timeout is not None:
raise ValueError("can't specify a timeout for a non-blocking call")
timeout = 0
result = self.client.sync(
self.client.scheduler.multi_lock_acquire,
locks=self.names,
id=self.id,
timeout=timeout,
num_locks=num_locks or len(self.names),
)
self._locked = True
return result
def release(self):
"""Release the lock if already acquired"""
if not self.locked():
raise ValueError("Lock is not yet acquired")
ret = self.client.sync(self.client.scheduler.multi_lock_release, id=self.id)
self._locked = False
return ret
def locked(self):
return self._locked
def __enter__(self):
self.acquire()
return self
def __exit__(self, *args, **kwargs):
self.release()
async def __aenter__(self):
await self.acquire()
return self
async def __aexit__(self, *args, **kwargs):
await self.release()
def __reduce__(self):
return (type(self), (self.names,))
| 33.995798 | 89 | 0.582252 | [
"BSD-3-Clause"
] | bryanwweber/distributed | distributed/multi_lock.py | 8,091 | Python |
import torchvision
import numpy as np
import matplotlib
import matplotlib.pyplot as plt
def display_and_save_batch(title, batch, data, save=True, display=True):
"""Display and save batch of image using plt"""
im = torchvision.utils.make_grid(batch, nrow=int(batch.shape[0]**0.5))
plt.title(title)
plt.imshow(np.transpose(im.cpu().numpy(), (1, 2, 0)), cmap='gray')
if save:
plt.savefig('results/' + title + data + '.png', transparent=True, bbox_inches='tight')
if display:
plt.show()
def display_and_save_latent(batch, label, data, save=True, display=True):
"""Display and save batch of 2-D latent variable using plt"""
colors = ['black', 'red', 'green', 'blue', 'yellow', 'cyan', 'magenta', 'pink', 'violet', 'grey']
z = batch.cpu().detach().numpy()
l = label.cpu().numpy()
plt.title('Latent variables')
plt.scatter(z[:,0], z[:,1], c=l, cmap=matplotlib.colors.ListedColormap(colors))
    plt.xlim(-3, 3)
plt.ylim(-3, 3)
if save:
plt.savefig('results/latent-variable' + data + '.png', transparent=True, bbox_inches='tight')
if display:
plt.show() | 39.37931 | 101 | 0.643608 | [
"MIT"
] | jaywonchung/Learning-ML | Implementations/Conditional-Variational-Autoencoder/plot_utils.py | 1,142 | Python |
import binascii
from PySide2.QtWidgets import QTableWidget, QTableWidgetItem, QAbstractItemView
from PySide2.QtCore import Qt
class QPatchTableItem:
def __init__(self, patch, old_bytes):
self.patch = patch
self.old_bytes = old_bytes
def widgets(self):
patch = self.patch
widgets = [
QTableWidgetItem("%#x" % patch.addr),
QTableWidgetItem("%d bytes" % len(patch)),
QTableWidgetItem(binascii.hexlify(self.old_bytes).decode("ascii") if self.old_bytes else "<unknown>"),
QTableWidgetItem(binascii.hexlify(patch.new_bytes).decode("ascii")),
]
for w in widgets:
w.setFlags(w.flags() & ~Qt.ItemIsEditable)
return widgets
class QPatchTable(QTableWidget):
HEADER = ['Address', 'Size', 'Old Bytes', 'New Bytes']
def __init__(self, instance, parent):
super(QPatchTable, self).__init__(parent)
self.setColumnCount(len(self.HEADER))
self.setHorizontalHeaderLabels(self.HEADER)
self.setSelectionBehavior(QAbstractItemView.SelectRows)
self.verticalHeader().setVisible(False)
self.items = [ ]
self.instance = instance
self.instance.patches.am_subscribe(self._watch_patches)
def current_patch(self):
selected_index = self.currentRow()
if 0 <= selected_index < len(self.items):
return self.items[selected_index]
else:
return None
def reload(self):
current_row = self.currentRow()
self.clearContents()
self.items = [QPatchTableItem(item,
self._get_bytes(self.instance.project, item.addr, len(item)))
for item in self.instance.project.kb.patches.values()]
items_count = len(self.items)
self.setRowCount(items_count)
for idx, item in enumerate(self.items):
for i, it in enumerate(item.widgets()):
self.setItem(idx, i, it)
#if 0 <= current_row < len(self.items):
# self.setCurrentItem(current_row, 0)
def _on_state_selected(self, *args):
if self._selected is not None:
self._selected(self.current_state_record())
def _watch_patches(self, **kwargs):
if not self.instance.patches.am_none:
self.reload()
@staticmethod
def _get_bytes(proj, addr, size):
try:
return proj.loader.memory.load(addr, size)
except KeyError:
return None
| 30.46988 | 114 | 0.619217 | [
"BSD-2-Clause"
] | CrackerCat/angr-management | angrmanagement/ui/widgets/qpatch_table.py | 2,529 | Python |
import enum
import SimpleITK as sitk
@enum.unique
class Interpolation(enum.Enum):
"""Interpolation techniques available in ITK.
Example:
>>> import torchio as tio
>>> transform = tio.RandomAffine(image_interpolation='nearest')
"""
#: Interpolates image intensity at a non-integer pixel position by copying the intensity for the nearest neighbor.
NEAREST: str = 'sitkNearestNeighbor'
#: Linearly interpolates image intensity at a non-integer pixel position.
LINEAR: str = 'sitkLinear'
#: Computes the B-spline interpolation weights over the support region of the B-spline.
BSPLINE: str = 'sitkBSpline'
GAUSSIAN: str = 'sitkGaussian'
LABEL_GAUSSIAN: str = 'sitkLabelGaussian'
HAMMING: str = 'sitkHammingWindowedSinc'
COSINE: str = 'sitkCosineWindowedSinc'
WELCH: str = 'sitkWelchWindowedSinc'
LANCZOS: str = 'sitkLanczosWindowedSinc'
BLACKMAN: str = 'sitkBlackmanWindowedSinc'
def get_sitk_interpolator(interpolation: str) -> int:
if not isinstance(interpolation, str):
message = (
f'Interpolation must be a string, not {type(interpolation)}'
)
raise ValueError(message)
string = getattr(Interpolation, interpolation.upper()).value
return getattr(sitk, string)
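# Example (a sketch): mapping the user-facing string to the SimpleITK constant.
# get_sitk_interpolator('linear') returns sitk.sitkLinear, and
# get_sitk_interpolator('bspline') returns sitk.sitkBSpline.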
| 32.4 | 118 | 0.703704 | [
"Apache-2.0"
] | jwitos/torchio | torchio/transforms/interpolation.py | 1,296 | Python |
# -*- coding: utf-8 -*- #
# Copyright 2016 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""service-management operations describe command."""
from __future__ import absolute_import
from __future__ import division
from __future__ import unicode_literals
from googlecloudsdk.calliope import base
from googlecloudsdk.command_lib.endpoints import common_flags
_ERROR = ('The `service-management operations describe` command has been '
'replaced by `endpoints operations describe` and '
'`services operations describe`.')
@base.Deprecate(is_removed=True, error=_ERROR)
class Describe(base.DescribeCommand):
"""Describes an operation resource for a given operation name."""
@staticmethod
def Args(parser):
"""Args is called by calliope to gather arguments for this command.
Args:
parser: An argparse parser that you can use to add arguments that go
on the command line after this command. Positional arguments are
allowed.
"""
common_flags.operation_flag(suffix='to describe').AddToParser(parser)
parser.display_info.AddFormat(
':(metadata.startTime.date(format="%Y-%m-%d %H:%M:%S %Z", tz=LOCAL)) '
'[transforms] default')
parser.add_argument(
'--full',
action='store_true',
default=False,
help=('Print the entire operation resource, which could be large. '
'By default, a summary will be printed instead.'))
def Run(self, args):
"""Stubs 'service-management operations describe'.
Args:
args: argparse.Namespace, The arguments that this command was invoked
with.
"""
pass
| 33.6 | 78 | 0.708333 | [
"Apache-2.0"
] | bshaffer/google-cloud-sdk | lib/surface/service_management/operations/describe.py | 2,184 | Python |
"""
Reads the version information from the manifest of the Chrome extension.
Author: Mustafa Emre Acer
"""
import json
import sys
def ReadChromeExtensionVersion(manifest_path):
with open(manifest_path) as manifest_file:
manifest = json.load(manifest_file)
print(manifest['version'])
if __name__ == "__main__":
if len(sys.argv) > 1:
ReadChromeExtensionVersion(sys.argv[1])
else:
print('\nUsage: chrome_extension_version.py <manifest_path>\n')
exit(-1)
| 22.090909 | 74 | 0.726337 | [
"MIT"
] | meacer/deasciifier | build/chrome_extension_version.py | 486 | Python |
"""
Backprop NN training on Madelon data (Feature selection complete)
"""
import os
import csv
import time
import sys
sys.path.append("C:/ABAGAIL/ABAGAIL.jar")
from func.nn.backprop import BackPropagationNetworkFactory
from shared import SumOfSquaresError, DataSet, Instance
from opt.example import NeuralNetworkOptimizationProblem
from func.nn.backprop import RPROPUpdateRule, BatchBackPropagationTrainer
import opt.RandomizedHillClimbing as RandomizedHillClimbing
import opt.SimulatedAnnealing as SimulatedAnnealing
import opt.ga.StandardGeneticAlgorithm as StandardGeneticAlgorithm
from func.nn.activation import RELU
# Network parameters found "optimal" in Assignment 1
INPUT_LAYER = 31
HIDDEN_LAYER1 = 62
HIDDEN_LAYER2 = 62
HIDDEN_LAYER3 = 62
OUTPUT_LAYER = 1
TRAINING_ITERATIONS = 5001
OUTFILE = 'BACKPROP_LOG.txt'
def initialize_instances(infile):
"""Read the m_trg.csv CSV data into a list of instances."""
instances = []
# Read in the CSV file
#with open(infile, "r") as dat:
dat = open(infile,"r")
reader = csv.reader(dat)
dat.close()
for row in reader:
instance = Instance([float(value) for value in row[:-1]])
if float(row[-1]) < 0:
instance.setLabel(Instance(0))
else:
instance.setLabel(Instance(1))
#instance.setLabel(Instance(0 if float(row[-1]) < 0 else 1))
instances.append(instance)
return instances
def errorOnDataSet(network,ds,measure):
N = len(ds)
error = 0.
correct = 0
incorrect = 0
for instance in ds:
network.setInputValues(instance.getData())
network.run()
actual = instance.getLabel().getContinuous()
predicted = network.getOutputValues().get(0)
predicted = max(min(predicted,1),0)
if abs(predicted - actual) < 0.5:
correct += 1
else:
incorrect += 1
output = instance.getLabel()
output_values = network.getOutputValues()
example = Instance(output_values, Instance(output_values.get(0)))
error += measure.value(output, example)
MSE = error/float(N)
acc = correct/float(correct+incorrect)
return MSE,acc
def train(oa, network, oaName, training_ints,validation_ints,testing_ints, measure):
"""Train a given network on a set of instances.
"""
print ("\nError results for {}\n---------------------------".format(oaName))
times = [0]
for iteration in xrange(TRAINING_ITERATIONS):
start = time.clock()
oa.train()
elapsed = time.clock()-start
times.append(times[-1]+elapsed)
if iteration % 10 == 0:
MSE_trg, acc_trg = errorOnDataSet(network,training_ints,measure)
MSE_val, acc_val = errorOnDataSet(network,validation_ints,measure)
MSE_tst, acc_tst = errorOnDataSet(network,testing_ints,measure)
txt = '{},{},{},{},{},{},{},{}\n'.format(iteration,MSE_trg,MSE_val,MSE_tst,acc_trg,acc_val,acc_tst,times[-1]);
print (txt)
#with open(OUTFILE,'a+') as f:
f=open(OUTFILE,'a+')
f.write(txt)
f.close()
def main():
"""Run this experiment"""
training_ints = initialize_instances('m_trg.csv')
testing_ints = initialize_instances('m_test.csv')
validation_ints = initialize_instances('m_val.csv')
factory = BackPropagationNetworkFactory()
measure = SumOfSquaresError()
data_set = DataSet(training_ints)
relu = RELU()
rule = RPROPUpdateRule()
oa_names = ["Backprop"]
classification_network = factory.createClassificationNetwork([INPUT_LAYER, HIDDEN_LAYER1,HIDDEN_LAYER2,HIDDEN_LAYER3, OUTPUT_LAYER],relu)
train(BatchBackPropagationTrainer(data_set,classification_network,measure,rule), classification_network, 'Backprop', training_ints,validation_ints,testing_ints, measure)
if __name__ == "__main__":
#with open(OUTFILE,'w') as f:
f=open(OUTFILE,'a+')
f.write('{},{},{},{},{},{},{},{}\n'.format('iteration','MSE_trg','MSE_val','MSE_tst','acc_trg','acc_val','acc_tst','elapsed'))
f.close()
    main()
| 35.482759 | 173 | 0.670068 | ["MIT"] | tirthajyoti/Randomized_Optimization | ABAGAIL_execution/flipflop.py | 4,116 | Python |
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union
from ... import _utilities, _tables
from . import outputs
from ._enums import *
from ._inputs import *
__all__ = ['MachineLearningCompute']
class MachineLearningCompute(pulumi.CustomResource):
def __init__(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
compute_name: Optional[pulumi.Input[str]] = None,
identity: Optional[pulumi.Input[pulumi.InputType['IdentityArgs']]] = None,
location: Optional[pulumi.Input[str]] = None,
properties: Optional[pulumi.Input[Union[pulumi.InputType['AKSArgs'], pulumi.InputType['AmlComputeArgs'], pulumi.InputType['ComputeInstanceArgs'], pulumi.InputType['DataFactoryArgs'], pulumi.InputType['DataLakeAnalyticsArgs'], pulumi.InputType['DatabricksArgs'], pulumi.InputType['HDInsightArgs'], pulumi.InputType['VirtualMachineArgs']]]] = None,
resource_group_name: Optional[pulumi.Input[str]] = None,
sku: Optional[pulumi.Input[pulumi.InputType['SkuArgs']]] = None,
tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
workspace_name: Optional[pulumi.Input[str]] = None,
__props__=None,
__name__=None,
__opts__=None):
"""
Machine Learning compute object wrapped into ARM resource envelope.
:param str resource_name: The name of the resource.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[str] compute_name: Name of the Azure Machine Learning compute.
:param pulumi.Input[pulumi.InputType['IdentityArgs']] identity: The identity of the resource.
:param pulumi.Input[str] location: Specifies the location of the resource.
:param pulumi.Input[Union[pulumi.InputType['AKSArgs'], pulumi.InputType['AmlComputeArgs'], pulumi.InputType['ComputeInstanceArgs'], pulumi.InputType['DataFactoryArgs'], pulumi.InputType['DataLakeAnalyticsArgs'], pulumi.InputType['DatabricksArgs'], pulumi.InputType['HDInsightArgs'], pulumi.InputType['VirtualMachineArgs']]] properties: Compute properties
:param pulumi.Input[str] resource_group_name: Name of the resource group in which workspace is located.
:param pulumi.Input[pulumi.InputType['SkuArgs']] sku: The sku of the workspace.
:param pulumi.Input[Mapping[str, pulumi.Input[str]]] tags: Contains resource tags defined as key/value pairs.
:param pulumi.Input[str] workspace_name: Name of Azure Machine Learning workspace.
"""
if __name__ is not None:
warnings.warn("explicit use of __name__ is deprecated", DeprecationWarning)
resource_name = __name__
if __opts__ is not None:
warnings.warn("explicit use of __opts__ is deprecated, use 'opts' instead", DeprecationWarning)
opts = __opts__
if opts is None:
opts = pulumi.ResourceOptions()
if not isinstance(opts, pulumi.ResourceOptions):
raise TypeError('Expected resource options to be a ResourceOptions instance')
if opts.version is None:
opts.version = _utilities.get_version()
if opts.id is None:
if __props__ is not None:
raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
__props__ = dict()
__props__['compute_name'] = compute_name
__props__['identity'] = identity
__props__['location'] = location
__props__['properties'] = properties
if resource_group_name is None and not opts.urn:
raise TypeError("Missing required property 'resource_group_name'")
__props__['resource_group_name'] = resource_group_name
__props__['sku'] = sku
__props__['tags'] = tags
if workspace_name is None and not opts.urn:
raise TypeError("Missing required property 'workspace_name'")
__props__['workspace_name'] = workspace_name
__props__['name'] = None
__props__['system_data'] = None
__props__['type'] = None
alias_opts = pulumi.ResourceOptions(aliases=[pulumi.Alias(type_="azure-nextgen:machinelearningservices/v20210101:MachineLearningCompute"), pulumi.Alias(type_="azure-native:machinelearningservices:MachineLearningCompute"), pulumi.Alias(type_="azure-nextgen:machinelearningservices:MachineLearningCompute"), pulumi.Alias(type_="azure-native:machinelearningservices/latest:MachineLearningCompute"), pulumi.Alias(type_="azure-nextgen:machinelearningservices/latest:MachineLearningCompute"), pulumi.Alias(type_="azure-native:machinelearningservices/v20180301preview:MachineLearningCompute"), pulumi.Alias(type_="azure-nextgen:machinelearningservices/v20180301preview:MachineLearningCompute"), pulumi.Alias(type_="azure-native:machinelearningservices/v20181119:MachineLearningCompute"), pulumi.Alias(type_="azure-nextgen:machinelearningservices/v20181119:MachineLearningCompute"), pulumi.Alias(type_="azure-native:machinelearningservices/v20190501:MachineLearningCompute"), pulumi.Alias(type_="azure-nextgen:machinelearningservices/v20190501:MachineLearningCompute"), pulumi.Alias(type_="azure-native:machinelearningservices/v20190601:MachineLearningCompute"), pulumi.Alias(type_="azure-nextgen:machinelearningservices/v20190601:MachineLearningCompute"), pulumi.Alias(type_="azure-native:machinelearningservices/v20191101:MachineLearningCompute"), pulumi.Alias(type_="azure-nextgen:machinelearningservices/v20191101:MachineLearningCompute"), pulumi.Alias(type_="azure-native:machinelearningservices/v20200101:MachineLearningCompute"), pulumi.Alias(type_="azure-nextgen:machinelearningservices/v20200101:MachineLearningCompute"), pulumi.Alias(type_="azure-native:machinelearningservices/v20200218preview:MachineLearningCompute"), pulumi.Alias(type_="azure-nextgen:machinelearningservices/v20200218preview:MachineLearningCompute"), pulumi.Alias(type_="azure-native:machinelearningservices/v20200301:MachineLearningCompute"), pulumi.Alias(type_="azure-nextgen:machinelearningservices/v20200301:MachineLearningCompute"), pulumi.Alias(type_="azure-native:machinelearningservices/v20200401:MachineLearningCompute"), pulumi.Alias(type_="azure-nextgen:machinelearningservices/v20200401:MachineLearningCompute"), pulumi.Alias(type_="azure-native:machinelearningservices/v20200501preview:MachineLearningCompute"), pulumi.Alias(type_="azure-nextgen:machinelearningservices/v20200501preview:MachineLearningCompute"), pulumi.Alias(type_="azure-native:machinelearningservices/v20200515preview:MachineLearningCompute"), pulumi.Alias(type_="azure-nextgen:machinelearningservices/v20200515preview:MachineLearningCompute"), pulumi.Alias(type_="azure-native:machinelearningservices/v20200601:MachineLearningCompute"), pulumi.Alias(type_="azure-nextgen:machinelearningservices/v20200601:MachineLearningCompute"), pulumi.Alias(type_="azure-native:machinelearningservices/v20200801:MachineLearningCompute"), pulumi.Alias(type_="azure-nextgen:machinelearningservices/v20200801:MachineLearningCompute"), pulumi.Alias(type_="azure-native:machinelearningservices/v20200901preview:MachineLearningCompute"), pulumi.Alias(type_="azure-nextgen:machinelearningservices/v20200901preview:MachineLearningCompute")])
opts = pulumi.ResourceOptions.merge(opts, alias_opts)
super(MachineLearningCompute, __self__).__init__(
'azure-native:machinelearningservices/v20210101:MachineLearningCompute',
resource_name,
__props__,
opts)
@staticmethod
def get(resource_name: str,
id: pulumi.Input[str],
opts: Optional[pulumi.ResourceOptions] = None) -> 'MachineLearningCompute':
"""
Get an existing MachineLearningCompute resource's state with the given name, id, and optional extra
properties used to qualify the lookup.
:param str resource_name: The unique name of the resulting resource.
:param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
:param pulumi.ResourceOptions opts: Options for the resource.
"""
opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
__props__ = dict()
__props__["identity"] = None
__props__["location"] = None
__props__["name"] = None
__props__["properties"] = None
__props__["sku"] = None
__props__["system_data"] = None
__props__["tags"] = None
__props__["type"] = None
return MachineLearningCompute(resource_name, opts=opts, __props__=__props__)
@property
@pulumi.getter
def identity(self) -> pulumi.Output[Optional['outputs.IdentityResponse']]:
"""
The identity of the resource.
"""
return pulumi.get(self, "identity")
@property
@pulumi.getter
def location(self) -> pulumi.Output[Optional[str]]:
"""
Specifies the location of the resource.
"""
return pulumi.get(self, "location")
@property
@pulumi.getter
def name(self) -> pulumi.Output[str]:
"""
Specifies the name of the resource.
"""
return pulumi.get(self, "name")
@property
@pulumi.getter
def properties(self) -> pulumi.Output[Any]:
"""
Compute properties
"""
return pulumi.get(self, "properties")
@property
@pulumi.getter
def sku(self) -> pulumi.Output[Optional['outputs.SkuResponse']]:
"""
The sku of the workspace.
"""
return pulumi.get(self, "sku")
@property
@pulumi.getter(name="systemData")
def system_data(self) -> pulumi.Output['outputs.SystemDataResponse']:
"""
Read only system data
"""
return pulumi.get(self, "system_data")
@property
@pulumi.getter
def tags(self) -> pulumi.Output[Optional[Mapping[str, str]]]:
"""
Contains resource tags defined as key/value pairs.
"""
return pulumi.get(self, "tags")
@property
@pulumi.getter
def type(self) -> pulumi.Output[str]:
"""
Specifies the type of the resource.
"""
return pulumi.get(self, "type")
def translate_output_property(self, prop):
return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
def translate_input_property(self, prop):
return _tables.SNAKE_TO_CAMEL_CASE_TABLE.get(prop) or prop
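# Illustrative usage of the generated resource (a sketch only; the resource
# group, workspace and AmlComputeArgs fields below are assumptions, not part
# of this file):
#
#     import pulumi
#     from pulumi_azure_native.machinelearningservices import v20210101 as ml
#
#     compute = ml.MachineLearningCompute(
#         "cpu-cluster",
#         resource_group_name="my-rg",        # hypothetical resource group
#         workspace_name="my-workspace",      # hypothetical workspace
#         compute_name="cpu-cluster",
#         properties=ml.AmlComputeArgs(compute_type="AmlCompute"),
#     )
#     pulumi.export("compute_id", compute.id)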
| 59.895604 | 3,183 | 0.710669 | ["Apache-2.0"] | pulumi-bot/pulumi-azure-native | sdk/python/pulumi_azure_native/machinelearningservices/v20210101/machine_learning_compute.py | 10,901 | Python |
# Copyright Contributors to the Amundsen project.
# SPDX-License-Identifier: Apache-2.0
import unittest
from unittest.mock import ANY
from databuilder.models.graph_serializable import (
RELATION_END_KEY, RELATION_END_LABEL, RELATION_REVERSE_TYPE, RELATION_START_KEY, RELATION_START_LABEL,
RELATION_TYPE,
)
from databuilder.models.table_source import TableSource
from databuilder.serializers import neo4_serializer, neptune_serializer
from databuilder.serializers.neptune_serializer import (
NEPTUNE_CREATION_TYPE_JOB, NEPTUNE_CREATION_TYPE_RELATIONSHIP_PROPERTY_NAME_BULK_LOADER_FORMAT, NEPTUNE_HEADER_ID,
NEPTUNE_HEADER_LABEL, NEPTUNE_LAST_EXTRACTED_AT_RELATIONSHIP_PROPERTY_NAME_BULK_LOADER_FORMAT,
NEPTUNE_RELATIONSHIP_HEADER_FROM, NEPTUNE_RELATIONSHIP_HEADER_TO,
)
DB = 'hive'
SCHEMA = 'base'
TABLE = 'test'
CLUSTER = 'default'
SOURCE = '/etl/sql/file.py'
class TestTableSource(unittest.TestCase):
def setUp(self) -> None:
super(TestTableSource, self).setUp()
self.table_source = TableSource(db_name='hive',
schema=SCHEMA,
table_name=TABLE,
cluster=CLUSTER,
source=SOURCE)
self.start_key = f'{DB}://{CLUSTER}.{SCHEMA}/{TABLE}/_source'
self.end_key = f'{DB}://{CLUSTER}.{SCHEMA}/{TABLE}'
def test_get_source_model_key(self) -> None:
source = self.table_source.get_source_model_key()
self.assertEqual(source, f'{DB}://{CLUSTER}.{SCHEMA}/{TABLE}/_source')
def test_get_metadata_model_key(self) -> None:
metadata = self.table_source.get_metadata_model_key()
self.assertEqual(metadata, 'hive://default.base/test')
def test_create_nodes(self) -> None:
nodes = self.table_source.create_nodes()
self.assertEqual(len(nodes), 1)
def test_create_relation(self) -> None:
relations = self.table_source.create_relation()
        self.assertEqual(len(relations), 1)
serialized_relation = neo4_serializer.serialize_relationship(relations[0])
expected_relation = {
RELATION_START_KEY: self.start_key,
RELATION_START_LABEL: TableSource.LABEL,
RELATION_END_KEY: self.end_key,
RELATION_END_LABEL: 'Table',
RELATION_TYPE: TableSource.SOURCE_TABLE_RELATION_TYPE,
RELATION_REVERSE_TYPE: TableSource.TABLE_SOURCE_RELATION_TYPE
}
self.assertDictEqual(expected_relation, serialized_relation)
def test_create_relation_neptune(self) -> None:
relations = self.table_source.create_relation()
serialized_relations = neptune_serializer.convert_relationship(relations[0])
expected = [
{
NEPTUNE_HEADER_ID: "{from_vertex_id}_{to_vertex_id}_{label}".format(
from_vertex_id=self.start_key,
to_vertex_id=self.end_key,
label=TableSource.SOURCE_TABLE_RELATION_TYPE
),
NEPTUNE_RELATIONSHIP_HEADER_FROM: self.start_key,
NEPTUNE_RELATIONSHIP_HEADER_TO: self.end_key,
NEPTUNE_HEADER_LABEL: TableSource.SOURCE_TABLE_RELATION_TYPE,
NEPTUNE_LAST_EXTRACTED_AT_RELATIONSHIP_PROPERTY_NAME_BULK_LOADER_FORMAT: ANY,
NEPTUNE_CREATION_TYPE_RELATIONSHIP_PROPERTY_NAME_BULK_LOADER_FORMAT: NEPTUNE_CREATION_TYPE_JOB
},
{
NEPTUNE_HEADER_ID: "{from_vertex_id}_{to_vertex_id}_{label}".format(
from_vertex_id=self.end_key,
to_vertex_id=self.start_key,
label=TableSource.TABLE_SOURCE_RELATION_TYPE
),
NEPTUNE_RELATIONSHIP_HEADER_FROM: self.end_key,
NEPTUNE_RELATIONSHIP_HEADER_TO: self.start_key,
NEPTUNE_HEADER_LABEL: TableSource.TABLE_SOURCE_RELATION_TYPE,
NEPTUNE_LAST_EXTRACTED_AT_RELATIONSHIP_PROPERTY_NAME_BULK_LOADER_FORMAT: ANY,
NEPTUNE_CREATION_TYPE_RELATIONSHIP_PROPERTY_NAME_BULK_LOADER_FORMAT: NEPTUNE_CREATION_TYPE_JOB
}
]
self.assertListEqual(expected, serialized_relations)
| 43.20202 | 118 | 0.679448 | ["Apache-2.0"] | JacobSMoller/amundsendatabuilder | tests/unit/models/test_table_source.py | 4,277 | Python |
__copyright__ = "Copyright (C) 2009-2013 Andreas Kloeckner"
__license__ = """
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
"""
import pymbolic.primitives as prim
import pytest
from pymbolic import parse
from pytools.lex import ParseError
from pymbolic.mapper import IdentityMapper
try:
reduce
except NameError:
from functools import reduce
# {{{ utilities
def assert_parsed_same_as_python(expr_str):
# makes sure that has only one line
expr_str, = expr_str.split("\n")
from pymbolic.interop.ast import ASTToPymbolic
import ast
ast2p = ASTToPymbolic()
try:
expr_parsed_by_python = ast2p(ast.parse(expr_str).body[0].value)
except SyntaxError:
with pytest.raises(ParseError):
parse(expr_str)
else:
expr_parsed_by_pymbolic = parse(expr_str)
assert expr_parsed_by_python == expr_parsed_by_pymbolic
def assert_parse_roundtrip(expr_str):
expr = parse(expr_str)
from pymbolic.mapper.stringifier import StringifyMapper
strified = StringifyMapper()(expr)
assert strified == expr_str, (strified, expr_str)
# }}}
def test_integer_power():
from pymbolic.algorithm import integer_power
for base, expn in [
(17, 5),
(17, 2**10),
(13, 20),
(13, 1343),
]:
assert base**expn == integer_power(base, expn)
def test_expand():
from pymbolic import var, expand
x = var("x")
u = (x+1)**5
expand(u)
def test_substitute():
from pymbolic import parse, substitute, evaluate
u = parse("5+x.min**2")
xmin = parse("x.min")
assert evaluate(substitute(u, {xmin: 25})) == 630
def test_no_comparison():
from pymbolic import parse
x = parse("17+3*x")
y = parse("12-5*y")
def expect_typeerror(f):
try:
f()
except TypeError:
pass
else:
raise AssertionError
expect_typeerror(lambda: x < y)
expect_typeerror(lambda: x <= y)
expect_typeerror(lambda: x > y)
expect_typeerror(lambda: x >= y)
def test_structure_preservation():
x = prim.Sum((5, 7))
from pymbolic.mapper import IdentityMapper
x2 = IdentityMapper()(x)
assert x == x2
def test_sympy_interaction():
pytest.importorskip("sympy")
import sympy as sp
x, y = sp.symbols("x y")
f = sp.Function("f")
s1_expr = 1/f(x/sp.sqrt(x**2+y**2)).diff(x, 5) # pylint:disable=not-callable
from pymbolic.interop.sympy import (
SympyToPymbolicMapper,
PymbolicToSympyMapper)
s2p = SympyToPymbolicMapper()
p2s = PymbolicToSympyMapper()
p1_expr = s2p(s1_expr)
s2_expr = p2s(p1_expr)
assert sp.ratsimp(s1_expr - s2_expr) == 0
p2_expr = s2p(s2_expr)
s3_expr = p2s(p2_expr)
assert sp.ratsimp(s1_expr - s3_expr) == 0
# {{{ fft
def test_fft_with_floats():
numpy = pytest.importorskip("numpy")
import numpy.linalg as la
from pymbolic.algorithm import fft, ifft
for n in [2**i for i in range(4, 10)]+[17, 12, 948]:
a = numpy.random.rand(n) + 1j*numpy.random.rand(n)
f_a = fft(a)
a2 = ifft(f_a)
assert la.norm(a-a2) < 1e-10
f_a_numpy = numpy.fft.fft(a)
assert la.norm(f_a-f_a_numpy) < 1e-10
class NearZeroKiller(IdentityMapper):
def map_constant(self, expr):
if isinstance(expr, complex):
r = expr.real
i = expr.imag
if abs(r) < 1e-15:
r = 0
if abs(i) < 1e-15:
i = 0
return complex(r, i)
else:
return expr
def test_fft():
numpy = pytest.importorskip("numpy")
from pymbolic import var
from pymbolic.algorithm import fft, sym_fft
vars = numpy.array([var(chr(97+i)) for i in range(16)], dtype=object)
print(vars)
print(fft(vars))
traced_fft = sym_fft(vars)
from pymbolic.mapper.stringifier import PREC_NONE
from pymbolic.mapper.c_code import CCodeMapper
ccm = CCodeMapper()
code = [ccm(tfi, PREC_NONE) for tfi in traced_fft]
for cse_name, cse_str in enumerate(ccm.cse_name_list):
print(f"{cse_name} = {cse_str}")
for i, line in enumerate(code):
print("result[%d] = %s" % (i, line))
# }}}
def test_sparse_multiply():
numpy = pytest.importorskip("numpy")
pytest.importorskip("scipy")
import scipy.sparse as ss
la = numpy.linalg
mat = numpy.random.randn(10, 10)
s_mat = ss.csr_matrix(mat)
vec = numpy.random.randn(10)
mat_vec = s_mat*vec
from pymbolic.algorithm import csr_matrix_multiply
mat_vec_2 = csr_matrix_multiply(s_mat, vec)
assert la.norm(mat_vec-mat_vec_2) < 1e-14
# {{{ parser
def test_parser():
from pymbolic import parse
parse("(2*a[1]*b[1]+2*a[0]*b[0])*(hankel_1(-1,sqrt(a[1]**2+a[0]**2)*k) "
"-hankel_1(1,sqrt(a[1]**2+a[0]**2)*k))*k /(4*sqrt(a[1]**2+a[0]**2)) "
"+hankel_1(0,sqrt(a[1]**2+a[0]**2)*k)")
print(repr(parse("d4knl0")))
print(repr(parse("0.")))
print(repr(parse("0.e1")))
assert parse("0.e1") == 0
assert parse("1e-12") == 1e-12
print(repr(parse("a >= 1")))
print(repr(parse("a <= 1")))
print(repr(parse(":")))
print(repr(parse("1:")))
print(repr(parse(":2")))
print(repr(parse("1:2")))
print(repr(parse("::")))
print(repr(parse("1::")))
print(repr(parse(":1:")))
print(repr(parse("::1")))
print(repr(parse("3::1")))
print(repr(parse(":5:1")))
print(repr(parse("3:5:1")))
assert_parse_roundtrip("()")
assert_parse_roundtrip("(3,)")
assert_parse_roundtrip("[x + 3, 3, 5]")
assert_parse_roundtrip("[]")
assert_parse_roundtrip("[x]")
assert_parse_roundtrip("g[i, k] + 2.0*h[i, k]")
parse("g[i,k]+(+2.0)*h[i, k]")
print(repr(parse("a - b - c")))
print(repr(parse("-a - -b - -c")))
print(repr(parse("- - - a - - - - b - - - - - c")))
print(repr(parse("~(a ^ b)")))
print(repr(parse("(a | b) | ~(~a & ~b)")))
print(repr(parse("3 << 1")))
print(repr(parse("1 >> 3")))
print(parse("3::1"))
assert parse("e1") == prim.Variable("e1")
assert parse("d1") == prim.Variable("d1")
from pymbolic import variables
f, x, y, z = variables("f x y z")
assert parse("f((x,y),z)") == f((x, y), z)
assert parse("f((x,),z)") == f((x,), z)
assert parse("f(x,(y,z),z)") == f(x, (y, z), z)
assert parse("f(x,(y,z),z, name=15)") == f(x, (y, z), z, name=15)
assert parse("f(x,(y,z),z, name=15, name2=17)") == f(
x, (y, z), z, name=15, name2=17)
assert_parsed_same_as_python("5+i if i>=0 else (0 if i<-1 else 10)")
assert_parsed_same_as_python("0 if 1 if 2 else 3 else 4")
assert_parsed_same_as_python("0 if (1 if 2 else 3) else 4")
assert_parsed_same_as_python("(2, 3,)")
with pytest.deprecated_call():
parse("1+if(0, 1, 2)")
# }}}
def test_mappers():
from pymbolic import variables
f, x, y, z = variables("f x y z")
for expr in [
f(x, (y, z), name=z**2)
]:
from pymbolic.mapper import WalkMapper
from pymbolic.mapper.dependency import DependencyMapper
str(expr)
IdentityMapper()(expr)
WalkMapper()(expr)
DependencyMapper()(expr)
def test_func_dep_consistency():
from pymbolic import var
from pymbolic.mapper.dependency import DependencyMapper
f = var("f")
x = var("x")
dep_map = DependencyMapper(include_calls="descend_args")
assert dep_map(f(x)) == {x}
assert dep_map(f(x=x)) == {x}
def test_conditions():
from pymbolic import var
x = var("x")
y = var("y")
assert str(x.eq(y).and_(x.le(5))) == "x == y and x <= 5"
def test_graphviz():
from pymbolic import parse
expr = parse("(2*a[1]*b[1]+2*a[0]*b[0])*(hankel_1(-1,sqrt(a[1]**2+a[0]**2)*k) "
"-hankel_1(1,sqrt(a[1]**2+a[0]**2)*k))*k /(4*sqrt(a[1]**2+a[0]**2)) "
"+hankel_1(0,sqrt(a[1]**2+a[0]**2)*k)")
from pymbolic.mapper.graphviz import GraphvizMapper
gvm = GraphvizMapper()
gvm(expr)
print(gvm.get_dot_code())
# {{{ geometric algebra
@pytest.mark.parametrize("dims", [2, 3, 4, 5])
# START_GA_TEST
def test_geometric_algebra(dims):
pytest.importorskip("numpy")
import numpy as np
from pymbolic.geometric_algebra import MultiVector as MV # noqa
vec1 = MV(np.random.randn(dims))
vec2 = MV(np.random.randn(dims))
vec3 = MV(np.random.randn(dims))
vec4 = MV(np.random.randn(dims))
vec5 = MV(np.random.randn(dims))
# Fundamental identity
assert ((vec1 ^ vec2) + (vec1 | vec2)).close_to(vec1*vec2)
# Antisymmetry
assert (vec1 ^ vec2 ^ vec3).close_to(- vec2 ^ vec1 ^ vec3)
vecs = [vec1, vec2, vec3, vec4, vec5]
if len(vecs) > dims:
from operator import xor as outer
assert reduce(outer, vecs).close_to(0)
assert (vec1.inv()*vec1).close_to(1)
assert (vec1*vec1.inv()).close_to(1)
assert ((1/vec1)*vec1).close_to(1)
assert (vec1/vec1).close_to(1)
for a, b, c in [
(vec1, vec2, vec3),
(vec1*vec2, vec3, vec4),
(vec1, vec2*vec3, vec4),
(vec1, vec2, vec3*vec4),
(vec1, vec2, vec3*vec4*vec5),
(vec1, vec2*vec1, vec3*vec4*vec5),
]:
# Associativity
assert ((a*b)*c).close_to(a*(b*c))
assert ((a ^ b) ^ c).close_to(a ^ (b ^ c))
# The inner product is not associative.
# scalar product
assert ((c*b).project(0)) .close_to(b.scalar_product(c))
assert ((c.rev()*b).project(0)) .close_to(b.rev().scalar_product(c))
assert ((b.rev()*b).project(0)) .close_to(b.norm_squared())
assert b.norm_squared() >= 0
assert c.norm_squared() >= 0
# Cauchy's inequality
assert b.scalar_product(c) <= abs(b)*abs(c) + 1e-13
# contractions
# (3.18) in [DFM]
assert abs(b.scalar_product(a ^ c) - (b >> a).scalar_product(c)) < 1e-13
# duality, (3.20) in [DFM]
assert ((a ^ b) << c) .close_to(a << (b << c))
# two definitions of the dual agree: (1.2.26) in [HS]
# and (sec 3.5.3) in [DFW]
assert (c << c.I.rev()).close_to(c | c.I.rev())
# inverse
for div in list(b.gen_blades()) + [vec1, vec1.I]:
assert (div.inv()*div).close_to(1)
assert (div*div.inv()).close_to(1)
assert ((1/div)*div).close_to(1)
assert (div/div).close_to(1)
assert ((c/div)*div).close_to(c)
assert ((c*div)/div).close_to(c)
# reverse properties (Sec 2.9.5 [DFM])
assert c.rev().rev() == c
assert (b ^ c).rev() .close_to(c.rev() ^ b.rev())
# dual properties
# (1.2.26) in [HS]
assert c.dual() .close_to(c | c.I.rev())
assert c.dual() .close_to(c*c.I.rev())
# involution properties (Sec 2.9.5 DFW)
assert c.invol().invol() == c
assert (b ^ c).invol() .close_to(b.invol() ^ c.invol())
# commutator properties
# Jacobi identity (1.1.56c) in [HS] or (8.2) in [DFW]
assert (a.x(b.x(c)) + b.x(c.x(a)) + c.x(a.x(b))).close_to(0)
# (1.57) in [HS]
assert a.x(b*c) .close_to(a.x(b)*c + b*a.x(c))
# END_GA_TEST
# }}}
def test_ast_interop():
src = """
def f():
xx = 3*y + z * (12 if x < 13 else 13)
yy = f(x, y=y)
"""
import ast
mod = ast.parse(src.replace("\n ", "\n"))
print(ast.dump(mod))
from pymbolic.interop.ast import ASTToPymbolic
ast2p = ASTToPymbolic()
for f in mod.body:
if not isinstance(f, ast.FunctionDef):
continue
for stmt in f.body:
if not isinstance(stmt, ast.Assign):
continue
lhs, = stmt.targets
lhs = ast2p(lhs)
rhs = ast2p(stmt.value)
print(lhs, rhs)
def test_compile():
from pymbolic import parse, compile
code = compile(parse("x ** y"), ["x", "y"])
assert code(2, 5) == 32
# Test pickling of compiled code.
import pickle
code = pickle.loads(pickle.dumps(code))
assert code(3, 3) == 27
def test_unifier():
from pymbolic import var
from pymbolic.mapper.unifier import UnidirectionalUnifier
a, b, c, d, e, f = [var(s) for s in "abcdef"]
def match_found(records, eqns):
for record in records:
if eqns <= set(record.equations):
return True
return False
recs = UnidirectionalUnifier("abc")(a+b*c, d+e*f)
assert len(recs) == 2
assert match_found(recs, {(a, d), (b, e), (c, f)})
assert match_found(recs, {(a, d), (b, f), (c, e)})
recs = UnidirectionalUnifier("abc")(a+b, d+e+f)
assert len(recs) == 6
assert match_found(recs, {(a, d), (b, e+f)})
assert match_found(recs, {(a, e), (b, d+f)})
assert match_found(recs, {(a, f), (b, d+e)})
assert match_found(recs, {(b, d), (a, e+f)})
assert match_found(recs, {(b, e), (a, d+f)})
assert match_found(recs, {(b, f), (a, d+e)})
vals = [var("v" + str(i)) for i in range(100)]
recs = UnidirectionalUnifier("a")(sum(vals[1:]) + a, sum(vals))
assert len(recs) == 1
assert match_found(recs, {(a, var("v0"))})
recs = UnidirectionalUnifier("abc")(a+b+c, d+e)
assert len(recs) == 0
recs = UnidirectionalUnifier("abc")(f(a+b, f(a+c)), f(b+c, f(b+d)))
assert len(recs) == 1
assert match_found(recs, {(a, b), (b, c), (c, d)})
def test_long_sympy_mapping():
sp = pytest.importorskip("sympy")
from pymbolic.interop.sympy import SympyToPymbolicMapper
SympyToPymbolicMapper()(sp.sympify(int(10**20)))
SympyToPymbolicMapper()(sp.sympify(int(10)))
def test_stringifier_preserve_shift_order():
for expr in [
parse("(a << b) >> 2"),
parse("a << (b >> 2)")
]:
assert parse(str(expr)) == expr
LATEX_TEMPLATE = r"""\documentclass{article}
\usepackage{amsmath}
\begin{document}
%s
\end{document}"""
def test_latex_mapper():
from pymbolic import parse
from pymbolic.mapper.stringifier import LaTeXMapper, StringifyMapper
tm = LaTeXMapper()
sm = StringifyMapper()
equations = []
def add(expr):
# Add an equation to the list of tests.
equations.append(r"\[{}\] % from: {}".format(tm(expr), sm(expr)))
add(parse("a * b + c"))
add(parse("f(a,b,c)"))
add(parse("a ** b ** c"))
add(parse("(a | b) ^ ~c"))
add(parse("a << b"))
add(parse("a >> b"))
add(parse("a[i,j,k]"))
add(parse("a[1:3]"))
add(parse("a // b"))
add(parse("not (a or b) and c"))
add(parse("(a % b) % c"))
add(parse("(a >= b) or (b <= c)"))
add(prim.Min((1,)) + prim.Max((1, 2)))
add(prim.Substitution(prim.Variable("x") ** 2, ("x",), (2,)))
add(prim.Derivative(parse("x**2"), ("x",)))
# Run LaTeX and ensure the file compiles.
import os
import tempfile
import subprocess
import shutil
latex_dir = tempfile.mkdtemp("pymbolic")
try:
tex_file_path = os.path.join(latex_dir, "input.tex")
with open(tex_file_path, "w") as tex_file:
contents = LATEX_TEMPLATE % "\n".join(equations)
tex_file.write(contents)
try:
subprocess.check_output(
["latex",
"-interaction=nonstopmode",
"-output-directory=%s" % latex_dir,
tex_file_path],
universal_newlines=True)
except OSError: # FIXME: Should be FileNotFoundError on Py3
pytest.skip("latex command not found")
except subprocess.CalledProcessError as err:
raise AssertionError(str(err.output))
finally:
shutil.rmtree(latex_dir)
def test_flop_counter():
x = prim.Variable("x")
y = prim.Variable("y")
z = prim.Variable("z")
subexpr = prim.CommonSubexpression(3 * (x**2 + y + z))
expr = 3*subexpr + subexpr
from pymbolic.mapper.flop_counter import FlopCounter, CSEAwareFlopCounter
assert FlopCounter()(expr) == 4 * 2 + 2
assert CSEAwareFlopCounter()(expr) == 4 + 2
def test_make_sym_vector():
numpy = pytest.importorskip("numpy")
from pymbolic.primitives import make_sym_vector
assert len(make_sym_vector("vec", 2)) == 2
assert len(make_sym_vector("vec", numpy.int32(2))) == 2
assert len(make_sym_vector("vec", [1, 2, 3])) == 3
def test_multiplicative_stringify_preserves_association():
for inner in ["*", " / ", " // ", " % "]:
for outer in ["*", " / ", " // ", " % "]:
if outer == inner:
continue
assert_parse_roundtrip(f"x{outer}(y{inner}z)")
assert_parse_roundtrip(f"(y{inner}z){outer}x")
assert_parse_roundtrip("(-1)*(((-1)*x) / 5)")
def test_differentiator_flags_for_nonsmooth_and_discontinuous():
import pymbolic.functions as pf
from pymbolic.mapper.differentiator import differentiate
x = prim.Variable("x")
with pytest.raises(ValueError):
differentiate(pf.fabs(x), x)
result = differentiate(pf.fabs(x), x, allowed_nonsmoothness="continuous")
assert result == pf.sign(x)
with pytest.raises(ValueError):
differentiate(pf.sign(x), x)
result = differentiate(pf.sign(x), x, allowed_nonsmoothness="discontinuous")
assert result == 0
def test_np_bool_handling():
from pymbolic.mapper.evaluator import evaluate
numpy = pytest.importorskip("numpy")
expr = prim.LogicalNot(numpy.bool_(False))
assert evaluate(expr) is True
if __name__ == "__main__":
import sys
if len(sys.argv) > 1:
exec(sys.argv[1])
else:
from pytest import main
main([__file__])
# vim: fdm=marker
| 27.831361 | 83 | 0.590837 | ["MIT"] | sv2518/pymbolic | test/test_pymbolic.py | 18,814 | Python |
"""Support for the OpenWeatherMap (OWM) service."""
from homeassistant.components.weather import WeatherEntity
from homeassistant.const import TEMP_CELSIUS
from .const import (
ATTR_API_CONDITION,
ATTR_API_FORECAST,
ATTR_API_HUMIDITY,
ATTR_API_PRESSURE,
ATTR_API_TEMPERATURE,
ATTR_API_WIND_BEARING,
ATTR_API_WIND_SPEED,
ATTRIBUTION,
DOMAIN,
ENTRY_NAME,
ENTRY_WEATHER_COORDINATOR,
)
from .weather_update_coordinator import WeatherUpdateCoordinator
async def async_setup_entry(hass, config_entry, async_add_entities):
"""Set up OpenWeatherMap weather entity based on a config entry."""
domain_data = hass.data[DOMAIN][config_entry.entry_id]
name = domain_data[ENTRY_NAME]
weather_coordinator = domain_data[ENTRY_WEATHER_COORDINATOR]
unique_id = f"{config_entry.unique_id}"
owm_weather = OpenWeatherMapWeather(name, unique_id, weather_coordinator)
async_add_entities([owm_weather], False)
class OpenWeatherMapWeather(WeatherEntity):
"""Implementation of an OpenWeatherMap sensor."""
def __init__(
self,
name,
unique_id,
weather_coordinator: WeatherUpdateCoordinator,
):
"""Initialize the sensor."""
self._name = name
self._unique_id = unique_id
self._weather_coordinator = weather_coordinator
@property
def name(self):
"""Return the name of the sensor."""
return self._name
@property
def unique_id(self):
"""Return a unique_id for this entity."""
return self._unique_id
@property
def should_poll(self):
"""Return the polling requirement of the entity."""
return False
@property
def attribution(self):
"""Return the attribution."""
return ATTRIBUTION
@property
def condition(self):
"""Return the current condition."""
return self._weather_coordinator.data[ATTR_API_CONDITION]
@property
def temperature(self):
"""Return the temperature."""
return self._weather_coordinator.data[ATTR_API_TEMPERATURE]
@property
def temperature_unit(self):
"""Return the unit of measurement."""
return TEMP_CELSIUS
@property
def pressure(self):
"""Return the pressure."""
return self._weather_coordinator.data[ATTR_API_PRESSURE]
@property
def humidity(self):
"""Return the humidity."""
return self._weather_coordinator.data[ATTR_API_HUMIDITY]
@property
def wind_speed(self):
"""Return the wind speed."""
wind_speed = self._weather_coordinator.data[ATTR_API_WIND_SPEED]
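        # The OWM data is in m/s: convert to mph (* 2.24) for imperial
        # installations, otherwise to km/h (* 3.6).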
if self.hass.config.units.name == "imperial":
return round(wind_speed * 2.24, 2)
return round(wind_speed * 3.6, 2)
@property
def wind_bearing(self):
"""Return the wind bearing."""
return self._weather_coordinator.data[ATTR_API_WIND_BEARING]
@property
def forecast(self):
"""Return the forecast array."""
return self._weather_coordinator.data[ATTR_API_FORECAST]
@property
def available(self):
"""Return True if entity is available."""
return self._weather_coordinator.last_update_success
async def async_added_to_hass(self):
"""Connect to dispatcher listening for entity data notifications."""
self.async_on_remove(
self._weather_coordinator.async_add_listener(self.async_write_ha_state)
)
async def async_update(self):
"""Get the latest data from OWM and updates the states."""
await self._weather_coordinator.async_request_refresh()
| 29.435484 | 83 | 0.682466 | ["Apache-2.0"] | 123dev/core | homeassistant/components/openweathermap/weather.py | 3,650 | Python |
# -*- coding: utf-8 -*-
# nodeandtag's package version information
__version_major__ = "0.2"
__version__ = "{}a1".format(__version_major__)
__version_long__ = "{}a1".format(__version_major__)
__status__ = "Alpha"
__author__ = "Jeremy Morosi"
__author_email__ = "[email protected]"
__url__ = "https://github.com/Nauja/nodeandtag"
| 30.636364 | 51 | 0.750742 | ["MIT"] | Nauja/noteandtag | noteandtag/__version__.py | 337 | Python |
from __future__ import absolute_import, division, print_function
import os
import subprocess
import sys
from setuptools import find_packages, setup
PROJECT_PATH = os.path.dirname(os.path.abspath(__file__))
VERSION = """
# This file is auto-generated with the version information during setup.py installation.
__version__ = '{}'
"""
# Find pyro version.
for line in open(os.path.join(PROJECT_PATH, 'pyro', '__init__.py')):
if line.startswith('version_prefix = '):
version = line.strip().split()[2][1:-1]
# Append current commit sha to version
commit_sha = ''
try:
commit_sha = subprocess.check_output(['git', 'rev-parse', '--short', 'HEAD'],
cwd=PROJECT_PATH).decode('ascii').strip()
except OSError:
pass
# Write version to _version.py
if commit_sha:
version += '+{}'.format(commit_sha)
with open(os.path.join(PROJECT_PATH, 'pyro', '_version.py'), 'w') as f:
f.write(VERSION.format(version))
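# For illustration (values are an assumption): with version_prefix = '0.3.0'
# and commit abc1234 checked out, pyro/_version.py ends up containing
#     __version__ = '0.3.0+abc1234'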
# Convert README.md to rst for display at https://pypi.python.org/pypi/pyro-ppl
# When releasing on pypi, make sure pandoc is on your system:
# $ brew install pandoc # OS X
# $ sudo apt-get install pandoc # Ubuntu Linux
try:
import pypandoc
long_description = pypandoc.convert('README.md', 'rst')
except (IOError, ImportError, OSError) as e:
sys.stderr.write('Failed to convert README.md to rst:\n {}\n'.format(e))
sys.stderr.flush()
long_description = open('README.md').read()
# Remove badges since they will always be obsolete.
blacklist = ['Build Status', 'Latest Version', 'Documentation Status',
'travis-ci.org', 'pypi.python.org', 'pyro-ppl.readthedocs.io']
long_description = '\n'.join(
[line for line in long_description.split('\n') if not any(patt in line for patt in blacklist)])
# examples/tutorials
EXTRAS_REQUIRE = [
'jupyter>=1.0.0',
'matplotlib>=1.3',
'observations>=0.1.4',
'pillow',
'torchvision',
'visdom>=0.1.4',
'pandas',
'wget',
]
if sys.version_info[0] == 2:
EXTRAS_REQUIRE.append('functools32')
setup(
name='pyro-ppl',
version=version,
description='A Python library for probabilistic modeling and inference',
long_description=long_description,
packages=find_packages(include=['pyro', 'pyro.*']),
url='http://pyro.ai',
author='Uber AI Labs',
author_email='[email protected]',
install_requires=[
# if you add any additional libraries, please also
# add them to `docs/requirements.txt`
'contextlib2',
'graphviz>=0.8',
'networkx>=2.2',
'numpy>=1.7',
'opt_einsum>=2.2.0',
'six>=1.10.0',
'torch==0.4.0',
'tqdm>=4.25',
],
extras_require={
'extras': EXTRAS_REQUIRE,
'test': EXTRAS_REQUIRE + [
'nbval',
'pytest==3.7',
'pytest-cov',
'scipy>=0.19.0',
'ipython<=6.5.0', # https://github.com/jupyter/jupyter_console/issues/158
],
'profile': ['prettytable', 'pytest-benchmark', 'snakeviz'],
'dev': EXTRAS_REQUIRE + [
'flake8',
'isort',
'nbformat',
'nbsphinx>=0.3.2',
'nbstripout',
'nbval',
'pypandoc',
'pytest==3.7',
'pytest-xdist',
'ipython<=6.5.0', # https://github.com/jupyter/jupyter_console/issues/158
'scipy>=0.19.0',
'sphinx',
'sphinx_rtd_theme',
'yapf',
],
},
tests_require=['flake8', 'pytest==3.7'],
keywords='machine learning statistics probabilistic programming bayesian modeling pytorch',
license='MIT License',
classifiers=[
'Intended Audience :: Developers',
'Intended Audience :: Education',
'Intended Audience :: Science/Research',
'Operating System :: POSIX :: Linux',
'Operating System :: MacOS :: MacOS X',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3.6',
],
# yapf
)
| 31.284615 | 99 | 0.601672 | ["MIT"] | cglazner/pyro | setup.py | 4,067 | Python |
from __future__ import unicode_literals
from django.apps import apps
from django.db import models
from django.urls import reverse
from django.utils.encoding import force_text, python_2_unicode_compatible
from django.utils.translation import ugettext
@python_2_unicode_compatible
class Collection(object):
_registry = []
@classmethod
def get_all(cls):
return sorted(cls._registry, key=lambda entry: entry._order)
def __init__(self, label, icon=None, link=None, queryset=None, model=None, order=None):
self._label = label
self._icon = icon
self._link = link
self._queryset = queryset
self._model = model
self._order = order or 99
self.__class__._registry.append(self)
def __str__(self):
return force_text(self.label)
def resolve(self):
self.children = self._get_children()
self.icon = self._icon
self.label = self._label
self.url = None
if self._link:
self.icon = getattr(self._link, 'icon', self._icon)
self.url = reverse(viewname=self._link.view, args=self._link.args)
return ''
def _get_children(self):
if self._queryset:
return self._queryset
else:
if self._model:
return self._model.objects.all()
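# Illustrative registration (a sketch; the Document model, icon name and label
# are assumptions, not part of this file):
#
#     Collection(
#         label='Documents', icon='documents', model=Document, order=1
#     )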
class Dashboard(object):
_registry = {}
@classmethod
def get(cls, name):
return cls._registry[name]
def __init__(self, name, label):
self.name = name
self.label = label
self.widgets = {}
self.removed_widgets = []
self.__class__._registry[name] = self
def add_widget(self, widget, order=0):
self.widgets[widget] = {'widget': widget, 'order': order}
def get_widgets(self):
"""
Returns a list of widgets sorted by their 'order'.
If two or more widgets have the same 'order', sort by label.
"""
return map(
lambda x: x['widget'],
filter(
lambda x: x['widget'] not in self.removed_widgets,
sorted(
self.widgets.values(),
key=lambda x: (x['order'], x['widget'].label)
)
)
)
def remove_widget(self, widget):
self.removed_widgets.append(widget)
class DashboardWidget(object):
_registry = []
@classmethod
def get_all(cls):
return cls._registry
def __init__(self, label, func=None, icon=None, link=None, queryset=None, statistic_slug=None):
self.label = label
self.icon = icon
self.link = link
self.queryset = queryset
self.func = func
self.statistic_slug = statistic_slug
self.__class__._registry.append(self)
@python_2_unicode_compatible
class ModelAttribute(object):
__registry = {}
@classmethod
def get_for(cls, model, type_names=None):
result = []
        try:
            for type_name, attributes in cls.__registry[model].items():
                if not type_names or type_name in type_names:
                    result.extend(attributes)
            return result
        except KeyError:
            # We were passed a model instance; try again using the model
            # of the instance. If we already have the model class, the
            # registry genuinely has no entry, so re-raise.
            if model.__class__ == models.base.ModelBase:
                raise
            return cls.get_for(type(model), type_names=type_names)
@classmethod
def get_choices_for(cls, model, type_names=None):
return [
(
attribute.name, attribute
) for attribute in cls.get_for(model, type_names)
]
@classmethod
def help_text_for(cls, model, type_names=None):
result = []
for count, attribute in enumerate(cls.get_for(model, type_names), 1):
result.append(
'{}) {}'.format(
count, force_text(attribute.get_display(show_name=True))
)
)
return ' '.join(
[ugettext('Available attributes: \n'), ', \n'.join(result)]
)
def get_display(self, show_name=False):
if self.description:
return '{} - {}'.format(
self.name if show_name else self.label, self.description
)
else:
return force_text(self.name if show_name else self.label)
def __str__(self):
return self.get_display()
def __init__(self, model, name, label=None, description=None, type_name=None):
self.model = model
self.label = label
self.name = name
self.description = description
for field in model._meta.fields:
if field.name == name:
self.label = field.verbose_name
self.description = field.help_text
self.__registry.setdefault(model, {})
if isinstance(type_name, list):
for single_type in type_name:
self.__registry[model].setdefault(single_type, [])
self.__registry[model][single_type].append(self)
else:
self.__registry[model].setdefault(type_name, [])
self.__registry[model][type_name].append(self)
class MissingItem(object):
_registry = []
@classmethod
def get_all(cls):
return cls._registry
def __init__(self, label, condition, description, view):
self.label = label
self.condition = condition
self.description = description
self.view = view
self.__class__._registry.append(self)
@python_2_unicode_compatible
class Filter(object):
_registry = {}
@classmethod
def get(cls, slug):
return cls._registry[slug]
@classmethod
def all(cls):
return cls._registry
def __init__(self, label, slug, filter_kwargs, model, object_permission=None, hide_links=False):
self.label = label
self.slug = slug
self.filter_kwargs = filter_kwargs
self.model = model
self.object_permission = object_permission
self.hide_links = hide_links
self.__class__._registry[self.slug] = self
def __str__(self):
return force_text(self.label)
def get_queryset(self, user):
AccessControlList = apps.get_model(
app_label='acls', model_name='AccessControlList'
)
queryset = self.model.objects.all()
for kwargs in self.filter_kwargs:
queryset = queryset.filter(**kwargs)
queryset = queryset.distinct()
if self.object_permission:
return AccessControlList.objects.filter_by_access(
self.object_permission, user, queryset=queryset
)
else:
return queryset
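# Illustrative registration (a sketch; the Document model, permission and
# filter_kwargs are assumptions, not part of this file):
#
#     Filter(
#         label='Recently added documents', slug='recent-documents',
#         filter_kwargs=[{'date_added__isnull': False}], model=Document,
#         object_permission=permission_document_view,
#     )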
class Package(object):
_registry = []
@classmethod
def get_all(cls):
return cls._registry
def __init__(self, label, license_text):
self.label = label
self.license_text = license_text
self.__class__._registry.append(self)
class PropertyHelper(object):
"""
Makes adding fields using __class__.add_to_class easier.
Each subclass must implement the `constructor` and the `get_result`
method.
"""
@staticmethod
@property
def constructor(source_object):
return PropertyHelper(source_object)
def __init__(self, instance):
self.instance = instance
def __getattr__(self, name):
return self.get_result(name=name)
def get_result(self, name):
"""
The method that produces the actual result. Must be implemented
by each subclass.
"""
raise NotImplementedError
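# Illustrative subclass (a sketch of the intended pattern; the Document model
# and the proxied attribute are assumptions, not part of this file):
#
#     class DocumentPropertyHelper(PropertyHelper):
#         def get_result(self, name):
#             # Resolve the requested attribute against the wrapped instance.
#             return getattr(self.instance, name)
#
#     # document.properties.label would then proxy to document.label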
| 27.916968 | 100 | 0.604423 | ["Apache-2.0"] | marumadang/mayan-edms | mayan/apps/common/classes.py | 7,733 | Python |
# -*- coding: utf-8 -*-
#
# Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# DO NOT EDIT! This is a generated sample ("Request", "speech_transcribe_multichannel")
# To install the latest published package dependency, execute the following:
# pip install google-cloud-speech
# sample-metadata
# title: Multi-Channel Audio Transcription (Local File)
# description: Transcribe a short audio file with multiple channels
# usage: python3 samples/v1/speech_transcribe_multichannel.py [--local_file_path "resources/multi.wav"]
# [START speech_transcribe_multichannel]
from google.cloud import speech_v1
import io
def sample_recognize(local_file_path):
"""
Transcribe a short audio file with multiple channels
Args:
local_file_path Path to local audio file, e.g. /path/audio.wav
"""
client = speech_v1.SpeechClient()
# local_file_path = 'resources/multi.wav'
# The number of channels in the input audio file (optional)
audio_channel_count = 2
# When set to true, each audio channel will be recognized separately.
# The recognition result will contain a channel_tag field to state which
# channel that result belongs to
enable_separate_recognition_per_channel = True
# The language of the supplied audio
language_code = "en-US"
config = {
"audio_channel_count": audio_channel_count,
"enable_separate_recognition_per_channel": enable_separate_recognition_per_channel,
"language_code": language_code,
}
with io.open(local_file_path, "rb") as f:
content = f.read()
audio = {"content": content}
response = client.recognize(config, audio)
for result in response.results:
# channel_tag to recognize which audio channel this result is for
print(u"Channel tag: {}".format(result.channel_tag))
# First alternative is the most probable result
alternative = result.alternatives[0]
print(u"Transcript: {}".format(alternative.transcript))
# [END speech_transcribe_multichannel]
def main():
import argparse
parser = argparse.ArgumentParser()
parser.add_argument("--local_file_path", type=str, default="resources/multi.wav")
args = parser.parse_args()
sample_recognize(args.local_file_path)
if __name__ == "__main__":
main()
| 32.632184 | 105 | 0.726664 | ["Apache-2.0"] | AzemaBaptiste/google-cloud-python | speech/samples/v1/speech_transcribe_multichannel.py | 2,839 | Python |
from hydroDL import pathSMAP, master
import os
from hydroDL.data import dbCsv
# train for each cont
contLst = [
'Africa',
'Asia',
'Australia',
'Europe',
'NorthAmerica',
'SouthAmerica',
]
subsetLst = ['Globalv4f1_' + x for x in contLst]
subsetLst.append('Globalv4f1')
outLst = [x + '_v4f1_y1' for x in contLst]
outLst.append('Global_v4f1_y1')
caseLst = ['Forcing', 'Soilm']
cid = 0
for k in range(len(subsetLst)):
for case in caseLst:
if case == 'Forcing':
varLst = dbCsv.varForcingGlobal
else:
varLst = dbCsv.varSoilmGlobal
optData = master.default.update(
master.default.optDataSMAP,
rootDB=pathSMAP['DB_L3_Global'],
subset=subsetLst[k],
tRange=[20150401, 20160401],
varT=varLst)
optModel = master.default.optLstm
optLoss = master.default.optLossSigma
optTrain = master.default.optTrainSMAP
out = os.path.join(pathSMAP['Out_L3_Global'], outLst[k] + '_' + case)
masterDict = master.wrapMaster(out, optData, optModel, optLoss,
optTrain)
master.runTrain(masterDict, cudaID=cid % 3, screen=outLst[k])
cid = cid + 1
# master.train(masterDict)
# some of them failed and rerun
# master.runTrain(
# r'/mnt/sdb/rnnSMAP/Model_SMAPgrid/L3_Global/Africa_v4f1_y1_Forcing/',
# cudaID=1,
# screen='Africa_v4f1_y1_Forcing')
# master.runTrain(
# r'/mnt/sdb/rnnSMAP/Model_SMAPgrid/L3_Global/Asia_v4f1_y1_Soilm/',
# cudaID=0,
# screen='Asia_v4f1_y1_Soilm')
# master.runTrain(
# r'/mnt/sdb/rnnSMAP/Model_SMAPgrid/L3_Global/NorthAmerica_v4f1_y1_Soilm/',
# cudaID=1,
# screen='NorthAmerica_v4f1_y1_Soilm')
# master.runTrain(
# r'/mnt/sdb/rnnSMAP/Model_SMAPgrid/L3_Global/Global_v4f1_y1_Forcing/',
# cudaID=2,
# screen='Global_v4f1_y1_Forcing')
| 30.887097 | 79 | 0.644386 | ["MIT"] | fkwai/geolearn | app/global/train_cont.py | 1,915 | Python |
import torch
import torch.nn as nn
from .subnet import DoubleConv, UpConv, RRCU
class R2UNet(nn.Module):
def __init__(self, in_ch, out_ch, base_ch=64):
super(R2UNet, self).__init__()
self.inc = DoubleConv(in_ch, base_ch)
self.down = nn.MaxPool2d(kernel_size=2, stride=2)
self.down1 = self._Down(base_ch, base_ch * 2)
self.down2 = self._Down(base_ch * 2, base_ch * 4)
self.down3 = self._Down(base_ch * 4, base_ch * 8)
self.down4 = self._Down(base_ch * 8, base_ch * 16)
self.up1 = self._Up(base_ch * 16, base_ch * 8)
self.up2 = self._Up(base_ch * 8, base_ch * 4)
self.up3 = self._Up(base_ch * 4, base_ch * 2)
self.up4 = self._Up(base_ch * 2, base_ch)
self.outc = nn.Conv2d(base_ch, out_ch, kernel_size=1, stride=1, padding=0)
def forward(self, x):
x1 = self.inc(x)
x2 = self.down1(x1)
x3 = self.down2(x2)
x4 = self.down3(x3)
x5 = self.down4(x4)
x = self.up1(x5, x4)
x = self.up2(x, x3)
x = self.up3(x, x2)
x = self.up4(x, x1)
x = self.outc(x)
return x
class _Down(nn.Module):
def __init__(self, in_ch, out_ch):
super(R2UNet._Down, self).__init__()
self.down = nn.Sequential(
nn.MaxPool2d(kernel_size=2),
RRCU(in_ch, out_ch)
)
def forward(self, x):
x = self.down(x)
return x
class _Up(nn.Module):
def __init__(self, in_ch, out_ch):
super(R2UNet._Up, self).__init__()
self.up = UpConv(in_ch, out_ch)
self.conv = RRCU(in_ch, out_ch)
def forward(self, x1, x2):
x1 = self.up(x1)
x = torch.cat([x2, x1], dim=1)
x = self.conv(x)
return x
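# Illustrative shape check (a sketch, not part of the original file; it assumes
# the DoubleConv/UpConv/RRCU blocks from .subnet preserve spatial size, as is
# typical for U-Net style subnets):
#
#     net = R2UNet(in_ch=3, out_ch=1)
#     out = net(torch.randn(1, 3, 256, 256))
#     # out.shape == torch.Size([1, 1, 256, 256]); the four 2x poolings are
#     # undone by the four UpConv stages, so spatial size is preserved.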
| 31.898305 | 82 | 0.529756 | ["MIT"] | nitsaick/pytorch-kit | network/r2unet.py | 1,882 | Python |