"""
This module provides functions for transforming curves to different models.
"""
from public import public
from sympy import FF, symbols, Poly
from .coordinates import AffineCoordinateModel
from .curve import EllipticCurve
from .mod import Mod
from .model import ShortWeierstrassModel, MontgomeryModel, TwistedEdwardsModel
from .params import DomainParameters
from .point import InfinityPoint, Point
def __M_map(params, param_names, map_parameters, map_point, model):
param_one = params.curve.parameters[param_names[0]]
param_other = params.curve.parameters[param_names[1]]
parameters = map_parameters(param_one, param_other)
aff = AffineCoordinateModel(model)
if isinstance(params.curve.neutral, InfinityPoint):
neutral = InfinityPoint(aff)
else:
neutral = map_point(param_one, param_other, params.curve.neutral, aff)
curve = EllipticCurve(model, aff, params.curve.prime, neutral, parameters)
return DomainParameters(curve, map_point(param_one, param_other, params.generator, aff), params.order,
params.cofactor)
@public
def M2SW(params: DomainParameters) -> DomainParameters:
"""
Convert a Montgomery curve to ShortWeierstrass.
:param params: The domain parameters to convert.
:return: The converted domain parameters.
"""
if not isinstance(params.curve.model, MontgomeryModel) or not isinstance(
params.curve.coordinate_model, AffineCoordinateModel):
raise ValueError
def map_parameters(A, B):
a = (3 - A ** 2) / (3 * B ** 2)
b = (2 * A ** 3 - 9 * A) / (27 * B ** 3)
return {"a": a, "b": b}
def map_point(A, B, pt, aff):
u = pt.x / B + A / (3 * B)
v = pt.y / B
return Point(aff, x=u, y=v)
return __M_map(params, ("a", "b"), map_parameters, map_point, ShortWeierstrassModel())
@public
def M2TE(params: DomainParameters) -> DomainParameters:
"""
Convert a Montgomery curve to TwistedEdwards.
:param params: The domain parameters to convert.
:return: The converted domain parameters.
"""
if not isinstance(params.curve.model, MontgomeryModel) or not isinstance(
params.curve.coordinate_model, AffineCoordinateModel):
raise ValueError
def map_parameters(A, B):
a = (A + 2) / B
d = (A - 2) / B
return {"a": a, "d": d}
def map_point(A, B, pt, aff):
u = pt.x / pt.y
v = (pt.x - 1) / (pt.x + 1)
return Point(aff, x=u, y=v)
return __M_map(params, ("a", "b"), map_parameters, map_point, TwistedEdwardsModel())
@public
def TE2M(params: DomainParameters) -> DomainParameters:
"""
Convert a TwistedEdwards curve to Montgomery.
:param params: The domain parameters to convert.
:return: The converted domain parameters.
"""
if not isinstance(params.curve.model, TwistedEdwardsModel) or not isinstance(
params.curve.coordinate_model, AffineCoordinateModel):
raise ValueError
def map_parameters(a, d):
A = (2 * (a + d)) / (a - d)
B = 4 / (a - d)
return {"a": A, "b": B}
def map_point(a, d, pt, aff):
u = (1 + pt.y) / (1 - pt.y)
v = (1 + pt.y) / ((1 - pt.y) * pt.x)
return Point(aff, x=u, y=v)
return __M_map(params, ("a", "d"), map_parameters, map_point, MontgomeryModel())
@public
def SW2M(params: DomainParameters) -> DomainParameters:
"""
Convert a ShortWeierstrass curve to Montgomery.
:param params: The domain parameters to convert.
:return: The converted domain parameters.
"""
if not isinstance(params.curve.model, ShortWeierstrassModel) or not isinstance(
params.curve.coordinate_model, AffineCoordinateModel):
raise ValueError
ax = symbols("α")
field = FF(params.curve.prime)
rhs = Poly(ax ** 3 + field(int(params.curve.parameters["a"])) * ax + field(int(params.curve.parameters["b"])), ax, domain=field)
roots = rhs.ground_roots()
if not roots:
raise ValueError("Curve cannot be transformed to Montgomery model (x^3 + ax + b has no root).")
alpha = Mod(int(next(iter(roots.keys()))), params.curve.prime)
beta = (3 * alpha**2 + params.curve.parameters["a"]).sqrt()
def map_parameters(a, b):
A = (3 * alpha) / beta
B = 1 / beta
return {"a": A, "b": B}
def map_point(a, b, pt, aff):
u = (pt.x - alpha) / beta
v = pt.y / beta
return Point(aff, x=u, y=v)
return __M_map(params, ("a", "b"), map_parameters, map_point, MontgomeryModel())
@public
def SW2TE(params: DomainParameters) -> DomainParameters:
"""
Convert a ShortWeierstrass curve to TwistedEdwards.
:param params: The domain parameters to convert.
:return: The converted domain parameters.
"""
if not isinstance(params.curve.model, ShortWeierstrassModel) or not isinstance(
params.curve.coordinate_model, AffineCoordinateModel):
raise ValueError
ax = symbols("α")
field = FF(params.curve.prime)
rhs = Poly(ax ** 3 + field(int(params.curve.parameters["a"])) * ax + field(int(params.curve.parameters["b"])), ax, domain=field)
roots = rhs.ground_roots()
if not roots:
raise ValueError("Curve cannot be transformed to Montgomery model (x^3 + ax + b has no root).")
alpha = Mod(int(next(iter(roots.keys()))), params.curve.prime)
beta = (3 * alpha**2 + params.curve.parameters["a"]).sqrt()
def map_parameters(a, b):
a = 3 * alpha + 2 * beta
d = 3 * alpha - 2 * beta
return {"a": a, "d": d}
def map_point(a, b, pt, aff):
if params.curve.is_neutral(pt):
u = Mod(0, params.curve.prime)
v = Mod(1, params.curve.prime)
elif pt.x == alpha and pt.y == 0:
u = Mod(0, params.curve.prime)
v = Mod(-1, params.curve.prime)
else:
u = (pt.x - alpha) / pt.y
v = (pt.x - alpha - beta) / (pt.x - alpha + beta)
return Point(aff, x=u, y=v)
return __M_map(params, ("a", "b"), map_parameters, map_point, TwistedEdwardsModel())
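# --- Usage sketch (added; hedged) -------------------------------------------
# The converters above only accept DomainParameters whose curve uses the
# matching model with an AffineCoordinateModel. Assuming such a `params`
# object for a Montgomery curve is already available (the variable name and
# surrounding setup are illustrative, not part of this module):
#
#     sw_params = M2SW(params)   # Montgomery -> short Weierstrass
#     te_params = M2TE(params)   # Montgomery -> twisted Edwards
#     # order and cofactor are carried over unchanged by __M_map:
#     assert sw_params.order == params.order and sw_params.cofactor == params.cofactor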
import sys
has_pytest = False
# There is a difference between versions 1.3.4 and 2.0.2:
# since version 1.4, the testing tool "py.test" has been part of its own pytest distribution.
try:
import pytest
has_pytest = True
except:
try:
import py
except:
raise NameError("No py.test runner found in selected interpreter")
def get_plugin_manager():
try:
from _pytest.config import get_plugin_manager
return get_plugin_manager()
except ImportError:
from _pytest.core import PluginManager
return PluginManager(load=True)
# "-s" is always required: no test output provided otherwise
args = sys.argv[1:]
if "-s" not in args:
    args.append("-s")
if has_pytest:
_preinit = []
def main():
_pluginmanager = get_plugin_manager()
hook = _pluginmanager.hook
try:
config = hook.pytest_cmdline_parse(
pluginmanager=_pluginmanager, args=args)
exitstatus = hook.pytest_cmdline_main(config=config)
except pytest.UsageError:
e = sys.exc_info()[1]
sys.stderr.write("ERROR: %s\n" %(e.args[0],))
exitstatus = 3
return exitstatus
else:
def main():
config = py.test.config
try:
config.parse(args)
config.pluginmanager.do_configure(config)
session = config.initsession()
colitems = config.getinitialnodes()
exitstatus = session.main(colitems)
config.pluginmanager.do_unconfigure(config)
except config.Error:
e = sys.exc_info()[1]
sys.stderr.write("ERROR: %s\n" %(e.args[0],))
exitstatus = 3
py.test.config = py.test.config.__class__()
return exitstatus
if __name__ == "__main__":
main()
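# Added note (hedged): this helper is normally invoked by an IDE test runner
# with the usual py.test arguments, e.g.
#     python <this_script> path/to/test_file.py
# The script name is illustrative; "-s" is appended automatically above so
# that test output is not captured.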
#!/usr/bin/env python
import subprocess
from pathlib import Path
from os import chdir
from sys import exit
PARENT_DIR = Path(__file__).resolve().parent.parent
EDGE_DIR = PARENT_DIR / 'edge'
REQ_DIR = EDGE_DIR / 'requirements'
# COMMANDS:
# pipenv lock --requirements > requirements/base.txt
# echo "-r base.txt" > requirements/development.txt
# pipenv lock --requirements --dev >> requirements/development.txt
base_reqs = REQ_DIR / "base.txt"
dev_reqs = REQ_DIR / "development.txt"
chdir(EDGE_DIR)
if not base_reqs.exists():
print(f"base.txt not found!")
exit(-1)
if not dev_reqs.exists():
print(f"development.txt not found!")
exit(-1)
# Updating base requirements
proc = subprocess.run(["pipenv", "lock", "--requirements"],
stdout=subprocess.PIPE)
base_contents = proc.stdout.decode("utf-8")
with open(base_reqs, "w") as f:
f.write(base_contents)
print("base.txt is updated!")
# Updating dev requirements
proc = subprocess.run(["pipenv", "lock", "--requirements", "--dev"],
stdout=subprocess.PIPE)
dev_contents = proc.stdout.decode("utf-8")
with open(dev_reqs, "w") as f:
f.write("-r base.txt\n")
f.write(dev_contents)
print("development.txt is updated!")
# Source: subwaymatch/leetcode
from typing import List


class Solution:
    def maxProfit(self, prices: List[int]) -> int:
if len(prices) <= 1:
return 0
# Build max_after array
max_after = [0] * (len(prices) - 1)
max_after[-1] = prices[-1]
for idx in reversed(range(len(max_after) - 1)):
max_after[idx] = max(prices[idx + 1], max_after[idx + 1])
# Find max profit
max_profit = 0
for idx in range(len(prices) - 1):
max_profit = max(max_profit, max_after[idx] - prices[idx])
return max_profit
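# Added self-check (hedged; not part of the original submission). For prices
# [7, 1, 5, 3, 6, 4] the best single transaction buys at 1 and sells at 6,
# so the expected answer is 5; a strictly falling series yields 0.
if __name__ == "__main__":
    assert Solution().maxProfit([7, 1, 5, 3, 6, 4]) == 5
    assert Solution().maxProfit([7, 6, 4, 3, 1]) == 0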
import datetime as dt
from ..models import CurrDataModel, HistDataModel
from .taskManagement import createTasks, runTasks
from .utils import fieldsConversion, tickersConversion, VndDate
class Vnd:
"""
Wrapper class to handle inputs and present output
"""
def __init__(self, defaultFormat="df"):
self.defaultFormat = defaultFormat
def hist(self, tickers, fields, fromDate=None, toDate=None, **overrides):
"""
Getting historical data.
:tickers: string or list of string
:fields: string or list of string
        :fromDate: 'yyyymmdd' string or python datetime object, default value is 20 days prior to toDate
:toDate: 'yyyymmdd' string or python datetime object, default value is today
"""
tickers = tickersConversion(tickers) # convert tickers to string
fields = fieldsConversion(fields) # convert fields to list of fields
# Handle date format and generate default date
if toDate is None:
toDate = dt.datetime.today()
toDate = VndDate(toDate)
if fromDate is None:
fromDate = toDate - dt.timedelta(days=20)
fromDate = VndDate(fromDate)
tasks = createTasks("hist", tickers, fields, fromDate, toDate, **overrides)
data = runTasks(tasks)
# TODO: implement overrides
return HistDataModel(data)
def curr(self, tickers, fields, **overrides):
"""
        Getting current data.
:tickers: string or list of string
:fields: string or list of string
"""
tickers = tickersConversion(tickers) # convert tickers to string
fields = fieldsConversion(fields) # convert fields to list of fields
tasks = createTasks("current", tickers, fields, **overrides)
data = runTasks(tasks)
# TODO: implement overrides
return CurrDataModel(data)
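# Usage sketch (added; hedged). The ticker and field names below are purely
# illustrative -- valid values depend on the VNDIRECT data source wrapped by
# createTasks/runTasks, which this file does not document:
#
#     vnd = Vnd()
#     hist = vnd.hist("VNM", ["close"], fromDate="20200101", toDate="20200131")
#     curr = vnd.curr(["VNM", "FPT"], "price")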
# python/kids/line.py
import turtle
tom = turtle.Turtle()
tom.forward(50)
turtle.done()
# ohad83/pandas: asv_bench/benchmarks/gil.py
import numpy as np
from pandas import DataFrame, Series, date_range, factorize, read_csv
from pandas.core.algorithms import take_1d
import pandas.util.testing as tm
try:
from pandas import (
rolling_median,
rolling_mean,
rolling_min,
rolling_max,
rolling_var,
rolling_skew,
rolling_kurt,
rolling_std,
)
have_rolling_methods = True
except ImportError:
have_rolling_methods = False
try:
from pandas._libs import algos
except ImportError:
from pandas import algos
try:
from pandas.util.testing import test_parallel
have_real_test_parallel = True
except ImportError:
have_real_test_parallel = False
def test_parallel(num_threads=1):
def wrapper(fname):
return fname
return wrapper
from .pandas_vb_common import BaseIO # isort:skip
class ParallelGroupbyMethods:
params = ([2, 4, 8], ["count", "last", "max", "mean", "min", "prod", "sum", "var"])
param_names = ["threads", "method"]
def setup(self, threads, method):
if not have_real_test_parallel:
raise NotImplementedError
N = 10 ** 6
ngroups = 10 ** 3
df = DataFrame(
{"key": np.random.randint(0, ngroups, size=N), "data": np.random.randn(N)}
)
@test_parallel(num_threads=threads)
def parallel():
getattr(df.groupby("key")["data"], method)()
self.parallel = parallel
def loop():
getattr(df.groupby("key")["data"], method)()
self.loop = loop
def time_parallel(self, threads, method):
self.parallel()
def time_loop(self, threads, method):
for i in range(threads):
self.loop()
class ParallelGroups:
params = [2, 4, 8]
param_names = ["threads"]
def setup(self, threads):
if not have_real_test_parallel:
raise NotImplementedError
size = 2 ** 22
ngroups = 10 ** 3
data = Series(np.random.randint(0, ngroups, size=size))
@test_parallel(num_threads=threads)
def get_groups():
data.groupby(data).groups
self.get_groups = get_groups
def time_get_groups(self, threads):
self.get_groups()
class ParallelTake1D:
params = ["int64", "float64"]
param_names = ["dtype"]
def setup(self, dtype):
if not have_real_test_parallel:
raise NotImplementedError
N = 10 ** 6
df = DataFrame({"col": np.arange(N, dtype=dtype)})
indexer = np.arange(100, len(df) - 100)
@test_parallel(num_threads=2)
def parallel_take1d():
take_1d(df["col"].values, indexer)
self.parallel_take1d = parallel_take1d
def time_take1d(self, dtype):
self.parallel_take1d()
class ParallelKth:
number = 1
repeat = 5
def setup(self):
if not have_real_test_parallel:
raise NotImplementedError
N = 10 ** 7
k = 5 * 10 ** 5
kwargs_list = [{"arr": np.random.randn(N)}, {"arr": np.random.randn(N)}]
@test_parallel(num_threads=2, kwargs_list=kwargs_list)
def parallel_kth_smallest(arr):
algos.kth_smallest(arr, k)
self.parallel_kth_smallest = parallel_kth_smallest
def time_kth_smallest(self):
self.parallel_kth_smallest()
class ParallelDatetimeFields:
def setup(self):
if not have_real_test_parallel:
raise NotImplementedError
N = 10 ** 6
self.dti = date_range("1900-01-01", periods=N, freq="T")
self.period = self.dti.to_period("D")
def time_datetime_field_year(self):
@test_parallel(num_threads=2)
def run(dti):
dti.year
run(self.dti)
def time_datetime_field_day(self):
@test_parallel(num_threads=2)
def run(dti):
dti.day
run(self.dti)
def time_datetime_field_daysinmonth(self):
@test_parallel(num_threads=2)
def run(dti):
dti.days_in_month
run(self.dti)
def time_datetime_field_normalize(self):
@test_parallel(num_threads=2)
def run(dti):
dti.normalize()
run(self.dti)
def time_datetime_to_period(self):
@test_parallel(num_threads=2)
def run(dti):
dti.to_period("S")
run(self.dti)
def time_period_to_datetime(self):
@test_parallel(num_threads=2)
def run(period):
period.to_timestamp()
run(self.period)
class ParallelRolling:
params = ["median", "mean", "min", "max", "var", "skew", "kurt", "std"]
param_names = ["method"]
def setup(self, method):
if not have_real_test_parallel:
raise NotImplementedError
win = 100
arr = np.random.rand(100000)
if hasattr(DataFrame, "rolling"):
df = DataFrame(arr).rolling(win)
@test_parallel(num_threads=2)
def parallel_rolling():
getattr(df, method)()
self.parallel_rolling = parallel_rolling
elif have_rolling_methods:
rolling = {
"median": rolling_median,
"mean": rolling_mean,
"min": rolling_min,
"max": rolling_max,
"var": rolling_var,
"skew": rolling_skew,
"kurt": rolling_kurt,
"std": rolling_std,
}
@test_parallel(num_threads=2)
def parallel_rolling():
rolling[method](arr, win)
self.parallel_rolling = parallel_rolling
else:
raise NotImplementedError
def time_rolling(self, method):
self.parallel_rolling()
class ParallelReadCSV(BaseIO):
number = 1
repeat = 5
params = ["float", "object", "datetime"]
param_names = ["dtype"]
def setup(self, dtype):
if not have_real_test_parallel:
raise NotImplementedError
rows = 10000
cols = 50
data = {
"float": DataFrame(np.random.randn(rows, cols)),
"datetime": DataFrame(
np.random.randn(rows, cols), index=date_range("1/1/2000", periods=rows)
),
"object": DataFrame(
"foo", index=range(rows), columns=["object%03d" for _ in range(5)]
),
}
self.fname = f"__test_{dtype}__.csv"
df = data[dtype]
df.to_csv(self.fname)
@test_parallel(num_threads=2)
def parallel_read_csv():
read_csv(self.fname)
self.parallel_read_csv = parallel_read_csv
def time_read_csv(self, dtype):
self.parallel_read_csv()
class ParallelFactorize:
number = 1
repeat = 5
params = [2, 4, 8]
param_names = ["threads"]
def setup(self, threads):
if not have_real_test_parallel:
raise NotImplementedError
strings = tm.makeStringIndex(100000)
@test_parallel(num_threads=threads)
def parallel():
factorize(strings)
self.parallel = parallel
def loop():
factorize(strings)
self.loop = loop
def time_parallel(self, threads):
self.parallel()
def time_loop(self, threads):
for i in range(threads):
self.loop()
from .pandas_vb_common import setup # noqa: F401 isort:skip
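# Note (added): these classes follow the airspeed velocity (asv) benchmark
# conventions used by pandas (setup/time_* methods, params/param_names), so
# they are meant to be discovered and run by asv (e.g. "asv run") rather than
# executed directly.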
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
AIM utility functions.
"""
# ----------------------------------------------------------------------------
# Imports
# ----------------------------------------------------------------------------
# Standard library modules
import base64
import pathlib
from io import BytesIO
# Third-party modules
from PIL import Image
# First-party modules
from aim.core.constants import IMAGE_QUALITY_JPEG
# ----------------------------------------------------------------------------
# Metadata
# ----------------------------------------------------------------------------
__author__ = "<NAME>"
__date__ = "2020-08-21"
__email__ = "<EMAIL>"
__version__ = "1.0"
# ----------------------------------------------------------------------------
# Utility functions
# ----------------------------------------------------------------------------
def read_image(filepath: pathlib.Path) -> str:
"""
Read an image from a file.
Args:
filepath: Input image file path
Returns:
Image encoded in Base64
"""
with open(filepath, "rb") as f:
image_base64: str = base64.b64encode(f.read()).decode("utf-8")
return image_base64
def write_image(image_base64: str, filepath: pathlib.Path):
"""
Write an image to a file.
Args:
image_base64: Image encoded in Base64
filepath: Output image file path
"""
with open(filepath, "wb") as f:
f.write(base64.b64decode(image_base64))
def convert_image(
png_image: str, jpeg_image_quality: int = IMAGE_QUALITY_JPEG
) -> str:
"""
Convert an image from PNG to JPEG, encoded in Base64.
(Semi-)transparent pixels are replaced with (semi-)white pixels in
the output JPEG image.
Args:
png_image: PNG image encoded in Base64
Kwargs:
jpeg_image_quality: JPEG image quality (defaults to 70)
Returns:
JPEG image encoded in Base64
"""
img_rgb: Image.Image = Image.open(
BytesIO(base64.b64decode(png_image))
).convert("RGB")
buffered: BytesIO = BytesIO()
img_rgb.save(buffered, format="JPEG", quality=jpeg_image_quality)
jpeg_image_base64: str = base64.b64encode(buffered.getvalue()).decode(
"utf-8"
)
return jpeg_image_base64
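# Added example (hedged): a minimal round trip through the helpers above.
# File names are illustrative; running this standalone still requires the
# aim.core.constants import at the top of the module to resolve.
if __name__ == "__main__":
    import tempfile

    tmp_dir = pathlib.Path(tempfile.gettempdir())
    png_path = tmp_dir / "aim_utils_example.png"
    # A tiny semi-transparent PNG so convert_image() has an alpha channel to drop
    Image.new("RGBA", (8, 8), (255, 0, 0, 128)).save(png_path, format="PNG")
    png_b64 = read_image(png_path)
    jpeg_b64 = convert_image(png_b64)
    write_image(jpeg_b64, tmp_dir / "aim_utils_example.jpg")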
"""
This script serves to do recurrence analysis on the sv-gene pairs identified
We do the following things:
1. From the top 100 of each SV type (so top 400), which genes are there? Which are the top 15 most recurrent?
2. For these genes, also check which other mutations are found in these genes in different patients.
3. Then also check which genes are recurrent if we ignore the top 100, and just look across all positive SV-gene pairs.
"""
import sys
import numpy as np
import random
from scipy import stats
from statsmodels.sandbox.stats.multicomp import multipletests
import matplotlib.pyplot as plt
import os
import matplotlib
matplotlib.use('Agg')
outDir = sys.argv[1]
finalOutDir = outDir + '/figure4/'
if not os.path.exists(finalOutDir):
os.makedirs(finalOutDir)
finalOutDirFullFigure = outDir + '/figS5/'
if not os.path.exists(finalOutDirFullFigure):
os.makedirs(finalOutDirFullFigure)
#load the sv-gene pairs
positivePairs = np.loadtxt(outDir + '/linkedSVGenePairs/nonCoding_geneSVPairs.txt_pathogenicPairsFeatures.txt', dtype='object')
print(positivePairs.shape)
topPairs = dict()
topPairGenes = dict() #all unique genes
svTypes = ['DEL', 'DUP', 'INV', 'ITX']
for svType in svTypes:
svPairs = np.loadtxt(outDir + '/featureImportance/pairLabels_top100_' + svType + '.txt', dtype='object')
rankedPairs = []
ind = len(svPairs)
for pair in svPairs:
splitPair = pair.split('_')
topPairGenes[splitPair[0]] = 0
rankedPairs.append([pair, ind])
ind -= 1
topPairs[svType] = rankedPairs
degPairs = np.loadtxt(outDir + '/tadDisruptionsZScores/zScores.txt', dtype='object')
#format: gene as key, as values, the first is the number of times we found the gene,
#the second how many were dels, then dups, invs, itx.
splitPairs = dict()
genes = dict()
for pair in positivePairs: #get stats for all pairs
splitPair = pair[0].split('_')
if splitPair[0] + '_' + splitPair[7] not in splitPairs:
splitPairs[splitPair[0] + '_' + splitPair[7]] = []
splitPairs[splitPair[0] + '_' + splitPair[7]].append(pair)
if splitPair[0] not in genes:
#count, cross-patient count, nc: del, dup, inv, itx, snv, cnv amp, cnv del, sv del, sv dup, sv inv, sv itx
genes[splitPair[0]] = [0]*13
genes[splitPair[0]].append([]) #add the patient names here
genes[splitPair[0]].append(0) #negative dels
genes[splitPair[0]].append(0) #negative dups
genes[splitPair[0]].append(0) #negative invs
genes[splitPair[0]].append(0) #negative itx
genes[splitPair[0]][0] += 1
if splitPair[12] == 'DEL':
genes[splitPair[0]][1] += 1
elif splitPair[12] == 'DUP':
genes[splitPair[0]][2] += 1
elif splitPair[12] == 'INV':
genes[splitPair[0]][3] += 1
elif splitPair[12] == 'ITX':
genes[splitPair[0]][4] += 1
patient = splitPair[7]
genes[splitPair[0]][13].append(patient)
#convert to numpy array for easy ranking
recurrentGenes = []
for gene in genes:
#count how many unique patients affect the gene for recurrence
uniquePatients = np.unique(genes[gene][13])
data = [gene] + [len(uniquePatients)] + genes[gene]
recurrentGenes.append(data)
recurrentGenes = np.array(recurrentGenes, dtype='object')
#sort
sortedGenes = recurrentGenes[np.argsort(recurrentGenes[:,1])[::-1]]
sortedGenesTop = []
for gene in sortedGenes:
if gene[0] not in topPairGenes:
continue
sortedGenesTop.append(gene)
sortedGenesTop = np.array(sortedGenesTop, dtype='object')
#make a matrix in which we show visually which genes are affected in which patients
#this matrix is genes x patients
uniquePatients = dict()
top = 15 #making the matrix only for the top X genes
ind = 0
for gene in sortedGenesTop:
if ind >= top:
continue
patients = gene[15]
for patient in patients:
if patient not in uniquePatients:
uniquePatients[patient] = 0
uniquePatients[patient] += 1
ind += 1
#make a matrix of genes by patients
recurrenceMatrix = np.zeros([top, len(uniquePatients)])
ind = 0
patientOrder = dict() #order of patients in the matrix
for patientInd in range(0, len(uniquePatients)):
patient = list(uniquePatients.keys())[patientInd]
patientOrder[patient] = patientInd
for gene in sortedGenesTop:
if ind >= top:
continue
patients = gene[15]
for patient in patients:
patientInd = patientOrder[patient]
recurrenceMatrix[ind, patientInd] += 1
ind += 1
print(recurrenceMatrix)
#make a grid plot, showing the different SV types that the patients have
#color the genes with -/+ direction, see if it correlates with the SV types.
fig, ax = plt.subplots()
for row in range(0, recurrenceMatrix.shape[0]):
if row < recurrenceMatrix.shape[0]-1:
ax.axhline(row+0.5, linestyle='--', color='k', linewidth=0.5)
for col in range(0, recurrenceMatrix.shape[1]):
if col < recurrenceMatrix.shape[1]-1:
ax.axvline(col+0.5, linestyle='--', color='k', linewidth=0.5)
if recurrenceMatrix[row,col] > 0:
#get the sv type to see which symbol to assign
gene = sortedGenesTop[row, 0]
patient = list(uniquePatients.keys())[col]
pairs = splitPairs[gene + '_' + patient]
#generate some random offsets to avoid overlapping data
offsetsX = random.sample(range(-30,30), len(pairs))
offsetsX = [i / float(100) for i in offsetsX]
offsetsY = random.sample(range(-30,30), len(pairs))
offsetsY = [i / float(100) for i in offsetsY]
ind = 0
for pair in pairs:
splitPair = pair[0].split('_')
svType = splitPair[12]
markerType = '.'
if svType == 'DEL':
markerType = '.'
elif svType == 'DUP':
markerType = 's'
elif svType == 'INV':
markerType = '^'
elif svType == 'ITX':
markerType = '*'
#also get up/down color
if patient + '_' + gene in degPairs[:,0]:
#get the z-score of the pair.
degPairInfo = degPairs[degPairs[:,0] == patient + '_' + gene][0]
color = 'red'
if float(degPairInfo[5]) > 1.5:
color = 'red'
elif float(degPairInfo[5]) < -1.5:
color = 'blue'
else:
color = 'grey'
else:
continue #this is a pair with likely coding mutations, skip it
plt.scatter(col + offsetsY[ind], offsetsX[ind] + (recurrenceMatrix.shape[0] - row -1), marker=markerType, edgecolor=color,
facecolor='none', s=35)
ind += 1
#the genes are swapped around to show the most recurrent on top, so reverse the labels as well
plt.yticks(range(0, recurrenceMatrix.shape[0]), sortedGenesTop[0:top,0][::-1])
plt.xticks(range(0, recurrenceMatrix.shape[1]), list(uniquePatients.keys()), rotation=90)
#plt.grid()
plt.tight_layout()
plt.savefig(finalOutDir + '/recurrence_top400.svg')
plt.clf()
#Next, we are interested in patients with alternative mutations.
#So here, for each gene, first show how many patients have an SNV, CNV, or SV
#keep in mind that a duplication could be non-coding if it is in the same patient
#this will later become obvious in the visualization
#load the patient-gene mutation pairs
mutationDir = outDir + '/patientGeneMutationPairs/'
snvPatients = np.load(mutationDir + 'snvPatients.npy', allow_pickle=True, encoding='latin1').item()
svPatientsDel = np.load(mutationDir + 'svPatientsDel.npy', allow_pickle=True, encoding='latin1').item()
svPatientsDup = np.load(mutationDir + 'svPatientsDup.npy', allow_pickle=True, encoding='latin1').item()
svPatientsInv = np.load(mutationDir + 'svPatientsInv.npy', allow_pickle=True, encoding='latin1').item()
svPatientsItx = np.load(mutationDir + 'svPatientsItx.npy', allow_pickle=True, encoding='latin1').item()
cnvPatientsDel = np.load(mutationDir + 'cnvPatientsDel.npy', allow_pickle=True, encoding='latin1').item()
cnvPatientsAmp = np.load(mutationDir + 'cnvPatientsAmp.npy', allow_pickle=True, encoding='latin1').item()
#also show the non-coding SVs that do not lead to expression changes
allPairs = np.loadtxt(outDir + '/linkedSVGenePairs/nonCoding_geneSVPairs.txt_', dtype='object')
for pair in allPairs:
splitPair = pair[0].split('_')
gene = splitPair[0]
patient = splitPair[7]
sortedGeneInd = np.where(sortedGenes[:,0] == gene)[0]
if gene in snvPatients[patient]:
sortedGenes[sortedGeneInd, 5] += 1
if gene in cnvPatientsDel[patient]:
sortedGenes[sortedGeneInd, 6] += 1
if gene in cnvPatientsAmp[patient]:
sortedGenes[sortedGeneInd, 7] += 1
if gene in svPatientsDel[patient]:
sortedGenes[sortedGeneInd, 8] += 1
if gene in svPatientsDup[patient]:
sortedGenes[sortedGeneInd, 9] += 1
if gene in svPatientsInv[patient]:
sortedGenes[sortedGeneInd, 10] += 1
if gene in svPatientsItx[patient]:
sortedGenes[sortedGeneInd, 11] += 1
#for the current pair, only add it if it is not in the positive set.
if pair[0] not in positivePairs[:,0]:
#then check the type of SV, and add it to the right gene.
svType = splitPair[12]
if svType == 'DEL':
sortedGenes[sortedGeneInd, 16] += 1
elif svType == 'DUP':
sortedGenes[sortedGeneInd, 17] += 1
elif svType == 'INV':
sortedGenes[sortedGeneInd, 18] += 1
elif svType == 'ITX':
sortedGenes[sortedGeneInd, 19] += 1
print(sortedGenesTop[0:15,:])
#show these data in a bar plot.
#for each type of mutation, add to the stacked bar chart.
#fig,ax = plt.subplots()
geneInd = 0
ymax = 0
for gene in sortedGenes:
if gene[0] not in sortedGenesTop[0:15,0]:
continue
print(gene)
plt.bar(geneInd, gene[5], color='#ffcc00ff')
plt.bar(geneInd, gene[6], bottom=gene[5], color='#9955ffff')
plt.bar(geneInd, gene[7], bottom=gene[5]+gene[6], color='#ff6600b5')
plt.bar(geneInd, gene[8], bottom=gene[5]+gene[6]+gene[7], color='#0000ffb4')
plt.bar(geneInd, gene[9], bottom=gene[5]+gene[6]+gene[7]+gene[8], color='#d40000c6')
plt.bar(geneInd, gene[10], bottom=gene[5]+gene[6]+gene[7]+gene[8]+gene[9], color='#ff00ccb8')
plt.bar(geneInd, gene[11], bottom=gene[5]+gene[6]+gene[7]+gene[8]+gene[9]+gene[10], color='#808080ff')
if gene[5]+gene[6]+gene[7]+gene[8]+gene[9]+gene[10]+gene[11] > ymax:
ymax = gene[5]+gene[6]+gene[7]+gene[8]+gene[9]+gene[10]+gene[11] + 1
geneInd += 1
plt.ylim(0,ymax+1)
plt.tight_layout()
plt.savefig(finalOutDir + '/recurrence_bars.svg')
plt.clf()
exit()
###Also make the full recurrence plot for all patients.
#this is quick and dirty, should have been a re-usable function.
#load the sv-gene pairs
topPairs = dict()
topPairGenes = dict() #all unique genes
svTypes = ['DEL', 'DUP', 'INV', 'ITX']
degPairs = np.loadtxt(outDir + '/tadDisruptionsZScores/zScores.txt', dtype='object')
#format: gene as key, as values, the first is the number of times we found the gene,
#the second how many were dels, then dups, invs, itx.
splitPairs = dict()
genes = dict()
for pair in positivePairs: #get stats for all pairs
splitPair = pair[0].split('_')
if splitPair[0] + '_' + splitPair[7] not in splitPairs:
splitPairs[splitPair[0] + '_' + splitPair[7]] = []
splitPairs[splitPair[0] + '_' + splitPair[7]].append(pair)
if splitPair[0] not in genes:
#count, cross-patient count, nc: del, dup, inv, itx, snv, cnv amp, cnv del, sv del, sv dup, sv inv, sv itx
genes[splitPair[0]] = [0]*13
genes[splitPair[0]].append([]) #add the patient names here
genes[splitPair[0]].append(0) #negative dels
genes[splitPair[0]].append(0) #negative dups
genes[splitPair[0]].append(0) #negative invs
genes[splitPair[0]].append(0) #negative itx
genes[splitPair[0]][0] += 1
if splitPair[12] == 'DEL':
genes[splitPair[0]][1] += 1
elif splitPair[12] == 'DUP':
genes[splitPair[0]][2] += 1
elif splitPair[12] == 'INV':
genes[splitPair[0]][3] += 1
elif splitPair[12] == 'ITX':
genes[splitPair[0]][4] += 1
patient = splitPair[7]
genes[splitPair[0]][13].append(patient)
#convert to numpy array for easy ranking
recurrentGenes = []
for gene in genes:
#count how many unique patients affect the gene for recurrence
uniquePatients = np.unique(genes[gene][13])
data = [gene] + [len(uniquePatients)] + genes[gene]
recurrentGenes.append(data)
recurrentGenes = np.array(recurrentGenes, dtype='object')
#sort
sortedGenes = recurrentGenes[np.argsort(recurrentGenes[:,1])[::-1]]
sortedGenesTop = []
for gene in sortedGenes:
#if gene[0] not in topPairGenes:
# continue
sortedGenesTop.append(gene)
sortedGenesTop = np.array(sortedGenesTop, dtype='object')
#make a matrix in which we show visually which genes are affected in which patients
#this matrix is genes x patients
uniquePatients = dict()
top = 50 #making the matrix only for the top X genes
ind = 0
for gene in sortedGenesTop:
if ind >= top:
continue
patients = gene[15]
for patient in patients:
if patient not in uniquePatients:
uniquePatients[patient] = 0
uniquePatients[patient] += 1
ind += 1
#make a matrix of genes by patients
recurrenceMatrix = np.zeros([top, len(uniquePatients)])
ind = 0
patientOrder = dict() #order of patients in the matrix
for patientInd in range(0, len(uniquePatients)):
patient = list(uniquePatients.keys())[patientInd]
patientOrder[patient] = patientInd
for gene in sortedGenesTop:
if ind >= top:
continue
patients = gene[15]
for patient in patients:
patientInd = patientOrder[patient]
recurrenceMatrix[ind, patientInd] += 1
ind += 1
print(recurrenceMatrix)
#make a grid plot, showing the different SV types that the patients have
#color the genes with -/+ direction, see if it correlates with the SV types.
fig, ax = plt.subplots(figsize=(20,10))
for row in range(0, recurrenceMatrix.shape[0]):
if row < recurrenceMatrix.shape[0]-1:
ax.axhline(row+0.5, linestyle='--', color='k', linewidth=0.5)
for col in range(0, recurrenceMatrix.shape[1]):
if col < recurrenceMatrix.shape[1]-1:
ax.axvline(col+0.5, linestyle='--', color='k', linewidth=0.5)
if recurrenceMatrix[row,col] > 0:
#get the sv type to see which symbol to assign
gene = sortedGenesTop[row, 0]
patient = list(uniquePatients.keys())[col]
pairs = splitPairs[gene + '_' + patient]
#generate some random offsets to avoid overlapping data
offsetsX = random.sample(range(-30,30), len(pairs))
offsetsX = [i / float(100) for i in offsetsX]
offsetsY = random.sample(range(-30,30), len(pairs))
offsetsY = [i / float(100) for i in offsetsY]
ind = 0
for pair in pairs:
splitPair = pair[0].split('_')
svType = splitPair[12]
markerType = '.'
if svType == 'DEL':
markerType = '.'
elif svType == 'DUP':
markerType = 's'
elif svType == 'INV':
markerType = '^'
elif svType == 'ITX':
markerType = '*'
#also get up/down color
if patient + '_' + gene in degPairs[:,0]:
#get the z-score of the pair.
degPairInfo = degPairs[degPairs[:,0] == patient + '_' + gene][0]
color = 'red'
if float(degPairInfo[5]) > 1.5:
color = 'red'
elif float(degPairInfo[5]) < -1.5:
color = 'blue'
else:
color = 'grey'
else:
continue #this is a pair with likely coding mutations, skip it
plt.scatter(col + offsetsY[ind], offsetsX[ind] + (recurrenceMatrix.shape[0] - row -1), marker=markerType, edgecolor=color,
facecolor='none', s=35)
ind += 1
#the genes are swapped around to show the most recurrent on top, so reverse the labels as well
plt.yticks(range(0, recurrenceMatrix.shape[0]), sortedGenesTop[0:top,0][::-1])
plt.xticks(range(0, recurrenceMatrix.shape[1]), list(uniquePatients.keys()), rotation=90)
#plt.grid()
plt.tight_layout()
plt.savefig(finalOutDirFullFigure + '/recurrence_allPatients.svg')
plt.clf()
exit()
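# Added note (hedged): the script is run with a single argument, the output
# directory used throughout (outDir = sys.argv[1]), e.g.
#     python recurrence_analysis.py /path/to/output
# The script name shown is illustrative.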
# os, base64 and cv2 are used below (temp-file handling, frame encoding, video decoding)
import os
import base64

import cv2
import flask
from flask.ext.classy import FlaskView, route, request
from annotator_supreme.views.view_tools import *
from annotator_supreme import app
from annotator_supreme.controllers.dataset_controller import DatasetController
from annotator_supreme.controllers.image_controller import ImageController
from flask import render_template
from werkzeug.utils import secure_filename
class UploadVideoViewWebApp(FlaskView):
route_base = '/'
def __init__(self):
self.dataset_controller = DatasetController()
self.image_controller = ImageController()
@route('/add/video', methods=['GET'])
def datasets(self):
datasets = self.dataset_controller.get_datasets()
return render_template('upload_video.html', datasets=datasets)
def allowed_file(self, filename):
ALLOWED_EXTENSIONS = set(['mov', 'mpeg', 'mp4', 'mpg', 'avi'])
return '.' in filename and \
filename.rsplit('.', 1)[1].lower() in ALLOWED_EXTENSIONS
def extract_frontend_frames(self, request, step_frame):
file = request.files['file']
filename = secure_filename(file.filename)
file.save(os.path.join('/tmp/', filename))
vid = cv2.VideoCapture(os.path.join('/tmp/', filename))
if not vid.isOpened():
raise error_views.InternalError("Not possible to open video file.")
else:
frames = []
ret, frame = vid.read()
i = 0
while frame is not None:
if i % step_frame == 0:
# save it as base64 image
img_bytes = cv2.imencode('.jpg', frame)[1]
img_encoded = base64.b64encode(img_bytes).decode()
frames.append(img_encoded)
i = i + 1
ret, frame = vid.read()
vid.release()
return frames
@route('/video/front-upload', methods=['POST'])
def create_video(self):
file = request.files['file']
if file and self.allowed_file(file.filename):
if 'step_frame' in request.form:
step_frame = int(request.form['step_frame'])
else:
                app.logger.info("Step frame not provided, assuming 10.")
step_frame = 10
frames = self.extract_frontend_frames(request, step_frame)
if not frames:
raise error_views.InternalError("Unable to extract frames from the video file.")
return flask.jsonify({"frames_b64": frames})
else:
raise error_views.InternalError('The provided file is not an accepted extension.')
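# Added note (hedged): a client would exercise this view by POSTing a video
# file (field "file") plus an optional "step_frame" form value to
# /video/front-upload; the JSON response carries the extracted frames as
# base64-encoded JPEGs under "frames_b64".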
def get_value(hex_str, data):
if hex_str[:2] == '01':
end = 6
data['temp'] = int(hex_str[2:end], 16) / 10
hex_str = hex_str[end:]
elif hex_str[:2] == '02':
end = 4
data['humi'] = int(hex_str[2:end], 16)
hex_str = hex_str[end:]
elif hex_str[:2] == '04':
end = 6
data['lux'] = int(hex_str[2:end], 16)
hex_str = hex_str[end:]
elif hex_str[:2] == '05':
end = 4
data['motion'] = int(hex_str[2:end], 16)
hex_str = hex_str[end:]
elif hex_str[:2] == '06':
end = 6
data['co2'] = int(hex_str[2:end], 16)
hex_str = hex_str[end:]
elif hex_str[:2] == '07':
end = 6
data['volt'] = int(hex_str[2:end], 16) / 1000
hex_str = hex_str[end:]
elif hex_str[:2] == '15':
end = 6
data['soundPeak'] = int(hex_str[2:4], 16)
data['soundAvg'] = int(hex_str[4:end], 16)
hex_str = hex_str[end:]
else:
data['error'] = hex_str
hex_str = ''
return hex_str, data
def parse_elsys(hex_str, port=None):
"""
Parse payload like "01010f022e04006605000601b6070e4e".
See online converter: https://www.elsys.se/en/elsys-payload/
And document "Elsys LoRa payload_v10" at https://www.elsys.se/en/lora-doc/
:param hex_str: ELSYS hex payload
:param port: LoRaWAN port
:return: dict containing float values
"""
data = {}
# This while loop is just in case here, get_value seems to parse all values
# when they are in numeric order (01, 02, 04, 05, 06, 07)
while len(hex_str) > 0:
hex_str, data = get_value(hex_str, data)
return data
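# Worked example (added): parsing the payload from the docstring above,
# "01010f022e04006605000601b6070e4e", walks through the sensor types in order
# and yields
#     {'temp': 27.1, 'humi': 46, 'lux': 102, 'motion': 0, 'co2': 438, 'volt': 3.662}
# e.g. temp = int("010f", 16) / 10 = 27.1 and volt = int("0e4e", 16) / 1000 = 3.662.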
def decode_hex(hex_str: str, port: int = None):
return parse_elsys(hex_str, port=port)
if __name__ == '__main__':
import sys
try:
print(decode_hex(sys.argv[1], sys.argv[2]))
except IndexError as err:
print('Some examples:')
for s in [('0100d3020c04000b05000601aa070e41', 5),
('0100e50212040041050006003a070e4f', 5),
('0100c202100402d205000601cc070e49', 5),
('0100ef024704010a0501070e04155326', 5), # ERS Sound
]:
print(decode_hex(s[0], s[1]))
print(f'\nUsage: {sys.argv[0]} hex_payload port\n\n')
# Source: rkingsbury/MPContribs
# -*- coding: utf-8 -*-
from hashlib import md5
from flask_mongoengine import DynamicDocument
from mongoengine import signals, EmbeddedDocument
from mongoengine.fields import StringField, ListField, IntField, EmbeddedDocumentField
from mongoengine.queryset.manager import queryset_manager
from mpcontribs.api.contributions.document import format_cell, get_resource, get_md5, COMPONENTS
class Labels(EmbeddedDocument):
index = StringField(help_text="index name / x-axis label")
value = StringField(help_text="columns name / y-axis label")
variable = StringField(help_text="legend name")
class Attributes(EmbeddedDocument):
title = StringField(help_text="title")
labels = EmbeddedDocumentField(Labels)
class Tables(DynamicDocument):
name = StringField(required=True, help_text="name / title")
attrs = EmbeddedDocumentField(Attributes)
index = ListField(StringField(), required=True, help_text="index column")
columns = ListField(StringField(), required=True, help_text="column names/headers")
data = ListField(ListField(StringField()), required=True, help_text="table rows")
md5 = StringField(regex=r"^[a-z0-9]{32}$", unique=True, help_text="md5 sum")
total_data_rows = IntField(help_text="total number of rows")
meta = {"collection": "tables", "indexes": [
"name", "columns", "md5", "attrs.title",
"attrs.labels.index", "attrs.labels.value", "attrs.labels.variable"
]}
@queryset_manager
def objects(doc_cls, queryset):
return queryset.only("name", "md5")
@classmethod
def post_init(cls, sender, document, **kwargs):
document.data = [[format_cell(cell) for cell in row] for row in document.data]
@classmethod
def pre_save_post_validation(cls, sender, document, **kwargs):
# significant digits, md5 and total_data_rows
resource = get_resource("tables")
document.md5 = get_md5(resource, document, COMPONENTS["tables"])
document.total_data_rows = len(document.data)
signals.post_init.connect(Tables.post_init, sender=Tables)
signals.pre_save_post_validation.connect(Tables.pre_save_post_validation, sender=Tables)
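# Usage sketch (added; hedged): a Tables document is built from plain lists,
# e.g. Tables(name="bandgaps", index=["0", "1"], columns=["x", "y"],
# data=[["1", "2"], ["3", "4"]]); md5 and total_data_rows are filled in by the
# pre_save_post_validation hook above when the document is saved. The field
# values shown are illustrative only.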
import unittest
import pytest
import sys
import time
import hubcheck
from webdav import WebdavClient
from webdav.Connection import WebdavError,AuthorizationError
pytestmark = [ pytest.mark.container,
pytest.mark.webdav,
pytest.mark.nightly,
pytest.mark.reboot
]
# sleep for 15 minutes to avoid fail2ban related errors
SLEEPTIME=60*15
@pytest.mark.registereduser
class container_webdav(hubcheck.testcase.TestCase):
def setUp(self):
# get user account info
self.username,self.userpass = self.testdata.find_account_for(
'registeredworkspace')
webdav_base = self.testdata.find_url_for('webdav')
self.webdav_url = 'https://%s/webdav' % webdav_base
self.do_sleep = True
@pytest.mark.webdav_login
def test_valid_user_valid_password_login(self):
"""
try webdav login with valid user and valid password
"""
c = WebdavClient.CollectionStorer(self.webdav_url)
c.connection.addBasicAuthorization(self.username,self.userpass)
try:
c.validate()
# successful login does not require sleep
self.do_sleep = False
except AuthorizationError, e:
self.fail("webdav login to %s as %s failed: %s"
% (self.webdav_url,self.username,e))
def test_invalid_user_login(self):
"""
try webdav login with an invalid user
"""
c = WebdavClient.CollectionStorer(self.webdav_url)
c.connection.addBasicAuthorization('invaliduser','invalidpass')
with self.assertRaises(WebdavError) as cm:
c.validate()
def test_valid_user_invalid_passwordlogin(self):
"""
try webdav login with a valid user and invalid password
"""
c = WebdavClient.CollectionStorer(self.webdav_url)
c.connection.addBasicAuthorization(self.username,'invalidpass')
with self.assertRaises(AuthorizationError) as cm:
c.validate()
def tearDown(self):
if self.do_sleep:
time.sleep(SLEEPTIME)
"""
Tests for the future.standard_library module
"""
from __future__ import absolute_import, print_function
from future import standard_library
from future import utils
from future.tests.base import unittest, CodeHandler, expectedFailurePY2
import sys
import tempfile
import os
import copy
import textwrap
from subprocess import CalledProcessError
class TestChainMap(CodeHandler):
def setUp(self):
self.interpreter = sys.executable
standard_library.install_aliases()
super(TestChainMap, self).setUp()
def tearDown(self):
# standard_library.remove_hooks()
pass
@staticmethod
def simple_cm():
from collections import ChainMap
c = ChainMap()
c['one'] = 1
c['two'] = 2
cc = c.new_child()
cc['one'] = 'one'
return c, cc
def test_repr(self):
c, cc = TestChainMap.simple_cm()
order1 = "ChainMap({'one': 'one'}, {'one': 1, 'two': 2})"
order2 = "ChainMap({'one': 'one'}, {'two': 2, 'one': 1})"
assert repr(cc) in [order1, order2]
def test_recursive_repr(self):
"""
        Test for degenerative recursive cases. Very unlikely in
ChainMaps. But all must bow before the god of testing coverage.
"""
from collections import ChainMap
c = ChainMap()
c['one'] = c
assert repr(c) == "ChainMap({'one': ...})"
def test_get(self):
c, cc = TestChainMap.simple_cm()
assert cc.get('two') == 2
assert cc.get('three') == None
assert cc.get('three', 'notthree') == 'notthree'
def test_bool(self):
from collections import ChainMap
c = ChainMap()
assert not(bool(c))
c['one'] = 1
c['two'] = 2
assert bool(c)
cc = c.new_child()
cc['one'] = 'one'
assert cc
def test_fromkeys(self):
from collections import ChainMap
keys = 'a b c'.split()
c = ChainMap.fromkeys(keys)
assert len(c) == 3
assert c['a'] == None
assert c['b'] == None
assert c['c'] == None
def test_copy(self):
c, cc = TestChainMap.simple_cm()
new_cc = cc.copy()
assert new_cc is not cc
assert sorted(new_cc.items()) == sorted(cc.items())
def test_parents(self):
c, cc = TestChainMap.simple_cm()
new_c = cc.parents
assert c is not new_c
assert len(new_c) == 2
assert new_c['one'] == c['one']
assert new_c['two'] == c['two']
def test_delitem(self):
c, cc = TestChainMap.simple_cm()
with self.assertRaises(KeyError):
del cc['two']
del cc['one']
assert len(cc) == 2
assert cc['one'] == 1
assert cc['two'] == 2
def test_popitem(self):
c, cc = TestChainMap.simple_cm()
assert cc.popitem() == ('one', 'one')
with self.assertRaises(KeyError):
cc.popitem()
def test_pop(self):
c, cc = TestChainMap.simple_cm()
assert cc.pop('one') == 'one'
with self.assertRaises(KeyError):
cc.pop('two')
assert len(cc) == 2
def test_clear(self):
c, cc = TestChainMap.simple_cm()
cc.clear()
assert len(cc) == 2
assert cc['one'] == 1
assert cc['two'] == 2
def test_missing(self):
c, cc = TestChainMap.simple_cm()
with self.assertRaises(KeyError):
cc['clown']
if __name__ == '__main__':
unittest.main()
# uproot/behaviors/TParameter.py
# BSD 3-Clause License; see https://github.com/scikit-hep/uproot4/blob/main/LICENSE
"""
This module defines the behavior of ``TParameter<T>``.
"""
from __future__ import absolute_import
class TParameter_3c_boolean_3e_(object):
"""
Behaviors for ``TParameter<boolean>``.
"""
@property
def value(self):
return bool(self.member("fVal"))
def __bool__(self):
return bool(self.member("fVal"))
def __int__(self):
return int(self.member("fVal"))
def __float__(self):
return float(self.member("fVal"))
class TParameter_3c_integer_3e_(object):
"""
Behaviors for ``TParameter<integer>``.
"""
@property
def value(self):
return int(self.member("fVal"))
def __bool__(self):
return bool(self.member("fVal"))
def __int__(self):
return int(self.member("fVal"))
def __index__(self):
return int(self.member("fVal"))
def __float__(self):
return float(self.member("fVal"))
class TParameter_3c_floating_3e_(object):
"""
Behaviors for ``TParameter<floating>``.
"""
@property
def value(self):
return float(self.member("fVal"))
def __bool__(self):
return bool(self.member("fVal"))
def __int__(self):
return int(self.member("fVal"))
def __float__(self):
return float(self.member("fVal"))
def TParameter(specialization):
if specialization in ("_3c_bool_3e_", "_3c_Bool_5f_t_3e_"):
return TParameter_3c_boolean_3e_
elif specialization in (
"_3c_float_3e_",
"_3c_double_3e_",
"_3c_long_20_double_3e_",
"_3c_Float_5f_t_3e_",
"_3c_Double_5f_t_3e_",
"_3c_LongDouble_5f_t_3e_",
):
return TParameter_3c_floating_3e_
else:
return TParameter_3c_integer_3e_
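# Added illustration: the factory dispatches on the mangled C++ template
# argument, so TParameter("_3c_double_3e_") is TParameter_3c_floating_3e_ and
# TParameter("_3c_int_3e_") falls through to TParameter_3c_integer_3e_.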
# Source: ezquire/python-challenges
# Enter your code here. Read input from STDIN. Print output to STDOUT
class TrieNode():
def __init__(self, char):
self.character = char
self.children = {}
self.endOfWord = False
def add(root, word):
node = root
for char in word:
found_in_children = False
if char in node.children:
node = node.children[char]
found_in_children = True
if not found_in_children:
new_node = TrieNode(char)
node.children[char] = new_node
node = new_node
node.endOfWord = True
def search(root, word):
node = root
for char in word:
if char not in node.children:
return False
else: node = node.children[char]
return node.endOfWord
def searchPrefix(root, prefix):
node = root
for char in prefix:
if char not in node.children:
return False
else: node = node.children[char]
return True
def printTrie(root):
if root:
node = root
for key in node.children.keys():
print(node.children[key].character)
printTrie(node.children[key])
root = TrieNode('*')
add(root, "hellomynameis")
add(root, "goodbye")
print(searchPrefix(root, "hello"))
printTrie(root)
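# Added example checks (hedged; not in the original submission):
print(search(root, "goodbye"))      # True: inserted as a full word
print(search(root, "good"))         # False: only a prefix of "goodbye"
print(searchPrefix(root, "good"))   # True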
# deployment_scripts/puppet/modules/plugin_zabbix/files/scripts/check_api.py
#!/usr/bin/python
#
# Copyright 2015 Mirantis, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
import urllib2
import sys
import simplejson as json
import ConfigParser
from zabbix_checks_logger import get_logger
CONF_FILE = '/etc/zabbix/check_api.conf'
class OSAPI(object):
"""Openstack API"""
def __init__(self, logger, config):
self.logger = logger
self.config = config
self.username = self.config.get('api', 'user')
self.password = self.config.get('api', 'password')
self.tenant_name = self.config.get('api', 'tenant')
self.endpoint_keystone = self.config.get('api',
'keystone_endpoints'
).split(',')
self.token = None
self.tenant_id = None
self.get_token()
def get_timeout(self, service):
try:
return int(self.config.get('api', '%s_timeout' % service))
except ConfigParser.NoOptionError:
return 1
def get_token(self):
data = json.dumps({
"auth":
{
'tenantName': self.tenant_name,
'passwordCredentials':
{
'username': self.username,
                    'password': self.password
}
}
})
for keystone in self.endpoint_keystone:
self.logger.info("Trying to get token from '%s'" % keystone)
try:
request = urllib2.Request(
'%s/tokens' % keystone,
data=data,
headers={
'Content-type': 'application/json'
})
data = json.loads(
urllib2.urlopen(
request, timeout=self.get_timeout('keystone')).read())
self.token = data['access']['token']['id']
self.tenant_id = data['access']['token']['tenant']['id']
self.logger.debug("Got token '%s'" % self.token)
return
except Exception as e:
self.logger.debug("Got exception '%s'" % e)
self.logger.critical(0)
sys.exit(1)
def check_api(self, url, service):
self.logger.info("Trying '%s' on '%s'" % (service, url))
try:
request = urllib2.Request(url,
headers={
'X-Auth-Token': self.token,
})
urllib2.urlopen(request, timeout=self.get_timeout(service))
except Exception as e:
self.logger.debug("Got exception from '%s' '%s'" % (service, e))
self.logger.critical(0)
sys.exit(1)
self.logger.critical(1)
def main():
config = ConfigParser.RawConfigParser()
config.read(CONF_FILE)
logger = get_logger(config.get('api', 'log_level'))
API = OSAPI(logger, config)
if len(sys.argv) < 5:
logger.critical('No argvs, dunno what to do')
sys.exit(1)
map = config.get('api', '%s_map' % sys.argv[1])
url = '%s://%s:%s/%s' % (sys.argv[2], sys.argv[3], sys.argv[4], map)
url = url % API.__dict__
API.check_api(url, sys.argv[1])
if __name__ == "__main__":
main()
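# Added note (hedged): the script expects four positional arguments, e.g.
#     check_api.py nova http 192.168.0.2 8774
# where the first argument selects the "<service>_map" entry from
# /etc/zabbix/check_api.conf and the remaining three build the URL as
# protocol://host:port/<map>. The values shown are illustrative.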
"""
Qwiz Model unittests.
"""
from django.test import TestCase
from django.core.exceptions import ValidationError
from .models import Tag, Question, Room, Contestant
class TagModelTests(TestCase):
"""
Tag Model test cases.
"""
def tearDown(self):
"""
Tag test teardown, removing all created Tags
"""
Tag.objects.all().delete()
def test_tag(self):
"""
Add a tag.
"""
tag = Tag(text='foo_tag')
tag.save()
self.assertEqual(str(tag), 'foo_tag')
def test_tag_no_text(self):
"""
Add a tag with no text.
"""
tag1 = Tag()
with self.assertRaises(ValidationError):
tag1.full_clean()
def test_tag_dup(self):
"""
Add a tag and then a duplicate.
"""
tag1 = Tag(text='bar_tag')
tag1.save()
tag2 = Tag(text='bar_tag')
with self.assertRaises(ValidationError):
tag2.full_clean()
def test_tag_case(self):
"""
Attempt to add two identical tags with upper/lower case letters.
"""
tag1 = Tag(text='foo')
tag1.save()
tag2 = Tag(text='FOO')
with self.assertRaises(ValidationError):
tag2.full_clean()
class QuestionModelTests(TestCase):
"""
Question Model test cases.
"""
def tearDown(self):
"""
Question test teardown, removing all created questions
"""
Question.objects.all().delete()
def test_new_question_no_text(self):
"""
Add a new question with no text.
"""
question = Question()
with self.assertRaises(ValidationError):
question.full_clean()
def test_new_question(self):
"""
Add a new question.
"""
question = Question(question_text='foo')
self.assertEqual(str(question), 'foo')
class RoomModelTests(TestCase):
"""
Room Model test cases.
"""
def test_room_no_name(self):
"""
Add a room with no name.
"""
room = Room()
with self.assertRaises(ValidationError):
room.full_clean()
def test_room_dup_code(self):
"""
Add rooms and edit them to have the same code.
"""
room1 = Room(name='room1')
room1.save()
room2 = Room(name='room2')
room2.save()
room2.code = room1.code
with self.assertRaises(ValidationError):
room2.full_clean()
def test_room(self):
"""
Add a room.
"""
room = Room(name='test_room')
self.assertEqual(str(room), 'test_room')
class ContestantModelTests(TestCase):
"""
Contestant Model test cases.
"""
def test_contestant_no_name(self):
"""
Add a new Contestant with no name.
"""
room = Room(name='test_room')
contestant = Contestant(room=room)
with self.assertRaises(ValidationError):
contestant.full_clean()
def test_contestant_no_room(self):
"""
Add a new Contestant with no room.
"""
contestant = Contestant(handle='joe')
with self.assertRaises(Room.DoesNotExist):
contestant.full_clean()
def test_contestant(self):
"""
Add a new Contestant.
"""
room = Room(name='test_room')
contestant = Contestant(handle='bob', room=room)
self.assertEqual(str(contestant), 'bob')
self.assertEqual(contestant.score, 0)
self.assertEqual(contestant.complete, False)
file_name = "show_ver.out"
with open(file_name, "r") as f:
output = f.read()
if 'Cisco' in output:
print "Found Cisco string"
import numpy as np
import pandas as pd
import statsmodels.api as sm
import statsmodels.formula.api as smf
from stargazer.stargazer import Stargazer
from IPython.core.display import HTML
from IPython.core.interactiveshell import InteractiveShell
from statsmodels.sandbox.regression.gmm import IV2SLS
def create_table1(df):
#Ethnicity table
df_1 = df.sort_values(by="ethnicity_C2", ascending=False)
df_1 = df_1[['country', 'ethnicity_C2', 'ethnicity_I']].head()
df_1 = df_1.reset_index(drop=True)
df_2 = df.sort_values(by="ethnicity_C2", ascending=True)
df_2 = df_2[['country', 'ethnicity_C2', 'ethnicity_I']].head()
df_2 = df_2.reset_index(drop=True)
df_eth = pd.concat([df_1,df_2], axis = 1)
df_eth = df_eth.round(decimals=3)
#Language table
df_3 = df.sort_values(by="language_C2", ascending=False)
df_3 = df_3[['country', 'language_C2', 'language_I']].head()
df_3 = df_3.reset_index(drop=True)
df_4 = df.sort_values(by="language_C2", ascending=True)
df_4 = df_4[['country', 'language_C2', 'language_I']].head()
df_4 = df_4.reset_index(drop=True)
df_lang = pd.concat([df_3,df_4], axis = 1)
df_lang = df_lang.round(decimals=3)
#Religion table
df_5 = df.sort_values(by="religion_C2", ascending=False)
df_5 = df_5[['country', 'religion_C2', 'religion_I']].head()
df_5 = df_5.reset_index(drop=True)
df_6 = df.sort_values(by="religion_C2", ascending=True)
df_6 = df_6[['country', 'religion_C2', 'religion_I']].head()
df_6 = df_6.reset_index(drop=True)
df_rel = pd.concat([df_5,df_6], axis = 1)
df_rel = df_rel.round(decimals=3)
#Rename rows and columns of complete table
table1 = pd.concat([df_eth,df_lang,df_rel],axis = 1)
table1.columns = pd.MultiIndex.from_product([['Ethinicity','Language','Religion'],
['Most segregated','Least segregated',],
['Country','$\hat{S}$', '$F$']])
return table1
def create_table2(df):
variables = df[['ethnicity_C2','language_C2','religion_C2','ethnicity_C','language_C','religion_C',
'voice','PolStab','GovEffec','RegQual','RulLaw','ConCorr']]
table2 = pd.DataFrame()
table2 = variables.corr()
table2.drop(['voice','PolStab','GovEffec','RegQual','RulLaw','ConCorr'], axis = 1, inplace = True)
table2.drop(['ethnicity_C2','ethnicity_C','language_C2','language_C','religion_C2','religion_C'],
axis = 0, inplace = True)
table2 = table2.round(decimals=2)
table2.columns = pd.MultiIndex.from_product([['Segregation indicies'],
['Ethnicity $\hat{S}$','Language $\hat{S}$', 'Religion $\hat{S}$',
'Ethnicity $\tilde{S}$','Language $\tilde{S}$', 'Religion $\tilde{S}$']]
)
    table2 = table2.rename(index = {
        "voice" : 'Voice',
        "PolStab" : 'Political stability',
        "GovEffec" : 'Government effectiveness',
        "RegQual" : 'Regulatory quality',
        "RulLaw" : 'Rule of law',
        "ConCorr" : 'Control of corruption'}
    )
return table2
def table3_7(df, regression_type) :
df_3_7E = df[['ethnicity_C2','ethnicity_instrument_C2_thresh','ethnicity_I','lnpopulation','lnGDP_pc',
'protestants','muslims','catholics','latitude','LOEnglish','LOGerman','LOSocialist','lnArea',
'LOScandin','democ','mtnall','RulLaw']].dropna(axis=0)
df_3_7L = df[['language_C2','language_instrument_C2_thresh','language_I','lnpopulation','lnGDP_pc',
'protestants','muslims','catholics','latitude','LOEnglish','LOGerman','LOSocialist','lnArea',
'LOScandin','democ','mtnall','RulLaw']].dropna(axis=0)
df_3_7R = df[['religion_C2','religion_instrument_C2_thresh','religion_I','lnpopulation','lnGDP_pc',
'protestants','muslims','catholics','latitude','LOEnglish','LOGerman','LOSocialist','lnArea',
'LOScandin','democ','mtnall','RulLaw']].dropna(axis=0)
exo = sm.add_constant(df_3_7E[['ethnicity_C2','ethnicity_I','lnpopulation','lnGDP_pc','protestants','muslims',
'catholics','latitude','LOEnglish','LOGerman','LOSocialist', 'LOScandin','lnArea',
'democ','mtnall']])
exo2 = sm.add_constant(df_3_7E[['ethnicity_C2','ethnicity_I']])
exo3 = sm.add_constant(df_3_7L[['language_C2','language_I','lnpopulation','lnGDP_pc','protestants','lnArea',
'muslims','catholics','latitude','LOEnglish','LOGerman','LOSocialist', 'LOScandin',
'democ','mtnall']])
exo4 = sm.add_constant(df_3_7L[['language_C2','language_I']])
exo5 = sm.add_constant(df_3_7R[['religion_C2','religion_I','lnpopulation','lnGDP_pc','protestants',
'muslims','catholics','latitude','LOEnglish','LOGerman','LOSocialist','lnArea',
'democ','mtnall']])
exo6 = sm.add_constant(df_3_7R[['religion_C2','religion_I']])
if regression_type == 'IV2SLS' :
reg = IV2SLS(df_3_7E['RulLaw'],
exo,
sm.add_constant(df_3_7E[['ethnicity_instrument_C2_thresh','ethnicity_I','lnpopulation','lnGDP_pc',
'protestants','muslims','catholics','latitude','LOEnglish','LOGerman',
'LOSocialist', 'LOScandin','democ','mtnall','lnArea']])).fit()
reg2 = IV2SLS(df_3_7E['RulLaw'],
exo2,
sm.add_constant(df_3_7E[['ethnicity_instrument_C2_thresh',
'ethnicity_I']])).fit()
reg3 = IV2SLS(df_3_7L['RulLaw'],
exo3,
sm.add_constant(df_3_7L[['language_instrument_C2_thresh','language_I','lnpopulation','lnGDP_pc',
'protestants','muslims','catholics','latitude','LOEnglish','LOGerman',
'LOSocialist', 'LOScandin','democ','mtnall','lnArea']])).fit()
reg4 = IV2SLS(df_3_7L['RulLaw'],
exo4,
sm.add_constant(df_3_7L[['language_instrument_C2_thresh',
'language_I']])).fit()
reg5 = IV2SLS(df_3_7R['RulLaw'],
exo5,
sm.add_constant(df_3_7R[['religion_instrument_C2_thresh','religion_I','lnpopulation','lnGDP_pc',
'protestants','muslims','catholics','latitude','LOEnglish','LOGerman',
'LOSocialist','democ','mtnall','lnArea']])).fit()
reg6 = IV2SLS(df_3_7R['RulLaw'],
exo6,
sm.add_constant(df_3_7R[['religion_instrument_C2_thresh',
'religion_I']])).fit()
elif regression_type == 'OLS' :
reg2 = sm.OLS(df_3_7E['RulLaw'], exo2).fit(cov_type = 'HC1')
reg = sm.OLS(df_3_7E['RulLaw'], exo).fit(cov_type = 'HC1')
reg4 = sm.OLS(df_3_7L['RulLaw'], exo4).fit(cov_type = 'HC1')
reg3 = sm.OLS(df_3_7L['RulLaw'], exo3).fit(cov_type = 'HC1')
reg6 = sm.OLS(df_3_7R['RulLaw'], exo6).fit(cov_type = 'HC1')
reg5 = sm.OLS(df_3_7R['RulLaw'], exo5).fit(cov_type = 'HC1')
stargazer = Stargazer([reg2, reg, reg4, reg3, reg6, reg5])
stargazer.covariate_order(['ethnicity_C2', 'ethnicity_I','language_C2','language_I','religion_C2','religion_I',
'lnpopulation','lnGDP_pc','lnArea','protestants','muslims','catholics',
'latitude','LOEnglish','LOGerman','LOSocialist','LOScandin','democ','mtnall','const'])
stargazer.rename_covariates({'ethnicity_C2' : 'Segregation $\hat{S}$ (ethnicity)',
'ethnicity_I' : 'Fractionalization $F$ (ethnicity)',
'language_C2' : 'Segregation $\hat{S}$ (language)',
'language_I' : 'Fractionalization $F$ (language)',
'religion_C2' : 'Segregation $\hat{S}$ (religion)',
'religion_I' : 'Fractionalization $F$ (religion)',
'lnpopulation' : 'ln (population)',
'lnGDP_pc' : 'ln (GDP per capita)',
'lnArea' : 'ln (average size of region)',
                                 'protestants' : 'Protestants share',
                                 'muslims' : 'Muslims share',
'catholics' : 'Catholics share',
'latitude' : 'Latitude',
'LOEnglish' : 'English legal origin',
'LOGerman' : 'German legal origin',
'LOSocialist' : 'Socialist legal origin',
'LOScandin' : 'Scandinavian legal origin',
'democ' : 'Democratic tradition',
'mtnall' : 'Mountains',
'const' : 'Constant'})
return HTML(stargazer.render_html())
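#A minimal two-stage least squares sketch (an illustration only, not the replication's estimator):
#stage one regresses the endogenous regressor on the instrument, stage two regresses the outcome on
#the first-stage fitted values. All variables below are hypothetical toy data, and the plain
#second-stage OLS standard errors are not the correct 2SLS standard errors.
def manual_2sls_sketch(seed = 1, n = 200):
    import numpy as np
    import statsmodels.api as sm
    rng = np.random.default_rng(seed)
    z = rng.normal(size = n)                  #instrument
    u = rng.normal(size = n)                  #unobserved confounder
    x = 0.8 * z + u + rng.normal(size = n)    #endogenous regressor
    y = 1.5 * x + u + rng.normal(size = n)    #outcome
    first = sm.OLS(x, sm.add_constant(z)).fit()
    second = sm.OLS(y, sm.add_constant(first.fittedvalues)).fit()
    return second.params                      #slope should land near 1.5, unlike naive OLS of y on x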
def table4_5(df,name) :
df_table4A = df[[f'{name}_C2',f'{name}_I','lnpopulation','lnGDP_pc','protestants','muslims',
'catholics','latitude','LOEnglish','LOGerman','LOSocialist','LOScandin','democ',
'mtnall','voice','PolStab','GovEffec','RegQual','ConCorr','RulLaw']].dropna(axis = 0)
df_table4B = df_table4A[[f'{name}_C2',f'{name}_I','voice','PolStab','GovEffec','RegQual','ConCorr','RulLaw']]
df_table4C = df_table4A[df_table4A.democ > 1]
xA = sm.add_constant(df_table4A[[f'{name}_C2',f'{name}_I','lnpopulation','lnGDP_pc','protestants',
'muslims','catholics','latitude','LOEnglish','LOGerman','LOSocialist',
'LOScandin','democ','mtnall']])
xB = sm.add_constant(df_table4B[[f'{name}_C2', f'{name}_I']])
xC = sm.add_constant(df_table4C[[f'{name}_C2',f'{name}_I','lnpopulation','lnGDP_pc','protestants',
'muslims','catholics','latitude','LOEnglish','LOGerman','LOSocialist',
'LOScandin','democ','mtnall']])
df_table4s = [df_table4A, df_table4B, df_table4C]
xs = [xA, xB, xC]
y = [
[f'y{idx}A', f'y{idx}B', f'y{idx}C']
for idx in range(1, 7)
]
est = [
[f'est{idx}A', f'est{idx}B', f'est{idx}C']
for idx in range(1, 7)
]
star = ['starA','starB','starC']
for idx, i in enumerate(['A','B','C']) :
y[0][idx] = df_table4s[idx]['voice']
y[1][idx] = df_table4s[idx]['PolStab']
y[2][idx] = df_table4s[idx]['GovEffec']
y[3][idx] = df_table4s[idx]['RegQual']
y[4][idx] = df_table4s[idx]['RulLaw']
y[5][idx] = df_table4s[idx]['ConCorr']
est[0][idx] = sm.OLS(y[0][idx], xs[idx]).fit(cov_type = 'HC1')
est[1][idx] = sm.OLS(y[1][idx], xs[idx]).fit(cov_type = 'HC1')
est[2][idx] = sm.OLS(y[2][idx], xs[idx]).fit(cov_type = 'HC1')
est[3][idx] = sm.OLS(y[3][idx], xs[idx]).fit(cov_type = 'HC1')
est[4][idx] = sm.OLS(y[4][idx], xs[idx]).fit(cov_type = 'HC1')
est[5][idx] = sm.OLS(y[5][idx], xs[idx]).fit(cov_type = 'HC1')
star[idx] = Stargazer([est[0][idx],est[1][idx],est[2][idx],est[3][idx],est[4][idx],est[5][idx]])
for i in range(3) :
star[i].covariate_order([f'{name}_C2',f'{name}_I'])
star[i].rename_covariates({f'{name}_C2' : 'Segregation $\hat{S}$ ('f'{name}'')',
f'{name}_I' : 'Fractionalization $F$ ('f'{name}'')'})
star[i].show_model_numbers(False)
star[i].custom_columns(['Voice',
'Political stability',
'Govern-t effectiv.',
'Regul. quality',
'Rule of law',
'Control of corr'],
[1,1,1,1,1,1])
star[0].add_line('Controls', ['Yes','Yes','Yes','Yes','Yes','Yes'])
star[0].add_line('Sample', ['Full','Full','Full','Full','Full','Full'])
star[1].add_line('Controls', ['No','No','No','No','No','No'])
star[1].add_line('Sample', ['Full','Full','Full','Full','Full','Full'])
star[2].add_line('Controls', ['Yes','Yes','Yes','Yes','Yes','Yes'])
star[2].add_line('Sample', ['Democ','Democ','Democ','Democ','Democ','Democ'])
star[0].title('Panel A. Baseline : All controls and full sample')
star[1].title('Panel B. No controls and full sample')
star[2].title('Panel C. All controls; sample excludes dictatorship')
return [star[0],star[1],star[2]]
def table6(df, alternative = True) :
df_6E = df[['ethnicity_C2', 'ethnicity_I','ethnicity_C','ethnicity_instrument_C_thresh',
'ethnicity_instrument_C2_thresh','lnpopulation','lnGDP_pc','protestants','muslims',
'catholics','latitude','LOEnglish','LOGerman','LOSocialist','LOScandin','democ',
'mtnall','RulLaw','country']].dropna(axis=0)
df_6L = df[['language_C2', 'language_I','language_C','language_instrument_C_thresh',
'language_instrument_C2_thresh','lnpopulation','lnGDP_pc','protestants','muslims',
'catholics','latitude','LOEnglish','LOGerman','LOSocialist','LOScandin','democ',
'mtnall','RulLaw','country']].dropna(axis=0)
df_6R = df[['religion_C2', 'religion_I','religion_C','religion_instrument_C_thresh',
'religion_instrument_C2_thresh','lnpopulation','lnGDP_pc','protestants','muslims',
'catholics','latitude','LOEnglish','LOGerman','LOSocialist','LOScandin','democ',
'mtnall','RulLaw','country']].dropna(axis=0)
df_6E_demo = df_6E[df_6E.democ >= 1]
df_6L_demo = df_6L[df_6L.democ >= 1]
df_6R_demo = df_6R[df_6R.democ >= 1]
x1 = sm.add_constant(df_6E[['ethnicity_instrument_C2_thresh', 'ethnicity_I','lnpopulation','lnGDP_pc',
'protestants','muslims','catholics','latitude','LOEnglish',
'LOGerman','LOSocialist','LOScandin','democ','mtnall']])
x2 = sm.add_constant(df_6L[['language_instrument_C2_thresh', 'language_I','lnpopulation','lnGDP_pc',
'protestants','muslims','catholics','latitude','LOEnglish','LOGerman',
'LOSocialist','LOScandin','democ','mtnall']])
x3 = sm.add_constant(df_6R[['religion_instrument_C2_thresh', 'religion_I','lnpopulation','lnGDP_pc',
'protestants','muslims','catholics','latitude','LOEnglish','LOGerman',
'LOSocialist','democ','mtnall']])
x4 = sm.add_constant(df_6E_demo[['ethnicity_instrument_C2_thresh', 'ethnicity_I','lnpopulation','lnGDP_pc',
'protestants','muslims','catholics','latitude','LOEnglish','LOGerman',
'LOSocialist','LOScandin','democ','mtnall']])
x5 = sm.add_constant(df_6L_demo[['language_instrument_C2_thresh', 'language_I','lnpopulation','lnGDP_pc',
'protestants','muslims','catholics','latitude','LOEnglish','LOGerman',
'LOSocialist','LOScandin','democ','mtnall']])
x6 = sm.add_constant(df_6R_demo[['religion_instrument_C2_thresh', 'religion_I','lnpopulation','lnGDP_pc',
'protestants','muslims','catholics','latitude','LOEnglish','LOGerman',
'LOSocialist','democ','mtnall']])
y1 = df_6E['ethnicity_C2']
y2 = df_6L['language_C2']
y3 = df_6R['religion_C2']
y4 = df_6E_demo['ethnicity_C2']
y5 = df_6L_demo['language_C2']
y6 = df_6R_demo['religion_C2']
est1 = sm.OLS(y1, x1).fit(cov_type = 'HC1')
est2 = sm.OLS(y2, x2).fit(cov_type = 'HC1')
est3 = sm.OLS(y3, x3).fit(cov_type = 'HC1')
est4 = sm.OLS(y4, x4).fit(cov_type = 'HC1')
est5 = sm.OLS(y5, x5).fit(cov_type = 'HC1')
est6 = sm.OLS(y6, x6).fit(cov_type = 'HC1')
x1a = sm.add_constant(df_6E[['ethnicity_instrument_C_thresh', 'ethnicity_I','lnpopulation','lnGDP_pc',
'protestants','muslims','catholics','latitude','LOEnglish','LOGerman',
'LOSocialist','LOScandin','democ','mtnall']])
x2a = sm.add_constant(df_6L[['language_instrument_C_thresh', 'language_I','lnpopulation','lnGDP_pc',
'protestants','muslims','catholics','latitude','LOEnglish','LOGerman',
'LOSocialist','LOScandin','democ','mtnall']])
x3a = sm.add_constant(df_6R[['religion_instrument_C_thresh', 'religion_I','lnpopulation','lnGDP_pc',
'protestants','muslims','catholics','latitude','LOEnglish','LOGerman',
'LOSocialist','democ','mtnall']])
x4a = sm.add_constant(df_6E_demo[['ethnicity_instrument_C_thresh', 'ethnicity_I','lnpopulation','lnGDP_pc',
'protestants','muslims','catholics','latitude','LOEnglish','LOGerman',
'LOSocialist','LOScandin','democ','mtnall']])
x5a = sm.add_constant(df_6L_demo[['language_instrument_C_thresh', 'language_I','lnpopulation','lnGDP_pc',
'protestants','muslims','catholics','latitude','LOEnglish','LOGerman',
'LOSocialist','LOScandin','democ','mtnall']])
x6a = sm.add_constant(df_6R_demo[['religion_instrument_C_thresh', 'religion_I','lnpopulation','lnGDP_pc',
'protestants','muslims','catholics','latitude','LOEnglish','LOGerman',
'LOSocialist','democ','mtnall']])
y1a = df_6E['ethnicity_C']
y2a = df_6L['language_C']
y3a = df_6R['religion_C']
y4a = df_6E_demo['ethnicity_C']
y5a = df_6L_demo['language_C']
y6a = df_6R_demo['religion_C']
est1a = sm.OLS(y1a, x1a).fit(cov_type = 'HC1')
est2a = sm.OLS(y2a, x2a).fit(cov_type = 'HC1')
est3a = sm.OLS(y3a, x3a).fit(cov_type = 'HC1')
est4a = sm.OLS(y4a, x4a).fit(cov_type = 'HC1')
est5a = sm.OLS(y5a, x5a).fit(cov_type = 'HC1')
est6a = sm.OLS(y6a, x6a).fit(cov_type = 'HC1')
df_6Lb = df_6L.set_index('country')
df_6Lb_demo = df_6L_demo.set_index('country')
x2b = sm.add_constant(df_6Lb[['language_instrument_C_thresh', 'language_I','lnpopulation','lnGDP_pc',
'protestants','muslims','catholics','latitude','LOEnglish','LOGerman',
'LOSocialist','LOScandin','democ','mtnall']].drop(index = 'usa'))
x5b = sm.add_constant(df_6Lb_demo[['language_instrument_C_thresh', 'language_I','lnpopulation','lnGDP_pc',
'protestants','muslims','catholics','latitude','LOEnglish','LOGerman',
'LOSocialist','LOScandin','democ','mtnall']].drop(index = 'usa'))
y2b = df_6Lb['language_C'].drop(index = 'usa')
y5b = df_6Lb_demo['language_C'].drop(index = 'usa')
est2b = sm.OLS(y2b, x2b).fit(cov_type = 'HC1')
est5b = sm.OLS(y5b, x5b).fit(cov_type = 'HC1')
stargazer = Stargazer([est1, est2, est3, est4, est5, est6])
stargazer_a = Stargazer([est1a, est2a, est3a, est4a, est5a, est6a])
stargazer_b = Stargazer([est2b, est5b])
stargazer.covariate_order(['ethnicity_instrument_C2_thresh', 'ethnicity_I',
'language_instrument_C2_thresh', 'language_I',
'religion_instrument_C2_thresh', 'religion_I'])
stargazer.rename_covariates({'ethnicity_instrument_C2_thresh':'Instrument E',
'ethnicity_I':'$F$ (ethnicity)',
'language_instrument_C2_thresh':'Instrument L',
'language_I':'$F$ (language)',
'religion_instrument_C2_thresh':'Instrument R',
'religion_I':'$F$ (religion)'
})
stargazer.custom_columns(['E$\hat{S}$',
'L$\hat{S}$',
'R$\hat{S}$',
'E$\hat{S}$',
'L$\hat{S}$',
'R$\hat{S}$'],
[1,1,1,1,1,1])
stargazer.show_model_numbers(False)
stargazer.add_line('Sample', ['Full','Full','Full','Democracy','Democracy','Democracy'])
stargazer.title('Panel A. Segregation index $\hat{S}$')
stargazer_a.covariate_order(['ethnicity_instrument_C_thresh', 'ethnicity_I',
'language_instrument_C_thresh', 'language_I',
'religion_instrument_C_thresh', 'religion_I'])
stargazer_a.rename_covariates({'ethnicity_instrument_C_thresh':'Instrument E',
'ethnicity_I':'$F$ (ethnicity)',
'language_instrument_C_thresh':'Instrument L',
'language_I':'$F$ (language)',
'religion_instrument_C_thresh':'Instrument R',
'religion_I':'$F$ (religion)'
})
stargazer_a.custom_columns(['E$\\tilde{S}$',
'L$\\tilde{S}$',
'R$\\tilde{S}$',
'E$\\tilde{S}$',
'L$\\tilde{S}$',
'R$\\tilde{S}$'],
[1,1,1,1,1,1])
stargazer_a.show_model_numbers(False)
stargazer_a.add_line('Sample', ['Full','Full','Full','Democracy','Democracy','Democracy'])
stargazer_a.title('Panel B. Segregation index $\\tilde{S}$')
stargazer_b.covariate_order(['language_instrument_C_thresh', 'language_I'])
stargazer_b.rename_covariates({'language_instrument_C_thresh':'Instrument L',
'language_I':'$F$ (language)'
})
stargazer_b.custom_columns(['L$\\tilde{S}$',
'L$\\tilde{S}$'],
[1,1])
stargazer_b.show_model_numbers(False)
stargazer_b.add_line('Sample', ['Full','Democracy'])
stargazer_b.title('Panel C. Segregation index $\\tilde{S}$ for language with sample excluding the US')
return [stargazer,stargazer_a,stargazer_b]
def table8_9_ext7(df,name,GDP) :
df_8_9A = df[[f'{name}_C2',f'{name}_I',f'{name}_instrument_C2_thresh','lnpopulation','lnGDP_pc','protestants','muslims',
'catholics','latitude','LOEnglish','LOGerman','LOSocialist','LOScandin','democ',
'mtnall','voice','PolStab','GovEffec','RegQual','ConCorr','RulLaw'
]].dropna(axis = 0)
df_8_9B = df_8_9A[[f'{name}_C2',f'{name}_instrument_C2_thresh',f'{name}_I','voice','PolStab','GovEffec','RegQual',
'ConCorr','RulLaw']]
if GDP == 'democ':
df_8_9C = df_8_9A[df_8_9A.democ >= 1]
elif GDP == 'GDP':
df_8_9C = df_8_9A[df_8_9A.lnGDP_pc >= 7]
exoA = sm.add_constant(df_8_9A[[f'{name}_C2',f'{name}_I','lnpopulation','lnGDP_pc','protestants',
'muslims','catholics','latitude','LOEnglish','LOGerman','LOSocialist',
'LOScandin','democ','mtnall']])
exoB = sm.add_constant(df_8_9B[[f'{name}_C2', f'{name}_I']])
exoC = sm.add_constant(df_8_9C[[f'{name}_C2',f'{name}_I','lnpopulation','lnGDP_pc','protestants',
'muslims','catholics','latitude','LOEnglish','LOGerman','LOSocialist',
'LOScandin','democ','mtnall']])
insA = sm.add_constant(df_8_9A[[f'{name}_instrument_C2_thresh',f'{name}_I','lnpopulation','lnGDP_pc','protestants',
'muslims','catholics','latitude','LOEnglish','LOGerman','LOSocialist',
'LOScandin','democ','mtnall']])
insB = sm.add_constant(df_8_9B[[f'{name}_instrument_C2_thresh', f'{name}_I']])
insC = sm.add_constant(df_8_9C[[f'{name}_instrument_C2_thresh',f'{name}_I','lnpopulation','lnGDP_pc','protestants',
'muslims','catholics','latitude','LOEnglish','LOGerman','LOSocialist',
'LOScandin','democ','mtnall']])
df_8_9s = [df_8_9A, df_8_9B, df_8_9C]
exos = [exoA, exoB, exoC]
inss = [insA, insB, insC]
y = [
[f'y{idx}A', f'y{idx}B', f'y{idx}C']
for idx in range(1, 7)
]
est = [
[f'est{idx}A', f'est{idx}B', f'est{idx}C']
for idx in range(1, 7)
]
star = ['starA','starB','starC']
for idx, i in enumerate(['A','B','C']) :
y[0][idx] = df_8_9s[idx]['voice']
y[1][idx] = df_8_9s[idx]['PolStab']
y[2][idx] = df_8_9s[idx]['GovEffec']
y[3][idx] = df_8_9s[idx]['RegQual']
y[4][idx] = df_8_9s[idx]['RulLaw']
y[5][idx] = df_8_9s[idx]['ConCorr']
est[0][idx] = IV2SLS(y[0][idx], exos[idx], inss[idx]).fit()
est[1][idx] = IV2SLS(y[1][idx], exos[idx], inss[idx]).fit()
est[2][idx] = IV2SLS(y[2][idx], exos[idx], inss[idx]).fit()
est[3][idx] = IV2SLS(y[3][idx], exos[idx], inss[idx]).fit()
est[4][idx] = IV2SLS(y[4][idx], exos[idx], inss[idx]).fit()
est[5][idx] = IV2SLS(y[5][idx], exos[idx], inss[idx]).fit()
star[idx] = Stargazer([est[0][idx],est[1][idx],est[2][idx],est[3][idx],est[4][idx],est[5][idx]])
for i in range(3) :
star[i].covariate_order([f'{name}_C2',f'{name}_I'])
star[i].rename_covariates({f'{name}_C2' : 'Segregation $\hat{S}$ ('f'{name}'')',
f'{name}_I' : 'Fractionalization $F$ ('f'{name}'')'})
star[i].show_model_numbers(False)
star[i].custom_columns(['Voice',
'Political stability',
'Govern-t effectiv.',
'Regul. quality',
'Rule of law',
'Control of corr'],
[1,1,1,1,1,1])
if GDP == 'democ' :
star[0].add_line('Controls', ['Yes','Yes','Yes','Yes','Yes','Yes'])
star[0].add_line('Sample', ['Full','Full','Full','Full','Full','Full'])
star[1].add_line('Controls', ['No','No','No','No','No','No'])
star[1].add_line('Sample', ['Full','Full','Full','Full','Full','Full'])
star[2].add_line('Controls', ['Yes','Yes','Yes','Yes','Yes','Yes'])
star[2].add_line('Sample', ['Democ','Democ','Democ','Democ','Democ','Democ'])
star[0].title('Panel A. Baseline : All controls and full sample')
star[1].title('Panel B. No controls and full sample')
star[2].title('Panel C. All controls; sample excludes dictatorship')
return [star[0],star[1],star[2]]
if GDP == 'GDP' :
if name == 'ethnicity' :
            star[2].title('Panel A. Ethnicity: All controls; sample excludes poorest countries')
elif name == 'language' :
star[2].title('Panel B. Language: All controls; sample excludes poorest countries')
return star[2]
def table10_11(df,name,democ) :
full_x = [f'{name}_I',f'{name}_C2','lnpopulation','lnGDP_pc','protestants','muslims',
'catholics','latitude','LOEnglish','LOGerman','LOSocialist','LOScandin','democ',
'mtnall']
ins = [f'{name}_I',f'{name}_instrument_C2_thresh','lnpopulation','lnGDP_pc','protestants','muslims',
'catholics','latitude','LOEnglish','LOGerman','LOSocialist','LOScandin','democ',
'mtnall']
df_10_11_1 = df[[f'{name}_C2', f'{name}_I',
f'{name}_instrument_C2_thresh','lnpopulation','lnGDP_pc','protestants','muslims',
'catholics','latitude','LOEnglish','LOGerman','LOSocialist','LOScandin','democ',
'mtnall','icrg_qog']].dropna(axis=0)
df_10_11_2 = df[[f'{name}_C2', f'{name}_I',
f'{name}_instrument_C2_thresh','lnpopulation','lnGDP_pc','protestants','muslims',
'catholics','latitude','LOEnglish','LOGerman','LOSocialist','LOScandin','democ',
'mtnall','ef_regul','ef_corruption','ef_property_rights']].dropna(axis=0)
df_10_11_3 = df[[f'{name}_C2', f'{name}_I',
f'{name}_instrument_C2_thresh','lnpopulation','lnGDP_pc','protestants','muslims',
'catholics','latitude','LOEnglish','LOGerman','LOSocialist','LOScandin','democ',
'mtnall','taxevas']].dropna(axis=0)
if democ == 'democracy' :
df_10_11_1 = df_10_11_1[df_10_11_1.democ >= 1]
df_10_11_2 = df_10_11_2[df_10_11_2.democ >= 1]
df_10_11_3 = df_10_11_3[df_10_11_3.democ >= 1]
x1 = sm.add_constant(df_10_11_1[full_x])
x2 = sm.add_constant(df_10_11_2[full_x])
x3 = sm.add_constant(df_10_11_3[full_x])
ins1 = sm.add_constant(df_10_11_1[ins])
ins2 = sm.add_constant(df_10_11_2[ins])
ins3 = sm.add_constant(df_10_11_3[ins])
else :
x1 = sm.add_constant(df_10_11_1[[f'{name}_I',f'{name}_C2']])
x2 = sm.add_constant(df_10_11_2[[f'{name}_I',f'{name}_C2']])
x3 = sm.add_constant(df_10_11_3[[f'{name}_I',f'{name}_C2']])
ins1 = sm.add_constant(df_10_11_1[[f'{name}_I',f'{name}_instrument_C2_thresh']])
ins2 = sm.add_constant(df_10_11_2[[f'{name}_I',f'{name}_instrument_C2_thresh']])
ins3 = sm.add_constant(df_10_11_3[[f'{name}_I',f'{name}_instrument_C2_thresh']])
y1 = df_10_11_1['icrg_qog']
y2 = df_10_11_2['ef_corruption']
y3 = df_10_11_2['ef_property_rights']
y4 = df_10_11_2['ef_regul']
y5 = df_10_11_3['taxevas']
est1 = sm.OLS(y1,x1).fit(cov_type = 'HC1')
est2 = IV2SLS(y1, x1,ins1).fit()
est3 = sm.OLS(y2, x2).fit(cov_type = 'HC1')
est4 = IV2SLS(y2, x2 ,ins2).fit()
est5 = sm.OLS(y3, x2).fit(cov_type = 'HC1')
est6 = IV2SLS(y3, x2 ,ins2).fit()
est7 = sm.OLS(y4, x2).fit(cov_type = 'HC1')
est8 = IV2SLS(y4, x2, ins2).fit()
est9 = sm.OLS(y5, x3).fit(cov_type = 'HC1')
est10 = IV2SLS(y5, x3, ins3).fit()
stargazer = Stargazer([est1,est2,est3,est4,est5,est6,est7,est8,est9,est10])
stargazer.custom_columns(['ICRG quality of gov','EF Corruption','EF Property rights',
'EF Regulation','Tax eva'],[2,2,2,2,2])
stargazer.show_model_numbers(False)
stargazer.covariate_order([f'{name}_C2',f'{name}_I'])
stargazer.rename_covariates({f'{name}_C2' : 'Segregation $\hat{S}$ ('f'{name}'')',
f'{name}_I' : 'Fractionalization $F$ ('f'{name}'')'})
stargazer.add_line('Method', ['OLS','2SLS','OLS','2SLS','OLS','2SLS','OLS','2SLS','OLS','2SLS'])
if democ == 'democracy':
stargazer.title('Panel B. Democracies sample, all controls')
return stargazer
else:
stargazer.title('Panel A. Full sample, no additional controls')
return stargazer
def df_table12(df,name) :
df_table12 = df[[f'{name}_C2',f'{name}_instrument_C2_thresh',f'{name}_I','trust','democ','lnpopulation','lnArea',
'lnGDP_pc','protestants','muslims','catholics','latitude','LOEnglish','LOGerman',
'LOSocialist','LOScandin','mtnall']].dropna(axis=0)
df_demo = df_table12[df_table12.democ > 1]
dep1 = df_table12['trust']
dep2 = df_demo['trust']
exo1 = sm.add_constant(df_table12[f'{name}_C2'])
exo2 = sm.add_constant(df_table12[[f'{name}_C2', f'{name}_I','lnpopulation','lnArea',
'lnGDP_pc','protestants','muslims','catholics','latitude','LOEnglish','LOGerman',
'LOSocialist','LOScandin','democ','mtnall']])
exo3 = sm.add_constant(df_demo[[f'{name}_C2',f'{name}_I','lnpopulation','lnArea',
'lnGDP_pc','protestants','muslims','catholics','latitude','LOEnglish','LOGerman',
'LOSocialist','LOScandin','democ','mtnall']])
ins1 = sm.add_constant(df_table12[f'{name}_instrument_C2_thresh'])
ins2 = sm.add_constant(df_table12[[f'{name}_instrument_C2_thresh',f'{name}_I','lnpopulation','lnArea',
'lnGDP_pc','protestants','muslims','catholics','latitude','LOEnglish','LOGerman',
'LOSocialist','LOScandin','democ','mtnall']])
ins3 = sm.add_constant(df_demo[[f'{name}_instrument_C2_thresh',f'{name}_I','lnpopulation','lnArea',
'lnGDP_pc','protestants','muslims','catholics','latitude','LOEnglish','LOGerman',
'LOSocialist','LOScandin','democ','mtnall']])
reg1 = sm.OLS(dep1,exo1).fit(cov_type = 'HC1')
reg2 = sm.OLS(dep1,exo2).fit(cov_type = 'HC1')
reg3 = sm.OLS(dep2,exo3).fit(cov_type = 'HC1')
reg4 = IV2SLS(dep1,exo1,ins1).fit()
reg5 = IV2SLS(dep1,exo2,ins2).fit()
reg6 = IV2SLS(dep2,exo3,ins3).fit()
stargazer = Stargazer([reg1,reg2,reg3,reg4,reg5,reg6])
stargazer.covariate_order([f'{name}_C2',f'{name}_I'])
stargazer.rename_covariates({f'{name}_C2' : 'Segregation $\hat{S}$ ('f'{name}'')',
f'{name}_I' : 'Fractionalization $F$ ('f'{name}'')'})
stargazer.custom_columns(['OLS','OLS','OLS','2SLS','2SLS','2SLS'],[1,1,1,1,1,1])
stargazer.add_line('Controls', ['No','Yes','Yes','No','Yes','Yes'])
stargazer.add_line('Sample', ['Full','Full','Democ','Full','Full','Democ'])
if name == 'ethnicity':
stargazer.title('Panel A. Ethnicity')
return stargazer
else:
stargazer.title('Panel B. Language')
return stargazer
def table13_ext11(df, name, trust) :
dependent = ['voice','PolStab','GovEffec','RegQual','RulLaw','ConCorr']
table = [f'table{i}'
for i in range(6)
]
for dep, i in zip(dependent, range(6)) :
df_13 = df[[f'{name}_C2',f'{name}_instrument_C2_thresh',f'{name}_I',
'voice','PolStab','GovEffec','RegQual','RulLaw','ConCorr',
'trust','ethnic_party_dum','dummy_sepx_nm']].dropna(axis=0)
y1 = df_13[f'{dep}']
x1 = sm.add_constant(df_13[[f'{name}_C2',f'{name}_I']])
x2 = sm.add_constant(df_13[[f'{name}_C2',f'{name}_I','trust']])
x3 = sm.add_constant(df_13[[f'{name}_C2',f'{name}_I','trust','ethnic_party_dum','dummy_sepx_nm']])
ins1 = sm.add_constant(df_13[[f'{name}_instrument_C2_thresh',f'{name}_I']])
ins2 = sm.add_constant(df_13[[f'{name}_instrument_C2_thresh',f'{name}_I','trust']])
ins3 = sm.add_constant(df_13[[f'{name}_instrument_C2_thresh',f'{name}_I','trust','ethnic_party_dum',
'dummy_sepx_nm']])
est = [f'est{i}'
for i in range(6)
]
est[0] = sm.OLS(y1,x1).fit(cov_type = 'HC1')
est[1] = sm.OLS(y1,x2).fit(cov_type = 'HC1')
est[2] = sm.OLS(y1,x3).fit(cov_type = 'HC1')
est[3] = IV2SLS(y1, x1, ins1).fit()
est[4] = IV2SLS(y1, x2, ins2).fit()
est[5] = IV2SLS(y1, x3, ins3).fit()
if trust == 'trust':
table[i] = pd.DataFrame({'OLS / Trust' : [est[1].params.values[3],est[1].bse.values[3],est[1].pvalues[3]],
'OLS / All' : [est[2].params.values[3],est[2].bse.values[3],est[2].pvalues[3]],
'2SLS / Trust' : [est[4].params.values[3],est[4].bse.values[3],est[4].pvalues[3]],
'2SLS / All' : [est[5].params.values[3],est[5].bse.values[3],est[5].pvalues[3]]},
index = ['Trust','Standard Error','p-value'])
table[i].index = pd.MultiIndex.from_product([[f'{dep}'], table[i].index])
else:
table[i] = pd.DataFrame({'OLS / None' : [est[0].params.values[1],est[0].bse.values[1],est[0].pvalues[1]],
'OLS / Trust' : [est[1].params.values[1],est[1].bse.values[1],est[1].pvalues[1]],
'OLS / All' : [est[2].params.values[1],est[2].bse.values[1],est[2].pvalues[1]],
'2SLS / None' : [est[3].params.values[1],est[3].bse.values[1],est[3].pvalues[1]],
'2SLS / Trust' : [est[4].params.values[1],est[4].bse.values[1],est[4].pvalues[1]],
'2SLS / All' : [est[5].params.values[1],est[5].bse.values[1],est[5].pvalues[1]]},
index = ['Segregation','Standard Error','p-value'])
table[i].index = pd.MultiIndex.from_product([[f'{dep}'], table[i].index])
table = pd.concat(table)
table = table.rename(index = {'voice': 'Voice',
'PolStab' : 'Political stability',
'GovEffec' : 'Govern-t effectiv.',
'RegQual' : 'Regul. quality',
'RulLaw' : 'Rule of law',
'ConCorr' : 'Control of corr'
})
table.index.names = ['Dependent Var','']
return table
def vif_cal(input_data, dependent_col, endo, instrument, reg):
if reg == '2SLS' :
x_vars=input_data.drop([dependent_col, instrument], axis=1)
ins_vars=input_data.drop([dependent_col, endo], axis=1)
else :
x_vars=input_data.drop([dependent_col, instrument], axis=1)
ins_vars=input_data.drop([dependent_col, instrument], axis=1)
xvar_names=x_vars.columns
vif=list()
for i in range(0,xvar_names.shape[0]):
y=x_vars[xvar_names[i]]
x=x_vars[xvar_names.drop(xvar_names[i])]
if reg == '2SLS' :
rsq=IV2SLS(y,x,ins_vars).fit().rsquared
else :
rsq=smf.ols(formula="y~x", data=x_vars).fit().rsquared
vif.append(round(1/(1-rsq),2))
if reg == 'OLS' :
return pd.DataFrame({'Var' : xvar_names[i], 'VIF/OLS' : vif[i]} for i in range(0,xvar_names.shape[0]))
elif reg == '2SLS' :
return pd.DataFrame({'Var' : xvar_names[i], 'VIF/2SLS' : vif[i]} for i in range(0,xvar_names.shape[0]))
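#A small self-contained sketch (not part of the original replication) of the variance inflation factor
#formula that vif_cal applies above, VIF_i = 1 / (1 - R^2_i), where R^2_i comes from regressing
#predictor i on the remaining predictors. The toy columns below are hypothetical.
def vif_sketch_demo():
    import numpy as np
    import pandas as pd
    import statsmodels.api as sm
    rng = np.random.default_rng(0)
    toy = pd.DataFrame({'x1' : rng.normal(size = 50)})
    toy['x2'] = 0.9 * toy['x1'] + rng.normal(scale = 0.3, size = 50) #strongly collinear with x1
    toy['x3'] = rng.normal(size = 50)
    vifs = {}
    for col in toy.columns:
        others = sm.add_constant(toy.drop(columns = [col]))
        rsq = sm.OLS(toy[col], others).fit().rsquared
        vifs[col] = round(1 / (1 - rsq), 2)
    return vifs #x1 and x2 should show clearly inflated VIFs, x3 should stay near 1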
import ntplib
import time
from ..plugins.base import BasePlugin
from ..plugins.doctor import BaseExamination
class DoctorTimePlugin(BasePlugin):
"""
Examinations to check the time on the docker server is roughly correct
"""
requires = ["doctor"]
def load(self):
self.add_catalog_item("doctor-exam", "time", TimeExamination)
class TimeExamination(BaseExamination):
"""
Checks the datetime on the docker server is not too out of drift
"""
warning_limit = 10
error_limit = 120
description = "Time checks"
def check_docker_time(self):
"""Testing docker clock sync"""
# Check to see if the docker server agrees with our clock
self.host.client.pull("alpine", "3.5")
container = self.host.client.create_container(
"alpine:3.5",
command=["/bin/date", "+%s"],
detach=True,
tty=False,
)
self.host.client.start(container)
while self.host.container_running(container['Id']):
time.sleep(0.1)
docker_time = self.host.client.logs(container['Id']).strip()
delta = abs(int(docker_time) - time.time())
if delta > self.error_limit:
raise self.Failure("%i seconds out of sync" % delta)
elif delta > self.warning_limit:
raise self.Warning("%i seconds out of sync" % delta)
def check_local_time(self):
"""Testing local clock sync"""
# Query an NTP server for the time
c = ntplib.NTPClient()
response = c.request('pool.ntp.org', version=3)
delta = abs(response.offset)
if delta > self.error_limit:
raise self.Failure("%i seconds out of sync" % delta)
elif delta > self.warning_limit:
raise self.Warning("%i seconds out of sync" % delta)
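# A standalone sketch (not part of the plugin API above) of the same NTP drift check, handy for
# trying the thresholds outside the doctor framework. The 10/120 second limits mirror the class
# defaults; pool.ntp.org is the same example server used above.
def clock_drift_seconds(server="pool.ntp.org", warning_limit=10, error_limit=120):
    """Return the absolute local clock offset in seconds and a status string."""
    offset = abs(ntplib.NTPClient().request(server, version=3).offset)
    if offset > error_limit:
        status = "failure"
    elif offset > warning_limit:
        status = "warning"
    else:
        status = "ok"
    return offset, status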
# BRCA_6CNV_testRNA_update.py
#june 2014
#determine most common copy number variants in a set of breast cancer patients
import csv
import math
import numpy as np
import scipy
from scipy import stats
import matplotlib.pyplot as plt
import itertools
from itertools import zip_longest
import pandas as pd
#in order to create a candidate CNV file for a large number of genes,
#I need to automatically pull out the genomic coordinates for build hg19 for each gene
#function to transpose
def transpose(mylist):
return [list(i) for i in zip(*mylist)]
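#e.g. transpose([[1, 2], [3, 4]]) returns [[1, 3], [2, 4]] (rows become columns)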
#function for significant digits
from math import log10, floor
def round_to_2(x):
digits = -int(floor(log10(x))-1)
digit_str = '.' + str(digits) + 'f'
return float(format(x, digit_str))
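#worked examples (this holds for inputs below 100, e.g. correlations or p-values):
#round_to_2(0.004562) returns 0.0046 and round_to_2(0.12345) returns 0.12, i.e. two significant figures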
#function for testing if a string is a number
def isnumber(s):
try:
float(s)
return True
except ValueError:
return False
#get filtered gene list
with open('BRCA_CNVs_foldchange_TN_filtered.csv', 'r') as filtered:
filtered = csv.reader(filtered)
filtered_genelist = next(filtered)
filtered_genelist = list(filtered_genelist)[1:]
print('Initial Gene List:')
print(len(filtered_genelist),'genes') #9694
#get amps and del
dict_ampdel = {}
with open('BRCA_TN_CNVs_foldchange_parsed_counts.csv', 'r') as parsed_counts_file:
parsed_counts = csv.reader(parsed_counts_file)
parsed_counts = list(parsed_counts)
for gene in filtered_genelist:
i = parsed_counts[0].index(gene)
if float(parsed_counts[2][i]) > 50:
dict_ampdel[gene] = 'amp'
elif float(parsed_counts[3][i]) > 50:
dict_ampdel[gene] = 'del'
else:
print('PROBLEM GENE:')
print(gene, 'up:', parsed_counts[2][i], 'down:', parsed_counts[3][i])
#remove genes not differentially expressed (i.e. where the value is 0 in most samples).
#borrowing this step from Akavia et al, 2010, from dana peer's lab
RNA = pd.read_csv('BRCA_RNA_candidates.csv',header=0,index_col=0)
#print('RNA BEFORE FILTERING:')
#print(RNA.head())
RNA = RNA.T
RNA['StDev'] = RNA.std(axis=1)
RNA = RNA[RNA['StDev']>0.25]
RNA = RNA.drop('StDev',axis=1)
RNA = RNA.T
#print('RNA AFTER FILTERING:')
#print(RNA.head())
RNA.to_csv('BRCA_RNA_candidates_filtered.csv')
CNV = pd.read_csv('BRCA_CNVs_foldchange_all_filtered.csv',header=0,index_col=0)
CNV = CNV[list(RNA.columns.values)]
CNV.to_csv('BRCA_CNVs_foldchange_all_filtered2.csv')
TNcounts = pd.read_csv('BRCA_TN_CNVs_foldchange_parsed_counts.csv',header=0,index_col=0)
TNcounts = TNcounts[list(RNA.columns.values)]
TNcounts.to_csv('BRCA_TN_CNVs_foldchange_parsed_counts2.csv')
TNsum = pd.read_csv('BRCA_CNVs_foldchange_TN_filtered.csv',header=0,index_col=0)
TNsum = TNsum[list(RNA.columns.values)]
TNsum.to_csv('BRCA_CNVs_foldchange_TN_filtered2.csv')
filtered_genelist = list(TNsum.columns.values)
print('Filtered Gene List:')
print(len(filtered_genelist),'genes') #6709
#convert to z-scores
with open('BRCA_RNA_candidates_filtered.csv', 'r') as RNA:
RNA = csv.reader(RNA)
RNA = list(RNA)
RNA_tr = transpose(RNA)
z_list_tr = []
z_list_tr.append(RNA_tr[0])
for cand in range(1,len(RNA[0])):
#print(RNA[0][cand])
RNA_list = []
for i in RNA_tr[cand]:
if isnumber(i):
RNA_list.append(float(i))
normal = scipy.stats.normaltest(RNA_list)
z_array = scipy.stats.zscore(RNA_list)
z_list_cand = list(z_array)
z_list_cand.insert(0, RNA[0][cand])
z_list_tr.append(z_list_cand)
z_list = transpose(z_list_tr)
with open('BRCA_RNA_z_scores.csv','w+') as z_scores:
z_scores = csv.writer(z_scores)
for line in z_list:
z_scores.writerow(line)
print('created z-scores file')
#import RNA file and CNV file as list and concatenate:
with open('BRCA_CNVs_foldchange_all_filtered2.csv', 'r') as CNVs:
CNVs = csv.reader(CNVs)
CNVs = list(CNVs)
with open('BRCA_RNA_z_scores.csv', 'r') as RNA:
RNA = csv.reader(RNA)
RNA = list(RNA)
with open('BRCA_CNVs_foldchange_and_RNA.csv', 'w+') as comb:
comb = csv.writer(comb)
firstrow = ['Complete TCGA ID']
for CNV_name in CNVs[0][1:len(CNVs[0])]:
if CNV_name != '':
CNV_header = CNV_name + '-CNV'
firstrow.append(CNV_header)
# firstrow.append('tumor')
# firstrow.append('normal')
for RNA_name in RNA[0][1:len(RNA[0])]:
if RNA_name != '':
RNA_header = RNA_name + '-RNA'
firstrow.append(RNA_header)
comb.writerow(firstrow)
for sample in CNVs[1:]:
for ID in RNA[1:]:
if sample[0] == ID[0]:
sample_list = [sample[0]]
for i in sample[1:]:
sample_list.append(i)
for i in ID[1:]:
sample_list.append(i)
comb.writerow(sample_list)
##thenconcatenate the CNV and RNA files and do pairwise t-tests.
##set cutoff automatically by amp or del
##then output a list of genes after filtering them by p-value
datadf = pd.read_csv('BRCA_CNVs_foldchange_and_RNA.csv', header=0)
print('COMBINED FILE:')
#print(datadf.head(n=10))
print(datadf.shape)
final_genelist = []
with open('BRCA_CNV_foldchange_siggenes.csv','w+') as sig:
sig = csv.writer(sig)
sig.writerow(['Gene','CNV type','CN cutoff','Percent Altered','p-value for RNA t-test','Result','CN-RNA relationship'])
equal = 0
unequal = 0
non_equal = 0
non_unequal = 0
upnormal = False
downnormal = False
for gene in filtered_genelist:
if float((len(filtered_genelist) - filtered_genelist.index(gene))/50).is_integer():
print(str(len(filtered_genelist) - filtered_genelist.index(gene)) + ' ' + 'genes left')
#print(gene)
CNV_header = gene + '-CNV'
RNA_header = gene + '-RNA'
testdf = datadf[[CNV_header, RNA_header]]
#print(testdf.head())
#print(testdf.shape)
testdf.dropna(inplace=True)
testdf.columns = ['CNV', 'RNA']
#print(testdf.head())
#print(testdf.shape)
nodup = testdf.RNA #checking to see that there is more than one value in RNA
nodup.drop_duplicates(inplace=True)
if nodup.shape[0] > 1:
if dict_ampdel[gene] == 'amp': #test amplifications. here I will ONLY use the 1.2 cutoff.
testdf = testdf[testdf.CNV > 2.**-0.3] #remove deletions
upmask = (testdf.CNV > 2.**0.3)
upRNA = testdf[upmask].RNA
upmean = upRNA.mean()
upmedian = upRNA.median()
cutoff = 2.**0.3
downRNA = testdf[~upmask].RNA
downmean = downRNA.mean()
downmedian = downRNA.median()
                perc = TNcounts[gene].loc['percent up of total']*100
elif dict_ampdel[gene] == 'del': #test deletions
testdf = testdf[testdf.CNV < 2.**0.3] #remove amplifications
upmask = (testdf.CNV > 2.**-0.3)
upRNA = testdf[upmask].RNA
upmean = upRNA.mean()
upmedian = upRNA.median()
downRNA = testdf[~upmask].RNA
downmean = downRNA.mean()
downmedian = downRNA.median()
cutoff = 2.**-0.3
                perc = TNcounts[gene].loc['percent down of total']*100
            #reset the normality flags for each gene; by the central limit theorem, samples with n >= 30 are treated as approximately normal
            upnormal = scipy.stats.normaltest(upRNA)[1] > 0.05 or len(upRNA) >= 30
            downnormal = scipy.stats.normaltest(downRNA)[1] > 0.05 or len(downRNA) >= 30
if upnormal and downnormal: #will use one-sided t tests here, because I only want those cases where upRNA > downRNA, not the other way around.
if scipy.stats.bartlett(upRNA,downRNA)[1] > 0.05:
p_value = scipy.stats.ttest_ind(upRNA, downRNA)[1]/2
equal += 1
else:
p_value = scipy.stats.ttest_ind(upRNA, downRNA,equal_var=False)[1]/2 #Welsch t-test
unequal += 1
if upmean > downmean:
relationship = '+'
else: relationship = '-'
else:
if scipy.stats.levene(upRNA,downRNA)[1] > 0.05:
p_value = scipy.stats.mannwhitneyu(upRNA, downRNA)[1]/2 #non-parametric test
non_equal += 1
else:
p_value = scipy.stats.mannwhitneyu(upRNA, downRNA)[1]/2 ##using the Mann-Whitney U test here. Not robust for unequal variances. could try transform
non_unequal += 1
if upmedian > downmedian: #can't consider means for a nonnormal samples
relationship = '+'
else: relationship = '-'
#print(p_value)
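            #Bonferroni-style threshold: 0.05 divided by the number of genes tested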
if p_value < 0.05/len(filtered_genelist):
final_genelist.append(gene)
stat = 'Significant'
else: stat = 'Non significant'
sig.writerow([gene,dict_ampdel[gene],cutoff,perc,p_value,stat,relationship])
print('Normal, equal variance:',equal) #
print('Normal, unequal variance:', unequal)#
print('Nonnormal, equal variance:',non_equal) #note that all samples were normal in this run
print('Nonnormal, unequal variance:',non_unequal)
if non_unequal > 0:
print('WARNING: Nonnormal, unequal variance samples were identified. Modify the script to show the distribution of these samples and attempt to transform the samples to achieve equal variances or normality.')
#that is, either they actually were normally distributed, or the sample size was > 30, so it is actually just extra conservative to use only the Welch test in
#this case, as I suspect the CONEXIC people will have done. I have used either a regular t-test or a Welch t-test where appropriate.
print(len(final_genelist), 'significant genes')
print('created significant genes file')
#read in the sig genes file and output counts
siggenes = pd.read_csv('BRCA_CNV_foldchange_siggenes.csv',header=0)
sigonly = siggenes[siggenes['Result']=='Significant']
#print('Number of significant genes:', sigonly.shape[0])
posonly = sigonly[sigonly['CN-RNA relationship']=='+']
print('Significant genes with + relationship:', posonly.shape[0]) #
siggenes.set_index('Gene',inplace=True)
siggenes = siggenes[['CNV type','CN cutoff','Percent Altered','p-value for RNA t-test','Result','CN-RNA relationship']]
cands = pd.read_csv('BRCA_allGISTIC2.0andISARgenes_foldchange_compressed_step5.csv',header=0,index_col='Symbol')
sub1 = cands[cands['Has RNA data']=='yes']
rest1 = cands[~(cands['Has RNA data']=='yes')]
sub2 = sub1[sub1.index.isin(filtered_genelist)]
rest2 = sub1[~(sub1.index.isin(filtered_genelist))]
sub2['RNA differentially expressed'] = 'yes'
rest2['RNA differentially expressed'] = 'no'
rest1['RNA differentially expressed'] = ''
cands = pd.concat([sub2,rest2,rest1])
cands = cands.merge(siggenes,how='outer',left_index=True, right_index=True)
cands.to_csv('BRCA_allGISTIC2.0andISARgenes_foldchange_compressed_step6.csv',index=True)
# sfsuListings/createPost.py
from flask import Flask, flash, redirect, render_template, request, session, abort, g, Blueprint, url_for
import logging
import base64
import datetime
from flask_sqlalchemy import SQLAlchemy
from sqlalchemy import DateTime
from werkzeug.utils import secure_filename
from pathlib import Path
from sfsuListings.configPaths import image_path
import os
createPost = Blueprint('createPost', __name__, template_folder='templates')
UPLOAD_FOLDER = image_path
ALLOWED_EXTENSIONS = set(['jpg', 'jpeg', 'png'])
login = Flask(__name__)
db = SQLAlchemy(login)
class Posts(db.Model):
name = db.Column(db.String(80), unique=False, nullable=False, primary_key=True)
author = db.Column(db.String(80), unique=False, nullable=True, primary_key=False)
price = db.Column(db.REAL, unique=False, nullable=False, primary_key=False)
description = db.Column(db.String(300), unique=False, nullable=False, primary_key=False)
image = db.Column(db.String(80), unique=False, nullable=True, primary_key=False)
id = db.Column(db.INTEGER, unique=True, nullable=False, primary_key=True)
category = db.Column(db.String(80), unique=False, nullable=False, primary_key=False)
approval = db.Column(db.String(20), unique=False, nullable=False, primary_key=False)
date = db.Column(DateTime, default=datetime.datetime.utcnow)
    def __repr__(self):
        return "<Post name: {}, author: {}, id: {}, price: {}, image: {}, category: {}>".format(
            self.name, self.author, self.id, self.price, self.image, self.category)
def allowed_file(filename):
return '.' in filename and \
filename.rsplit('.', 1)[1].lower() in ALLOWED_EXTENSIONS
@createPost.route('/CreatePost', methods=['GET'])
def createPostGet():
return render_template('CreatePost.html', title='Create Post')
@createPost.route('/CreatePost', methods=['POST'])
def createNewPost():
if ((session.get('logged_in') == None) or (session.get('logged_in') == False)):
flash('Please log in before creating a post.')
return redirect('/login')
title = request.form['title']
category = request.form['category']
price = request.form['price']
description = request.form['description']
last = Posts.query.all()
lastId = last[-1].id + 1
img = ''
if('img' not in request.files):
img = 'NoImageAvailable.png'
else:
image = request.files['img']
if(image.filename == ''):
imageName = 'NoImageAvailable.png'
image.save(os.path.join((UPLOAD_FOLDER), imageName))
img = imageName
elif(image and allowed_file(image.filename)):
imageName = secure_filename("Post_" + str(lastId) + Path(image.filename).suffix)
image.save(os.path.join((UPLOAD_FOLDER), imageName))
img = imageName
else:
imageName = 'NoImageAvailable.png'
image.save(os.path.join((UPLOAD_FOLDER), imageName))
img = imageName
newPost = Posts(name=title, author=session.get('user_name'), price=price, category=category,
description=description, image=img, id=lastId, approval='pending')
db.session.add(newPost)
db.session.commit()
return redirect('/Dashboard')
# repo: gglin001/popart
# Copyright (c) 2018 Graphcore Ltd. All rights reserved.
import sys
import os
import c10driver
import popart
import cmdline
from popart.torch import torchwriter
import torch
import numpy as np
args = cmdline.parse()
nInChans = 3
nOutChans = 8
batchSize = 2
batchesPerStep = 4
anchors = {
"l1LossVal": popart.AnchorReturnType("EveryN", 2),
"out": popart.AnchorReturnType("Final"),
"im0": popart.AnchorReturnType("All")
}
dataFlow = popart.DataFlow(batchesPerStep, anchors)
inputShapeInfo = popart.InputShapeInfo()
inputShapeInfo.add("im0",
popart.TensorInfo("FLOAT", [batchSize, nInChans, 32, 32]))
inNames = ["im0"]
outNames = ["out"]
cifarInIndices = {"im0": 0}
losses = [popart.L1Loss("out", "l1LossVal", 0.1)]
class Module0(torch.nn.Module):
def __init__(self):
torch.nn.Module.__init__(self)
self.sin = torch.sin
self.conv1 = torchwriter.conv3x3(nInChans, nOutChans)
self.in2 = torch.nn.InstanceNorm2d(nOutChans,
eps=0.1,
affine=True,
momentum=0)
# Force random initialization
np.random.seed(0)
self.in2.weight.data = torch.tensor(
np.random.rand(nOutChans).astype(np.float32))
def forward(self, inputs):
im0 = inputs[0]
x = self.conv1(im0)
x = self.in2(x)
x = self.sin(x)
return x
# Set arbitrary seed so model weights are initialized to the
# same values each time the test is run
torch.manual_seed(1)
torchWriter = torchwriter.PytorchNetWriter(
inNames=inNames,
outNames=outNames,
losses=losses,
optimizer=popart.ConstSGD(0.001),
inputShapeInfo=inputShapeInfo,
dataFlow=dataFlow,
### Torch specific:
module=Module0(),
samplesPerBatch=batchSize)
c10driver.run(torchWriter,
None,
args.outputdir,
cifarInIndices,
args.device,
args.hw_id,
transformations=["prepareNodesForTraining"],
epochs=4)
from ._vsm import Vsm
# coding: utf-8
def test_Table():
from pycharmers.utils import Table, toBLUE
table = Table(enable_colspan=True)
table.set_cols([1,2,""], colname="id")
table.set_cols([toBLUE("abc"), "", "de"], color="GREEN")
table.show()
# +----+-------+
# | id | col.2 |
# +====+=======+
# | 1 | [34mabc[0m |
# +----+ +
# | 2 | |
# + +-------+
# | | [32mde[0m |
# +----+-------+
def test_align_text():
from pycharmers.utils import align_text, toBLUE
print(align_text("Hello world!", align=">", width=15))
# Hello world!
print(align_text(toBLUE("Hello world!"), align=">", width=15))
# [34mHello world![0m
def test_format_spec_create():
from pycharmers.utils import format_spec_create
format_spec = format_spec_create(width=10, align="^")
format_spec("hoge")
# ' hoge '
format_spec = format_spec_create(align="<", fmt=".1%")
format_spec(1/3)
# '33.3%'
format_spec = format_spec_create(align=">", zero_padding=True, fmt="b")
format_spec(20)
# '10100'
def test_pretty_3quote():
from pycharmers.utils import pretty_3quote
print(*pretty_3quote("""
When I was 17, I read a quote that went something like:
“If you live each day as if it was your last, someday you’ll most certainly be right.”
It made an impression on me, and since then, for the past 33 years,
"""))
# When I was 17, I read a quote that went something like:
# “If you live each day as if it was your last, someday you’ll most certainly be right.”
# It made an impression on me, and since then, for the past 33 years,
def test_print_dict_tree():
from pycharmers.utils import print_dict_tree
print_dict_tree({"a": 0, "b": 1})
# - a: 0
# - b: 1
print_dict_tree({"a": 0, "b": {"b1": 1, "b2": 2}})
# - a: 0
# - b:
# * b1: 1
# * b2: 2
print_dict_tree({"a": 0, "b": {"b1": 1, "b2": {"b21": 0, "b22": 1}}, "c": 3})
# - a: 0
# - b:
# * b1: 1
# * b2:
# # b21: 0
# # b22: 1
# - c: 3
def test_print_func_create():
from pycharmers.utils import print_func_create
print_func = print_func_create(width=8, align="^", left_side_bar="[", right_side_bar="]")
print_func("hoge")
# [ hoge ]
print_func = print_func_create(align="<", left_side_bar="$ ")
print_func("git clone https://github.com/iwasakishuto/Python-utils.git")
# $ git clone https://github.com/iwasakishuto/Python-utils.git
print_func("cd Python-utils")
# $ cd Python-utils
print_func("sudo python setup.py install")
# $ sudo python setup.py install
def test_str2pyexample():
from pycharmers.utils import str2pyexample
WINDOW_NAME = "string2python"
str2pyexample("""
import cv2
import numpy as np
frame = np.zeros(shape=(50, 100, 3), dtype=np.uint8)
while (True):
cv2.imshow(WINDOW_NAME, frame)
if cv2.waitKey(0) == 27: break
cv2.destroyAllWindows()
""")
import cv2
import numpy as np
frame = np.zeros(shape=(50, 100, 3), dtype=np.uint8)
while (True):
cv2.imshow(WINDOW_NAME, frame)
if cv2.waitKey(0) == 27: break
cv2.destroyAllWindows()
def test_strip_invisible():
from pycharmers.utils import strip_invisible, toBLUE
strip_invisible("[31mhello[0m")
# 'hello'
strip_invisible(toBLUE("hello"))
# 'hello'
strip_invisible("hello")
# 'hello'
def test_tabulate():
from pycharmers.utils import tabulate
tabulate([[i*j for i in range(1,4)] for j in range(1,4)])
# +-------+-------+-------+
# | col.1 | col.2 | col.3 |
# +=======+=======+=======+
# | 1 | 2 | 3 |
# +-------+-------+-------+
# | 2 | 4 | 6 |
# +-------+-------+-------+
# | 3 | 6 | 9 |
# +-------+-------+-------+
def test_visible_width():
from pycharmers.utils import visible_width, toBLUE
visible_width(toBLUE("hello"))
# 5
visible_width("こんにちは")
# 10
visible_width("hello 世界。")
# 12
import scrapy
from bs4 import BeautifulSoup as bs
from scrapy.crawler import CrawlerProcess
from scrapy.utils.project import get_project_settings
import requests
import re
#Replace this
class spider(scrapy.Spider):
def __init__(self):
self.name = 'spider'
self.allowed_domains = ['en.wikipedia.org']
api_url = 'https://api.thewikigame.com/api/v1/group/22033570-e1fd-4a9f-9a96-9068082b88aa/current-round/'
headers = {
'Authorization': 'Token <PASSWORD>' #Might need to change this
}
response = requests.get(api_url, headers=headers)
start_index = response.text.index('"start_article"')
start_index = response.text[start_index:].index('link') + start_index
start_link = response.text[start_index+6:].split(',')[0]
start_link = 'https://en.wikipedia.org/wiki/'+start_link.replace('"', '')
end_index = response.text.index('"goal_article"')
end_index = response.text[end_index:].index('link') + end_index
end_link = response.text[end_index+6:].split(',')[0]
self.end_link = 'https://en.wikipedia.org/wiki/'+end_link.replace('"', '')
self.start_urls = [start_link]
self.dont_overwrite = False
def get_page_name(self, url):
return url.replace('https://en.wikipedia.org/wiki/', '')
def start_requests(self):
url = self.start_urls[0]
path = self.get_page_name(url)
yield scrapy.Request(url=url,
callback=self.parse,
meta={'path': path},
errback=self.handle_failure)
def handle_failure(self, failure):
yield scrapy.Request(url=failure.request.url,
callback=self.parse,
meta={'path': failure.request.meta['path']},
errback=self.handle_failure)
def parse(self, response):
soup = bs(response.text, 'html.parser')
links = []
for link in soup.findAll('a', attrs={'href': re.compile('^/wiki/')}):
path = link.get('href')[6:]
not_allowed = ['Special:', 'Wikipedia:', 'Portal:', 'Category:', 'File:', 'Template:', 'Template_talk:', 'Help:', 'Talk:']
allowed = True
for word in not_allowed:
if path.startswith(word):
allowed = False
break
if allowed and path != 'Main_Page':
links.append(path)
links = list(set(links))
links = ['https://en.wikipedia.org/wiki/'+l for l in links]
for link in links:
            if self.get_page_name(link) in response.meta['path']:
                continue
            new_path = response.meta['path']+', '+self.get_page_name(link)
            if link == self.end_link and self.dont_overwrite == False:
                with open('path.txt', 'w') as outfile:
                    outfile.write(new_path)
                self.dont_overwrite = True
                raise scrapy.exceptions.CloseSpider('Path Found!')
yield scrapy.Request(url=link,
callback=self.parse,
meta={'path': new_path},
errback=self.handle_failure)
def find_best_path():
process = CrawlerProcess(get_project_settings())
process.crawl(spider)
process.start()
find_best_path()
# repo: hyperonecom/h1-client-python
"""
HyperOne
HyperOne API # noqa: E501
The version of the OpenAPI document: 0.1.0
Generated by: https://openapi-generator.tech
"""
import unittest
import h1
from h1.api.iam_organisation_policy_api import IamOrganisationPolicyApi # noqa: E501
class TestIamOrganisationPolicyApi(unittest.TestCase):
"""IamOrganisationPolicyApi unit test stubs"""
def setUp(self):
self.api = IamOrganisationPolicyApi() # noqa: E501
def tearDown(self):
pass
def test_iam_organisation_policy_actor_create(self):
"""Test case for iam_organisation_policy_actor_create
Create iam/policy.actor # noqa: E501
"""
pass
def test_iam_organisation_policy_actor_delete(self):
"""Test case for iam_organisation_policy_actor_delete
Delete iam/policy.actor # noqa: E501
"""
pass
def test_iam_organisation_policy_actor_get(self):
"""Test case for iam_organisation_policy_actor_get
Get iam/policy.actor # noqa: E501
"""
pass
def test_iam_organisation_policy_actor_list(self):
"""Test case for iam_organisation_policy_actor_list
List iam/policy.actor # noqa: E501
"""
pass
def test_iam_organisation_policy_create(self):
"""Test case for iam_organisation_policy_create
Create iam/policy # noqa: E501
"""
pass
def test_iam_organisation_policy_delete(self):
"""Test case for iam_organisation_policy_delete
Delete iam/policy # noqa: E501
"""
pass
def test_iam_organisation_policy_event_get(self):
"""Test case for iam_organisation_policy_event_get
Get iam/policy.event # noqa: E501
"""
pass
def test_iam_organisation_policy_event_list(self):
"""Test case for iam_organisation_policy_event_list
List iam/policy.event # noqa: E501
"""
pass
def test_iam_organisation_policy_get(self):
"""Test case for iam_organisation_policy_get
Get iam/policy # noqa: E501
"""
pass
def test_iam_organisation_policy_list(self):
"""Test case for iam_organisation_policy_list
List iam/policy # noqa: E501
"""
pass
def test_iam_organisation_policy_service_get(self):
"""Test case for iam_organisation_policy_service_get
Get iam/policy.service # noqa: E501
"""
pass
def test_iam_organisation_policy_service_list(self):
"""Test case for iam_organisation_policy_service_list
List iam/policy.service # noqa: E501
"""
pass
def test_iam_organisation_policy_tag_create(self):
"""Test case for iam_organisation_policy_tag_create
Create iam/policy.tag # noqa: E501
"""
pass
def test_iam_organisation_policy_tag_delete(self):
"""Test case for iam_organisation_policy_tag_delete
Delete iam/policy.tag # noqa: E501
"""
pass
def test_iam_organisation_policy_tag_get(self):
"""Test case for iam_organisation_policy_tag_get
Get iam/policy.tag # noqa: E501
"""
pass
def test_iam_organisation_policy_tag_list(self):
"""Test case for iam_organisation_policy_tag_list
List iam/policy.tag # noqa: E501
"""
pass
def test_iam_organisation_policy_tag_put(self):
"""Test case for iam_organisation_policy_tag_put
Replace iam/policy.tag # noqa: E501
"""
pass
def test_iam_organisation_policy_update(self):
"""Test case for iam_organisation_policy_update
Update iam/policy # noqa: E501
"""
pass
if __name__ == '__main__':
unittest.main()
# repo: ProgressBG-Python-Course/ProgressBG-VC2-Python
def user_input(msg):
try:
usr_input = input(msg)
return (usr_input, True)
except:
print("User Break - 'CTRL+D' is Disabled!!")
return ("Not OK", False)
# def user_input(msg):
# usr_input = input(msg)
# if len(usr_input)>2:
# return (usr_input, True)
# else:
# print("User Break - 'CTRL+D' is Disabled!!")
# return ("Not OK" ,False)
while True:
status = user_input("Enter your message: ")
# print(x)
if status[1]:
quit()
import magenta
def readfile(path):
    print("Reading file from " + path)
    with open(path, "r") as file:
        return file.read()
# project6-hy/tests.py
from hashtable import Hashtable
import time
###########################
########## Tests ##########
###########################
some_words = [u'lewes', # => 5
u'mistranscribe', # => 13
u'outbleed', # => 8
u'abstemiously', # => 12
u'antifeudal', # => 10
u'tableaux', # => 8
u'whine', # => 5
u'ytterbite', # => 9
u'redeemer'] # => 8
filename = "words.txt"
print(u'Reading words from file {}.'.format(filename))
most_words = []
start_time = time.time()
with open(filename) as f:
for line in f.readlines():
most_words.append(line.strip())
print(u'Read in {} words in {}s.'.format(len(most_words), time.time()-start_time))
def do_tests(T):
"""Run the tests for the Hashtable class.
For the example hashtable, we're mapping strings to integers. More
specifically, we're mapping words to the number of characters they have,
just for fun. The test function takes a Hashtable of words mapped to their
length, and at the end it adds a lot more of them to it.
"""
print(u'Starting hashtable tests!')
print(u'#####################')
print(u'')
print(u'Initial word list: {}'.format(some_words))
# test the constructor (which also uses __setitem__ and thereby __getitem__)
for word in some_words:
print(u'{} should map to {}.'.format(word, len(word)))
assert T[word] == len(word)
print(u'#####################')
print(u'')
print(u'Testing __setitem__ and __getitem__')
# test __setitem__ and __getitem__ some more
more_words = [u'nummulitic', u'proconviction', u'inscriber']
print(u'Adding more things to the table: {}'.format(more_words))
for word in more_words:
T[word] = len(word)
# make sure the original words are still there
for word in some_words:
print(u'{} should map to {}.'.format(word, len(word)))
assert T[word] == len(word)
# make sure the insertion actually worked
for word in more_words:
print(u'{} should map to {}.'.format(word, len(word)))
assert T[word] == len(word)
print(u'#####################')
print(u'')
# now delete the second list of words
print(u'Testing delete')
for word in more_words:
print(u'Delete key {}'.format(word))
del T[word]
# make sure the words in more_words aren't keys anymore
keys = T.keys()
print(u'Current list of keys: {}'.format(keys))
for word in more_words:
assert word not in keys
print(u'#####################')
print(u'')
# let's put them back in
for word in more_words:
print(u'Re-adding {}.'.format(word))
T[word] = len(word)
# make sure the list of keys contains all the words from both lists
keys = T.keys()
print(u'Current list of keys: {}'.format(keys))
for word in some_words:
assert word in keys
for word in more_words:
assert word in keys
print(u'#####################')
print(u'')
print(u'Now, let\'s make the table REALLY big!')
print(u'(In other words, let\'s test double() and quarter().)')
print(u'#####################')
print(u'')
print(u'Putting a bunch of words in the hashtable.')
start_time = time.time()
for word in most_words:
T[word] = len(word)
print(u'{} words inserted successfully in {}s.'.format(\
len(most_words),
time.time()-start_time))
print(u'Checking that the words and their values are actually there.')
for word in most_words:
l = len(word)
print(u'{}: {}'.format(word, l))
assert T[word] == l
print(u'Deleting a lot of items.')
for i, key in enumerate(T.keys()):
if i > 800:
break
else:
del T[key]
print(u'All tests passed!')
# repo: FlyingKiwiBird/AioCron
import datetime
import sys
import asyncio
sys.path.append("..")
from CoroCron.Cron import Cron
async def report_time(name="there"):
print("Hi {}, it is now {}".format(name, datetime.datetime.now()))
if __name__ == '__main__':
mins = [x for x in range(0, 59) if x % 2 == 0]
mins2 = [x for x in range(0, 59) if x % 2 == 1]
Cron = Cron()
Cron.Job().Minutes(mins).Do(report_time, ("Even",))
Cron.Job().Minutes(mins2).Do(report_time)
loop = asyncio.get_event_loop()
asyncio.ensure_future(Cron.Start())
loop.run_forever()
from django.contrib import admin
from .models import Disk, File, FileCopy, Oplog
@admin.register(Disk)
class DiskAdmin(admin.ModelAdmin):
list_display = ('dev_name', 'mount_point', 'is_healthy')
@admin.register(File)
class FileAdmin(admin.ModelAdmin):
list_display = ('__str__', 'size', 'readable_size')
readonly_fields = ('size', 'sha256')
@admin.register(FileCopy)
class FileCopyAdmin(admin.ModelAdmin):
pass
@admin.register(Oplog)
class OplogAdmin(admin.ModelAdmin):
list_display = ('__str__', 'error_code')
readonly_fields = ('error_code', 'stdout', 'stderr')
# repo: afterloe/LearnOpencv
#!/usr/bin/env python
# coding=utf-8
from __future__ import division
import cv2
import Adafruit_PCA9685
import time
import numpy as np
import threading
pwm = Adafruit_PCA9685.PCA9685()
pwm.set_pwm_freq(60)
#pwm.set_pwm(0, 0, 320)
#pwm.set_pwm(1, 0, 240)
time.sleep(1)
cap = cv2.VideoCapture(0)
face_cascade = cv2.CascadeClassifier("/home/pi/Project/lib/opencv/data/haarcascades/haarcascade_frontalface_default.xml")
x = 0
thisError_x = 500
lastError_x = 100
thisError_y = 500
lastError_y = 100
Y_P = 425
X_P = 425
flag = 0
y = 0
faceBool = False
def moveSteeringEngine(x, y):
pwm.set_pwm(14, 0, 650 - x)
pwm.set_pwm(15, 0, 650 - y)
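# Main loop sketch of what follows: grab a frame, detect a face with the Haar
# cascade, take the first hit, apply a simple proportional-plus-difference
# correction to the face-centre error, clamp the servo positions, and hand the
# new pan/tilt targets to moveSteeringEngine on a daemon thread. Channels 14/15
# and the 0-670 / 0-650 limits are taken from the code below, not from a datasheet.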
while True:
ret, frame = cap.read()
if False == ret:
print("can't open video!")
break
gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
faces = face_cascade.detectMultiScale(gray)
max_face = 0
value_x = 0
if 0 < len(faces):
print("find face!")
(x, y, w, h) = faces[0]
cv2.rectangle(frame, (x, y), (x + w, y + h), (0, 255, 0), 2)
result = (x, y, w, h)
x = result[0] + w / 2
y = result[1] + h / 2
faceBool = True
if faceBool:
faceBool = False
thisError_x = x - 160
thisError_y = y - 120
pwm_x = thisError_x * 5 + 1 * (thisError_x - lastError_x)
pwm_y = thisError_y * 5 + 1 * (thisError_y - lastError_y)
lastError_x = thisError_x
lastError_y = thisError_y
XP = pwm_x / 100
YP = pwm_y / 100
X_P = X_P + int(XP)
Y_P = Y_P + int(YP)
if 670 < X_P:
X_P = 670
if 0 > X_P:
X_P = 0
if 650 < Y_P:
Y_P = 650
if 0 > Y_P:
Y_P = 0
tid = threading.Thread(target = moveSteeringEngine, args = (X_P, Y_P))
tid.daemon = True
tid.start()
cv2.imshow("capture", frame)
if 119 == cv2.waitKey(1):
break
cap.release()
cv2.destroyAllWindows()
|
StarcoderdataPython
|
12864665
|
# Source: EmmaAlexander/possum-tools
#CASA script to create cutouts of fits cubes
directoryA = '/Volumes/TARDIS/Work/askap/'
directoryB = '/Volumes/NARNIA/pilot_cutouts/'
import os
import numpy as np
sources=np.loadtxt('/Users/emma/GitHub/possum-tools/DataProcess/pilot_sources.txt',dtype='str')
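# Assumed layout of pilot_sources.txt (inferred from the indexing below, not from
# the file itself): column 0 = object name, 1 = RA, 2 = Dec, 3 = POSSUM SB,
# 4 = EMU SB, 6 = cutout field of view in arcsec.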
for i in range(0,sources.shape[0]):
objectname=sources[i,0]
POSSUMSB=sources[i,3]
EMUSB=sources[i,4]
ra=sources[i,1]
dec=sources[i,2]
sourcecentre=ra+','+dec
fov=sources[i,6]#arcsec
print(objectname)
region='centerbox[['+sourcecentre+'], ['+fov+'arcsec, '+fov+'arcsec]]'
possum_outfile=directoryB+objectname+'/'+objectname+'_POSSUM.fits'
emu_outfile=directoryB+objectname+'/'+objectname+'_EMU.fits'
#POSSUM
if POSSUMSB == '5038':
#this is the Early Science data
possum_cont_filename = '/Volumes/NARNIA/PawseySync/DRAGN_1_0p8_A/DRAGN_1_0p8_A/image.i.SB5038.cont.restored.fits'
else:
possum_cont_filename = directoryA +'fullfields/image.i.SB'+POSSUMSB+'.cont.taylor.0.restored.fits'
if POSSUMSB == '10035':
print('Skipping POSSUM: bad SB10035')
else:
imsubimage(imagename=possum_cont_filename,outfile='possum_cont_temp',region=region,overwrite=True,dropdeg=True)
exportfits(imagename='possum_cont_temp',fitsimage=possum_outfile,overwrite=True)
#cubes
i_filename = '/Volumes/NARNIA/leakage_corrected/image.restored.i.SB'+POSSUMSB+'.contcube.linmos.13arcsec.leakage.zernike.holoI.fits'
q_filename = '/Volumes/NARNIA/leakage_corrected/image.restored.q.SB'+POSSUMSB+'.contcube.linmos.13arcsec.leakage.zernike.holoI.fits'
u_filename = '/Volumes/NARNIA/leakage_corrected/image.restored.u.SB'+POSSUMSB+'.contcube.linmos.13arcsec.leakage.zernike.holoI.fits'
imsubimage(imagename=i_filename,outfile='i_im_temp',region=region,overwrite=True,dropdeg=True)
imsubimage(imagename=q_filename,outfile='q_im_temp',region=region,overwrite=True,dropdeg=True)
imsubimage(imagename=u_filename,outfile='u_im_temp',region=region,overwrite=True,dropdeg=True)
exportfits(imagename='i_im_temp',fitsimage=objectname+'_POSSUM_i.fits',overwrite=True)
exportfits(imagename='q_im_temp',fitsimage=objectname+'_POSSUM_q.fits',overwrite=True)
exportfits(imagename='u_im_temp',fitsimage=objectname+'_POSSUM_u.fits',overwrite=True)
#EMU
if EMUSB != 'NaN':
if EMUSB=='10083':
i_EMU_filename = '/Volumes/NARNIA/fullfields/image.restored.i.SB10083.contcube.conv.fits'
q_EMU_filename = '/Volumes/NARNIA/fullfields/image.restored.q.SB10083.contcube.conv.fits'
u_EMU_filename = '/Volumes/NARNIA/fullfields/image.restored.u.SB10083.contcube.conv.fits'
cont_EMU_filename= '/Volumes/NARNIA/fullfields/image.i.SB10083.cont.taylor.0.restored.conv.fits'
imsubimage(imagename=i_EMU_filename,outfile='i_EMU_im_temp',region=region,overwrite=True,dropdeg=True)
imsubimage(imagename=q_EMU_filename,outfile='q_EMU_im_temp',region=region,overwrite=True,dropdeg=True)
imsubimage(imagename=u_EMU_filename,outfile='u_EMU_im_temp',region=region,overwrite=True,dropdeg=True)
imsubimage(imagename=cont_EMU_filename,outfile='EMU_cont_im_temp',region=region,overwrite=True,dropdeg=True)
exportfits(imagename='i_EMU_im_temp',fitsimage=objectname+'_EMU_i.fits',overwrite=True)
exportfits(imagename='q_EMU_im_temp',fitsimage=objectname+'_EMU_q.fits',overwrite=True)
exportfits(imagename='u_EMU_im_temp',fitsimage=objectname+'_EMU_u.fits',overwrite=True)
exportfits(imagename='EMU_cont_im_temp',fitsimage=emu_outfile,overwrite=True)
elif EMUSB=='10635':
i_EMU_filename = '/Volumes/NARNIA/fullfields/image.restored.i.SB10635.contcube.v2.conv.fits'
q_EMU_filename = '/Volumes/NARNIA/fullfields/image.restored.q.SB10635.contcube.v2.conv.fits'
u_EMU_filename = '/Volumes/NARNIA/fullfields/image.restored.u.SB10635.contcube.v2.conv.fits'
cont_EMU_filename= '/Volumes/NARNIA/fullfields/image.i.SB10635.cont.taylor.0.restored.fits'
imsubimage(imagename=i_EMU_filename,outfile='i_EMU_im_temp',region=region,overwrite=True,dropdeg=True)
imsubimage(imagename=q_EMU_filename,outfile='q_EMU_im_temp',region=region,overwrite=True,dropdeg=True)
imsubimage(imagename=u_EMU_filename,outfile='u_EMU_im_temp',region=region,overwrite=True,dropdeg=True)
imsubimage(imagename=cont_EMU_filename,outfile='EMU_cont_im_temp',region=region,overwrite=True,dropdeg=True)
exportfits(imagename='i_EMU_im_temp',fitsimage=objectname+'_EMU_i.fits',overwrite=True)
exportfits(imagename='q_EMU_im_temp',fitsimage=objectname+'_EMU_q.fits',overwrite=True)
exportfits(imagename='u_EMU_im_temp',fitsimage=objectname+'_EMU_u.fits',overwrite=True)
exportfits(imagename='EMU_cont_im_temp',fitsimage=emu_outfile,overwrite=True)
else:
#no cubes
emu_filename= directoryA +'fullfields/image.i.SB'+EMUSB+'.cont.taylor.0.restored.fits'
imsubimage(imagename=emu_filename,outfile='emu_cont_temp',region=region,overwrite=True,dropdeg=True)
exportfits(imagename='emu_cont_temp',fitsimage=emu_outfile,overwrite=True)
os.system("rm -r emu_cont_temp")
#tidy up
os.system("rm -r *_temp")
os.system("mv *{}* {}/".format(objectname,objectname))
|
StarcoderdataPython
|
5108860
|
# Source: iconation/scorelib
from iconsdk.builder.transaction_builder import DeployTransactionBuilder
from tbears.libs.icon_integrate_test import IconIntegrateTestBase, SCORE_INSTALL_ADDRESS
from iconsdk.libs.in_memory_zip import gen_deploy_data_content
from iconsdk.signed_transaction import SignedTransaction
from .utils import *
import json
import os
DIR_PATH = os.path.abspath(os.path.dirname(__file__))
class ScoreLibTests(IconIntegrateTestBase):
SCORE_PATH = os.path.abspath(os.path.join(DIR_PATH, '../'))
def setUp(self):
super().setUp()
self.icon_service = None
# install SCORE
self._operator = self._test1
self._user = self._wallet_array[0]
self._attacker = self._wallet_array[9]
for wallet in self._wallet_array:
icx_transfer_call(super(), self._test1, wallet.get_address(), 100 * 10**18, self.icon_service)
self._operator_icx_balance = get_icx_balance(super(), address=self._operator.get_address(), icon_service=self.icon_service)
self._score_address = self._deploy_score(self.SCORE_PATH, params={})['scoreAddress']
def _deploy_score(self, project, to: str = SCORE_INSTALL_ADDRESS, params={}) -> dict:
# Generates an instance of transaction for deploying SCORE.
transaction = DeployTransactionBuilder() \
.from_(self._test1.get_address()) \
.to(to) \
.step_limit(100_000_000_000) \
.nid(3) \
.nonce(100) \
.content_type("application/zip") \
.content(gen_deploy_data_content(project)) \
.params(params) \
.build()
# Returns the signed transaction object having a signature
signed_transaction = SignedTransaction(transaction, self._test1)
# process the transaction in local
result = self.process_transaction(signed_transaction, self.icon_service)
self.assertTrue('status' in result)
if result['status'] != 1:
print(result)
self.assertEqual(1, result['status'])
self.assertTrue('scoreAddress' in result)
return result
def _deploy_irc2(self, project, to: str = SCORE_INSTALL_ADDRESS) -> dict:
# Generates an instance of transaction for deploying SCORE.
transaction = DeployTransactionBuilder() \
.params({
"_initialSupply": 0x100000000000,
"_decimals": 18,
"_name": 'StandardToken',
"_symbol": 'ST',
}) \
.from_(self._operator.get_address()) \
.to(to) \
.step_limit(100_000_000_000) \
.nid(3) \
.nonce(100) \
.content_type("application/zip") \
.content(gen_deploy_data_content(project)) \
.build()
# Returns the signed transaction object having a signature
signed_transaction = SignedTransaction(transaction, self._operator)
# process the transaction in local
result = self.process_transaction(
signed_transaction, self.icon_service)
self.assertTrue('status' in result)
self.assertEqual(1, result['status'])
self.assertTrue('scoreAddress' in result)
return result
def shard_set(self, key: str, value: int, success=True):
return transaction_call(
super(),
from_=self._operator,
to_=self._score_address,
method='shard_set',
params={'key': key, 'value': value},
icon_service=self.icon_service,
success=success
)
def shard_get(self, key: str, success=True):
return transaction_call(
super(),
from_=self._operator,
to_=self._score_address,
method='shard_get',
params={'key': key},
icon_service=self.icon_service,
success=success
)
def shard_multiset(self, count: int, success=True):
return transaction_call(
super(),
from_=self._operator,
to_=self._score_address,
method='shard_multiset',
params={'count': count},
icon_service=self.icon_service,
success=success
)
|
StarcoderdataPython
|
1881689
|
import requests
from bs4 import BeautifulSoup
import re # Importing regular expression module
import click
import os
@click.command()
@click.option("--trending",is_flag=True,help='Gives the trending news topics!')
@click.option("--read",is_flag=True,help='Reads you out trending news topics!')
def cli(trending, read):
if(trending):
url='https://in.reuters.com/news/top-news'
r=requests.get(url) # The very old get function
soup=BeautifulSoup(r.content,'html.parser') #Getting content
links=soup.find_all(href=re.compile('/article/')) #getting every link which has the word article
for i in links:
if(i.text != 'Continue Reading'):
if(i.text != ""):
print("->" + i.text) #printing out text of the blockquote
if(read):
url='https://in.reuters.com/news/top-news'
r=requests.get(url) # The very old get function
soup=BeautifulSoup(r.content,'html.parser') #Getting content
links=soup.find_all(href=re.compile('/article/')) #getting every link which has the word article
for i in links:
if(i.text != 'Continue Reading'):
if(i.text != ""):
os.system("espeak '{}'".format(i.text))
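# Hypothetical usage, assuming this module is exposed as a console script named
# `news` (there is no __main__ guard here, so it is presumably wired up via an
# entry point):
#
# $ news --trending    # list the current Reuters top headlines
# $ news --read        # read them aloud via espeak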
|
StarcoderdataPython
|
4842578
|
"""Find the minimal frame pointer and stack pointer positions from a C6T VM
logfile. This will be the lowest depth of the stack.
"""
from sys import argv
from typing import Optional, Tuple
def findmin(log: str, fieldpos: int) -> Optional[int]:
"""Splits and then finds minimum in given split index fieldpos.
"""
minval = None
for line in log.splitlines():
try:
curval = int(line.split()[fieldpos], base=16)
if minval is None:
minval = curval
else:
minval = min(curval, minval)
except IndexError:
continue
except ValueError:
continue
return minval
def findmins(log: str) -> Tuple[int, int]:
"""Find stack and frame pointer mins.
"""
fp, sp = findmin(log, 1), findmin(log, 3)
if fp is None:
fp = -1
if sp is None:
sp = -1
return fp, sp
if __name__ == "__main__":
argv = [None, 'c6t.log']
with open(argv[1], 'r', encoding='utf8') as logfile:
fp, sp = findmins(logfile.read())
print("FP:", hex(fp), "SP:", hex(sp))
|
StarcoderdataPython
|
8031892
|
# -*- coding: utf-8 -*-
# File: develop.py
# Copyright 2021 Dr. <NAME>. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Copyright (c) Tensorpack Contributors
# Licensed under the Apache License, Version 2.0 (the "License")
"""
Utilities for developers only. These are not visible to users and should not appear in docs.
"""
import functools
import inspect
from collections import defaultdict
from datetime import datetime
from typing import Any, Callable, List, Optional
from .logger import logger
__all__: List[str] = ["deprecated"]
# Copy and paste from https://github.com/tensorpack/tensorpack/blob/master/tensorpack/utils/develop.py
_DEPRECATED_LOG_NUM = defaultdict(int) # type: ignore
def log_deprecated(name: str = "", text: str = "", eos: str = "", max_num_warnings: Optional[int] = None) -> None:
"""
Log deprecation warning.
:param name: name of the deprecated item.
:param text: information about the deprecation.
:param eos: end of service date such as "YYYY-MM-DD".
:param max_num_warnings: the maximum number of times to print this warning
"""
assert name or text
if eos:
eos = "after " + datetime(*map(int, eos.split("-"))).strftime("%d %b") # type: ignore # pylint: disable=C0209
if name:
if eos:
info_msg = f"{name} will be deprecated {eos}. {text}"
else:
info_msg = f"{name} was deprecated. {text}"
else:
info_msg = text
if eos:
info_msg += f" Legacy period ends {eos}"
if max_num_warnings is not None:
if _DEPRECATED_LOG_NUM[info_msg] >= max_num_warnings:
return
_DEPRECATED_LOG_NUM[info_msg] += 1
logger.info("[Deprecated] %s", info_msg)
def deprecated(text: str = "", eos: str = "", max_num_warnings: Optional[int] = None) -> Callable[[Any], Any]:
"""
:param text: same as :func:`log_deprecated`.
:param eos: same as :func:`log_deprecated`.
:param max_num_warnings: same as :func:`log_deprecated`.
:return: A decorator which deprecates the function.
**Example:**
.. code-block:: python
@deprecated("Explanation of what to do instead.", "2017-11-4")
def foo(...):
pass
"""
def get_location() -> str:
frame = inspect.currentframe()
if frame:
callstack = inspect.getouterframes(frame)[-1]
return f"{callstack[1]}:{callstack[2]}"
stack = inspect.stack(0)
entry = stack[2]
return f"{entry[1]}:{entry[2]}"
def deprecated_inner(func: Callable[[Any], Any]) -> Callable[[Any], Any]:
@functools.wraps(func)
def new_func(*args, **kwargs): # type: ignore
name = f"{func.__name__} [{get_location()}]"
log_deprecated(name, text, eos, max_num_warnings=max_num_warnings)
return func(*args, **kwargs)
return new_func
return deprecated_inner
|
StarcoderdataPython
|
64218
|
# Source: oswaldo-spadari/Python-Exec
# Program that takes four grades for each of 10 students,
# computes and stores each student's average in a list,
# and prints how many students have an average of 7.0 or higher.
from random import randint
boletim = []
alunos = {}
notas = []
total = 0
for i in range(1, 11):
alunos['nome'] = f'aluno{i}'
notas.clear()
for c in range(1, 5):
notas.append(randint(0, 10))
alunos['notas'] = notas.copy()
alunos['media'] = sum(notas) / len(notas)
boletim.append(alunos.copy())
for a in boletim:
print(a)
if a['media'] >= 7:
total += 1
print(f'O total de alunos com média maior ou igual a 7 é de {total} alunos')
|
StarcoderdataPython
|
3409827
|
# Source: NeonOcean/Environment
import random
from sims4.tuning.tunable import HasTunableFactory, AutoFactoryInit, TunablePercent
import services
class SetFireState(HasTunableFactory, AutoFactoryInit):
FACTORY_TUNABLES = {'chance': TunablePercent(description='\n Chance that the fire will trigger\n ', default=100)}
def __init__(self, target, *args, **kwargs):
super().__init__(*args, **kwargs)
self.target = target
def start(self, *_, **__):
if random.random() < self.chance:
fire_service = services.get_fire_service()
fire_service.spawn_fire_at_object(self.target)
def stop(self, *_, **__):
pass
|
StarcoderdataPython
|
11282047
|
# Source: PCIHD/Project_Daydream
from rest_framework import serializers
from .models import Dream
class Dream_serializer(serializers.ModelSerializer):
class Meta:
model = Dream
fields = ['image']
|
StarcoderdataPython
|
1687125
|
# Source: vt102/eosfactory (pyteos/core/logger.py)
import enum
import re
import inspect
from textwrap import dedent
from termcolor import cprint, colored
class Verbosity(enum.Enum):
COMMENT = ['green', None, []]
INFO = ['blue', None, []]
TRACE = ['cyan', None, []]
ERROR = ['red', None, ['reverse']]
ERROR_TESTING = ['green', None, ['reverse']]
OUT = [None, None, []]
DEBUG = ['yellow', None, []]
NONE = None
__verbosity = [Verbosity.TRACE, Verbosity.OUT, Verbosity.DEBUG]
def verbosity(verbosity):
global __verbosity
__verbosity = verbosity
def COMMENT(msg):
frame = inspect.stack()[1][0]
test_name = inspect.getframeinfo(frame).function
color = Verbosity.COMMENT.value
cprint(
"\n### " + test_name + ":\n" + condition(msg) + "\n",
color[0], color[1], attrs=color[2])
def SCENARIO(msg):
COMMENT(msg)
__trace_buffer = ""
def TRACE(msg=None, translate=True, verbosity=None):
global __trace_buffer
if not msg:
return __trace_buffer
msg = condition(msg, translate)
__trace_buffer = msg
if msg and Verbosity.TRACE in (verbosity if verbosity else __verbosity):
color = Verbosity.TRACE.value
cprint(msg, color[0], color[1], attrs=color[2])
__info_buffer = ""
def INFO(msg=None, translate=True, verbosity=None):
global __info_buffer
if not msg:
return __info_buffer
msg = condition(msg, translate)
__info_buffer = msg
v = verbosity if verbosity else __verbosity
if msg and (
Verbosity.TRACE in v or Verbosity.INFO in v
):
color = Verbosity.INFO.value
cprint(msg, color[0], color[1], attrs=color[2])
__out_buffer = ""
def OUT(msg=None, translate=True, verbosity=None):
global __out_buffer
if not msg:
return __out_buffer
msg = condition(msg, translate)
__out_buffer = msg
if msg and Verbosity.OUT in (verbosity if verbosity else __verbosity):
color = Verbosity.OUT.value
cprint(msg, color[0], color[1], attrs=color[2])
__debug_buffer = ""
def DEBUG(msg=None, translate=True, verbosity=None):
global __debug_buffer
if not msg:
return __debug_buffer
msg = condition(msg, translate)
__debug_buffer = msg
if msg and Verbosity.DEBUG in (verbosity if verbosity else __verbosity):
color = Verbosity.DEBUG.value
cprint(msg, color[0], color[1], attrs=color[2])
__is_testing_errors = False
def set_is_testing_errors(status=True):
'''Changes the color of the ``ERROR`` logger printout.
Makes it less alarming.
'''
global __is_testing_errors
if status:
__is_testing_errors = True
else:
__is_testing_errors = False
def error(msg, translate=True):
color = Verbosity.ERROR_TESTING.value \
if __is_testing_errors else Verbosity.ERROR.value
return colored(
"ERROR:\n{}".format(condition(msg, translate)),
color[0], color[1], attrs=color[2])
def ERROR(msg, translate=True, verbosity=None):
if not verbosity:
cprint(error(msg, translate))
def condition(message, translate=True):
import core.manager as manager
ansi_escape = re.compile(r'\x1B\[[0-?]*[ -/]*[@-~]')
message = ansi_escape.sub('', message)
message = dedent(message).strip()
message = message.replace("<br>", "\n")
if translate:
message = manager.accout_names_2_object_names(message)
return message
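# Usage sketch (verbosity() takes a list of Verbosity members; everything below
# uses only the helpers defined in this module):
#
# verbosity([Verbosity.INFO, Verbosity.ERROR])
# INFO("building contract")        # printed, since Verbosity.INFO is enabled
# ERROR("compilation failed")      # printed via cprint with the ERROR colour
# INFO()                           # returns the last INFO message from its buffer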
|
StarcoderdataPython
|
3314563
|
# Source: sebastian-software/jasy (jasy/script/clean/Permutate.py)
#
# Jasy - Web Tooling Framework
# Copyright 2010-2012 Zynga Inc.
# Copyright 2013-2014 <NAME>
#
import jasy.script.parse.Parser as Parser
import jasy.core.Console as Console
from jasy.script.util import *
def __translateToJS(code):
"""Returns the code equivalent of the stored value for the given key."""
if code is None:
pass
elif code is True:
code = "true"
elif code is False:
code = "false"
elif isinstance(code, str) and code.startswith("{") and code.endswith("}"):
pass
elif isinstance(code, str) and code.startswith("[") and code.endswith("]"):
pass
else:
code = "\"%s\"" % code
return code
def patch(node, permutation):
"""Replaces all occourences with incoming values."""
modified = False
if node.type == "dot" and node.parent.type == "call":
assembled = assembleDot(node)
# jasy.Env.getValue(key)
if assembled == "jasy.Env.getValue" and node.parent.type == "call":
callNode = node.parent
params = callNode[1]
name = params[0].value
Console.debug("Found jasy.Env.getValue(%s) in line %s", name, node.line)
replacement = __translateToJS(permutation.get(name))
if replacement:
replacementNode = Parser.parseExpression(replacement)
callNode.parent.replace(callNode, replacementNode)
modified = True
Console.debug("Replaced with %s", replacement)
# jasy.Env.isSet(key, expected)
# also supports boolean like: jasy.Env.isSet(key)
elif assembled == "jasy.Env.isSet" and node.parent.type == "call":
callNode = node.parent
params = callNode[1]
name = params[0].value
Console.debug("Found jasy.Env.isSet(%s) in line %s", name, node.line)
replacement = __translateToJS(permutation.get(name))
if replacement is not None:
# Auto-fill second parameter with boolean "true"
expected = params[1] if len(params) > 1 else Parser.parseExpression("true")
if expected.type in ("string", "number", "true", "false"):
parsedReplacement = Parser.parseExpression(replacement)
expectedValue = getattr(expected, "value", None)
if expectedValue is not None:
if getattr(parsedReplacement, "value", None) is not None:
replacementResult = parsedReplacement.value in str(expected.value).split("|")
else:
replacementResult = parsedReplacement.type in str(expected.value).split("|")
else:
replacementResult = parsedReplacement.type == expected.type
# Do actual replacement
replacementNode = Parser.parseExpression("true" if replacementResult else "false")
callNode.parent.replace(callNode, replacementNode)
modified = True
Console.debug("Replaced with %s", "true" if replacementResult else "false")
# jasy.Env.select(key, map)
elif assembled == "jasy.Env.select" and node.parent.type == "call":
Console.debug("Found jasy.Env.select() in line %s", node.line)
callNode = node.parent
params = callNode[1]
replacement = __translateToJS(permutation.get(params[0].value))
if replacement:
parsedReplacement = Parser.parseExpression(replacement)
if parsedReplacement.type != "string":
raise Exception("jasy.Env.select requires that the given replacement is of type string.")
# Directly try to find matching identifier in second param (map)
objectInit = params[1]
if objectInit.type == "object_init":
fallbackNode = None
for propertyInit in objectInit:
if propertyInit[0].value == "default":
fallbackNode = propertyInit[1]
elif parsedReplacement.value in str(propertyInit[0].value).split("|"):
callNode.parent.replace(callNode, propertyInit[1])
modified = True
break
if not modified and fallbackNode is not None:
callNode.parent.replace(callNode, fallbackNode)
modified = True
Console.debug("Updated with %s", replacement)
# Process children
for child in reversed(node):
if child is not None:
if patch(child, permutation):
modified = True
return modified
|
StarcoderdataPython
|
6572133
|
#!/usr/bin/python
#
import os
import requests
import codecs
import logging
import argparse
import multiprocessing
import time
GROBID_SERVER = 'http://localhost:8081'
GROBID_HANDLER = 'processFulltextDocument'
DEFAULT_THREADS = multiprocessing.cpu_count() // 2
DEFAULT_TIMEOUT = 60 # timeout on connection after this delay
DEFAULT_MAX_RETRIES = 3 # try to reconnect these many times to grobid server
DEFAULT_SLEEP_DELAY = 10 # give server enough time to restart grobid
class GrobidError(Exception):
pass
class ConnectionError(Exception):
pass
class GrobidProcessor(object):
"""
Needed to take advantage of multiprocessing.Pool
"""
def __init__(self, service, destdir=None, force=None, timeout=DEFAULT_TIMEOUT,
max_retries=DEFAULT_MAX_RETRIES, sleep_delay=DEFAULT_SLEEP_DELAY):
self.service = service
self.destdir = destdir
self.force = force
self.timeout = timeout
self.max_retries = max_retries
self.sleep_delay = sleep_delay
def __call__(self, file):
try:
fp = open(file, 'r')
except IOError, error:
logging.error("error opening file %s: %s" % (file, error))
return None
if self.destdir:
out_file = os.path.join(self.destdir, os.path.basename(file)) + '.xml'
else:
out_file = file + '.xml'
logging.debug("considering source file %s" % file)
if os.path.exists(out_file):
if os.path.getmtime(out_file) > os.path.getmtime(file):
if self.force:
logging.debug("forcing reprocessing of source file %s (target is %s)" %(file, out_file))
else:
logging.debug("target file %s is up-to-date" % out_file)
return out_file
else:
logging.debug("recreating stale target file %s" % out_file)
else:
logging.debug("creating target file %s" % out_file)
logging.info("processing file %s" % file)
retry = self.max_retries
while retry > 0:
try:
xml = self.send_to_grobid(fp)
except ConnectionError, error:
retry = retry - 1
logging.info("ran into connection error: '%s'" % error)
if retry > 0:
logging.info("retrying in %d seconds" % self.sleep_delay)
time.sleep(self.sleep_delay)
except GrobidError, error:
logging.error("error processing file %s: %s" % (file, error))
return None
else:
retry = 0
try:
fp = codecs.open(out_file, 'w', 'utf-8')
except IOError, error:
logging.error("error opening file %s: %s" % (out_file, error))
return None
fp.write(xml)
logging.info("written output file %s" % out_file)
return out_file
def send_to_grobid(self, filehandle):
try:
response = requests.post(url=self.service, files={'input': filehandle}, timeout=self.timeout)
except requests.exceptions.Timeout:
logging.debug("timeout from requests")
raise ConnectionError("request timeout after %d seconds" % self.timeout)
except requests.exceptions.RequestException as e:
raise ConnectionError("request exception: %s" % e)
if response.status_code == 200:
logging.debug("successful response from grobid server (%d bytes)" % len(response.content))
return response.text
else:
raise GrobidError("HTTP %d - %s: %s" % (response.status_code, response.reason, response.text))
def parse_arguments():
argp = argparse.ArgumentParser()
argp.add_argument(
'--debug',
default=False,
action='store_true',
dest='debug',
help='turn on debugging'
)
argp.add_argument(
'--force',
default=False,
action='store_true',
dest='force',
help='force recreation of all target files'
)
argp.add_argument(
'--server',
type=str,
default=GROBID_SERVER,
dest='server',
help='specify server to use (default is %s)' % GROBID_SERVER
)
argp.add_argument(
'--handler',
type=str,
default=GROBID_HANDLER,
dest='handler',
help='specify handler to use (default is %s)' % GROBID_HANDLER
)
argp.add_argument(
'--threads',
type=int,
default=DEFAULT_THREADS,
dest='threads',
help='specify number of threads to use (default is %d)' % DEFAULT_THREADS
)
argp.add_argument(
'--destdir',
type=str,
default=None,
dest='destdir',
help='specify output directory for extracted files'
)
argp.add_argument('files', nargs='+')
return argp.parse_args()
if __name__ == "__main__":
args = parse_arguments()
if args.debug:
logging.basicConfig(level=logging.DEBUG)
else:
logging.basicConfig(level=logging.INFO)
service = os.path.join(args.server, args.handler)
threads = min(args.threads, len(args.files))
logging.info("allocating %d threads for processing %d files" %(threads, len(args.files)))
# avoid the overhead of multiprocessing unless necessary
if threads > 1:
p = multiprocessing.Pool(threads)
p.map(GrobidProcessor(service, destdir=args.destdir, force=args.force), args.files)
else:
map(GrobidProcessor(service, destdir=args.destdir, force=args.force), args.files)
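# Example invocation (hypothetical script name and paths; the server and thread
# defaults mirror the constants defined at the top of this file):
#
# python grobid_process.py --server http://localhost:8081 --threads 4 /data/pdfs/*.pdf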
|
StarcoderdataPython
|
3503675
|
import traceback
from typing import (
Any,
cast,
Dict,
Tuple,
)
from norfs.fs.base import (
BaseFileSystem,
FSObjectPath,
FSObjectType,
Path,
)
class CopyError(Exception):
pass
class CopyFileSystemObject:
_fs: BaseFileSystem
_path: Path
def __init__(self, fs: BaseFileSystem, path: Path) -> None:
self._fs = fs
self._path = path
@property
def fs(self) -> BaseFileSystem:
return self._fs
@property
def path(self) -> Path:
return self._path
def copy(self, dst: 'CopyFileSystemObject', copy_strategy: 'CopyStrategy') -> None:
raise TypeError("Cannot copy from filesystem object that is not file or directory")
def copy_from_file(self, src: 'CopyFile', copy_strategy: 'CopyStrategy') -> None:
raise TypeError("Cannot copy to filesystem object that is not file or directory")
def copy_from_dir(self, src: 'CopyDirectory', copy_strategy: 'CopyStrategy') -> None:
raise TypeError("Cannot copy to filesystem object that is not file or directory")
def __eq__(self, other: Any) -> bool:
if isinstance(other, self.__class__):
other_casted: 'CopyFileSystemObject' = cast(CopyFileSystemObject, other)
return self._fs == other_casted._fs and self._path == other_casted._path
return False
def __repr__(self) -> str:
return f"{self.__class__.__name__}(fs={self._fs}, path={self._path})"
class CopyFile(CopyFileSystemObject):
def copy(self, dst: 'CopyFileSystemObject', copy_strategy: 'CopyStrategy') -> None:
dst.copy_from_file(self, copy_strategy)
def copy_from_file(self, src: 'CopyFile', copy_strategy: 'CopyStrategy') -> None:
copy_strategy.copy_file_to_file(src, self)
def copy_from_dir(self, src: 'CopyDirectory', copy_strategy: 'CopyStrategy') -> None:
raise TypeError("Cannot copy Directory into a File.")
class CopyDirectory(CopyFileSystemObject):
def file(self, suffix: str) -> 'CopyFile':
return CopyFile(self._fs, self._path.child(suffix))
def subdir(self, suffix: str) -> 'CopyDirectory':
return CopyDirectory(self._fs, self._path.child(suffix))
def copy(self, dst: 'CopyFileSystemObject', copy_strategy: 'CopyStrategy') -> None:
dst.copy_from_dir(self, copy_strategy)
def copy_from_file(self, src: 'CopyFile', copy_strategy: 'CopyStrategy') -> None:
copy_strategy.copy_file_to_file(src, self.file(src.path.basename))
def copy_from_dir(self, src: 'CopyDirectory', copy_strategy: 'CopyStrategy') -> None:
copy_strategy.copy_dir_to_dir(src, self)
class CopyStrategy:
def copy_dir_to_dir(self, src: CopyDirectory, dst: CopyDirectory) -> None:
raise NotImplementedError()
def copy_file_to_file(self, src: CopyFile, dst: CopyFile) -> None:
raise NotImplementedError()
class GenericCopyStrategy(CopyStrategy):
def copy_dir_to_dir(self, src: CopyDirectory, dst: CopyDirectory) -> None:
fs_path: FSObjectPath
for fs_path in src.fs.dir_list(src.path):
if fs_path.type == FSObjectType.FILE:
src_child_file: CopyFile = src.file(fs_path.path.basename)
dst_child_file: CopyFile = dst.file(fs_path.path.basename)
self.copy_file_to_file(src_child_file, dst_child_file)
elif fs_path.type == FSObjectType.DIR:
src_child_dir: CopyDirectory = src.subdir(fs_path.path.basename)
dst_child_dir: CopyDirectory = dst.subdir(fs_path.path.basename)
self.copy_dir_to_dir(src_child_dir, dst_child_dir)
def copy_file_to_file(self, src: CopyFile, dst: CopyFile) -> None:
dst.fs.file_write(dst.path, src.fs.file_read(src.path))
class Copier:
_copy_strategies: Dict[Tuple[BaseFileSystem, BaseFileSystem], CopyStrategy]
_default: CopyStrategy
def __init__(self, default_copy_strategy: CopyStrategy) -> None:
self._copy_strategies = {}
self._default = default_copy_strategy
def set_copy_policy(self, src_fs: BaseFileSystem, dst_fs: BaseFileSystem, copy_strategy: CopyStrategy) -> None:
self._copy_strategies[(src_fs, dst_fs)] = copy_strategy
def copy(self, src: CopyFileSystemObject, dst: CopyFileSystemObject) -> None:
copy_strategy: CopyStrategy = self._copy_strategies.get((src.fs, dst.fs), self._default)
try:
src.copy(dst, copy_strategy)
except Exception:
raise CopyError(traceback.format_exc())
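# Usage sketch (hypothetical objects; `local_file` and `remote_dir` stand for
# CopyFile/CopyDirectory instances built from real filesystems and paths):
#
# copier = Copier(GenericCopyStrategy())
# copier.set_copy_policy(src_fs, dst_fs, GenericCopyStrategy())
# copier.copy(local_file, remote_dir)  # file lands in the directory under its basename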
|
StarcoderdataPython
|
9610954
|
# Source: Donglin-Wang2/panda-gym
from panda_gym.envs.core import RobotTaskEnv
from panda_gym.pybullet import PyBullet
from panda_gym.envs.robots import Panda
from panda_gym.envs.tasks import Push
class PandaPushEnv(RobotTaskEnv):
"""Push task wih Panda robot.
Args:
render (bool, optional): Activate rendering. Defaults to False.
reward_type (str, optional): "sparse" or "dense". Defaults to "sparse".
"""
def __init__(self, render=False, reward_type="sparse"):
self.sim = PyBullet(render=render)
self.robot = Panda(self.sim, block_gripper=True, base_position=[-0.6, 0.0, 0.0])
self.task = Push(self.sim, reward_type=reward_type)
RobotTaskEnv.__init__(self)
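# Minimal usage sketch (hypothetical episode loop; the gym-style reset/step API
# is assumed from RobotTaskEnv and is not shown in this file):
#
# env = PandaPushEnv(render=True, reward_type="dense")
# obs = env.reset()
# for _ in range(100):
#     obs, reward, done, info = env.step(env.action_space.sample())
#     if done:
#         obs = env.reset()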
|
StarcoderdataPython
|
11260230
|
# Source: chanzuckerberg/dcp-prototype
import unittest
import anndata
from backend.corpora.common.utils.color_conversion_utils import (
convert_color_to_hex_format,
convert_anndata_category_colors_to_cxg_category_colors,
)
from backend.corpora.common.utils.http_exceptions import ColorFormatException
from tests.unit.backend.corpora.fixtures.environment_setup import fixture_file_path
class TestColorConversionUtils(unittest.TestCase):
"""Test color conversion helper functions"""
def test_convert_color_to_hex_format(self):
self.assertEqual(convert_color_to_hex_format("wheat"), "#f5deb3")
self.assertEqual(convert_color_to_hex_format("WHEAT"), "#f5deb3")
self.assertEqual(convert_color_to_hex_format((245, 222, 179)), "#f5deb3")
self.assertEqual(convert_color_to_hex_format([245, 222, 179]), "#f5deb3")
self.assertEqual(convert_color_to_hex_format("#f5deb3"), "#f5deb3")
self.assertEqual(
convert_color_to_hex_format([0.9607843137254902, 0.8705882352941177, 0.7019607843137254]), "#f5deb3"
)
for bad_input in ["foo", "BAR", "#AABB", "#AABBCCDD", "#AABBGG", (1, 2), [1, 2], (1, 2, 3, 4), [1, 2, 3, 4]]:
with self.assertRaises(ColorFormatException):
convert_color_to_hex_format(bad_input)
def test_anndata_colors_to_cxg_colors(self):
# test standard behavior
adata = self._get_h5ad()
expected_pbmc3k_colors = {
"louvain": {
"B cells": "#2ca02c",
"CD14+ Monocytes": "#ff7f0e",
"CD4 T cells": "#1f77b4",
"CD8 T cells": "#d62728",
"Dendritic cells": "#e377c2",
"FCGR3A+ Monocytes": "#8c564b",
"Megakaryocytes": "#bcbd22",
"NK cells": "#9467bd",
}
}
self.assertEqual(convert_anndata_category_colors_to_cxg_category_colors(adata), expected_pbmc3k_colors)
# test that invalid color formats raise an exception
adata.uns["louvain_colors"][0] = "#NOTCOOL"
with self.assertRaises(ColorFormatException):
convert_anndata_category_colors_to_cxg_category_colors(adata)
# test that colors without a matching obs category are skipped
adata = self._get_h5ad()
del adata.obs["louvain"]
self.assertEqual(convert_anndata_category_colors_to_cxg_category_colors(adata), {})
def _get_h5ad(self):
return anndata.read_h5ad(fixture_file_path("pbmc3k.h5ad"))
|
StarcoderdataPython
|
250672
|
from django.db import models
# Create your models here.
class TodoIem(models.Model):
text = models.TextField(max_length=500)
def __str__(self):
return self.text
|
StarcoderdataPython
|
1961250
|
# Source: bitcaster-io/bitcaster
import logging
from django.contrib import admin
from django.contrib.auth.admin import UserAdmin as _UserAdmin
from django.utils.translation import gettext_lazy as _
from ..models import ApiAuthToken, ApplicationTriggerKey, User
from .forms import UserCreationForm
from .inlines import ApiTokenInline
from .site import site
logger = logging.getLogger(__name__)
@admin.register(User, site=site)
class UserAdmin(_UserAdmin):
inlines = [ApiTokenInline, ]
# add_form_template = 'admin/auth/user/add_form.html'
add_form = UserCreationForm
# form = UserChangeForm
search_fields = ('email',)
list_display = ('email', 'name', 'is_staff', 'is_superuser',
'language', 'timezone')
list_filter = ('is_staff', 'is_superuser', 'is_active', 'groups',)
ordering = ('email',)
fieldsets = (
(None, {'fields': (('email', 'password'),)}),
(_('Personal info'), {'fields': (('name', 'friendly_name'),
('language',),
('country', 'timezone'))}),
(_('Extra'), {'fields': ('storage', 'extras', 'options',
# 'groups', 'user_permissions'
)}),
(_('Permissions'), {'fields': ('is_active', 'is_staff', 'is_superuser',
# 'groups', 'user_permissions'
)}),
(_('Important dates'), {'fields': (('last_login',
'last_password_change',
'date_joined'),)}),
)
add_fieldsets = (
(None, {
'classes': ('wide',),
'fields': (('email', 'language'),
('country', 'timezone'),
('password1', 'password2'),),
}),
)
# def get_changeform_initial_data(self, request):
# initial = super().get_changeform_initial_data(request)
# remote_ip = get_client_ip(request)
# initial['language'] = request.LANGUAGE_CODE
# if remote_ip:
# from geolite2 import geolite2
# reader = geolite2.reader()
# match = reader.get(remote_ip)
# if match:
# # code = match['country']['iso_code'].lower()
# # c = pycountry.languages.get(alpha_2=code)
# # initial['language'] = c.alpha_2.lower()
# initial['country'] = match['country']['iso_code']
# initial['timezone'] = match['location']['time_zone']
# return initial
@admin.register(ApiAuthToken, site=site)
class ApiAuthTokenAdmin(admin.ModelAdmin):
list_display = ('application', 'user', 'token', 'enabled')
@admin.register(ApplicationTriggerKey, site=site)
class ApplicationTriggerKeyAdmin(admin.ModelAdmin):
list_display = ('application', 'token', 'enabled')
|
StarcoderdataPython
|
3371937
|
import logging
import sentry_sdk
import sentry_sdk.integrations.aiohttp
import sentry_sdk.integrations.logging
sentry_logging = sentry_sdk.integrations.logging.LoggingIntegration(
level=logging.INFO, event_level=logging.ERROR
)
def setup(server_version, dsn):
sentry_sdk.init(
dsn=dsn,
integrations=[
sentry_sdk.integrations.aiohttp.AioHttpIntegration(),
sentry_logging,
],
release=server_version,
traces_sample_rate=0.2,
)
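# Usage sketch (hypothetical values; the DSN would normally come from
# configuration or an environment variable rather than being hard-coded):
#
# setup(server_version="[email protected]", dsn=os.environ["SENTRY_DSN"])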
|
StarcoderdataPython
|
11225089
|
from pubsub import pub
from . import portnums_pb2, remote_hardware_pb2
def onGPIOreceive(packet, interface):
"""Callback for received GPIO responses
FIXME figure out how to do closures with methods in python"""
hw = packet["decoded"]["remotehw"]
print(f'Received RemoteHardware typ={hw["typ"]}, gpio_value={hw["gpioValue"]}')
class RemoteHardwareClient:
"""
This is the client code to control/monitor simple hardware built into the
meshtastic devices. It is intended to be both a useful API/service and example
code for how you can connect to your own custom meshtastic services
"""
def __init__(self, iface):
"""
Constructor
iface is the already open MeshInterface instance
"""
self.iface = iface
ch = iface.localNode.getChannelByName("gpio")
if not ch:
raise Exception(
"No gpio channel found, please create on the sending and receive nodes to use this (secured) service (--ch-add gpio --info then --seturl)")
self.channelIndex = ch.index
pub.subscribe(
onGPIOreceive, "meshtastic.receive.remotehw")
def _sendHardware(self, nodeid, r, wantResponse=False, onResponse=None):
if not nodeid:
raise Exception(
"You must set a destination node ID for this operation (use --dest \!xxxxxxxxx)")
return self.iface.sendData(r, nodeid, portnums_pb2.REMOTE_HARDWARE_APP,
wantAck=True, channelIndex=self.channelIndex, wantResponse=wantResponse, onResponse=onResponse)
def writeGPIOs(self, nodeid, mask, vals):
"""
Write the specified vals bits to the device GPIOs. Only bits in mask that
are 1 will be changed
"""
r = remote_hardware_pb2.HardwareMessage()
r.typ = remote_hardware_pb2.HardwareMessage.Type.WRITE_GPIOS
r.gpio_mask = mask
r.gpio_value = vals
return self._sendHardware(nodeid, r)
def readGPIOs(self, nodeid, mask, onResponse = None):
"""Read the specified bits from GPIO inputs on the device"""
r = remote_hardware_pb2.HardwareMessage()
r.typ = remote_hardware_pb2.HardwareMessage.Type.READ_GPIOS
r.gpio_mask = mask
return self._sendHardware(nodeid, r, wantResponse=True, onResponse=onResponse)
def watchGPIOs(self, nodeid, mask):
"""Watch the specified bits from GPIO inputs on the device for changes"""
r = remote_hardware_pb2.HardwareMessage()
r.typ = remote_hardware_pb2.HardwareMessage.Type.WATCH_GPIOS
r.gpio_mask = mask
return self._sendHardware(nodeid, r)
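# Usage sketch (hypothetical node ID and mask values; assumes an already-open
# MeshInterface with a "gpio" channel configured on both nodes, as required by
# the constructor above):
#
# rhc = RemoteHardwareClient(iface)
# rhc.writeGPIOs("!deadbeef", mask=0b100, vals=0b100)  # drive one pin high
# rhc.readGPIOs("!deadbeef", mask=0b100)               # reply arrives via onGPIOreceive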
|
StarcoderdataPython
|
3595172
|
import asyncio
import datetime
from app import app
from app.tasks.task_utils import bind_to_service
from app.tasks.build_tasks.create_services import create_services
async def check_heartbeat(services_dict, timeout):
"""
Fetches all available services and binds to their ports in order to check if their are up.
"""
while True:
# Periodic task with infinite loop and timeout
await asyncio.sleep(int(timeout))
# These datetimes are used only for logging
start = datetime.datetime.now()
app.logger.info('Heartbeat Task started at: %s' % (str(start.strftime('%Y-%m-%d %H:%M:%S'))))
if services_dict:
# build a response for the heartbeat task
response = ''
for service_name in services_dict.keys():
running = bind_to_service(services_dict[service_name]['ip_address'], services_dict[service_name]['port'])
# if a service is down, add its name to the response
# this is a simple log notification. You may want to send an email to your admin
if not running:
response = response + service_name + ' '
if response:
# Total time is only for logging purposes
total_time = int((datetime.datetime.now() - start).microseconds / 1000)
app.logger.info('Finished in %s ms. DOWN: %s' % (str(total_time), response))
else:
total_time = int((datetime.datetime.now() - start).microseconds / 1000)
app.logger.info('Finished in %s ms. Core heartbeat: OK' % (str(total_time)))
else:
# If no services were found, fetch them again before the next iteration
app.logger.info('No Services found. Re-fetching...')
services_dict = create_services()
# Sleep until next iteration.
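# Scheduling sketch (hypothetical values; create_services() is assumed to return
# the same {name: {'ip_address': ..., 'port': ...}} mapping this coroutine expects):
#
# services = create_services()
# asyncio.get_event_loop().create_task(check_heartbeat(services, timeout=30))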
|
StarcoderdataPython
|
198856
|
# File: Roku Network Remote.indigoPlugin/Contents/Server Plugin/RPFramework/dataAccess/indigosql.py
#! /usr/bin/env python
#/////////////////////////////////////////////////////////////////////////////////////////
#/////////////////////////////////////////////////////////////////////////////////////////
# IndigoSql by RogueProeliator <<EMAIL>>
# This file provides access to a SQL database in a standard method, be it in
# SQLList or PostGRE format.
#
# Much of this file was adapted from Perceptive Automation's SQL Logger plugin and used
# with permission of the authors. Therefore, portions of this code are governed by their
# copyright, found below. As such, redistribution of this software in any form is not
# permitted without permission from all parties.
#
# ################### ORIGINAL COPYRIGHT ###################
# Copyright (c) 2012, Perceptive Automation, LLC. All rights reserved.
# http://www.perceptiveautomation.com
#
# Redistribution of this source file, its binary forms, and images are not allowed.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY
# EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
# OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
#
# IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
# INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
# NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
#
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN
# IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# ##########################################################
#/////////////////////////////////////////////////////////////////////////////////////////
#/////////////////////////////////////////////////////////////////////////////////////////
#/////////////////////////////////////////////////////////////////////////////////////////
# Python imports
#/////////////////////////////////////////////////////////////////////////////////////////
import datetime
import os
import re
import socket
import sys
import time
import ConfigParser
from errno import EWOULDBLOCK, EINTR, EMSGSIZE, ECONNREFUSED, EAGAIN
#/////////////////////////////////////////////////////////////////////////////////////////
# Constants and configuration variables
#/////////////////////////////////////////////////////////////////////////////////////////
kDbType_sqlite = 0
kDbType_postgres = 1
kAutoIncrKey_sqlite = u"INTEGER PRIMARY KEY"
kAutoIncrKey_postgres = u"SERIAL PRIMARY KEY"
kDbConnectTimeout = 8
#/////////////////////////////////////////////////////////////////////////////////////////
# SQLLite database type conversion routines
#/////////////////////////////////////////////////////////////////////////////////////////
def adapt_boolean(val):
if val:
return "True"
else:
return "False"
def convert_boolean(valStr):
if str(valStr) == "True":
return bool(True)
elif str(valStr) == "False":
return bool(False)
else:
raise ValueError, "Unknown value of bool attribute '%s'" % valStr
def nopDebugLog(unusedMsg):
pass
#/////////////////////////////////////////////////////////////////////////////////////////
#/////////////////////////////////////////////////////////////////////////////////////////
# IndigoSql
# Base class for database access which allows for a standard interface to the different
# database types supported by the SQL Logger
#/////////////////////////////////////////////////////////////////////////////////////////
#/////////////////////////////////////////////////////////////////////////////////////////
class IndigoSql:
#/////////////////////////////////////////////////////////////////////////////////////
# Class construction and destruction methods
#/////////////////////////////////////////////////////////////////////////////////////
def __init__(self, sqlType, sleepFunc, logFunc, debugLogFunc):
self.sqlType = sqlType
if self.sqlType == kDbType_sqlite:
self.sqlAutoIncrKey = kAutoIncrKey_sqlite
elif self.sqlType == kDbType_postgres:
self.sqlAutoIncrKey = kAutoIncrKey_postgres
else:
raise Exception('databaseType specified not valid (select sqlite or postgres)')
self._Sleep = sleepFunc
self._Log = logFunc
self._DebugLog = debugLogFunc
if not self._DebugLog:
self._DebugLog = nopDebugLog
self.sqlConn = None
self.sqlConnGood = False
self.sqlCursor = None
#/////////////////////////////////////////////////////////////////////////////////////
# Property-access style functions
#/////////////////////////////////////////////////////////////////////////////////////
#-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-
# Returns a boolean indicating if the database connection is up-and-running
#-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-
def IsSqlConnectionGood(self):
return self.sqlConnGood
#-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-
# Allows quickly determining if this is a PostGRE database type
#-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-
def IsTypePostgres(self):
return self.sqlType == kDbType_postgres
#-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-
# Allows quickly determining if this is a SQLLite database type
#-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-
def IsTypeSqlite(self):
return self.sqlType == kDbType_sqlite
#/////////////////////////////////////////////////////////////////////////////////////
# Database connection routines
#/////////////////////////////////////////////////////////////////////////////////////
#-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-
# This will "safely" shut down the database connection and connected data
#-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-
def CloseSqlConnection(self):
if self.sqlCursor:
self.sqlCursor.close()
self.sqlCursor = None
if self.sqlConn:
self.sqlConn.close()
self.sqlConn = None
#/////////////////////////////////////////////////////////////////////////////////////
# Database structure access routines
#/////////////////////////////////////////////////////////////////////////////////////
#-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-
# This routine will return a list of table names that exist within the SQL logger
# database
#-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-
def GetAllTableNames(self):
# PostgreSQL doesn't have IF NOT EXISTS when creating tables, so we have to query the schema.
if not self.sqlConn or not self.sqlCursor:
raise Exception('not connected to database')
tableNames = []
if self.sqlType == kDbType_sqlite:
self.sqlCursor.execute("SELECT name FROM sqlite_master WHERE type='table';")
for nameObj in self.sqlCursor.fetchall():
tableNames.append(nameObj[0])
elif self.sqlType == kDbType_postgres:
self.sqlCursor.execute("SELECT table_name FROM information_schema.tables WHERE table_schema='public';")
for nameObj in self.sqlCursor.fetchall():
tableNames.append(nameObj[0])
return tableNames
#-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-
# This routine check to see if a table by the given name already exists in the DB
#-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-
def TableExists(self, tableName):
# PostgreSQL doesn't have IF NOT EXISTS when creating tables, so we have to query the schema.
if not self.sqlConn or not self.sqlCursor:
raise Exception('not connected to database')
if self.sqlType == kDbType_sqlite:
self.sqlCursor.execute("SELECT name FROM sqlite_master WHERE type='table' AND name=?;", (tableName,))
return len(self.sqlCursor.fetchall()) == 1
elif self.sqlType == kDbType_postgres:
self.sqlCursor.execute("SELECT * FROM information_schema.tables WHERE table_schema='public' AND table_name=%s;", (tableName,))
return self.sqlCursor.rowcount == 1
return False
#-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-
# This routine will create a dictionary of the column names and types defined for
# the given table name
#-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-
def GetTableColumnNamesAndTypes(self, tableName):
if not self.sqlConn or not self.sqlCursor:
raise Exception('not connected to database')
colTypeDict = {}
if self.sqlType == kDbType_sqlite:
# Yuck. Sqlite doesn't have a good way to get out the column names and types. Instead we
# extract out the table CREATE definition, then parse out the column names and types from
# that statement. Ugly, but works well enough.
self.sqlCursor.execute("SELECT sql FROM sqlite_master WHERE type='table' AND name=?;", (tableName,))
cleanitem = re.sub(r"[ \t\n\r\f\v]+", ' ', self.sqlCursor.fetchone()[0])
cleanitem = cleanitem.split(")")[0].split("(")[1].lower()
colTypeDict = dict([pair.strip().split(" ",1) for pair in cleanitem.split(",")])
elif self.sqlType == kDbType_postgres:
self.sqlCursor.execute("SELECT column_name, data_type from information_schema.columns WHERE table_name=%s;", (tableName,))
for item in self.sqlCursor.fetchall():
colTypeDict[item[0]] = item[1].lower()
return colTypeDict
#/////////////////////////////////////////////////////////////////////////////////////
# Table modification routines
#/////////////////////////////////////////////////////////////////////////////////////
#-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-
# This routine will add a new column to the given table
#-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-
def AddTableColumn(self, tableName, colName, colType):
sqlStr = "ALTER TABLE %s ADD COLUMN %s %s;" % (tableName, colName, colType)
self.ExecuteWithSubstitution(sqlStr);
#-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-
# This routine will modify the column type of an existing column in a table
#-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-
def ModifyTableColumnType(self, tableName, colName, colType):
if not self.sqlConn or not self.sqlCursor:
raise Exception('not connected to database')
if self.sqlType == kDbType_sqlite:
# The Good News: Because SQLite doesn't care about column types we will probably
# never need to try to modify a column type. For example, we won't get an error
# when trying to insert "oak tree" into a BOOL type column. Note SQLite does let
# you specify a column type in the CREATE table call, but those types are not
# enforced. So there probably isn't a critical reason to worry about modifying the
# types after the CREATE if they need to change (besides the fact that it would
# make the table definition look "more correct").
#
# The Bad News: If we do decide to care about this, it is a pain because SQLite
# doesn't support ALTER COLUMN. The solution is to create a temporary table with
# the correct types and move over the contents of the previous table. Something
# roughly like this:
#
# BEGIN TRANSACTION
# ALTER TABLE orig_table_name RENAME TO tmp_table_name;
# CREATE TABLE orig_table_name (col_a INT, col_b INT, ...);
# INSERT INTO orig_table_name(col_a, col_b, ...)
# SELECT col_a, colb, ...
# FROM tmp_table_name;
# DROP TABLE tmp_table_name;
# COMMIT;
#
raise Exception('modifying SQLite table column types is not supported')
elif self.sqlType == kDbType_postgres:
sqlStr = "ALTER TABLE %s ALTER COLUMN %s TYPE %s;" % (tableName, colName, colType)
self.ExecuteWithSubstitution(sqlStr)
#/////////////////////////////////////////////////////////////////////////////////////
# Data retrieval and modification routines
#/////////////////////////////////////////////////////////////////////////////////////
#-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-
# This routine will prune all of the data in a table that was stamped prior to a date
#-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-
def PruneOldTableRows(self, tableName, beforeDateTime):
if self.IsTypePostgres():
sqlStr = "DELETE FROM " + tableName + " WHERE ts < %s;"
elif self.IsTypeSqlite():
sqlStr = "DELETE FROM " + tableName + " WHERE datetime(ts,'localtime') < %s;"
if isinstance(beforeDateTime, datetime.datetime):
self.ExecuteWithSubstitution(sqlStr, (beforeDateTime.isoformat(" "),))
else: # assume it is just a date (not datetime), so no arg to isoformat()
self.ExecuteWithSubstitution(sqlStr, (beforeDateTime.isoformat(),))
#-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-
# This routine will execute a SQL statement as-is
#-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-
def ExecuteSQL(self, command, subArgs=None):
if not self.sqlConnGood:
raise Exception('not connected to database')
if self.sqlType == kDbType_sqlite and not subArgs:
# sqLite doesn't like None specified for args; use empty tuple instead
subArgs = ()
self.sqlCursor.execute(command, subArgs)
#-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-
# This routine will execute a SQL statement while doing substitution of parameters
#-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-
def ExecuteWithSubstitution(self, command, subArgs=None):
if not self.sqlConnGood:
raise Exception('not connected to database')
if self.sqlType == kDbType_sqlite:
# SQLite uses the ? character for argument substitution
command = command.replace("%d", "?")
command = command.replace("%f", "?")
command = command.replace("%s", "?")
if not subArgs: # sqLite doesn't like None specified for args; use empty tuple instead
subArgs = ()
elif self.sqlType == kDbType_postgres:
# postgres uses printf-style placeholders for argument substitution
pass
command = command.replace("#AUTO_INCR_KEY", self.sqlAutoIncrKey)
self._DebugLog(command)
if subArgs:
self._DebugLog(" %s" % str(subArgs))
self.sqlCursor.execute(command, subArgs)
if self.sqlType == kDbType_sqlite:
self.sqlConn.commit()
#-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-
# This routine will execute a SQL statement to select the given columns from a table
# for records that fall within a given date/time range
#-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-
def QueryFromTableUsingRange(self, tableName, elemColumn, elemName, startTime, endTime, columnList):
if self.IsTypePostgres():
sqlStr = """
SELECT id, ts, #ELEMCOLUMN, #COLUMNLIST
FROM #TABLENAME
WHERE #ELEMCOLUMN = %s AND ts BETWEEN %s AND %s
ORDER BY id;
"""
elif self.IsTypeSqlite():
sqlStr = """
SELECT id, ts as 'ts [timestamp]', #ELEMCOLUMN, #COLUMNLIST
FROM #TABLENAME
WHERE #ELEMCOLUMN = %s AND datetime(ts,'localtime') BETWEEN %s AND %s
ORDER BY id;
"""
sqlStr = sqlStr.replace("#TABLENAME", tableName)
sqlStr = sqlStr.replace("#ELEMCOLUMN", elemColumn)
sqlStr = sqlStr.replace("#COLUMNLIST", ','.join(columnList))
self.ExecuteWithSubstitution(sqlStr, (elemName, startTime.isoformat(' '), endTime.isoformat(' ')))
#-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-
# This routine will fetch a single record from the current cursor
#-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-
def FetchOne(self):
if not self.sqlConnGood:
raise Exception('not connected to database')
return self.sqlCursor.fetchone()
#-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-
# This routine will fetch all of the records from the current cursor
#-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-
def FetchAll(self):
if not self.sqlConnGood:
raise Exception('not connected to database')
return self.sqlCursor.fetchall()
#/////////////////////////////////////////////////////////////////////////////////////
# Utility Routines
#/////////////////////////////////////////////////////////////////////////////////////
#-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-
    # This routine can format a timestamp column to retrieve the value in the local
    # time (as opposed to UTC); by default we make no changes
#-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-
def getLocalTimestampColumn(self, tsColumnName):
return tsColumnName
#/////////////////////////////////////////////////////////////////////////////////////////
#/////////////////////////////////////////////////////////////////////////////////////////
# IndigoSqlite
# This concrete implementation allows access to the SQLite database
#/////////////////////////////////////////////////////////////////////////////////////////
#/////////////////////////////////////////////////////////////////////////////////////////
class IndigoSqlite(IndigoSql):
#/////////////////////////////////////////////////////////////////////////////////////
# Constructors and Destructors
#/////////////////////////////////////////////////////////////////////////////////////
def __init__(self, sql_lite_db_file, sleepFunc, logFunc, debugLogFunc):
IndigoSql.__init__(self, kDbType_sqlite, sleepFunc, logFunc, debugLogFunc)
# Create connection to database. Create database and tables if they do not exist.
try:
self.sqlmod = __import__('sqlite3', globals(), locals())
self.sqlmod.register_adapter(bool, adapt_boolean)
self.sqlmod.register_converter("boolean", convert_boolean)
except Exception, e:
self._Log("exception trying to load python sqlite3 module: " + str(e), isError=True)
raise
try:
self.sqlConn = self.sqlmod.connect(sql_lite_db_file, detect_types=self.sqlmod.PARSE_COLNAMES)
self.sqlCursor = self.sqlConn.cursor()
self.sqlConnGood = True
self._Log("connected to " + sql_lite_db_file)
except Exception, e:
if self.sqlCursor:
self.sqlCursor.close()
self.sqlCursor = None
if self.sqlConn:
self.sqlConn.close()
self.sqlConn = None
self._Log("exception trying to connect or create database file: %s" % (sql_lite_db_file), isError=True)
self._Log(str(e), isError=True)
raise
#/////////////////////////////////////////////////////////////////////////////////////
# Utility Routines
#/////////////////////////////////////////////////////////////////////////////////////
#-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-
# This routine can format a timestamp column to retrieve the value in the local
    # time (as opposed to UTC)
#-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-
def getLocalTimestampColumn(self, tsColumnName):
return "datetime(" + tsColumnName + ", 'localtime') as " + tsColumnName
#/////////////////////////////////////////////////////////////////////////////////////////
#/////////////////////////////////////////////////////////////////////////////////////////
# IndigoPostgresql
# This concrete implementation allows access to the PostgreSQL database
#/////////////////////////////////////////////////////////////////////////////////////////
#/////////////////////////////////////////////////////////////////////////////////////////
class IndigoPostgresql(IndigoSql):
#/////////////////////////////////////////////////////////////////////////////////////
# Constructors and Destructors
#/////////////////////////////////////////////////////////////////////////////////////
def __init__(self, sql_host, sql_user, sql_password, sql_database, sleepFunc, logFunc, debugLogFunc):
IndigoSql.__init__(self, kDbType_postgres, sleepFunc, logFunc, debugLogFunc)
# Create connection to database. Create database and tables if they do not exist.
try:
self.sqlmod = __import__('', globals(), locals(), ['bpgsql'])
self.sqlmod = getattr(self.sqlmod, 'bpgsql')
#self.sqlmod = __import__('MySQLdb', globals(), locals())
except Exception, e:
self._Log("exception trying to load python bpgsql module: " + str(e), isError=True)
raise
loggedException = False
try:
try:
self.sqlConn = self.sqlmod.connect(host=sql_host, dbname=sql_database, username=sql_user, password=<PASSWORD>, timeout=kDbConnectTimeout)
# additional interesting args: use_unicode=1, charset='utf8'
self.sqlCursor = self.sqlConn.cursor()
except socket.error, e:
# Server likely hasn't started (we could test to see if value == ECONNREFUSED here),
                # so we sleep for a bit and try again. Unfortunately, IndigoServer can be launched
# during the OS startup before PostgreSQL has started.
self._Log("PostgreSQL server %s is not reachable (may not have started yet)" % (sql_host), isError=True)
loggedException = True
raise
except self.sqlmod.DatabaseError, e:
# Database probably didn't exist, try to create it.
self.sqlConn = self.sqlmod.connect(host=sql_host, username=sql_user, password=<PASSWORD>, timeout=kDbConnectTimeout)
self.sqlCursor = self.sqlConn.cursor()
self._Log("creating new database: " + sql_database)
self.sqlCursor.execute("CREATE DATABASE " + sql_database + ";")
# No good way to select the new database without recreating the connection.
self.CloseSqlConnection()
self.sqlConn = self.sqlmod.connect(host=sql_host, dbname=sql_database, username=sql_user, password=<PASSWORD>, timeout=kDbConnectTimeout)
self.sqlCursor = self.sqlConn.cursor()
self.sqlConnGood = True
self._Log("connected to %s as %s on %s" % (sql_database, sql_user, sql_host))
except Exception, e:
if self.sqlCursor:
self.sqlCursor.close()
self.sqlCursor = None
if self.sqlConn:
self.sqlConn.close()
self.sqlConn = None
if not loggedException:
self._Log("exception trying to connect or create database %s on %s" % (sql_database, sql_host), isError=True)
self._Log(str(e), isError=True)
raise
|
StarcoderdataPython
|
1721719
|
"""Contains all exception classes used within this library."""
from abc import ABC
from amplitude_python_sdk.common.models import BaseAPIError
class AmplitudeAPIException(Exception, ABC):
error: BaseAPIError
def __init__(self, error: BaseAPIError):
super().__init__()
self.error = error
|
StarcoderdataPython
|
9715545
|
<reponame>csdms/dakotathon<filename>dakotathon/tests/test_responses_base.py<gh_stars>1-10
"""Tests for the dakotathon.responses.base module."""
import os, sys
from nose.tools import raises, assert_true, assert_false, assert_equal
from dakotathon.responses.base import ResponsesBase
descriptors = ["a", "b"]
class Concrete(ResponsesBase):
"""A subclass of ResponsesBase used for testing."""
def __init__(self):
ResponsesBase.__init__(self)
def setup_module():
"""Fixture called before any tests are performed."""
print("\n*** " + __name__)
global c
c = Concrete()
def teardown_module():
"""Fixture called after all tests have completed."""
pass
@raises(TypeError)
def test_instantiate():
"""Test whether ResponsesBase instantiates."""
    if sys.version_info[0] == 2:
r = ResponsesBase()
else:
# abstract base class type error not raised
# in python 3.
raise (TypeError)
def test_str_special():
"""Test type of __str__ method results."""
s = str(c)
assert_true(type(s) is str)
def test_responses():
"""Test the default responses attribute."""
value = c.responses
assert_equal(value, "response_functions")
def test_get_response_descriptors():
"""Test getting the response_descriptors property."""
assert_equal(c.response_descriptors, tuple())
def test_set_response_descriptors():
"""Test setting the response_descriptors property."""
r = Concrete()
for desc in [["Qs_median"], ("Qs_median",)]:
r.response_descriptors = desc
assert_equal(r.response_descriptors, desc)
@raises(TypeError)
def test_set_response_descriptors_fails_with_nonstring_scalar():
"""Test that response_descriptors fails with a non-string scalar."""
r = Concrete()
desc = 42
r.response_descriptors = desc
def test_set_response_descriptors_string_to_tuple():
"""Test that a string is converted to a tuple."""
r = Concrete()
desc = "x1"
r.response_descriptors = desc
assert_true(type(r.response_descriptors) is tuple)
def test_gradients():
"""Test getting the default gradients property."""
value = c.gradients
assert_equal(value, "no_gradients")
def test_hessians():
"""Test getting the default hessians property."""
value = c.hessians
assert_equal(value, "no_hessians")
def test_str_length():
"""Test the default length of __str__."""
s = str(c)
n_lines = len(s.splitlines())
assert_equal(n_lines, 1)
|
StarcoderdataPython
|
3432599
|
print("hello, I am here to glucosify your life")
print("The Potato lords will come for your soul")
|
StarcoderdataPython
|
8185868
|
from __future__ import print_function, division
from sympy.core import Mul, sympify, Pow
from sympy.strategies import unpack, flatten, condition, exhaust, do_one
from sympy.matrices.expressions.matexpr import MatrixExpr, ShapeError
def hadamard_product(*matrices):
"""
Return the elementwise (aka Hadamard) product of matrices.
Examples
========
>>> from sympy.matrices import hadamard_product, MatrixSymbol
>>> A = MatrixSymbol('A', 2, 3)
>>> B = MatrixSymbol('B', 2, 3)
>>> hadamard_product(A)
A
>>> hadamard_product(A, B)
A.*B
>>> hadamard_product(A, B)[0, 1]
A[0, 1]*B[0, 1]
"""
if not matrices:
raise TypeError("Empty Hadamard product is undefined")
validate(*matrices)
if len(matrices) == 1:
return matrices[0]
else:
return HadamardProduct(*matrices).doit()
class HadamardProduct(MatrixExpr):
"""
Elementwise product of matrix expressions
This is a symbolic object that simply stores its argument without
evaluating it. To actually compute the product, use the function
``hadamard_product()``.
>>> from sympy.matrices import hadamard_product, HadamardProduct, MatrixSymbol
>>> A = MatrixSymbol('A', 5, 5)
>>> B = MatrixSymbol('B', 5, 5)
>>> isinstance(hadamard_product(A, B), HadamardProduct)
True
"""
is_HadamardProduct = True
def __new__(cls, *args, **kwargs):
args = list(map(sympify, args))
check = kwargs.get('check' , True)
if check:
validate(*args)
return super(HadamardProduct, cls).__new__(cls, *args)
@property
def shape(self):
return self.args[0].shape
def _entry(self, i, j):
return Mul(*[arg._entry(i, j) for arg in self.args])
def _eval_transpose(self):
from sympy.matrices.expressions.transpose import transpose
return HadamardProduct(*list(map(transpose, self.args)))
def doit(self, **ignored):
return canonicalize(self)
def validate(*args):
if not all(arg.is_Matrix for arg in args):
raise TypeError("Mix of Matrix and Scalar symbols")
A = args[0]
for B in args[1:]:
if A.shape != B.shape:
raise ShapeError("Matrices %s and %s are not aligned" % (A, B))
rules = (unpack,
flatten)
canonicalize = exhaust(condition(lambda x: isinstance(x, HadamardProduct),
do_one(*rules)))
def hadamard_power(base, exp):
base = sympify(base)
exp = sympify(exp)
if exp == 1:
return base
if not base.is_Matrix:
return base**exp
if exp.is_Matrix:
raise ValueError("cannot raise expression to a matrix")
return HadamardPower(base, exp)
class HadamardPower(MatrixExpr):
"""
Elementwise power of matrix expressions
"""
def __new__(cls, base, exp):
base = sympify(base)
exp = sympify(exp)
obj = super(HadamardPower, cls).__new__(cls, base, exp)
return obj
@property
def base(self):
return self._args[0]
@property
def exp(self):
return self._args[1]
@property
def shape(self):
return self.base.shape
def _entry(self, i, j, **kwargs):
return self.base[i, j]**self.exp
def _eval_transpose(self):
from sympy.matrices.expressions.transpose import transpose
return HadamardPower(transpose(self.base), self.exp)
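# Illustrative usage sketch in the style of the docstring examples above:
# >>> from sympy.matrices import MatrixSymbol
# >>> A = MatrixSymbol('A', 2, 2)
# >>> hadamard_power(A, 3)[0, 1]
# A[0, 1]**3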
|
StarcoderdataPython
|
5117597
|
import winreg;
from mWindowsSDK import *;
gduHive_by_sName = {
"HKCR": winreg.HKEY_CLASSES_ROOT,
"HKEY_CLASSES_ROOT": winreg.HKEY_CLASSES_ROOT,
"HKCU": winreg.HKEY_CURRENT_USER,
"HKEY_CURRENT_USER": winreg.HKEY_CURRENT_USER,
"HKLM": winreg.HKEY_LOCAL_MACHINE,
"HKEY_LOCAL_MACHINE": winreg.HKEY_LOCAL_MACHINE,
"HKU": winreg.HKEY_USERS,
"HKEY_USERS": winreg.HKEY_USERS,
"HKCC": winreg.HKEY_CURRENT_CONFIG,
"HKEY_CURRENT_CONFIG": winreg.HKEY_CURRENT_CONFIG,
};
gdsName_by_uHive = {
winreg.HKEY_CLASSES_ROOT: "HKEY_CLASSES_ROOT",
winreg.HKEY_CURRENT_USER: "HKEY_CURRENT_USER",
winreg.HKEY_LOCAL_MACHINE: "HKEY_LOCAL_MACHINE",
winreg.HKEY_USERS: "HKEY_USERS",
winreg.HKEY_CURRENT_CONFIG: "HKEY_CURRENT_CONFIG",
};
class cRegistryHive(object):
duHive_by_sName = gduHive_by_sName;
dsName_by_uHive = gdsName_by_uHive;
def __init__(oSelf, xUnused = None, uHive = None, sHiveName = None):
assert xUnused is None, \
"Constructor arguments must be named values!";
if uHive is None:
assert sHiveName is not None, \
"You must provide either uHive or sHiveName, not both";
uHive = cRegistryHive.duHive_by_sName.get(sHiveName);
assert uHive is not None, \
"You must provide a valid sHiveName, not %s" % repr(sHiveName);
else:
assert uHive in cRegistryHive.dsName_by_uHive, \
"You must provide a valid uHive, not %s" % repr(uHive);
oSelf.__uHive = uHive;
oSelf.__oHive = None;
@property
def uHive(oSelf):
# Getter for uHive
return oSelf.__uHive;
@uHive.setter
def uHive(oSelf, uHive):
# Setter for uHive deletes cached oHive
oSelf.__oHive = None;
oSelf.__uHive = uHive;
return uHive;
@property
def sHiveName(oSelf):
# Getter for sHiveName
return cRegistryHive.dsName_by_uHive[oSelf.__uHive];
@sHiveName.setter
def sHiveName(oSelf, sHiveName):
# Setter for sHiveName sets uHive, which deletes cached oHive
assert sHiveName in cRegistryHive.duHive_by_sName, \
"Unknown hive name %s" % sHiveName;
oSelf.uHive = cRegistryHive.duHive_by_sName[sHiveName];
@property
def oHive(oSelf):
if oSelf.__oHive is None:
oSelf.__oHive = winreg.ConnectRegistry(None, oSelf.uHive);
return oSelf.__oHive;
def foCreateWinRegKey(oSelf, sKeyPath, bForWriting = False, uRegistryBits = 0):
uAccessMask = winreg.KEY_READ | (bForWriting and winreg.KEY_SET_VALUE or 0) | {32: winreg.KEY_WOW64_32KEY, 64:winreg.KEY_WOW64_64KEY}.get(uRegistryBits, 0);
return winreg.CreateKeyEx(oSelf.oHive, sKeyPath, 0, uAccessMask);
def foCreateHiveKey(oSelf, sKeyPath, bForWriting = False, uRegistryBits = 0):
oWinRegKey = oSelf.foCreateWinRegKey(sKeyPath, bForWriting = bForWriting, uRegistryBits = uRegistryBits);
return cRegistryHiveKey(
sKeyPath = sKeyPath,
oRegistryHive = oSelf,
oWinRegKey = oWinRegKey,
bWinRegKeyOpenForWriting = bForWriting,
);
def foOpenWinRegKey(oSelf, sKeyPath, bForWriting = False, uRegistryBits = 0, bThrowErrors = False):
uAccessMask = winreg.KEY_READ | (bForWriting and winreg.KEY_SET_VALUE or 0) | {32: winreg.KEY_WOW64_32KEY, 64:winreg.KEY_WOW64_64KEY}.get(uRegistryBits, 0);
try:
return winreg.OpenKey(oSelf.oHive, sKeyPath, 0, uAccessMask);
except WindowsError as oWindowsError:
if bThrowErrors or oWindowsError.errno != ERROR_FILE_NOT_FOUND:
raise;
return None; # The key does not exist.
def foOpenHiveKey(oSelf, sKeyPath, bForWriting = False, uRegistryBits = 0, bThrowErrors = False):
oWinRegKey = oSelf.foOpenWinRegKey(sKeyPath, bForWriting = bForWriting, uRegistryBits = uRegistryBits, bThrowErrors = bThrowErrors);
return oWinRegKey and cRegistryHiveKey(
sKeyPath = sKeyPath,
oRegistryHive = oSelf,
oWinRegKey = oWinRegKey,
bWinRegKeyOpenForWriting = bForWriting,
);
def fbDeleteHiveKeySubKey(oSelf, oHiveKey, sSubKeyName, uRegistryBits = 0, bThrowErrors = False):
oWinRegKey = oSelf.foOpenWinRegKey(oHiveKey.sKeyPath, bForWriting = True, uRegistryBits = uRegistryBits, bThrowErrors = bThrowErrors);
if not oWinRegKey:
assert not bThrowErrors, \
"Unreachable code!?";
return False;
try:
winreg.DeleteKey(oWinRegKey, sSubKeyName);
except WindowsError as oWindowsError:
if bThrowErrors or oWindowsError.errno != ERROR_FILE_NOT_FOUND:
raise;
return False; # The value does not exist.
return True;
@property
def sFullPath(oSelf):
return oSelf.sHiveName;
def fsToString(oSelf):
return "%s{path=%s}" % (oSelf.__class__.__name__, oSelf.sFullPath);
def __repr__(oSelf):
return "<%s %s>" % (oSelf.__class__.__name__, oSelf.sFullPath);
def __str__(oSelf):
return "%s %s" % (oSelf.__class__.__name__, oSelf.sFullPath);
from .cRegistryHiveKey import cRegistryHiveKey;
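# Illustrative usage sketch (the key path is hypothetical; naming follows this module):
# oHive = cRegistryHive(sHiveName = "HKLM");
# oKey = oHive.foOpenHiveKey("SOFTWARE\\Microsoft\\Windows\\CurrentVersion");
# if oKey: print(repr(oKey));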
|
StarcoderdataPython
|
4926837
|
# https://deeplearningcourses.com/c/data-science-natural-language-processing-in-python
# https://www.udemy.com/data-science-natural-language-processing-in-python
# Author: http://lazyprogrammer.me
from __future__ import print_function, division
from future.utils import iteritems
from builtins import range
# Note: you may need to update your version of future
# sudo pip install -U future
import os, sys
import string
import numpy as np
import matplotlib.pyplot as plt
from scipy.sparse import lil_matrix, csr_matrix, save_npz, load_npz
from scipy.spatial.distance import cosine as cos_dist
from sklearn.metrics.pairwise import pairwise_distances
from glob import glob
from datetime import datetime
# input files
files = glob('../large_files/enwiki*.txt')
# unfortunately these work different ways
def remove_punctuation_2(s):
return s.translate(None, string.punctuation)
def remove_punctuation_3(s):
return s.translate(str.maketrans('','',string.punctuation))
if sys.version.startswith('2'):
remove_punctuation = remove_punctuation_2
else:
remove_punctuation = remove_punctuation_3
# max vocab size
V = 2000
# context size
context_size = 10
# word counts
all_word_counts = {}
# get the top V words
num_lines = 0
num_tokens = 0
for f in files:
for line in open(f):
# don't count headers, structured data, lists, etc...
if line and line[0] not in ('[', '*', '-', '|', '=', '{', '}'):
num_lines += 1
for word in remove_punctuation(line).lower().split():
num_tokens += 1
if word not in all_word_counts:
all_word_counts[word] = 0
all_word_counts[word] += 1
print("num_lines:", num_lines)
print("num_tokens:", num_tokens)
# words I really want to keep
keep_words = [
'king', 'man', 'queen', 'woman',
'heir', 'heiress', 'prince', 'princess',
'nephew', 'niece', 'uncle', 'aunt',
'husband', 'wife', 'brother', 'sister',
'tokyo', 'beijing', 'dallas', 'texas',
'january', 'february', 'march',
'april', 'may', 'june',
'july', 'august', 'september',
'october', 'november', 'december',
'actor', 'actress',
'rice', 'bread', 'miami', 'florida',
'walk', 'walking', 'swim', 'swimming',
]
for w in keep_words:
all_word_counts[w] = float('inf')
# sort in descending order
all_word_counts = sorted(all_word_counts.items(), key=lambda x: x[1], reverse=True)
# keep just the top V words
# save a slot for <UNK>
V = min(V, len(all_word_counts))
top_words = [w for w, count in all_word_counts[:V-1]] + ['<UNK>']
# TODO: try it without UNK at all
# reverse the array to get word2idx mapping
word2idx = {w:i for i, w in enumerate(top_words)}
unk = word2idx['<UNK>']
# for w in ('king', 'man', 'queen', 'woman', 'france', 'paris', \
# 'london', 'england', 'italy', 'rome', \
# 'france', 'french', 'english', 'england', \
# 'japan', 'japanese', 'chinese', 'china', \
# 'italian', 'australia', 'australian' \
# 'japan', 'tokyo', 'china', 'beijing'):
# assert(w in word2idx)
if not os.path.exists('pmi_counts_%s.npz' % V):
# init counts
wc_counts = lil_matrix((V, V))
### make PMI matrix
# add counts
k = 0
# for line in open('../large_files/text8'):
for f in files:
for line in open(f):
# don't count headers, structured data, lists, etc...
if line and line[0] not in ('[', '*', '-', '|', '=', '{', '}'):
line_as_idx = []
for word in remove_punctuation(line).lower().split():
if word in word2idx:
idx = word2idx[word]
# line_as_idx.append(idx)
else:
idx = unk
# pass
line_as_idx.append(idx)
for i, w in enumerate(line_as_idx):
# keep count
k += 1
if k % 10000 == 0:
print("%s/%s" % (k, num_tokens))
start = max(0, i - context_size)
end = min(len(line_as_idx), i + context_size)
for c in line_as_idx[start:i]:
wc_counts[w, c] += 1
for c in line_as_idx[i+1:end]:
wc_counts[w, c] += 1
print("Finished counting")
save_npz('pmi_counts_%s.npz' % V, csr_matrix(wc_counts))
else:
wc_counts = load_npz('pmi_counts_%s.npz' % V)
# context counts get raised ^ 0.75
c_counts = wc_counts.sum(axis=0).A.flatten() ** 0.75
c_probs = c_counts / c_counts.sum()
c_probs = c_probs.reshape(1, V)
# PMI(w, c) = #(w, c) / #(w) / p(c)
# pmi = wc_counts / wc_counts.sum(axis=1) / c_probs # works only if numpy arrays
pmi = wc_counts.multiply(1.0 / wc_counts.sum(axis=1) / c_probs).tocsr()
# this operation changes it to a coo_matrix
# which doesn't have functions we need, e.g log1p()
# so convert it back to a csr
print("type(pmi):", type(pmi))
logX = pmi.log1p() # would be logX = np.log(pmi.A + 1) in numpy
print("type(logX):", type(logX))
logX[logX < 0] = 0
### do alternating least squares
# latent dimension
D = 100
reg = 0.1
# initialize weights
W = np.random.randn(V, D) / np.sqrt(V + D)
b = np.zeros(V)
U = np.random.randn(V, D) / np.sqrt(V + D)
c = np.zeros(V)
mu = logX.mean()
costs = []
t0 = datetime.now()
for epoch in range(10):
print("epoch:", epoch)
delta = W.dot(U.T) + b.reshape(V, 1) + c.reshape(1, V) + mu - logX
# cost = ( delta * delta ).sum()
cost = np.multiply(delta, delta).sum()
# * behaves differently if delta is a "matrix" object vs "array" object
costs.append(cost)
### partially vectorized updates ###
# update W
# matrix = reg*np.eye(D) + U.T.dot(U)
# for i in range(V):
# vector = (logX[i,:] - b[i] - c - mu).dot(U)
# W[i] = np.linalg.solve(matrix, vector)
# # update b
# for i in range(V):
# numerator = (logX[i,:] - W[i].dot(U.T) - c - mu).sum()
# b[i] = numerator / V #/ (1 + reg)
# # update U
# matrix = reg*np.eye(D) + W.T.dot(W)
# for j in range(V):
# vector = (logX[:,j] - b - c[j] - mu).dot(W)
# U[j] = np.linalg.solve(matrix, vector)
# # update c
# for j in range(V):
# numerator = (logX[:,j] - W.dot(U[j]) - b - mu).sum()
# c[j] = numerator / V #/ (1 + reg)
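    # The vectorized updates below solve the same normal equations in one shot:
    # for fixed U, b, c the optimal row W[i] satisfies
    #   (reg*I + U^T U) W[i] = U^T (logX[i,:] - b[i] - c - mu)
    # and symmetrically for U[j]; b and c are just means of the residuals.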
### vectorized updates ###
# vectorized update W
matrix = reg*np.eye(D) + U.T.dot(U)
vector = (logX - b.reshape(V, 1) - c.reshape(1, V) - mu).dot(U).T
W = np.linalg.solve(matrix, vector).T
# vectorized update b
b = (logX - W.dot(U.T) - c.reshape(1, V) - mu).sum(axis=1) / V
# vectorized update U
matrix = reg*np.eye(D) + W.T.dot(W)
vector = (logX - b.reshape(V, 1) - c.reshape(1, V) - mu).T.dot(W).T
U = np.linalg.solve(matrix, vector).T
# vectorized update c
c = (logX - W.dot(U.T) - b.reshape(V, 1) - mu).sum(axis=0) / V
print("train duration:", datetime.now() - t0)
plt.plot(costs)
plt.show()
### test it
king = W[word2idx['king']]
man = W[word2idx['man']]
queen = W[word2idx['queen']]
woman = W[word2idx['woman']]
vec = king - man + woman
# find closest
# closest = None
# min_dist = float('inf')
# for i in range(len(W)):
# dist = cos_dist(W[i], vec)
# if dist < min_dist:
# closest = i
# min_dist = dist
# set word embedding matrix
# W = (W + U) / 2
distances = pairwise_distances(vec.reshape(1, D), W, metric='cosine').reshape(V)
idx = distances.argsort()[:10]
print("closest 10:")
for i in idx:
print(top_words[i], distances[i])
print("dist to queen:", cos_dist(W[word2idx['queen']], vec))
def analogy(pos1, neg1, pos2, neg2):
# don't actually use pos2 in calculation, just print what's expected
print("testing: %s - %s = %s - %s" % (pos1, neg1, pos2, neg2))
for w in (pos1, neg1, pos2, neg2):
if w not in word2idx:
print("Sorry, %s not in word2idx" % w)
return
p1 = W[word2idx[pos1]]
n1 = W[word2idx[neg1]]
p2 = W[word2idx[pos2]]
n2 = W[word2idx[neg2]]
vec = p1 - n1 + n2
distances = pairwise_distances(vec.reshape(1, D), W, metric='cosine').reshape(V)
idx = distances.argsort()[:10]
# pick the best that's not p1, n1, or n2
best_idx = -1
keep_out = [word2idx[w] for w in (pos1, neg1, neg2)]
for i in idx:
if i not in keep_out:
best_idx = i
break
print("got: %s - %s = %s - %s" % (pos1, neg1, top_words[best_idx], neg2))
print("closest 10:")
for i in idx:
print(top_words[i], distances[i])
print("dist to %s:" % pos2, cos_dist(p2, vec))
analogy('king', 'man', 'queen', 'woman')
analogy('miami', 'florida', 'dallas', 'texas')
# analogy('einstein', 'scientist', 'picasso', 'painter')
analogy('china', 'rice', 'england', 'bread')
analogy('man', 'woman', 'he', 'she')
analogy('man', 'woman', 'uncle', 'aunt')
analogy('man', 'woman', 'brother', 'sister')
analogy('man', 'woman', 'husband', 'wife')
analogy('man', 'woman', 'actor', 'actress')
analogy('man', 'woman', 'father', 'mother')
analogy('heir', 'heiress', 'prince', 'princess')
analogy('nephew', 'niece', 'uncle', 'aunt')
analogy('france', 'paris', 'japan', 'tokyo')
analogy('france', 'paris', 'china', 'beijing')
analogy('february', 'january', 'december', 'november')
analogy('france', 'paris', 'italy', 'rome')
analogy('paris', 'france', 'rome', 'italy')
analogy('france', 'french', 'england', 'english')
analogy('japan', 'japanese', 'china', 'chinese')
analogy('japan', 'japanese', 'italy', 'italian')
analogy('japan', 'japanese', 'australia', 'australian')
analogy('walk', 'walking', 'swim', 'swimming')
|
StarcoderdataPython
|
6400982
|
import os
import yaml
from pathlib import Path
from extract_data import extract
from utils import run_process, is_truthy
from settings import (
INPUT_PATH,
NISMOD_PATH,
RESULTS_PATH,
model_to_run,
part_of_sos_model,
sector_model,
timestep,
use_generated_scenario,
)
def extract_and_run():
extract()
go_to_nismod_root = "cd " + str(NISMOD_PATH)
print("Deciding - ", model_to_run)
run_process(go_to_nismod_root + " && smif list")
run_process(go_to_nismod_root + " && smif decide " + model_to_run)
print("Decided woop!")
|
StarcoderdataPython
|
4962597
|
<reponame>mqadri93/leetCode-py<gh_stars>0
class Solution(object):
def minCut(self, s):
"""
:type s: str
:rtype: int
"""
        def ispalindrome(s):
h = len(s)-1
l = 0
while(l<=h):
if s[h] != s[l]:
return False
l+=1
h-=1
return True
def traverse(s, d):
if s in d:
return d[s]
if len(s) == 1:
d[s] = 0
return 0
minpartition = float("inf")
for i in range(1, len(s)+1):
sub1 = s[:i]
                if ispalindrome(sub1):
sub2 = s[i:]
if sub2:
ret = traverse(sub2, d) + 1
if ret < minpartition:
minpartition = ret
else:
minpartition = 0
d[s] = minpartition
return minpartition
d = {}
return traverse(s, d)
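# Quick illustrative check (hypothetical input): Solution().minCut("aab") returns 1,
# i.e. the partition "aa" | "b", matching Palindrome Partitioning II.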
|
StarcoderdataPython
|
11349921
|
class Window:
def __init__(self, size):
self.window = []
self.size = size
def gate_out(self, data):
self.window.append(data)
output = ''
if self.size <= len(self.window):
output = str(self.window)
self.window = []
return output.encode('utf-8')
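# Illustrative sketch: with size=2 the first call only buffers, the second flushes.
# w = Window(2)
# w.gate_out('a')   # -> b''
# w.gate_out('b')   # -> b"['a', 'b']"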
|
StarcoderdataPython
|
6455012
|
<reponame>PeerHerholz/guideline_jupyter_book<filename>venv/Lib/site-packages/nbdime/args.py
# coding: utf-8
# Copyright (c) Jupyter Development Team.
# Distributed under the terms of the Modified BSD License.
import argparse
import json
import logging
import os
import sys
from ._version import __version__
from .config import (
get_defaults_for_argparse, build_config, entrypoint_configurables,
Namespace
)
from .diffing.notebooks import set_notebook_diff_targets, set_notebook_diff_ignores
from .gitfiles import is_gitref
from .ignorables import diff_ignorables
from .log import init_logging, set_nbdime_log_level
class ConfigBackedParser(argparse.ArgumentParser):
def parse_known_args(self, args=None, namespace=None):
entrypoint = self.prog.split(' ')[0]
try:
defs = get_defaults_for_argparse(entrypoint)
ignore = defs.pop('Ignore', None)
self.set_defaults(**defs)
if ignore:
set_notebook_diff_ignores(ignore)
except ValueError:
pass
return super(ConfigBackedParser, self).parse_known_args(args=args, namespace=namespace)
class LogLevelAction(argparse.Action):
def __init__(self, option_strings, dest, default=None, **kwargs):
# __call__ is not called if option not given:
level = getattr(logging, default or 'INFO')
init_logging(level=level)
set_nbdime_log_level(level)
super(LogLevelAction, self).__init__(option_strings, dest, default=default, **kwargs)
def __call__(self, parser, namespace, values, option_string=None):
setattr(namespace, self.dest, values)
level = getattr(logging, values)
set_nbdime_log_level(level, True)
class SkipAction(argparse.Action):
"""Action of an argument that will not be stored"""
def __init__(self, option_strings, dest, **kwargs):
super(SkipAction, self).__init__([], argparse.SUPPRESS, **kwargs)
def __call__(self, parser, ns, values, opttion_string=None):
pass
class PathType(object):
"""Argparse type for arguments that should be paths
No-op on Python 3, but casts Python 2 bytes to text
using sys.getfilesystemencoding()
"""
def __init__(self):
pass
def __call__(self, value):
if not isinstance(value, bytes):
return value
# py2: decode bytes to text
encoding = sys.getfilesystemencoding() or 'utf-8'
if encoding.lower() == 'ascii':
# ignore ascii and use utf-8
# if it really is ascii, this will still be correct,
# but it never should actually be ascii
encoding = 'utf-8'
return value.decode(encoding)
Path = PathType()
def modify_config_for_print(config):
output = {}
ns = None
for k, v in config.items():
if isinstance(v, dict):
output[k] = modify_config_for_print(v)
if not output[k]:
output[k] = '{}'
elif k in diff_ignorables and v is None:
if ns is None:
ns = Namespace(config)
for k2 in diff_ignorables:
setattr(ns, k2, config.get(k2, None))
process_exclusive_ignorables(ns, diff_ignorables)
output[k] = '<unset, resolves to {0}>'.format(
json.dumps(getattr(ns, k, v)))
else:
output[k] = json.dumps(v)
return output
class ConfigHelpAction(argparse.Action):
def __init__(self, option_strings, dest, help=None):
super(ConfigHelpAction, self).__init__(
option_strings, dest, nargs=0, help=help)
def __call__(self, parser, namespace, values, option_string=None):
from .prettyprint import pretty_print_dict, PrettyPrintConfig
header = entrypoint_configurables[parser.prog].__name__
config = build_config(parser.prog, True)
pretty_print_dict(
{
header: modify_config_for_print(config),
},
config=PrettyPrintConfig(out=sys.stderr)
)
sys.exit(1)
class IgnorableAction(argparse.Action):
"""Adds the supplied positive options and negative/ignore version as well"""
def __init__(self, option_strings, dest, default=None, required=False, help=None):
opts = []
for opt in option_strings:
if len(opt) == 2 and opt[0] == '-':
if not opt[1].islower():
raise ValueError('Single character flags should be lower-case for IgnorableAction')
opts.append(opt)
opts.append(opt.upper())
elif opt[:2] == '--':
opts.append(opt)
opts.append('--ignore-' + opt[2:])
else:
                raise ValueError('Could not turn option "%s" into an IgnorableAction option.' % opt)
# Put positives first, negatives last:
opts = opts[0::2] + opts[1::2]
super(IgnorableAction, self).__init__(
opts, dest, nargs=0, const=None,
default=default, required=required,
help=help)
def __call__(self, parser, ns, values, option_string=None):
if len(option_string) == 2:
setattr(ns, self.dest, option_string[1].islower())
else:
setattr(ns, self.dest, option_string[2 : 2 + len('ignore')] != 'ignore')
def process_exclusive_ignorables(ns, arg_names, default=True):
"""Parse a set of ignorables.
It checks that all specified options are either all positive or all negative.
    It then fills in any unset options on the namespace accordingly and returns
    whether any values were specified or not.
"""
# `toggle` tracks whether:
# - True: One or more positive options were defined
# - False: One or more negative options were defined
# - None: No options were defined
toggle = getattr(ns, arg_names[0])
for name in arg_names[1:]:
opt = getattr(ns, name)
if toggle is None:
toggle = opt
elif toggle != opt and opt is not None:
message = 'Arguments must either all be negative or all positive: %r' % (arg_names,)
raise argparse.ArgumentError(None, message)
if toggle is not None:
# One or more options were defined, set default to the opposite
default = not toggle
# Set all unset options to the default
for name in arg_names:
if getattr(ns, name) is None:
setattr(ns, name, default)
return toggle is not None
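# Illustrative sketch: if only --sources was passed (sources=True, all other
# ignorables None), this returns True and sets every unset ignorable to False,
# i.e. "process only what was named"; --ignore-sources alone would set them to True.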
def add_generic_args(parser):
"""Adds a set of arguments common to all nbdime commands.
"""
parser.add_argument(
'--version',
action="version",
version="%(prog)s " + __version__)
parser.add_argument(
'--config',
help="list the valid config keys and their current effective values",
action=ConfigHelpAction,
)
parser.add_argument(
'--log-level',
default='INFO',
choices=('DEBUG', 'INFO', 'WARN', 'ERROR', 'CRITICAL'),
help="set the log level by name.",
action=LogLevelAction,
)
def add_git_config_subcommand(subparsers, enable, disable, subparser_help, enable_help, disable_help):
# Add subparser
config = subparsers.add_parser('config',
description=subparser_help)
# Option for git scope (global/system):
scope = config.add_mutually_exclusive_group()
scope.add_argument('--global', action='store_const', dest='scope',
const='global',
help="configure your global git config instead of the current repo"
)
scope.add_argument('--system', action='store_const', dest='scope',
const='system',
help="configure your system git config instead of the current repo"
)
# Add enable/disable flags
enable_disable = config.add_mutually_exclusive_group(required=True)
enable_disable.add_argument('--enable', action='store_const',
dest='config_func', const=enable,
help=enable_help
)
enable_disable.add_argument('--disable', action='store_const',
dest='config_func', const=disable,
help=disable_help
)
return config
def add_web_args(parser, default_port=8888):
"""Adds a set of arguments common to all commands that show a web gui.
"""
port_help = (
"specify the port you want the server to run on. Default is %d%s." % (
default_port, " (random)" if default_port == 0 else ""
))
parser.add_argument(
'-p', '--port',
default=default_port,
type=int,
help=port_help)
parser.add_argument(
'-b', '--browser',
default=None,
type=str,
help="specify the browser to use, to override the system default.")
parser.add_argument(
'--persist',
action="store_true",
default=False,
help="prevent server shutting down on remote close request (when these"
" would normally be supported)."
)
parser.add_argument(
'--ip',
default='127.0.0.1',
help="specify the interface to listen to for the web server. "
"NOTE: Setting this to anything other than 127.0.0.1/localhost "
"might comprimise the security of your computer. Use with care!")
cwd = os.path.abspath(os.path.curdir)
parser.add_argument(
'-w', '--workdirectory',
default=cwd,
help="specify the working directory you want "
"the server to run from. Default is the "
"actual cwd at program start.")
parser.add_argument(
'--base-url',
default='/',
help="The base URL prefix under which to run the web app")
parser.add_argument(
'--show-unchanged',
dest='hide_unchanged',
action="store_false",
default=True,
help="show unchanged cells by default"
)
def add_diff_args(parser):
"""Adds a set of arguments for commands that perform diffs.
Note:
Merge applications also performs diff operations to compute
the merge, so these arguments should also be included there.
"""
# TODO: Define sensible strategy variables and implement
#parser.add_argument('-X', '--diff-strategy',
# default="default", choices=("foo", "bar"),
# help="specify the diff strategy to use.")
# Things we can choose to diff or not
ignorables = parser.add_argument_group(
title='ignorables',
description='Set which parts of the notebook (not) to process.')
ignorables.add_argument(
'-s', '--sources',
action=IgnorableAction,
help="process/ignore sources.")
ignorables.add_argument(
'-o', '--outputs',
action=IgnorableAction,
help="process/ignore outputs.")
ignorables.add_argument(
'-a', '--attachments',
action=IgnorableAction,
help="process/ignore attachments.")
ignorables.add_argument(
'-m', '--metadata',
action=IgnorableAction,
help="process/ignore metadata.")
ignorables.add_argument(
'-d', '--details',
action=IgnorableAction,
help="process/ignore details not covered by other options.")
def add_diff_cli_args(parser):
"""Adds a set of arguments for CLI diff commands (i.e. not web).
"""
parser.add_argument(
'--color-words',
action='store_true', default=False,
help=("whether to pass the --color-words flag to any internal calls "
"to git diff")
)
def add_filter_args(diff_parser):
"""Adds configuration for git commands where filter use is flagged"""
# Ideally, we would want to apply the filter only if we knew
# the file was not from a blob. However, this is not possible:
# If remote file path is equal to repo file path, it implies
# that the hex of remote equals the hex of the file on disk.
# Two possible cases can cause this:
# 1) Diffing against working dir (or stage when entire file is staged)
# 2) Diffing against a blob (clean) that happens to have the same hex as
# the (smudged) file in working tree.
# Condition 1 should have filter applied, 2 should not.
# We can learn something by comparing the remote hash to the hash of the
# file in HEAD.
    # - If they are equal, we know that it cannot come from a diff
    #   against the working tree (git would not see it as changed),
# so it must be from a blob (clean). No filter.
# - If they differ, consider the setup:
# git co A; git co B -- file.path; git reset A
# + remote could be from a working-tree diff: git diff (smudged, apply filter).
# + remote could be from a blob: git diff A B (clean, no filter).
#
    # These are indistinguishable to us. Therefore, we will always
# apply the filter to the remote file if flag use_filter is set.
diff_parser.add_argument(
'--use-filter',
action='store_true', default=False,
help='apply any configured git filters on remote')
def add_git_diff_driver_args(diff_parser):
"""Adds a set of 7 stanard git diff driver arguments:
path old-file old-hex old-mode new-file new-hex new-mode [ rename-to rename-metadata ]
Note: Only path, base and remote are added to parsed namespace
"""
add_filter_args(diff_parser)
diff_parser.add_argument('path', type=Path)
diff_parser.add_argument('base', type=Path, nargs='?', default=None)
diff_parser.add_argument('base_sha1', nargs='?', default=None, action=SkipAction)
diff_parser.add_argument('base_mode', nargs='?', default=None, action=SkipAction)
diff_parser.add_argument('remote', type=Path, nargs='?', default=None)
diff_parser.add_argument('remote_sha1', nargs='?', default=None, action=SkipAction)
diff_parser.add_argument('remote_mode', nargs='?', default=None, action=SkipAction)
diff_parser.add_argument('rename_to', type=Path, nargs='?', default=None, action=SkipAction)
diff_parser.add_argument('rename_metadata', type=Path, nargs='?', default=None, action=SkipAction)
def process_diff_flags(args):
any_flags_given = process_exclusive_ignorables(args, diff_ignorables)
if any_flags_given:
# Note: This will blow away any options set via config (for these fields)
set_notebook_diff_targets(
args.sources, args.outputs, args.attachments, args.metadata,
args.details)
def resolve_diff_args(args):
"""Resolve ambiguity of path vs base/remote for git:
Cases:
- No args: Use defaults
- One arg: Either base or path, check with is_gitref.
- Two args or more: Check if first two are base/remote by is_gitref
"""
base = args.base
remote = args.remote
paths = getattr(args, 'paths', None)
if not paths:
paths = None
if remote is None and paths is None:
# One arg only:
if not is_gitref(base):
paths = base
base = 'HEAD'
# Two or more args:
elif paths is None:
# Two exactly
# - Two files (not git-mode, do nothing)
# - Base gitref one file (remote=None, path = file)
# - Base gitref remote gitref (do nothing)
if is_gitref(base) and not is_gitref(remote):
paths = remote
remote = None
elif base and remote:
# Three or more
if not is_gitref(base):
paths = [base, remote] + paths
base = remote = None
elif is_gitref(base) and not is_gitref(remote):
paths = [remote] + paths
remote = None
return base, remote, paths
def add_merge_args(parser):
"""Adds a set of arguments for commands that perform merges.
"""
from .merging.notebooks import cli_conflict_strategies, cli_conflict_strategies_input, cli_conflict_strategies_output
parser.add_argument(
'--merge-strategy',
default="inline",
choices=cli_conflict_strategies,
help="the merge strategy to use.")
parser.add_argument(
'--input-strategy',
default=None,
choices=cli_conflict_strategies_input,
help="the merge strategy to use for inputs "
"(overrides 'merge-strategy' for inputs).")
parser.add_argument(
'--output-strategy',
default=None,
choices=cli_conflict_strategies_output,
help="the merge strategy to use for outputs "
"(overrides 'merge-strategy' for outputs).")
parser.add_argument(
'--no-ignore-transients',
dest='ignore_transients',
action="store_false",
default=True,
help="disallow deletion of transient data such as outputs and "
"execution counts in order to resolve conflicts.")
filename_help = {
"notebook": "The notebook filename.",
"base": "The base notebook filename.",
"local": "The local modified notebook filename.",
"remote": "The remote modified notebook filename.",
"merged": "The merge result notebook filename.",
"patch": "The patch filename, output from nbdiff.",
}
def add_filename_args(parser, names):
"""Add the base, local, remote, and merged positional arguments.
Helps getting consistent doc strings.
"""
for name in names:
parser.add_argument(name, type=Path, help=filename_help[name])
def add_prettyprint_args(parser):
"""Adds optional arguments for controlling pretty print behavior.
"""
parser.add_argument(
'--no-color',
dest='use_color',
action="store_false",
default=True,
help=("prevent use of ANSI color code escapes for text output")
)
parser.add_argument(
'--no-git',
dest='use_git',
action="store_false",
default=True,
help=("prevent use of git for formatting diff/merge text output")
)
parser.add_argument(
'--no-use-diff',
dest='use_diff',
action="store_false",
default=True,
help=("prevent use of diff/diff3 for formatting diff/merge text output")
)
def prettyprint_config_from_args(arguments, **kwargs):
from .prettyprint import PrettyPrintConfig
return PrettyPrintConfig(
include=arguments,
color_words=getattr(arguments, 'color_words', False),
use_color=getattr(arguments, 'use_color', True),
use_git=getattr(arguments, 'use_git', True),
use_diff=getattr(arguments, 'use_diff', True),
**kwargs
)
def args_for_server(arguments):
"""Translate standard arguments into kwargs for running webapp.nbdimeserver.main"""
# Map format: <arguments.name>='<kwargs[key]>'
kmap = dict(ip='ip',
port='port',
workdirectory='cwd',
base_url='base_url',
hide_unchanged='hide_unchanged',
)
ret = {kmap[k]: v for k, v in vars(arguments).items() if k in kmap}
if 'persist' in arguments:
ret['closable'] = not arguments.persist
return ret
def args_for_browse(arguments):
"""Translate standard arguments into kwargs for webapp.webutil.browse()"""
# Map format: <arguments.name>='<kwargs[key]>'
kmap = dict(ip='ip',
browser='browsername',
base_url='base_url',
)
return {kmap[k]: v for k, v in vars(arguments).items() if k in kmap}
|
StarcoderdataPython
|
5087036
|
<reponame>lifei96/Medium-crawler-with-data-parser
# -*- coding: utf-8 -*-
import urllib2
import cookielib
import re
import json
import datetime
import codecs
import os
class TopStories(object):
def __init__(self):
super(TopStories, self).__init__()
self.data = {
'date': "",
'url': "",
'stories': [],
}
def getstr(self):
result = json.dumps(self.data, indent=4)
return result
class Story(object):
def __init__(self):
super(Story, self).__init__()
self.data = {
'story_id': "",
'author': "",
'timestamp': 0,
'published_date': "",
'tags': [],
'recommends': 0,
'responses': 0,
'success': 1,
}
def get_story(url):
story = Story()
cj = cookielib.MozillaCookieJar()
opener = urllib2.build_opener(urllib2.HTTPCookieProcessor(cj))
req = urllib2.Request(url)
req.add_header("User-agent", 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) \
Chrome/50.0.2661.102 Safari/537.36')
try:
response = opener.open(req, timeout=10)
except urllib2.URLError:
story.data['success'] = 0
print('timeout')
return story
data = response.read()
story_id = re.findall('data-post-id="(.*?)" data-is-icon', data)
if not story_id:
story.data['success'] = 0
print('-----fail to get story_id')
else:
story.data['story_id'] = story_id[0]
author = re.findall('"username":"(.*?)","createdAt"', data)
if not author:
story.data['success'] = 0
print('-----fail to get author')
else:
story.data['author'] = author[0]
timestamp = re.findall('"firstPublishedAt":(.*?),"latestPublishedAt"', data)
if not timestamp:
story.data['success'] = 0
print('-----fail to get timestamp')
else:
story.data['timestamp'] = float(timestamp[0])
story.data['published_date'] = datetime.date.fromtimestamp(story.data['timestamp']/1000.0).isoformat()
tags = re.findall('false,"tags":(.*?),"socialRecommendsCount"', data)
if not tags:
story.data['success'] = 0
print('-----fail to get tags')
else:
story.data['tags'] = json.loads(tags[0])
recommends = re.findall('"recommends":(.*?),"socialRecommends"', data)
if not recommends:
story.data['success'] = 0
print('-----fail to get recommends')
else:
story.data['recommends'] = eval(recommends[0])
responses = re.findall('"responsesCreatedCount":(.*?),"links"', data)
if not responses:
story.data['success'] = 0
print('-----fail to get responses')
else:
story.data['responses'] = eval(responses[0])
return story
START_DATE = datetime.date(2014, 9, 10)
END_DATE = datetime.date(2016, 7, 16)
def get_top_stories():
current_date = START_DATE
while current_date <= END_DATE:
top_stories = TopStories()
date_string = current_date.strftime("%B-%d-%Y").lower()
url = "https://medium.com/browse/top/" + date_string
top_stories.data['date'] = current_date.isoformat()
top_stories.data['url'] = url
cj = cookielib.MozillaCookieJar()
opener = urllib2.build_opener(urllib2.HTTPCookieProcessor(cj))
req = urllib2.Request(url)
req.add_header("User-agent", 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) \
Chrome/50.0.2661.102 Safari/537.36')
response = opener.open(req, timeout=10)
data = response.read()
stories = []
story_url = re.findall('<a class="link link--darken" href="(.*?)\?source=top_stories---------[0-9]*-" data-action="open-post"', data)
num = len(story_url)
for i in range(num):
story_data = get_story(story_url[i]).data
if story_data['success']:
stories.append(story_data)
print(i)
top_stories.data['stories'] = stories
out = codecs.open("./TopStories/%s.json" % current_date.isoformat(), 'w', 'utf-8')
out.write(top_stories.getstr())
out.close()
print("-----%s obtained" % current_date.isoformat())
current_date = current_date + datetime.timedelta(days=1)
if __name__ == '__main__':
if not os.path.exists('./TopStories'):
os.mkdir('./TopStories')
get_top_stories()
|
StarcoderdataPython
|
3513297
|
<reponame>lilianwaweru/Blog
from .import db
from werkzeug.security import generate_password_hash,check_password_hash
from flask_login import UserMixin
from . import login_manager
@login_manager.user_loader
def load_user(user_id):
return User.query.get(int(user_id))
class User(UserMixin,db.Model):
__tablename__ = 'users'
id = db.Column(db.Integer,primary_key = True)
username = db.Column(db.String(255))
email = db.Column(db.String(255),unique = True,index = True)
role_id = db.Column(db.Integer,db.ForeignKey('roles.id'))
bio = db.Column(db.String(255))
profile_pic_path = db.Column(db.String())
password_hash = db.Column(db.String(255))
blog = db.relationship('Blog',backref = 'users',lazy ="dynamic")
comment = db.relationship('Comment',backref='users',lazy='dynamic')
def __repr__(self):
return f'User {self.username}'
@property
def password(self):
raise AttributeError('You cannot read the password attribute')
@password.setter
def password(self, password):
self.password_hash = generate_password_hash(password)
def verify_password(self,password):
return check_password_hash(self.password_hash,password)
class Role(db.Model):
__tablename__ = 'roles'
id = db.Column(db.Integer,primary_key = True)
name = db.Column(db.String(255))
users = db.relationship('User',backref = 'role',lazy="dynamic")
def __repr__(self):
return f'User {self.name}'
class Blog(db.Model):
__tablename__ = 'blogs'
id = db.Column(db.Integer,primary_key = True)
content = db.Column(db.String(500))
title = db.Column(db.String)
category = db.Column(db.String)
comment = db.relationship('Comment',backref = 'blogs',lazy ="dynamic")
user_id = db.Column(db.Integer, db.ForeignKey('users.id'))
def save_blog(self):
db.session.add(self)
db.session.commit()
@classmethod
def get_blog(cls,category):
blog = Blog.query.filter_by(category = category).all()
return blog
@classmethod
def get_all_blogs(cls):
blogs = Blog.query.order_by('id').all()
return blogs
@classmethod
def get_category(cls,cat):
        category = Blog.query.filter_by(category=cat).order_by('id').all()
return category
def __repr__(self):
        return f'Blog {self.title}, {self.category}'
class Comment(db.Model):
__tablename__ = 'comments'
id = db.Column(db.Integer,primary_key = True)
comment = db.Column(db.String(1000))
user_id = db.Column(db.Integer,db.ForeignKey("users.id"))
blog_id = db.Column(db.Integer,db.ForeignKey("blogs.id"))
def save_comment(self):
db.session.add(self)
db.session.commit()
@classmethod
def get_comment(id,blog):
comment = Comment.query.filter_by(blog_id = blog).all()
return comment
def __repr__(self):
return f'Comment{self.comment}'
class Subscriber(db.Model):
__tablename__ = 'subscribers'
id = db.Column(db.Integer, primary_key = True)
email = db.Column(db.String(255))
username = db.Column(db.String(255), index = True)
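# Illustrative usage sketch (assumes an application context and a configured db;
# values are hypothetical):
# blog = Blog(title='Hello', content='First post', category='personal', user_id=1)
# blog.save_blog()
# all_blogs = Blog.get_all_blogs()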
|
StarcoderdataPython
|
1904797
|
<gh_stars>0
import luigi
import gokart
class inherits_config_params:
def __init__(self, config_class: luigi.Config):
self.config_class: luigi.Config = config_class
def __call__(self, task: gokart.TaskOnKart):
config_class = self.config_class
# wrap task to prevent task name from being changed
@luigi.task._task_wraps(task)
class Wrapped(task):
@classmethod
def get_param_values(cls, params, args, kwargs):
for k, v in config_class().param_kwargs.items():
if hasattr(cls, k) and k not in kwargs:
kwargs[k] = v
return super(Wrapped, cls).get_param_values(params, args, kwargs)
return Wrapped
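# Illustrative usage sketch (config/task/parameter names are hypothetical):
# class MyConfig(luigi.Config):
#     workspace_directory = luigi.Parameter(default='./resources')
#
# @inherits_config_params(MyConfig)
# class MyTask(gokart.TaskOnKart):
#     workspace_directory = luigi.Parameter()
#
# MyTask() then picks up workspace_directory from MyConfig unless it is passed explicitly.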
|
StarcoderdataPython
|
3598899
|
<filename>maze/maze.py
import random
import numpy as np
import matplotlib.pyplot as plt
class Maze(object):
def __init__(self, width = 10, height = 5):
self.width = 2*width + 1
self.height = 2*height + 1
self.start = None
self.end = None
self.n_el = self.width * self.height
self.grid = [' ']*(self.n_el)
for i in range(len(self.grid)):
if (i // self.width)%2 == 0 and i%2 == 0:
self.grid[i] = u"\u2588"
elif i%2 == 1:
self.grid[i] = ' '
self.solutions = []
self.solution = None
self.max_length = self.n_el
def set_start(self, position):
self.grid[position] = 'S'
self.start = position
def set_end(self, position):
self.grid[position] = 'E'
self.end = position
def __str__(self):
grid_as_text = ''.join(self.grid)
ret = ''
for s in [grid_as_text[i*self.width:(i+1)*self.width] for i in range(self.height)]:
ret += s + "\n"
return ret
def move(self, cell, direction, step = 1):
new_cell = cell + step * direction[0] + step * self.width*direction[1]
if 0 <= new_cell < self.n_el and self.grid[new_cell] != '#':
return new_cell
else:
return None
def make_maze(self, percentage = 50):
n_elements = int(percentage * self.n_el / 100)
for i in range(n_elements):
p = int((self.n_el//2) * random.random())
self.grid[2*p+1] = u"\u2588"
def next_move(self, position, length, solution=[]):
if length > self.max_length:
return
grid_copy = self.grid[:]
sol = solution[:]
sol.append(position)
for d in [[-1, 0], [1,0], [0,-1], [0,1]]:
new_cell = self.move(position, d, 1)
if new_cell != None and new_cell not in sol:
if self.grid[new_cell] == 'E':
if len(sol) < self.max_length:
self.max_length = len(sol)
self.solutions.append(sol)
return
elif self.grid[new_cell] == ' ':
self.next_move(new_cell, length+1, sol)
def show_solution(self):
if len(self.solutions) == 0:
return
self.solution = self.solutions[0]
for s in self.solutions:
if len(s) < len(self.solution):
self.solution = s
for p in self.solution:
if p!= self.start and p != self.end:
self.grid[p] = '.'
def solve(self):
self.next_move(self.start, 0)
self.show_solution()
def plot(self):
d = {'E': 0, 'S':1, u"\u2588": 3, ' ':2, '.': 2}
data = [d[i] for i in self.grid]
data = np.array( data )
        data = data.reshape((self.height, self.width))
plt.pcolormesh(data)
plt.axes().set_aspect('equal')
plt.xticks([])
plt.yticks([])
plt.axes().invert_yaxis()
if self.solution != None:
X = [i // self.width + 0.5 for i in self.solution]
Y = [i % self.width + 0.5 for i in self.solution]
plt.scatter(Y, X)
plt.show()
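# Illustrative usage sketch (start/end positions are hypothetical open cells):
# if __name__ == '__main__':
#     m = Maze(width=10, height=5)
#     m.make_maze(percentage=40)
#     m.set_start(m.width + 1)
#     m.set_end((m.height - 2) * m.width + (m.width - 2))
#     m.solve()
#     print(m)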
|
StarcoderdataPython
|
1837776
|
<filename>willie/modules/adminchannel.py
# coding=utf8
"""
admin.py - Willie Admin Module
Copyright 2010-2011, <NAME>, <NAME>, and <NAME>
Copyright © 2012, <NAME> <<EMAIL>>
Licensed under the Eiffel Forum License 2.
http://willie.dftba.net/
"""
from __future__ import unicode_literals
import re
from willie.module import commands, priority, OP, HALFOP
from willie.tools import Nick, get_hostmask_regex
def setup(bot):
#Having a db means pref's exists. Later, we can just use `if bot.db`.
if bot.db and not bot.db.preferences.has_columns('topic_mask'):
bot.db.preferences.add_columns(['topic_mask'])
@commands('op')
def op(bot, trigger):
"""
    Gives op to the specified user; if no nick is given, it is granted to the user issuing the command.
"""
if bot.privileges[trigger.sender][trigger.nick] < OP:
return
if bot.privileges[trigger.sender][bot.nick] < OP:
return bot.reply("No soy operador de este canal!")
nick = trigger.group(2)
channel = trigger.sender
if not nick:
nick = trigger.nick
bot.write(['MODE', channel, "+o", nick])
@commands('deop')
def deop(bot, trigger):
"""
    Removes op from the specified user. If no nick is given, op is removed from the user issuing the command.
"""
if bot.privileges[trigger.sender][trigger.nick] < OP:
return
if bot.privileges[trigger.sender][bot.nick] < OP:
return bot.reply("No soy operador en este canal!")
nick = trigger.group(2)
channel = trigger.sender
if not nick:
nick = trigger.nick
bot.write(['MODE', channel, "-o", nick])
@commands('voice')
def voice(bot, trigger):
"""
    Gives voice to the specified user; if no nick is given, it is granted to the user issuing the command.
"""
if bot.privileges[trigger.sender][trigger.nick] < OP:
return
if bot.privileges[trigger.sender][bot.nick] < HALFOP:
return bot.reply("No soy operador de este canal!")
nick = trigger.group(2)
channel = trigger.sender
if not nick:
nick = trigger.nick
bot.write(['MODE', channel, "+v", nick])
@commands('devoice')
def devoice(bot, trigger):
"""
    Removes voice from the specified user; if no nick is given, it is removed from the user issuing the command.
"""
if bot.privileges[trigger.sender][trigger.nick] < OP:
return
if bot.privileges[trigger.sender][bot.nick] < HALFOP:
return bot.reply("No soy operador de este canal!")
nick = trigger.group(2)
channel = trigger.sender
if not nick:
nick = trigger.nick
bot.write(['MODE', channel, "-v", nick])
@commands('kick')
@priority('high')
def kick(bot, trigger):
"""
    Kicks a user from the current channel.
"""
if bot.privileges[trigger.sender][trigger.nick] < OP:
return
if bot.privileges[trigger.sender][bot.nick] < HALFOP:
return bot.reply("No soy un operador del canal!")
text = trigger.group().split()
argc = len(text)
if argc < 2:
return
opt = Nick(text[1])
nick = opt
channel = trigger.sender
reasonidx = 2
if not opt.is_nick():
if argc < 3:
return
nick = text[2]
channel = opt
reasonidx = 3
reason = ' '.join(text[reasonidx:])
if nick != bot.config.nick:
bot.write(['KICK', channel, nick, reason])
def configureHostMask(mask):
if mask == '*!*@*':
return mask
if re.match('^[^.@!/]+$', mask) is not None:
return '%s!*@*' % mask
if re.match('^[^@!]+$', mask) is not None:
return '*!*@%s' % mask
m = re.match('^([^!@]+)@$', mask)
if m is not None:
return '*!%s@*' % m.group(1)
m = re.match('^([^!@]+)@([^@!]+)$', mask)
if m is not None:
return '*!%s@%s' % (m.group(1), m.group(2))
    m = re.match('^([^!@]+)!([^!@]+)@?$', mask)
if m is not None:
return '%s!%s@*' % (m.group(1), m.group(2))
return ''
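# Illustrative expansions of the rules above (inputs are hypothetical):
# configureHostMask('someuser')              -> 'someuser!*@*'
# configureHostMask('irc.example.net')       -> '*!*@irc.example.net'
# configureHostMask('ident@')                -> '*!ident@*'
# configureHostMask('ident@irc.example.net') -> '*!ident@irc.example.net'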
@commands('ban')
@priority('high')
def ban(bot, trigger):
"""
    Bans a user from the channel.
"""
if bot.privileges[trigger.sender][trigger.nick] < OP:
return
if bot.privileges[trigger.sender][bot.nick] < HALFOP:
return bot.reply("No soy operador en este canal!")
text = trigger.group().split()
argc = len(text)
if argc < 2:
return
opt = Nick(text[1])
banmask = opt
channel = trigger.sender
if not opt.is_nick():
if argc < 3:
return
channel = opt
banmask = text[2]
banmask = configureHostMask(banmask)
if banmask == '':
return
bot.write(['MODE', channel, '+b', banmask])
@commands('unban')
def unban(bot, trigger):
"""
    Removes a ban from the channel.
"""
if bot.privileges[trigger.sender][trigger.nick] < OP:
return
if bot.privileges[trigger.sender][bot.nick] < HALFOP:
return bot.reply("No soy un operador de este canal!")
text = trigger.group().split()
argc = len(text)
if argc < 2:
return
opt = Nick(text[1])
banmask = opt
channel = trigger.sender
if not opt.is_nick():
if argc < 3:
return
channel = opt
banmask = text[2]
banmask = configureHostMask(banmask)
if banmask == '':
return
bot.write(['MODE', channel, '-b', banmask])
@commands('quiet')
def quiet(bot, trigger):
"""
    Quiets (mutes) a user or hostmask.
"""
if bot.privileges[trigger.sender][trigger.nick] < OP:
return
if bot.privileges[trigger.sender][bot.nick] < OP:
return bot.reply("No soy operador de este canal!!")
text = trigger.group().split()
argc = len(text)
if argc < 2:
return
opt = Nick(text[1])
quietmask = opt
channel = trigger.sender
if not opt.is_nick():
if argc < 3:
return
quietmask = text[2]
channel = opt
quietmask = configureHostMask(quietmask)
if quietmask == '':
return
bot.write(['MODE', channel, '+q', quietmask])
@commands('unquiet')
def unquiet(bot, trigger):
"""
    Removes the quiet from a user or hostmask.
"""
if bot.privileges[trigger.sender][trigger.nick] < OP:
return
if bot.privileges[trigger.sender][bot.nick] < OP:
return bot.reply("No soy operador de este canal!!")
text = trigger.group().split()
argc = len(text)
if argc < 2:
return
opt = Nick(text[1])
quietmask = opt
channel = trigger.sender
if not opt.is_nick():
if argc < 3:
return
quietmask = text[2]
channel = opt
quietmask = configureHostMask(quietmask)
if quietmask == '':
return
    bot.write(['MODE', channel, '-q', quietmask])
@commands('kickban', 'kb')
@priority('high')
def kickban(bot, trigger):
"""
    Kicks and bans a user or hostmask.
    %kickban [#chan] user1 user!*@* get out of here
"""
if bot.privileges[trigger.sender][trigger.nick] < OP:
return
if bot.privileges[trigger.sender][bot.nick] < HALFOP:
return bot.reply("No soy operador de este canal!!")
text = trigger.group().split()
argc = len(text)
if argc < 4:
return
opt = Nick(text[1])
    nick = opt
    channel = trigger.sender
    mask = text[2]
    reasonidx = 3
if not opt.is_nick():
if argc < 5:
return
channel = opt
nick = text[2]
mask = text[3]
reasonidx = 4
reason = ' '.join(text[reasonidx:])
mask = configureHostMask(mask)
if mask == '':
return
bot.write(['MODE', channel, '+b', mask])
    bot.write(['KICK', channel, nick, reason])
@commands('topic')
def topic(bot, trigger):
"""
This gives ops the ability to change the topic.
The bot must be a Channel Operator for this command to work.
"""
purple, green, bold = '\x0306', '\x0310', '\x02'
if bot.privileges[trigger.sender][trigger.nick] < OP:
return
if bot.privileges[trigger.sender][bot.nick] < HALFOP:
return bot.reply("No soy operador de este canal!!")
text = trigger.group(2)
    if not text:
return
channel = trigger.sender.lower()
narg = 1
mask = None
if bot.db and channel in bot.db.preferences:
mask = bot.db.preferences.get(channel, 'topic_mask')
narg = len(re.findall('%s', mask))
if not mask or mask == '':
mask = purple + 'Welcome to: ' + green + channel + purple \
+ ' | ' + bold + 'Topic: ' + bold + green + '%s'
top = trigger.group(2)
text = tuple()
if top:
text = tuple(unicode.split(top, '~', narg))
if len(text) != narg:
message = "Not enough arguments. You gave " + str(len(text)) + ', it requires ' + str(narg) + '.'
return bot.say(message)
topic = mask % text
bot.write(('TOPIC', channel + ' :' + topic))
@commands('tmask')
def set_mask(bot, trigger):
"""
Set the mask to use for .topic in the current channel. %s is used to allow
substituting in chunks of text.
"""
if bot.privileges[trigger.sender][trigger.nick] < OP:
return
if not bot.db:
bot.say("I'm afraid I can't do that.")
else:
bot.db.preferences.update(trigger.sender.lower(), {'topic_mask': trigger.group(2)})
bot.say("Gotcha, " + trigger.nick)
@commands('showmask')
def show_mask(bot, trigger):
"""Show the topic mask for the current channel."""
if bot.privileges[trigger.sender][trigger.nick] < OP:
return
if not bot.db:
bot.say("I'm afraid I can't do that.")
elif trigger.sender.lower() in bot.db.preferences:
bot.say(bot.db.preferences.get(trigger.sender.lower(), 'topic_mask'))
else:
bot.say("%s")
@commands('kickbanb')
def kickban_beta(bot, trigger):
text = trigger.group(2).split(' ')
nick = text[0]
users = bot.privileges[trigger.sender]
#for nick in users:
#kicklist += if_kick(nick, mask)
bot.say(' '.join(users))
#bot.say(trigger.sender)
def if_kick(nick, mask):
get_hostmask_regex(mask)
|
StarcoderdataPython
|
5151895
|
from django.urls import include, path
from wab.core.export_database.views import ExportPdfView, ExportExcelView, ExportTextView, DownloadFileExportViews, \
ProcessFileExportViews
from wab.core.import_database.views import ImportCsvView
from wab.core.views import ListOperatorView, ListJoinView, ListRelationView, ListDataTypeView, ListRegexTypeView
urlpatterns = [
path("users/", include("wab.core.users.urls")),
path("", include("wab.core.custom_column.urls")),
path("", include("wab.core.custom_column_fk.urls")),
path("db-provider/", include("wab.core.db_provider.urls")),
path("sql-function/", include("wab.core.sql_function.urls")),
path("export/pdf/<int:connection>/<str:table_name>/", ExportPdfView.as_view(), name="export-pdf"),
path("export/excel/<int:connection>/<str:table_name>/", ExportExcelView.as_view(), name="export-excel"),
path("export/text/<int:connection>/<str:table_name>/", ExportTextView.as_view(), name="export-text"),
path("download/export-file/<int:export_id>/", DownloadFileExportViews.as_view(), name="download-export-file"),
# path("process-export/", ProcessFileExportViews.as_view(), name="ProcessFileExportViews"),
path("import/csv/<int:connection>/<str:table_name>/", ImportCsvView.as_view(), name="import-csv"),
path("notifications/", include("wab.core.notifications.urls")),
path("sharing-files/", include("wab.core.sharing_files.urls")),
path("list-operator/", ListOperatorView.as_view(), name='list-operator'),
path("list-join/", ListJoinView.as_view(), name='list-operator'),
path("list-relation/", ListRelationView.as_view(), name='list-relation'),
path("list-data-type/", ListDataTypeView.as_view(), name='list-data-type'),
path("list-regex-type/", ListRegexTypeView.as_view(), name='list-regex-type'),
]
|
StarcoderdataPython
|
104051
|
# Generated by Django 3.1.1 on 2020-10-16 09:38
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('info', '0013_auto_20201009_1912'),
]
operations = [
migrations.AlterField(
model_name='player',
name='points_gained',
field=models.IntegerField(blank=True, null=True),
),
]
|
StarcoderdataPython
|
3500748
|
# repo: juxiangwu/image-processing
"""
Mani experimenting with facial information extraction.
This module is used to sort the CK+ dataset.
"""
import glob
from shutil import copyfile
# No need to modify this one as it is a helper script.
__version__ = "1.0, 01/04/2016"
__author__ = "<NAME>, 2016"
# Define emotion order
emotions = ["neutral", "anger", "contempt", "disgust",
"fear", "happy", "sadness", "surprise"]
# Returns a list of all folders with participant numbers
participants = glob.glob("source_emotions\\*")
# i = 1;
for x in participants:
# i += 1;
# store current participant number
part = "%s" % x[-4:]
# Store list of sessions for current participant
for sessions in glob.glob("%s\\*" % x):
for files in glob.glob("%s\\*" % sessions):
current_session = files[20:-30]
file1 = open(files, 'r')
# Emotions are encoded as a float, readline as float,
# then convert to integer.
emotion = int(float(file1.readline()))
# get path for last image in sequence, which contains the emotion
sourcefile_emotion = glob.glob(
"source_images\\%s\\%s\\*" % (part, current_session))[-1]
# do same for neutral image
sourcefile_neutral = glob.glob(
"source_images\\%s\\%s\\*" % (part, current_session))[0]
# Generate path to put neutral image
dest_neut = "sorted_set\\neutral\\%s" % sourcefile_neutral[25:]
# Do same for emotion containing image
dest_emot = "sorted_set\\%s\\%s" % (
emotions[emotion], sourcefile_emotion[25:])
# Copy file
copyfile(sourcefile_neutral, dest_neut)
copyfile(sourcefile_emotion, dest_emot)
# if i == 10:
# break;
|
StarcoderdataPython
|
1842152
|
from auth import *
from message import *
|
StarcoderdataPython
|
4815238
|
# filename: 360agent/plugins/process.py
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import psutil
import plugins
import sys
class Plugin(plugins.BasePlugin):
__name__ = 'process'
def run(self, *unused):
process = []
for proc in psutil.process_iter():
try:
pinfo = proc.as_dict(attrs=[
'pid', 'name', 'ppid', 'exe', 'cmdline', 'username',
'cpu_percent', 'memory_percent', 'io_counters'
])
try:
pinfo['cmdline'] = ' '.join(pinfo['cmdline']).strip()
except:
pass
if sys.version_info < (3,):
pinfo['cmdline'] = unicode(pinfo['cmdline'], sys.getdefaultencoding(), errors="replace").strip()
pinfo['name'] = unicode(pinfo['name'], sys.getdefaultencoding(), errors="replace")
pinfo['username'] = unicode(pinfo['username'], sys.getdefaultencoding(), errors="replace")
try:
pinfo['exe'] = unicode(pinfo['exe'], sys.getdefaultencoding(), errors="replace")
except:
pass
except psutil.NoSuchProcess:
pass
except psutil.AccessDenied:
pass
except:
pass
else:
process.append(pinfo)
return process
if __name__ == '__main__':
Plugin().execute()
|
StarcoderdataPython
|
8152785
|
import numpy as np
import torch
COCO_JOINTS = {
'Right Ankle': 16, 'Right Knee': 14, 'Right Hip': 12,
'Left Hip': 11, 'Left Knee': 13, 'Left Ankle': 15,
'Right Wrist': 10, 'Right Elbow': 8, 'Right Shoulder': 6,
'Left Shoulder': 5, 'Left Elbow': 7, 'Left Wrist': 9,
'Right Ear': 4, 'Left Ear': 3, 'Right Eye': 2, 'Left Eye': 1,
'Nose': 0
}
# The SMPL model (im smpl_official.py) returns a large superset of joints.
# Different subsets are used during training - e.g. H36M 3D joints convention and COCO 2D joints convention.
# Joint label conversions from SMPL to H36M/COCO/LSP
ALL_JOINTS_TO_COCO_MAP = [24, 26, 25, 28, 27, 16, 17, 18, 19, 20, 21, 1, 2, 4, 5, 7, 8] # Using OP Hips
ALL_JOINTS_TO_H36M_MAP = list(range(73, 90))
H36M_TO_J17 = [6, 5, 4, 1, 2, 3, 16, 15, 14, 11, 12, 13, 8, 10, 0, 7, 9]
H36M_TO_J14 = H36M_TO_J17[:14]
# Joint label and body part seg label matching
# 24 part seg: COCO Joints
TWENTYFOUR_PART_SEG_TO_COCO_JOINTS_MAP = {19: 7,
21: 7,
20: 8,
22: 8,
4: 9,
3: 10,
12: 13,
14: 13,
11: 14,
13: 14,
5: 15,
6: 16}
def convert_densepose_seg_to_14part_labels(densepose_seg):
"""
Convert 24 body-part labels (DensePose convention) to 14 body-part labels.
"""
if isinstance(densepose_seg, torch.Tensor):
fourteen_part_seg = torch.zeros_like(densepose_seg)
elif isinstance(densepose_seg, np.ndarray):
fourteen_part_seg = np.zeros_like(densepose_seg)
fourteen_part_seg[densepose_seg == 1] = 1
fourteen_part_seg[densepose_seg == 2] = 1
fourteen_part_seg[densepose_seg == 3] = 11
fourteen_part_seg[densepose_seg == 4] = 12
fourteen_part_seg[densepose_seg == 5] = 14
fourteen_part_seg[densepose_seg == 6] = 13
fourteen_part_seg[densepose_seg == 7] = 8
fourteen_part_seg[densepose_seg == 8] = 6
fourteen_part_seg[densepose_seg == 9] = 8
fourteen_part_seg[densepose_seg == 10] = 6
fourteen_part_seg[densepose_seg == 11] = 9
fourteen_part_seg[densepose_seg == 12] = 7
fourteen_part_seg[densepose_seg == 13] = 9
fourteen_part_seg[densepose_seg == 14] = 7
fourteen_part_seg[densepose_seg == 15] = 2
fourteen_part_seg[densepose_seg == 16] = 4
fourteen_part_seg[densepose_seg == 17] = 2
fourteen_part_seg[densepose_seg == 18] = 4
fourteen_part_seg[densepose_seg == 19] = 3
fourteen_part_seg[densepose_seg == 20] = 5
fourteen_part_seg[densepose_seg == 21] = 3
fourteen_part_seg[densepose_seg == 22] = 5
fourteen_part_seg[densepose_seg == 23] = 10
fourteen_part_seg[densepose_seg == 24] = 10
return fourteen_part_seg
def convert_multiclass_to_binary_labels(multiclass_labels):
"""
Converts multiclass segmentation labels into a binary mask.
"""
if isinstance(multiclass_labels, torch.Tensor):
binary_labels = torch.zeros_like(multiclass_labels)
elif isinstance(multiclass_labels, np.ndarray):
binary_labels = np.zeros_like(multiclass_labels)
binary_labels[multiclass_labels != 0] = 1
return binary_labels
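# Illustrative usage sketch (added for clarity, not part of the original file):
# any non-zero body-part label becomes 1 in the binary mask, background stays 0.
def _binary_mask_example():
    part_seg = np.array([[0, 3], [24, 0]])
    binary_mask = convert_multiclass_to_binary_labels(part_seg)
    assert (binary_mask == np.array([[0, 1], [1, 0]])).all()
    return binary_mask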
def convert_2Djoints_to_gaussian_heatmaps(joints2D, img_wh, std=4):
"""
:param joints2D: (N, 2) array, 2D joint locations.
:return heatmaps: (img_wh, img_wh, N) array, 2D joint heatmaps (channels last).
"""
xx, yy = np.meshgrid(np.arange(img_wh),
np.arange(img_wh))
xx = xx[None, :, :].astype(np.float32)
yy = yy[None, :, :].astype(np.float32)
j2d_u = joints2D[:, 0, None, None]
j2d_v = joints2D[:, 1, None, None]
heatmap = np.exp(-(((xx - j2d_u) / std) ** 2) / 2 - (((yy - j2d_v) / std) ** 2) / 2).transpose(1, 2, 0)
return heatmap
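# Illustrative usage sketch (added for clarity, not part of the original file);
# the joint coordinates below are made up, only the shapes matter.
def _gaussian_heatmap_example():
    joints = np.array([[32.0, 48.0], [10.0, 10.0]])  # (N=2, 2) pixel coordinates
    heatmaps = convert_2Djoints_to_gaussian_heatmaps(joints, img_wh=64)
    assert heatmaps.shape == (64, 64, 2)  # channels-last, one heatmap per joint
    return heatmaps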
def convert_2Djoints_to_gaussian_heatmaps_torch(joints2D,
img_wh,
std=4):
"""
:param joints2D: (B, N, 2) tensor - batch of 2D joints.
:param img_wh: int, dimensions of square heatmaps
:param std: standard deviation of gaussian blobs
:return heatmaps: (B, N, img_wh, img_wh) - batch of 2D joint heatmaps (channels first).
"""
device = joints2D.device
xx, yy = torch.meshgrid(torch.arange(img_wh, device=device),
torch.arange(img_wh, device=device))
xx = xx[None, None, :, :].float()
yy = yy[None, None, :, :].float()
j2d_u = joints2D[:, :, 0, None, None] # Horizontal coord (columns)
j2d_v = joints2D[:, :, 1, None, None] # Vertical coord (rows)
heatmap = torch.exp(-(((xx - j2d_v) / std) ** 2) / 2 - (((yy - j2d_u) / std) ** 2) / 2)
return heatmap
def convert_heatmaps_to_2Djoints_coordinates_torch(joints2D_heatmaps,
eps=1e-6):
"""
Convert 2D joint heatmaps into coordinates using argmax.
:param joints2D_heatmaps: (N, K, H, W) array of 2D joint heatmaps.
:param eps: heatmap max threshold to count as detected joint.
:return: joints2D: (N, K, 2) array of 2D joint coordinates.
joints2D_vis: (N, K) bool array of 2D joint visibilties.
"""
batch_size = joints2D_heatmaps.shape[0]
num_joints = joints2D_heatmaps.shape[1]
width = joints2D_heatmaps.shape[3]
# Joints 2D given by max heatmap indices.
# Since max and argmax are over batched 2D arrays, first flatten to 1D.
max_vals_flat, max_indices_flat = torch.max(joints2D_heatmaps.view(batch_size, num_joints, -1),
dim=-1) # (N, K)
# Convert 1D max indices to 2D max indices i.e. (x, y) coordinates.
joints2D = torch.zeros(batch_size, num_joints, 2, device=joints2D_heatmaps.device) # (N, K, 2)
joints2D[:, :, 0] = max_indices_flat % width # x-coordinate
joints2D[:, :, 1] = torch.floor(max_indices_flat / float(width)) # y-coordinate
# If heatmap is 0 everywhere (i.e. max value = 0), then no 2D coordinates
# should be returned for that heatmap (i.e. joints2D not visible).
# Following returns 1 for heatmaps with visible 2D coordinates (max val > eps) and -1 for heatmaps without.
joints2D_vis = max_vals_flat > eps
joints2D[torch.logical_not(joints2D_vis)] = -1
return joints2D, joints2D_vis
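# Illustrative round-trip sketch (added for clarity, not part of the original file):
# build heatmaps from 2D joints with the torch helper above, then recover the
# coordinates via argmax. The joint values are made up.
def _heatmap_roundtrip_example():
    joints2D = torch.tensor([[[20.0, 30.0], [5.0, 7.0]]])  # (B=1, N=2, 2) in (u, v)
    heatmaps = convert_2Djoints_to_gaussian_heatmaps_torch(joints2D, img_wh=64)
    coords, vis = convert_heatmaps_to_2Djoints_coordinates_torch(heatmaps)
    # coords has shape (1, 2, 2); for well-separated joints the argmax recovers
    # the original integer coordinates and vis is True for both joints.
    return coords, vis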
|
StarcoderdataPython
|
11351053
|
# filename: fastflix/encoders/vceencc_avc/settings_panel.py
# -*- coding: utf-8 -*-
import logging
from box import Box
from qtpy import QtCore, QtWidgets, QtGui
from fastflix.encoders.common.setting_panel import SettingPanel
from fastflix.language import t
from fastflix.models.encode import VCEEncCAVCSettings
from fastflix.models.fastflix_app import FastFlixApp
from fastflix.shared import link
from fastflix.resources import warning_icon
logger = logging.getLogger("fastflix")
presets = ["balanced", "fast", "slow"]
recommended_bitrates = [
"200k (320x240p @ 30fps)",
"300k (640x360p @ 30fps)",
"1000k (640x480p @ 30fps)",
"1750k (1280x720p @ 30fps)",
"2500k (1280x720p @ 60fps)",
"4000k (1920x1080p @ 30fps)",
"5000k (1920x1080p @ 60fps)",
"7000k (2560x1440p @ 30fps)",
"10000k (2560x1440p @ 60fps)",
"15000k (3840x2160p @ 30fps)",
"20000k (3840x2160p @ 60fps)",
"Custom",
]
recommended_crfs = [
"28",
"27",
"26",
"25",
"24",
"23",
"22",
"21",
"20",
"19",
"18",
"17",
"16",
"15",
"14",
"Custom",
]
def get_breaker():
breaker_line = QtWidgets.QWidget()
breaker_line.setMaximumHeight(2)
breaker_line.setStyleSheet("background-color: #ccc; margin: auto 0; padding: auto 0;")
return breaker_line
class VCEENCCAVC(SettingPanel):
profile_name = "vceencc_avc"
hdr10plus_signal = QtCore.Signal(str)
hdr10plus_ffmpeg_signal = QtCore.Signal(str)
def __init__(self, parent, main, app: FastFlixApp):
super().__init__(parent, main, app)
self.main = main
self.app = app
grid = QtWidgets.QGridLayout()
self.widgets = Box(mode=None)
self.mode = "Bitrate"
self.updating_settings = False
grid.addLayout(self.init_modes(), 0, 2, 4, 4)
grid.addLayout(self._add_custom(title="Custom VCEEncC options", disable_both_passes=True), 10, 0, 1, 6)
grid.addLayout(self.init_preset(), 0, 0, 1, 2)
grid.addLayout(self.init_profile(), 1, 0, 1, 2)
grid.addLayout(self.init_mv_precision(), 2, 0, 1, 2)
grid.addLayout(self.init_pre(), 3, 0, 1, 2)
breaker = QtWidgets.QHBoxLayout()
breaker_label = QtWidgets.QLabel(t("Advanced"))
breaker_label.setFont(QtGui.QFont("helvetica", 8, weight=55))
breaker.addWidget(get_breaker(), stretch=1)
breaker.addWidget(breaker_label, alignment=QtCore.Qt.AlignHCenter)
breaker.addWidget(get_breaker(), stretch=1)
grid.addLayout(breaker, 4, 0, 1, 6)
qp_line = QtWidgets.QHBoxLayout()
qp_line.addLayout(self.init_min_q())
qp_line.addStretch(1)
qp_line.addLayout(self.init_max_q())
qp_line.addStretch(1)
qp_line.addLayout(self.init_ref())
qp_line.addStretch(1)
qp_line.addLayout(self.init_b_frames())
qp_line.addStretch(1)
qp_line.addLayout(self.init_level())
qp_line.addStretch(1)
qp_line.addLayout(self.init_decoder())
qp_line.addStretch(1)
qp_line.addLayout(self.init_metrics())
grid.addLayout(qp_line, 5, 0, 1, 6)
self.ffmpeg_level = QtWidgets.QLabel()
grid.addWidget(self.ffmpeg_level, 8, 2, 1, 4)
grid.setRowStretch(9, 1)
guide_label = QtWidgets.QLabel(
link("https://github.com/rigaya/VCEEnc/blob/master/VCEEncC_Options.en.md", t("VCEEncC Options"))
)
warning_label = QtWidgets.QLabel()
warning_label.setPixmap(QtGui.QIcon(warning_icon).pixmap(22))
guide_label.setAlignment(QtCore.Qt.AlignBottom)
guide_label.setOpenExternalLinks(True)
grid.addWidget(guide_label, 11, 0, 1, 4)
grid.addWidget(warning_label, 11, 4, 1, 1, alignment=QtCore.Qt.AlignRight)
grid.addWidget(QtWidgets.QLabel(t("VCEEncC Encoder support is still experimental!")), 11, 5, 1, 1)
self.setLayout(grid)
self.hide()
self.hdr10plus_signal.connect(self.done_hdr10plus_extract)
self.hdr10plus_ffmpeg_signal.connect(lambda x: self.ffmpeg_level.setText(x))
def init_preset(self):
return self._add_combo_box(
label="Preset",
widget_name="preset",
options=presets,
tooltip="preset: The slower the preset, the better the compression and quality",
connect="default",
opt="preset",
)
def init_profile(self):
return self._add_combo_box(
label="Profile",
widget_name="profile",
options=["Baseline", "Main", "High"],
connect="default",
opt="profile",
)
def init_mv_precision(self):
return self._add_combo_box(
label="Motion vector accuracy",
tooltip="Q-pel is highest precision",
widget_name="mv_precision",
options=["q-pel", "half-pel", "full-pel"],
opt="mv_precision",
)
def init_lookahead(self):
return self._add_combo_box(
label="Lookahead",
tooltip="",
widget_name="lookahead",
opt="lookahead",
options=["off"] + [str(x) for x in range(1, 33)],
)
def init_pre(self):
layout = QtWidgets.QHBoxLayout()
layout.addLayout(self._add_check_box(label="VBAQ", widget_name="vbaq", opt="vbaq"))
layout.addLayout(self._add_check_box(label="Pre Encode", widget_name="pre_encode", opt="pre_encode"))
layout.addLayout(self._add_check_box(label="Pre Analysis", widget_name="pre_analysis", opt="pre_analysis"))
return layout
def init_level(self):
layout = self._add_combo_box(
label="Level",
tooltip="Set the encoding level restriction",
widget_name="level",
options=[
t("Auto"),
"1.0",
"2.0",
"2.1",
"3.0",
"3.1",
"4.0",
"4.1",
"5.0",
"5.1",
"5.2",
],
opt="level",
)
self.widgets.level.setMinimumWidth(60)
return layout
@staticmethod
def _qp_range():
return [str(x) for x in range(0, 52)]
def init_min_q(self):
layout = QtWidgets.QHBoxLayout()
layout.addWidget(QtWidgets.QLabel(t("Min Q")))
layout.addWidget(
self._add_combo_box(widget_name="min_q", options=["I"] + self._qp_range(), min_width=45, opt="min_q")
)
return layout
def init_max_q(self):
layout = QtWidgets.QHBoxLayout()
layout.addWidget(QtWidgets.QLabel(t("Max Q")))
layout.addWidget(
self._add_combo_box(widget_name="max_q", options=["I"] + self._qp_range(), min_width=45, opt="max_q")
)
return layout
def init_b_frames(self):
return self._add_combo_box(
widget_name="b_frames",
label="B Frames",
options=[t("Auto")] + [str(x) for x in range(3)],
opt="b_frames",
min_width=60,
)
def init_ref(self):
return self._add_combo_box(
widget_name="ref",
label="Ref Frames",
options=[t("Auto")] + [str(x) for x in range(17)],
opt="ref",
min_width=60,
)
def init_decoder(self):
return self._add_combo_box(
widget_name="decoder",
label="Decoder",
options=["Hardware", "Software"],
opt="decoder",
tooltip="Hardware: use libavformat + hardware decoder for input\nSoftware: use avcodec + software decoder",
min_width=80,
)
def init_metrics(self):
return self._add_check_box(
widget_name="metrics",
opt="metrics",
label="Metrics",
tooltip="Calculate PSNR and SSIM and show in the encoder output",
)
def init_modes(self):
layout = self._add_modes(recommended_bitrates, recommended_crfs, qp_name="cqp")
return layout
def mode_update(self):
self.widgets.custom_cqp.setDisabled(self.widgets.cqp.currentText() != "Custom")
self.widgets.custom_bitrate.setDisabled(self.widgets.bitrate.currentText() != "Custom")
self.main.build_commands()
def setting_change(self, update=True):
if self.updating_settings:
return
self.updating_settings = True
if update:
self.main.page_update()
self.updating_settings = False
def update_video_encoder_settings(self):
settings = VCEEncCAVCSettings(
preset=self.widgets.preset.currentText().split("-")[0].strip(),
mv_precision=self.widgets.mv_precision.currentText(),
max_q=self.widgets.max_q.currentText() if self.widgets.max_q.currentIndex() != 0 else None,
min_q=self.widgets.min_q.currentText() if self.widgets.min_q.currentIndex() != 0 else None,
extra=self.ffmpeg_extras,
metrics=self.widgets.metrics.isChecked(),
level=self.widgets.level.currentText() if self.widgets.level.currentIndex() != 0 else None,
b_frames=self.widgets.b_frames.currentText() if self.widgets.b_frames.currentIndex() != 0 else None,
ref=self.widgets.ref.currentText() if self.widgets.ref.currentIndex() != 0 else None,
pre_encode=self.widgets.pre_encode.isChecked(),
pre_analysis=self.widgets.pre_analysis.isChecked(),
vbaq=self.widgets.vbaq.isChecked(),
decoder=self.widgets.decoder.currentText(),
)
encode_type, q_value = self.get_mode_settings()
settings.cqp = q_value if encode_type == "qp" else None
settings.bitrate = q_value if encode_type == "bitrate" else None
self.app.fastflix.current_video.video_settings.video_encoder_settings = settings
def set_mode(self, x):
self.mode = x.text()
self.widgets.min_q.setEnabled(self.mode.lower() == "bitrate")
self.widgets.max_q.setEnabled(self.mode.lower() == "bitrate")
self.main.build_commands()
def new_source(self):
if not self.app.fastflix.current_video:
return
super().new_source()
|
StarcoderdataPython
|
5129615
|
# filename: pcdet/models/backbones_2d/unet.py
import torch
import torch.nn as nn
import torch.nn.functional as F
from ..model_utils.seg_loss import FocalLoss
class DoubleConv(nn.Module):
def __init__(self, in_ch, out_ch, bn=True):
super(DoubleConv, self).__init__()
if bn:
self.conv = nn.Sequential(
nn.Conv2d(in_ch, out_ch, 3, padding=1),
nn.BatchNorm2d(out_ch),
nn.ReLU(inplace=True),
nn.Conv2d(out_ch, out_ch, 3, padding=1),
nn.BatchNorm2d(out_ch),
nn.ReLU(inplace=True)
)
else:
self.conv = nn.Sequential(
nn.Conv2d(in_ch, out_ch, 3, padding=1),
nn.ReLU(inplace=True),
nn.Conv2d(out_ch, out_ch, 3, padding=1),
nn.ReLU(inplace=True)
)
def forward(self, x):
x = self.conv(x)
return x
class Down(nn.Module):
def __init__(self, in_ch, out_ch, bn=True):
super(Down, self).__init__()
self.mpconv = nn.Sequential(
nn.MaxPool2d(2),
DoubleConv(in_ch, out_ch, bn=bn),
)
def forward(self, x):
x = self.mpconv(x)
return x
class Up(nn.Module):
def __init__(self, in_ch, out_ch, bilinear=True, bn=True):
super(Up, self).__init__()
if bilinear:
self.up = nn.Upsample(scale_factor=2, mode='bilinear', align_corners=True)
else:
self.up = nn.ConvTranspose2d(in_ch // 2, in_ch // 2, 2, stride=2)
self.conv = DoubleConv(in_ch, out_ch, bn=bn)
def forward(self, x1, x2):
x1 = self.up(x1)
diffY = x2.size()[2] - x1.size()[2]
diffX = x2.size()[3] - x1.size()[3]
x1 = F.pad(x1, (diffX // 2, diffX - diffX // 2,
diffY // 2, diffY - diffY // 2))
x = torch.cat([x2, x1], dim=1)
x = self.conv(x)
return x
class UNet(nn.Module):
def __init__(self, model_cfg):
super().__init__()
self.model_cfg = model_cfg
self.input_channel = self.model_cfg.get('INPUT_CHANNELS', None)
bn = self.model_cfg.get('BN', None)
channels = self.model_cfg.get('CHANNELS', None)
self.output_channels = self.model_cfg.get('OUTPUT_CHANNELS', None)
self.inc = DoubleConv(self.input_channel, channels[0], bn=bn)
self.down1 = Down(channels[0], channels[1], bn=bn)
self.down2 = Down(channels[1], channels[2], bn=bn)
self.down3 = Down(channels[2], channels[3], bn=bn)
self.down4 = Down(channels[3], channels[3], bn=bn)
self.up1 = Up(channels[4], channels[2], bn=bn)
self.up2 = Up(channels[3], channels[1], bn=bn)
self.up3 = Up(channels[2], channels[0], bn=bn)
self.up4 = Up(channels[1], channels[0], bn=bn)
self.out_conv = nn.Conv2d(channels[0], self.output_channels, 1)
output_prob = self.model_cfg.get('OUTPUT_PROB', None)
self.output_layer = None
if output_prob:
self.output_layer = nn.Softmax(dim=1)
self.loss_func = None
if hasattr(self.model_cfg, 'LOSS_CONFIG'):
apply_nonlin = nn.Softmax(dim=1) if not output_prob else None
if self.model_cfg.LOSS_CONFIG.NAME == 'FocalLoss':
self.loss_func = FocalLoss(apply_nonlin=apply_nonlin, alpha=getattr(self.model_cfg.LOSS_CONFIG, 'ALPHA', None))
else:
raise NotImplementedError
self.forward_data_dict = None
def get_output_feature_dim(self):
return self.output_channels
def get_loss(self, tb_dict=None):
tb_dict = {} if tb_dict is None else tb_dict
pred = self.forward_data_dict['pred_image_seg']
target = self.forward_data_dict['image_seg_label']
loss = self.loss_func(pred, target)
tb_dict.update({'image_seg_loss': loss})
return loss, tb_dict
def forward(self, data_dict):
x = data_dict['image']
x = x.permute(0, 3, 1, 2)
x1 = self.inc(x)
x2 = self.down1(x1)
x3 = self.down2(x2)
x4 = self.down3(x3)
x5 = self.down4(x4)
x = self.up1(x5, x4)
x = self.up2(x, x3)
x = self.up3(x, x2)
x = self.up4(x, x1)
out = self.out_conv(x)
if self.output_layer is not None:
out = self.output_layer(out)
data_dict['pred_image_seg'] = out
self.forward_data_dict = data_dict
return data_dict
|
StarcoderdataPython
|
33398
|
def preprocess(text):
text=text.replace('\n', '\n\r')
return text
def getLetter():
return open("./input/letter.txt", "r").read()
|
StarcoderdataPython
|
3237442
|
import amplitf.interface as atfi
import amplitf.likelihood as atfl
from amplitf.phasespace.rectangular_phasespace import RectangularPhaseSpace
from amplitf.phasespace.combined_phasespace import CombinedPhaseSpace
import tfa.plotting as tfp
import tfa.optimisation as tfo
import tfa.rootio as tfr
import tfa.toymc as tft
import tfa.neural_nets as tfn
import tensorflow as tf
import numpy as np
import sys
import matplotlib.pyplot as plt
from DistributionModel import parameters_list, observables_phase_space, observables_data, observables_titles
print_step = 50 # print statistics every 50 epochs
norm_size = 1000000 # size of the normalisation sample (random uniform)
path = "./"
initfile = "eff_train.npy" # file with the trained parameters of the NN
calibfile = "test_tuple.root" # Sample to fit
outfile = "eff_result" # Prefix for output files (text and pdf)
seed = 1 # initial random seed
if len(sys.argv)>1 : calibfile = sys.argv[1] # file to fit is the first command line parameter
if len(sys.argv)>2 : outfile = sys.argv[2] # output prefix is the second parameter
if len(sys.argv)>3 : seed = int(sys.argv[3]) # optionally, initial seed
ann = np.load(initfile, allow_pickle = True) # Load NN parameters
data_sample = tfr.read_tuple(path + calibfile, branches = observables_data)[:100000,:]
# Initialise NN weights and biases from the loaded file
(scale, ranges) = ann[:2]
(weights, biases) = tfn.init_fixed_weights_biases(ann[2:])
ndim = observables_phase_space.dimensionality()
observables_bounds = observables_phase_space.bounds()
# Density model as a multilayer perceptron
def model(x, pars) :
# Constant vectors of fit parameters (the same for every data point)
vec = tf.reshape( tf.concat( [ tf.constant(ndim*[0.], dtype = atfi.fptype() ), pars ], axis = 0 ), [ 1, ndim + len(pars) ] )
# Input tensor for MLP, 5 first variables are data, the rest are constant optimisable parameters
x2 = tf.pad( x, [[0, 0], [0, len(pars)]], 'CONSTANT') + vec
return scale*tfn.multilayer_perceptron(x2, ranges, weights, biases)
# Initialise random seeds
atfi.set_seed(seed)
# Declare fit parameters
pars = [ tfo.FitParameter(par[0], (par[2][0]+par[2][1])/2., par[2][0], par[2][1]) for par in parameters_list ]
# Unbinned negative log likelihood
@atfi.function
def nll(pars) :
parslist = [ pars[i[0]] for i in parameters_list ]
return atfl.unbinned_nll( model(data_sample, parslist), atfl.integral( model(norm_sample, parslist) ) )
# Normalisation sample is a uniform random sample in 5D phase space
norm_sample = observables_phase_space.rectangular_grid_sample( ( 200,200 ) )
# Data sample, run through phase space filter just in case
data_sample = observables_phase_space.filter(data_sample)
bins = ndim*[50]
tfp.set_lhcb_style(size=9, usetex=False)
fig, ax = plt.subplots(nrows=ndim, ncols=ndim, figsize=(8, 6))
# Initialise multidimensional density display object
display = tfp.MultidimDisplay(data_sample, norm_sample, bins, observables_bounds, observables_titles, fig, ax)
print("Normalisation sample size = {len(norm_sample)}")
print(norm_sample)
print("Data sample size = {len(data_sample)}")
print(data_sample)
# Run the minimisation several times (5 in this script), choose the best NLL value
best_nll = 1e10
best_result = None
for i in range(0, 5) :
for p in pars :
p.update(np.random.uniform(p.lower_limit, p.upper_limit))
result = tfo.run_minuit(nll, pars)
print(result)
parslist = [ result["params"][i[0]][0] for i in parameters_list ]
if result['loglh'] < best_nll : # If we got the best NLL so far
best_nll = result['loglh']
best_result = result
norm_pdf = model(norm_sample, parslist) # Calculate PDF
display.draw(norm_pdf) # Draw PDF
plt.tight_layout(pad=0., w_pad=0., h_pad=0.)
plt.draw()
plt.pause(0.1)
print("Optimization Finished!")
parslist = [ best_result["params"][i[0]][0] for i in parameters_list ]
norm_pdf = model(norm_sample, parslist) # Calculate PDF
display.draw(norm_pdf)
def fit_model(x) :
return model(x, parslist)
# Generate toy MC sample corresponding to fit result and store it to ROOT file
fit_sample = tft.run_toymc(fit_model, observables_phase_space, 1000000, 1e-20, chunk = 1000000)
tfr.write_tuple("eff_fit_result.root", fit_sample, observables_data)
# Calculate the fit result on a 50x50 grid for chi2 evaluation
norm_sample_2 = observables_phase_space.rectangular_grid_sample( ( 50, 50 ) )
fit = fit_model(norm_sample_2).numpy().reshape( (50, 50) )
hist = np.histogram2d(data_sample[:,0], data_sample[:,1], bins=(50, 50), range=observables_bounds)
# Normalise fit result
fit = fit/atfl.integral(fit)*atfl.integral(hist[0])
# Chi2
chi2 = np.sum((fit-hist[0])**2/fit)
print(fit)
print(hist)
print(norm_sample_2)
print(f"Chi2={chi2}")
|
StarcoderdataPython
|
376850
|
from .simple_json import simple_json_from_html_string
from .simple_tree import simple_tree_from_html_string
__all__ = [
'simple_json_from_html_string',
'simple_tree_from_html_string',
]
|
StarcoderdataPython
|
191315
|
# repo: beda-software/cookiecutter-beda-software-stack, filename: {{cookiecutter.project_slug}}/backend/app/gcs.py
import datetime
import urllib.parse
from urllib.parse import urlparse
from aiohttp import web
from google.cloud import storage
from app import config
from app.sdk import sdk
from app.contrib.google_cloud import generate_signed_url
from app.fhirdate import get_now
from app.contrib.utils import robust_fn, sync_to_async
DEFAULT_EXPIRATION = 3600
@sdk.operation(["GET"], ["$sign", {"name": "resource-type"}, {"name": "id"}])
async def operation_sign_resource(operation, request):
expiration = DEFAULT_EXPIRATION
resource_type = request["route-params"]["resource-type"]
resource_id = request["route-params"]["id"]
resource = await sdk.client.resources(resource_type).get(id=resource_id)
sign_resource(resource)
headers = {
"Cache-Control": "private, max-age={0}".format(expiration),
}
return web.json_response(resource.serialize(), headers=headers)
@sdk.operation(["POST"], ["$signed-upload"])
async def operation_singed_upload(operation, request):
resource = request["resource"]
file_name = resource["fileName"].split(".")
extension = file_name[-1]
file_name[-1] = str(datetime.datetime.now().timestamp())
file_name.append(extension)
file_name = ".".join(file_name)
now = get_now()
object_name = "uploads/{year}/{month}/{day}/{file_name}".format(
file_name=file_name, year=now.year, month=now.month, day=now.day
)
object_url = "https://storage.googleapis.com/{bucket}/{object_name}".format(
bucket=config.gc_bucket, object_name=object_name
)
signed_url = generate_signed_url(
config.gc_account_file,
config.gc_bucket,
object_name=object_name,
expiration=3600,
http_method="PUT",
headers={"Content-Type": resource["contentType"]},
)
return web.json_response(
{"signedUploadUrl": signed_url, "objectUrl": object_url, "fileName": file_name}
)
@robust_fn
@sync_to_async
def google_storage_upload(path, content_str, content_type):
storage_client = storage.Client()
bucket = storage_client.get_bucket(config.gc_bucket)
blob = storage.Blob(path, bucket)
blob.upload_from_string(content_str, content_type=content_type)
return urllib.parse.unquote(blob.public_url)
@robust_fn
@sync_to_async
def google_storage_download(path):
storage_client = storage.Client()
bucket = storage_client.get_bucket(config.gc_bucket)
blob = storage.Blob(path, bucket)
return blob.download_as_string()
def sign_url(url: str, expiration: int):
object_name = extract_google_storage_object_name_from_url(url)
if object_name:
return generate_signed_url(
config.gc_account_file, config.gc_bucket, object_name, expiration
)
return url
def sign_resource(resource):
def walk(tree):
if isinstance(tree, dict):
if "url" in tree:
tree["url"] = sign_url(tree["url"], DEFAULT_EXPIRATION)
if isinstance(tree, dict):
for branch in tree.values():
walk(branch)
if isinstance(tree, list):
for branch in tree:
walk(branch)
walk(resource)
def extract_google_storage_object_name_from_url(url):
if url.startswith("https://www.googleapis.com/"):
return urlparse(url).path.split("/").pop()
elif url.startswith("https://storage.googleapis.com/"):
return urlparse(url).path.replace("/{0}/".format(config.gc_bucket), "",)
else:
return None
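# Illustrative note (added for clarity, not part of the original module); the bucket
# name and object paths below are made-up examples of how the helper behaves:
# - "https://storage.googleapis.com/<gc_bucket>/uploads/2020/1/1/a.pdf" has the
#   "/<gc_bucket>/" prefix stripped, leaving "uploads/2020/1/1/a.pdf";
# - "https://www.googleapis.com/.../a.pdf" yields only the last path segment;
# - any other URL returns None, so sign_url() passes it through unchanged.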
|
StarcoderdataPython
|
6551676
|
# filename: cloud_auto/utils.py
import os, socket, re
def FileisExist(dirpath, tfilename):
filenames = os.listdir(dirpath)
for filename in filenames:
if tfilename == filename:
return True
return False
def chk_valid_ipv4(addr):
try:
socket.inet_aton(addr)
return True
except socket.error as e:
return False
def chk_valid_mac(addr):
if re.match("[0-9a-f]{2}([-:])[0-9a-f]{2}(\\1[0-9a-f]{2}){4}$", addr.lower()):
return True
else:
return False
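# Illustrative usage sketch (added for clarity, not part of the original module);
# the addresses below are made-up examples of what the validators accept.
def _validator_examples():
    assert chk_valid_ipv4("192.168.0.1") is True
    assert chk_valid_ipv4("not-an-ip") is False
    assert chk_valid_mac("aa:bb:cc:dd:ee:ff") is True
    assert chk_valid_mac("aa:bb:cc:dd:ee") is False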
|
StarcoderdataPython
|
271680
|
# Generated by Django 2.1.3 on 2018-12-12 07:07
import django.core.validators
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Pizza',
fields=[
('pizza_no', models.AutoField(primary_key=True, serialize=False, verbose_name='披薩編號')),
('name', models.CharField(max_length=20, verbose_name='披薩名稱')),
('description', models.TextField(verbose_name='披薩描述')),
('price', models.PositiveIntegerField(verbose_name='披薩價格')),
('size', models.CharField(max_length=2, verbose_name='披薩尺寸')),
('cost', models.PositiveIntegerField(verbose_name='披薩成本')),
('in_stock', models.PositiveIntegerField(verbose_name='披薩庫存量')),
('sales_volume', models.PositiveIntegerField(default=0, verbose_name='披薩銷售量')),
('click_count', models.PositiveIntegerField(default=0, verbose_name='披薩點擊量')),
('isVegetarian', models.BooleanField(verbose_name='是否為素食披薩')),
('stars', models.DecimalField(decimal_places=0, max_digits=1, validators=[django.core.validators.MinValueValidator(1, '最低1分'), django.core.validators.MaxValueValidator(5, '最多5分')], verbose_name='披薩評分')),
],
),
]
|
StarcoderdataPython
|
37632
|
# filename: datawinners/submission/request_processor.py
import json
import logging
from django.conf import settings
from datawinners.feeds.database import get_feeds_db_for_org
from mangrove.transport import TransportInfo
from datawinners.accountmanagement.models import TEST_REPORTER_MOBILE_NUMBER, OrganizationSetting
from datawinners.messageprovider.messages import SMS
from datawinners.utils import get_organization, get_database_manager_for_org
logger = logging.getLogger("django")
class WebSMSDBMRequestProcessor(object):
def process(self, http_request, mangrove_request):
mangrove_request['dbm']=get_database_manager_for_org(mangrove_request['organization'])
mangrove_request['feeds_dbm'] = get_feeds_db_for_org(mangrove_request['organization'])
class WebSMSTransportInfoRequestProcessor(object):
def process(self, http_request, mangrove_request):
organization_settings = OrganizationSetting.objects.get(organization=mangrove_request['organization'])
_to = get_organization_number(organization_settings.get_organisation_sms_number()[0])
_from = TEST_REPORTER_MOBILE_NUMBER
mangrove_request['transport_info']=TransportInfo(SMS, _from, _to)
class WebSMSOrganizationFinderRequestProcessor(object):
def process(self, http_request, mangrove_request):
mangrove_request['organization'] = get_organization(http_request)
class SMSMessageRequestProcessor(object):
def process(self, http_request, mangrove_request):
if settings.USE_NEW_VUMI:
data = http_request.raw_post_data
params = json.loads(data)
message_ = params['content']
else:
message_ = http_request.POST['message']
mangrove_request['incoming_message']= message_
class SMSTransportInfoRequestProcessor(object):
def process(self, http_request, mangrove_request):
vumi_parameters = get_vumi_parameters(http_request)
mangrove_request['transport_info']=TransportInfo(SMS, vumi_parameters.from_number,
vumi_parameters.to_number)
class MangroveWebSMSRequestProcessor(object):
middlewares=[SMSMessageRequestProcessor(),WebSMSOrganizationFinderRequestProcessor(),WebSMSTransportInfoRequestProcessor(),WebSMSDBMRequestProcessor()]
def process(self, http_request, mangrove_request):
for middleware in self.middlewares:
middleware.process(http_request,mangrove_request)
def get_organization_number(organization_number):
return organization_number[0] if(isinstance(organization_number, list)) else organization_number
def try_get_value(request_params, key):
return request_params[key] if request_params.has_key(key) else None
def get_vumi_parameters(http_request):
http_request_post = http_request.POST
if settings.USE_NEW_VUMI:
data = http_request.raw_post_data
logger.info('http request raw post data: %s' % data)
params = json.loads(data)
from_addr_ = try_get_value(params, "from_addr")
to_addr_ = try_get_value(params, "to_addr")
return VumiParameters(from_number=from_addr_, to_number=to_addr_, content=params["content"], is_new_vumi = True)
else:
from_addr_ = try_get_value(http_request_post, "from_msisdn")
to_addr_ = try_get_value(http_request_post, "to_msisdn")
return VumiParameters(from_number=from_addr_, to_number=to_addr_, content=http_request_post["message"], is_new_vumi=False)
class VumiParameters(object):
def __init__(self, from_number, to_number, content, is_new_vumi):
self.from_number = from_number
self.to_number = to_number
self.content = content
self.is_new_vumi = is_new_vumi
|
StarcoderdataPython
|
6459206
|
# repo: Lord-of-the-Galaxy/heroku-multi-account
import os, sys, time
import requests as req
import psycopg2
from hma_conf import MASTER_APP, SLAVE_APP, PG_TABLES as TABLES
# You shouldn't need to modify anything here
DB_URL = os.environ['DATABASE_URL']
SLAVE_URL = f"http://{SLAVE_APP}.herokuapp.com"
MASTER_API_URL = f"https://api.heroku.com/apps/{MASTER_APP}"
SLAVE_API_URL = f"https://api.heroku.com/apps/{SLAVE_APP}"
HMA_KEY = os.environ['HMA_SHARED_KEY']
MASTER_TOKEN = os.environ['MASTER_HEROKU_TOKEN']
SLAVE_TOKEN = os.environ['SLAVE_HEROKU_TOKEN']
MASTER_API_HEADERS = {
"Accept": "application/vnd.heroku+json; version=3",
"Authorization": f"Bearer {MASTER_TOKEN}"
}
SLAVE_API_HEADERS = {
"Accept": "application/vnd.heroku+json; version=3",
"Authorization": f"Bearer {SLAVE_TOKEN}"
}
API_PAYLOAD_0 = {
"quantity": 0,
"size": "free"
}
API_PAYLOAD_1 = {
"quantity": 1,
"size": "free"
}
#connect to database
conn = psycopg2.connect(DB_URL)
# first, check date and decide what should be done
def main():
"""Checks the date and time, and then decides if a shift from master to slave (or vice versa) is needed. If necessary, makes the shift."""
date = time.gmtime().tm_mday
if date == 1 or date == 2: # in case it missed once
# shift from slave to master, checking to ensure it hasn't already happened
status = check_status()
if status == 'slave':
slave_to_master()
elif status == 'master':
print("Shift has probably already happened")
else:
print("In a forbidden state:", status)
elif date == 22 or date == 23: #in case it missed once
# shift from master to slave, checking to ensure it hasn't already happened
status = check_status()
if status == 'master':
master_to_slave()
elif status == 'slave':
print("Shift has probably already happened")
else:
print("In a forbidden state:", status)
else:
pass
def check_status():
"""
Check the status of the application, i.e., whether it is running on the master or slave.
Also check to see if there are any issues, like the web dyno on the slave running, or both workers running etc.
"""
# assume no web dynos on master - there should never be a web dyno on master
r = req.get(f"{MASTER_API_URL}/formation/worker", headers=MASTER_API_HEADERS)
if r.status_code != req.codes.ok:
print("Couldn't get master worker formation")
print(r.status_code, ":", r.text)
return 'unknown:1'
master_worker = r.json()['quantity'] # this is guaranteed to work i think
r = req.get(f"{SLAVE_API_URL}/formation/worker", headers=SLAVE_API_HEADERS)
if r.status_code != req.codes.ok:
print("Couldn't get slave worker formation")
print(r.status_code, ":", r.text)
return 'unknown:2'
slave_worker = r.json()['quantity']
r = req.get(f"{SLAVE_API_URL}/formation/web", headers=SLAVE_API_HEADERS)
if r.status_code != req.codes.ok:
print("Couldn't get slave web formation")
print(r.status_code, ":", r.text)
return 'unknown:3'
slave_web = r.json()['quantity']
# all done
if slave_web != 0:
return 'forbidden-web'
elif master_worker != 0 and slave_worker != 0:
return 'both'
elif master_worker != 0:
return 'master'
elif slave_worker != 0:
return 'slave'
else:
return 'none'
def master_to_slave():
"""Shift the process from master to slave, shifting data as needed."""
print("Shifting from master to slave")
stop_master_worker()
setup_slave_web()
prepare_push()
push_to_slave()
stop_slave_web()
start_slave_worker()
print("DONE!")
def slave_to_master():
"""Shift the process from slave to master, shifting data as needed."""
print("Shifting from slave to master")
stop_slave_worker()
setup_slave_web()
pull_from_slave()
commit_pull_to_db()
stop_slave_web()
start_master_worker()
print("DONE!")
def setup_slave_web():
"""Sets up the web server on the slave, then checks it."""
print("Starting slave web")
r = req.patch(f"{SLAVE_API_URL}/formation/web", json=API_PAYLOAD_1, headers=SLAVE_API_HEADERS)
if r.status_code != req.codes.ok:
print("Unable to start the web dyno on slave")
print(r.text)
return False
#wait a bit for the web process to start up
print("Waiting a bit")
time.sleep(10)
r = req.get(SLAVE_URL)
if not r.text.startswith("Index"):
print("Something is wrong with slave:")
print(r.text)
return False
print("Got response from slave:", r.text)
return True
# LOTS of code duplication here, should fix sometime
def stop_slave_web():
"""Stops the web process on the slave."""
print("Stopping slave web")
r = req.patch(f"{SLAVE_API_URL}/formation/web", json=API_PAYLOAD_0, headers=SLAVE_API_HEADERS)
if r.status_code != req.codes.ok:
print("Unable to stop the web dyno on slave")
print(r.text)
return False
#wait a bit for the web process to stop
print("Waiting a bit")
time.sleep(2)
return True
def start_master_worker():
"""Starts the worker process on the master."""
print("Starting master worker")
r = req.patch(f"{MASTER_API_URL}/formation/worker", json=API_PAYLOAD_1, headers=MASTER_API_HEADERS)
if r.status_code != req.codes.ok:
print("Unable to start the worker dyno on master")
print(r.text)
return False
#wait a bit for the worker process to start
print("Waiting a bit")
time.sleep(10)
return True
def stop_master_worker():
"""Stops the worker process on the master."""
print("Stopping master worker")
r = req.patch(f"{MASTER_API_URL}/formation/worker", json=API_PAYLOAD_0, headers=MASTER_API_HEADERS)
if r.status_code != req.codes.ok:
print("Unable to stop the worker dyno on master")
print(r.text)
return False
#wait a bit for the worker process to stop
print("Waiting a bit")
time.sleep(2)
return True
def start_slave_worker():
"""Starts the worker process on the slave."""
print("Starting slave worker")
r = req.patch(f"{SLAVE_API_URL}/formation/worker", json=API_PAYLOAD_1, headers=SLAVE_API_HEADERS)
if r.status_code != req.codes.ok:
print("Unable to start the worker dyno on slave")
print(r.text)
return False
#wait a bit for the worker process to start up
print("Waiting a bit")
time.sleep(10)
return True
def stop_slave_worker():
"""Stops the worker process on the slave."""
print("Stopping slave worker")
r = req.patch(f"{SLAVE_API_URL}/formation/worker", json=API_PAYLOAD_0, headers=SLAVE_API_HEADERS)
if r.status_code != req.codes.ok:
print("Unable to stop the worker dyno on slave")
print(r.text)
return False
#wait a bit for the worker process to stop
print("Waiting a bit")
time.sleep(2)
return True
def prepare_push():
"""Prepares to send data from master (this) to slave."""
print("Preparing to push")
cur = conn.cursor()
try:
for tname in TABLES:
with open(f'{tname}.db', 'w') as f:
print(f"Copying {tname}")
cur.copy_to(f, f'"{tname}"')
return True
except IOError:
print("IO ERROR")
return False
finally:
cur.close()
def push_to_slave():
"""Sends data from the master (this) to the slave."""
print("Pushing to slave")
try:
for tname in TABLES:
with open(f'{tname}.db', 'rb') as f:
print(f"Pushing {tname}")
r = req.post(f"{SLAVE_URL}/push_db/{tname}", files={'file': f}, data={'key': HMA_KEY})
if r.status_code != req.codes.ok:
print("Something wrong with slave on push:")
print(r.text)
return False
return True
except IOError:
print("IO ERROR")
return False
def pull_from_slave():
"""Pulls data from the slave."""
print("Pulling from slave")
r = req.get(f"{SLAVE_URL}/prepare_pull")
if r.status_code != req.codes.ok:
print("Something wrong with slave on prepare pull")
print(r.text)
return False
print("Prepared")
try:
for tname in TABLES:
with open(f'{tname}.db', 'wb') as f:
print(f"Pulling {tname}")
r = req.post(f"{SLAVE_URL}/pull_db/{tname}", data={'key': HMA_KEY})
if r.status_code != req.codes.ok:
print("Something went wrong")
print(r.text)
return False
f.write(r.content)
return True
except IOError:
print("IO ERROR")
return False
def commit_pull_to_db():
"""Commit data pulled from slave to the master's database."""
print("Committing pulled data")
cur = conn.cursor()
try:
for tname in TABLES:
cur.execute(f"DELETE FROM {tname};")
with open(f'{tname}.db', 'r') as f:
print(f"Copying {tname}")
cur.copy_from(f, f'"{tname}"')
conn.commit()
return True
except IOError:
print("IO ERROR")
return False
finally:
cur.close()
def debug(mode):
if mode == 'push':
master_to_slave()
elif mode == 'pull':
slave_to_master()
elif mode == 'debug':
print(MASTER_API_HEADERS)
print(SLAVE_API_HEADERS)
print(MASTER_API_URL)
print(SLAVE_API_URL)
elif mode == 'status':
print("Current status:", check_status())
if __name__ == '__main__':
if '--push-to-slave' in sys.argv[1:]:
debug('pull')
elif '--pull-from-slave' in sys.argv[1:]:
debug('push')
elif '--debug' in sys.argv[1:]:
debug('debug')
elif '--status' in sys.argv[1:]:
debug('status')
else:
main()
#always have to do this
conn.close()
|
StarcoderdataPython
|
6492867
|
import networkx as nx
import torch
import matplotlib.pyplot as plt
def visualize(h, G, color, epoch = None, loss = None):
plt.figure(figsize=(7,7))
plt.xticks([])
plt.yticks([])
if torch.is_tensor(h):
h = h.detach().cpu().numpy()
plt.scatter(h[:, 0], h[:, 1], s=140, c=color, cmap='Set2')
if epoch is not None and loss is not None:
plt.xlabel(f'Epoch: {epoch}, Loss: {loss.item(): .4f}', fontsize=16)
else:
nx.draw_networkx(G, pos=nx.spring_layout(G,seed=42), with_labels=False, node_color=color, cmap="Set2")
plt.show()
from torch_geometric.datasets import KarateClub
dataset = KarateClub()
data = dataset[0]
def basic_info():
print(dataset)
print("=========================")
print(len(dataset))
print(dataset.num_features)
print(dataset.num_classes)
print(data)
print('=========================')
print(f'nodes:{data.num_nodes}')
print(f'edges:{data.num_edges}')
print(f'node degree:{data.num_nodes / data.num_edges:.2f}')
print(f'training nodes:{data.train_mask.sum()}')
print(f'training node label rate:{int(data.train_mask.sum()) / data.num_nodes:.2f}')
print(f'isolated nodes:{data.has_isolated_nodes()}')
print(f'Has self-loops: {data.has_self_loops()}')
print(f'Is undirected: {data.is_undirected()}')
def graph_info():
from IPython.display import Javascript # Restrict height of output cell.
from IPython.display import display
display(Javascript('''google.colab.output.setIframeHeight(0, true, {maxHeight: 300})'''))
edge_index = data.edge_index
print(edge_index.t())
from torch_geometric.utils import to_networkx
G = to_networkx(data, to_undirected=True)
visualize(G, G, color=data.y)
from torch.nn import Linear
from torch_geometric.nn import GCNConv
class GCN(torch.nn.Module):
def __init__(self):
super(GCN, self).__init__()
torch.manual_seed(1234)
self.conv1 = GCNConv(data.num_features, 4)
self.conv2 = GCNConv(4, 4)
self.conv3 = GCNConv(4, 2)
self.classifier = Linear(2, dataset.num_classes)
def forward(self, x, edge_index):
h = self.conv1(x, edge_index)
h = h.tanh()
h = self.conv2(h, edge_index)
h = h.tanh()
h = self.conv3(h, edge_index)
h = h.tanh()
out = self.classifier(h)
return out, h
def train_GCN():
model = GCN()
criterion = torch.nn.CrossEntropyLoss()
optimizer = torch.optim.Adam(model.parameters(), lr=0.01)
def train(data):
optimizer.zero_grad()
out, h = model(data.x, data.edge_index)
loss = criterion(out[data.train_mask], data.y[data.train_mask])
loss.backward()
optimizer.step()
return loss, h
import time
for epoch in range(401):
loss, h = train(data)
if epoch % 10 == 0:
visualize(h, h, color=data.y, epoch=epoch, loss=loss)
time.sleep(0.3)
import torch.nn.functional as F
class MLP(torch.nn.Module):
def __init__(self, hidden_channels):
super(MLP, self).__init__()
torch.manual_seed(12345)
self.lin1 = Linear(dataset.num_features, hidden_channels)
self.lin2 = Linear(hidden_channels, dataset.num_classes)
def forward(self, x):
x = self.lin1(x)
x = x.relu()
x = F.dropout(x, p =0.5, training = self.training)
x = self.lin2(x)
return x
model = MLP(hidden_channels=16)
print(model)
|
StarcoderdataPython
|
11274340
|
#!/usr/bin/env python
import urllib,urllib2,re,sys,os,cookielib
cj = cookielib.CookieJar()
opener = urllib2.build_opener(urllib2.HTTPCookieProcessor(cj))
exheaders = [("User-Agent","Mozilla/4.0 (compatible; MSIE 7.1; Windows NT 5.1; SV1)"),]
opener.addheaders=exheaders
url_login = 'https://cs.login.cmu.edu/idp/Authn/Stateless'
url_cmu = 'https://cs.login.cmu.edu'
body = (('email','<EMAIL>'), ('password','<PASSWORD>'))
req1 = opener.open(url_cmu, urllib.urlencode(body))
req1 = opener.open(url_login, urllib.urlencode(body))
response = urllib2.urlopen('https://cs.login.cmu.edu/idp/Authn/Stateless')
print response.read()
for line in cj:
print line
#response = urllib2.urlopen('http://chenzongzhi.info/wp-admin/')
#print response.info()
#print response.read()
|
StarcoderdataPython
|
9704871
|
# filename: test_async.py
import asyncio
from netmiko import ConnectHandler
import getpass
from pprint import pprint
import ipdb
import time
device_dict = {
'cisco3': {
# 'comment': 'Cisco IOS-XE',
'host': 'cisco3.lasthop.io',
# 'snmp_port': 161,
# 'ssh_port': 22,
'username': 'pyclass',
'password': '<PASSWORD>',
'device_type': 'cisco_xe'
},
'cisco': {
# 'comment': 'Cisco IOS-XE',
'host': 'cisco4.lasthop.io',
# 'snmp_port': 161,
# 'ssh_port': 22,
'username': 'pyclass',
'password': '<PASSWORD>',
'device_type': 'cisco_xe'
},
'arista1': {
# 'comment': 'Arista_vEOS_switch',
'host': 'arista1.lasthop.io',
# 'ssh_port': 22,
# 'eapi_port': 443,
'username': 'pyclass',
'password': '<PASSWORD>',
'device_type': 'arista_eos'
},
'arista2': {
# 'comment': 'Arista_vEOS_switch',
'host': 'arista2.lasthop.io',
# 'ssh_port': 22,
# 'eapi_port': 443,
'username': 'pyclass',
'password': '<PASSWORD>',
'device_type': 'arista_eos'
},
'arista3': {
# 'comment': 'Arista_vEOS_switch',
'host': 'arista3.lasthop.io',
# 'ssh_port': 22,
# 'eapi_port': 443,
'username': 'pyclass',
'password': '<PASSWORD>',
'device_type': 'arista_eos'
},
'arista4': {
# 'comment': 'Arista_vEOS_switch',
'host': 'arista4.lasthop.io',
# 'ssh_port': 22,
# 'eapi_port': 443,
'username': 'pyclass',
'password': '<PASSWORD>',
'device_type': 'arista_eos'
},
'srx2': {
# 'comment': 'Juniper_SRX',
'host': 'srx2.lasthop.io',
# 'ssh_port': 22,
# 'netconf_port': 830,
'username': 'pyclass',
'password': '<PASSWORD>',
'device_type': 'juniper'
},
'nxos1': {
# 'comment': 'NX-OSv Switch',
'host': 'nxos1.lasthop.io',
# 'ssh_port': 22,
# 'nxapi_port': 8443,
'username': 'pyclass',
'password': '<PASSWORD>',
'device_type': 'cisco_nxos'
},
'nxos2': {
# 'comment': 'NX-OSv Switch',
'host': 'nxos2.lasthop.io',
# 'ssh_port': 22,
# 'nxapi_port': 8443,
'username': 'pyclass',
'password': '<PASSWORD>',
'device_type': 'cisco_nxos'
}
}
"""
device_dict = {
'cisco3': {
# 'comment': 'Cisco IOS-XE',
'host': 'cisco3.lasthop.io',
# 'snmp_port': 161,
# 'ssh_port': 22,
'username': 'pyclass',
'password': '<PASSWORD>',
'device_type': 'cisco_xe'
},
'cisco': {
# 'comment': 'Cisco IOS-XE',
'host': 'cisco4.lasthop.io',
# 'snmp_port': 161,
# 'ssh_port': 22,
'username': 'pyclass',
'password': '<PASSWORD>',
'device_type': 'cisco_xe'
}
}
"""
exec_send_args = {'command_string': "show run",
'expect_string': None,
'delay_factor': 1,
'max_loops': 500,
'auto_find_prompt': True,
'strip_prompt': True,
'strip_command': True,
'normalize': True,
'use_textfsm': False,
'textfsm_template': None,
'use_ttp': False,
'ttp_template': None,
'use_genie': False,
'cmd_verify': True
}
# connection dictionary argument setup functions
def binary_prompt(user_text):
# handles yes and no prompts
answer = input(user_text)
answer.strip()
answer.lower()
if answer == 'yes':
return True
elif answer == 'no':
return False
else:
print("Please enter a valid response. yes or no\n")
return binary_prompt(user_text)
def filepath_prompt(user_text):
# prompts user for filepath and tests if it exists.
# Returns [filepath, filename, and boolean of the files exists]
# True file exists
# False file doesn't exist
file_path = input(user_text)
# extract filename out of filepath
filename_split = file_path.split('/')
filename_split_2 = filename_split[-1].split('\\')
try:
f = open(file_path, 'r')
f.close()
return [file_path, filename_split_2[-1], True]
except FileNotFoundError:
return [file_path, filename_split_2[-1], False]
def prompt_credentials():
# prompts user for username and password and returns [username, password]
username = input("Username:\n")
prompt = binary_prompt("Use {} as your username? yes or no\n".format(username))
if prompt:
pass
else:
return prompt_credentials()
    password = getpass.getpass()
    print("To prevent lockouts please re-enter your password\n")
    verify_password = getpass.getpass()
if password == verify_password:
return [username, password]
else:
print("Passwords do not match\n")
return prompt_credentials()
async def ssh_connect(sort_dict):
# takes in a sorted connection dictionary used to make ssh connections
# returns our "thread_dict" with the format {device_name: connection_object}
print("#"*20+" CONNECTING "+"#"*20)
device_list = list(sort_dict)
print(device_list)
coroutine = [ConnectHandler(**sort_dict[device]) for device in device_list]
print("#" * 20 + " CONNECTED " + "#" * 20+"\n")
print(coroutine)
print("\n")
# threads = await asyncio.gather(*coroutine)
threads = coroutine
thread_dict = {device_list[i]: threads[i] for i in range(len(device_list))}
print("#" * 20 + " DICT " + "#" * 20)
print(thread_dict)
return thread_dict
async def ssh_disconnect(thread_dict):
# function that disconnects from each device
print("#"*20+" DISCONNECT "+"#"*20+"\n")
device_list = list(thread_dict)
print(device_list)
[print(f"Disconnecting from {device}") for device in device_list]
[thread_dict[device].disconnect() for device in device_list]
# await asyncio.gather(*coroutine)
print("#" * 20 +" DISCONNECTED " + "#" * 20 + "\n")
return
# async functions that interact with remote devices
async def send_exec_command(thread_dict, send_dict):
# sends the command to each device
# thread_dict is a dictionary containing ssh_connection object for its respective device
device_list = list(thread_dict)
    # still not actually async: netmiko's send_command blocks, so these run sequentially
coroutine = [thread_dict[device].send_command(**send_dict) for device in device_list]
# output_list = await asyncio.gather(*coroutine)
output_dict = {device_list[i]: coroutine[i] for i in range(len(device_list))}
#print(output_dict)
return output_dict
def main():
#print("#"*20+" SET TRACE "+"#"*20+"\n")
#ipdb.set_trace()
# set up our send arguments in here:
# ssh setup
# note need to run asyncio.run(function())
# x = time.clock_settime()
# time.clock_settime()
print("\nx")
thread_dict = asyncio.run(ssh_connect(sort_dict=device_dict))
output = asyncio.run(send_exec_command(thread_dict=thread_dict, send_dict=exec_send_args))
# pprint(output)
# close ssh connection
asyncio.run(ssh_disconnect(thread_dict=thread_dict))
return
if __name__ == "__main__":
main()
|
StarcoderdataPython
|
394297
|
<gh_stars>0
"""
Helper functions for interacting with the shell, and consuming shell-style
parameters provided in config files.
"""
import os
import shlex
import subprocess
try:
from shlex import quote
except ImportError:
from pipes import quote
__all__ = ['WindowsParser', 'PosixParser', 'OpenVMSParser', 'NativeParser']
class CommandLineParser:
"""
An object that knows how to split and join command-line arguments.
It must be true that ``argv == split(join(argv))`` for all ``argv``.
    The reverse needn't be true - `join(split(cmd))` may result in the addition
or removal of unnecessary escaping.
"""
@staticmethod
def join(argv):
""" Join a list of arguments into a command line string """
raise NotImplementedError
@staticmethod
def split(cmd):
""" Split a command line string into a list of arguments """
raise NotImplementedError
class WindowsParser:
"""
The parsing behavior used by `subprocess.call("string")` on Windows, which
matches the Microsoft C/C++ runtime.
Note that this is _not_ the behavior of cmd.
"""
@staticmethod
def join(argv):
# note that list2cmdline is specific to the windows syntax
return subprocess.list2cmdline(argv)
@staticmethod
def split(cmd):
import ctypes # guarded import for systems without ctypes
try:
ctypes.windll
except AttributeError:
raise NotImplementedError
# Windows has special parsing rules for the executable (no quotes),
# that we do not care about - insert a dummy element
if not cmd:
return []
cmd = 'dummy ' + cmd
CommandLineToArgvW = ctypes.windll.shell32.CommandLineToArgvW
CommandLineToArgvW.restype = ctypes.POINTER(ctypes.c_wchar_p)
CommandLineToArgvW.argtypes = (ctypes.c_wchar_p, ctypes.POINTER(ctypes.c_int))
nargs = ctypes.c_int()
lpargs = CommandLineToArgvW(cmd, ctypes.byref(nargs))
args = [lpargs[i] for i in range(nargs.value)]
assert not ctypes.windll.kernel32.LocalFree(lpargs)
# strip the element we inserted
assert args[0] == "dummy"
return args[1:]
class PosixParser:
"""
The parsing behavior used by `subprocess.call("string", shell=True)` on Posix.
"""
@staticmethod
def join(argv):
return ' '.join(quote(arg) for arg in argv)
@staticmethod
def split(cmd):
return shlex.split(cmd, posix=True)
class OpenVMSParser:
    """
    The parsing behavior used on OpenVMS: DCL-style double-quoting for `join`,
    POSIX-style splitting for `split`.
    """
@staticmethod
def strong_dquote(s):
return '"' + s.replace('"', '""') + '"'
@staticmethod
def join(argv):
prog = ''
try:
import vms.decc
prog = 'MCR ' + vms.decc.to_vms(argv[0], 0, 0)[0]
        except Exception:
            # vms.decc is only available on OpenVMS; without it, skip the MCR prefix
            pass
if len(argv) == 1:
return prog
return prog + ' ' + ' '.join(OpenVMSParser.strong_dquote(arg) for arg in argv[1:])
@staticmethod
def split(cmd):
return shlex.split(cmd, posix=True)
if os.name == 'nt':
NativeParser = WindowsParser
elif os.name == 'posix':
import sys
if sys.platform == 'OpenVMS':
NativeParser = OpenVMSParser
else:
NativeParser = PosixParser
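# A minimal round-trip sketch (assumes a POSIX or Windows host, where the
# CommandLineParser invariant argv == split(join(argv)) holds for NativeParser):
if __name__ == "__main__":
    argv = ["echo", "hello world", "it's quoted"]
    cmd = NativeParser.join(argv)
    assert NativeParser.split(cmd) == argv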
|
StarcoderdataPython
|
11280676
|
<reponame>Wirocama/Backend-Proyecto2-IPC1
from publicaciones import publicaciones
import json
class CRUD_PUBLICACIONES:
def __init__(self):
self.listaPublicaciones = []
self.listaCategorias = []
self.contador = 0
def agregarpublicacion(self,tipo,url,date,category,idU,usuario):
self.listaPublicaciones.append(publicaciones(self.contador,tipo,url,date,category,0,idU,usuario))
        self.contador += 1
        print("A new publication was created")
return {
"type": tipo,
"url": url,
"date": date,
"category": category,
"estado": 1,
"usuario": usuario
}
def modificarpublicacion(self,id,tipo,url,category):
for publicacion in self.listaPublicaciones:
if publicacion.getid() == id:
publicacion.settipo(tipo)
publicacion.seturl(url)
publicacion.setcategory(category)
return publicacion.infopublicacion()
return False
    def eliminarpublicacion(self, id):
        # note: pop() removes by list position here, which matches the publication id
        # only while ids stay aligned with list indices (nothing removed out of order)
        self.listaPublicaciones.pop(id)
        return True
def likes(self, id):
for publicacion in self.listaPublicaciones:
if publicacion.getid() == id:
publicacion.setlikes()
return True
def verpublicaciones(self):
return json.dumps([publicacion.infopublicacion() for publicacion in self.listaPublicaciones])
def agregarcategoria(self, categoria):
for listado in self.listaCategorias:
if listado == categoria:
return False
print("Se agrego la categoría: " + categoria)
self.listaCategorias.append(categoria)
return True
def mostrarcategorias(self):
self.listaCategorias.sort()
return self.listaCategorias
|
StarcoderdataPython
|
4896217
|
import feedparser
import urllib2
from config_helper import get_proxies
def get_rss_items(url):
proxy=urllib2.ProxyHandler(get_proxies())
xmldata = feedparser.parse(url, handlers=[proxy])
return xmldata['entries']
|
StarcoderdataPython
|
3376749
|
from grid_world import standard_grid
import numpy as np
import sys
def print_policy(P, title):
print("---------------------------")
print(title)
for i in range(3):
print("---------------------------")
for j in range(4):
a = P.get((i,j), ' ')
print(" %s |" % a, end="")
print("")
def initial_policy(grid):
policy = {}
for s in grid.actions.keys():
policy[s] = np.random.choice(grid.all_actions)
return policy
def value_iteration(grid,policy, e = 1e-4, gamma=0.9):
V = {}
states = grid.all_states()
for s in states:
V[s] = 0.1
    while True:
        biggest_update = 0
        for s in states:
            Vs = V[s]
            if s not in policy:
                continue
            best_v = float('-inf')
            for a in grid.all_actions:
                grid.set_state(s)
                new_state = grid.move(a)
                r = grid.get_state_reward()
                new_v = r + gamma * V[new_state]
                if new_v > best_v:
                    best_v = new_v
            V[s] = best_v
            # track the largest change in this sweep; stop only once a full sweep changes little
            biggest_update = max(biggest_update, np.abs(Vs - V[s]))
        if biggest_update < e:
            return V
def update_policy(V, policy, grid, gamma=0.9):
for s in policy.keys():
best_a = None
best_value = float('-inf')
for a in grid.all_actions:
grid.set_state(s)
new_state = grid.move(a)
r = grid.get_state_reward()
new_v = r + gamma * V[new_state]
if new_v > best_value:
best_value = new_v
best_a = a
policy[s] = best_a
if __name__ == "__main__":
env = standard_grid()
policy = initial_policy(env)
v = value_iteration(env,policy)
print_policy(policy, 'Initial policy')
update_policy(v,policy,env)
print_policy(policy, 'Final policy')
|
StarcoderdataPython
|
11382004
|
from simple_oauth import SimpleSession
sess = SimpleSession(
client_secrets_path='secrets/secret.json',
scope=['https://www.googleapis.com/auth/drive.readonly'],
cache="dict")
file = sess.get_session().get('https://www.googleapis.com/drive/v3/files')
print(file.json())
|
StarcoderdataPython
|
1618566
|
# coding: utf-8
"""
OpenLattice API
OpenLattice API # noqa: E501
The version of the OpenAPI document: 0.0.1
Contact: <EMAIL>
Generated by: https://openapi-generator.tech
"""
from __future__ import absolute_import
import unittest
import datetime
import openlattice
from openlattice.models.association_type import AssociationType # noqa: E501
from openlattice.rest import ApiException
class TestAssociationType(unittest.TestCase):
"""AssociationType unit test stubs"""
def setUp(self):
pass
def tearDown(self):
pass
def make_instance(self, include_optional):
"""Test AssociationType
include_option is a boolean, when False only required
params are included, when True both required and
optional params are included """
# model = openlattice.models.association_type.AssociationType() # noqa: E501
if include_optional :
return AssociationType(
entity_type = {"id":"ec6865e6-e60e-424b-a071-6a9c1603d735","type":{"namespace":"lattice","name":"myentity"},"schemas":{"namespace":"lattice","name":"myschema"},"key":["8f79e123-3411-4099-a41f-88e5d22d0e8d","e39dfdfa-a3e6-4f1f-b54b-646a723c3085"],"properties":["8f79e123-3411-4099-a41f-88e5d22d0e8d","e39dfdfa-a3e6-4f1f-b54b-646a723c3085","fae6af98-2675-45bd-9a5b-1619a87235a8"],"category":"EntityType"},
src = [
'0'
],
dst = [
'0'
],
bidirectional = True
)
else :
return AssociationType(
)
def testAssociationType(self):
"""Test AssociationType"""
inst_req_only = self.make_instance(include_optional=False)
inst_req_and_optional = self.make_instance(include_optional=True)
if __name__ == '__main__':
unittest.main()
|
StarcoderdataPython
|
4827618
|
<gh_stars>1-10
class Grafo:
def __init__(self, nome_arquivo):
self.arestas = []
self.caminho = []
self.graus = []
self.n = 0
self.ler_arquivo(nome_arquivo)
def ler_arquivo(self, nome_arquivo):
arquivo = open(nome_arquivo, 'r')
linha = arquivo.readline()
self.n = int(linha)
linha = arquivo.readline()
while linha:
valores = linha.split()
vertice1 = int(valores[0])
vertice2 = int(valores[1])
self.adicionar_aresta(vertice1, vertice2)
linha = arquivo.readline()
def adicionar_aresta(self, vertice1, vertice2):
self.arestas.append([vertice1, vertice2])
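    # Hierholzer's algorithm (recursive form): starting from a vertex, follow and
    # consume unused edges, recursing on each neighbour, and append the vertex on
    # the way back; reversing self.caminho then gives an Eulerian circuit whenever
    # every vertex has even degree (checked by verificar_pares).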
    def hierholzer(self, vertice):
        # iterate over a snapshot: edges are removed from self.arestas as they are
        # consumed, and removing items from the list being iterated would skip edges
        for aresta in list(self.arestas):
            if aresta not in self.arestas:
                continue
            if vertice == aresta[0]:
                self.arestas.remove(aresta)
                self.hierholzer(aresta[1])
            elif vertice == aresta[1]:
                self.arestas.remove(aresta)
                self.hierholzer(aresta[0])
        self.caminho.append(vertice)
def imprimir_resultado(self):
self.caminho.reverse()
retorno = ""
for vertice in self.caminho:
retorno += " " + str(vertice)
print(retorno)
def verificar_pares(self):
for i in range(self.n):
self.graus.append(0)
for i,j in self.arestas:
self.graus[i] += 1
self.graus[j] += 1
for i in self.graus:
if i % 2 == 1:
return False
return True
def algoritmo(self):
if self.verificar_pares():
self.hierholzer(0)
self.imprimir_resultado()
else:
print("<NAME>")
def main():
grafo = Grafo("Ex1.txt")
grafo.algoritmo()
if __name__ == "__main__":
main()
|
StarcoderdataPython
|
6632837
|
import dash
import numpy as np
from dash.dependencies import Input, Output, State
from dash.exceptions import PreventUpdate
from app import app, dbroot, logger
from .multiplexer import MultiplexerOutput
from .notifications import _prep_notification
def _fill_annotation(adata, cluster_id, value):
"""
Set the annotation for all points with label 'cluster_id' to 'value'.
"""
if isinstance(cluster_id, list):
cluster_id = cluster_id[0]
if isinstance(value, list):
value = value[0]
value = str(value)[:200]
if cluster_id.startswith('main-cluster'):
cluster_id = cluster_id[len('main-cluster'):]
elif cluster_id.startswith('side-cluster'):
cluster_id = cluster_id[len('side-cluster'):]
cluster_id = int(cluster_id)
if 'annotations' not in adata.obs:
adata.obs['annotations'] = np.array(
[""] * adata.shape[0], dtype='U200')
annotations_copy = adata.obs['annotations'].to_numpy().copy()
annotations_copy[adata.obs['labels'].to_numpy() == cluster_id] = value
adata.obs['annotations'] = annotations_copy
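# A minimal usage sketch of _fill_annotation (the anndata import and toy data below
# are illustrative assumptions, not part of this app):
# import anndata
# import numpy as np
# toy = anndata.AnnData(X=np.zeros((4, 2)))
# toy.obs['labels'] = np.array([0, 0, 1, 1])
# _fill_annotation(toy, 'main-cluster0', 'T cells')
# toy.obs['annotations']  # rows labelled 0 now read 'T cells'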
@app.callback(
Output("annotation-signal", "data"),
MultiplexerOutput("push-notification", "data"),
Input("annotation-store-btn", "n_clicks"),
State("main-annotation-select", "value"),
State("side-annotation-select", "value"),
State("annotation-input", "value"),
State("active-plot", "data"),
prevent_initial_call=True
)
def signal_annotation_change(n1, id1, id2, value, actp):
ctx = dash.callback_context
if not ctx.triggered or n1 is None:
raise PreventUpdate
an = 'a1' if actp == 1 else 'a2'
if an not in dbroot.adatas:
raise PreventUpdate
if 'adata' not in dbroot.adatas[an]:
raise PreventUpdate
if len(value) == 0:
return dash.no_update, _prep_notification(
"Empty annotation field.", "warning")
cluster_id = id1 if actp == 1 else id2
try:
_fill_annotation(dbroot.adatas[an]['adata'], cluster_id, value)
except Exception as e:
logger.error(str(e))
error_msg = "An error occurred when storing annotations."
logger.error(error_msg)
return dash.no_update, _prep_notification(error_msg, "danger")
return 1, dash.no_update
def get_update_annotation_table(prefix, an):
def _func(s1, s2, s3):
"""
Return a table of annotations based on the keys
obs['labels'] and obs['annotations']. Only the clusters
for which annotations exist (i.e., != "") will be displayed.
"""
ctx = dash.callback_context
if not ctx.triggered:
raise PreventUpdate
data = [
{
"cluster_id": "N/A",
"annotation": "N/A"
}
]
if an not in dbroot.adatas:
return data
if 'adata' not in dbroot.adatas[an]:
return data
# Need labels and annotations keys to be populated
if 'labels' not in dbroot.adatas[an]['adata'].obs:
# logger.warn("No labels found in adata.")
return data
if 'annotations' not in dbroot.adatas[an]['adata'].obs:
# logger.warn("No annotations found in adata.")
return data
# Get cluster ID's and the first index for each
# so that we can also get annotations
unq_labels, unq_indices = np.unique(
dbroot.adatas[an]['adata'].obs['labels'].to_numpy(),
return_index=True)
unq_annotations = dbroot.adatas[an][
'adata'][unq_indices].obs['annotations']
data = [
{'cluster_id': str(i), 'annotation': str(j)}
for i, j in zip(unq_labels, unq_annotations)
if j != ""
]
return data
return _func
for prefix, an in zip(['main', 'side'], ['a1', 'a2']):
app.callback(
Output(prefix + "-annotation-table", "data"),
Input("annotation-signal", "data"),
Input("data-loaded-annotation-table-signal", "data"),
Input(prefix + "-cluster-list-signal", "data"),
prevent_initial_call=True
)(get_update_annotation_table(prefix, an))
|
StarcoderdataPython
|
9688981
|
<gh_stars>0
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Tue Aug 31 17:08:48 2021
@author: jstout
"""
from ..bstags import txt_to_tag
from ..bstags import write_tagfile
import pytest
import os
# =============================================================================
# Tests
# =============================================================================
test_dir = os.path.dirname(os.path.abspath(__file__))
def test_txt_to_tag():
txtname=os.path.join(test_dir, 'test_txt.txt')
test_tags = {'Nasion': "'Nasion' -1.5966 -123.1359 2.1943",
'Left Ear': "'Left Ear' 80.8481 -39.0185 -48.2379",
'Right Ear': "'Right Ear' -75.3443 -44.3777 -48.1843"}
tags = txt_to_tag(txtname)
assert tags == test_tags
def test_write_tagfile(tmpdir):
txtname=os.path.join(test_dir, 'test_txt.txt')
tags = txt_to_tag(txtname)
name, ext = os.path.splitext(txtname)
if ext != ".txt":
name = txtname
tagname = "{}.tag".format(name)
tagname = os.path.basename(tagname)
outfile = os.path.join(tmpdir, tagname)
write_tagfile(tags, out_fname=outfile)
with open(outfile) as w:
test_test=w.readlines()
test_tagfile = os.path.join(test_dir, 'test_txt.tag')
with open(test_tagfile) as w:
test_valid=w.readlines()
assert test_test == test_valid
def test_exported_w_extra_spaces():
txtname=os.path.join(test_dir, 'Exported_Electrodes.txt')
tags = txt_to_tag(txtname)
assert len(tags)==3
assert 'Nasion' in tags
assert 'Left Ear' in tags
assert 'Right Ear' in tags
assert 'Nasion ' not in tags #Make sure that extra space not included
|
StarcoderdataPython
|
3531359
|
# When comparing Python to Java, Python is far less verbose. Please see Verbosity.java for an example to compare the two.
sides = {"left": 1, "right": 2, "top": 3, "bottom": 4}  # plain dict literal, no type declarations or boilerplate needed
|
StarcoderdataPython
|
8179657
|
from django.contrib.auth.backends import BaseBackend
from accounts.models import Customer, ServiceProvider
class PhoneNumberPasswordBackend(BaseBackend):
def authenticate(self, request, phone_number=None, password=None):
try:
customer = Customer.objects.get(phone_number=phone_number)
except Customer.DoesNotExist:
return None
else:
if password: # if the password is sent via the form
if customer.check_password(password):
return customer
else:
return None
return customer
def get_user(self, user_id):
try:
customer = Customer.objects.get(pk=user_id)
except Customer.DoesNotExist:
return None
else:
return customer
class ServiceProviderAuthentication(BaseBackend):
    def authenticate(self, request, username=None, password=None):
        try:
            # note: this matches the stored value directly, which assumes
            # ServiceProvider passwords are kept as plain text in the database
            user = ServiceProvider.objects.get(username=username, password=password)
return user
except ServiceProvider.DoesNotExist:
return None
def get_user(self, user_id):
try:
return ServiceProvider.objects.get(pk=user_id)
except ServiceProvider.DoesNotExist:
return None
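# A minimal wiring sketch (the dotted module path 'accounts.backends' is an assumption):
# these backends only take effect when listed in settings.AUTHENTICATION_BACKENDS, e.g.
#
# AUTHENTICATION_BACKENDS = [
#     'accounts.backends.PhoneNumberPasswordBackend',
#     'accounts.backends.ServiceProviderAuthentication',
#     'django.contrib.auth.backends.ModelBackend',
# ]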
|
StarcoderdataPython
|
9626077
|
<reponame>Baughn/nixgan<filename>jax-diffusion/jax-guided-diffusion/diffusion_models/common.py
import numpy as np
import jax
import jax.numpy as jnp
import jax.scipy as jsp
import jaxtorch
from jaxtorch import PRNG, Context, Module, nn, init
from dataclasses import dataclass
from functools import partial
import math
# Common nn modules.
class SkipBlock(nn.Module):
def __init__(self, main, skip=None):
super().__init__()
self.main = nn.Sequential(*main)
self.skip = skip if skip else nn.Identity()
def forward(self, cx, input):
return jnp.concatenate([self.main(cx, input), self.skip(cx, input)], axis=1)
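# Descriptive note on FourierFeatures below: the input (e.g. a timestep) is projected
# through a Gaussian-initialized weight matrix and mapped to [cos, sin] pairs, i.e. a
# Fourier-feature encoding that gives the network a smooth embedding of its conditioning.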
class FourierFeatures(nn.Module):
def __init__(self, in_features, out_features, std=1.):
super().__init__()
assert out_features % 2 == 0
self.weight = init.normal(out_features // 2, in_features, stddev=std)
def forward(self, cx, input):
f = 2 * math.pi * input @ cx[self.weight].transpose()
return jnp.concatenate([f.cos(), f.sin()], axis=-1)
class AvgPool2d(nn.Module):
def forward(self, cx, x):
[n, c, h, w] = x.shape
x = x.reshape([n, c, h//2, 2, w//2, 2])
x = x.mean((3,5))
return x
def expand_to_planes(input, shape):
return input[..., None, None].broadcast_to(list(input.shape) + [shape[2], shape[3]])
Tensor = None
@dataclass
@jax.tree_util.register_pytree_node_class
class DiffusionOutput:
v: Tensor
pred: Tensor
eps: Tensor
def tree_flatten(self):
return [self.v, self.pred, self.eps], []
@classmethod
def tree_unflatten(cls, static, dynamic):
return cls(*dynamic)
def __mul__(self, scalar):
return DiffusionOutput(self.v * scalar, self.pred * scalar, self.eps * scalar)
def __add__(self, other):
return DiffusionOutput(self.v + other.v,
self.pred + other.pred,
self.eps + other.eps)
@jax.tree_util.register_pytree_node_class
class Partial(object):
"""Wrap a function with arguments as a jittable object."""
def __init__(self, f, *args, **kwargs):
self.f = f
self.args = args
self.kwargs = kwargs
self.p = partial(f, *args, **kwargs)
def __call__(self, *args, **kwargs):
return self.p(*args, **kwargs)
def tree_flatten(self):
return [self.args, self.kwargs], [self.f]
def tree_unflatten(static, dynamic):
[args, kwargs] = dynamic
[f] = static
return Partial(f, *args, **kwargs)
def make_partial(f):
def p(*args, **kwargs):
return Partial(f, *args, **kwargs)
return p
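# A minimal usage sketch of make_partial (names below are illustrative, not from this module):
# it binds leading arguments into a Partial, which is a registered pytree and can therefore
# cross jax.jit boundaries and be called inside traced code.
#
# @make_partial
# def scaled_add(scale, x, y):
#     return scale * (x + y)
#
# f = scaled_add(2.0)                              # Partial carrying scale=2.0
# jax.jit(lambda g, a, b: g(a, b))(f, 1.0, 2.0)    # -> 6.0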
def make_cosine_model(model):
@make_partial
@jax.jit
def forward(params, x, cosine_t, key, **kwargs):
n = x.shape[0]
cx = Context(params, key).eval_mode_()
return model(cx, x, cosine_t.broadcast_to([n]), **kwargs)
return forward
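# Descriptive note on blur_fft below: it applies an isotropic Gaussian blur via the
# convolution theorem, multiplying the image and kernel spectra; jnp.roll centers the
# kernel on index 0 so the FFT-based (circular) convolution does not shift the image.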
@jax.jit
def blur_fft(image, std):
std = jnp.asarray(std).clamp(1e-18)
[n, c, h, w] = image.shape
dy = jnp.arange(-(h-1)//2, (h+1)//2)
dy = jnp.roll(dy, -(h-1)//2)
dx = jnp.arange(-(w-1)//2, (w+1)//2)
dx = jnp.roll(dx, -(w-1)//2)
distance = dy[:, None]**2 + dx[None, :]**2
kernel = jnp.exp(-0.5 * distance / std**2)
kernel /= kernel.sum()
return jnp.fft.ifft2(jnp.fft.fft2(image, norm='forward') * jnp.fft.fft2(kernel, norm='backward'), norm='forward').real
def Normalize(mean, std):
mean = jnp.array(mean).reshape(3,1,1)
std = jnp.array(std).reshape(3,1,1)
def forward(image):
return (image - mean) / std
return forward
def norm1(x):
"""Normalize to the unit sphere."""
return x / x.square().sum(axis=-1, keepdims=True).sqrt().clamp(1e-12)
|
StarcoderdataPython
|