id (string, length 1-265) | text (string, length 6-5.19M) | dataset_id (7 classes) |
---|---|---|
3324181 | <filename>sites_multidb/middleware.py
from django.conf import settings
from django.contrib.sites.models import Site
from django.http import HttpResponseNotFound
from .utils import site_class
HOST_CACHE = {}
class DynamicSiteMiddleware:
def __init__(self, get_response):
self.get_response = get_response
@staticmethod
def _get_site(request):
host = request.get_host()
shost = host.rsplit(':', 1)[0] # Host without port
try:
# Check cache first
return HOST_CACHE[host]
except KeyError:
pass
try:
# Check DB
site = site_class.objects.get(domain=host)
HOST_CACHE[host] = site
return site
except Site.DoesNotExist:
pass
if shost != host:
# Check DB for host without port
try:
site = site_class.objects.get(domain=shost)
HOST_CACHE[host] = site
return site
except Site.DoesNotExist:
pass
return None
def __call__(self, request):
site = self._get_site(request)
if site:
settings.SITE_ID.set(site.pk)
request.subdomain = site
else:
# No matching site
return HttpResponseNotFound()
response = self.get_response(request)
return response
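# Illustrative wiring (assumptions, not part of the original file): add
# "sites_multidb.middleware.DynamicSiteMiddleware" to the MIDDLEWARE list in
# Django settings. Note that settings.SITE_ID is assumed to be a
# thread-local-style object exposing .set(), as used above, rather than
# Django's default integer SITE_ID.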
| StarcoderdataPython |
138290 | <reponame>macbre/Mike<filename>mycroft_holmes/sources/athena.py
"""
AWS Athena class
"""
from pyathena import connect
from .base import DatabaseSourceBase
class AthenaSource(DatabaseSourceBase):
"""
Returns a number result for a given SQL query run on AWS Athena.
https://aws.amazon.com/athena/
#### `sources` config
```yaml
sources:
- name: foo/athena
kind: aws/athena
access_key_id: "${ATHENA_ACCESS_KEY_ID}"
secret_access_key: "${ATHENA_SECRET}"
s3_staging_dir: "${ATHENA_S3_STAGING_DIR}"
region: "us-east-1"
```
> `s3_staging_dir` is the S3 location to which your query output is written,
for example `s3://query-results-bucket/folder/`, which is established under Settings
in the [Athena Console](https://console.aws.amazon.com/athena/).
#### `metrics` config
```yaml
metrics:
- name: foo/wikis
source: foo/athena
query: "SELECT count(*) FROM stats.wikis WHERE lang = %(wiki_lang)s"
label: "{wiki_lang} wikis count: %d"
```
Please note that only the first column from the first row in the results set will be taken.
#### `features` config
```yaml
features:
- name: Wikis
template:
- wiki_lang: "is" # this will be used in query defined above
metrics:
- name: foo/wikis
```
"""
NAME = 'aws/athena'
# pylint: disable=too-many-arguments
def __init__(self, access_key_id, secret_access_key, s3_staging_dir, region, client=None):
"""
:type access_key_id str
:type secret_access_key str
:type s3_staging_dir str
:type region str
:type client obj
"""
super().__init__()
self._connection_params = dict(
aws_access_key_id=access_key_id,
aws_secret_access_key=secret_access_key,
s3_staging_dir=s3_staging_dir,
region_name=region
)
self._client = client or None
def _get_client(self):
"""
Connect to Athena lazily
        :rtype: pyathena.connection.Connection
"""
self.logger.info('Connecting to Athena in "%s"...',
self._connection_params['region_name'])
# https://pypi.org/project/PyAthena/
return connect(**self._connection_params)
| StarcoderdataPython |
1652257 | <reponame>ereide/pyga-camcal
import numpy as np
from pygacal.common.cgatools import *
#TODO: from clifford_tools.common.g3c.core import *
from .costfunction import ( sumLineSquaredErrorCost, line2lineErrorCost,
sumPlaneSquaredErrorCost, plane2planeErrorCost,
sumLineLogErrorCost, sumImageFunction,
sumImageThreeViewAllPairsCostFunction, sumImageMultiViewCostFunction,
restrictedCostFunction, restrictedImageCostFunction, restrictedMultiViewImageCostFunction,
lineEstimationErrorCost, restrictedMultiViewBaseImageCostFunction, sumImageMultiViewBaseImageCostFunction,
sumWeightedImageFunction, logSumWeightedLineSquaredErrorCost, sumLogWeightedLineSquaredErrorCost,
sumWeightedLineSquaredErrorCost)
def bivectorToVecRepr(B):
x = np.zeros(6)
t = B | ep
x[0:3] = MVto3DVec(t)
P = (B - t * ninf)
x[3:6] = -float(P|e12), -float(P|e13), -float(P|e23)
return x
def vecReprToBivector(x):
"""
I will be using the conversion x \in R^(3 + 3)
B = alpha * P + t * ninf requiring 6 parameters
Constraint: abs(alpha) < pi
R = exp(B)
"""
alphaP = x[3]*e12 + x[4]*e13 + x[5]*e23
t = x[0]*e1 + x[1] * e2 + x[2] *e3
B = alphaP + t * ninf
return B
def extendedVecReprToBivector(x):
"""
I will be using the conversion x \in R^(3 + 3 + 1)
B = alpha * P + t * ninf + omega * E0 requiring 6 parameters
Constraint: abs(alpha) < pi
R = exp(B)
"""
B = vecReprToBivector(x[:6]) + x[6] * E0
return B
class Mapping(object):
name = "Mapping"
constraints = None
bounds = None
opt_method = 'L-BFGS-B'
costfunction = None
color = 'b'
costfunctiondecorator = None
callback = None
@staticmethod
def rotorconversion(x):
raise NotImplementedError
class BivectorMapping(Mapping):
name = "BivectorMapping"
constraints = None
bounds = None
opt_method = 'L-BFGS-B'
costfunction = sumLineSquaredErrorCost
color = 'b'
costfunctiondecorator = restrictedCostFunction
@staticmethod
def rotorconversion(x):
#return rotorconversion_fast(x) #BROKEN
return ga_exp(vecReprToBivector(x))
@staticmethod
def inverserotorconversion(R):
B = ga_log(R)
return bivectorToVecRepr(B)
@staticmethod
def startValue():
return np.random.rand(6)
#return np.zeros(6) #Equivalent to no rotation
class BivectorLineMapping(BivectorMapping):
name = "BivectorLineMapping"
costfunction = sumLineSquaredErrorCost
class BivectorWeightedLineMapping(BivectorMapping):
name = "BivectorWeightedLineMapping"
costfunction = sumWeightedLineSquaredErrorCost(1)
##Needs a cost function to be implemented
class BivectorLogSumLineMapping(BivectorMapping):
name = "BivectorLogSumLineMapping"
    costfunction = logSumWeightedLineSquaredErrorCost(weight=1)
class BivectorSumLogLineMapping(BivectorMapping):
name = "BivectorSumLogLineMapping"
costfunction = sumLogWeightedLineSquaredErrorCost(weight = 1)
class BivectorLineMultMapping(BivectorMapping):
name = "BivectorMultLineMapping"
#TODO: costfunction = sumLineMultSquaredErrorCost
class BivectorLogCostLineMapping(BivectorMapping):
name = "BivectorLogCostLineMapping"
color = 'b'
costfunction = sumLineLogErrorCost
class BivectorPlaneMapping(BivectorMapping):
name = "BivectorPlaneMapping"
costfunction = sumPlaneSquaredErrorCost
class ExtendedBivectorMapping(Mapping):
name = "ExtendedBivectorMapping"
costfunction = sumLineSquaredErrorCost
color = 'b'
costfunctiondecorator = restrictedCostFunction
@staticmethod
def rotorconversion(x):
return ga_exp(extendedVecReprToBivector(x))
@staticmethod
def inverserotorconversion(R):
B = ga_log(R)
return bivectorToVecRepr(B)
@staticmethod
def startValue():
return np.random.rand(7)
class LinePropertyBivectorMapping(BivectorMapping):
name = "LinePropertyBivectorMapping"
color = 'g'
costfunction = line2lineErrorCost
class PlanePropertyBivectorMapping(BivectorMapping):
name = "PlanePropertyBivectorMapping"
color = 'g'
costfunction = plane2planeErrorCost
class RotorMapping(Mapping):
name = "RotorMapping"
color = 'y'
constraints = None
opt_method = 'L-BFGS-B'
bounds = None
costfunction = None
costfunctiondecorator = restrictedCostFunction
@staticmethod
def startValue():
x0 = np.zeros(8)
x0[0] = 1
return x0
@staticmethod
def rotorconversion(x):
"""
I will be using the conversion x \in R^(1 + 3 + 3 + 1)
R = alpha + B + c * ninf + gamma * I3 * ninf
"""
alpha = x[0]
B = x[1] * e12 + x[2] * e13 + x[3]*e23
c = x[4] * e1 + x[5]*e2 + x[6] * e3
gamma = x[7]
R = alpha + B + c * ninf + gamma * I3 * ninf
return R.normal()
class RotorLineMapping(RotorMapping):
name = 'RotorLineMapping'
costfunction = sumLineSquaredErrorCost
#For line estimation:
class BivectorLineEstimationMapping(BivectorLineMapping):
name = "BivectorLineEstimationMapping"
costfunction = lineEstimationErrorCost
costfunctiondecorator = restrictedCostFunction
#For images:
class BivectorLineImageMapping(BivectorMapping):
costfunction = sumImageFunction
costfunctiondecorator = restrictedImageCostFunction
#For images:
class ExtendedBivectorLineImageMapping(ExtendedBivectorMapping):
costfunction = sumImageFunction
costfunctiondecorator = restrictedImageCostFunction
class BivectorWeightedLineImageMapping(BivectorMapping):
costfunction = sumWeightedImageFunction
costfunctiondecorator = restrictedImageCostFunction
class MultiViewLineImageMapping(Mapping):
name = "MultiViewMapping"
constraints = None
bounds = None
opt_method = 'L-BFGS-B'
costfunction = sumImageMultiViewBaseImageCostFunction
color = 'b'
costfunctiondecorator = restrictedMultiViewBaseImageCostFunction
@staticmethod
def rotorconversion(x):
"""
See above
"""
K = x.size//6
#R_list = [rotorconversion_fast(x[6*i: 6*i + 6]) for i in range(K)] #BROKEN
R_list = [ga_exp(vecReprToBivector(x[6*i: 6*i + 6])) for i in range(K)]
return R_list
@staticmethod
def inverserotorconversion(R_list):
K = len(R_list)
x = np.zeros(6*K)
for i in range(K):
            B = ga_log(R_list[i])
x[i*6:(i+1)*6] = bivectorToVecRepr(B)
return x
@staticmethod
def startValue():
return np.random.rand(12)
class ThreeViewLineImageMapping(MultiViewLineImageMapping):
costfunction = sumImageThreeViewAllPairsCostFunction
costfunctiondecorator = restrictedMultiViewImageCostFunction
| StarcoderdataPython |
3314814 | # Bank
import time as t
import os
try :
with open("balance.txt") as f:
with open("balance.txt","r") as f:
read = f.read()
if read == "":
with open("balance.txt","w") as f:
write = f.write("0")
except FileNotFoundError:
with open("balance.txt","w") as f:
f.write("0")
print("Welcome To 💰😂 Chillar Bank 😂💰 \n 1 - check your current balance \n 2 - Add money 💰 \n 3 - Withdraw money \n q - quit ")
t.sleep(2)
command = input()
class Bank:
# Command execution
def Command_execution(self):
work = True
while work == True:
if command == "1":
print(f"Your current Balance: ₹{my_acc.balance()}")
break
elif command == "2":
t.sleep(1)
my_acc.add_money()
break
elif command == "3":
t.sleep(1)
my_acc.take_money()
break
elif command == "q":
work = False
bank_name = "Chillar"
with open("balance.txt") as f:
bank_balance_int = int(f.read())
def balance(self):
return self.bank_balance_int
def add_money(self):
add = int(input("How much money u want to add"))
self.bank_balance_int += add
with open("balance.txt","w") as f:
f.write(str(self.bank_balance_int))
return print(f"Money added! \n Your updated balance now: ₹{self.bank_balance_int}")
def take_money(self):
take = int(input("how much money u want ?"))
if self.bank_balance_int < take:
t.sleep(1)
print(f"Sorry sir you dont have sufficient balance to withdraw ❗ \n Your balance : ₹{self.bank_balance_int}")
else:
self.bank_balance_int -= take
with open("balance.txt","w") as f:
f.write(str(self.bank_balance_int))
print (f"Withdraw Succesfull! updated balance: ₹{self.bank_balance_int}")
my_acc = Bank()
loop = my_acc.Command_execution()
#------------------------------------------------------------------------------------#
| StarcoderdataPython |
3311144 | from mongoengine import (
BooleanField,
DateTimeField,
IntField,
StringField,
ObjectIdField,
ListField,
EmbeddedDocument,
EmbeddedDocumentField,
)
from maestro_api.db.mixins import CreatedUpdatedDocumentMixin
from maestro_api.libs.datetime import strftime
class RunConfigurationSchedule(EmbeddedDocument):
days = ListField(field=StringField(), required=True)
time = StringField(required=True)
class RunConfigurationHosts(EmbeddedDocument):
host = StringField(required=True)
ip = StringField(required=True)
class RunConfigurationCustomProperty(EmbeddedDocument):
name = StringField(required=True)
value = StringField(required=True)
class RunConfigurationLoadProfile(EmbeddedDocument):
start = IntField(required=True)
end = IntField(required=True)
duration = IntField(required=True)
class RunConfiguration(CreatedUpdatedDocumentMixin):
title = StringField(required=True)
run_plan_id = ObjectIdField(required=True)
workspace_id = ObjectIdField(required=True)
agent_ids = ListField(
required=True,
field=ObjectIdField(),
)
hosts = ListField(field=EmbeddedDocumentField(RunConfigurationHosts), default=[])
custom_data_ids = ListField(field=ObjectIdField(), default=[])
custom_properties = ListField(
field=EmbeddedDocumentField(RunConfigurationCustomProperty), default=[]
)
load_profile = ListField(
field=EmbeddedDocumentField(RunConfigurationLoadProfile), default=[]
)
labels = ListField(field=StringField(), default=[])
is_schedule_enabled = BooleanField(default=False)
schedule = EmbeddedDocumentField(RunConfigurationSchedule)
last_scheduled_at = DateTimeField()
def to_dict(self):
return {
"id": str(self.id),
"run_plan_id": str(self.run_plan_id),
"workspace_id": str(self.workspace_id),
"agent_ids": [str(agent_id) for agent_id in self.agent_ids],
"custom_data_ids": [
str(custom_data_id) for custom_data_id in self.custom_data_ids
],
"title": self.title,
"hosts": [{"host": host.host, "ip": host.ip} for host in self.hosts],
"custom_properties": [
{
"name": custom_property.name,
"value": custom_property.value,
}
for custom_property in self.custom_properties
],
"load_profile": [
{
"start": load_step.start,
"end": load_step.end,
"duration": load_step.duration,
}
for load_step in self.load_profile
],
"labels": self.labels,
"is_schedule_enabled": self.is_schedule_enabled,
"schedule": {
"days": [day for day in self.schedule.days],
"time": self.schedule.time,
}
if self.schedule
else None,
"created_at": strftime(self.created_at),
"updated_at": strftime(self.updated_at),
}
| StarcoderdataPython |
from simplified_scrapy.core.spider import Spider  # as SP
from simplified_scrapy.simplified_doc import SimplifiedDoc
# class Spider(SP):
# pass | StarcoderdataPython |
1640751 | #!/usr/bin/python
VERSION = 0.92
#---------------------------------
# BAGEL: Bayesian Analysis of Gene EssentiaLity for python 3
# (c) <NAME>, 02/2015.
# modified 12/2019 for random seed option
# Free to modify and redistribute with attribution
#---------------------------------
from numpy import *
import scipy.stats as stats
import sys, getopt
import time
helptext = ('\n'
'BAGEL.py -i [fold change file] -o [output file] -e [reference essentials] -n [reference nonessentials] -c [columns to test]\n'
'\n'
' from the Bayesian Analysis of Gene EssentiaLity (BAGEL) suite\n'
' Version ' + str(VERSION) + '\n'
'\n'
' required options:\n'
' -i [fold change file] Tab-delmited file of reagents and fold changes. See documentation for format.\n'
' -o [output file] Output filename\n'
' -e [reference essentials] File with list of training set of essential genes\n'
' -n [reference nonessentials] File with list of training set of nonessential genes\n'
' -c [columns to test] comma-delimited list of columns in input file to include in analyisis\n'
'\n'
' other options:\n'
' --numiter=N Number of bootstrap iterations (default 1000)\n'
' -s, --seed=N Random seed by user input\n'
' -h, --help Show this help text\n'
'\n'
' Example:\n'
' BAGEL.py -i foldchange_file -o experiment.bf -e essentials_training_set -n nonessentials_training_set -c 1,2,3\n'
'\n'
' Calculates a log2 Bayes Factor for each gene; positive BFs indicate confidence that the gene is essential.\n'
' writes to [output file]: gene name, mean Bayes Factor across all iterations, std deviation of BFs, and number of iterations\n'
            ' in which the gene was part of the test set (and a BF was calculated)\n'
'\n')
NUM_BOOTSTRAPS = 1000
try:
opts, args = getopt.getopt(sys.argv[1:], "hi:o:c:e:n:s:", ["numiter=","seed=","help"])
except getopt.GetoptError:
print(helptext)
sys.exit(2)
seed=int(time.time() * 100000 % 100000)
for opt, arg in opts:
if opt in ( '-h', '--help'):
print(helptext)
sys.exit()
elif opt in ( '-s', '--seed'):
seed = int(arg)
elif opt == '-i':
foldchangefile = arg
elif opt == '-o':
outfilename = arg
elif opt == '-e':
ess_ref = arg
elif opt == '-n':
non_ref = arg
elif opt == '-c':
columns = arg.split(',')
elif opt == '--numiter':
NUM_BOOTSTRAPS = int(arg)
# set random seed
random.seed(seed)
column_list = [int(c) for c in columns]
FC_THRESH = 2**-7
genes={}
fc = {}
def round_to_hundredth(x):
return around( x*100) / 100.0
def bootstrap_resample(X, n=None):
""" Bootstrap resample an array_like
Parameters
----------
X : array_like
data to resample
n : int, optional
length of resampled array, equal to len(X) if n==None
Results
-------
returns X_resamples
adapted from
Dated 7 Oct 2013
http://nbviewer.ipython.org/gist/aflaxman/6871948
"""
    if n is None:
n = len(X)
resample_i = floor(random.rand(n)*len(X)).astype(int)
X_resample = X[resample_i]
return X_resample
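# Illustrative behaviour of the helper above: resampling keeps the array
# length and draws entries with replacement, e.g.
#   idx = bootstrap_resample(arange(10))   # 10 values sampled from 0..9
#   len(idx) == 10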
#
# LOAD FOLDCHANGES
#
fin = open(foldchangefile)
skipfields = fin.readline().rstrip().split('\t')
for i in column_list:
print("Using column: " + skipfields[i+1])
for line in fin:
fields = line.rstrip().split('\t')
gsym = fields[1]
genes[ gsym ]=1
if ( not gsym in fc ):
fc[gsym]=[] # initialize dict entry as a list
for i in column_list:
fc[gsym].append( float(fields[i + 1])) # per user docs, GENE is column 0, first data column is col 1.
genes_array = array( list(genes.keys()) )
gene_idx = arange( len( genes ) )
#print "Number of gRNA loaded: " + str( len(genes_array) )
print("Number of unique genes: " + str( len(genes) ))
#
# DEFINE REFERENCE SETS
#
coreEss = []
fin = open(ess_ref)
for line in fin:
coreEss.append( line.rstrip().split('\t')[0] )
fin.close()
coreEss=array(coreEss)
print("Number of reference essentials: " + str(len(coreEss)))
nonEss = []
fin = open(non_ref)
for line in fin:
nonEss.append( line.rstrip().split('\t')[0] )
fin.close()
nonEss = array(nonEss)
print("Number of reference nonessentials: " + str(len(nonEss)))
#
# INITIALIZE BFS
#
bf = {}
for g in genes_array:
bf[g]=[]
#
# BOOTSTRAP ITERATIONS
#
print("Iter", end=' ')
print("TrainEss", end=' ')
print("TrainNon", end=' ')
print("TestSet")
sys.stdout.flush()
for loop in range(NUM_BOOTSTRAPS):
print(str(loop), end=' ')
#
# bootstrap resample from gene list to get the training set
#
gene_train_idx = bootstrap_resample(gene_idx)
#
# test set for this iteration is everything not selected in bootstrap resampled training set
#
gene_test_idx = setxor1d(gene_idx, gene_train_idx)
#
# define essential and nonessential training sets: arrays of indexes
#
train_ess = where( in1d( genes_array[gene_train_idx], coreEss))[0]
train_non = where( in1d( genes_array[gene_train_idx], nonEss))[0]
print(len(train_ess), end=' ')
print(len(train_non), end=' ')
print(len(gene_test_idx))
sys.stdout.flush()
#
# define ess_train: vector of observed fold changes of essential genes in training set
#
ess_train_fc_list_of_lists = [ fc[x] for x in genes_array[gene_train_idx[train_ess]] ]
ess_train_fc_flat_list = [obs for sublist in ess_train_fc_list_of_lists for obs in sublist]
#
# define non_train vector of observed fold changes of nonessential genes in training set
#
non_train_fc_list_of_lists = [ fc[x] for x in genes_array[gene_train_idx[train_non]] ]
non_train_fc_flat_list = [obs for sublist in non_train_fc_list_of_lists for obs in sublist]
#
# calculate empirical fold change distributions for both
#
kess = stats.gaussian_kde( ess_train_fc_flat_list )
knon = stats.gaussian_kde( non_train_fc_flat_list )
#
# define empirical upper and lower bounds within which to calculate BF = f(fold change)
#
x = arange(-10,2,0.01)
nonfitx = knon.evaluate(x)
# define lower bound empirical fold change threshold: minimum FC where knon is above threshold
f = where( nonfitx > FC_THRESH)
xmin = round_to_hundredth( min(x[f]) )
# define upper bound empirical fold change threshold: minimum value of log2(ess/non)
subx = arange( xmin, max(x[f]), 0.01)
logratio_sample = log2( kess.evaluate(subx) / knon.evaluate(subx) )
f = where( logratio_sample == logratio_sample.min() )
xmax = round_to_hundredth( subx[f] )
#
# round foldchanges to nearest 0.01
# precalculate logratios and build lookup table (for speed)
#
logratio_lookup = {}
for i in arange(xmin, xmax+0.01, 0.01):
logratio_lookup[around(i*100)] = log2( kess.evaluate(i) / knon.evaluate(i) )
#
# calculate BFs from lookup table for withheld test set
#
for g in genes_array[gene_test_idx]:
foldchanges = array( fc[g] )
foldchanges[foldchanges<xmin]=xmin
foldchanges[foldchanges>xmax]=xmax
bayes_factor = sum( [ logratio_lookup[ around( x * 100 ) ] for x in foldchanges ] )
bf[g].append(bayes_factor)
fout = open(outfilename, 'w')
fout.write('GENE\tBF\tSTD\tNumObs\n')
for g in sorted( bf.keys() ):
num_obs = len( bf[g] )
bf_mean = mean( bf[g] )
bf_std = std( bf[g] )
bf_norm = ( bf[g] - bf_mean ) / bf_std
#dstat, pval = stats.kstest( bf_norm, 'norm')
fout.write('{0:s}\t{1:4.3f}\t{2:4.3f}\t{3:d}\n'.format( g, bf_mean, bf_std, num_obs ) )
fout.close()
| StarcoderdataPython |
4908 | <reponame>ProtKsen/pgame
"""Text parts."""
SEPARATOR = '----------------------------------'
CONT_GAME = 'enter для продолжения игры'
GREETING = 'Добро пожаловать в игру ''Сундук сокровищ''!\n' \
'Попробуй себя в роли капитана корабля, собери ' \
'команду и достань все сокровища!'
NAME_QUESTION = 'Как тебя зовут?'
CHOOSE_LEVEL = 'Выбери уровень сложности, он влияет на стоимость ' \
'сокровищ на островах. \n' \
'1 - легко \n' \
'2 - средне \n' \
'3 - тяжело'
INTRODUCTION = 'В наследство от дядюшки тебе достался корабль, \n' \
'несколько золотых монет и карта, на которой \n' \
'отмечены 10 островов. На каждом из островов \n' \
'зарыт клад. Но для того, чтобы достать его, \n' \
'необходимо обезвредить ловушку. Чем больше \n' \
'порядковый номер острова, тем ценнее хранящееся \n' \
'на нем сокровище и тем труднее его получить. \n\n' \
'Цель игры - добыть все сокровища и скопить как можно больше монет. \n\n' \
'Команда твоего корабля сможет обезвредить ловушку, \n' \
'только если будет иметь нужное количество очков \n' \
'логики, силы и ловкости. \n\n' \
'!!! Сумма всех требуемых очков равна номеру острова,\n' \
'но точная комбинация тебе неизвестна. !!!'
ORACLE_QUESTION = 'Здесь неподалеку живет известный оракул. За определенную\n' \
'плату он сможет предсказать с какой ловушкой\n' \
'ты столкнешься на острове. Пойдешь ли ты к нему?\n' \
'----------------------------------\n'\
'1 - да, пойду\n' \
'2 - нет, сам разберусь'
ORACLE_QUESTION_1 = 'Что ты хочешь узнать у оракула? \n' \
'----------------------------------\n'\
'1 - я передумал, буду сам себе оракул! \n'\
'2 - сколько очков логики должно быть у команды? (1 монета) \n'\
'3 - сколько очков силы должно быть у команды? (1 монета) \n'\
'4 - сколько очков ловкости должно быть у команды? (1 монета) \n'\
'5 - узнать все требуемые характеристики (3 монеты)'
ORACLE_QUESTION_2 = 'Что ты хочешь узнать у оракула? \n' \
'----------------------------------\n'\
'1 - я передумал, буду сам себе оракул! \n'\
'2 - сколько очков логики должно быть у команды? (1 монета) \n'\
'3 - сколько очков силы должно быть у команды? (1 монета) \n'\
'4 - сколько очков ловкости должно быть у команды? (1 монета)'
GO_TAVERN_TEXT = 'Отлично! Для похода на остров тебе понадобится \n' \
'команда, а нанять ее ты сможешь в таверне.'
EXIT_QUESTION = 'Продолжить игру?\n' \
'----------------------------------\n'\
'1 - да\n' \
'2 - нет'
SUCCESS_STEP = 'Поздравляю! Ты смог достать спрятанное сокровище! \n' \
'Самое время готовиться к следующему походу.'
FAILURE_STEP = 'К сожалению, ты не смог достать сокровище. \n' \
'Если у тебя еще остались монеты, то можешь \n' \
'попробовать организовать поход заново. Удачи!'
WINNING = 'Поздравляю! Ты собрал сокровища со всех окрестных \n' \
'островов, можешь выкинуть ненужную теперь карту) \n' \
'Конец игры.'
LOSING = 'Сожалею, ты потратил все деньги. Карьера пиратского \n' \
'капитана подошла к концу. А дядюшка в тебя верил! \n' \
'Конец игры.'
NAMES = ['Боб', 'Ричард', 'Алан', 'Степан', 'Грозный Глаз', 'Гарри',
'Моррис', 'Джек', 'Алекс', 'Сэм', 'Том', 'Янис', 'Геральт',
'Ринсвинд', 'Купер', 'Борис', 'Джон', 'Рон']
| StarcoderdataPython |
1766702 | import pytest
from schemaperfect import SchemaBase, SchemaModuleGenerator
@pytest.fixture
def schema():
return {
'definitions': {
'Person': {
'properties': {
'name': {'type': 'string'},
'age': {'type': 'integer'},
}
}
},
'properties': {
'family_name': {
'type': 'string'
},
'people': {
'type': 'array',
'items': {'$ref': '#/definitions/Person'}
}
},
'required': ['family_name']
}
def test_module_code(schema):
gen = SchemaModuleGenerator(schema, root_name='Family')
code = gen.module_code()
namespace = {}
exec(code, namespace)
Family = namespace['Family']
Person = namespace['Person']
assert issubclass(Family, SchemaBase)
assert issubclass(Person, SchemaBase)
assert Family._property_names == ('family_name', 'people')
family = Family(family_name='Smith', people=[Person(name='Alice', age=25), Person(name='Bob', age=26)])
dct = family.to_dict()
assert dct == {'family_name': 'Smith', 'people': [{'name': 'Alice', 'age': 25}, {'name': 'Bob', 'age': 26}]}
family2 = Family.from_dict(dct)
assert family2.to_dict() == dct
# noinspection PyUnresolvedReferences
def test_dynamic_module(schema):
gen = SchemaModuleGenerator(schema, root_name='Family')
testmod = gen.import_as('testmod')
assert issubclass(testmod.Family, SchemaBase)
assert issubclass(testmod.Person, SchemaBase)
from testmod import Family, Person
assert issubclass(Family, SchemaBase)
assert issubclass(Person, SchemaBase)
family = Family(family_name='Smith', people=[Person(name='Alice', age=25), Person(name='Bob', age=26)])
dct = family.to_dict()
assert dct == {'family_name': 'Smith', 'people': [{'name': 'Alice', 'age': 25}, {'name': 'Bob', 'age': 26}]}
family2 = Family.from_dict(dct)
assert family2.to_dict() == dct
# test extended class with properties
class MyFamily(Family):
def __init__(self, **kwargs):
super().__init__(**kwargs)
self.dependants = 0
@property
def has_pet(self):
return False
family3 = MyFamily.from_dict(dct)
assert family3.dependants == 0
family3.dependants = 1
assert family3.dependants == 1
assert not family3.has_pet
assert family3.to_dict() == dct
| StarcoderdataPython |
point = []
intrin = {"model": 0}
def rs2_project_point_to_pixel(pixel, intrin, point):
    # Partial port of the librealsense helper: project a 3D point onto the
    # image plane. `pixel` is a 2-element output list, `point` a 3-vector.
    x = point[0] / point[2]
    y = point[1] / point[2]
    if intrin["model"] == 0:
        r2 = x * x + y * y
| StarcoderdataPython |
1771797 | <reponame>PingHuskar/hackerrank
# Mathematics > Linear Algebra Foundations > Eigenvalue of a Matrix I
# Basic problems related to eigenvalues.
#
# https://www.hackerrank.com/challenges/eigenvalue-of-matrix-1/problem
#
import numpy as np
M = np.matrix([[1, -3, 3],
[3, -5, 3],
[6, -6, 4]])
print(np.linalg.eigvals(M))
I = np.identity(np.linalg.matrix_rank(M))
for λ in np.linalg.eigvals(M):
print(np.linalg.det(M - λ * I)) | StarcoderdataPython |
3394092 | <gh_stars>1-10
# Copyright 2021 <NAME> (rafsaf). All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
""" File with decorator to measure function time """
import logging
from functools import wraps
from time import time
from django.db import connection, reset_queries
def timing(function):
"""Time for a given function"""
@wraps(function)
def wrap(*args, **kwargs):
reset_queries()
start_queries = len(connection.queries)
time1 = time()
result = function(*args, **kwargs)
time2 = time()
if len(str(args)) > 80:
new_args = str(args)[0:80]
else:
new_args = args
if len(str(kwargs)) > 80:
new_kwargs = str(kwargs)[0:80]
else:
new_kwargs = kwargs
end_queries = len(connection.queries)
time3 = round(time2 - time1, 5)
logging.debug(f"\r\n Func: {function.__name__}")
logging.debug(f" Args:[{new_args}]")
logging.debug(f" Kwargs:[{new_kwargs}]")
logging.debug(f" Took: {time3} sec")
logging.debug(f" Number of Queries: {end_queries - start_queries}")
logging.debug(" Line by line time: ")
return result
return wrap
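# Illustrative usage (the decorated function below is hypothetical):
#
#   @timing
#   def list_items(request):
#       ...
#
# With DEBUG logging enabled, each call logs its (truncated) args/kwargs, the
# elapsed wall-clock time, and the number of Django DB queries it issued.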
| StarcoderdataPython |
1638889 | <filename>kubernetes/models/v1/ConfigMapVolumeSource.py
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# This file is subject to the terms and conditions defined in
# file 'LICENSE.md', which is part of this source code package.
#
from kubernetes.utils import is_valid_string, filter_model
from kubernetes.models.v1.KeyToPath import KeyToPath
class ConfigMapVolumeSource(object):
"""
https://kubernetes.io/docs/api-reference/v1.8/#configmapvolumesource-v1-core
The contents of the target ConfigMap's Data field will be presented in a volume as files using the keys in the
Data field as the file names, unless the items element is populated with specific mappings of keys to paths.
ConfigMap volumes support ownership management and SELinux relabeling.
"""
def __init__(self, model=None):
super(ConfigMapVolumeSource, self).__init__()
self._default_mode = None
self._items = None
self._name = None
self._optional = None
if model is not None:
m = filter_model(model)
self._build_with_model(m)
def _build_with_model(self, model=None):
if 'defaultMode' in model:
self.default_mode = model['defaultMode']
if 'items' in model:
self.items = model['items']
if 'name' in model:
self.name = model['name']
if 'optional' in model:
self.optional = model['optional']
# ------------------------------------------------------------------------------------- default_mode
@property
def default_mode(self):
return self._default_mode
@default_mode.setter
def default_mode(self, mode=None):
if is_valid_string(mode):
try:
mode = int(mode)
except ValueError:
raise SyntaxError('ConfigMapVolumeSource: defaultMode: [ {0} ] is invalid.'.format(mode))
if not isinstance(mode, int):
raise SyntaxError('ConfigMapVolumeSource: defaultMode: [ {0} ] is invalid.'.format(mode))
self._default_mode = mode
# ------------------------------------------------------------------------------------- items
@property
def items(self):
return self._items
@items.setter
def items(self, items=None):
if not isinstance(items, list):
raise SyntaxError('ConfigMapVolumeSource: items: [ {0} ] is invalid.'.format(items))
modeled_items = list()
for i in items:
tmp_item = KeyToPath(model=i)
modeled_items.append(tmp_item)
self._items = modeled_items
# ------------------------------------------------------------------------------------- name
@property
def name(self):
return self._name
@name.setter
def name(self, name=None):
if not is_valid_string(name):
raise SyntaxError('ConfigMapVolumeSource: name: [ {0} ] is invalid.'.format(name))
self._name = name
# ------------------------------------------------------------------------------------- optional
@property
def optional(self):
return self._optional
@optional.setter
def optional(self, v=None):
if not isinstance(v, bool):
raise SyntaxError('ConfigMapVolumeSource: optional: [ {0} ] is invalid.'.format(v))
self._optional = v
# ------------------------------------------------------------------------------------- serialize
def serialize(self):
data = {}
if self.default_mode is not None:
            data['defaultMode'] = self.default_mode
if self.items is not None:
tmp_items = list()
for i in self.items:
tmp_items.append(i.serialize())
data['items'] = tmp_items
if self.name is not None:
data['name'] = self.name
if self.optional is not None:
data['optional'] = self.optional
return data
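# Illustrative construction from a plain dict (values are made up; the
# 'key'/'path' fields inside 'items' are assumed to match the KeyToPath model
# imported above):
#
#   source = ConfigMapVolumeSource(model={
#       'name': 'app-config',
#       'defaultMode': 420,
#       'items': [{'key': 'settings.json', 'path': 'settings.json'}],
#       'optional': False,
#   })
#   source.serialize()  # -> dict ready to embed in a volume spec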
| StarcoderdataPython |
1635267 | <filename>hallo/test/modules/server_control/test_list_servers.py
from hallo.events import EventMessage
from hallo.server import Server
from hallo.test.server_mock import ServerMock
def test_no_servers(hallo_getter):
test_hallo = hallo_getter({"server_control"}, disconnect_servers=True)
# Send command
test_hallo.function_dispatcher.dispatch(
EventMessage(test_hallo.test_server, test_hallo.test_chan, test_hallo.test_user, "list servers")
)
data = test_hallo.test_server.get_send_data(1, test_hallo.test_chan, EventMessage)
# Check response
assert "do not" in data[0].text
assert ":" not in data[0].text
def test_one_server(hallo_getter):
test_hallo = hallo_getter({"server_control"}, disconnect_servers=True)
# Add one server
serv1 = ServerMock(test_hallo)
serv1.name = "server_list_test"
test_hallo.add_server(serv1)
# Send command
test_hallo.function_dispatcher.dispatch(
EventMessage(test_hallo.test_server, test_hallo.test_chan, test_hallo.test_user, "list servers")
)
data = test_hallo.test_server.get_send_data(1, test_hallo.test_chan, EventMessage)
# Check response
server_list_text = data[0].text.split(":")[1]
server_list = server_list_text.split("], ")
assert len(server_list) == 1
assert serv1.name in server_list[0]
assert "type=" + serv1.type in server_list[0]
assert "state=" + serv1.state in server_list[0]
assert "nick=" + serv1.get_nick() in server_list[0]
assert "auto_connect=" + str(serv1.auto_connect) in server_list[0]
def test_two_mock_servers(hallo_getter):
test_hallo = hallo_getter({"server_control"}, disconnect_servers=True)
# Add two servers
serv1 = ServerMock(test_hallo)
serv1.name = "server_list_test1"
serv1.auto_connect = True
serv1.nick = "hallo"
serv1.disconnect()
test_hallo.add_server(serv1)
serv2 = ServerMock(test_hallo)
serv2.name = "server_list_test2"
serv2.auto_connect = False
serv2.nick = "yobot"
serv2.start()
test_hallo.add_server(serv2)
# Send command
test_hallo.function_dispatcher.dispatch(
EventMessage(test_hallo.test_server, test_hallo.test_chan, test_hallo.test_user, "list servers")
)
data = test_hallo.test_server.get_send_data(1, test_hallo.test_chan, EventMessage)
# Check response
server_list_text = data[0].text.split(": \n")[1]
server_list = server_list_text.split("\n")
assert len(server_list) == 2
if serv1.name in server_list[0]:
server_text1 = server_list[0]
server_text2 = server_list[1]
else:
server_text1 = server_list[1]
server_text2 = server_list[0]
assert serv1.name in server_text1
assert "type=" + serv1.type in server_text1
assert "state=" + serv1.state in server_text1
assert "nick=" + serv1.get_nick() in server_text1
assert "auto_connect=" + str(serv1.auto_connect) in server_text1
assert serv2.name in server_text2
assert "type=" + serv2.type in server_text2
assert "state=" + serv2.state in server_text2
assert "nick=" + serv2.get_nick() in server_text2
assert "auto_connect=" + str(serv2.auto_connect) in server_text2
def test_irc_server(hallo_getter):
test_hallo = hallo_getter({"server_control"}, disconnect_servers=True)
# Add one server
serv1 = ServerMock(test_hallo)
serv1.type = Server.TYPE_IRC
serv1.server_address = "irc.example.org"
serv1.server_port = 6789
serv1.name = "irc_server_list_test"
test_hallo.add_server(serv1)
# Send command
test_hallo.function_dispatcher.dispatch(
EventMessage(test_hallo.test_server, test_hallo.test_chan, test_hallo.test_user, "list servers")
)
data = test_hallo.test_server.get_send_data(1, test_hallo.test_chan, EventMessage)
# Check response
server_list_text = data[0].text.split(":", 1)[1]
server_list = server_list_text.split("], ")
assert len(server_list) == 1
assert serv1.name in server_list[0], (
"Server name not found in output.\n"
"Server name: " + serv1.name + "\nCommand output: " + server_list[0]
)
assert "type=" + serv1.type in server_list[0], (
"Server type not found in output.\n"
"Server type: " + serv1.type + "\nCommand output: " + server_list[0]
)
irc_address = serv1.server_address + ":" + str(serv1.server_port)
assert irc_address in server_list[0], (
"IRC server address not found in output.\n"
"Server address: " + irc_address + "\nCommand output: " + server_list[0]
)
assert "state=" + serv1.state in server_list[0], (
"Server state not found in output.\n"
"Server name: " + serv1.state + "\nCommand output: " + server_list[0]
)
assert "nick=" + serv1.get_nick() in server_list[0], (
"Server nick not found in output.\n"
"Server nick: " + serv1.get_nick() + "\nCommand output: " + server_list[0]
)
assert "auto_connect=" + str(serv1.auto_connect) in server_list[0]
| StarcoderdataPython |
# Naver real-time search rankings
import requests
import time
from bs4 import BeautifulSoup
loopNum = 1
while True:
print("*"*30)
response = requests.get('https://www.naver.com/')
    assert response.status_code == 200
dom = BeautifulSoup(response.content, "html.parser") # (print(dom))
ranking_elements = dom.select("li.ah_item") # (print(ranking_elements))
for i, ranking_element in enumerate(ranking_elements):
ranking_title_element = ranking_element.select(".ah_k")[0]
print(i+1, "위: ", ranking_title_element.text)
if i+1>=20:
break
print("*"*30)
print("[", loopNum, "] 1분에 한번씩 갱신됩니다.")
print("*"*30, "\n")
#time.sleep(20) #20초마다 갱신
time.sleep(60) | StarcoderdataPython |
1701380 | <gh_stars>0
import time
import unittest
from algorithms.main.sets.permutations_without_reps import permute_without_reps as pnr, permute_without_reps_two as pnrtwo, permute_without_reps_three as pnrthree
class TestPermutationsWithoutReps(unittest.TestCase):
def setUp(self):
self._started_at = time.time()
def tearDown(self):
elapsed = time.time() - self._started_at
print(f'Time: ({elapsed:.8f}s)')
def test_permutations_without_reps(self):
self.assertEqual(sorted(pnr('a')), ['a'])
self.assertEqual(sorted(pnr('ab')), ['ab', 'ba'])
self.assertTrue(sorted(pnr('MISSISSIPPI')))
self.assertEqual(sorted(pnr('aabb')), [
'aabb', 'abab', 'abba', 'baab', 'baba', 'bbaa'])
self.assertEqual(sorted(pnr(['a'])), ['a'])
self.assertEqual(sorted(pnr(['a','b'])), ['ab', 'ba'])
self.assertEqual(sorted(pnr(['a','a','b','b'])), [
'aabb', 'abab', 'abba', 'baab', 'baba', 'bbaa'])
def test_permutations_without_reps_two(self):
self.assertEqual(sorted(pnrtwo('a')), ['a'])
self.assertEqual(sorted(pnrtwo('ab')), ['ab', 'ba'])
        self.assertTrue(sorted(pnrtwo('MISSISSIPPI')))
self.assertEqual(sorted(pnrtwo('aabb')), [
'aabb', 'abab', 'abba', 'baab', 'baba', 'bbaa'])
self.assertEqual(sorted(pnrtwo(['a'])), ['a'])
self.assertEqual(sorted(pnrtwo(['a','b'])), ['ab', 'ba'])
self.assertEqual(sorted(pnrtwo(['a','a','b','b'])), [
'aabb', 'abab', 'abba', 'baab', 'baba', 'bbaa'])
def test_permutations_without_reps_three(self):
self.assertEqual(sorted(pnrthree('a')), ['a'])
self.assertEqual(sorted(pnrthree('ab')), ['ab', 'ba'])
        self.assertTrue(sorted(pnrthree('MISSISSIPPI')))
self.assertEqual(sorted(pnrthree('aabb')), [
'aabb', 'abab', 'abba', 'baab', 'baba', 'bbaa'])
self.assertEqual(sorted(pnrthree(['a'])), ['a'])
self.assertEqual(sorted(pnrthree(['a','b'])), ['ab', 'ba'])
self.assertEqual(sorted(pnrthree(['a','a','b','b'])), [
'aabb', 'abab', 'abba', 'baab', 'baba', 'bbaa'])
if __name__ == '__main__':
unittest.main()
| StarcoderdataPython |
49124 | <filename>api/v1/utilities/su.py
import os
class Guard:
_preserved_uid: int
_preserved_gid: int
_uid: int
_gid: int
def __init__(self, uid: int, gid: int):
# safety measure
if uid == 0 or gid == 0:
print("Tried to su Guard to root :(")
            os._exit(-1)  # os.exit() does not exist; _exit terminates immediately
self._uid = uid
self._gid = gid
def _preserve_ids(self):
self._preserved_uid = os.geteuid()
self._preserved_gid = os.getegid()
def __enter__(self):
self._preserve_ids()
os.setegid(self._gid)
os.seteuid(self._uid)
def __exit__(self ,type, value, traceback):
os.seteuid(self._preserved_uid)
os.setegid(self._preserved_gid) | StarcoderdataPython |
85682 | <filename>phl_courts_scraper/court_summary/core.py
"""Module for parsing court summary reports."""
import collections
from operator import itemgetter
from pathlib import Path
from typing import Any
from ..base import DownloadedPDFScraper
from ..utils import get_pdf_words
from . import utils
from .schema import CourtSummary
class CourtSummaryParser(DownloadedPDFScraper):
"""
A class to parse court summary reports.
Call the class to parse a PDF. The class will return a
CourtSummary object.
Example
-------
>>> from phl_courts_scraper.court_summary import CourtSummaryParser
>>> parser = CourtSummaryParser()
>>> court_summary = parser(pdf_path)
"""
def __call__(self, pdf_path: Path, **kwargs: Any) -> CourtSummary:
"""Parse and return a court summary document."""
# Parse PDF into a list of words
words = get_pdf_words(
str(pdf_path),
keep_blank_chars=True,
x_tolerance=5,
y_tolerance=0,
header_cutoff=0,
footer_cutoff=645,
)
# Define the section headers
headers = [
"Active",
"Closed",
"Inactive",
"Archived",
"Adjudicated",
]
# Determine section headers
starts = {}
for header in headers:
line = utils.find_line_number(words, header, missing="ignore")
if line is not None:
starts[header] = line
# Put the section in the correct order (ascending)
sections = sorted(starts, key=itemgetter(1))
sorted_starts = collections.OrderedDict()
for key in sections:
sorted_starts[key] = starts[key]
# Parse each section
dockets = []
for i, this_section in enumerate(sorted_starts):
# Skip the "Archived" section
if this_section == "Archived":
continue
# Determine the next section if there is one
next_section = sections[i + 1] if i < len(sections) - 1 else None
# Determine line number of sections
this_section_start = sorted_starts[this_section]
next_section_start = (
sorted_starts[next_section] if next_section else None
)
# Trim the words to just lines in this section
section_words = words[this_section_start:next_section_start]
# Parse dockets in this section
for docket_number, county, docket in utils.yield_dockets(
section_words
):
# Do the parsing work
result = utils.parse_charges_table(docket_number, docket)
# Format the result
info = result["header"]
info["county"] = county
info["docket_number"] = docket_number
info["status"] = this_section.lower()
info["charges"] = result["charges"]
# Fix columns
if "prob_#" in info:
info["prob_num"] = info.pop("prob_#")
if "psi#" in info:
info["psi_num"] = info.pop("psi#")
# Save the result
dockets.append(info)
# Parse the header too
out = utils.parse_header(words, sections[0])
out["dockets"] = dockets
return CourtSummary.from_dict(out)
| StarcoderdataPython |
3397209 | <filename>part_2_read_test_json.py
import test_data
import json
# Creates and returns a GameLibrary object(defined in test_data) from loaded json_data
def make_game_library_from_json(json_data):
# Initialize a new GameLibrary
game_library = test_data.GameLibrary()
# Loop through the json_data
for game in json_data:
new_game = test_data.Game()
# title
new_game.title = game["title"]
# year
new_game.year = game["year"]
# platform (which requires reading name and launch_year)
new_game.platform = test_data.Platform(game["platform"]["name"], game["platform"]["launch_year"])
# Add that Game object to the game_library
game_library.add_game(new_game)
return game_library
# Part 2
input_json_file = "data/test_data.json"
# Open the file specified by input_json_file
with open(input_json_file, "r") as reader:
# Use the json module to load the data from the file
game_library_json = json.load(reader)
# Use make_game_library_from_json(json_data) to convert the data to GameLibrary data
game_library_data = make_game_library_from_json(game_library_json)
# Print out the resulting GameLibrary data using print()
print("JSON data:")
print(game_library_json)
print("GameLibrary data:")
print(game_library_data)
| StarcoderdataPython |
1745329 | from google.appengine.ext import ndb
context = ndb.get_context()
context.set_cache_policy(lambda key: False)
context.set_memcache_policy(lambda key: False)
| StarcoderdataPython |
1723768 | #!/usr/bin/env python3
# -*- coding:utf-8 -*-
# author: bigfoolliu
"""
Koko loves to eat bananas. There are N piles of bananas; the i-th pile contains piles[i] bananas. The guards have left and will return in H hours.
Koko can decide her eating speed K (bananas per hour). Each hour she picks one pile and eats K bananas from it. If the pile has fewer than K bananas, she eats the whole pile and does not eat any more bananas during that hour.
Koko likes to eat slowly, but she still wants to finish all the bananas before the guards come back.
Return the minimum integer speed K at which she can eat all the bananas within H hours.
Example 1:
Input: piles = [3,6,7,11], H = 8
Output: 4
Example 2:
Input: piles = [30,11,23,4,20], H = 5
Output: 30
Example 3:
Input: piles = [30,11,23,4,20], H = 6
Output: 23
Constraints:
1 <= piles.length <= 10^4
piles.length <= H <= 10^9
1 <= piles[i] <= 10^9
Source: LeetCode (力扣)
Link: https://leetcode-cn.com/problems/koko-eating-bananas
Copyright belongs to LeetCode (领扣网络). Contact them for authorization before commercial reprinting; cite the source for non-commercial reprints.
"""
import doctest
from typing import List
class Solution:
def minEatingSpeed(self, piles: List[int], h: int) -> int:
pass
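# The Solution stub above is left unimplemented in the source file. The helper
# below is only an illustrative sketch of one standard approach (binary search
# over the eating speed); its name is not part of the original problem template.
def _min_eating_speed_sketch(piles: List[int], h: int) -> int:
    """Smallest integer speed k such that all piles fit within h hours.

    >>> _min_eating_speed_sketch([3, 6, 7, 11], 8)
    4
    >>> _min_eating_speed_sketch([30, 11, 23, 4, 20], 5)
    30
    """
    lo, hi = 1, max(piles)
    while lo < hi:
        mid = (lo + hi) // 2
        # hours needed at speed mid = sum of ceil(p / mid) over all piles
        if sum((p + mid - 1) // mid for p in piles) <= h:
            hi = mid
        else:
            lo = mid + 1
    return lo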
if __name__ == '__main__':
doctest.testmod()
| StarcoderdataPython |
114800 | <reponame>dd2t/desafio<filename>my-app/api/api.py
from flask import Flask, render_template, url_for, request, redirect
import pymongo
from pymongo import MongoClient
import os
from password import passwd
app = Flask(__name__, static_folder='./build', static_url_path='/')
# Database
cluster = pymongo.MongoClient(passwd())
db = cluster["desafio"]
collection = db["cellphone"]
# Database Model
# startDB = [
# {
# '_id': 0,
# 'brand': 'Apple',
# 'model': 'Iphone 5',
# 'memory': '2',
# 'releaseDate': '2021-04-03'
# },
# {
# '_id': 1,
# 'brand': 'Pineapple',
# 'model': 'Honeycomb 3',
# 'memory': '42',
# 'releaseDate': '2021-12-21'
# },
# {
# '_id': 2,
# 'brand': 'Mapple',
# 'model': 'Melon 7',
# 'memory': '88',
# 'releaseDate': '2042-01-11'
# }
# ]
# Routes
@app.route('/')
def index():
return app.send_static_file('index.html')
@app.errorhandler(404)
def not_found(e):
return app.send_static_file('index.html')
@app.route('/api/cellphone-list', methods=['GET'])
def send():
phones = []
for doc in collection.find():
phones.append(doc)
return {
'cellphoneArray': phones
}
@app.route('/api/delete', methods=['DELETE', 'POST'])
def delete():
phone = request.get_json()
collection.delete_one({'model': phone['model']})
return redirect('/')
def newPhone(element, id):
return {
'_id': id,
'brand': element['brand'],
'model': element['model'],
'memory': element['memory'],
'releaseDate': element['releaseDate']
}
@app.route('/api/update', methods=['GET', 'POST'])
def update():
if request.method == 'POST':
phone = request.get_json()
oldPhone = collection.find_one({'model': phone['model']})
if None == oldPhone:
oldPhone = collection.find_one({'$query': {}, '$orderby':{'_id':-1}})
if oldPhone != None:
collection.insert_one(newPhone(phone, oldPhone['_id']+1))
else:
collection.insert_one(newPhone(phone, 0))
else:
collection.replace_one(oldPhone, phone)
return redirect('/')
if __name__ == '__main__':
app.run(host='0.0.0.0', debug=True, port=os.environ.get('PORT',80))
| StarcoderdataPython |
1693822 | import importlib
import logging
import numpy as np
import os
import os.path as osp
import time
import torch
import torch.optim as optim
from torch.optim.lr_scheduler import _LRScheduler
from torch.utils.data import ConcatDataset
from bisect import bisect_right
from functools import partial
from six.moves import map, zip
from libs.datasets.transform import TrainTransform
from libs.datasets.transform import EvalTransform
class AverageMeter(object):
"""Computes and stores the average and current value
"""
def __init__(self):
self.reset()
def reset(self):
self.val = 0
self.avg = 0
self.sum = 0
self.count = 0
def update(self, val, n=1):
self.val = val
self.sum += val * n
self.count += n
self.avg = self.sum / self.count
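# Typical usage (illustrative):
#   losses = AverageMeter()
#   losses.update(batch_loss, n=batch_size)
#   losses.avg   # running average weighted by n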
def resource_path(relative_path):
"""To get the absolute path"""
base_path = osp.abspath(".")
return osp.join(base_path, relative_path)
def ensure_dir(root_dir, rank=0):
if not osp.exists(root_dir) and rank == 0:
print(f'=> creating {root_dir}')
os.mkdir(root_dir)
else:
while not osp.exists(root_dir):
print(f'=> wait for {root_dir} created')
time.sleep(10)
return root_dir
def create_logger(cfg, rank=0):
# working_dir root
abs_working_dir = resource_path('work_dirs')
working_dir = ensure_dir(abs_working_dir, rank)
# output_dir root
output_root_dir = ensure_dir(os.path.join(working_dir, cfg.OUTPUT_ROOT), rank)
time_str = time.strftime('%Y-%m-%d-%H-%M')
final_output_dir = ensure_dir(os.path.join(output_root_dir, time_str), rank)
# set up logger
logger = setup_logger(final_output_dir, time_str, rank)
return logger, final_output_dir
def setup_logger(final_output_dir, time_str, rank, phase='train'):
log_file = f'{phase}_{time_str}_rank{rank}.log'
final_log_file = os.path.join(final_output_dir, log_file)
head = '%(asctime)-15s %(message)s'
logging.basicConfig(filename=str(final_log_file), format=head)
logger = logging.getLogger()
logger.setLevel(logging.INFO)
console = logging.StreamHandler()
logging.getLogger('').addHandler(console)
return logger
def get_model(cfg, device):
module = importlib.import_module(cfg.MODEL.FILE)
model, criterion, postprocessors = getattr(module, 'build_model')(cfg, device)
return model, criterion, postprocessors
def get_optimizer(cfg, model):
"""Support two types of optimizers: SGD, Adam.
"""
assert (cfg.TRAIN.OPTIMIZER in [
'sgd',
'adam',
])
if cfg.TRAIN.OPTIMIZER == 'sgd':
optimizer = optim.SGD(
filter(lambda p: p.requires_grad, model.parameters()),
lr=cfg.TRAIN.LR,
momentum=cfg.TRAIN.MOMENTUM,
weight_decay=cfg.TRAIN.WEIGHT_DECAY,
nesterov=cfg.TRAIN.NESTEROV)
elif cfg.TRAIN.OPTIMIZER == 'adam':
optimizer = optim.Adam(
filter(lambda p: p.requires_grad, model.parameters()),
lr=cfg.TRAIN.LR,
weight_decay=cfg.TRAIN.WEIGHT_DECAY)
return optimizer
def load_checkpoint(cfg, model, optimizer, lr_scheduler, device, module_name='model'):
last_iter = -1
resume_path = cfg.MODEL.RESUME_PATH
resume = cfg.TRAIN.RESUME
if resume_path and resume:
if osp.exists(resume_path):
checkpoint = torch.load(resume_path, map_location='cpu')
# resume
if 'state_dict' in checkpoint:
model.module.load_state_dict(checkpoint['state_dict'], strict=False)
logging.info(f'==> model pretrained from {resume_path} \n')
elif 'model' in checkpoint:
if module_name == 'detr':
model.module.detr_head.load_state_dict(checkpoint['model'], strict=False)
logging.info(f'==> detr pretrained from {resume_path} \n')
else:
model.module.load_state_dict(checkpoint['model'], strict=False)
logging.info(f'==> model pretrained from {resume_path} \n')
if 'optimizer' in checkpoint:
optimizer.load_state_dict(checkpoint['optimizer'])
logging.info(f'==> optimizer resumed, continue training')
for state in optimizer.state.values():
for k, v in state.items():
if torch.is_tensor(v):
state[k] = v.to(device)
if 'optimizer' in checkpoint and 'lr_scheduler' in checkpoint and 'epoch' in checkpoint:
lr_scheduler.load_state_dict(checkpoint['lr_scheduler'])
last_iter = checkpoint['epoch']
logging.info(f'==> last_epoch = {last_iter}')
if 'epoch' in checkpoint:
last_iter = checkpoint['epoch']
logging.info(f'==> last_epoch = {last_iter}')
# pre-train
else:
logging.error(f"==> checkpoint do not exists: \"{resume_path}\"")
raise FileNotFoundError
else:
logging.info("==> train model without resume")
return model, optimizer, lr_scheduler, last_iter
class WarmupMultiStepLR(_LRScheduler):
def __init__(self, optimizer, milestones, gamma=0.1, warmup_factor=1.0 / 3,
warmup_iters=500, last_epoch=-1):
if not list(milestones) == sorted(milestones):
            raise ValueError(
                "Milestones should be a list of increasing integers. "
                "Got {}".format(milestones)
            )
self.milestones = milestones
self.gamma = gamma
self.warmup_factor = warmup_factor
self.warmup_iters = warmup_iters
super().__init__(optimizer, last_epoch)
def get_lr(self):
warmup_factor = 1
if self.last_epoch < self.warmup_iters:
alpha = float(self.last_epoch) / self.warmup_iters
warmup_factor = self.warmup_factor * (1 - alpha) + alpha
return [
base_lr
* warmup_factor
* self.gamma ** bisect_right(self.milestones, self.last_epoch)
for base_lr in self.base_lrs
]
def get_lr_scheduler(cfg, optimizer, last_epoch=-1):
"""Support three types of optimizers: StepLR, MultiStepLR, MultiStepWithWarmup.
"""
assert (cfg.TRAIN.LR_SCHEDULER in [
'StepLR',
'MultiStepLR',
'MultiStepWithWarmup',
])
if cfg.TRAIN.LR_SCHEDULER == 'StepLR':
lr_scheduler = torch.optim.lr_scheduler.StepLR(
optimizer,
cfg.TRAIN.LR_STEPS[0],
cfg.TRAIN.LR_FACTOR,
last_epoch=last_epoch)
elif cfg.TRAIN.LR_SCHEDULER == 'MultiStepLR':
lr_scheduler = torch.optim.lr_scheduler.MultiStepLR(
optimizer,
cfg.TRAIN.LR_STEPS,
cfg.TRAIN.LR_FACTOR,
last_epoch=last_epoch)
elif cfg.TRAIN.LR_SCHEDULER == 'MultiStepWithWarmup':
lr_scheduler = WarmupMultiStepLR(
optimizer,
cfg.TRAIN.LR_STEPS,
cfg.TRAIN.LR_FACTOR,
cfg.TRAIN.WARMUP_INIT_FACTOR,
cfg.TRAIN.WARMUP_STEP,
last_epoch)
else:
raise AttributeError(f'{cfg.TRAIN.LR_SCHEDULER} is not implemented')
return lr_scheduler
def get_det_criterion(cfg):
return critertion
def get_trainer(cfg, model, criterion, optimizer, lr_scheduler, postprocessors,
log_dir, performance_indicator, last_iter, rank, device, max_norm):
module = importlib.import_module(cfg.TRAINER.FILE)
Trainer = getattr(module, cfg.TRAINER.NAME)(
cfg,
model=model,
criterion=criterion,
optimizer=optimizer,
lr_scheduler=lr_scheduler,
postprocessors=postprocessors,
log_dir=log_dir,
performance_indicator=performance_indicator,
last_iter=last_iter,
rank=rank,
device=device,
max_norm = max_norm
)
return Trainer
def list_to_set(data_list, name='train'):
if len(data_list) == 0:
dataset = None
logging.warning(f"{name} dataset is None")
elif len(data_list) == 1:
dataset = data_list[0]
else:
dataset = ConcatDataset(data_list)
if dataset is not None:
logging.info(f'==> the size of {name} dataset is {len(dataset)}')
return dataset
def get_dataset(cfg):
train_transform = TrainTransform(
mean=cfg.DATASET.MEAN,
std=cfg.DATASET.STD,
scales=cfg.DATASET.SCALES,
max_size=cfg.DATASET.MAX_SIZE
)
eval_transform = EvalTransform(
mean=cfg.DATASET.MEAN,
std=cfg.DATASET.STD,
max_size=cfg.DATASET.MAX_SIZE
)
module = importlib.import_module(cfg.DATASET.FILE)
Dataset = getattr(module, cfg.DATASET.NAME)
data_root = cfg.DATASET.ROOT # abs path in yaml
# get train data list
train_root = osp.join(data_root, 'train')
train_set = [d for d in os.listdir(train_root) if osp.isdir(osp.join(train_root, d))]
if len(train_set) == 0:
train_set = ['.']
train_list = []
for sub_set in train_set:
train_sub_root = osp.join(train_root, sub_set)
logging.info(f'==> load train sub set: {train_sub_root}')
train_sub_set = Dataset(cfg, train_sub_root, train_transform)
train_list.append(train_sub_set)
# get eval data list
eval_root = osp.join(data_root, 'test')
eval_set = [d for d in os.listdir(eval_root) if osp.isdir(osp.join(eval_root, d))]
if len(eval_set) == 0:
eval_set = ['.']
eval_list = []
for sub_set in eval_set:
eval_sub_root = osp.join(eval_root, sub_set)
logging.info(f'==> load val sub set: {eval_sub_root}')
eval_sub_set = Dataset(cfg, eval_sub_root, eval_transform)
eval_list.append(eval_sub_set)
# concat dataset list
train_dataset = list_to_set(train_list, 'train')
eval_dataset = list_to_set(eval_list, 'eval')
return train_dataset, eval_dataset
def save_checkpoint(states, is_best, output_dir, filename='checkpoint.pth'):
torch.save(states, os.path.join(output_dir, filename))
logging.info(f'save model to {output_dir}')
if is_best:
torch.save(states['state_dict'], os.path.join(output_dir, 'model_best.pth'))
def load_eval_model(resume_path, model):
if resume_path != '':
if osp.exists(resume_path):
print(f'==> model load from {resume_path}')
checkpoint = torch.load(resume_path)
if 'state_dict' in checkpoint:
model.load_state_dict(checkpoint['state_dict'])
else:
model.load_state_dict(checkpoint)
else:
print(f"==> checkpoint do not exists: \"{resume_path}\"")
raise FileNotFoundError
return model
def multi_apply(func, *args, **kwargs):
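    """Apply ``func`` to each group of zipped ``args`` and transpose the
    per-call result tuples into a tuple of lists (one list per output)."""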
pfunc = partial(func, **kwargs) if kwargs else func
map_results = map(pfunc, *args)
return tuple(map(list, zip(*map_results)))
def naive_np_nms(dets, thresh):
"""Pure Python NMS baseline."""
x1 = dets[:, 0]
y1 = dets[:, 1]
x2 = dets[:, 2]
y2 = dets[:, 3]
areas = (x2 - x1 + 1) * (y2 - y1 + 1)
order = x1.argsort()[::-1]
keep = []
while order.size > 0:
i = order[0]
keep.append(i)
xx1 = np.maximum(x1[i], x1[order[1:]])
yy1 = np.maximum(y1[i], y1[order[1:]])
xx2 = np.minimum(x2[i], x2[order[1:]])
yy2 = np.minimum(y2[i], y2[order[1:]])
w = np.maximum(0.0, xx2 - xx1 + 1)
h = np.maximum(0.0, yy2 - yy1 + 1)
inter = w * h
ovr = inter / (areas[i] + areas[order[1:]] - inter)
inds = np.where(ovr <= thresh)[0]
order = order[inds + 1]
return dets[keep]
def write_dict_to_json(mydict, f_path):
import json
import numpy
class DateEnconding(json.JSONEncoder):
def default(self, obj):
if isinstance(obj, (numpy.int_, numpy.intc, numpy.intp, numpy.int8,
numpy.int16, numpy.int32, numpy.int64, numpy.uint8,
numpy.uint16,numpy.uint32, numpy.uint64)):
return int(obj)
elif isinstance(obj, (numpy.float_, numpy.float16, numpy.float32,
numpy.float64)):
return float(obj)
elif isinstance(obj, (numpy.ndarray,)): # add this line
return obj.tolist() # add this line
return json.JSONEncoder.default(self, obj)
with open(f_path, 'w') as f:
json.dump(mydict, f, cls=DateEnconding)
print("write down det dict to %s!" %(f_path))
| StarcoderdataPython |
4827504 | import re
from datetime import datetime
from unittest import TestCase
from difflib import SequenceMatcher
import logging
from kqml.kqml_performative import KQMLPerformative
logging.basicConfig(format='%(levelname)s: %(name)s - %(message)s',
level=logging.INFO)
logger = logging.getLogger('integration_tests')
try:
from colorama.ansi import Back, Style, Fore
except ImportError:
logger.warning('Will not be able to mark diffs with color.')
# Create dummies
class DummyColorer(object):
def __getattribute__(self, *args, **kwargs):
return ""
Back = DummyColorer()
Style = DummyColorer()
Fore = DummyColorer()
def color_diff(expected, received):
"""Show in color the change in a string compaired to another."""
sm = SequenceMatcher(None, received, expected)
output = []
for opcode, a0, a1, b0, b1 in sm.get_opcodes():
if opcode == 'equal':
output.append(sm.a[a0:a1])
elif opcode == 'insert':
output.append(Back.RED + sm.b[b0:b1] + Style.RESET_ALL)
elif opcode == 'delete':
output.append(Back.GREEN + sm.a[a0:a1] + Style.RESET_ALL)
elif opcode == 'replace':
output.append(Back.BLUE + sm.a[a0:a1] + Back.RESET +
Fore.BLUE + sm.b[b0:b1] + Style.RESET_ALL)
else:
raise Exception('unexpected opcode')
return ''.join(output)
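# Quick illustrative call of color_diff: the returned string marks insertions,
# deletions, and replacements between the two inputs with background colours
# (plain text when colorama is unavailable). The sample strings are arbitrary.
def _color_diff_demo():
    expected = "(SUCCESS :result 42)"
    received = "(FAILURE :result 41)"
    return color_diff(expected, received)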
def define_in_child(s):
return "Define %s in the child!" % s
class _IntegrationTest(TestCase):
"""An abstract class for running a series of requests to the bioagent.
Much of the functionality of bioagents comes with their ability to
respond to messages they receive. This is a template for tests that
verify bioagents respond correctly to various messages.
Class Attributes:
----------
message_funcs: (list) A list of the message names to be sent. This can be
used to set the order of execution, which is otherwise alphabetical.
Methods:
-------
_get_messages: creates a generator that iterates over the message methods
given by message_funcs, or else all methods with the `create_` prefix.
run_test: runs the test.
Special Methods in Children:
---------------------------
create_<message_func_name>: These functions must take no inputs, and return
a kqml msg and content to be input into the receive_request function of
a bioagent.
check_response_to_<message_func_name>: These functions contain asserts, and
any other similar means of checking the result of the corresponding
call to the bioagent, as per usual nosetest procedures.
Example:
-------
As an example, a test sending two message to the bioagent, labeled prep and
run, then checking the result of the run, may be written as follows:
```
>> class TestFoo(_IntegrationTest):
>> message_funcs = ['prep', 'run']
>>
>> def create_prep(self):
>> "Creates the kqml message (msg) and content (content) for prep."
>> ...
>> return msg, content
>>
>> def create_run(self):
>> "Creates the kqml message and content for runing something."
>> ...
>> return msg, content
>>
>> def check_response_to_run(self, output):
>> "Checks that the output from the run is valid."
>> ...
```
This defines a test that sends a prep message, then a run message, and
checks the result of the run message to determine the status of the test.
Note that the prefixes are required for the methods to be found and used.
Note also that unless `message_funcs` is defined, the messages will be sent
in alphabetical order by default. Last of all, note that the `create_`
methods must have no inputs (besides self), and the `check_response_to_`
methods must have one input, which will be the output content of the
`receive_request` call.
Single requests may also be made without any difficulty or altering of the
above paradigm. Note that message_funcs would not be needed in such cases.
"""
message_funcs = []
timeout = 40 # seconds
def __init__(self, bioagent, **kwargs):
self.bioagent = bioagent(testing=True, **kwargs)
self.this_test_log_start = None
TestCase.__init__(self, 'run_test')
return
def __getattribute__(self, attr_name):
"Ensure that all attributes are implemented."
attr = TestCase.__getattribute__(self, attr_name)
if attr is NotImplemented:
raise NotImplementedError(define_in_child(attr_name))
return attr
def _get_method_dict(self, prefix=''):
"""Get a dict of methods with the given prefix string."""
# We need to walk up the parental tree to get all relevant methods.
# Note: the particular way the dicts are combined preserves parent
# child priority, namely that a child method should always take
# priority over like-named parent method.
full_dict = {}
current_class = self.__class__
while issubclass(current_class, _IntegrationTest) and \
current_class is not _IntegrationTest:
full_dict = dict([
(name, attr)
for name, attr in current_class.__dict__.items()
if not name.startswith('__')
] + list(full_dict.items()))
current_class = current_class.__base__
# Create the method dict.
method_dict = {
name[len(prefix):]: attr
for name, attr in full_dict.items()
if callable(attr) and name.startswith(prefix)
}
return method_dict
def _get_messages(self):
"""Get a generator iterating over the methods to send messages.
Yields:
------
        request_args: (tuple) arguments to be passed to `receive_request`.
check_func: (callable) a function used to check the result of a
request, or else None, if no such check is to be made.
"""
send_dict = self._get_method_dict('create_')
check_dict = self._get_method_dict('check_response_to_')
if not self.message_funcs:
msg_list = sorted(send_dict.keys())
else:
msg_list = self.message_funcs[:]
assert len(msg_list), \
"No messages found to test, likely error in def of test."
for msg in msg_list:
yield send_dict[msg](self), check_dict.get(msg)
def get_output_log(self, start_line=0, end_line=None, get_full_log=False):
"""Get the messages sent by the bioagent."""
buff = self.bioagent.out
cur_pos = buff.tell()
if get_full_log:
buff.seek(0)
elif self.this_test_log_start is not None:
buff.seek(self.this_test_log_start)
else:
return []
out_lines = re.findall('^(\(.*?\))$', buff.read().decode(),
re.MULTILINE | re.DOTALL)
out_msgs = [KQMLPerformative.from_string(line) for line in out_lines]
buff.seek(cur_pos)
return out_msgs[start_line:end_line]
def setUp(self):
"""Set the start of the logs"""
self.this_test_log_start = self.bioagent.out.tell()
logger.debug("Set log start to: %d" % self.this_test_log_start)
def run_test(self):
for request_args, check_resp in self._get_messages():
start = datetime.now()
self.bioagent.receive_request(*request_args)
end = datetime.now()
dt = end - start
assert dt.total_seconds() < self.timeout, \
("Task took too long (%.2f > %d seconds). BA would have "
"timed out." % (dt.total_seconds(), self.timeout))
output_log = self.get_output_log()
latest_log = output_log[-1]
output = latest_log.get('content')
if check_resp is not None:
check_resp(self, output)
return
def tearDown(self):
"""Unset the start of the log."""
self.this_test_log_start = None
class _StringCompareTest(_IntegrationTest):
"""Integration test in which the expected result is a verbatim string."""
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.expected = NotImplemented
def create_message(self):
raise NotImplementedError(define_in_child("the message constructor"))
def check_response_to_message(self, output):
output_str = output.to_string()
assert output_str == self.expected, (
'Did not get the expected output string:\n'
+ 'Expected: %s\n' % self.expected
+ 'Received: %s\n' % output.to_string()
+ 'Diff: %s\n' % color_diff(self.expected, output.to_string())
)
class _FailureTest(_IntegrationTest):
"""Integration test in which the expected result is a failure."""
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.expected_reason = NotImplemented
def check_response_to_message(self, output):
assert output.head() == 'FAILURE', 'Head is not FAILURE: %s' % output
reason = output.gets('reason')
assert reason == self.expected_reason, \
'Reason mismatch: %s instead of %s' % \
(reason, self.expected_reason)
| StarcoderdataPython |
51999 | <reponame>GYosifov88/Python-Basics<gh_stars>0
days = int(input())
type_of_room = input()
feedback = input()
price = 0
nights = days - 1
if type_of_room == 'room for one person':
price = 18
cost = nights * price
elif type_of_room == 'apartment':
price = 25
cost = nights * price
if days < 10:
cost = cost - (cost * 30 /100)
elif 10 <= days <= 15:
cost = cost - (cost * 35 / 100)
elif days > 15:
cost = cost - (cost * 50 / 100)
elif type_of_room == 'president apartment':
price = 35
cost = nights * price
if days < 10:
cost = cost - (cost * 10 /100)
elif 10 <= days <= 15:
cost = cost - (cost * 15 / 100)
elif days > 15:
cost = cost - (cost * 20 / 100)
if feedback == 'positive':
cost = cost + (cost * 25 /100)
elif feedback == 'negative':
cost = cost - (cost * 10 / 100)
print (f'{cost:.2f}') | StarcoderdataPython |
187763 | <gh_stars>0
from cipher_xs2423 import cipher_xs2423
import pytest
def cipher(text, shift, encrypt=True):
alphabet = 'abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ'
new_text = ''
assert isinstance(shift, int), 'The shift parameter should be an integer'
for c in text:
index = alphabet.find(c)
if index == -1:
new_text += c
else:
new_index = index + shift if encrypt == True else index - shift
new_index %= len(alphabet)
new_text += alphabet[new_index:new_index+1]
return new_text
def test_cipher_single():
example1 = 'selina'
expected1 = 'tfmjob'
actual1 = cipher(example1, 1)
assert actual1 == expected1,'Cipher function is not working as expected for single words'
def test_cipher_negative():
example2 = 'routine'
expected2 = 'qntshmd'
actual2 = cipher(example2, -1)
assert actual2 == expected2,'Cipher function is not working as expected for negative shifts'
def test_cipher_nonalphabet():
example3 = 'selina!'
expected = 'tfmjob!'
actual = cipher(example3, 1)
assert actual == expected,'Cipher function is not working as expected for non-alphabetical symbols'
def test_cipher_shift():
with pytest.raises(AssertionError):
cipher('Selina','two',encrypt = True)
| StarcoderdataPython |
150841 | class PageEffectiveImageMixin(object):
def get_effective_image(self):
if self.image:
return self.image
page = self.get_main_language_page()
if page.specific.image:
return page.specific.get_effective_image()
return ''
| StarcoderdataPython |
199175 | <filename>tests/async/test_listeners.py
# Copyright (c) Microsoft Corporation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
async def test_listeners(page, server):
log = []
def print_response(response):
log.append(response)
page.on("response", print_response)
await page.goto(f"{server.PREFIX}/input/textarea.html")
assert len(log) > 0
page.remove_listener("response", print_response)
log = []
await page.goto(f"{server.PREFIX}/input/textarea.html")
assert len(log) == 0
| StarcoderdataPython |
3350815 | from decimal import Decimal
from django.shortcuts import render
from django.http import HttpResponse, HttpResponseRedirect, JsonResponse
from django.views.generic.base import View
from django.conf import settings
from django.urls import reverse_lazy
from django.contrib import messages
from django.db import transaction
from django.forms import formset_factory
from books.models import Account, AccountGroup, DeclaredSource, SingleEntry
from books.forms import NewSourceForm, SourceDeclarationForm, InitializeIntegratedAccountForm
from books.utils import register_new_account, chart_of_accounts_setup
class InitializeIntegratedAccountView(View):
template_name = 'books/dashboard/initialize_integrated_acc.html'
def get(self, request, *args, **kwargs):
form = InitializeIntegratedAccountForm()
ctx = {'username':request.user.username,'form':form}
return render(request, self.template_name, ctx)
def post(self, request, *args, **kwargs):
ctx = {}
with transaction.atomic():
acc = register_new_account(user = request.user,
name = request.user.username)
messages.add_message(request, messages.SUCCESS, "Accounts initialized.")
return HttpResponseRedirect(reverse_lazy('admin:index'))
class DeclareSourcesView(View):
form_class = None
root_account_group = ''
prev_url = ''
this_url = ''
next_url = ''
end_url = ''
template_name = 'books/declare_sources.html'
def _existing_delarations(self, system_account):
table = []
debit_total = Decimal('0.00')
credit_total = Decimal('0.00')
for source in DeclaredSource.objects.filter(
system_account = system_account).order_by('date'):
row_cells = [source.date, "{} ({})".format(source.account.short_name,source.account.account_group.short_name),
source.details]
if source.debit:
debit_total += source.debit
value_cells = [source.debit, '-']
else:
credit_total += source.credit
value_cells = ['-', source.credit]
row_cells.extend(value_cells)
table.append(row_cells)
table.append(['', '', '', debit_total, credit_total])
return table
def get(self, request, *args, **kwargs):
new_source_form = NewSourceForm(self.root_account_group,
request.user.account)
source_declaration_form = SourceDeclarationForm(self.root_account_group,
request.user.account)
existing_declarations = self._existing_delarations(request.user.account)
if self.next_url:
next_url = reverse_lazy(self.next_url)
else:
next_url = None
if self.prev_url:
prev_url = reverse_lazy(self.prev_url)
else:
prev_url = None
if self.end_url:
end_url = reverse_lazy(self.end_url)
else:
end_url = None
ctx = {'new_source_form':new_source_form,
'root_account_group':self.root_account_group,
'source_declaration_form':source_declaration_form,
'existing_declarations':existing_declarations,
'next_url': next_url, 'prev_url':prev_url,
'end_url':end_url, 'show_new_source_form':False}
return render(request, self.template_name, ctx)
def post(self, request, *args, **kwargs):
#Setup both forms in their default state
ctx = {'show_new_source_form':False}
new_source_form = NewSourceForm(self.root_account_group,
request.user.account)
source_declaration_form = SourceDeclarationForm(self.root_account_group,
request.user.account)
existing_declarations = self._existing_delarations(request.user.account)
if self.next_url:
next_url = reverse_lazy(self.next_url)
else:
next_url = None
if self.prev_url:
prev_url = reverse_lazy(self.prev_url)
else:
prev_url = None
if self.end_url:
end_url = reverse_lazy(self.end_url)
else:
end_url = None
#Determine what to do based on which submit button on the page was clicked
if 'new_declaration' in request.POST:
source_declaration_form = SourceDeclarationForm(self.root_account_group,
request.user.account, request.POST)
if source_declaration_form.is_valid():
declaration = source_declaration_form.save(commit = False)
declaration.system_account = request.user.account
declaration.save()
messages.add_message(request, messages.SUCCESS, "Success")
return HttpResponseRedirect(reverse_lazy(self.this_url))
if 'new_source' in request.POST:
new_source_form = NewSourceForm(self.root_account_group,
request.user.account, request.POST)
if new_source_form.is_valid():
source = new_source_form.save(commit = False)
if source.parent:
source.account_group = source.parent.account_group
source.system_account = request.user.account
source.save()
messages.add_message(request, messages.SUCCESS, "Success")
return HttpResponseRedirect(reverse_lazy(self.this_url))
ctx.update({'show_new_source_form':True})
ctx.update({'new_source_form':new_source_form,
'root_account_group':self.root_account_group,
'source_declaration_form':source_declaration_form,
'existing_declarations':existing_declarations,
'next_url': next_url, 'prev_url':prev_url,
'end_url':end_url})
return render(request, self.template_name, ctx)
class CapitalSourcesView(DeclareSourcesView):
form_class = None
root_account_group = 'equity'
this_url = 'opexa_books:capital_sources'
next_url = 'opexa_books:liability_sources'
class LiabilitySourcesView(DeclareSourcesView):
form_class = None
root_account_group = 'liability'
prev_url = 'opexa_books:capital_sources'
this_url = 'opexa_books:liability_sources'
next_url = 'opexa_books:asset_sources'
class AssetsSourcesView(DeclareSourcesView):
form_class = None
root_account_group = 'assets'
prev_url = 'opexa_books:liability_sources'
this_url = 'opexa_books:asset_sources'
end_url = 'opexa_books:end_declarations'
def end_declarations_view(request):
system_account = request.user.account
declarations = DeclaredSource.objects.filter(system_account = system_account)
with transaction.atomic():
for d in declarations:
if d.is_debit:
action = 'D'
else:
action = 'C'
s_entry_dict = {'account':d.account, 'action':action,
'value':d.value, 'details':d.details, 'date':d.date,
'system_account':system_account}
SingleEntry.objects.create(**s_entry_dict)
d.delete()
request.user.account.initial_setup_done = True
messages.add_message(request, messages.SUCCESS, "Opening Financial Position Declared")
return HttpResponseRedirect(reverse_lazy('opexa_books:dashboard'))
| StarcoderdataPython |
3383234 | <filename>server/models.py
import sqlalchemy as sa
from sqlalchemy.ext.declarative import DeclarativeMeta, declarative_base
Base: DeclarativeMeta = declarative_base()
class Logger(Base):
__tablename__ = "loggers"
id = sa.Column(sa.Integer, sa.Sequence("logger_id_seq"), primary_key=True)
name = sa.Column(sa.UnicodeText, nullable=False, unique=True)
is_displayed = sa.Column(sa.Boolean, nullable=False, default=False)
class Log(Base):
__tablename__ = "logs"
id = sa.Column(sa.Integer, sa.Sequence("log_id_seq"), primary_key=True)
logger_id = sa.Column(sa.Integer, sa.ForeignKey(Logger.id, onupdate="cascade", ondelete="cascade"), nullable=False)
temperature_c = sa.Column(sa.Float, nullable=False)
humidity = sa.Column(sa.Float, nullable=False)
timestamp = sa.Column(sa.TIMESTAMP(timezone=True), nullable=False, default=sa.func.now())
| StarcoderdataPython |
165175 | # -*- coding: utf-8 -*-
from __future__ import print_function, unicode_literals, division
from eight import *
from .lca import LCA
from numpy.linalg import solve
class DenseLCA(LCA):
def solve_linear_system(self):
"""
Master solution function for linear system :math:`Ax=B`.
To most numerical analysts, matrix inversion is a sin.
-- <NAME>, Accuracy and Stability of Numerical Algorithms, Society for Industrial and Applied Mathematics, Philadelphia, PA, USA, 2002, p. 260.
        Unlike the sparse implementation, which uses `UMFpack <http://www.cise.ufl.edu/research/sparse/umfpack/>`_ on the factorized technosphere matrix, this dense variant converts the technosphere matrix to a dense array and solves the system directly with ``numpy.linalg.solve``.
"""
return solve(self.technosphere_matrix.toarray(), self.demand_array)
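# Minimal numerical sketch of the point made in the docstring above: prefer
# solving A x = B directly over forming an explicit inverse. The 2x2 system
# below is purely illustrative and unrelated to any real technosphere matrix.
def _dense_solve_sketch():
    import numpy as np
    A = np.array([[4.0, 1.0],
                  [2.0, 3.0]])
    B = np.array([1.0, 2.0])
    x = np.linalg.solve(A, B)                 # preferred: factorize and solve
    x_via_inverse = np.linalg.inv(A).dot(B)   # works, but less stable/efficient
    assert np.allclose(x, x_via_inverse)
    return x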
| StarcoderdataPython |
3331490 | import hashlib
import os
import shutil
import subprocess
from django.views import generic
from django.template.response import TemplateResponse
from src.apps.main.forms import BinaryViewForm
class HomepageView(generic.FormView):
template_name = 'main/homepage.html'
form_class = BinaryViewForm
def form_valid(self, form):
COMMANDS = [
('Readelf: file header', 'file-header'),
('Readelf: program headers', 'program-headers'),
('LDD', 'ldd'),
('Sha512', 'sha512'),
]
input_data = self.request.FILES['binary'].read()
temporary_binary_path = os.path.join('/tmp/', hashlib.sha256(('makrela-' + form.cleaned_data['binary'].name).encode('utf-8')).hexdigest())
with open(temporary_binary_path, 'wb') as f:
f.write(input_data)
analysis_results = []
for header, command_name in COMMANDS:
analysis_results.append((
header,
subprocess.check_output([
os.path.join(os.path.dirname(__file__), '..', '..', 'analyze.sh'),
command_name,
temporary_binary_path,
])
))
return TemplateResponse(
self.request,
'main/homepage.html',
{'analysis_results': analysis_results}
)
| StarcoderdataPython |
16720 | """TrafficSignDataset dataset."""
from .TrafficSignsDataset import Trafficsignsdataset
| StarcoderdataPython |
81359 | <filename>cache_dependencies/mixins.py
try:
import _thread
except ImportError:
import thread as _thread # Python < 3.*
class ThreadSafeDecoratorMixIn(object):
def __init__(self, delegate):
self._delegate = delegate
self._thread_id = self._get_thread_id()
@staticmethod
def _get_thread_id():
return _thread.get_ident()
def _validate_thread_sharing(self):
if self._thread_id != self._get_thread_id():
raise RuntimeError(
"%s objects created in a "
"thread can only be used in that same thread. The object "
"with %s was created in thread id %s and this is "
"thread id %s."
% (self.__class__, id(self), self._thread_id, self._get_thread_id())
)
def __getattr__(self, name):
return getattr(self._delegate, name)
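# Illustrative sketch (not used elsewhere): a concrete decorator built on the
# mix-in. It forwards attribute access to the wrapped delegate and calls
# _validate_thread_sharing() before a thread-sensitive operation. The
# invalidate() method is a hypothetical delegate method, used only as an example.
class _ExampleThreadSafeWrapper(ThreadSafeDecoratorMixIn):
    def invalidate(self, *args, **kwargs):
        self._validate_thread_sharing()
        return self._delegate.invalidate(*args, **kwargs)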
| StarcoderdataPython |
3371173 | <filename>sqswatcher/plugins/torque.py
# Copyright 2013-2016 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You may not use this file except in compliance with the
# License. A copy of the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "LICENSE.txt" file accompanying this file. This file is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES
# OR CONDITIONS OF ANY KIND, express or implied. See the License for the specific language governing permissions and
# limitations under the License.
__author__ = 'dougalb'
import subprocess as sub
import os
import paramiko
import logging
import shlex
import time
import xml.etree.ElementTree as xmltree
import socket
log = logging.getLogger(__name__)
def __runCommand(command):
log.debug(repr(command))
_command = shlex.split(str(command))
log.debug(_command)
DEV_NULL = open(os.devnull, "rb")
try:
process = sub.Popen(_command, env=dict(os.environ), stdout=sub.PIPE, stderr=sub.STDOUT, stdin=DEV_NULL)
stdout = process.communicate()[0]
exitcode = process.poll()
if exitcode != 0:
log.error("Failed to run %s:\n%s" % (_command, stdout))
return stdout
finally:
DEV_NULL.close()
def isHostInitState(host_state):
# Node states http://docs.adaptivecomputing.com/torque/6-0-2/adminGuide/help.htm#topics/torque/8-resources/resources.htm#nodeStates
init_states = ("down", "offline", "unknown", str(None))
return str(host_state).startswith(init_states)
def wakeupSchedOn(hostname):
log.info('Waking up scheduler on host %s', hostname)
command = ("/opt/torque/bin/pbsnodes -x %s" % (hostname))
sleep_time = 3
times = 20
host_state = None
while isHostInitState(host_state) and times > 0:
output = __runCommand(command)
try:
# Ex.1: <Data><Node><name>ip-10-0-76-39</name><state>down,offline,MOM-list-not-sent</state><power_state>Running</power_state>
# <np>1</np><ntype>cluster</ntype><mom_service_port>15002</mom_service_port><mom_manager_port>15003</mom_manager_port></Node></Data>
# Ex 2: <Data><Node><name>ip-10-0-76-39</name><state>free</state><power_state>Running</power_state><np>1</np><ntype>cluster</ntype>
# <status>rectime=1527799181,macaddr=02:e4:00:b0:b1:72,cpuclock=Fixed,varattr=,jobs=,state=free,netload=210647044,gres=,loadave=0.00,
# ncpus=1,physmem=1017208kb,availmem=753728kb,totmem=1017208kb,idletime=856,nusers=1,nsessions=1,sessions=19698,
# uname=Linux ip-10-0-76-39 4.9.75-25.55.amzn1.x86_64 #1 SMP Fri Jan 5 23:50:27 UTC 2018 x86_64,opsys=linux</status>
# <mom_service_port>15002</mom_service_port><mom_manager_port>15003</mom_manager_port></Node></Data>
xmlnode = xmltree.XML(output)
host_state = xmlnode.findtext("./Node/state")
except:
log.error("Error parsing XML from %s" % output)
if isHostInitState(host_state):
log.debug("Host %s is still in state %s" % (hostname, host_state))
time.sleep(sleep_time)
times -= 1
if host_state == "free":
command = "/opt/torque/bin/qmgr -c \"set server scheduling=true\""
__runCommand(command)
elif times == 0:
log.error("Host %s is still in state %s" % (hostname, host_state))
else:
log.debug("Host %s is in state %s" % (hostname, host_state))
def addHost(hostname,cluster_user,slots):
log.info('Adding %s', hostname)
command = ("/opt/torque/bin/qmgr -c 'create node %s np=%s'" % (hostname, slots))
__runCommand(command)
command = ('/opt/torque/bin/pbsnodes -c %s' % hostname)
__runCommand(command)
# Connect and hostkey
ssh = paramiko.SSHClient()
ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())
hosts_key_file = os.path.expanduser("~" + cluster_user) + '/.ssh/known_hosts'
user_key_file = os.path.expanduser("~" + cluster_user) + '/.ssh/id_rsa'
iter=0
connected=False
while iter < 3 and connected == False:
try:
log.info('Connecting to host: %s iter: %d' % (hostname, iter))
ssh.connect(hostname, username=cluster_user, key_filename=user_key_file)
connected=True
except socket.error, e:
log.info('Socket error: %s' % e)
time.sleep(10 + iter)
iter = iter + 1
if iter == 3:
log.info("Unable to provison host")
return
try:
ssh.load_host_keys(hosts_key_file)
except IOError:
ssh._host_keys_filename = None
pass
ssh.save_host_keys(hosts_key_file)
ssh.close()
wakeupSchedOn(hostname)
def removeHost(hostname, cluster_user):
log.info('Removing %s', hostname)
command = ('/opt/torque/bin/pbsnodes -o %s' % hostname)
__runCommand(command)
command = ("/opt/torque/bin/qmgr -c 'delete node %s'" % hostname)
__runCommand(command)
| StarcoderdataPython |
1645838 | <filename>src/learning/models/vae/unet_vae.py<gh_stars>10-100
""" Full assembly of the parts to form the complete network """
import numpy as np
from torch import nn
from typing import *
from .base_vae import BaseVAE
from ..unet.unet_parts import *
from src.dataloaders.dataloader_meta_info import DataloaderMetaInfo
from src.enums import *
from src.datasets.base_dataset import BaseDataset
from src.learning.loss.loss import kld_log_var_loss_fct, total_variation_loss_fct, masked_total_variation_loss_fct
class UNetVAE(BaseVAE):
def __init__(self, hidden_dims: List = None, bilinear=True, **kwargs):
super(UNetVAE, self).__init__(**kwargs)
self.bilinear = bilinear
self.hidden_dims = hidden_dims
if self.hidden_dims is None:
self.hidden_dims = [64, 128, 256, 512, 1024]
factor = 2 if bilinear else 1
encoder_layers = [DoubleConv(len(self.in_channels), self.hidden_dims[0])]
for in_idx, num_out_channels in enumerate(self.hidden_dims[1:]):
if (in_idx + 1) >= len(self.hidden_dims[1:]):
encoder_layers.append(Down(self.hidden_dims[in_idx], num_out_channels // factor))
else:
encoder_layers.append(Down(self.hidden_dims[in_idx], num_out_channels))
self.encoder = nn.Sequential(*encoder_layers)
# we send a sample input through the model to infer dynamically the needed size of the fc layers
sample_input = torch.zeros(size=(1, len(self.in_channels), self.input_dim[0], self.input_dim[1]))
sample_encodings = self.encode(sample_input)
sample_x_flat = torch.flatten(sample_encodings[-1], start_dim=1)
self.fc_mu = nn.Linear(sample_x_flat.size(1), self.latent_dim)
self.fc_var = nn.Linear(sample_x_flat.size(1), self.latent_dim)
self.fc_decoder_input = nn.Linear(self.latent_dim, sample_x_flat.size(1))
decoder_layers = []
reversed_hidden_dims = self.hidden_dims.copy()
reversed_hidden_dims.reverse()
for in_idx, num_out_channels in enumerate(reversed_hidden_dims[1:]):
if (in_idx + 1) >= len(reversed_hidden_dims[1:]):
decoder_layers.append(Up(reversed_hidden_dims[in_idx], num_out_channels, self.bilinear))
else:
decoder_layers.append(Up(reversed_hidden_dims[in_idx], num_out_channels // factor, self.bilinear))
decoder_layers.append(OutConv(reversed_hidden_dims[-1], len(self.out_channels)))
self.decoder = nn.Sequential(*decoder_layers)
self.feature_extractor = None
def forward(self, data: Dict[Union[str, ChannelEnum], torch.Tensor],
**kwargs) -> Dict[Union[ChannelEnum, str], torch.Tensor]:
input, norm_consts = self.assemble_input(data)
encodings = self.encode(input)
x = encodings[-1]
x_flat = torch.flatten(x, start_dim=1)
# Split the result into mu and var components
# of the latent Gaussian distribution
if self.fc_mu is None or self.fc_var is None or self.fc_decoder_input is None:
device, = list(set(p.device for p in self.parameters()))
self.fc_mu = nn.Linear(x_flat.size(1), self.latent_dim).to(device=device)
self.fc_var = nn.Linear(x_flat.size(1), self.latent_dim).to(device=device)
self.fc_decoder_input = nn.Linear(self.latent_dim, x_flat.size(1)).to(device=device)
mu = self.fc_mu(x_flat)
log_var = self.fc_var(x_flat)
if self.training:
z = self.reparameterize(mu, log_var)
else:
z = mu
x = self.fc_decoder_input(z)
x = x.view(encodings[-1].size(0), encodings[-1].size(1), encodings[-1].size(2), encodings[-1].size(3))
rec_dem = self.decode(x, encodings).squeeze(dim=1)
output = {ChannelEnum.REC_DEM: rec_dem, "mu": mu, "log_var": log_var}
if self.num_solutions > 1 and self.training is False:
dem_solutions = []
for i in range(self.num_solutions):
z = self.reparameterize(mu, log_var)
x = self.fc_decoder_input(z)
x = x.view(encodings[-1].size(0), encodings[-1].size(1), encodings[-1].size(2), encodings[-1].size(3))
x = self.decode(x, encodings).squeeze(dim=1)
dem_solutions.append(x)
dem_solutions = torch.stack(dem_solutions, dim=1)
model_uncertainty = torch.var(dem_solutions, dim=1)
output[ChannelEnum.REC_DEMS] = dem_solutions
output[ChannelEnum.MODEL_UM] = model_uncertainty
if self.use_mean_as_rec:
output[ChannelEnum.REC_DEM] = torch.mean(dem_solutions, dim=1)
output = self.denormalize_output(data, output, norm_consts)
return output
def encode(self, input: torch.Tensor) -> List[torch.Tensor]:
encodings = []
for encoding_idx, encoder_layer in enumerate(self.encoder):
if len(encodings) == 0:
encodings.append(encoder_layer(input))
else:
encodings.append(encoder_layer(encodings[-1]))
return encodings
def decode(self, input: torch.Tensor, encodings: List[torch.Tensor]) -> torch.Tensor:
reversed_encodings = encodings.copy()
reversed_encodings.reverse()
x = input
for decoding_idx, decoder_layer in enumerate(self.decoder):
if decoding_idx + 1 < len(self.decoder):
x = decoder_layer(x, reversed_encodings[decoding_idx+1])
else:
x = decoder_layer(x)
return x
def loss_function(self,
loss_config: dict,
output: Dict[Union[ChannelEnum, LossEnum, str], torch.Tensor],
data: Dict[ChannelEnum, torch.Tensor],
dataloader_meta_info: DataloaderMetaInfo = None,
**kwargs) -> dict:
loss_dict = self.eval_loss_function(loss_config=loss_config, output=output, data=data,
dataloader_meta_info=dataloader_meta_info,**kwargs)
if self.training:
weights = loss_config.get("train_weights", {})
reconstruction_weight = weights.get(LossEnum.MSE_REC_ALL.value, 0)
reconstruction_non_occlusion_weight = weights.get(LossEnum.MSE_REC_NOCC.value, 1)
reconstruction_occlusion_weight = weights.get(LossEnum.MSE_REC_OCC.value, 1)
perceptual_weight = weights.get(LossEnum.PERCEPTUAL.value, 0)
style_weight = weights.get(LossEnum.STYLE.value, 0)
total_variation_weight = weights.get(LossEnum.TV.value, 0)
# kld_weight: Account for the minibatch samples from the dataset
kld_weight = weights.get("kld", None)
if kld_weight is None:
kld_weight = data[ChannelEnum.GT_DEM].size(0) / dataloader_meta_info.length
if perceptual_weight > 0 or style_weight > 0:
artistic_loss = self.artistic_loss_function(loss_config=loss_config, output=output, data=data, **kwargs)
loss_dict.update(artistic_loss)
total_variation_loss = masked_total_variation_loss_fct(input=output[ChannelEnum.COMP_DEM],
mask=data[ChannelEnum.OCC_MASK])
kld_loss = kld_log_var_loss_fct(output["mu"], output["log_var"])
loss = reconstruction_weight * loss_dict[LossEnum.MSE_REC_ALL] \
+ reconstruction_non_occlusion_weight * loss_dict[LossEnum.MSE_REC_NOCC] \
+ reconstruction_occlusion_weight * loss_dict[LossEnum.MSE_REC_OCC] \
+ perceptual_weight * loss_dict.get(LossEnum.PERCEPTUAL, 0.) \
+ style_weight * loss_dict.get(LossEnum.STYLE, 0.) \
+ total_variation_weight * total_variation_loss \
+ kld_weight * kld_loss
loss_dict.update({LossEnum.LOSS: loss})
return loss_dict
else:
return loss_dict
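# Reference sketch of the two VAE-specific pieces this class relies on but that
# live outside this file (BaseVAE.reparameterize and kld_log_var_loss_fct). The
# formulas below are the standard ones; the project's own implementations may
# differ in detail, so treat this only as an illustration.
def _vae_sketch(mu, log_var):
    import torch
    # Reparameterization trick: z = mu + sigma * eps with eps ~ N(0, I),
    # which keeps the sampling step differentiable w.r.t. mu and log_var.
    std = torch.exp(0.5 * log_var)
    z = mu + torch.randn_like(std) * std
    # KL(N(mu, sigma^2) || N(0, I)), averaged over the batch.
    kld = torch.mean(-0.5 * torch.sum(1 + log_var - mu ** 2 - log_var.exp(), dim=1))
    return z, kld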
| StarcoderdataPython |
3358877 | <reponame>tk1012/ion-kit
# https://github.com/fixstars/ion-csharp/blob/master/test/Test.cs
from ionpy import Node, Builder, Buffer, PortMap, Port, Param, Type, TypeCode
import numpy as np # TODO: rewrite with pure python
def test_all():
t = Type(code_=TypeCode.Int, bits_=32, lanes_=1)
input_port = Port(key='input', type=t, dim=2)
value41 = Param(key='v', val='41')
builder = Builder()
builder.set_target(target='host')
builder.with_bb_module(path='libion-bb-test.so')
# builder.with_bb_module(path='ion-bb-test.dll') # for Windows
node = builder.add('test_inc_i32x2').set_port(ports=[ input_port, ]).set_param(params=[ value41, ])
port_map = PortMap()
sizes = (4, 4)
ibuf = Buffer(type=t, sizes=sizes)
obuf = Buffer(type=t, sizes=sizes)
idata = np.full((4*4, ), fill_value=1, dtype=np.int32)
odata = np.full((4*4, ), fill_value=0, dtype=np.int32)
idata_bytes = idata.tobytes(order='C')
odata_bytes = odata.tobytes(order='C')
ibuf.write(data=idata_bytes)
obuf.write(data=odata_bytes)
port_map.set_buffer(port=input_port, buffer=ibuf)
port_map.set_buffer(port=node.get_port(key='output'), buffer=obuf)
builder.run(port_map=port_map)
obuf_bytes = obuf.read(num_data_bytes=len(odata_bytes))
odata = np.frombuffer(obuf_bytes, dtype=np.int32)
for i in range(4*4):
assert odata[i] == 42
| StarcoderdataPython |
14782 | import zeep
import asyncio, sys
from onvif import ONVIFCamera
import cv2
import numpy as np
import urllib
from urllib.request import urlopen
IP="192.168.2.22" # Camera IP address
PORT=80 # Port
USER="admin" # Username
PASS="<PASSWORD>" # Password
XMAX = 1
XMIN = -1
YMAX = 1
YMIN = -1
moverequest = None
ptz = None
active = False
def zeep_pythonvalue(self, xmlvalue):
return xmlvalue
zeep.xsd.simple.AnySimpleType.pythonvalue = zeep_pythonvalue
def setup_move():
mycam = ONVIFCamera(IP, PORT, USER, PASS)
# Create media service object
media = mycam.create_media_service()
print("setup_move {} {}", mycam, media)
# Create ptz service object
global ptz
ptz = mycam.create_ptz_service()
# Get target profile
media_profile = media.GetProfiles()[0]
profileToken = media_profile.token
# Get presets
print("Get Presets...")
gp = ptz.create_type('GetPresets')
gp.ProfileToken = profileToken
presets = ptz.GetPresets(gp)
for preset in presets:
if (hasattr(preset, "Name")):
name = preset.Name
else:
name = ""
position = preset['PTZPosition']
print("preset {} => ({}, {}, {})".format(name, position.PanTilt.x,
position.PanTilt.y,
position.Zoom.x))
# GetStatus
print("GetStatus")
status = ptz.GetStatus({'ProfileToken': profileToken})
print('status {} {} {} ? => {}'.format(status.Position.PanTilt.x, status.Position.PanTilt.y,
status.Position.Zoom.x,
status.MoveStatus.PanTilt))
# abMove = ptz.create_type('AbsoluteMove')
# abMove.ProfileToken = profileToken
# print('status {} {} {} {}'.format(status.Position.PanTilt.x, status.Position.PanTilt.y,
# status.Velocity.PanTilt.x, status.Velocity.PanTilt.y))
return
# Get PTZ configuration options for getting continuous move range
request = ptz.create_type('GetConfigurationOptions')
request.ConfigurationToken = media_profile.PTZConfiguration.token
ptz_configuration_options = ptz.GetConfigurationOptions(request)
global moverequest
moverequest = ptz.create_type('ContinuousMove')
moverequest.ProfileToken = media_profile.token
if moverequest.Velocity is None:
moverequest.Velocity = ptz.GetStatus({'ProfileToken': media_profile.token}).Position
# Get range of pan and tilt
# NOTE: X and Y are velocity vector
# global XMAX, XMIN, YMAX, YMIN
# XMAX = ptz_configuration_options.Spaces.ContinuousPanTiltVelocitySpace[0].XRange.Max
# XMIN = ptz_configuration_options.Spaces.ContinuousPanTiltVelocitySpace[0].XRange.Min
# YMAX = ptz_configuration_options.Spaces.ContinuousPanTiltVelocitySpace[0].YRange.Max
# YMIN = ptz_configuration_options.Spaces.ContinuousPanTiltVelocitySpace[0].YRange.Min
def url_to_image(url):
# password_mgr = urllib.request.HTTPPasswordMgrWithDefaultRealm()
# password_mgr.add_password(None, url, USER, PASS)
# handler = urllib.request.HTTPBasicAuthHandler(password_mgr)
# opener = urllib.request.build_opener(handler)
# urllib.request.install_opener(opener)
# resp = urlopen(url)
import requests
from requests.auth import HTTPDigestAuth
resp = requests.get(url, auth=HTTPDigestAuth(USER, PASS))
if resp.status_code == 200:
image = np.asarray(bytearray(resp.content), dtype="uint8")
image2 = cv2.imdecode(image, cv2.IMREAD_COLOR)
cv2.imshow('image', image2)
return image
else:
return None
class CameraController:
presets = []
status = None
def get_current_preset(self):
mycam = ONVIFCamera(IP, PORT, USER, PASS, '../wsdl/')
# Create media service object
media = mycam.create_media_service()
print("setup_move {} {}", mycam, media)
# Create ptz service object
ptz = mycam.create_ptz_service()
# Get target profile
media_profile = media.GetProfiles()[0]
profileToken = media_profile.token
# GetStatus
print("GetStatus")
self.status = ptz.GetStatus({'ProfileToken': profileToken})
print('status {} {} {} ? => {}'.format(self.status.Position.PanTilt.x, self.status.Position.PanTilt.y,
self.status.Position.Zoom.x,
self.status.MoveStatus.PanTilt))
min_dist = 100
current_prest = None
for preset in self.presets:
position = preset['PTZPosition']
dist = pow((self.status.Position.PanTilt.x - position.PanTilt.x), 2) + pow((self.status.Position.PanTilt.y - position.PanTilt.y), 2)
if dist < min_dist:
min_dist = dist
current_prest = preset
snapshot = media.GetSnapshotUri({'ProfileToken': profileToken})
print('snapshot uri {}'.format(snapshot))
# image = io.imread(snapshot)
# n_image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
# cv2.imwrite('./image1.jpg', n_image)
image = url_to_image(snapshot.Uri)
cv2.imwrite('./image2.jpg', image)
return current_prest, self.status.MoveStatus.PanTilt, snapshot
def get_presets(self):
mycam = ONVIFCamera(IP, PORT, USER, PASS, '../wsdl/')
# Create media service object
media = mycam.create_media_service()
print("setup_move {} {}", mycam, media)
# Create ptz service object
ptz = mycam.create_ptz_service()
# Get target profile
media_profile = media.GetProfiles()[0]
profileToken = media_profile.token
# Get presets
print("Get Presets...")
gp = ptz.create_type('GetPresets')
gp.ProfileToken = profileToken
self.presets = ptz.GetPresets(gp)
for preset in self.presets:
if (hasattr(preset, "Name")):
name = preset.Name
else:
name = ""
position = preset['PTZPosition']
print("preset {} => ({}, {}, {})".format(name, position.PanTilt.x,
position.PanTilt.y,
position.Zoom.x))
return self.presets
if __name__ == '__main__':
# url_to_image('http://192.168.1.108/onvifsnapshot/media_service/snapshot?channel=1&subtype=0')
# setup_move()
camera = CameraController()
camera.get_presets()
camera.get_current_preset() | StarcoderdataPython |
1792748 | <filename>frontend/sphinx/tests/test_button.py
import unittest
from widget.button import Button, button_dictionary
class TestButton(unittest.TestCase):
def test_constructor(self):
button = Button("run_button")
self.assertEqual(button.name, button_dictionary["run_button"]["name"], msg="button name != dictionary lookup")
self.assertEqual(button.title, button_dictionary["run_button"]["title"], msg="button title != dictionary lookup")
self.assertEqual(button.mode, button_dictionary["run_button"]["mode"], msg="button mode != dictionary lookup")
if __name__ == '__main__':
unittest.main() | StarcoderdataPython |
1608057 | <filename>aliyun-python-sdk-mts/aliyunsdkmts/__init__.py
__version__ = "2.6.1" | StarcoderdataPython |
25901 | import sys
import csv
import glob
import argparse
parser = argparse.ArgumentParser()
parser.add_argument("lp_files", help="The label pair need to be converted")
parser.add_argument("dir2pubmed", help="The directory to the place for pubmed articles")
parser.add_argument('out_path', help="The path to store finaltraining data")
args = parser.parse_args()
def main():
lp_files = sorted(glob.glob(args.lp_files))
outpath = args.out_path
pubids = []
for lp_file in lp_files:
lp = open(lp_file)
print(lp_file)
print(lp_file[lp_file.find('labelPair'):])
out_file = open(outpath + lp_file[9+lp_file.find('labelPair'):]+'.csv', 'w')
writer = csv.writer(out_file, delimiter='\t')
for line in lp.readlines():
line = line.strip('\n').split('\t')
gene1, gene2, rel, pubmedid = line[0], line[1], line[2], line[3]
tep = open(args.dir2pubmed+pubmedid).readline().strip('\n').split()
if pubmedid not in pubids:
pubids.append(pubmedid)
gene1_occ = [i for i, w in enumerate(tep)
if w == gene1]
gene1_start = ":".join([str(i) for i in gene1_occ])
gene1_end = ":".join([str(i+1) for i in gene1_occ])
gene2_occ = [i for i, w in enumerate(open(args.dir2pubmed+pubmedid).readline().strip('\n').split()) if w == gene2]
gene2_start = ":".join([str(i) for i in gene2_occ])
gene2_end = ":".join([str(i+1) for i in gene2_occ])
text = open(args.dir2pubmed+pubmedid).readline().strip('\n')
writer.writerow([gene1, 'Gene', gene1, gene1_start, gene1_end,
gene2, 'Gene', gene2, gene2_start, gene2_end,
pubmedid, rel, text])
out_file.close()
cancerlist = [line.strip('\n').split('_') for line in open(args.dir2pubmed+'rel.txt').readlines()]
genelist = [line.strip('\n').split('\t')[0] for line in open(args.dir2pubmed+'gene_list.txt').readlines()]
#print(genelist)
if 'Train' in args.lp_files:
nerfile = open(outpath + 'ner_train.txt', 'w')
else:
nerfile = open(outpath + 'ner_test.txt', 'w')
for pubid in pubids:
content = open(args.dir2pubmed+pubid).readline().strip('\n').split()
for widx, w in enumerate(content):
if w in genelist:
nerfile.write('%s\t%s\t%s\t%s\n' % (w,'B-GENE',str(genelist.index(w)),pubid))
else:
nerfile.write('%s\t%s\t%s\t%s\n' % (w, 'O', '-1', pubid))
nerfile.write('\n')
nerfile.close()
if __name__ == '__main__':
main()
| StarcoderdataPython |
4834263 | # Example of how to handle exceptions (exception handling)
# Define a list
a = [1,2]
try:
    print(a[2]) # Try to print an index that does not exist!
except IndexError:
print("The requested index is not available. The highest available index is %s" % (len(a)-1))
except:
print("An unknown error occured")
finally:
print("The full array is: ",a)
print("============================================")
try:
year_of_birth = int(input("What is your year of birth?\n"))
age = 2019 - year_of_birth
print("You turn %s years this year" % age)
except ValueError:
print("ERROR: it seems that you did not enter a numerical value")
print("I abort")
| StarcoderdataPython |
1638111 | # -*- coding: utf-8 -*-
############################################################################
#
# Copyright © 2013, 2015 OnlineGroups.net and Contributors.
# All Rights Reserved.
#
# This software is subject to the provisions of the Zope Public License,
# Version 2.1 (ZPL). A copy of the ZPL should accompany this distribution.
# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
# FOR A PARTICULAR PURPOSE.
#
############################################################################
import sqlalchemy as sa
from datetime import datetime
from zope.sqlalchemy import mark_changed
from gs.database import getSession, getTable
class TopicQuery(object):
def __init__(self, context=None):
self.topicTable = getTable('topic')
self.topicKeywordsTable = getTable('topic_keywords')
self.postTable = getTable('post')
def topic_hidden(self, postId):
s1 = sa.select([self.postTable.c.topic_id])
s1.append_whereclause(self.postTable.c.post_id == postId)
        ss = s1.alias('ss')
        s2 = sa.select([self.topicTable.c.hidden])
        s2.append_whereclause(self.topicTable.c.topic_id == ss.c.topic_id)
session = getSession()
r = session.execute(s2)
x = r.fetchone()
retval = bool(x['hidden'])
return retval
def topic_sticky(self, topicId):
s = sa.select([self.topicTable.c.sticky])
s.append_whereclause(self.topicTable.c.topic_id == topicId)
session = getSession()
r = session.execute(s)
x = r.fetchone()
retval = bool(x['sticky'])
return retval
def set_sticky(self, topicId, sticky):
session = getSession()
tt = self.topicTable
u = tt.update(tt.c.topic_id == topicId)
if sticky:
v = datetime.utcnow()
else:
v = None
d = {'sticky': v}
session.execute(u, params=d)
mark_changed(session)
def topic_keywords(self, topicId):
tkt = self.topicKeywordsTable
s = tkt.select()
s.append_whereclause(tkt.c.topic_id == topicId)
session = getSession()
r = session.execute(s)
retval = []
if r.rowcount:
x = r.fetchone()
retval = x['keywords']
return retval
| StarcoderdataPython |
3327007 | # Generated by Django 3.2.8 on 2021-11-04 16:06
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('vault', '0006_auto_20211103_1452'),
]
operations = [
migrations.RenameField(
model_name='keys',
old_name='pwd',
new_name='enc_pwd',
),
]
| StarcoderdataPython |
1622561 | <reponame>Clinical-Genomics/scout<filename>scout/commands/load/exons.py
import logging
from datetime import datetime
from pprint import pprint as pp
import click
from flask.cli import with_appcontext
from scout.load import load_exons
from scout.server.extensions import store
from scout.utils.handle import get_file_handle
from scout.utils.scout_requests import fetch_ensembl_exons
LOG = logging.getLogger(__name__)
@click.command("exons", short_help="Load exons")
@click.option(
"-e",
"--exons-file",
type=click.Path(exists=True),
help="Path to file with ensembl exons",
)
@click.option("-b", "--build", type=click.Choice(["37", "38"]), default="37", show_default=True)
@with_appcontext
def exons(build, exons_file):
"""Load exons into the scout database. If no file, fetch exons from ensembl biomart"""
adapter = store
LOG.info("Running scout load exons")
start = datetime.now()
# Test if there are any exons loaded
existing_exon = adapter.exon(build=build)
if existing_exon:
LOG.warning("Dropping all exons ")
adapter.drop_exons(build=build)
LOG.info("Exons dropped")
# Load the exons
nr_exons = 0
if exons_file:
ensembl_exons = get_file_handle(exons_file)
for nr_exons, line in enumerate(ensembl_exons, 1):
pass
ensembl_exons = get_file_handle(exons_file)
else:
ensembl_exons = fetch_ensembl_exons(build=build)
nr_exons = 1360000
try:
load_exons(adapter, ensembl_exons, build, nr_exons=nr_exons)
except Exception as err:
LOG.warning("Something went wrong with ensembl biomart")
# LOG.info("Try to fetch one chromosome at the time")
LOG.info("Please download a mart dump manually, see instructions in user guide for admins")
return
LOG.info("Time to load exons: {0}".format(datetime.now() - start))
| StarcoderdataPython |
116956 | from discord.ext import commands
from backup_bot.logger import logger
from os.path import isdir
from os import mkdir
import shelve
from datetime import datetime
from discord import File, Embed
from collections import OrderedDict
extension_name = "backup"
logger = logger.getChild(extension_name)
@commands.command("backup")
async def backup_cmd(ctx: commands.Context):
embed = Embed(title="Backup", description="In progress... \N{hourglass}")
msg = await ctx.send(embed=embed)
file_name = f"backup/{datetime.now().strftime('%d-%m-%Y %H:%M')}"
with shelve.open(file_name, writeback=True) as file:
file["channels"] = OrderedDict()
file["users"] = OrderedDict()
file["categories"] = OrderedDict()
for c in ctx.guild.text_channels:
embed_field_name = c.name
if c.category:
embed_field_name = f"{c.category} > {embed_field_name}"
if c.category_id not in file["categories"]:
file["categories"][c.category_id] = {"name": c.category.name,
"position": c.category.position,
"nsfw": c.category.is_nsfw()}
embed = msg.embeds[0]
if len(embed.fields) != 0:
embed.set_field_at(-1, name=embed.fields[-1].name, value="\N{check mark}", inline=False)
embed.add_field(name=embed_field_name, value="\N{hourglass}", inline=False)
await msg.edit(embed=embed)
file["channels"][c.id] = {"name": c.name,
"id": c.id,
"category_id": c.category_id,
"topic": c.topic,
"position": c.position,
"slowmode_delay": c.slowmode_delay,
"nsfw": c.is_nsfw(),
"messages": []}
async for m in c.history(limit=None):
if m.author.id not in file["users"]:
file["users"][m.author.id] = {"name": m.author.name,
"discriminator": m.author.discriminator,
"display_name": m.author.display_name,
"avatar": m.author.avatar}
file["channels"][c.id]["messages"].append({"author_id": m.author.id,
"content": m.content,
"embeds": m.embeds,
# "attachments": m.attachments,
"pinned": m.pinned,
"reactions": m.reactions,
"created_at": m.created_at,
"edited_at": m.edited_at})
embed = msg.embeds[0]
embed.set_field_at(-1, name=embed.fields[-1].name, value="\N{check mark}", inline=False)
embed.description = "Finish ! \N{check mark}"
await msg.edit(embed=embed)
await ctx.send(file=File(file_name + ".db", "backup.db"))
def setup(bot: commands.Bot):
logger.info(f"Loading...")
if not isdir("backup"):
logger.info(f"Create backup folder")
mkdir("backup")
try:
bot.add_command(backup_cmd)
except Exception as e:
logger.error(f"Error loading: {e}")
else:
logger.info(f"Load successful")
def teardown(bot: commands.Bot):
logger.info(f"Unloading...")
try:
bot.remove_command("backup")
except Exception as e:
logger.error(f"Error unloading: {e}")
else:
logger.info(f"Unload successful")
| StarcoderdataPython |
3241093 | <reponame>trisadmeslek/V-Sekai-Blender-tools
preview_verts = None
preview_uvs = None
preview_is_quads = False
def set_preview_data(verts, uvs, is_quads=True):
"""
Set the preview data for SprytileGUI to draw
:param verts:
:param uvs:
:param is_quads:
:return:
"""
global preview_verts, preview_uvs, preview_is_quads
preview_verts = verts
preview_uvs = uvs
preview_is_quads = is_quads
def clear_preview_data():
global preview_verts, preview_uvs, preview_is_quads
preview_verts = None
preview_uvs = None
preview_is_quads = True | StarcoderdataPython |
1768485 | # Generated by Django 2.1.1 on 2018-09-03 16:11
import django.contrib.postgres.fields.jsonb
from django.db import migrations, models
import django.db.models.deletion
import django.utils.timezone
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='DatasetModel',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('timestamp', models.DateTimeField(default=django.utils.timezone.now)),
],
options={
'verbose_name': 'Dataset',
'verbose_name_plural': 'Datasets',
},
),
migrations.CreateModel(
name='EntryModel',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('coordinates', django.contrib.postgres.fields.jsonb.JSONField()),
('data', django.contrib.postgres.fields.jsonb.JSONField(blank=True, default=None, null=True)),
('dataset', models.ForeignKey(blank=True, default=None, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='entries', to='hydra_datastore.DatasetModel')),
],
options={
'verbose_name': 'Entry',
'verbose_name_plural': 'Entries',
},
),
migrations.CreateModel(
name='SchemaModel',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('value', django.contrib.postgres.fields.jsonb.JSONField()),
],
),
migrations.AddField(
model_name='datasetmodel',
name='schema',
field=models.ForeignKey(default=None, on_delete=django.db.models.deletion.CASCADE, related_name='datasets', to='hydra_datastore.SchemaModel'),
),
]
| StarcoderdataPython |
157700 | <gh_stars>10-100
"""
This code enables geolocation of the ISS LIS background datasets
http://dx.doi.org/10.5067/LIS/ISSLIS/DATA206
http://dx.doi.org/10.5067/LIS/ISSLIS/DATA207
Notable required packages: xarray, cython, cartopy
<NAME>
<EMAIL>
"""
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
import datetime as dt
import cartopy.crs as ccrs
import xarray
import time
import itertools
from .GeodeticFromKmCTRS import GeodeticFromKmCTRS # cython 5x speedup
# Below are global constants based on empirically derived values
# specific to the ISS LIS optics and integration on the space station.
MAX_HT = 13.5
MAGNIFICATION_FACTOR = 1.01
ROTATE_FACTOR = 0
LEFT_RIGHT_FACTOR = -0.0022
UP_DOWN_FACTOR = 0.0205
MATRIX_VAL = np.array(
[[-0.1322, -0.0518, 0.9911],
[0.0268, 0.9953, 0.1055],
[-0.9907, 0.0413, -0.1299]])
class Quaternion(object):
"""
This class simplifies passing of quaternion information
"""
def __init__(self, w, x, y, z):
self.w = w
self.x = x
self.y = y
self.z = z
class Geo(object):
"""
This class simplifies passing of geolocation information
"""
def __init__(self, lat, lon, alt):
self.lat = lat
self.lon = lon
self.alt = alt
class Ephemeris(object):
"""
This class simplifies passing of ephemeris data
"""
def __init__(self, new_pv, new_vv, new_tm, index):
self.state_vector = np.concatenate(
[[], new_pv[index]])
self.state_vector = np.concatenate(
[self.state_vector, new_vv[index]])
self.translation_matrix = new_tm[index]
def get_every_pointing_vector():
"""
    Polynomial fit of lab measurements to get the pointing vector
    from x and y pixel location.
    Returns
    -------
    Look_Vector : unit vector direction from the CCD axis to the
        geolocated position, for every CCD pixel (x, y in 0-127),
        shape (128, 128, 3).
"""
Look_Vector = np.zeros((128, 128, 3), dtype='double')
# ISS-LIS optics
coeff = np.zeros(4, dtype='double')
coeff[0] = 1.4754537
coeff[1] = -0.36224695
coeff[2] = -0.088939824
coeff[3] = -0.28203806
for i, j in itertools.product(range(128), range(128)):
x = (i - 63.5) / 127.0
y = (127 - j - 63.5) / 127.0
xy = np.sqrt(x * x + y * y)
convert = coeff[0] + coeff[1] * xy + coeff[2] * xy * xy + \
coeff[3] * xy * xy * xy
Look_Vector[i][j][0] = x * convert * MAGNIFICATION_FACTOR
Look_Vector[i][j][1] = y * convert * MAGNIFICATION_FACTOR
Look_Vector[i][j][2] = np.sqrt(
1.0 - (Look_Vector[i][j][0] * Look_Vector[i][j][0] +
Look_Vector[i][j][1] * Look_Vector[i][j][1]))
return Look_Vector
def QuaternionFromMatrix(m):
tr = m[0, 0] + m[1, 1] + m[2, 2]
if tr > 0:
S = np.sqrt(tr + 1.0) * 2
q = Quaternion(0.25 * S, (m[2, 1] - m[1, 2]) / S,
(m[0, 2] - m[2, 0]) / S, (m[1, 0] - m[0, 1]) / S)
elif m[0, 0] > m[1, 1] and m[0, 0] > m[2, 2]:
S = np.sqrt(1.0 + m[0, 0] - m[1, 1] - m[2, 2]) * 2
q = Quaternion((m[2, 1] - m[1, 2]) / S, 0.25 * S,
(m[0, 1] + m[1, 0]) / S, (m[0, 2] + m[2, 0]) / S)
    elif m[1, 1] > m[2, 2]:
S = np.sqrt(1.0 + m[1, 1] - m[0, 0] - m[2, 2]) * 2
q = Quaternion((m[0, 2] - m[2, 0]) / S, (m[0, 1] + m[1, 0]) / S,
0.25 * S, (m[1, 2] + m[2, 1]) / S)
else:
S = np.sqrt(1.0 + m[2, 2] - m[0, 0] - m[1, 1]) * 2
q = Quaternion((m[1, 0] - m[0, 1]) / S, (m[0, 2] + m[2, 0]) / S,
(m[1, 2] + m[2, 1]) / S, 0.25 * S)
return q
def Vector_Add(A, B):
"""
C-code legacy function
"""
return A + B
def Vector_Mag(A):
"""
C-code legacy function, could get replaced by an equivalent numpy function
"""
return np.sqrt(
A[0] * A[0] +
A[1] * A[1] +
A[2] * A[2])
def Vector_Cross(A, B):
"""
This C-code legacy function is surprisingly faster than np.cross, go figure
"""
C = np.zeros(3, dtype='double')
C[0] = A[1] * B[2] - A[2] * B[1]
C[1] = A[2] * B[0] - A[0] * B[2]
C[2] = A[0] * B[1] - A[1] * B[0]
return C
def MatrixFromYPR(ypr):
cy = np.cos(ypr[0])
sy = np.sin(ypr[0])
cp = np.cos(ypr[1])
sp = np.sin(ypr[1])
cr = np.cos(ypr[2])
sr = np.sin(ypr[2])
MYaw = np.zeros((3, 3), dtype='double')
MPitch = np.zeros((3, 3), dtype='double')
MRoll = np.zeros((3, 3), dtype='double')
# Define Yaw matrix
MYaw[0][0] = cy
MYaw[0][1] = -sy
MYaw[0][2] = 0.0
MYaw[1][0] = sy
MYaw[1][1] = cy
MYaw[1][2] = 0.0
MYaw[2][0] = 0.0
MYaw[2][1] = 0.0
MYaw[2][2] = 1.0
# Define Pitch matrix
MPitch[0][0] = cp
MPitch[0][1] = 0.0
MPitch[0][2] = sp
MPitch[1][0] = 0.0
MPitch[1][1] = 1.0
MPitch[1][2] = 0.0
MPitch[2][0] = -sp
MPitch[2][1] = 0.0
MPitch[2][2] = cp
# Define Roll matrix
MRoll[0][0] = 1.0
MRoll[0][1] = 0.0
MRoll[0][2] = 0.0
MRoll[1][0] = 0.0
MRoll[1][1] = cr
MRoll[1][2] = -sr
MRoll[2][0] = 0.0
MRoll[2][1] = sr
MRoll[2][2] = cr
return np.dot(np.dot(MYaw, MPitch), MRoll)
def LVLHMatrixFrom(inPos, inVel):
Mlvlh = np.zeros((3, 3), dtype='double')
posMag = Vector_Mag(inPos)
speed = Vector_Mag(inVel)
crossPr = Vector_Cross(inPos, inVel)
for i in range(3):
Mlvlh[0][i] = inVel[i] / speed # LVLH X in geocentric frame
# LVLH Y in geocentric frame
Mlvlh[1][i] = crossPr[i] / ((-posMag) * speed)
Mlvlh[2][i] = inPos[i] / (-posMag) # LVLH Z in geocentric frame
return Mlvlh
def YPRFromQuaternion(q):
"""
    Convert a quaternion to yaw, pitch, roll (Euler angles).
"""
v = np.zeros(3, dtype='double')
v[0] = np.arctan2(2.0 * (q.x * q.y + q.w * q.z),
q.w * q.w + q.x * q.x - q.y * q.y - q.z * q.z)
v[1] = np.arcsin(-2.0 * (q.x * q.z - q.w * q.y))
v[2] = np.arctan2(2.0 * (q.y * q.z + q.w * q.x),
q.w * q.w - q.x * q.x - q.y * q.y + q.z * q.z)
return v
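# Illustrative consistency check (not part of the processing chain): converting
# yaw/pitch/roll to a matrix, then to a quaternion, and back should recover the
# original angles as long as the pitch stays within (-pi/2, pi/2). The sample
# angles are arbitrary.
def _ypr_quaternion_roundtrip_check():
    ypr_in = np.array([0.3, -0.2, 0.1])
    q = QuaternionFromMatrix(MatrixFromYPR(ypr_in))
    ypr_out = YPRFromQuaternion(q)
    return np.allclose(ypr_in, ypr_out)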
def get_earth_intersection(number_to_process, look_vector, Eph, ISSGD,
MlisOrientation, Mypr, Mlvlh, inPosCTRS):
"""
    This subroutine computes the surface position of each pixel handed
    to it. It is based on CATS code and utilizes the subroutines above.
    Note: Old code derived from C, could use more Python-ification.
"""
location = np.zeros((1, 2), dtype='double')
for i in range(number_to_process):
PixelLOS = np.zeros(3, dtype='double')
for j in range(3):
PixelLOS[j] = look_vector[i][j]
Alt = ISSGD.alt - 3.0
RangeSpan = 5.0 * ISSGD.alt
P2GD = Geo(-90.0, -90.0, -1.0)
# This loop is legacy code that will be modified soon
for z in np.arange(Alt, Alt + RangeSpan, 1.0):
P2T = np.dot(MlisOrientation, z * PixelLOS)
P2LVLH = np.dot(Mypr, P2T) # Transform to LVLH
P2g = np.dot(P2LVLH, Mlvlh) # Transform to geocentric
P2 = Vector_Add(inPosCTRS, P2g)
output = GeodeticFromKmCTRS(P2)
P2GD = Geo(output[0], output[1], output[2])
# MAX_HT = "Cloud-Top Height"
# P2GD.alt is nearly always negative to start, so break hits early
if P2GD.alt < MAX_HT:
break
location[i][0] = P2GD.lat
location[i][1] = P2GD.lon
return location
def get_interpolated_matrices(lis_background, index):
"""
ISS LIS background files contain TAI93 time stamps for each image.
However, ephemeris information is provided at different time steps.
This function interpolates the ephemeris data to the desired background
image's time. The ephemeris time step is only ~1 second, so simple
linear interpolation is used.
"""
time1 = pd.to_datetime(
lis_background.bg_data_summary_TAI93_time.data).to_pydatetime()
time2 = pd.to_datetime(
lis_background.bg_info_TAI93_time.data).to_pydatetime()
# Work in floating point seconds since the earliest reference time.
# np.interp does not play nicely with datetime/timedelta objects.
min_time = np.min([time1[0], time2[0]])
td1 = np.array([(t1 - min_time).total_seconds() for t1 in time1])
td2 = np.array([(t2 - min_time).total_seconds() for t2 in time2])
# Position vector
new_pv = np.zeros(
(time1.shape[0], lis_background.bg_info_position_vector.data.shape[1]),
dtype='double')
# Velocity vector
new_vv = np.zeros_like(new_pv)
# Transformation matrix
new_tm = np.zeros(
(time1.shape[0],
lis_background.bg_info_transform_matrix.data.data.shape[1]),
dtype='double')
# Do the interpolation
for i in range(9):
if i < 3:
new_pv[:, i] = np.interp(
td1, td2, lis_background.bg_info_position_vector.data[:, i])
new_vv[:, i] = np.interp(
td1, td2, lis_background.bg_info_velocity_vector.data[:, i])
new_tm[:, i] = np.interp(
td1, td2, lis_background.bg_info_transform_matrix.data[:, i])
return Ephemeris(new_pv, new_vv, new_tm, index)
def run_iss_lis_geolocation(lis_background, index, verbose=True):
"""
Main function that drives the geolocation processing.
Parameters
----------
lis_background : xarray.Dataset object
LIS background xarray Dataset object from xarray.open_dataset()
index : int
Background image index to use
Other Parameters
----------------
verbose : bool
True - Print out helpful monitoring info
False - Don't do this
Returns
-------
locations : numpy.ndarray
16384 x 2 array of geolocated pixels (lats = index 0, lons = index 1)
Use numpy.reshape(locations[:, i], (128, 128)) to recover 2D structure
Be sure to transpose the original 128x128 image data after geolocation
"""
if verbose:
bt = time.time()
Eph = get_interpolated_matrices(lis_background, index)
lookvecSC = np.zeros((1, 3), dtype='double')
locations = []
inPosCTRS = np.zeros(3, dtype='double')
inVelCTRS = np.zeros(3, dtype='double')
Mypr = np.zeros((3, 3), dtype='double')
MlisOrientation = np.zeros((3, 3), dtype='double')
for i in range(3):
for j in range(3):
Mypr[i, j] = Eph.translation_matrix[i*3 + j]
MlisOrientation[i, j] = MATRIX_VAL[i, j]
inPosCTRS[i] = Eph.state_vector[i] * 0.001
inVelCTRS[i] = Eph.state_vector[i+3] * 0.001
output = GeodeticFromKmCTRS(inPosCTRS)
ISSGD = Geo(output[0], output[1], output[2])
Mlvlh = LVLHMatrixFrom(inPosCTRS, inVelCTRS)
VlisOrientation = np.zeros(3, dtype='double')
VlisOrientation[0] = 3.182 - 0.0485 + \
0.0485 * Eph.state_vector[5] / 6000.0 + ROTATE_FACTOR
VlisOrientation[1] = -0.020 + 0.015 - \
0.0042 * Eph.state_vector[5] / 6000.0 + LEFT_RIGHT_FACTOR
VlisOrientation[2] = 0.020 - 0.051 + UP_DOWN_FACTOR
MlisOrientation = MatrixFromYPR(VlisOrientation)
lv = get_every_pointing_vector()
for i, j in itertools.product(range(128), range(128)):
lookvecSC[0][0] = lv[i][j][0]
lookvecSC[0][1] = lv[i][j][1]
lookvecSC[0][2] = lv[i][j][2]
locations.append(
get_earth_intersection(
1, lookvecSC, Eph, ISSGD,
MlisOrientation, Mypr, Mlvlh, inPosCTRS))
if verbose:
print((time.time() - bt) / 1.0, 'seconds to run')
return np.squeeze(locations)
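# Hedged usage sketch (not part of the original module): the file name below is
# a placeholder for any ISS LIS background netCDF product; xarray is imported
# locally so the sketch stays self-contained.
def _example_geolocate_one_background(path='iss_lis_background_example.nc', index=0):
    import xarray as xr
    lis_background = xr.open_dataset(path)
    locations = run_iss_lis_geolocation(lis_background, index, verbose=False)
    # Recover the 128x128 grids as described in the docstring above.
    lats = np.reshape(locations[:, 0], (128, 128))
    lons = np.reshape(locations[:, 1], (128, 128))
    return lats, lons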
def plot_series_of_backgrounds(lis_background, index, save=None, cmap='bone'):
"""
This function plots 10 panels of ISS LIS background images.
It is a quick way to review ISS LIS backgrounds to identify images worth
geolocating.
Parameters
----------
lis_background : xarray.Dataset object
LIS background xarray Dataset object from xarray.open_dataset()
index : int
Background image index to use
Other Parameters
----------------
save : str or None
File to save plot to (via matplotlib.pyplot.savefig)
cmap : str
Colormap to use
Returns
-------
None
"""
bdts = pd.to_datetime(
lis_background.bg_data_summary_TAI93_time.data).to_pydatetime()
fig = plt.figure(figsize=(9, 20))
for i in range(10):
ax = fig.add_subplot(5, 2, i+1)
ax.imshow(lis_background.bg_data[index + i, :, :], cmap=cmap)
ax.set_title(str(index+i) + ' - ' +
bdts[index+i].strftime('%Y%m%d %H:%M:%S'))
plt.tight_layout()
if save is not None:
plt.savefig(save)
def plot_geolocated_quicklook(lis_background, locations, index, save=None,
cmap='bone', delt=5.8, alpha=0.5,
layer='ASTER_GDEM_Color_Shaded_Relief'):
"""
This function uses cartopy to plot a simple quicklook of a
non-geolocated and then geolocated background. The function also provides
a useful template for decoding the geolocation information provided
by run_iss_lis_geolocation().
Parameters
----------
lis_background : xarray.Dataset object
LIS background xarray Dataset object from xarray.open_dataset()
locations : numpy.ndarray
16384 x 2 array of geolocated pixels (lats = index 0, lons = index 1)
Use numpy.reshape(locations[:, i], (128, 128)) to recover 2D structure
Be sure to transpose the original 128x128 image data after geolocation
index : int
Background image index to use
Other Parameters
----------------
save : str or None
File to save plot to (via matplotlib.pyplot.savefig)
cmap : str
Colormap to use
delt : float
Half-width of geolocated quicklook panel (degrees)
alpha : float
        Alpha value (0-1) of background image on top of WMTS imagery
    layer: str or None
        WMTS layer from https://map1c.vis.earthdata.nasa.gov/wmts-geo/wmts.cgi
None means no layer will be plotted
Returns
-------
None
"""
url = 'https://map1c.vis.earthdata.nasa.gov/wmts-geo/wmts.cgi'
fig = plt.figure(figsize=(16, 8))
projection = ccrs.PlateCarree()
lats = locations[:, 0]
lons = locations[:, 1]
ax = fig.add_subplot(121, projection=projection)
ax.imshow(lis_background.bg_data.data[index, :, :], cmap='bone')
ax.set_title('(a) Raw ISS LIS Background')
ax = fig.add_subplot(122, projection=projection)
ext = [np.mean(lons)-delt,
np.mean(lons)+delt,
np.mean(lats)-delt,
np.mean(lats)+delt]
ax.set_extent(ext)
if type(layer) is str:
ax.add_wmts(url, layer)
ax.coastlines(resolution='10m')
ax.pcolormesh(lons.reshape((128, 128)), lats.reshape((128, 128)),
lis_background.bg_data.data[index, :, :].T, cmap=cmap,
alpha=alpha, transform=projection)
ax.set_title('(b) Geolocated ISS LIS Background')
gl = ax.gridlines(linestyle='--', draw_labels=True)
    gl.ylabels_right = False
    gl.xlabels_top = False
if save is not None:
plt.savefig(save, bbox_inches='tight')
| StarcoderdataPython |
1658421 | <reponame>databio/pararead<filename>pararead/exceptions.py
""" Specific exception types. """
__author__ = "<NAME>"
__email__ = "<EMAIL>"
class CommandOrderException(Exception):
""" The parallel reads processor needs certain method call sequence. """
def __init__(self, reason=""):
super(CommandOrderException, self).__init__(reason)
class FileTypeException(Exception):
""" Extension not matching any of those of supported file types. """
def __init__(self, got, known):
"""
Declare filetype received and those supported.
:param str got: file type, name, or path of offending file.
:param str | Iterable[str] known: supported filetype(s).
"""
reason = "'{}' is not among supported filetypes: {}".format(got, known)
super(FileTypeException, self).__init__(reason)
class IllegalChunkException(Exception):
""" Illegal reads chunk ID. """
def __init__(self, requested, of_interest):
reason = "Requested {} but processing was restricted to: {}".\
format(requested, of_interest)
super(IllegalChunkException, self).__init__(reason)
class MissingHeaderException(Exception):
""" A reads file undergoing processing must have a header. """
def __init__(self, filepath=""):
reason = "No chromosomes in header; this file is " \
"empty or unaligned. Aligned reads are required{}".\
format(": '{}'".format(filepath) if filepath else ".")
super(MissingHeaderException, self).__init__(reason)
class MissingOutputFileException(Exception):
"""
Filepath for particular chunk output doesn't exist.
Based on its internal settings, the processor's combine() step derives
a filepath to the output file for each reads chunk indicated by key
in the argument that it receives. If one of those is missing, it may
be considered an exceptional case.
"""
def __init__(self, reads_chunk_key, filepath):
reason = "Path to output file for reads chunk '{}' " \
"does not exist: '{}'".format(reads_chunk_key, filepath)
super(MissingOutputFileException, self).__init__(reason)
class UnknownChromosomeException(Exception):
""" Represent case in which data about a chromosome is not available. """
def __init__(self, requested, known=None):
reason = requested
if known:
reason += "; known: {}".format(known)
super(UnknownChromosomeException, self).__init__(reason)
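# Illustrative sketch (not part of the original module): how a reads processor
# might raise FileTypeException while validating an input path. The extension
# list here is hypothetical.
def _example_validate_extension(path, known=(".bam", ".sam")):
    if not path.endswith(tuple(known)):
        raise FileTypeException(got=path, known=list(known))
    return path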
| StarcoderdataPython |
3232720 | #!/usr/bin/env python3 -u
# -*- coding: utf-8 -*-
# copyright: sktime developers, BSD-3-Clause License (see LICENSE file)
__author__ = ["<NAME>"]
__all__ = ["BaseMetric"]
import inspect
from sklearn.base import BaseEstimator
class BaseMetric(BaseEstimator):
"""Base class for defining metrics in sktime.
Extends scikit-learn's BaseEstimator.
"""
def __init__(self, func, name=None):
self._func = func
self.name = name if name is not None else func.__name__
def __call__(self, y_true, y_pred, **kwargs):
"""Calculate metric value using underlying metric function."""
        raise NotImplementedError("abstract method")
    # This is copied from sktime.base.BaseEstimator. Copying avoids the
    # not-applicable functionality from BaseEstimator that tripped up unit
    # tests (e.g. is_fitted, check_is_fitted).
@classmethod
def _all_tags(cls):
"""Get tags from estimator class and all its parent classes."""
# We here create a separate estimator tag interface in addition to the one in
# scikit-learn to make sure we do not interfere with scikit-learn's one
# when we inherit from scikit-learn classes. We also make estimator tags a
# class rather than object attribute.
collected_tags = dict()
# We exclude the last two parent classes; sklearn.base.BaseEstimator and
# the basic Python object.
for parent_class in reversed(inspect.getmro(cls)[:-2]):
if hasattr(parent_class, "_tags"):
# Need the if here because mixins might not have _more_tags
# but might do redundant work in estimators
# (i.e. calling more tags on BaseEstimator multiple times)
more_tags = parent_class._tags
collected_tags.update(more_tags)
return collected_tags
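# Illustrative sketch (not part of sktime): a minimal concrete subclass showing
# how BaseMetric is intended to be extended, wiring __call__ to the wrapped
# callable supplied at construction time.
class _ExampleFunctionMetric(BaseMetric):
    """Toy metric that simply delegates to the wrapped callable."""
    def __call__(self, y_true, y_pred, **kwargs):
        return self._func(y_true, y_pred, **kwargs)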
| StarcoderdataPython |
168416 | <reponame>kkaris/indra_cogex
from inspect import Signature, signature
from typing import (
Any,
Callable,
Counter,
Dict,
Iterable,
List,
Mapping,
Tuple,
Type,
Optional,
Set,
)
from docstring_parser import parse
from indra.statements import Agent, Evidence, Statement
from indra_cogex.representation import Node
__all__ = [
"parse_json",
"process_result",
"get_web_return_annotation",
"get_docstring",
"ParseError",
]
class ParseError(ValueError):
"""Raised when the JSON cannot be parsed or is not valid."""
MAX_NODES = 400
def parse_json(query_json: Dict[str, Any]) -> Dict[str, Any]:
"""Parse the incoming query
Parameters
----------
query_json :
The incoming query as a dictionary
Returns
-------
:
The parsed query
"""
parsed_query = {}
for key, value in query_json.items():
if key in ("stmt_hashes", "stmt_hash"):
if isinstance(value, str):
parsed_query[key] = int(value)
elif isinstance(value, list):
parsed_query[key] = [int(v) for v in value]
else:
raise ParseError(f"{key} must be a string or list of strings")
elif key == "nodes":
if isinstance(value, list):
if len(value) > MAX_NODES:
raise ValueError(f"Number of {key} must be less than {MAX_NODES}")
parsed_query[key] = value
else:
raise ParseError(f"{key} must be a list")
else:
parsed_query[key] = value
return parsed_query
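# Illustrative sketch (not part of the original module): the payload below is
# hypothetical and only shows how hash strings are coerced to ints while other
# keys pass through unchanged.
def _example_parse_json():
    query = {"stmt_hash": "12345", "stmt_hashes": ["1", "2"], "limit": 10}
    return parse_json(query)  # -> {"stmt_hash": 12345, "stmt_hashes": [1, 2], "limit": 10}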
def process_result(result) -> Any:
"""Make the result of a query JSON-serializable.
Parameters
----------
result :
The result of a query
Returns
-------
:
The processed result
"""
# Any fundamental type
if isinstance(result, (int, str, bool, float)):
return result
# Any dict query
elif isinstance(result, (dict, Mapping, Counter)):
res_dict = dict(result)
return {k: process_result(v) for k, v in res_dict.items()}
# Any iterable query
elif isinstance(result, (Iterable, list, set)):
list_res = list(result)
# Check for empty list
if list_res and hasattr(list_res[0], "to_json"):
list_res = [res.to_json() for res in list_res]
return list_res
else:
raise TypeError(f"Don't know how to process result of type {type(result)}")
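# Illustrative sketch (not part of the original module): process_result turns
# common result containers into plain JSON-serializable values; the Counter
# contents here are made up.
def _example_process_result():
    from collections import Counter as _Counter
    counts = _Counter({"EGFR": 3, "TP53": 1})
    return process_result(counts)  # -> {"EGFR": 3, "TP53": 1}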
def get_web_return_annotation(sig: Signature) -> Type:
"""Get and translate the return annotation of a function
Parameters
----------
sig :
The signature of the function
Returns
-------
:
The return annotation of the function
"""
# Get the return annotation
return_annotation = sig.return_annotation
if return_annotation is sig.empty:
raise ValueError("Forgot to type annotate function")
# Translate the return annotation:
# Iterable[Node] -> List[Dict[str, Any]]
# bool -> Dict[str: bool]
# Dict[str, List[Evidence]] -> Dict[int, List[Dict[str, Any]]]
# Iterable[Evidence] -> List[Dict[str, Any]]
# Iterable[Statement] -> List[Dict[int, Any]]
# Counter -> Dict[str, int]
# Iterable[Agent] -> List[Dict[str, Any]]
if return_annotation is Iterable[Node]:
return List[Dict[str, Any]]
elif return_annotation is bool:
return Dict[str, bool]
elif return_annotation is Dict[int, List[Evidence]]:
return Dict[str, List[Dict[str, Any]]]
elif return_annotation is Iterable[Evidence]:
return List[Dict[str, Any]]
elif return_annotation is Iterable[Statement]:
return List[Dict[str, Any]]
elif return_annotation is Counter:
return Dict[str, int]
elif return_annotation is Iterable[Agent]:
return List[Dict[str, Any]]
else:
return return_annotation
def get_docstring(
fun: Callable, skip_params: Optional[Set[str]] = None
) -> Tuple[str, str]:
"""Get the docstring of a function
Parameters
----------
fun :
The function whose docstring is to be retrieved
skip_params :
The parameters to skip docstring generation for
Returns
-------
:
The docstring of the function
"""
parsed_doc = parse(fun.__doc__)
sig = signature(fun)
full_docstr = """{title}
Parameters
----------
{params}
Returns
-------
{return_str}
"""
# Get title
short = parsed_doc.short_description
param_templ = "{name} : {typing}\n {description}"
ret_templ = "{typing}\n {description}"
# Get the parameters
param_list = []
for param in parsed_doc.params:
        # Skip parameters such as client or evidence_map
        if skip_params and param.arg_name in skip_params:
continue
if param.arg_name == "stmt_hash":
annot = str
elif param.arg_name == "stmt_hashes":
annot = List[str]
else:
annot = sig.parameters[param.arg_name].annotation
str_type = str(annot).replace("typing.", "")
param_list.append(
param_templ.format(
name=param.arg_name, typing=str_type, description=param.description
)
)
params = "\n\n".join(param_list)
return_str = ret_templ.format(
typing=str(get_web_return_annotation(sig)).replace("typing.", ""),
description=parsed_doc.returns.description,
)
return short, full_docstr.format(
title=short,
params=params,
return_str=return_str,
)
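# Illustrative sketch (not part of the original module): a toy annotated
# function with a numpydoc-style docstring, used to show what get_docstring
# returns; the parameter names are hypothetical.
def _example_get_docstring():
    def toy(client, limit: int) -> bool:
        """Check whether anything is found.
        Parameters
        ----------
        client :
            Client handle (skipped below).
        limit :
            Maximum number of results.
        Returns
        -------
        :
            True if anything was found.
        """
        return limit > 0
    short, full = get_docstring(toy, skip_params={"client"})
    return short, full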
| StarcoderdataPython |
1786539 | from otree.api import *
c = Currency
doc = """
Your app description
"""
class Constants(BaseConstants):
name_in_url = 'subject_email'
players_per_group = None
num_rounds = 1
class Subsession(BaseSubsession):
pass
class Group(BaseGroup):
pass
class Player(BasePlayer):
subject_email = models.StringField(
label="Please enter your Email address"
)
# PAGES
class MyPage(Page):
form_model = 'player'
form_fields = ['subject_email']
@staticmethod
def before_next_page(player: Player, timeout_happened):
player.participant.label = player.subject_email
page_sequence = [MyPage]
| StarcoderdataPython |
3257576 | import contextlib
import unittest
from parflow.subset.clipper import MaskClipper, BoxClipper
import parflow.subset.utils.io as file_io_tools
from parflow.subset.mask import SubsetMask
import numpy as np
import tests.test_files as test_files
import os
class RegressionClipTests(unittest.TestCase):
"""
    Regression tests to verify subsetting can correctly clip a data file,
correctly produces the subset clip,
and correctly writes the bounding box file
"""
def test_subset_dem_to_tif_conus1(self):
data_array = file_io_tools.read_file(test_files.conus1_dem.as_posix())
my_mask = SubsetMask(test_files.huc10190004.get('conus1_mask').as_posix())
clipper = MaskClipper(subset_mask=my_mask, no_data_threshold=-1)
return_arr, new_geom, new_mask, bbox = clipper.subset(data_array)
file_io_tools.write_array_to_geotiff("conus_1_clip_dem_test.tif",
return_arr, new_geom, my_mask.mask_tif.GetProjection())
self.assertIsNone(
np.testing.assert_array_equal(file_io_tools.read_file(test_files.huc10190004.get('conus1_dem').as_posix()),
file_io_tools.read_file('conus_1_clip_dem_test.tif')),
'Clipping DEM matches reference')
os.remove('conus_1_clip_dem_test.tif')
file_io_tools.write_bbox(bbox, 'bbox_conus1.txt')
self.assertSequenceEqual(file_io_tools.read_bbox('bbox_conus1.txt'), test_files.huc10190004.get('conus1_bbox'),
'Subset writes correct bounding box file')
os.remove('bbox_conus1.txt')
def test_subset_tif_conus2(self):
data_array = file_io_tools.read_file(test_files.conus2_dem.as_posix())
my_mask = SubsetMask(test_files.huc10190004.get('conus2_mask').as_posix())
clipper = MaskClipper(subset_mask=my_mask, no_data_threshold=-1)
return_arr, new_geom, new_mask, bbox = clipper.subset(data_array)
file_io_tools.write_array_to_geotiff("conus_2_clip_dem_test.tif",
return_arr, new_geom, my_mask.mask_tif.GetProjection())
self.assertIsNone(
np.testing.assert_array_equal(file_io_tools.read_file(test_files.huc10190004.get('conus2_dem').as_posix()),
file_io_tools.read_file('conus_2_clip_dem_test.tif')),
'Clipping DEM matches reference')
os.remove('conus_2_clip_dem_test.tif')
file_io_tools.write_bbox(bbox, 'bbox_conus2_full.txt')
self.assertSequenceEqual(file_io_tools.read_bbox('bbox_conus2_full.txt'),
test_files.huc10190004.get('conus2_bbox'),
'Subset writes correct bounding box file')
os.remove('bbox_conus2_full.txt')
def test_compare_box_clips(self):
data_array = file_io_tools.read_file(test_files.conus1_dem.as_posix())
my_mask = SubsetMask(test_files.huc10190004.get('conus1_mask').as_posix())
clipper = MaskClipper(subset_mask=my_mask, no_data_threshold=-1)
mask_subset, _, _, bbox = clipper.subset(data_array, crop_inner=0)
box_clipper = BoxClipper(ref_array=data_array, x=bbox[0], y=bbox[1], nx=bbox[2], ny=bbox[3])
box_subset, _, _, _ = box_clipper.subset()
self.assertEqual(mask_subset.shape[0], box_subset.shape[0])
self.assertEqual(mask_subset.shape[1], box_subset.shape[1])
self.assertEqual(mask_subset.shape[2], box_subset.shape[2])
self.assertIsNone(np.testing.assert_array_equal(mask_subset, box_subset))
def test_box_clip(self):
data_array = file_io_tools.read_file(test_files.conus1_dem.as_posix())
box_clipper = BoxClipper(ref_array=data_array)
subset, _, _, _ = box_clipper.subset()
self.assertEqual(1, subset.shape[0])
self.assertEqual(3342, subset.shape[2])
self.assertEqual(1888, subset.shape[1])
self.assertIsNone(np.testing.assert_array_equal(data_array, subset),
'selecting the whole region should return exactly what you would expect')
box_clipper.update_bbox(x=10, y=10, nx=3332, ny=1878)
subset2, _, _, _ = box_clipper.subset()
self.assertEqual(1, subset2.shape[0])
self.assertEqual(3332, subset2.shape[2])
self.assertEqual(1878, subset2.shape[1])
box_clipper.update_bbox(x=10, y=10, nx=201, ny=20)
subset3, _, _, _ = box_clipper.subset()
self.assertEqual(1, subset3.shape[0])
self.assertEqual(201, subset3.shape[2])
self.assertEqual(20, subset3.shape[1])
box_clipper.update_bbox(x=1, y=1, nx=500, ny=300)
subset4, _, _, _ = box_clipper.subset()
self.assertEqual(1, subset4.shape[0])
self.assertEqual(500, subset4.shape[2])
self.assertEqual(300, subset4.shape[1])
# create a 3d array for testing, z=4, y=3, x=2
data_array2 = np.array([[[1, 2], [3, 4, ], [5, 6]],
[[7, 8], [9, 10], [11, 12]],
[[13, 14], [15, 16], [17, 18]],
[[19, 20], [21, 22], [23, 24]]])
box_clipper2 = BoxClipper(ref_array=data_array2)
subset5, _, _, _ = box_clipper2.subset()
self.assertIsNone(np.testing.assert_array_equal(data_array2, subset5))
self.assertEqual(1, subset5[0, 0, 0])
self.assertEqual(22, subset5[3, 1, 1])
box_clipper2.update_bbox(x=1, y=1, nx=1, ny=2)
subset6, _, _, _ = box_clipper2.subset()
self.assertEqual(1, subset6[0, 0, 0])
self.assertEqual(13, subset6[2, 0, 0])
self.assertEqual(15, subset6[2, 1, 0])
def test_box_clip_with_padding(self):
data_array = file_io_tools.read_file(test_files.conus1_dem.as_posix())
# css-like padding (top,right,bot,left)
bbox = test_files.huc10190004.get('conus1_bbox')
box_clipper = BoxClipper(ref_array=data_array, x=bbox[0], y=bbox[1], nx=bbox[2], ny=bbox[3],
padding=(1, 6, 1, 5))
subset, _, _, _ = box_clipper.subset()
self.assertEqual(1, subset.shape[0])
self.assertEqual(32, subset.shape[1])
self.assertEqual(96, subset.shape[2])
file_io_tools.write_pfb(subset, 'WBDHU8_conus1_dem_padded_test.pfb')
padded_subset_ref = file_io_tools.read_file(test_files.huc10190004.get('conus1_dem_padded_box').as_posix())
self.assertIsNone(np.testing.assert_array_equal(padded_subset_ref, subset))
os.remove('WBDHU8_conus1_dem_padded_test.pfb')
def test_box_clip_invalid_nx_dim(self):
with self.assertRaises(Exception):
data_array = file_io_tools.read_file(test_files.conus1_dem.as_posix())
BoxClipper(ref_array=data_array, nx=0)
def test_box_clip_invalid_x_dim(self):
with self.assertRaises(Exception):
data_array = file_io_tools.read_file(test_files.conus1_dem.as_posix())
BoxClipper(ref_array=data_array, x=0)
def test_box_print_no_exception(self):
data_array = file_io_tools.read_file(test_files.conus1_dem.as_posix())
clipper = BoxClipper(ref_array=data_array)
self.assertEqual(-999, clipper.no_data)
self.assertIsNone(print(clipper))
def test_mask_print_no_exception(self):
my_mask = SubsetMask(test_files.huc10190004.get('conus1_mask').as_posix())
clipper = MaskClipper(subset_mask=my_mask, no_data_threshold=-1)
self.assertIsNone(print(clipper))
if __name__ == '__main__':
unittest.main()
| StarcoderdataPython |
132417 | import hashlib
import logging
import urllib.parse
import uuid
from django.conf import settings
from django.contrib.auth import authenticate, login
from django.forms import ValidationError
from django.forms.utils import ErrorList
from django.http import (
HttpResponseForbidden,
HttpResponseRedirect,
)
from django.shortcuts import render
from django.urls import reverse
from django.utils.translation import gettext as _
from django.views import View
from spid_cie_oidc.entity.exceptions import InvalidEntityConfiguration
from spid_cie_oidc.provider.forms import AuthLoginForm, AuthzHiddenForm
from spid_cie_oidc.provider.models import OidcSession
from spid_cie_oidc.provider.exceptions import AuthzRequestReplay, ValidationException
from spid_cie_oidc.provider.settings import OIDCFED_DEFAULT_PROVIDER_PROFILE, OIDCFED_PROVIDER_PROFILES_DEFAULT_ACR
from . import OpBase
logger = logging.getLogger(__name__)
class AuthzRequestView(OpBase, View):
"""
View which processes the actual Authz request and
returns a Http Redirect
"""
template = "op_user_login.html"
def validate_authz(self, payload: dict):
must_list = ("scope", "acr_values")
for i in must_list:
if isinstance(payload.get(i, None), str):
if ' ' in payload[i]:
payload[i] = payload[i].split(' ')
else:
payload[i] = [payload[i]]
if (
'offline_access' in payload['scope'] and
'consent' not in payload['prompt']
):
raise ValidationError(
"scope with offline_access without prompt = consent"
)
redirect_uri = payload.get("redirect_uri", "")
p = urllib.parse.urlparse(redirect_uri)
scheme_fqdn = f"{p.scheme}://{p.hostname}"
if payload.get("client_id", None) in scheme_fqdn:
raise ValidationError("client_id not in redirect_uri")
self.validate_json_schema(
payload,
"authorization_request",
"Authen request object validation failed "
)
def get_login_form(self):
return AuthLoginForm
def get(self, request, *args, **kwargs):
"""
authz request object is received here
it's validated and a login prompt is rendered to the user
"""
req = request.GET.get("request", None)
# FIXME: invalid check: if not request-> no payload-> no redirect_uri
if not req:
logger.error(
f"Missing Authz request object in {dict(request.GET)} "
f"error=invalid_request"
)
return self.redirect_response_data(
self.payload["redirect_uri"],
error="invalid_request",
error_description=_("Missing Authz request object"),
# No req -> no payload -> no state
state="",
)
# yes, again. We MUST.
tc = None
try:
tc = self.validate_authz_request_object(req)
except InvalidEntityConfiguration as e:
# FIXME: to do test
logger.error(f" {e}")
return self.redirect_response_data(
self.payload["redirect_uri"],
error = "invalid_request",
error_description =_("Failed to establish the Trust"),
state = self.payload.get("state", "")
)
except AuthzRequestReplay as e:
logger.error(
"Replay on authz request detected for "
f"{request.GET.get('client_id', 'unknow')}: {e}"
)
return self.redirect_response_data(
self.payload["redirect_uri"],
error = "invalid_request",
error_description =_(
"An Unknown error raised during validation of "
f" authz request object: {e}"
),
state = self.payload.get("state", "")
)
except Exception as e:
logger.error(
"Error during trust build for "
f"{request.GET.get('client_id', 'unknown')}: {e}"
)
return self.redirect_response_data(
self.payload["redirect_uri"],
error="invalid_request",
error_description=_("Authorization request not valid"),
state = self.payload.get("state", "")
)
try:
self.validate_authz(self.payload)
except ValidationException:
return self.redirect_response_data(
self.payload["redirect_uri"],
error="invalid_request",
error_description=_("Authorization request validation error"),
state = self.payload.get("state", "")
)
# stores the authz request in a hidden field in the form
form = self.get_login_form()()
context = {
"client_organization_name": tc.metadata.get(
"client_name", self.payload["client_id"]
),
"hidden_form": AuthzHiddenForm(dict(authz_request_object=req)),
"form": form,
"redirect_uri": self.payload["redirect_uri"]
}
return render(request, self.template, context)
def post(self, request, *args, **kwargs):
"""
When the User prompts his credentials
"""
form = self.get_login_form()(request.POST)
if not form.is_valid():
return render(
request,
self.template,
{
"form": form,
"hidden_form": AuthzHiddenForm(request.POST),
}
)
authz_form = AuthzHiddenForm(request.POST)
authz_form.is_valid()
authz_request = authz_form.cleaned_data.get("authz_request_object")
try:
self.validate_authz_request_object(authz_request)
except Exception as e:
logger.error(
"Authz request object validation failed "
f"for {authz_request}: {e} "
)
# we don't have a redirect_uri here
return HttpResponseForbidden()
        # authenticate the user
username = form.cleaned_data.get("username")
password = form.cleaned_data.get("password")
user = authenticate(username=username, password=password)
if not user:
errors = form._errors.setdefault("username", ErrorList())
errors.append(_("invalid username or password"))
return render(
request,
self.template,
{
"form": form,
"hidden_form": AuthzHiddenForm(request.POST),
}
)
else:
login(request, user)
# create auth_code
auth_code = hashlib.sha512(
'-'.join(
(
f'{uuid.uuid4()}',
f'{self.payload["client_id"]}',
f'{self.payload["nonce"]}'
)
).encode()
).hexdigest()
# put the auth_code in the user web session
request.session["oidc"] = {"auth_code": auth_code}
# store the User session
_provider_profile = getattr(
settings,
'OIDCFED_DEFAULT_PROVIDER_PROFILE',
OIDCFED_DEFAULT_PROVIDER_PROFILE
)
default_acr = OIDCFED_PROVIDER_PROFILES_DEFAULT_ACR[_provider_profile]
session = OidcSession.objects.create(
user=user,
user_uid=user.username,
nonce=self.payload["nonce"],
authz_request=self.payload,
client_id=self.payload["client_id"],
auth_code=auth_code,
acr=(
self.payload["acr_values"][-1]
if len(self.payload.get("acr_values",[])) > 0
else default_acr
)
)
session.set_sid(request)
url = reverse("oidc_provider_consent")
if (
user.is_staff and
'spid_cie_oidc.relying_party_test' in settings.INSTALLED_APPS
):
try:
url = reverse("oidc_provider_staff_testing")
except Exception as e:
logger.error(f"testigng page url reverse failed: {e}")
return HttpResponseRedirect(url)
| StarcoderdataPython |
44954 | <filename>TorPool/tor_method.py
# coding: utf-8
import subprocess
import shutil
import sys
import os
class TorMethod(object):
    '''Builds a Tor proxy server.
    ::: Attribute description :::
    torrc_dir: location where the torrc file will be stored (required)
    tordata_dir: DataDirectory entry written into the torrc (required)
    __process: handle of the tor process, available via TorMethod.get_status
    __torname: name used for the torrc file and data directory, available via TorMethod.get_status
    __torrcfile: path of the torrc file, available via TorMethod.get_status
    __tordatafile: DataDirectory entry recorded in the torrc, available via TorMethod.get_status
    __socksport: Tor opens a SOCKS proxy on port [socksport]
    __controlport: The port on which Tor will listen for local connections from Tor
    controller applications, as documented in control-spec.txt.
    '''
def __init__(self, torrc_dir, tordata_dir, hashedcontrolpassword):
from . import _TOR_EXE
        if sys.platform == 'win32':
self.__tor_exe = _TOR_EXE
else:
self.__tor_exe = os.popen('which tor').read().rstrip('\n')
            if self.__tor_exe == '':
error_msg = (
"\'Tor client\' is not installed. Please insatll tor client first!\n"
"\t If Your system is debian or ubuntu, please execute \'sudo apt install tor -y\'.\n"
"\t If Your system is macOS, please install homebrew and execute \'brew install tor -y\'.\n"
)
raise OSError(error_msg)
self.torrc_dir = torrc_dir #
self.tordata_dir = tordata_dir #
        self.hashedcontrolpassword = hashedcontrolpassword
import uuid
self.__process = None #
self.__torname = str(uuid.uuid4()) #
self.__torrcfile = os.path.join(self.torrc_dir, self.__torname + '.conf') #
self.__tordatafile = os.path.join(self.tordata_dir, self.__torname)
self.__socksport = None #
self.__controlport = None #
self.__hashed = self.__tor_hashpasswd()
if os.path.exists(self.torrc_dir):
shutil.rmtree(self.torrc_dir)
os.makedirs(self.torrc_dir)
if os.path.exists(self.tordata_dir):
shutil.rmtree(self.tordata_dir)
os.makedirs(self.tordata_dir)
@property
def get_status(self):
if self.__process is None:
pid = None
else:
pid = self.__process.pid
return {
'tor_exe': self.__tor_exe,
'socksport': self.__socksport,
'process': pid,
'tor_uuid': self.__torname,
'torrc_path': self.__torrcfile,
'torrcdata_path': self.__tordatafile,
}
def __tor_hashpasswd(self):
process = subprocess.Popen(self.__tor_exe + ' --hash-password ' + str(self.hashedcontrolpassword), shell=True, stdout=subprocess.PIPE)
return str(process.stdout.readline().decode('utf-8')).rstrip('\n')
def get_free_port(self):
        '''Find a free (idle) local port.'''
from socket import socket
port = None
with socket() as s:
s.bind(('',0))
port = s.getsockname()[1]
s.close()
return port
def make_torrc(self):
        '''Write the torrc file to disk.'''
if not os.path.exists(self.torrc_dir):
os.makedirs(self.torrc_dir)
if not os.path.exists(self.tordata_dir):
os.makedirs(self.tordata_dir)
with open(self.__torrcfile, 'w') as f:
torrc = self.torrc()
f.write(torrc)
def torrc(self):
        '''Build the contents of the torrc file.'''
if self.__socksport is None:
self.__socksport = self.get_free_port()
if self.__controlport is None:
self.__controlport = self.get_free_port()
torrc_file = (
            'HashedControlPassword {hashedcontrolpassword}\n'
'SocksPort {socksport}\n'
'ControlPort {controlport}\n'
'DataDirectory {tordatafile}\n'
)
return torrc_file.format(
hashedcontrolpassword = self.__hashed,
socksport = self.__socksport,
controlport = self.__controlport,
tordatafile = self.__tordatafile
)
def start_tor(self):
        '''Start tor (killing any instance previously started by this helper).'''
        if self.__process is not None:
            self.__process.kill()
        process = subprocess.Popen(self.__tor_exe + ' -f ' + self.__torrcfile, shell=True)
        self.__process = process
def restart_tor(self):
        '''If the proxy gets blocked, kill the process and run tor again.'''
self.__process.kill()
self.start_tor()
def kill_process(self):
        '''Kill the tor process started by this helper and remove its files.'''
        self.__process.kill()
        os.remove(self.__torrcfile)
        shutil.rmtree(self.__tordatafile)
self.__process = None
def kill_all_tor(self):
        '''Kill every tor process running on the system.'''
        if sys.platform == 'win32':
os.system('TASKKILL /F /IM tor.exe /T')
else:
os.system('killall -9 tor')
self.pool = [] | StarcoderdataPython |
5117 | <gh_stars>0
"""High-level search API.
This module implements application-specific search semantics on top of
App Engine's search API. There are two chief operations: querying for
entities, and managing entities in the search facility.
Add and remove Card entities in the search facility:
insert_cards([models.Card])
delete_cards([models.Card])
Query for Card entities:
query_cards(query_string, limit=20) -> search.SearchResults
The results items will have the following fields:
user_key, user_nickname, front, back, info, tag (repeated), added,
modified, source_url
The query_string is free-form, as a user would enter it, and passes
through a custom query processor before the query is submitted to App
Engine. Notably, pass @username to restrict the query to entities
authored by username, and #tag to restrict the query to only documents
matching the given tag. Multiple @usernames or #tags result in an OR
query.
"""
import re
from google.appengine.api import search
from google.appengine.ext import ndb
QUERY_LIMIT = 20
CARD_INDEX_NAME = 'cards'
# Increase this value when _card2doc changes its format so that
# queries can determine the data available on returned documents.
CARD_DOCUMENT_VERSION = '1'
# Ensure we're under the 2000 character limit from
# https://developers.google.com/appengine/docs/python/search/query_strings
MAX_QUERY_LEN = 200
# TODO(chris): it would be better if this module didn't know about
# specific entity types, but instead defined a protocol to get
# metadata from an entity and generate a document.
def insert_cards(cards):
"""Insert or update models.Card entities in the search facility."""
# TODO(chris): should we allow more than 200 cards per call?
assert len(cards) <= 200, len(cards)
card_docs = map(_card2doc, cards)
index = search.Index(name=CARD_INDEX_NAME)
index.put(card_docs)
def delete_cards(cards):
"""Delete models.Card entities from the search facility."""
index = search.Index(name=CARD_INDEX_NAME)
card_doc_ids = map(_card2docid, cards)
index.delete(card_doc_ids)
def query_cards(query_str, limit=QUERY_LIMIT, web_safe_cursor=None,
ids_only=False, user_key=None):
"""Return the search.SearchResults for a query.
ids_only is useful because the returned document IDs are url-safe
keys for models.Card entities.
"""
if web_safe_cursor:
cursor = search.Cursor(web_safe_string=web_safe_cursor)
else:
cursor = None
index = search.Index(name=CARD_INDEX_NAME)
query_processor = _QueryProcessor(
query_str,
name_field='user_nickname',
tag_field='tag',
private_field='private',
user_key_field='user_key',
query_options=search.QueryOptions(limit=limit, cursor=cursor,
ids_only=ids_only),
user_key=user_key)
search_results = index.search(query_processor.query())
# TODO(chris): should this return partially-instantiated
# models.Card instances instead of leaking implementation details
# like we do now?
return search_results
def _card2doc(card):
# TODO(chris): should we include all fields that would be needed
# for rendering a search results item to avoid entity lookup?
tag_fields = [search.AtomField(name='tag', value=tag) for tag in card.tags]
doc = search.Document(
doc_id=_card2docid(card),
fields=[
search.AtomField(name='doc_version', value=CARD_DOCUMENT_VERSION),
search.AtomField(name='user_key', value=card.user_key.urlsafe()),
# TODO(chris): is user_nickname always a direct-match
# shortname, e.g., @chris?
search.AtomField(name='user_nickname', value=card.user_nickname),
# TODO(chris): support HtmlField for richer cards?
search.TextField(name='front', value=card.front),
search.TextField(name='back', value=card.back),
search.TextField(name='info', value=card.info),
search.DateField(name='added', value=card.added),
search.DateField(name='modified', value=card.modified),
search.AtomField(name='source_url', value=card.source_url),
search.AtomField(name='private', value="1" if card.private else "0"),
] + tag_fields)
return doc
def _card2docid(card):
# We set the search.Document's ID to the entity key it mirrors.
return card.key.urlsafe()
def _sanitize_user_input(query_str):
# The search API puts special meaning on certain inputs and we
# don't want to expose the internal query language to users so
# we strictly restrict inputs. The rules are:
#
# Allowed characters for values are [a-zA-Z0-9._-].
# @name is removed and 'name' values returned as a list.
# #tag is removed and 'tag' values returned as a list.
terms, names, tags = [], [], []
for token in query_str.split():
# TODO(chris): allow international characters.
sane_token = re.sub(r'[^a-zA-Z0-9._-]+', '', token)
if sane_token:
            if sane_token in ('AND', 'OR'):
continue # ignore special search keywords
elif token.startswith('@'):
names.append(sane_token)
elif token.startswith('#'):
tags.append(sane_token)
else:
terms.append(sane_token)
return terms, names, tags
class _QueryProcessor(object):
"""Simple queries, possibly with @name and #tag tokens.
name_field is the field @name tokens should apply to.
tag_field is the name of the field #tag tokens should apply to.
"""
def __init__(self, query_str,
name_field, tag_field, private_field, user_key_field,
query_options=None, user_key=None):
self.query_str = query_str
self.name_field = name_field
self.tag_field = tag_field
self.private_field = private_field
self.user_key_field = user_key_field
self.query_options = query_options
self.user_key = user_key
def _sanitize_user_input(self):
query_str = self.query_str[:MAX_QUERY_LEN]
return _sanitize_user_input(query_str)
def _build_query_string(self):
terms, names, tags = self._sanitize_user_input()
        # Our simple query logic is to OR together all terms from the
# user, then AND in the name or tag filters (plus a privacy clause).
parts = []
if terms:
parts.append(' OR '.join(terms))
if names:
parts.append('%s: (%s)' % (self.name_field, ' OR '.join(names)))
if tags:
parts.append('%s: (%s)' % (self.tag_field, ' OR '.join(tags)))
# Don't return cards that other users have marked private...
privacy = '%s: 0' % self.private_field
if self.user_key:
# ... but always show the user their own cards in results.
privacy += ' OR %s: (%s)' % (self.user_key_field, self.user_key)
parts.append('(' + privacy + ')')
return ' AND '.join(parts)
def query(self):
query = search.Query(
query_string=self._build_query_string(),
options=self.query_options)
return query
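# Illustrative sketch (not part of the original module) of how user input is
# split into free-text terms, @name filters and #tag filters; the query string
# is made up.
def _example_sanitize():
    terms, names, tags = _sanitize_user_input('python flashcards @chris #tips AND')
    # terms == ['python', 'flashcards'], names == ['chris'], tags == ['tips']
    return terms, names, tags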
| StarcoderdataPython |
3319109 | <filename>obj_import_shape.py
#!python
# import an ESRI shape to vulcan objects
# input_shp: path to the shape file (the auxiliary files must also exist)
# object_layer: (optional) attribute that will be used as object layer
# object_name: (optional) attribute that will be used as object name
# object_group: (optional) attribute that will be used as object group
# output_dgd: path to a new or existing dgd where the data will be saved
# v1.0 05/2019 paulo.ernesto
'''
usage: $0 input_shp*shp layer:input_shp name:input_shp group:input_shp feature:input_shp value:input_shp output_dgd*dgd.isis
'''
'''
Copyright 2019 Vale
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
https://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*** You can contribute to the main repository at: ***
https://github.com/pemn/vulcan_shape_export_import
---------------------------------
'''
import sys, os.path
import re
# import modules from a pyz (zip) file with same name as scripts
sys.path.append(os.path.splitext(sys.argv[0])[0] + '.pyz')
from _gui import usage_gui
def obj_import_shape(input_shp, object_layer, object_name, object_group, object_feature, object_value, output_dgd):
print("# obj_import_shape")
import vulcan
import shapefile
shapes = shapefile.Reader(input_shp)
dgd = None
if os.path.exists(output_dgd):
dgd = vulcan.dgd(output_dgd, 'w')
else:
dgd = vulcan.dgd(output_dgd, 'c')
layer_name = object_layer
layers = dict()
for item in shapes.shapeRecords():
point_type = int(not re.search('POINT', item.shape.shapeTypeName))
# object without a valid layer name will have this default layer
layer_name = '0'
fields = item.record.as_dict()
if object_layer in fields and fields[object_layer]:
layer_name = str(fields[object_layer])
elif object_layer:
layer_name = object_layer
p1 = len(item.shape.points)
# each object may have multiple parts
# create a object for each of these parts
for p in reversed(item.shape.parts):
coordinates = [tuple(_) + (0,0,point_type) for _ in item.shape.points[p:p1]]
# print("p", p, "p1", p1)
p1 = p
# continue
obj = vulcan.polyline(coordinates)
if object_name in fields:
obj.set_name(str(fields[object_name]))
if object_group in fields:
obj.set_group(str(fields[object_group]))
if object_feature in fields:
obj.set_feature(str(fields[object_feature]))
if object_value in fields:
obj.set_value(fields[object_value])
if layer_name not in layers:
layers[layer_name] = vulcan.layer(layer_name)
layers[layer_name].append(obj)
for layer_obj in layers.values():
dgd.save_layer(layer_obj)
print("finished")
main = obj_import_shape
if __name__=="__main__":
usage_gui(__doc__)
| StarcoderdataPython |
1650603 | from pyteal import *
from algosdk.future import transaction
from algosdk.v2client import algod
import os
import pathlib
from util import deploy, get_private_key_from_mnemonic
#if Resource: application_args = (asset_total, asset_unit_name, asset_name, asset_url, asset_metadata_hash)
# Resource must include the address of the apat field of their no_op txn
#if User: application_args = ()
# User must include the ASA ID in the apas field of their no_op txn
class LocalState:
SCHEMA: transaction.StateSchema = transaction.StateSchema(
num_uints=1, num_byte_slices=0)
class Variables:
ASSET: TealType.bytes = Bytes("asset")
class GlobalState:
SCHEMA: transaction.StateSchema = transaction.StateSchema(
num_uints=0, num_byte_slices=0)
def approval_program():
create_asa = Seq([
Assert(Txn.application_args.length() == Int(5)),
# Create an asset
InnerTxnBuilder.Begin(),
InnerTxnBuilder.SetFields({
TxnField.type_enum: TxnType.AssetConfig,
TxnField.config_asset_total: Btoi(Txn.application_args[0]),
TxnField.config_asset_decimals: Int(0),
TxnField.config_asset_unit_name: Txn.application_args[1],
TxnField.config_asset_name: Txn.application_args[2],
TxnField.config_asset_url: Txn.application_args[3],
TxnField.config_asset_metadata_hash: Txn.application_args[4],
TxnField.config_asset_manager: Global.current_application_address(),
TxnField.config_asset_reserve: Global.current_application_address(),
TxnField.config_asset_freeze: Global.current_application_address(),
TxnField.config_asset_clawback: Global.current_application_address()
}),
InnerTxnBuilder.Submit(),
App.localPut(
Txn.accounts[1], LocalState.Variables.ASSET, InnerTxn.created_asset_id()),
Return(InnerTxn.created_asset_id())
])
user_asa_info = App.localGetEx(
Txn.sender(), App.id(), LocalState.Variables.ASSET)
num_asa = AssetHolding.balance(
Global.current_application_address(), Txn.assets[0])
claim_asa = Seq([
user_asa_info,
If(user_asa_info.hasValue(), Seq([
num_asa,
InnerTxnBuilder.Begin(),
InnerTxnBuilder.SetFields({
TxnField.asset_sender: Global.current_application_address(),
TxnField.type_enum: TxnType.AssetTransfer,
TxnField.asset_receiver: Txn.sender(),
TxnField.asset_amount: num_asa.value(),
# Must be in the assets array sent as part of the application call
TxnField.xfer_asset: user_asa_info.value(),
}),
InnerTxnBuilder.Submit(),
Return(Int(1))
]), Err())
])
handle_noop = If(Global.creator_address() ==
Txn.sender(), create_asa, claim_asa)
# TODO: Can set a required fee for users to pay when they opt-in, could be used for one-time payments for a service
handle_optin = Seq([
Return(Int(1))
])
handle_closeout = Seq([
Return(Int(1))
])
handle_updateapp = If(Global.creator_address() ==
Txn.sender(), Return(Int(1)), Err())
handle_deleteapp = If(Global.creator_address() ==
Txn.sender(), Return(Int(1)), Err())
program = Cond(
[Txn.on_completion() == OnComplete.NoOp, handle_noop],
[Txn.on_completion() == OnComplete.OptIn, handle_optin],
[Txn.on_completion() == OnComplete.CloseOut, handle_closeout],
[Txn.on_completion() == OnComplete.UpdateApplication, handle_updateapp],
[Txn.on_completion() == OnComplete.DeleteApplication, handle_deleteapp]
)
return program
def clear_program():
program = Return(Int(1))
return program
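# Hedged client-side sketch (not part of the original module): how a User
# account that has already opted in might call the deployed app to claim,
# passing the ASA ID in the foreign-assets array as described in the comment
# at the top of this file. All arguments are assumed to come from the caller.
def _example_claim_call(algod_client, sender_private_key, sender_address, app_id, asa_id):
    sp = algod_client.suggested_params()
    txn = transaction.ApplicationNoOpTxn(
        sender_address, sp, app_id, foreign_assets=[asa_id])
    signed = txn.sign(sender_private_key)
    return algod_client.send_transaction(signed)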
def create(API_TOKEN, API_URL, MNEMONIC, upload=False):
# Compile to TEAL
with open(os.path.join(pathlib.Path(__file__).parent, 'blockin_local_approval.teal'), 'w') as f:
approval_teal = compileTeal(
approval_program(), Mode.Application, version=5)
f.write(approval_teal)
with open(os.path.join(pathlib.Path(__file__).parent, 'blockin_local_clear.teal'), 'w') as f:
clear_teal = compileTeal(clear_program(), Mode.Application, version=5)
f.write(clear_teal)
if upload:
# initialize an algodClient
algod_client = algod.AlgodClient(API_TOKEN, API_URL, {
"x-api-key": API_TOKEN})
# Create & Deploy the Application
return deploy(algod_client, get_private_key_from_mnemonic(MNEMONIC),
approval_teal, clear_teal, GlobalState.SCHEMA, LocalState.SCHEMA) | StarcoderdataPython |
1721747 | <filename>tensorflow_transform/saved/saved_transform_io_v2.py
# Copyright 2020 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utility functions to save and load from SavedModels in TF 2.x."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import copy
# GOOGLE-INITIALIZATION
import six
import tensorflow as tf
from tensorflow_transform.saved import constants
from tensorflow_transform.saved import saved_model_loader
from tensorflow_transform.saved import saved_transform_io
# pylint: disable=g-direct-tensorflow-import
from tensorflow.python.framework import composite_tensor
from tensorflow.python.util import object_identity
# pylint: enable=g-direct-tensorflow-import
class SavedModelLoader(object):
"""Handles a SavedModel exported using TF 1.x APIs in TF 2.x."""
def __init__(self, saved_model_dir):
"""Init method for SavedModelLoader.
Args:
saved_model_dir: A SavedModel directory providing a transform graph. The
MetaGraphDef and signature are selected from the SavedModel using keys
defined in `../constants.py` ('transform' and 'transform_signature',
respectively).
"""
# TODO(b/160294509): Stop using tf.compat.v2 when TF1.15 support is dropped.
self._imported = tf.compat.v2.saved_model.load(saved_model_dir)
self.load_v2_in_compat = (
constants.TRANSFORM_SIGNATURE in self._imported.signatures)
if self.load_v2_in_compat:
self._wrapped = self._imported.signatures[constants.TRANSFORM_SIGNATURE]
self._func_graph = self._wrapped.graph
self._structured_inputs = self._get_input_signature_from_v1_saved_model(
saved_model_dir)
self._structured_outputs = self._wrapped.structured_outputs
else:
# TODO(b/160550490): Remove local import.
from tensorflow_transform import tf2_utils # pylint: disable=g-import-not-at-top
      # transform_fn is now a ConcreteFunction, but was a tf.function. We need
      # to handle both to maintain backward compatibility. If it's a
      # tf.function, since `input_signature` was specified when exporting the
      # tf function to `SavedModel`, there should be exactly one concrete
      # function present on loading the `SavedModel`.
if hasattr(self._imported.transform_fn, 'concrete_functions'):
concrete_functions = self._imported.transform_fn.concrete_functions
assert len(concrete_functions) == 1, concrete_functions
self._wrapped = concrete_functions[0]
else:
self._wrapped = self._imported.transform_fn
self._func_graph = self._wrapped.graph
self._structured_inputs = (
tf2_utils.get_structured_inputs_from_func_graph(self._func_graph))
self._structured_outputs = tf.nest.pack_sequence_as(
self._func_graph.structured_outputs,
self._func_graph.outputs,
expand_composites=True)
self._output_to_inputs_map = (
self._get_output_to_inputs_map(self._structured_outputs))
saved_transform_io._maybe_register_addon_ops() # pylint: disable=protected-access
def _get_input_signature_from_v1_saved_model(self, saved_model_dir):
"""Get structured inputs for a TF1 compat SavedModel."""
saved_model = saved_model_loader.parse_saved_model(saved_model_dir)
meta_graph_def = saved_model_loader.choose_meta_graph_def_and_raise(
saved_model)
signature = meta_graph_def.signature_def[constants.TRANSFORM_SIGNATURE]
return signature.inputs
def _get_output_to_inputs_map(self, output_signature):
"""Get all graph inputs that the tensors in output_signature depend on."""
# TODO(b/160550490): Remove local import.
from tensorflow_transform import graph_tools # pylint: disable=g-import-not-at-top
result = {}
for name, output in six.iteritems(output_signature):
components = self._get_component_tensors(output)
sinks = [self._as_operation(component) for component in components]
# Ignore control dependencies when walking the graph as we only care about
# which user defined inputs this output depends on.
result[name] = graph_tools.retrieve_sources(
sinks, ignore_control_dependencies=True)
return result
def _as_operation(self, op_or_tensor):
if isinstance(op_or_tensor, tf.Tensor):
return op_or_tensor.op
return op_or_tensor
def _get_component_tensors(self, tensor):
"""Get all component tensors.
Args:
tensor: A `Tensor` or `CompositeTensor`.
Returns:
All `Tensor` components of `tensor`.
Raises:
ValueError if supplied `tensor` parameter is neither a `Tensor` nor a
`CompositeTensor`.
"""
if isinstance(tensor, tf.Tensor):
return [tensor]
elif isinstance(tensor, composite_tensor.CompositeTensor):
return tf.nest.flatten(tensor, expand_composites=True)
else:
raise ValueError(
'Unsupported tensor. Arg `tensor` is neither a `Tensor` nor a '
'`CompositeTensor`: {}.'.format(tensor))
def _get_fetches(self, feeds):
result = {}
for name, output in six.iteritems(self._func_graph.structured_outputs):
extra_sources = self._output_to_inputs_map[name].difference(feeds)
# If output does not depend on an input placeholder that is not being fed,
# add it to fetches.
if not extra_sources.difference(self._func_graph.internal_captures):
result[name] = output
return result
def _apply_v1_transform_model_in_v2(self, logical_input_map):
"""Applies a V1 transform graph to `Tensor`s.
This method applies the transformation graph as a pruned function to the
`logical_input_map`.
It prunes the function loaded from the SavedModel to return only outputs
that can be computed from the keys provided in `logical_input_map`.
Args:
logical_input_map: a dict of logical name to Tensor. The logical names
must be a subset of those in the input signature of the transform graph,
and the corresponding Tensors must have the expected types and shapes.
Returns:
A dict of logical name to Tensor, as provided by the output signature of
the transform graph.
"""
input_map = (
saved_transform_io._expand_input_map( # pylint: disable=protected-access
logical_input_map, self._structured_inputs))
feeds = []
pruned_input_args = []
for name in six.iterkeys(input_map):
tensor = self._func_graph.get_tensor_by_name(name)
try:
tensor.shape.assert_is_compatible_with(input_map[name].shape)
except ValueError as e:
raise ValueError('{}: {}'.format(name, e))
feeds.append(tensor)
pruned_input_args.append(input_map[name])
fetches = self._get_fetches(feeds)
pruned = self._wrapped.prune(feeds, fetches)
result = pruned(*pruned_input_args)
# TODO(b/163329414): Remove set_shape when calling pruned no longer produces
# tensors with unknown shapes.
for name, output in fetches.items():
if hasattr(result[name], 'set_shape'):
result[name].set_shape(output.shape)
return result
def _apply_v2_transform_model(self, logical_input_map):
"""Applies a V2 transform graph to `Tensor`s.
This method applies the transformation graph to the `logical_input_map` to
return only outputs that can be computed from the keys provided in
`logical_input_map`.
Args:
logical_input_map: a dict of logical name to Tensor. The logical names
must be a subset of those in the input signature of the transform graph,
and the corresponding Tensors must have the expected types and shapes.
Returns:
A dict of logical name to Tensor, as provided by the output signature of
the transform graph.
"""
# TODO(b/160550490): Remove local import.
from tensorflow_transform import tf2_utils # pylint: disable=g-import-not-at-top
feeds = object_identity.ObjectIdentitySet(self._func_graph.inputs)
unfed_input_keys = (
set(six.iterkeys(self._structured_inputs)) -
set(six.iterkeys(logical_input_map)))
for input_key in unfed_input_keys:
unfed_input_components = self._get_component_tensors(
self._structured_inputs[input_key])
feeds = feeds.difference(unfed_input_components)
modified_inputs = copy.copy(logical_input_map)
if unfed_input_keys:
batch_size = 1
if logical_input_map:
an_input = next(six.itervalues(logical_input_map))
if tf.shape(an_input)[0] is not None:
batch_size = tf.shape(an_input)[0]
missing_inputs = (
tf2_utils.supply_missing_inputs(self._structured_inputs, batch_size,
unfed_input_keys))
modified_inputs.update(missing_inputs)
fetches = self._get_fetches(feeds)
transformed_features = self._wrapped(modified_inputs)
return {key: transformed_features[key] for key in fetches.keys()}
def apply_transform_model(self, logical_input_map):
"""Applies a transform graph to `Tensor`s.
Args:
logical_input_map: a dict of logical name to Tensor. The logical names
must be a subset of those in the input signature of the transform graph,
and the corresponding Tensors must have the expected types and shapes.
Returns:
A dict of logical name to Tensor, as provided by the output signature of
the transform graph.
"""
unexpected_inputs = (
set(six.iterkeys(logical_input_map)) -
set(six.iterkeys(self._structured_inputs)))
if unexpected_inputs:
raise ValueError(
'Unexpected inputs to transform: {}'.format(unexpected_inputs))
if self.load_v2_in_compat:
return self._apply_v1_transform_model_in_v2(logical_input_map)
else:
return self._apply_v2_transform_model(logical_input_map)
def get_dependent_input_output_keys(self, input_keys, exclude_output_keys):
"""Determine inputs needed to get outputs excluding exclude_output_keys.
Args:
input_keys: A collection of all input keys available to supply to the
SavedModel.
exclude_output_keys: A collection of output keys returned by the
SavedModel that should be excluded.
Returns:
A pair of:
required_input_keys: A subset of the input features to this SavedModel
that are required to compute the set of output features excluding
`exclude_output_keys`. It is sorted to be deterministic.
output_keys: The set of output features excluding `exclude_output_keys`.
It is sorted to be deterministic.
"""
# Assert inputs being fed and outputs being excluded are part of the
# SavedModel.
if set(input_keys).difference(self._structured_inputs.keys()):
raise ValueError(
'Input tensor names contained tensors not in graph: {}'.format(
input_keys))
if set(exclude_output_keys).difference(self._structured_outputs.keys()):
raise ValueError(
'Excluded outputs contained keys not in graph: {}'.format(
exclude_output_keys))
output_keys = (
set(self._structured_outputs.keys()).difference(exclude_output_keys))
# Get all the input tensors that are required to evaluate output_keys.
required_inputs = object_identity.ObjectIdentitySet()
for key in output_keys:
required_inputs.update(self._output_to_inputs_map[key])
    # Get all the input feature names that have at least one component tensor in
# required_inputs.
required_input_keys = []
for key, tensor in six.iteritems(self._structured_inputs):
if any(x in required_inputs for x in self._get_component_tensors(tensor)):
required_input_keys.append(key)
return sorted(required_input_keys), sorted(output_keys)
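# --- Illustrative usage sketch (added; not part of tensorflow_transform) ---
# The feature names and tensors below are hypothetical placeholders; the
# sketch only shows how the two entry points above are meant to be combined:
# first ask which inputs are required for the outputs we care about, then feed
# only those inputs through apply_transform_model.
def _example_apply_transform(saved_model_loader):
  input_keys, output_keys = saved_model_loader.get_dependent_input_output_keys(
      input_keys=['x', 'y'], exclude_output_keys=['expensive_output'])
  logical_input_map = {key: tf.constant([[0.0]]) for key in input_keys}
  transformed_features = saved_model_loader.apply_transform_model(
      logical_input_map)
  return {key: transformed_features[key] for key in output_keys}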
| StarcoderdataPython |
3250009 | # coding=utf-8
# Copyright 2020 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for rewards."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import mock
import tensorflow.compat.v1 as tf
from neural_guided_symbolic_regression.mcts import rewards
from neural_guided_symbolic_regression.mcts import states
class RewardBaseTest(tf.test.TestCase):
def test_set_post_transformer_not_callable(self):
with self.assertRaisesRegexp(TypeError,
'post_transformer is not callable'):
reward = rewards.RewardBase()
reward.set_post_transformer(post_transformer=42)
def test_set_default_value(self):
reward = rewards.RewardBase()
# Default None.
self.assertIsNone(reward._default_value)
# The default value can be changed.
reward.set_default_value(42)
self.assertAlmostEqual(reward._default_value, 42.)
# The default value can be changed multiple times.
reward.set_default_value(-1.5)
self.assertAlmostEqual(reward._default_value, -1.5)
def test_evaluate_not_implemented(self):
state = states.ProductionRulesState(production_rules_sequence=[])
reward = rewards.RewardBase()
with self.assertRaisesRegexp(NotImplementedError,
'Must be implemented by subclass'):
reward.evaluate(state)
def test_evaluate_not_terminal_without_default_value(self):
not_terminal_state = states.ProductionRulesState(
production_rules_sequence=[])
not_terminal_state.is_terminal = mock.MagicMock(return_value=False)
reward = rewards.RewardBase(allow_nonterminal=False, default_value=None)
with self.assertRaisesRegexp(ValueError,
'allow_nonterminal is False and '
'default_value is None, but state is not '
'terminal'):
reward.evaluate(not_terminal_state)
# ValueError will not be raised if default value is set.
reward.set_default_value(42)
self.assertAlmostEqual(reward.evaluate(not_terminal_state), 42.)
def test_evaluate_not_terminal_with_default_value(self):
not_terminal_state = states.ProductionRulesState(
production_rules_sequence=[])
not_terminal_state.is_terminal = mock.MagicMock(return_value=False)
reward = rewards.RewardBase(allow_nonterminal=False, default_value=42)
self.assertAlmostEqual(reward.evaluate(not_terminal_state), 42)
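# --- Illustrative sketch (added; not part of the original project) ---
# The tests above only exercise the RewardBase contract (default values and
# the terminal-state check).  A concrete reward is assumed to subclass
# RewardBase and provide the actual scoring; the override point used below
# (evaluate itself) is an assumption made purely for illustration.
class _ConstantRewardSketch(rewards.RewardBase):
  """Hypothetical reward that scores every state with the same value."""

  def evaluate(self, state):
    del state  # unused in this sketch
    return 1.0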
if __name__ == '__main__':
tf.test.main()
| StarcoderdataPython |
3378484 | import zmq
import socket
data = []
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
def tcp_stream():
global sock, data
dataTemp = ""
while '\n' not in dataTemp:
dataTemp += str(sock.recv(2048),"utf-8")
dataTemp = dataTemp.strip("\r\n").split(";")
#print(dataTemp)
data.append(dataTemp)
#writefile(dataTemp)
return dataTemp
def test(): # testing the ZeroMQ connection
return "It works!"
def zeromq():
global context, socket
# Wait for request from client
message = socket.recv()
print("Received request: %s" % message)
try:
r = eval(message)
print(r)
socket.send(bytearray(str(r), 'utf-8')) # send returned value as bytearry to client
except NameError:
socket.send(b"Unknown command")
except Exception as error:
print(error)
socket.send(b"Unknown error")
def tcp_conn(IPcim):
global sock
# TCP Setup
sock.connect((IPcim,234))
def main():
return ";".join(tcp_stream())
# ZeroMQ configuration
context = zmq.Context()
socket = context.socket(zmq.REP)
socket.bind("tcp://127.0.0.1:5555")
tcp_conn("192.168.137.119")
while True:
zeromq() | StarcoderdataPython |
20162 | ##############################################################################
# Copyright 2009, <NAME>
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# * Neither the name of the authors nor the names of its contributors
# may be used to endorse or promote products derived from this software
# without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT
##############################################################################
'''
Import all essential functions and constants to re-export them here for easy
access.
This module contains also various pre-defined ISO 8601 format strings.
'''
from .isodates import parse_date, date_isoformat
from .isotime import parse_time, time_isoformat
from .isodatetime import parse_datetime, datetime_isoformat
from .isoduration import parse_duration, duration_isoformat, Duration
from .isoerror import ISO8601Error
from .isotzinfo import parse_tzinfo, tz_isoformat
from .tzinfo import UTC, FixedOffset, LOCAL
from .duration import Duration
from .isostrf import strftime
from .isostrf import DATE_BAS_COMPLETE, DATE_BAS_ORD_COMPLETE
from .isostrf import DATE_BAS_WEEK, DATE_BAS_WEEK_COMPLETE
from .isostrf import DATE_CENTURY, DATE_EXT_COMPLETE
from .isostrf import DATE_EXT_ORD_COMPLETE, DATE_EXT_WEEK
from .isostrf import DATE_EXT_WEEK_COMPLETE, DATE_MONTH, DATE_YEAR
from .isostrf import TIME_BAS_COMPLETE, TIME_BAS_MINUTE
from .isostrf import TIME_EXT_COMPLETE, TIME_EXT_MINUTE
from .isostrf import TIME_HOUR
from .isostrf import TZ_BAS, TZ_EXT, TZ_HOUR
from .isostrf import DT_BAS_COMPLETE, DT_EXT_COMPLETE
from .isostrf import DT_BAS_ORD_COMPLETE, DT_EXT_ORD_COMPLETE
from .isostrf import DT_BAS_WEEK_COMPLETE, DT_EXT_WEEK_COMPLETE
from .isostrf import D_DEFAULT, D_WEEK, D_ALT_EXT, D_ALT_BAS
from .isostrf import D_ALT_BAS_ORD, D_ALT_EXT_ORD
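# --- Illustrative usage (added; not part of the original module) ---
# A small sketch of the re-exported helpers; the output values shown in the
# comments are only what these calls are expected to produce.
def _usage_sketch():
    from datetime import timedelta
    d = parse_date('2023-04-05')                   # -> datetime.date(2023, 4, 5)
    dur = parse_duration('P1Y2M10DT2H30M')         # -> Duration instance
    iso = duration_isoformat(timedelta(hours=36))  # -> e.g. 'P1DT12H'
    return d, dur, iso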
| StarcoderdataPython |
3376443 | <reponame>progressivis/ipytablewidgets
# Initial software, <NAME>, <NAME>, Copyright (c) Inria, BSD 3-Clause License, 2021
import os
from os.path import join as pjoin
import json
from .._frontend import npm_module_name, npm_package_version
here = os.path.dirname(os.path.abspath(__file__))
def test_frontend():
package_json_file = pjoin(here, '..', '..', 'js', 'package.json')
with open(package_json_file) as pjf:
package_json = json.load(pjf)
print(package_json)
assert package_json['name'] == npm_module_name
assert f"^{package_json['version']}" == npm_package_version
| StarcoderdataPython |
155779 | # -*- coding: utf-8 -*-
# Copyright (c) 2016-2022 by University of Kassel and Fraunhofer Institute for Energy Economics
# and Energy System Technology (IEE), Kassel. All rights reserved.
import pytest
import gc
import copy
import numpy as np
import pandas as pd
try:
import geopandas as gpd
import shapely.geometry
GEOPANDAS_INSTALLED = True
except ImportError:
GEOPANDAS_INSTALLED = False
from pandapower.auxiliary import get_indices
import pandapower as pp
import pandapower.networks
import pandapower.control
import pandapower.timeseries
class MemoryLeakDemo:
"""
Dummy class to demonstrate memory leaks
"""
def __init__(self, net):
self.net = net
        # it is interesting that if "self" is just an attribute of net, there are no problems
# if "self" is saved in a DataFrame, it causes a memory leak
net['memory_leak_demo'] = pd.DataFrame(data=[self], columns=['object'])
class MemoryLeakDemoDF:
"""
Dummy class to demonstrate memory leaks
"""
def __init__(self, df):
self.df = df
# if "self" is saved in a DataFrame, it causes a memory leak
df.loc[0, 'object'] = self
class MemoryLeakDemoDict:
"""
Dummy class to demonstrate memory leaks
"""
def __init__(self, d):
self.d = d
d['object'] = self
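# --- Illustrative counter-example (added; not pandapower code) ---
# The demo classes above keep a strong back reference to themselves inside the
# container they were given, which is what makes the leak visible in the tests
# below.  The direction hinted at by the "fix with weakref" comments is to hold
# only a weak proxy instead; the hypothetical class below sketches that idea.
import weakref


class MemoryLeakFixedDemo:
    """
    Dummy class sketching a weakref-based variant of MemoryLeakDemo
    """

    def __init__(self, net):
        self.net = net
        # the net only sees a weak proxy, so the object can be collected
        net['memory_leak_fixed_demo'] = pd.DataFrame(data=[weakref.proxy(self)],
                                                     columns=['object'])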
def test_get_indices():
a = [i + 100 for i in range(10)]
lookup = {idx: pos for pos, idx in enumerate(a)}
lookup["before_fuse"] = a
# First without fused buses no magic here
# after fuse
result = get_indices([102, 107], lookup, fused_indices=True)
assert np.array_equal(result, [2, 7])
# before fuse
result = get_indices([2, 7], lookup, fused_indices=False)
assert np.array_equal(result, [102, 107])
# Same setup EXCEPT we have fused buses now (bus 102 and 107 are fused)
lookup[107] = lookup[102]
# after fuse
result = get_indices([102, 107], lookup, fused_indices=True)
assert np.array_equal(result, [2, 2])
# before fuse
result = get_indices([2, 7], lookup, fused_indices=False)
assert np.array_equal(result, [102, 107])
def test_net_deepcopy():
net = pp.networks.example_simple()
net.line_geodata.loc[0, 'coords'] = [[0, 1], [1, 2]]
net.bus_geodata.loc[0, ['x', 'y']] = 0, 1
pp.control.ContinuousTapControl(net, tid=0, vm_set_pu=1)
ds = pp.timeseries.DFData(pd.DataFrame(data=[[0, 1, 2], [3, 4, 5]]))
pp.control.ConstControl(net, element='load', variable='p_mw', element_index=[0], profile_name=[0], data_source=ds)
net1 = copy.deepcopy(net)
assert not net1.controller.object.at[1].data_source is ds
assert not net1.controller.object.at[1].data_source.df is ds.df
assert not net1.line_geodata.coords.at[0] is net.line_geodata.coords.at[0]
if GEOPANDAS_INSTALLED:
for tab in ('bus_geodata', 'line_geodata'):
if tab == 'bus_geodata':
geometry = net[tab].apply(lambda x: shapely.geometry.Point(x.x, x.y), axis=1)
else:
geometry = net[tab].coords.apply(shapely.geometry.LineString)
net[tab] = gpd.GeoDataFrame(net[tab], geometry=geometry)
net1 = net.deepcopy()
assert isinstance(net1.line_geodata, gpd.GeoDataFrame)
assert isinstance(net1.bus_geodata, gpd.GeoDataFrame)
assert isinstance(net1.bus_geodata.geometry.iat[0], shapely.geometry.Point)
assert isinstance(net1.line_geodata.geometry.iat[0], shapely.geometry.LineString)
def test_memory_leaks():
net = pp.networks.example_simple()
# first, test to check that there are no memory leaks
types_dict1 = pp.toolbox.get_gc_objects_dict()
num = 3
for _ in range(num):
net_copy = copy.deepcopy(net)
# In each net copy it has only one controller
pp.control.ContinuousTapControl(net_copy, tid=0, vm_set_pu=1)
gc.collect()
types_dict2 = pp.toolbox.get_gc_objects_dict()
assert types_dict2[pandapower.auxiliary.pandapowerNet] - types_dict1[pandapower.auxiliary.pandapowerNet] == 1
assert types_dict2[pandapower.control.ContinuousTapControl] - types_dict1.get(
pandapower.control.ContinuousTapControl, 0) == 1
def test_memory_leaks_demo():
net = pp.networks.example_simple()
# first, test to check that there are no memory leaks
types_dict1 = pp.toolbox.get_gc_objects_dict()
# now, demonstrate how a memory leak occurs
# emulates the earlier behavior before the fix with weakref
num = 3
for _ in range(num):
net_copy = copy.deepcopy(net)
MemoryLeakDemo(net_copy)
# demonstrate how the garbage collector doesn't remove the objects even if called explicitly
gc.collect()
types_dict2 = pp.toolbox.get_gc_objects_dict()
assert types_dict2[pandapower.auxiliary.pandapowerNet] - types_dict1[pandapower.auxiliary.pandapowerNet] == num
assert types_dict2[MemoryLeakDemo] - types_dict1.get(MemoryLeakDemo, 0) == num
def test_memory_leaks_no_copy():
types_dict0 = pp.toolbox.get_gc_objects_dict()
num = 3
for _ in range(num):
net = pp.create_empty_network()
# In each net copy it has only one controller
pp.control.ConstControl(net, 'sgen', 'p_mw', 0)
gc.collect()
types_dict1 = pp.toolbox.get_gc_objects_dict()
assert types_dict1[pandapower.control.ConstControl] - types_dict0.get(pandapower.control.ConstControl, 0) == 1
assert types_dict1[pandapower.auxiliary.pandapowerNet] - types_dict0.get(pandapower.auxiliary.pandapowerNet, 0) <= 1
def test_memory_leak_no_copy_demo():
types_dict1 = pp.toolbox.get_gc_objects_dict()
# now, demonstrate how a memory leak occurs
# emulates the earlier behavior before the fix with weakref
num = 3
for _ in range(num):
net = pp.networks.example_simple()
MemoryLeakDemo(net)
# demonstrate how the garbage collector doesn't remove the objects even if called explicitly
gc.collect()
types_dict2 = pp.toolbox.get_gc_objects_dict()
assert types_dict2[pandapower.auxiliary.pandapowerNet] - \
types_dict1.get(pandapower.auxiliary.pandapowerNet, 0) >= num-1
assert types_dict2[MemoryLeakDemo] - types_dict1.get(MemoryLeakDemo, 0) == num
def test_memory_leak_df():
types_dict1 = pp.toolbox.get_gc_objects_dict()
num = 3
for _ in range(num):
df = pd.DataFrame()
MemoryLeakDemoDF(df)
gc.collect()
types_dict2 = pp.toolbox.get_gc_objects_dict()
assert types_dict2[MemoryLeakDemoDF] - types_dict1.get(MemoryLeakDemoDF, 0) == num
def test_memory_leak_dict():
types_dict1 = pp.toolbox.get_gc_objects_dict()
num = 3
for _ in range(num):
d = dict()
MemoryLeakDemoDict(d)
gc.collect()
types_dict2 = pp.toolbox.get_gc_objects_dict()
assert types_dict2[MemoryLeakDemoDict] - types_dict1.get(MemoryLeakDemoDict, 0) <= 1
def test_create_trafo_characteristics():
net = pp.networks.example_multivoltage()
# test 2 modes, multiple index and single index, for 2w trafo
pp.control.create_trafo_characteristics(net, "trafo", [1], 'vk_percent', [[-2,-1,0,1,2]], [[2,3,4,5,6]])
assert "characteristic" in net
assert "tap_dependent_impedance" in net.trafo.columns
assert net.trafo.tap_dependent_impedance.dtype == np.bool_
assert net.trafo.tap_dependent_impedance.at[1]
assert not net.trafo.tap_dependent_impedance.at[0]
assert "vk_percent_characteristic" in net.trafo.columns
assert net.trafo.at[1, 'vk_percent_characteristic'] == 0
assert pd.isnull(net.trafo.at[0, 'vk_percent_characteristic'])
assert net.trafo.vk_percent_characteristic.dtype == pd.Int64Dtype()
assert "vkr_percent_characteristic" not in net.trafo.columns
pp.control.create_trafo_characteristics(net, "trafo", 1, 'vkr_percent', [-2,-1,0,1,2], [1.323,1.324,1.325,1.326,1.327])
assert len(net.characteristic) == 2
assert "vkr_percent_characteristic" in net.trafo.columns
assert net.trafo.at[1, 'vkr_percent_characteristic'] == 1
assert pd.isnull(net.trafo.at[0, 'vkr_percent_characteristic'])
assert net.trafo.vkr_percent_characteristic.dtype == pd.Int64Dtype()
assert isinstance(net.characteristic.object.at[0], pp.control.SplineCharacteristic)
assert isinstance(net.characteristic.object.at[1], pp.control.SplineCharacteristic)
# test for 3w trafo
pp.control.create_trafo_characteristics(net, "trafo3w", 0, 'vk_hv_percent', [-8, -4, 0, 4, 8], [8.1, 9.1, 10.1, 11.1, 12.1])
assert "tap_dependent_impedance" in net.trafo3w.columns
assert net.trafo3w.tap_dependent_impedance.dtype == np.bool_
assert net.trafo3w.tap_dependent_impedance.at[0]
assert "vk_hv_percent_characteristic" in net.trafo3w.columns
assert net.trafo3w.at[0, 'vk_hv_percent_characteristic'] == 2
assert net.trafo3w.vk_hv_percent_characteristic.dtype == pd.Int64Dtype()
assert "vkr_hv_percent_characteristic" not in net.trafo3w.columns
assert "vk_mv_percent_characteristic" not in net.trafo3w.columns
pp.control.create_trafo_characteristics(net, "trafo3w", 0, 'vk_mv_percent', [-8, -4, 0, 4, 8], [8.1, 9.1, 10.1, 11.1, 12.1])
assert net.trafo3w.tap_dependent_impedance.dtype == np.bool_
assert net.trafo3w.tap_dependent_impedance.at[0]
assert "vk_mv_percent_characteristic" in net.trafo3w.columns
assert net.trafo3w.at[0, 'vk_mv_percent_characteristic'] == 3
assert net.trafo3w.vk_hv_percent_characteristic.dtype == pd.Int64Dtype()
assert "vkr_mv_percent_characteristic" not in net.trafo3w.columns
assert "vk_lv_percent_characteristic" not in net.trafo3w.columns
assert "vkr_lv_percent_characteristic" not in net.trafo3w.columns
# this should be enough testing for adding columns
# now let's test if it raises errors
# invalid variable
with pytest.raises(UserWarning):
pp.control.create_trafo_characteristics(net, "trafo3w", 0, 'vk_percent',
[-8, -4, 0, 4, 8], [8.1, 9.1, 10.1, 11.1, 12.1])
# invalid shapes
with pytest.raises(UserWarning):
pp.control.create_trafo_characteristics(net, "trafo3w", 0, 'vk_hv_percent',
[-8, -4, 0, 4, 8], [8.1, 9.1, 10.1, 11.1])
with pytest.raises(UserWarning):
pp.control.create_trafo_characteristics(net, "trafo3w", [0], 'vk_hv_percent',
[-8, -4, 0, 4, 8], [8.1, 9.1, 10.1, 11.1, 12.1])
with pytest.raises(UserWarning):
pp.control.create_trafo_characteristics(net, "trafo3w", [0, 1], 'vk_hv_percent',
[[-8, -4, 0, 4, 8]], [[8.1, 9.1, 10.1, 11.1, 12.1]])
with pytest.raises(UserWarning):
pp.control.create_trafo_characteristics(net, "trafo3w", [0, 1], 'vk_hv_percent',
[[-8, -4, 0, 4, 8], [-8, -4, 0, 4, 8]],
[[8.1, 9.1, 10.1, 11.1, 12.1]])
if __name__ == '__main__':
pytest.main([__file__, "-x"])
| StarcoderdataPython |
3237619 | <reponame>techthiyanes/malaya-speech
from malaya_speech.utils import (
check_file,
load_graph,
generate_session,
nodes_session,
)
from malaya_speech.model.synthesis import (
Tacotron,
Fastspeech,
Fastpitch,
GlowTTS,
GlowTTS_MultiSpeaker
)
from malaya_speech.path import STATS_VOCODER
from malaya_speech import speaker_vector
import numpy as np
def load(model, module, inputs, outputs, normalizer, model_class, quantized=False, **kwargs,):
path = check_file(
file=model,
module=module,
keys={'model': 'model.pb', 'stats': STATS_VOCODER[model]},
quantized=quantized,
**kwargs,
)
g = load_graph(path['model'], **kwargs)
input_nodes, output_nodes = nodes_session(g, inputs, outputs)
stats = np.load(path['stats'])
sess = generate_session(graph=g, **kwargs)
return model_class(
input_nodes=input_nodes,
output_nodes=output_nodes,
normalizer=normalizer,
stats=stats,
sess=sess,
model=model,
name=module,
)
def tacotron_load(
model, module, normalizer, quantized=False, **kwargs,
):
inputs = ['Placeholder', 'Placeholder_1']
outputs = ['decoder_output', 'post_mel_outputs', 'alignment_histories']
return load(model=model,
module=module,
inputs=inputs,
outputs=outputs,
normalizer=normalizer,
model_class=Tacotron,
quantized=quantized,
**kwargs)
def fastspeech_load(
model, module, normalizer, quantized=False, **kwargs,
):
inputs = ['Placeholder', 'speed_ratios', 'f0_ratios', 'energy_ratios']
outputs = ['decoder_output', 'post_mel_outputs']
return load(model=model,
module=module,
inputs=inputs,
outputs=outputs,
normalizer=normalizer,
model_class=Fastspeech,
quantized=quantized,
**kwargs)
def fastpitch_load(
model, module, normalizer, quantized=False, **kwargs,
):
inputs = ['Placeholder', 'speed_ratios', 'pitch_ratios', 'pitch_addition']
outputs = ['decoder_output', 'post_mel_outputs', 'pitch_outputs']
return load(model=model,
module=module,
inputs=inputs,
outputs=outputs,
normalizer=normalizer,
model_class=Fastpitch,
quantized=quantized,
**kwargs)
def glowtts_load(
model, module, normalizer, quantized=False, **kwargs,
):
if model == 'female-singlish':
stats = f'{model}-v1'
else:
stats = model
path = check_file(
file=model,
module=module,
keys={'model': 'model.pb', 'stats': STATS_VOCODER.get(stats, 'male')},
quantized=quantized,
**kwargs,
)
inputs = ['input_ids', 'lens', 'temperature', 'length_ratio']
if model == 'multispeaker':
inputs = inputs + ['speakers', 'speakers_right']
g = load_graph(path['model'], glowtts_multispeaker_graph=True, **kwargs)
speaker_model = speaker_vector.deep_model('vggvox-v2', **kwargs)
model_class = GlowTTS_MultiSpeaker
stats = None
else:
speaker_model = None
model_class = GlowTTS
g = load_graph(path['model'], glowtts_graph=True, **kwargs)
stats = np.load(path['stats'])
outputs = ['mel_output', 'alignment_histories']
input_nodes, output_nodes = nodes_session(g, inputs, outputs)
return model_class(
input_nodes=input_nodes,
output_nodes=output_nodes,
normalizer=normalizer,
speaker_vector=speaker_model,
stats=stats,
sess=generate_session(graph=g, **kwargs),
model=model,
name=module,
)
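# --- Illustrative usage (added; not part of malaya-speech itself) ---
# Hypothetical sketch of how a higher-level API might call one of the loaders
# above; the model/module names and the normalizer object are assumptions.
def _example_load_fastspeech(normalizer):
    return fastspeech_load(
        model='male',              # assumed model name
        module='text-to-speech',   # assumed module name
        normalizer=normalizer,
        quantized=False,
    )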
| StarcoderdataPython |
123872 | from biobb_common.tools import test_fixtures as fx
from biobb_analysis.ambertools.cpptraj_rgyr import cpptraj_rgyr
class TestCpptrajRgyrDocker():
def setUp(self):
fx.test_setup(self,'cpptraj_rgyr_docker')
def tearDown(self):
fx.test_teardown(self)
pass
def test_rgyr_docker(self):
cpptraj_rgyr(properties=self.properties, **self.paths)
assert fx.not_empty(self.paths['output_cpptraj_path'])
assert fx.equal(self.paths['output_cpptraj_path'], self.paths['ref_output_cpptraj_path'])
class TestCpptrajRgyrSingularity():
def setUp(self):
fx.test_setup(self,'cpptraj_rgyr_singularity')
def tearDown(self):
fx.test_teardown(self)
pass
def test_rgyr_singularity(self):
cpptraj_rgyr(properties=self.properties, **self.paths)
assert fx.not_empty(self.paths['output_cpptraj_path'])
assert fx.equal(self.paths['output_cpptraj_path'], self.paths['ref_output_cpptraj_path']) | StarcoderdataPython |
3245555 | <filename>creations/protype/simpleLayer.py<gh_stars>0
from copy import copy, deepcopy
class simpleLayer():
background = [0,0,0,0]
content = 'blank'
def getContent(self):
return self.content
def getBackgroud(self):
return self.background
def paint(self,painting):
self.content=painting
def setParent(self,p):
self.background[3]=p
def fillBackground(self,back):
self.background=back
def clone(self):
return copy(self)
def deep_clone(self):
return deepcopy(self)
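# --- Illustrative demo (added; not part of the original example) ---
# clone() is a shallow copy, so the mutable background list stays shared with
# the original layer, while deep_clone() gives the copy its own list; the
# helper below (never called here) demonstrates that difference.
def _clone_demo():
    layer = simpleLayer()
    layer.fillBackground([0, 0, 255, 0])
    shallow = layer.clone()
    deep = layer.deep_clone()
    shallow.setParent(128)   # also visible through layer.getBackgroud()
    deep.setParent(64)       # only changes the deep copy
    return layer.getBackgroud(), shallow.getBackgroud(), deep.getBackgroud()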
# if __name__ == "__main__":
# dog_layer = simpleLayer()
# dog_layer.paint('dog')
# dog_layer.fillBackground([0,0,255,0])
# print ("Background:",dog_layer.getBackgroud())
# print ("Painting:",dog_layer.getContent())
# """
# output:
# Background: [0, 0, 255, 0]
# Painting: dog
# """
if __name__=="__main__":
dog_layer=simpleLayer()
dog_layer.paint("Dog")
dog_layer.fillBackground([0,0,255,0])
print ("Original Background:",dog_layer.getBackgroud())
print ("Original Painting:",dog_layer.getContent())
another_dog_layer=dog_layer.clone()
another_dog_layer.setParent(128)
another_dog_layer.paint("Puppy")
print ("Original Background:", dog_layer.getBackgroud())
print ("Original Painting:", dog_layer.getContent())
print ("Copy Background:", another_dog_layer.getBackgroud())
print ("Copy Painting:", another_dog_layer.getContent())
"""
output:
Original Background: [0, 0, 255, 0]
Original Painting: Dog
Original Background: [0, 0, 255, 128]
Original Painting: Dog
Copy Background: [0, 0, 255, 128]
Copy Painting: Puppy
""" | StarcoderdataPython |
95437 | <filename>Project 3/Q1-Q3.py
import numpy as np
### Q1
class convolution():
def __init__(self, n_filters, kernel_size, padding, stride, activation=None):
self.output_channel = n_filters
self.filter_size = kernel_size[0]
if padding == "VALID":
self.pad = 0
else:
self.pad = (self.filter_size -1)//2
self.stride = stride
self.activation = activation
def forward(self, X): #, figures=None):
self.input = X
self.batch_size = X.shape[0]
self.input_size = X.shape[1]
self.input_channel = X.shape[3]
self.filters = np.random.randn(self.output_channel, self.filter_size, self.filter_size, self.input_channel)
output_dim = int(((self.input.shape[1]-self.filter_size + 2 * self.pad)// self.stride)+1)
output_image = np.zeros((self.batch_size, output_dim, output_dim, self.output_channel))
X_padded = np.pad (X, ((0,0), (self.pad, self.pad), (self.pad, self.pad), (0,0)), 'constant', constant_values=0)
for r in range(self.batch_size):
for k in range(self.output_channel):
filter = self.filters[k]
for i in range(output_dim): #horizontal axis
for j in range(output_dim): # vertical axis
                        output_image[r, i, j, k] = np.multiply(
                            filter,
                            X_padded[r,
                                     i * self.stride: i * self.stride + self.filter_size,
                                     j * self.stride: j * self.stride + self.filter_size,
                                     :]).sum()
        return output_image
### Q2
# outputOfEachConvLayer = [(in_size + 2*padding - kernel_size) / stride] + 1
def X_flatten(self, X, window_h, window_w, window_c, out_h, out_w, stride=1, padding=0):
X_padded = np.pad (X, ((0,0), (padding, padding), (padding, padding), (0,0)), 'constant', constant_values=0)
windows = []
for i in range(out_h):
for j in range(out_w):
window = X_padded[:,i *stride:i * stride + window_h, j * stride: j*stride + window_w, :]
windows.append(window)
stacked = np.stack(windows)
return np.reshape(stacked, (-1, window_c * window_w * window_h))
def convolution(X, n_filters, kernel_size, padding, stride):
global conv_activation_layer
k_h = kernel_size[0]
k_w = kernel_size[1]
if padding == 'VALID':
pad = 0
else:
pad = 1
filters = []
for i in range(n_filters):
kernel = np.random.randn(k_h, k_w, X.shape[3])
filters.append(kernel)
kernel = np.reshape(filters, (k_h, k_w, X.shape[3], n_filters))
n,h,w,c = X.shape[0], X.shape[1], X.shape[2], X.shape[3]
filter_h, filter_w, filter_c, filter_n = kernel.shape[0], kernel.shape[1], kernel.shape[2], kernel.shape[3]
out_h = (h + 2 * pad - filter_h) // stride + 1
out_w = (w + 2 * pad - filter_w) // stride + 1
X_flat = model.X_flatten(X, filter_h, filter_w, filter_c, out_h, out_w, stride, pad)
W_flat = np.reshape(kernel, (filter_h * filter_w * filter_c, filter_n))
z = np.matmul(X_flat, W_flat)
z = np.transpose(np.reshape(z, (out_h, out_w, n, filter_n)), (2,0,1,3))
conv_activation_layer = relu.activation(z)
return conv_activation_layer
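# --- Worked size example (added for clarity; not in the original file) ---
# Using the formula quoted above, out = (in_size + 2*padding - kernel_size)//stride + 1:
# a 28x28 input with a 3x3 kernel, stride 1 and SAME-style padding of 1 gives
# (28 + 2*1 - 3)//1 + 1 = 28 (spatial size preserved), while VALID padding
# (pad = 0) shrinks it to (28 - 3)//1 + 1 = 26.
# Note that the convolution() helper above assumes `model` (providing
# X_flatten) and `relu` objects exist in the surrounding scope.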
| StarcoderdataPython |
22209 | import random
TUPLE_SIZE = 4
DIGIT_BASE = 10
MAX_GUESS = DIGIT_BASE ** TUPLE_SIZE
def yield_all():
for i in xrange(DIGIT_BASE ** TUPLE_SIZE):
tup = tuple([int(x) for x in '%04d' % i])
assert len(tup) == TUPLE_SIZE
for l in tup:
if tup.count(l) != 1:
break
else:
yield OblioTuple(tup)
def weighted_dice_roll(weight_map, exclusions):
# Actually, this does an UNWEIGHTED dice roll. Never got around to doing weighted.
# Don't think it would matter much anyway.
new_map = {k: v for k, v in weight_map.iteritems() if k not in exclusions}
return new_map.keys()[random.randint(0, len(new_map) - 1)]
class OblioTuple(tuple):
@staticmethod
def get_random():
pile = range(0, DIGIT_BASE)
secret = []
for i in xrange(0, TUPLE_SIZE):
r = random.randint(0, len(pile) - 1)
secret.append(pile[r])
del pile[r]
# Assert that the tuple contains 4 distinct digits
assert len(list(set(secret))) == 4
return OblioTuple(secret)
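# --- Worked count (added for clarity; not in the original file) ---
# yield_all() keeps only tuples whose four digits are all distinct, so it
# yields exactly 10 * 9 * 8 * 7 = 5040 of the 10**4 = 10000 candidate tuples,
# and get_random() draws uniformly from that same space.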
| StarcoderdataPython |
1641830 | <gh_stars>1-10
"""Copyright 2018 <NAME> and The Netherlands Organisation for
Applied Scientific Research TNO.
Licensed under the MIT license.
Permission is hereby granted, free of charge, to any person obtaining a
copy of this software and associated documentation files (the
"Software"), to deal in the Software without restriction, including
without limitation the rights to use, copy, modify, merge, publish,
distribute, sublicense, and/or sell copies of the Software, and to
permit persons to whom the Software is furnished to do so, subject to
the conditions stated in the LICENSE file in the project root for
details.
The above copyright notice and this permission notice shall be included
in all copies or substantial portions of the Software.
"""
import unittest
import configsuite
from configsuite import MetaKeys as MK
from . import data
class TestDict(unittest.TestCase):
def test_valid_store_config(self):
raw_config = data.store.build_config()
store_schema = data.store.build_schema()
config_suite = configsuite.ConfigSuite(raw_config, store_schema)
self.assertTrue(config_suite.valid)
prices = [(elem.key, elem.value) for elem in config_suite.snapshot.prices]
self.assertEqual(
sorted(tuple(raw_config["prices"].items())), sorted(tuple(prices))
)
def test_invalid_schema_no_key(self):
raw_config = data.store.build_config()
store_schema = data.store.build_schema()
store_schema[MK.Content]["prices"][MK.Content].pop(MK.Key)
with self.assertRaises(KeyError):
configsuite.ConfigSuite(raw_config, store_schema)
def test_invalid_schema_no_value(self):
raw_config = data.store.build_config()
store_schema = data.store.build_schema()
store_schema[MK.Content]["prices"][MK.Content].pop(MK.Value)
with self.assertRaises(KeyError):
configsuite.ConfigSuite(raw_config, store_schema)
def test_invalid_schema_extra_spec(self):
raw_config = data.store.build_config()
store_schema = data.store.build_schema()
store_schema[MK.Content]["prices"][MK.Content]["monkey"] = "patch"
with self.assertRaises(KeyError):
configsuite.ConfigSuite(raw_config, store_schema)
def test_valid_store_config_faulty_key(self):
raw_config = data.store.build_config()
raw_config["prices"][123] = 1
store_schema = data.store.build_schema()
config_suite = configsuite.ConfigSuite(raw_config, store_schema)
self.assertFalse(config_suite.valid)
self.assertEqual(1, len(config_suite.errors))
err = config_suite.errors[0]
self.assertIsInstance(err, configsuite.InvalidTypeError)
def test_valid_store_config_faulty_value(self):
raw_config = data.store.build_config()
raw_config["prices"]["pig"] = "very expensive"
store_schema = data.store.build_schema()
config_suite = configsuite.ConfigSuite(raw_config, store_schema)
self.assertFalse(config_suite.valid)
self.assertEqual(1, len(config_suite.errors))
err = config_suite.errors[0]
self.assertIsInstance(err, configsuite.InvalidTypeError)
def test_valid_advanded_store_config(self):
raw_config = data.advanced_store.build_config()
store_schema = data.advanced_store.build_schema()
config_suite = configsuite.ConfigSuite(raw_config, store_schema)
self.assertTrue(config_suite.valid)
# Map all not listed values to None
raw_prices = [
(key, {key: value.get(key) for key in ("unit", "kilo", "gram")})
for (key, value) in raw_config["prices"].items()
]
# Extract and format prices from config
prices = [
(
elem.key,
{
"unit": elem.value.unit,
"kilo": elem.value.kilo,
"gram": elem.value.gram,
},
)
for elem in config_suite.snapshot.prices
]
self.assertEqual(sorted(raw_prices), sorted(prices))
def test_advanced_store_config_faulty_schema(self):
raw_config = data.advanced_store.build_config()
store_schema = data.advanced_store.build_schema()
store_schema[MK.Content]["prices"][MK.Content][MK.Value][MK.Content][
"kilo"
].pop(MK.Type)
with self.assertRaises(KeyError):
configsuite.ConfigSuite(raw_config, store_schema)
| StarcoderdataPython |
3246392 |
from bcc import BPF
prog = """
int hello(void *ctx) {
bpf_trace_printk("hello, world!\\n");
return 0;
}
"""
# load BPF program
b = BPF(text=prog)
b.attach_kprobe(event="sys_clone", fn_name="hello")
print("%-18s %-16s %-6s %s" % ("TIME(s)", "COMM", "PID", "MESSAGE"))
while 1:
try:
(task, pid, cpu, flags, ts, msg) = b.trace_fields()
except ValueError:
continue
print("%-18.9f %-16s %-6d %s" % (ts, task, pid, msg))
| StarcoderdataPython |
1797747 | from django import http
from django.core.paginator import Paginator, InvalidPage
from django.shortcuts import get_object_or_404, render_to_response
from django.template import RequestContext
from satchmo.configuration import config_value
from satchmo.product.models import Product
from satchmo.productratings.queries import highest_rated
from satchmo.product.queries import bestsellers
import logging
log = logging.getLogger('product.filterviews')
def display_bestratings(request, count=0, template='product/best_ratings.html'):
"""Display a list of the products with the best ratings in comments"""
if count is None:
count = config_value('SHOP', 'NUM_DISPLAY')
ctx = RequestContext(request, {
'products' : highest_rated(),
})
return render_to_response(template, ctx)
def display_bestsellers(request, count=0, template='product/best_sellers.html'):
"""Display a list of the products which have sold the most"""
if count == 0:
count = config_value('SHOP', 'NUM_PAGINATED')
ctx = RequestContext(request, {
'products' : bestsellers(count),
})
return render_to_response(template, ctx)
def display_recent(request, page=0, count=0, template='product/recently_added.html'):
"""Display a list of recently added products."""
if count == 0:
count = config_value('SHOP', 'NUM_PAGINATED')
if page == 0:
if request.method == 'GET':
page = request.GET.get('page', 1)
else:
page = 1
query = Product.objects.recent_by_site()
paginator = Paginator(query, count)
try:
currentpage = paginator.page(page)
except InvalidPage:
currentpage = None
ctx = RequestContext(request, {
'page' : currentpage,
'paginator' : paginator,
})
return render_to_response(template, ctx)
| StarcoderdataPython |
42423 | import numpy as np
import pandas as pd
from .scm import SCM
class DataGenerator:
def generate(self, scm: SCM, n_samples: int, seed: int):
pass
class SimpleDataGenerator(DataGenerator):
def generate(self, scm: SCM, n_samples: int, seed: int):
"""
Generates date according to the given Structural Causal Model
This Generator assumes that variables are normally distributed
The noise is distributed according to standard normal distribution
:param scm: instance of SCM
:param n_samples: number of samples to generate
:param seed: random seed
:return:
"""
np.random.seed(seed)
data = {}
for equation in scm.equations:
data[equation["output_variable"].name] = np.zeros(n_samples)
for input_variable, coeff in equation["input_variables"].items():
if input_variable.name not in data:
raise AttributeError(
f"No data generated for dependent variable {input_variable.name}"
)
data[equation["output_variable"].name] += (
data[input_variable.name] * coeff
)
mean = 0
std = 1.0
if isinstance(equation["output_variable"].config, dict):
mean = equation["output_variable"].config.get("mean", 0)
std = equation["output_variable"].config.get("std", 1.0)
data[equation["output_variable"].name] += np.random.normal(
loc=mean, scale=std, size=n_samples
)
if (
isinstance(equation["output_variable"].config, dict)
and "mask" in equation["output_variable"].config
):
out_val = data[equation["output_variable"].name]
out_val[out_val < equation["output_variable"].config["mask"]] = 0
out_val[out_val > 0] = 1
data[equation["output_variable"].name] = out_val
return pd.DataFrame.from_dict(data)
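# --- Illustrative sketch (added; not part of the original package) ---
# SimpleDataGenerator only needs each equation to expose an output_variable
# (with .name and an optional .config dict) and a dict mapping input variables
# to coefficients.  The tiny stand-ins below are hypothetical and only show
# the expected shape of an SCM with x -> y (y = 2*x + standard normal noise).
def _example_generate():
    class _Var:
        def __init__(self, name, config=None):
            self.name = name
            self.config = config

    class _Scm:
        def __init__(self):
            x, y = _Var("x"), _Var("y")
            self.equations = [
                {"output_variable": x, "input_variables": {}},
                {"output_variable": y, "input_variables": {x: 2.0}},
            ]

    return SimpleDataGenerator().generate(_Scm(), n_samples=100, seed=0)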
| StarcoderdataPython |
3310543 | from .x import *
print("init")
| StarcoderdataPython |
4835327 | import pytz
import uuid
from django.db import models
from django.urls import reverse
from django.utils import timezone
class Registrante(models.Model):
name = models.CharField(max_length=240)
legal_uid = models.CharField(max_length=90, db_index=True)
uid = models.UUIDField(default=uuid.uuid4, editable=False, unique=True)
    zone = models.CharField(max_length=10, default='AR', help_text='To identify the country where it is located')
created = models.DateTimeField(null=True, blank=True)
changed = models.DateTimeField(null=True, blank=True)
object_created = models.DateTimeField(auto_now_add=True)
object_modified = models.DateTimeField(auto_now=True)
def get_absolute_url(self):
return reverse('registrante', kwargs={'uid': self.uid})
class Meta:
unique_together = (('legal_uid', 'zone'))
def get_zoned_date(self, field, zona):
""" put a datetime in the rigth timezone before move to string """
if field == 'created':
timefield = self.created
elif field == 'changed':
timefield = self.changed
else:
raise Exception('Bad field date')
return timezone.localtime(timefield, pytz.timezone(zona))
def __str__(self):
return f'{self.name} [{self.zone}-{self.legal_uid}]'
class TagForRegistrante(models.Model):
""" etiqueta para los registrantes """
nombre = models.CharField(max_length=190)
uid = models.UUIDField(default=uuid.uuid4, editable=False, unique=True)
object_created = models.DateTimeField(auto_now_add=True)
object_modified = models.DateTimeField(auto_now=True)
def __str__(self):
return self.nombre
def get_absolute_url(self):
return reverse('rubro', kwargs={'uid': self.uid})
class RegistranteTag(models.Model):
registrante = models.ForeignKey(Registrante, on_delete=models.CASCADE, related_name='tags')
uid = models.UUIDField(default=uuid.uuid4, editable=False, unique=True)
tag = models.ForeignKey(TagForRegistrante, on_delete=models.CASCADE, related_name='registrantes')
object_created = models.DateTimeField(auto_now_add=True)
object_modified = models.DateTimeField(auto_now=True)
def __str__(self):
return f'{self.tag}->{self.registrante}'
""" por ahora a mano desde el shell
from registrantes.models import *
r = Registrante.objects.get(legal_uid='50039229460')
tr = TagForRegistrante.objects.get(nombre='BigTech')
RegistranteTag.objects.create(registrante=r, tag=tr)
# one line shell
from registrantes.models import *;RegistranteTag.objects.create(registrante=Registrante.objects.get(legal_uid='50033280669'), tag=TagForRegistrante.objects.get(nombre='BancosAR'))
""" | StarcoderdataPython |
31220 | <reponame>tassotirap/data-science
#!/usr/bin/python
import sys
import csv
reader = csv.reader(sys.stdin, delimiter='\t')
writer = csv.writer(sys.stdout, delimiter='\t', quotechar='"', quoting=csv.QUOTE_ALL)
userInfo = None
for line in reader:
thisType = line[1]
if thisType == 'A':
userInfo = line
elif thisType == 'B':
if userInfo and userInfo[0] == line[0]:
new_line = []
new_line.extend(userInfo)
new_line.extend(line)
writer.writerow(new_line) | StarcoderdataPython |
157356 | <filename>chatette_qiu/units/intent/example.py<gh_stars>0
from chatette_qiu.units import Example
class IntentExample(Example):
def __init__(self, name, text=None, entities=None):# -> None:
super(IntentExample, self).__init__(text, entities)
self.name = name
@classmethod
def from_example(cls, name, ex):
return cls(name, ex.text, ex.entities)
# def __str__(self):
# return str(self.__dict__)
def __eq__(self, other):
return self.__dict__ == other.__dict__
def __ne__(self, other):
return not self.__eq__(other)
def __hash__(self):
return hash(self.name+self.text+str(self.entities))
| StarcoderdataPython |
13321 | import time
import pytest
from flask import g
from flask import session
import paho.mqtt.client as paho
from SmartSleep.db import get_db
from flask import json
import runpy
msg_nr = 0
messages = [""]
broker = 'broker.emqx.io'
port = 1883
def update_contor():
global msg_nr
msg_nr += 1
def on_message(client, userdata, message):
received = json.loads(message.payload)
if "status" in received:
assert received['status'] == messages[msg_nr]
update_contor()
elif "db" in received:
assert received["db"] == messages[msg_nr]
update_contor()
def test_cooling_system(client, auth):
global msg_nr
msg_nr = 0
global messages
messages = ['16',
"Setting the temperature system level to 1.0", "New temperature system level set to 1.0",
'16',
"Setting the temperature system level to 2.0", "New temperature system level set to 2.0",
'16',
"Setting the temperature system level to 3.0", "New temperature system level set to 3.0",
'16',
"Setting the temperature system level to 4.0", "New temperature system level set to 4.0",
'19',
"Setting the temperature system level to 3.0", "New temperature system level set to 3.0",
'16',
"Setting the temperature system level to 4.0", "New temperature system level set to 4.0",
"18"
]
time.sleep(2)
client_mqtt = paho.Client("client-test-snoring")
client_mqtt.on_message = on_message
client_mqtt.connect(broker)
client_mqtt.loop_start()
client_mqtt.subscribe("SmartSleep/SoundSensor")
auth.login()
response = client.post(f"/config/start_to_sleep?sleep_now={True}")
assert response.status_code == 200
response = client.post("/config/temp?temperature=18")
assert response.status_code == 200
time.sleep(1.5)
response = client.post("/config/current_temp?sensor=16")
assert response.status_code == 200
time.sleep(1.5)
response = client.post("/config/current_temp?sensor=16")
assert response.status_code == 200
time.sleep(1.5)
response = client.post("/config/current_temp?sensor=16")
assert response.status_code == 200
time.sleep(1.5)
response = client.post("/config/current_temp?sensor=16")
assert response.status_code == 200
time.sleep(1.5)
response = client.post("/config/current_temp?sensor=19")
assert response.status_code == 200
time.sleep(1.5)
response = client.post("/config/current_temp?sensor=16")
assert response.status_code == 200
time.sleep(1.5)
response = client.post("/config/current_temp?sensor=18")
assert response.status_code == 200
time.sleep(1.5)
| StarcoderdataPython |
1670316 | """Actions for linking object code produced by compilation"""
load(":private/pkg_id.bzl", "pkg_id")
load(":private/set.bzl", "set")
load(":private/path_utils.bzl", "get_lib_name")
load("@bazel_skylib//lib:paths.bzl", "paths")
load(":private/packages.bzl", "expose_packages")
def _backup_path(target):
"""Return a path from the directory this is in to the Bazel root.
Args:
target: File
Returns:
A path of the form "../../.."
"""
n = len(target.dirname.split("/"))
return "/".join([".."] * n)
def _fix_linker_paths(hs, inp, out, external_libraries):
"""Postprocess a macOS binary to make shared library references relative.
On macOS, in order to simulate the linker "rpath" behavior and make the
binary load shared libraries from relative paths, (or dynamic libraries
load other libraries) we need to postprocess it with install_name_tool.
(This is what the Bazel-provided `cc_wrapper.sh` does for cc rules.)
For details: https://blogs.oracle.com/dipol/entry/dynamic_libraries_rpath_and_mac
Args:
hs: Haskell context.
inp: An input file.
out: An output file.
external_libraries: A list of C library dependencies to make relative.
"""
hs.actions.run_shell(
inputs = [inp],
outputs = [out],
mnemonic = "HaskellFixupLoaderPath",
progress_message = "Fixing install paths for {0}".format(out.basename),
command = " &&\n ".join(
[
"cp {} {}".format(inp.path, out.path),
"chmod +w {}".format(out.path),
] +
[
"/usr/bin/install_name_tool -change {} {} {}".format(
f.path,
paths.join("@loader_path", _backup_path(out), f.path),
out.path,
)
for f in external_libraries
],
),
)
def _create_objects_dir_manifest(hs, objects_dir, dynamic, with_profiling):
suffix = ".dynamic.manifest" if dynamic else ".static.manifest"
objects_dir_manifest = hs.actions.declare_file(
objects_dir.basename + suffix,
sibling = objects_dir,
)
if with_profiling:
ext = "p_o"
elif dynamic:
ext = "dyn_o"
else:
ext = "o"
hs.actions.run_shell(
inputs = [objects_dir],
outputs = [objects_dir_manifest],
command = """
find {dir} -name '*.{ext}' > {out}
""".format(
dir = objects_dir.path,
ext = ext,
out = objects_dir_manifest.path,
),
use_default_shell_env = True,
)
return objects_dir_manifest
def link_binary(
hs,
cc,
dep_info,
extra_srcs,
compiler_flags,
objects_dir,
dynamic,
with_profiling,
version):
"""Link Haskell binary from static object files.
Returns:
File: produced executable
"""
executable = hs.actions.declare_file(hs.name)
if not hs.toolchain.is_darwin:
compile_output = executable
else:
compile_output = hs.actions.declare_file(hs.name + ".temp")
_fix_linker_paths(
hs,
compile_output,
executable,
dep_info.external_libraries,
)
args = hs.actions.args()
args.add(["-optl" + f for f in cc.linker_flags])
if with_profiling:
args.add("-prof")
args.add(hs.toolchain.compiler_flags)
args.add(compiler_flags)
# By default, GHC will produce mostly-static binaries, i.e. in which all
# Haskell code is statically linked and foreign libraries and system
# dependencies are dynamically linked. If linkstatic is false, i.e. the user
# has requested fully dynamic linking, we must therefore add flags to make
# sure that GHC dynamically links Haskell code too. The one exception to
# this is when we are compiling for profiling, which currently does not play
# nicely with dynamic linking.
if dynamic:
if with_profiling:
print("WARNING: dynamic linking and profiling don't mix. Omitting -dynamic.\nSee https://ghc.haskell.org/trac/ghc/ticket/15394")
else:
args.add(["-pie", "-dynamic"])
# When compiling with `-threaded`, GHC needs to link against
# the pthread library when linking against static archives (.a).
# We assume it’s not a problem to pass it for other cases,
# so we just default to passing it.
args.add("-optl-pthread")
args.add(["-o", compile_output.path])
# De-duplicate optl calls while preserving ordering: we want last
# invocation of an object to remain last. That is `-optl foo -optl
# bar -optl foo` becomes `-optl bar -optl foo`. Do this by counting
# number of occurrences. That way we only build dict and add to args
# directly rather than doing multiple reversals with temporary
# lists.
args.add(expose_packages(
dep_info,
lib_info = None,
use_direct = False,
use_my_pkg_id = None,
custom_package_caches = None,
version = version,
))
_add_external_libraries(args, dep_info.external_libraries.values())
solibs = set.union(
set.from_list(dep_info.external_libraries),
dep_info.dynamic_libraries,
)
if hs.toolchain.is_darwin:
args.add(["-optl-Wl,-headerpad_max_install_names"])
# Suppress a warning that Clang prints due to GHC automatically passing
# "-pie" or "-no-pie" to the C compiler.
# This particular invocation of GHC is a little unusual; e.g., we're
# passing an empty archive so that GHC has some input files to work on
# during linking.
args.add([
"-optc-Wno-unused-command-line-argument",
"-optl-Wno-unused-command-line-argument",
])
# Nixpkgs commit 3513034208a introduces -liconv in NIX_LDFLAGS on
# Darwin. We don't currently handle NIX_LDFLAGS in any special
# way, so a hack is to simply do what NIX_LDFLAGS is telling us we
# should do always when using a toolchain from Nixpkgs.
# TODO remove this gross hack.
# TODO: enable dynamic linking of Haskell dependencies for macOS.
args.add("-liconv")
else:
for rpath in set.to_list(_infer_rpaths(executable, solibs)):
args.add(["-optl-Wl,-rpath," + rpath])
objects_dir_manifest = _create_objects_dir_manifest(
hs,
objects_dir,
dynamic = dynamic,
with_profiling = with_profiling,
)
hs.toolchain.actions.run_ghc(
hs,
inputs = depset(transitive = [
depset(extra_srcs),
set.to_depset(dep_info.package_caches),
set.to_depset(dep_info.dynamic_libraries),
depset(dep_info.static_libraries),
depset(dep_info.static_libraries_prof),
depset([objects_dir]),
depset(dep_info.external_libraries.values()),
depset(hs.extra_binaries),
]),
outputs = [compile_output],
mnemonic = "HaskellLinkBinary",
arguments = [args],
params_file = objects_dir_manifest,
)
return executable
def _add_external_libraries(args, libs):
"""Add options to `args` that allow us to link to `libs`.
Args:
args: Args object.
libs: list of external shared libraries.
"""
seen_libs = set.empty()
for lib in libs:
lib_name = get_lib_name(lib)
if not set.is_member(seen_libs, lib_name):
set.mutable_insert(seen_libs, lib_name)
args.add([
"-l{0}".format(lib_name),
"-L{0}".format(paths.dirname(lib.path)),
])
def _infer_rpaths(target, solibs):
"""Return set of RPATH values to be added to target so it can find all
solibs.
Args:
target: File, executable or library we're linking.
solibs: A set of Files, shared objects that the target needs.
Returns:
Set of strings: rpaths to add to target.
"""
r = set.empty()
for solib in set.to_list(solibs):
rpath = paths.normalize(
paths.join(
_backup_path(target),
solib.dirname,
),
)
set.mutable_insert(r, "$ORIGIN/" + rpath)
return r
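# Worked example (added for clarity): for a binary in "bin" that needs a
# shared library located in "bin/_solibs", _backup_path gives ".." and the
# resulting entry is "$ORIGIN/../bin/_solibs".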
def _so_extension(hs):
"""Returns the extension for shared libraries.
Args:
ctx: Rule context.
Returns:
string of extension.
"""
return "dylib" if hs.toolchain.is_darwin else "so"
def link_library_static(hs, cc, dep_info, objects_dir, my_pkg_id, with_profiling):
"""Link a static library for the package using given object files.
Returns:
File: Produced static library.
"""
static_library = hs.actions.declare_file(
"lib{0}.a".format(pkg_id.library_name(hs, my_pkg_id, prof_suffix = with_profiling)),
)
objects_dir_manifest = _create_objects_dir_manifest(
hs,
objects_dir,
dynamic = False,
with_profiling = with_profiling,
)
args = hs.actions.args()
inputs = ([objects_dir, objects_dir_manifest, hs.tools.ar] +
hs.tools_runfiles.ar + hs.extra_binaries)
if hs.toolchain.is_darwin:
# On Darwin, ar doesn't support params files.
args.add([
static_library,
objects_dir_manifest.path,
])
# TODO Get ar location from the CC toolchain. This is
# complicated by the fact that the CC toolchain does not
# always use ar, and libtool has an entirely different CLI.
# See https://github.com/bazelbuild/bazel/issues/5127
hs.actions.run_shell(
inputs = inputs,
outputs = [static_library],
mnemonic = "HaskellLinkStaticLibrary",
command = "{ar} qc $1 $(< $2)".format(ar = hs.tools.ar.path),
arguments = [args],
# Use the default macosx toolchain
env = {"SDKROOT": "macosx"},
)
else:
args.add([
"qc",
static_library,
"@" + objects_dir_manifest.path,
])
hs.actions.run(
inputs = inputs,
outputs = [static_library],
mnemonic = "HaskellLinkStaticLibrary",
executable = hs.tools.ar,
arguments = [args],
)
return static_library
def link_library_dynamic(hs, cc, dep_info, extra_srcs, objects_dir, my_pkg_id):
"""Link a dynamic library for the package using given object files.
Returns:
File: Produced dynamic library.
"""
dynamic_library = hs.actions.declare_file(
"lib{0}-ghc{1}.{2}".format(
pkg_id.library_name(hs, my_pkg_id),
hs.toolchain.version,
_so_extension(hs),
),
)
args = hs.actions.args()
args.add(["-optl" + f for f in cc.linker_flags])
args.add(["-shared", "-dynamic"])
# Work around macOS linker limits. This fix has landed in GHC HEAD, but is
# not yet in a release; plus, we still want to support older versions of
# GHC. For details, see: https://phabricator.haskell.org/D4714
if hs.toolchain.is_darwin:
args.add(["-optl-Wl,-dead_strip_dylibs"])
args.add(expose_packages(
dep_info,
lib_info = None,
use_direct = False,
use_my_pkg_id = None,
custom_package_caches = None,
version = my_pkg_id.version if my_pkg_id else None,
))
_add_external_libraries(args, dep_info.external_libraries.values())
solibs = set.union(
set.from_list(dep_info.external_libraries),
dep_info.dynamic_libraries,
)
if hs.toolchain.is_darwin:
dynamic_library_tmp = hs.actions.declare_file(dynamic_library.basename + ".temp")
_fix_linker_paths(
hs,
dynamic_library_tmp,
dynamic_library,
dep_info.external_libraries,
)
args.add(["-optl-Wl,-headerpad_max_install_names"])
else:
dynamic_library_tmp = dynamic_library
for rpath in set.to_list(_infer_rpaths(dynamic_library, solibs)):
args.add(["-optl-Wl,-rpath," + rpath])
args.add(["-o", dynamic_library_tmp.path])
# Profiling not supported for dynamic libraries.
objects_dir_manifest = _create_objects_dir_manifest(
hs,
objects_dir,
dynamic = True,
with_profiling = False,
)
hs.toolchain.actions.run_ghc(
hs,
inputs = depset([objects_dir], transitive = [
depset(hs.extra_binaries),
depset(extra_srcs),
set.to_depset(dep_info.package_caches),
set.to_depset(dep_info.dynamic_libraries),
depset(dep_info.external_libraries.values()),
]),
outputs = [dynamic_library_tmp],
mnemonic = "HaskellLinkDynamicLibrary",
arguments = [args],
params_file = objects_dir_manifest,
)
return dynamic_library
| StarcoderdataPython |
1760814 | """
Copyright (c) 2017 Intel Corporation
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
This module contains the convince class to read experiment data and generate sensitivity
profiles. See profile.py for more information.
"""
import re
import os
import datetime
from functools import partial
import pandas as pd
import numpy as np
from collections import defaultdict
# -------------------------------
# dataframe on disk caching layer
# -------------------------------
class DataFrameToCSVCache:
""" Dict-like API CSV based cache to store dataframe as compressed CSV.
Can be used as decorator.
"""
CACHE_DIR = '.experiments_csv_cache'
def __init__(self, suffix=''):
self.suffix = suffix
def _filename(self, experiment_id):
return os.path.join(self.CACHE_DIR, '%s%s.csv.bz2' % (experiment_id, self.suffix))
def __contains__(self, experiment_id):
return os.path.exists(self._filename(experiment_id))
def __setitem__(self, experiment_id, df):
if not os.path.exists(self.CACHE_DIR):
os.makedirs(self.CACHE_DIR)
df.to_csv(self._filename(experiment_id), compression='bz2', index=False)
def __getitem__(self, experiment_id):
if experiment_id in self:
return pd.read_csv(self._filename(experiment_id), compression='bz2')
else:
raise KeyError()
def __call__(self, func):
""" Can be use as decorator for function that have experiment_id as first parameter and returns dataframe.
Cache can be disabled by adding cache=False to kwargs in decorated function.
"""
def decorator(experiment_id, *args, **kw):
if kw.pop('cache', True) and experiment_id in self:
return self[experiment_id]
df = func(experiment_id, *args, **kw)
self[experiment_id] = df
return df
return decorator
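# --- Illustrative usage (added; not part of the original module) ---
# The cache works either as a dict keyed by experiment id or as a decorator
# around any loader whose first argument is the experiment id; the names used
# below are hypothetical and the helper is never called in this module.
def _cache_usage_sketch(experiment_id):
    cache = DataFrameToCSVCache(suffix='_example')
    if experiment_id in cache:          # hit: read the compressed CSV back
        return cache[experiment_id]

    @DataFrameToCSVCache(suffix='_example')
    def load(experiment_id):            # miss: build, store and return the frame
        return pd.DataFrame({'experiment': [experiment_id]})

    return load(experiment_id)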
# -------------------------------
# cassandra session singleton
# -------------------------------
CASSANDRA_SESSION = None # one instance for all existing notebook experiments
DEFAULT_CASSANDRA_OPTIONS = dict(
nodes=['127.0.0.1'],
port=9042,
ssl_options=None
)
DEFAULT_KEYSPACE = 'swan'
def _get_or_create_cassandra_session(nodes, port, ssl_options=None):
""" Get or prepare new session to Cassandra cluster.
:param nodes: List of addresses of cassandra nodes.
:param port: Port of cassandra service listening on.
:param ssl_options: Optional SSL options to connect to cassandra in secure manner.
:returns: cassandra session singleton
"""
from cassandra.auth import PlainTextAuthProvider
from cassandra.cluster import Cluster
from cassandra.query import ordered_dict_factory
global CASSANDRA_SESSION
if not CASSANDRA_SESSION:
auth_provider = None
if ssl_options:
username = ssl_options.pop('username')
password = ssl_options.pop('password')
auth_provider = PlainTextAuthProvider(username, password)
cluster = Cluster(nodes, port=port, ssl_options=ssl_options, auth_provider=auth_provider)
CASSANDRA_SESSION = cluster.connect()
CASSANDRA_SESSION.row_factory = ordered_dict_factory
return CASSANDRA_SESSION
# ----------------------------------------
# helper function to load and convert data
# ----------------------------------------
@DataFrameToCSVCache()
def load_dataframe_from_cassandra_streamed(experiment_id, tag_keys, cassandra_options=DEFAULT_CASSANDRA_OPTIONS,
aggfuncs=None, default_aggfunc=np.average,
keyspace=DEFAULT_KEYSPACE):
""" Load data from cassandra database as rows, processes them and returns dataframe with multiindex build on tags.
It only loads doubleval, ns and tags values ignoring other kinds of metrics.
:param experiment_id: identifier of experiment to load metrics for,
:param tag_keys: Names of columns used to aggregate by and build Dataframe multiindex from.
:param ns2agg: Mapping from processes "ns" to aggregation function for one phase.
:param cassandra_session: Cassandra session object to be used to execute query,
:param keyspace: Snap keyspace to be used to load metrics from,
:returns: Data as cassandra rows (each row being dict like).
"""
cassandra_session = _get_or_create_cassandra_session(**cassandra_options)
cassandra_session.set_keyspace(keyspace)
    # helper to drop prefix from ns (removing host dependency).
pattern = re.compile(r'(/intel/swan/(caffe/)?(\w+)/([.\w-]+)/).*?')
drop_prefix = partial(pattern.sub, '')
query = "SELECT ns, doubleval, tags FROM %s.tags WHERE key = 'swan_experiment' AND val=?"
statement = cassandra_session.prepare(query)
rows = cassandra_session.execute(statement, [experiment_id])
    # temporary multi-level index for storing loaded data:
    # the first level is a namespace, the second level is a tuple of values from selected tags
    # and the value is a list of metric values
records = defaultdict(lambda: defaultdict(list))
started = datetime.datetime.now()
print('loading data...')
idx = 0
for idx, row in enumerate(rows):
tags = row['tags']
# namespace, value and tags
ns = drop_prefix(row['ns'])
val = row['doubleval']
tagidx = tuple(tags[tk] for tk in tag_keys)
# store in temporary index
records[ns][tagidx].append(val)
if idx and idx % 50000 == 0:
print("%d loaded" % idx)
print('%d rows loaded in %.0fs' % (idx, (datetime.datetime.now() - started).seconds))
started = datetime.datetime.now()
print('building a dataframe...')
df = pd.DataFrame()
for ns, d in records.items():
tuples = [] # values used to build an index for given series.
data = [] # data for Series
        # Use the aggfunc provided by aggfuncs or fall back to default_aggfunc.
aggfunc = aggfuncs.get(ns, default_aggfunc) if aggfuncs else default_aggfunc
for tags, values in sorted(d.items()):
tuples.append(tags)
data.append(aggfunc(values))
index = pd.MultiIndex.from_tuples(tuples, names=tag_keys)
df[ns] = pd.Series(data, index)
    # Cannot recreate the index after the file is stored without additional info about the number of index columns.
    # Additionally, further transformations are based on values available in columns (not in the index).
df.reset_index(inplace=True)
# Convert all series to numeric if possible.
for column in df.columns:
try:
df[column] = df[column].apply(pd.to_numeric)
except ValueError:
continue
    print('dataframe with shape=%s built in %.0fs' % (df.shape, (datetime.datetime.now() - started).seconds))
return df
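# Illustrative call sketch (not executed on import; the experiment id and tag
# keys below are hypothetical placeholders, and a reachable Cassandra cluster
# with the default options is assumed):
def _load_dataframe_example():
    options = dict(nodes=['127.0.0.1'], port=9042, ssl_options=None)
    return load_dataframe_from_cassandra_streamed(
        experiment_id='example-experiment-id',  # hypothetical identifier
        tag_keys=('swan_aggressor_name', 'swan_loadpoint_qps'),
        cassandra_options=options,
        default_aggfunc=np.average,
    )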
# Constants for columns names used.
# New column names.
ACHIEVED_QPS_LABEL = 'achieved QPS'
ACHIEVED_LATENCY_LABEL = 'achieved latency'
COMPOSITE_VALUES_LABEL = 'composite values'
# Existing column names (from metrics provided by plugins).
PERCENTILE99TH_LABEL = 'percentile/99th'
QPS_LABEL = 'qps' # Absolute achieved QPS.
SWAN_LOAD_POINT_QPS_LABEL = 'swan_loadpoint_qps' # Target QPS.
SWAN_AGGRESSOR_NAME_LABEL = 'swan_aggressor_name'
SWAN_REPETITION_LABEL = 'swan_repetition'
# ----------------------------------------------------
# Style functions & constants for table cells styling
# ----------------------------------------------------
CRIT_STYLE = 'background:#a9341f; color: white;'
WARN_STYLE = 'background:#ffeda0'
OK_STYLE = 'background:#98cc70'
NAN_STYLE = 'background-color: #c0c0c0'
FAIL_STYLE = 'background-color: #b0c0b0'
QPS_FAIL_THRESHOLD = 0.9
QPS_OK_THRESHOLD = 0.95
QPS_WARN_THRESHOLD = 0.8
LATENCY_CRIT_THRESHOLD = 3 # 300 %
LATENCY_WARN_THRESHOLD = 1 # 100 %
def composite_qps_formatter(composite_values, normalized=False):
""" Formatter responsible for showing either absolute or normalized value of QPS. """
if composite_values is None:
return 'N/A'
qps = composite_values[QPS_LABEL]
achieved_qps = composite_values[ACHIEVED_QPS_LABEL]
if any(map(pd.isnull, (achieved_qps, qps))):
        return 'N/A'  # formatter returns cell text, not a CSS style
if normalized:
return '{:.0%}'.format(achieved_qps)
else:
return '{:,.0f}'.format(qps)
def composite_qps_colors(composite_values):
""" Styler for showing QPS from composite values.
    If no value is available, return the NAN style.
    Show OK for achieved QPS above 95%, WARN for achieved QPS above 80%,
    and CRIT below that.
"""
if pd.isnull(composite_values):
return NAN_STYLE
achieved_qps = composite_values[ACHIEVED_QPS_LABEL]
    if pd.isnull(achieved_qps):
        return NAN_STYLE
if achieved_qps > QPS_OK_THRESHOLD:
return OK_STYLE
elif achieved_qps > QPS_WARN_THRESHOLD:
return WARN_STYLE
else:
return CRIT_STYLE
def bytes_formatter(b):
""" Formatter that formats bytes into kb/mb/gb etc... """
for u in ' KMGTPEZ':
if abs(b) < 1024.0:
return "%3.1f%s" % (b, u)
b /= 1024.0
return "%.1f%s" % (b, 'Y')
def composite_latency_formatter(composite_values, normalized=False):
""" Formatter responsible for showing either absolute or normalized
value of latency depending of normalized argument.
Additionally if achieved normalized QPS was below 90% marks column as "FAIL".
"""
if composite_values is None:
return 'N/A'
achieved_qps = composite_values[ACHIEVED_QPS_LABEL]
latency = composite_values[PERCENTILE99TH_LABEL]
achieved_latency = composite_values[ACHIEVED_LATENCY_LABEL]
if any(map(pd.isnull, (achieved_qps, latency, achieved_latency))):
        return 'N/A'  # formatter returns cell text, not a CSS style
if achieved_qps < QPS_FAIL_THRESHOLD:
return '<b>FAIL</b>'
if normalized:
if achieved_latency > LATENCY_CRIT_THRESHOLD:
return '>300%'
else:
return '{:.0%}'.format(achieved_latency)
else:
return '{:.0f}'.format(latency)
def composite_latency_colors(composite_values, slo):
""" Styler responsible for choosing a background of latency tables.
Uses composite value to get info about QPS and latency and then:
- if normalized achieved QPS are below 90% return "fail style"
- or depending of latency: if above 150% - CRIT, if above 100% WARN or OK otherwise
"""
if pd.isnull(composite_values):
return NAN_STYLE
achieved_qps = composite_values[ACHIEVED_QPS_LABEL]
achieved_latency = composite_values[ACHIEVED_LATENCY_LABEL]
if any(map(pd.isnull, (achieved_qps, achieved_latency))):
return NAN_STYLE
if achieved_qps < QPS_FAIL_THRESHOLD:
return FAIL_STYLE
if achieved_latency > LATENCY_CRIT_THRESHOLD:
return CRIT_STYLE
elif achieved_latency > LATENCY_WARN_THRESHOLD:
return WARN_STYLE
else:
return OK_STYLE
class Renamer:
""" Helper class to facilitate columns renaming in dataframe before visualizing.
    An instance can be used as a function to refer to the new column name.
"""
def __init__(self, columns_to_rename):
self.columns_to_rename = columns_to_rename
def rename(self, df):
""" Rename columns according self.columns_to_rename.
Use Renamer instance() method to refer to new names.
:returns: new dataframe with columns renamed
"""
return df.rename(columns=self.columns_to_rename)
def __call__(self, original_name):
""" Returns new name of column to be used by formatting & styling functions. """
return self.columns_to_rename.get(original_name, original_name)
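# Minimal usage sketch for Renamer (assumes a dataframe df that contains the
# swan_loadpoint_qps column):
def _renamer_example(df):
    renamer = Renamer({'swan_loadpoint_qps': 'Target QPS'})
    renamed = renamer.rename(df)              # dataframe copy with renamed columns
    new_name = renamer('swan_loadpoint_qps')  # -> 'Target QPS'
    return renamed, new_name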
def add_extra_and_composite_columns(df, slo):
""" Add extra derived columns with achieved normalized QPS/latency
and composite column to store all values in one dict.
Reshaping and preparing extra normalized columns
for latency/qps according target SLOs.
:returns: New dataframe with new derived columns and one special composite column.
"""
# Extra columns.
# Calculate achieved QPS as percentage (normalized to 1).
df[ACHIEVED_QPS_LABEL] = pd.Series(df[QPS_LABEL] / df[SWAN_LOAD_POINT_QPS_LABEL])
# Calculate achieved latency in regards to SLO.
df[ACHIEVED_LATENCY_LABEL] = pd.Series(df[PERCENTILE99TH_LABEL] / slo)
    # Columns to store in one cell for further processing.
COMPOSITE_COLUMNS = [ACHIEVED_QPS_LABEL, PERCENTILE99TH_LABEL, ACHIEVED_LATENCY_LABEL, QPS_LABEL]
# Composite value to store all values e.g. "achieved qps" and "latency" together in one cell as dict.
# Used to display one of the values and format using other value.
df[COMPOSITE_VALUES_LABEL] = df[COMPOSITE_COLUMNS].apply(dict, axis=1)
return df
def _pivot_ui(df, totals=True, **options):
""" Interactive pivot table for data analysis. """
try:
from pivottablejs import pivot_ui
except ImportError:
print("Error: cannot import pivottablejs, please install 'pip install pivottablejs'!")
return
iframe = pivot_ui(df, **options)
if not totals:
with open(iframe.src) as f:
replacedHtml = f.read().replace(
'</style>',
'.pvtTotal, .pvtTotalLabel, .pvtGrandTotal {display: none}</style>'
)
with open(iframe.src, "w") as f:
f.write(replacedHtml)
return iframe
class Experiment:
""" Base class for loading & storing data for swan experiments.
    During loading from Cassandra, metrics are indexed and grouped by tag_keys (a.k.a. dimensions).
    The additional aggfuncs parameter defines how specific namespaces (ns) should be initially aggregated
    (by default the mean over the whole phase).
"""
def __init__(self, experiment_id, tag_keys, cassandra_options=DEFAULT_CASSANDRA_OPTIONS,
aggfuncs=None, default_aggfunc=np.mean, cache=True, keyspace=DEFAULT_KEYSPACE):
self.experiment_id = experiment_id
self.df = load_dataframe_from_cassandra_streamed(
experiment_id, tag_keys, cassandra_options,
aggfuncs=aggfuncs, default_aggfunc=default_aggfunc, cache=cache, keyspace=keyspace,
)
self.df.columns.name = 'Experiment %s' % self.experiment_id
def _repr_html_(self):
""" When presented in jupyter just return representation of dataframe. """
return self.df._repr_html_()
def pivot_ui(self):
""" Interactive pivot table for data analysis. """
return _pivot_ui(self.df)
# --------------------------------------------------------------
# "sensitivity profile" experiment
# --------------------------------------------------------------
class SensitivityProfile:
""" Visualization for "sensitivity profile" experiments that presents
latency/QPS and caffe aggressor throughput in "aggressor" and
"load" dimensions.
"""
tag_keys = (
SWAN_AGGRESSOR_NAME_LABEL,
SWAN_LOAD_POINT_QPS_LABEL,
SWAN_REPETITION_LABEL,
)
def __init__(self, experiment_id, slo, cassandra_options=DEFAULT_CASSANDRA_OPTIONS,
cache=True, keyspace=DEFAULT_KEYSPACE):
self.experiment = Experiment(experiment_id, self.tag_keys, cassandra_options,
aggfuncs=dict(batches=np.max), cache=cache, keyspace=keyspace)
self.slo = slo
# Pre-process data specifically for this experiment.
df = self.experiment.df.copy()
df = add_extra_and_composite_columns(df, slo)
# Replace "None" aggressor with "Baseline" only for aggressor based experiments.
df[SWAN_AGGRESSOR_NAME_LABEL].replace(to_replace={'None': 'Baseline'}, inplace=True)
# Rename columns
self.renamer = Renamer({
SWAN_LOAD_POINT_QPS_LABEL: 'Target QPS',
SWAN_AGGRESSOR_NAME_LABEL: 'Aggressor',
})
self.df = self.renamer.rename(df)
self.df.columns.name = 'Profile %s' % self.experiment.experiment_id
def _repr_html_(self):
""" When presented in jupyter just return representation of dataframe. """
return self.df._repr_html_()
def _composite_pivot_table(self, aggressors=None, qpses=None):
df = self.df
if aggressors is not None:
df = df[df[self.renamer(SWAN_AGGRESSOR_NAME_LABEL)].isin(aggressors)]
if qpses is not None:
df = df[df[self.renamer(SWAN_LOAD_POINT_QPS_LABEL)].isin(qpses)]
return df.pivot_table(
values=COMPOSITE_VALUES_LABEL,
index=self.renamer(SWAN_AGGRESSOR_NAME_LABEL),
columns=self.renamer(SWAN_LOAD_POINT_QPS_LABEL),
aggfunc='first',
)
def _get_caption(self, cell, normalized=False):
return '%s%s of "sensitivity profile" experiment %s' % (
'normalized ' if normalized else '',
cell,
self.experiment.experiment_id
)
def latency(self, normalized=True, aggressors=None, qpses=None):
""" Generate table with information about tail latency."""
return self._composite_pivot_table(
aggressors,
qpses
).style.applymap(
partial(composite_latency_colors, slo=self.slo),
).format(
partial(composite_latency_formatter, normalized=normalized)
).set_caption(
self._get_caption('latency[us]', normalized)
)
def qps(self, normalized=True, aggressors=None, qpses=None):
""" Generate table with information about achieved QPS."""
return self._composite_pivot_table(
aggressors,
qpses
).style.applymap(
partial(composite_qps_colors),
).format(
partial(composite_qps_formatter, normalized=normalized)
).set_caption(
self._get_caption('queries per second', normalized)
)
def caffe_batches(self):
""" Generate table with information about Caffe aggressor
    image batches processed during each phase."""
# For caffe only show caffe aggressor data.
df = self.df[self.df[self.renamer(SWAN_AGGRESSOR_NAME_LABEL)] == 'Caffe']
return df.pivot_table(
values="batches",
index=self.renamer(SWAN_AGGRESSOR_NAME_LABEL),
columns=self.renamer(SWAN_LOAD_POINT_QPS_LABEL),
).style.format(
'{:.0f}'
).set_caption(
self._get_caption('caffe image batches')
)
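# Illustrative notebook-style usage sketch (the experiment id and SLO below are
# placeholders, not values from a real run; calling this requires a reachable
# Cassandra cluster with the default options):
def _sensitivity_profile_example():
    profile = SensitivityProfile('example-experiment-id', slo=500)
    latency_table = profile.latency(normalized=True)   # styled latency table
    qps_table = profile.qps(normalized=False)          # styled absolute-QPS table
    return latency_table, qps_table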
# --------------------------------------------------------------
# "optimal core allocation" experiment
# --------------------------------------------------------------
NUMBER_OF_CORES_LABEL = 'number_of_cores' # HP cores. (TODO: replace with number_of_threads)
SNAP_USE_COMPUTE_SATURATION_LABEL = '/intel/use/compute/saturation'
class OptimalCoreAllocation:
""" Visualization for "optimal core allocation" experiments that
presents latency/QPS and cpu utilization in "number of cores" and "load" dimensions.
"""
tag_keys = (
SWAN_AGGRESSOR_NAME_LABEL,
NUMBER_OF_CORES_LABEL,
SWAN_LOAD_POINT_QPS_LABEL,
)
def __init__(self, experiment_id, slo, cassandra_options=DEFAULT_CASSANDRA_OPTIONS,
cache=True, keyspace=DEFAULT_KEYSPACE):
self.experiment = Experiment(experiment_id, self.tag_keys, cassandra_options, cache=cache, keyspace=keyspace)
self.slo = slo
# Pre-process data specifically for this experiment.
df = self.experiment.df.copy()
df = add_extra_and_composite_columns(df, slo)
# Rename columns.
self.renamer = Renamer({
SWAN_AGGRESSOR_NAME_LABEL: 'Aggressor',
NUMBER_OF_CORES_LABEL: 'Number of cores',
SWAN_LOAD_POINT_QPS_LABEL: 'Target QPS',
})
self.df = self.renamer.rename(df)
self.df.columns.name = 'Optimal core allocation %s' % self.experiment.experiment_id
def _repr_html_(self):
return self.df._repr_html_()
def _composite_pivot_table(self, aggressors=None, qpses=None):
df = self.df
if aggressors is not None:
df = df[df[self.renamer(SWAN_AGGRESSOR_NAME_LABEL)].isin(aggressors)]
if qpses is not None:
df = df[df[self.renamer(SWAN_LOAD_POINT_QPS_LABEL)].isin(qpses)]
return df.pivot_table(
values=COMPOSITE_VALUES_LABEL,
index=self.renamer(NUMBER_OF_CORES_LABEL),
columns=self.renamer(SWAN_LOAD_POINT_QPS_LABEL),
aggfunc='first',
)
def _get_caption(self, cell, normalized):
return '%s%s of "optimal core allocation" experiment %s' % (
'normalized ' if normalized else '',
cell,
self.experiment.experiment_id
)
def latency(self, normalized=True, aggressors=None, qpses=None):
return self._composite_pivot_table(
aggressors,
qpses
).style.applymap(
partial(composite_latency_colors, slo=self.slo),
).format(
partial(composite_latency_formatter, normalized=normalized)
).set_caption(
self._get_caption('latency[us]', normalized)
)
def qps(self, normalized=True, aggressors=None, qpses=None):
return self._composite_pivot_table(
aggressors,
qpses
).style.applymap(
partial(composite_qps_colors),
).format(
partial(composite_qps_formatter, normalized=normalized)
).set_caption(
self._get_caption('queries per second', normalized)
)
def cpu(self):
def cpu_colors(cpu):
""" Style function for cpu colors. """
if pd.isnull(cpu):
return NAN_STYLE
return "background: rgb(%d, %d, 0); color: white;" % (cpu * 255, 255 - cpu * 255)
return self.df.pivot_table(
values=SNAP_USE_COMPUTE_SATURATION_LABEL,
index=self.renamer(NUMBER_OF_CORES_LABEL),
columns=self.renamer(SWAN_LOAD_POINT_QPS_LABEL),
).style.applymap(
cpu_colors
).format(
'{:.0%}'
).set_caption(
self._get_caption('cpu utilization', False)
)
# --------------------------------------------------------------
# memcached-cat experiment
# --------------------------------------------------------------
def new_aggregated_index_based_column(df, source_indexes_column, template, aggfunc=sum):
""" Create new pd.Series as aggregation of values from other columns.
It uses template to find values from other columns, using indexes in one of the columns.
E.g. with template='column-{}' and input dataframe like this:
| example_indexes | column-1 | column-2 | column-3 |
| 1,2 | 1 | 11 | 111 |
| 1 | 2 | 22 | 222 |
| 1,2,3 | 3 | 33 | 333 |
when called like this
    >>> new_aggregated_index_based_column(df, 'example_indexes', template='column-{}', aggfunc=sum)
results with series like this:
| 12 (1+11) |
| 2 |
| 369 (3+33+333) |
"""
array = np.empty(len(df))
for row_index, column_indexes in enumerate(df[source_indexes_column]):
indexes = column_indexes.split(',')
values = [df.iloc[row_index][template.format(index)] for index in indexes]
aggvalue = aggfunc(values)
array[row_index] = aggvalue
return pd.Series(array)
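# Tiny self-contained sketch mirroring the docstring example above
# (column names are illustrative only):
def _aggregated_index_column_example():
    df = pd.DataFrame({
        'example_indexes': ['1,2', '1', '1,2,3'],
        'column-1': [1, 2, 3],
        'column-2': [11, 22, 33],
        'column-3': [111, 222, 333],
    })
    # Expected values: 12 (1+11), 2, 369 (3+33+333).
    return new_aggregated_index_based_column(df, 'example_indexes', 'column-{}', sum)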
# Derived metrics from Intel RDT collector.
LLC_BE_LABEL = 'llc/be/megabytes'
LLC_BE_PERC_LABEL = 'llc/be/percentage'
MEMBW_BE_LABEL = 'membw/be/gigabytes'
LLC_HP_LABEL = 'llc/hp/megabytes'
LLC_HP_PERC_LABEL = 'llc/hp/percentage'
MEMBW_HP_LABEL = 'membw/hp/gigabytes'
# BE configuration labels
BE_NUMBER_OF_CORES_LABEL = 'be_number_of_cores'
BE_L3_CACHE_WAYS_LABEL = 'be_l3_cache_ways'
class CAT:
""" Visualization for "optimal core allocation" experiments that
presents latency/QPS and cpu utilization in "number of cores" and "load" dimensions.
"""
tag_keys = ('be_cores_range',
'hp_cores_range',
BE_L3_CACHE_WAYS_LABEL,
BE_NUMBER_OF_CORES_LABEL,
SWAN_AGGRESSOR_NAME_LABEL,
SWAN_LOAD_POINT_QPS_LABEL)
def __init__(self, experiment_id, slo, cassandra_options=DEFAULT_CASSANDRA_OPTIONS,
cache=True, keyspace=DEFAULT_KEYSPACE):
self.experiment = Experiment(experiment_id, self.tag_keys, cassandra_options,
aggfuncs=dict(batches=np.max), cache=cache, keyspace=keyspace)
self.slo = slo
df = self.experiment.df.copy()
df = add_extra_and_composite_columns(df, slo)
if '/intel/rdt/llc_occupancy/0/bytes' in df.columns:
# aggregate BE columns
df[LLC_BE_LABEL] = new_aggregated_index_based_column(
df, 'be_cores_range', '/intel/rdt/llc_occupancy/{}/bytes', sum)/(1024*1024)
df[MEMBW_BE_LABEL] = new_aggregated_index_based_column(
df, 'be_cores_range', '/intel/rdt/memory_bandwidth/local/{}/bytes', sum)/(1024*1024*1024)
df[LLC_HP_LABEL] = new_aggregated_index_based_column(
df, 'hp_cores_range', '/intel/rdt/llc_occupancy/{}/bytes', sum)/(1024*1024)
df[MEMBW_HP_LABEL] = new_aggregated_index_based_column(
df, 'hp_cores_range', '/intel/rdt/memory_bandwidth/local/{}/bytes', sum)/(1024*1024*1024)
df[LLC_BE_PERC_LABEL] = new_aggregated_index_based_column(
df, 'be_cores_range', '/intel/rdt/llc_occupancy/{}/percentage', sum) / 100
df[LLC_HP_PERC_LABEL] = new_aggregated_index_based_column(
df, 'hp_cores_range', '/intel/rdt/llc_occupancy/{}/percentage', sum) / 100
self.df = df
def _get_caption(self, cell, normalized):
return '%s%s of "memcached-cat" experiment %s' % (
'normalized ' if normalized else '',
cell,
self.experiment.experiment_id
)
def filtered_df(self):
""" Returns dataframe that exposes only meaningful columns."""
# RDT collected data.
rdt_columns = [
LLC_HP_LABEL,
LLC_HP_PERC_LABEL,
LLC_BE_LABEL,
LLC_BE_PERC_LABEL,
MEMBW_HP_LABEL,
MEMBW_BE_LABEL,
]
columns = [
SWAN_LOAD_POINT_QPS_LABEL,
SWAN_AGGRESSOR_NAME_LABEL,
BE_NUMBER_OF_CORES_LABEL,
BE_L3_CACHE_WAYS_LABEL,
PERCENTILE99TH_LABEL,
ACHIEVED_LATENCY_LABEL,
QPS_LABEL,
ACHIEVED_QPS_LABEL,
]
# Check if RDT collector data is available.
if LLC_HP_LABEL in self.df:
columns += rdt_columns
df = self.df[columns]
# Drop title of dataframe.
df.columns.name = ''
return df
def filtered_df_table(self):
""" Returns an simple formated dataframe """
df = self.filtered_df()
styler = df.style.format('{:.0%}', [
ACHIEVED_QPS_LABEL,
ACHIEVED_LATENCY_LABEL,
]
)
        # Format RDT percentage columns only when they are available.
if LLC_HP_LABEL in self.df:
styler = styler.format('{:.0%}', [
LLC_BE_PERC_LABEL,
LLC_HP_PERC_LABEL
])
return styler
def latency(self, normalized=True, aggressors=None, qpses=None):
        # Create a local reference to the data and filter it according to the provided parameters.
df = self.df
if aggressors is not None:
df = df[df[SWAN_AGGRESSOR_NAME_LABEL].isin(aggressors)]
if qpses is not None:
df = df[df[SWAN_LOAD_POINT_QPS_LABEL].isin(qpses)]
# Rename columns.
renamer = Renamer({
NUMBER_OF_CORES_LABEL: 'Number of cores',
SWAN_LOAD_POINT_QPS_LABEL: 'Target QPS',
BE_L3_CACHE_WAYS_LABEL: 'BE cache ways',
BE_NUMBER_OF_CORES_LABEL: 'BE number of cores',
})
df = renamer.rename(df)
return df.pivot_table(
values=COMPOSITE_VALUES_LABEL, aggfunc='first',
index=[renamer(SWAN_AGGRESSOR_NAME_LABEL), renamer(BE_L3_CACHE_WAYS_LABEL)],
columns=[renamer(SWAN_LOAD_POINT_QPS_LABEL), renamer(BE_NUMBER_OF_CORES_LABEL)],
).style.applymap(
partial(composite_latency_colors, slo=self.slo),
).format(
partial(composite_latency_formatter, normalized=normalized)
).set_caption(
self._get_caption('latency[us]', normalized)
)
def filtered_df_pivot_ui(self,
rows=(SWAN_AGGRESSOR_NAME_LABEL, 'be_l3_cache_ways'),
cols=(SWAN_LOAD_POINT_QPS_LABEL, 'be_number_of_cores'),
aggregatorName='First', vals=('percentile/99th',), rendererName='Heatmap', **options):
return _pivot_ui(
self.filtered_df(),
totals=False,
rows=rows,
cols=cols,
vals=vals,
aggregatorName=aggregatorName,
rendererName=rendererName,
**options
)
| StarcoderdataPython |
180097 | <filename>m2py/thermo/script/test_gas.py
from m2py.thermo.marktable import read_data_tables
from m2py.thermo import gas as g
from pprint import pprint
from m2py.utils import resource_path
tables = read_data_tables(resource_path("../data/gas_test_data.txt"))
gases = list(tables.keys())
def test_fuction(function, gas, _input, _output):
_function = lambda t: function(t, gas)
_error = lambda x: 100*abs(x[1]-x[0])/x[0]
output = list(map(_function, _input))
error = list(map(_error, list(zip(output, _output))))
print("GAS : ", gas)
print("Testing : ", function.__name__)
print("\nMax error %", max(error))
print("x f(x) f'(x) %%error)")
pprint (list(zip( _input, output, _output, error)))
print("\n\n")
test_gas = lambda gas, func, output : test_fuction(func, gas, tables[gas].T,tables[gas][output])
for gas in gases:
test_gas(gas, g.__cp_nasap_p7__, "cp")
test_gas(gas, g.s_nasa_p7, "s")
print(60*"-")
#test_gas("O2", g.__cp_nasap_p7__, "cp")
#CO2 = tables["CO2"]
| StarcoderdataPython |
1708650 | <reponame>contek-io/contek-tusk
from __future__ import annotations
import atexit
from datetime import datetime
from threading import Timer, RLock
from typing import Optional, Union, Dict
import contek_tusk as tusk
from contek_tusk.batching_config import BatchingConfig
from contek_tusk.entry_input_normalizer import EntryInputNormalizer
from contek_tusk.entry_row import EntryRow
from contek_tusk.env_tags_cache import EnvTagsCache
from contek_tusk.metric_batch import MetricBatch
from contek_tusk.metric_client import DEFAULT_CLIENT
from contek_tusk.metric_formatter import MetricFormatter
from contek_tusk.schema_provider import SchemaProvider
from contek_tusk.table import Table
from contek_tusk.time_column_cache import TimeColumnCache
class Metric:
DEFAULT_BATCHING_CONFIG = BatchingConfig.default()
def __init__(
self,
table: Table,
time_column_cache: TimeColumnCache,
env_tags_cache: EnvTagsCache,
entry_input_normalizer: EntryInputNormalizer,
metric_formatter: MetricFormatter,
schema_provider: SchemaProvider,
batching_config: BatchingConfig,
client_name: str,
) -> None:
self._table = table
self._time_column_cache = time_column_cache
self._env_tags_cache = env_tags_cache
self._entry_input_normalizer = entry_input_normalizer
self._metric_formatter = metric_formatter
self._schema_provider = schema_provider
self._batch = MetricBatch(table, batching_config)
self._client_name = client_name
self._lock: RLock = RLock()
self._timer: Optional[Timer] = None
atexit.register(self._flush)
@classmethod
def metric(
cls,
table: Union[Table, str],
batching_config: BatchingConfig = DEFAULT_BATCHING_CONFIG,
client_name: str = DEFAULT_CLIENT,
) -> Metric:
if type(table) is str:
table = Table.from_str(table)
schema_provider = SchemaProvider(table, client_name)
return cls(
table,
TimeColumnCache(schema_provider, table.get_time_column()),
EnvTagsCache(schema_provider),
EntryInputNormalizer(schema_provider),
MetricFormatter(table, schema_provider),
schema_provider,
batching_config,
client_name,
)
def write(self, key_values: Dict[str, any]) -> None:
time_column = self._time_column_cache.get()
if time_column is not None:
now = datetime.utcnow()
key_values[time_column] = now
env_tags = self._env_tags_cache.get()
if env_tags is not None:
for (key, value) in env_tags.items():
if key not in key_values:
key_values[key] = value
entry_row = self._entry_input_normalizer.normalize(key_values)
if entry_row is None:
return
self._accept(entry_row)
def get_schema_provider(self) -> SchemaProvider:
return self._schema_provider
def _accept(self, entry_row: EntryRow) -> None:
self._batch.add(entry_row)
self._schedule_if_idle()
def _schedule_if_idle(self) -> None:
if self._batch.is_immediate():
self._flush()
return
self._lock.acquire()
try:
timer = self._timer
if timer is not None and timer.is_alive():
return
self._schedule()
finally:
self._lock.release()
def _flush_and_schedule(self) -> None:
updated = self._flush()
if not updated:
return
self._schedule()
def _schedule(self) -> None:
self._lock.acquire()
try:
timer = Timer(
self._batch.get_period().total_seconds(),
self._flush_and_schedule,
)
timer.daemon = True
timer.start()
self._timer = timer
finally:
self._lock.release()
def _flush(self) -> bool:
client = tusk.get_client(self._client_name)
if client is None:
return False
data = self._batch.export(self._metric_formatter)
if data is None:
return False
client.write(data)
return True
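# Minimal usage sketch (assumes a tusk client has been registered elsewhere via
# contek_tusk; the table name format and column names below are hypothetical
# placeholders, not a documented schema):
def _metric_usage_example():
    requests_metric = Metric.metric('metrics.http_requests')
    requests_metric.write({'path': '/ping', 'status': 200, 'latency_ms': 3.2})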
| StarcoderdataPython |
169649 | <gh_stars>0
# coding=utf-8
# Copyright 2022 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Config object for MSG."""
import gin
import dataclasses
import typing
@gin.configurable
@dataclasses.dataclass
class MSGConfig:
"""Configuration options for MSG."""
policy_lr: float = 3e-5
# policy_lr: float = 1e-4
q_lr: float = 3e-4
ensemble_method: str = 'deep_ensembles'
mimo_using_adamw: bool = False
mimo_using_obs_tile: bool = False
mimo_using_act_tile: bool = False
use_double_q: bool = False
ensemble_size: int = 16
beta: float = -1.0
td_target_method: str = 'independent'
perform_sarsa_q_eval: bool = False
num_q_repr_pretrain_iters: int = 0
pretrain_temp: float = 1.
use_sass: bool = False
num_bc_iters: int = 50_000
use_random_weighting_in_critic_loss: bool = True
use_ema_target_critic_params: bool = True
entropy_coefficient: typing.Optional[float] = None
target_entropy: float = 0.0
use_entropy_regularization: bool = True
num_sgd_steps_per_step: int = 1
actor_network_hidden_sizes: typing.Tuple = (256, 256)
critic_network_hidden_sizes: typing.Tuple = (256, 256)
networks_init_type: str = 'glorot_also_dist'
critic_random_init: bool = False
  critic_init_scale: float = 1.0  # I don't think it is used anymore
behavior_regularization_type: str = 'none'
behavior_regularization_alpha: float = 1.0
num_cql_actions: int = 2 # if using cql regularization type
eval_with_q_filter: bool = False
num_eval_samples: int = 10
rem_mode: bool = False
use_img_encoder: bool = False
img_encoder_params_ckpt_path: str = ''
img_encoder_fn: typing.Optional[typing.Callable] = None
| StarcoderdataPython |
1667130 | # -*- coding: utf-8 -*-
"""
@author: TT
"""
import FileHelper as fh
import TypeHelper as th
import ziptozcta as zz
def LoadZipToZCTA():
fileName = r'Datafiles/zip_to_zcta10_nyc_with_NBH.csv'
return fh.readInCSVDicData(fileName, processZipToZCTA)
#process
def processZipToZCTA(fileList):
data = []
#rowCount = 0
for line in fileList:
obj = zz.zipToZCTA(line['zipcode'].strip(), line['zcta5'].strip(), line['neighborhoodlabel'].strip(), line['boro'].strip())
data.append(obj)
#rowCount += 1
return data
#get
def getZCTAByZip(zipCode, data):
try:
print("zipcode from file: " + str(zipCode))
matching = [item for item in data if item.ZipCode == zipCode]
if(len(matching) > 0):
print("found match" + str(matching[0].ZCTA))
return matching[0]
print("no match")
        return zz.zipToZCTA(0, 0, '', '') #return blank one so no error occurs, just dumps a 0 in the file
except Exception as e:
print("Error: " + str(e))
#use this to just get the zcta. less efficient since it has to load the data on every call
def getZCTAValueByZip(zipCode):
data = LoadZipToZCTA()
obj = getZCTAByZip(zipCode, data)
if(obj == None):
return obj
else:
return obj.ZCTA
#uses file from run input to append data to file.
def appendZCTADataToFile(fileFrame, zipcol):
mapdata = LoadZipToZCTA()
field1 = 'zcta'
field2 = 'nbhLabel'
field3 = 'boroLabel'
fileFrame[field1] = fileFrame[zipcol].apply(lambda x: getZCTAByZip(th.cleanInts(x), mapdata).ZCTA)
fileFrame[field2] = fileFrame[zipcol].apply(lambda x: getZCTAByZip(th.cleanInts(x), mapdata).Neighborhood)
fileFrame[field3] = fileFrame[zipcol].apply(lambda x: getZCTAByZip(th.cleanInts(x), mapdata).Boro)
return fileFrame
def run():
inText = input("Enter the name of the file to add zcta data to (or enter 'exit' to exit): ")
if inText.upper() != 'EXIT':
fileName = fh.parseToCSVFileName(inText)
if fileName == 'N':
print("Invalid file name was provided, please try again.")
else:
print("Attempting to read data...\n")
data = fh.readInCSVPandas(fileName, -1)
zipCol = input("What column contains the zip code (name label)? ")
fh.writeOutCSVPandas("withZCTA_" + fileName, appendZCTADataToFile(data, zipCol))
print("Done.")
#run
run() | StarcoderdataPython |
1626922 | # imports
import stardust
import os
import inspect
from re import findall
class CustomStardustFilters:
    # str.strip removes a set of characters, not a suffix; build the path with os.path instead.
    rep = os.path.join(os.path.dirname(os.path.abspath(inspect.getfile(stardust.main.ctf))), 'filters/')
filters = rep + 'filters.txt'
info = rep + 'filters.info'
@classmethod
def get_number_of_filters(cls):
'''Read number of filters in stardust from the filters.info file.'''
with open(CustomStardustFilters.info, 'r') as fi:
contents = fi.read()
lines = contents.split('\n')
return len(lines)
@classmethod
def is_filter_in_info_file(cls, filter: str) -> bool:
'''Check if a filter is already in the filters.info file.'''
with open(CustomStardustFilters.info, 'r') as fi:
contents = fi.read()
lines = contents.split('\n')
flag = False
for line in lines:
if filter in line:
flag = True
return flag
@classmethod
def add_file(cls, file: str):
'''Add filters from a file to the filters.info and filters.txt files.'''
with open(file, 'r') as f:
contents = f.read()
lines = contents.split('\n')
filters = []
current_filter = []
lines_in_filter = 0
for line in lines:
if not current_filter:
lines_in_filter = int(findall(r'[\s+]?(\d+)[\s\S]*',line)[0])
current_filter.append(line.strip())
if len(current_filter)-1 == lines_in_filter:
filters.append(current_filter)
current_filter = []
print(f'Found {len(filters)} filters in {file}')
number = CustomStardustFilters.get_number_of_filters() + 1
for filter in filters:
info = filter[0]
if not CustomStardustFilters.is_filter_in_info_file(info):
print(f'Added filter no. {number} to stardust filter list: {info}')
with open(CustomStardustFilters.info, 'a+') as fa:
fa.write(f'\n{number} {info}')
with open(CustomStardustFilters.filters, 'a+') as fa:
fa.write(f'\n{info}')
for line in filter[1:]:
fa.write(f'\n{line}')
number += 1
else:
print(f'Filter was already present in stardust filter list: {info}')
if __name__ == '__main__':
c = CustomStardustFilters.get_number_of_filters()
CustomStardustFilters.add_file('Data/COSMOS/extra_filters.txt') | StarcoderdataPython |
1658739 | <gh_stars>1000+
#!/usr/bin/env python
# Copyright 2021 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import os
def PathToNyc():
return os.path.join(
os.path.dirname(__file__), 'node_modules', 'nyc', 'bin', 'nyc')
| StarcoderdataPython |
3277172 | # Generated by Django 3.2.12 on 2022-05-13 12:47
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
('products', '0001_initial'),
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.CreateModel(
name='Cart',
fields=[
('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('created', models.DateTimeField(auto_now_add=True, db_index=True)),
('modified', models.DateTimeField(auto_now=True)),
('total', models.DecimalField(blank=True, decimal_places=2, default=0, max_digits=10, null=True)),
('user', models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, related_name='user_cart', to=settings.AUTH_USER_MODEL)),
],
options={
'abstract': False,
},
),
migrations.CreateModel(
name='CartItem',
fields=[
('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('created', models.DateTimeField(auto_now_add=True, db_index=True)),
('modified', models.DateTimeField(auto_now=True)),
('custom_order', models.BooleanField(default=False)),
('pd_1', models.DecimalField(blank=True, decimal_places=2, max_digits=5, null=True)),
('pd_2', models.DecimalField(blank=True, decimal_places=2, max_digits=5, null=True)),
('right_sph', models.DecimalField(blank=True, decimal_places=2, max_digits=5, null=True)),
('right_cyl', models.DecimalField(blank=True, decimal_places=2, max_digits=5, null=True)),
('right_axis', models.DecimalField(blank=True, decimal_places=2, max_digits=5, null=True)),
('right_add', models.CharField(default='None', max_length=100, null=True)),
('left_sph', models.DecimalField(blank=True, decimal_places=2, max_digits=5, null=True)),
('left_cyl', models.DecimalField(blank=True, decimal_places=2, max_digits=5, null=True)),
('left_axis', models.DecimalField(blank=True, decimal_places=2, max_digits=5, null=True)),
('left_add', models.CharField(default='None', max_length=100, null=True)),
('prism', models.BooleanField(default=False)),
('lens_type', models.CharField(default='None', max_length=25, null=True)),
('rx_type', models.CharField(default='None', max_length=25, null=True)),
('lens_coat', models.CharField(default='None', max_length=25, null=True)),
('quantity', models.IntegerField(default=1)),
('cart', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='cart_item', to='cart.cart')),
('product', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='cart_product', to='products.product')),
],
options={
'abstract': False,
},
),
]
| StarcoderdataPython |
3369880 | """
LeetCode Problem: 61. Rotate List
Link: https://leetcode.com/problems/rotate-list/
Language: Python
Written by: <NAME>
Time Complexity: O(N)
Space Complexity: O(1)
"""
# Approach 1
class Solution:
def rotateRight(self, head: 'ListNode', k: 'int') -> 'ListNode':
# base cases
if not head:
return None
if not head.next:
return head
# close the linked list into the ring
old_tail = head
n = 1
while old_tail.next:
old_tail = old_tail.next
n += 1
old_tail.next = head
# find new tail : (n - k % n - 1)th node
# and new head : (n - k % n)th node
new_tail = head
for i in range(n - k % n - 1):
new_tail = new_tail.next
new_head = new_tail.next
# break the ring
new_tail.next = None
return new_head
# Approach 2
class Solution:
def swapHelper(self, head, index, length):
i = length - index
prev = None
curr = head
while index > 0:
prev = curr
curr = curr.next
index -= 1
prev.next = None
startNode = curr
if curr.next is None:
curr.next = head
return startNode
while i > 0:
curr = curr.next
i -= 1
if curr.next is None:
curr.next = head
break
return startNode
def rotateRight(self, head: ListNode, k: int) -> ListNode:
if not head or k == 0:
return head
curr = head
length = 0
endNode = None
while curr:
if curr.next is None:
endNode = curr
curr = curr.next
length += 1
if length == 1 or k % length == 0:
return head
if k > length:
actualSwaps = k % length
index = length - actualSwaps
return self.swapHelper(head, index, length)
else:
return self.swapHelper(head, length-k, length) | StarcoderdataPython |
3349146 | # Copyright (c) 2018, 2020 ARM Limited
#
# The license below extends only to copyright in the software and shall
# not be construed as granting a license to any other intellectual
# property including but not limited to intellectual property relating
# to a hardware implementation of the functionality of the software
# licensed hereunder. You may use the software subject to the license
# terms below provided that you ensure that this notice is replicated
# unmodified and in its entirety in all distributions of the software,
# modified or unmodified, in source code or in binary form.
#
# Copyright (c) 2004-2005 The Regents of The University of Michigan
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met: redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer;
# redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution;
# neither the name of the copyright holders nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import argparse
import sys
from code_formatter import code_formatter
parser = argparse.ArgumentParser()
parser.add_argument("hh", help="the path of the debug flag header file")
parser.add_argument("name", help="the name of the debug flag")
parser.add_argument("desc", help="a description of the debug flag")
parser.add_argument("fmt",
help="whether the flag is a format flag (True or False)")
parser.add_argument("components",
help="components of a compound flag, if applicable, joined with :")
args = parser.parse_args()
fmt = args.fmt.lower()
if fmt == 'true':
fmt = True
elif fmt == 'false':
fmt = False
else:
print(f'Unrecognized "FMT" value {fmt}', file=sys.stderr)
sys.exit(1)
components = args.components.split(':') if args.components else []
code = code_formatter()
code('''
#ifndef __DEBUG_${{args.name}}_HH__
#define __DEBUG_${{args.name}}_HH__
#include "base/compiler.hh" // For namespace deprecation
#include "base/debug.hh"
''')
for flag in components:
code('#include "debug/${flag}.hh"')
code('''
namespace gem5
{
GEM5_DEPRECATED_NAMESPACE(Debug, debug);
namespace debug
{
namespace unions
{
''')
# Use unions to prevent debug flags from being destructed. It's the
# responsibility of the programmer to handle object destruction for members
# of the union. We purposefully leave that destructor empty so that we can
# use debug flags even in the destructors of other objects.
if components:
code('''
inline union ${{args.name}}
{
~${{args.name}}() {}
CompoundFlag ${{args.name}} = {
"${{args.name}}", "${{args.desc}}", {
${{",\\n ".join(
f"(Flag *)&::gem5::debug::{flag}" for flag in components)}}
}
};
} ${{args.name}};
''')
else:
code('''
inline union ${{args.name}}
{
~${{args.name}}() {}
SimpleFlag ${{args.name}} = {
"${{args.name}}", "${{args.desc}}", ${{"true" if fmt else "false"}}
};
} ${{args.name}};
''')
code('''
} // namespace unions
inline constexpr const auto& ${{args.name}} =
::gem5::debug::unions::${{args.name}}.${{args.name}};
} // namespace debug
} // namespace gem5
#endif // __DEBUG_${{args.name}}_HH__
''')
code.write(args.hh)
| StarcoderdataPython |
76807 | from __future__ import print_function,division
import os,sys,re
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
import h5py
from scipy.ndimage.filters import gaussian_filter
from scipy.ndimage import median_filter
from scipy.optimize import leastsq, curve_fit
from scipy.signal import lombscargle
from pkg_resources import resource_filename
import logging
import acor
from .utils import peaks_and_lphs
from .findpeaks import peakdetect
class TimeSeries(object):
def __init__(self, t, f, mask=None, cadence=None,
default_maxlag_days=200,
flatten_order=2):
self.t = np.atleast_1d(t)
self.f = np.atleast_1d(f)
if mask is None:
mask = np.isnan(self.f)
self.mask = mask
if cadence is None:
cadence = np.median(self.t[1:]-self.t[:-1])
self.cadence = cadence
self.default_maxlag_days = default_maxlag_days
self.default_maxlag = default_maxlag_days//cadence
self.flatten_order = flatten_order
#set private variables for cached acorr calculation
self._lag = None #always should be in cadences
self._ac = None
self._ac_poly_coeffs = None #polynomial coefficients subtracted out to flatten acorr
#private variables for caching pgram calculation
self._pers = None
self._pgram = None
def acorr(self, maxlag=None, smooth=18, days=True,
recalc=False):
if maxlag is None:
maxlag = self.default_maxlag
#don't recalculate the same thing if not necessary
if self._ac is not None and not recalc:
lag = self._lag
ac = self._ac
else:
x = self.f.copy()
x[self.mask] = 0
#logging.debug('{} nans in x'.format((np.isnan(x)).sum()))
ac = acor.function(x, maxlag)
lag = np.arange(maxlag)
#fit and subtract out quadratic
if self.flatten_order is not None:
c = np.polyfit(lag, ac, self.flatten_order)
ac -= np.polyval(c, lag)
self._ac_poly_coeffs = c
#smooth AC function
ac = gaussian_filter(ac, smooth)
#set private variables for cached calculation
self._ac = ac
self._lag = lag
self._maxlag = maxlag
self._smooth = smooth
if days:
return lag*self.cadence,ac
else:
return lag,ac
def acorr_peaks(self, lookahead=5, days=True,
return_heights=False, **kwargs):
lag, ac = self.acorr(days=days, **kwargs)
return peaks_and_lphs(ac, lag, return_heights=return_heights,
lookahead=lookahead)
def plot_acorr(self, days=True, smooth=18, maxlag=None,
mark_period=False, lookahead=5, fit_npeaks=4,
tol=0.2,
**kwargs):
lag, ac = self.acorr(days=days, smooth=smooth, maxlag=maxlag)
plt.plot(lag, ac, **kwargs)
pks, lphs = self.acorr_peaks(smooth=smooth,
maxlag=maxlag,
lookahead=lookahead)
#plt.ylim(ymax=1)
if mark_period:
if mark_period is True:
mark_period = None
p,e_p,pks,lphs,hts = self.acorr_period_fit(period=mark_period, return_peaks=True,
fit_npeaks=fit_npeaks, tol=tol,
smooth=smooth,
maxlag=maxlag,
lookahead=lookahead)
plt.xlim(xmax=min((fit_npeaks+1)*p, lag.max()))
for pk in pks:
plt.axvline(pk, ls=':')
def acorr_period_fit(self, period=None, fit_npeaks=4,
smooth=18, maxlag=None, lookahead=5,
tol=0.2, return_peaks=False):
peaks, lphs, hts = self.acorr_peaks(smooth=smooth, maxlag=maxlag,
lookahead=lookahead, return_heights=True)
if lphs[0] >= lphs[1]:
firstpeak = peaks[0]
else:
firstpeak = peaks[1]
if lphs[1] < 1.2*lphs[0]:
logging.warning('Second peak (selected) less than 1.2x height of first peak.')
if period is None:
period = firstpeak
if fit_npeaks > len(peaks):
fit_npeaks = len(peaks)
#peaks = peaks[:fit_npeaks]
#identify peaks to use in fit: first 'fit_npeaks' peaks closest to integer
# multiples of period guess
fit_peaks = []
fit_lphs = []
fit_hts = []
last = 0.
#used = np.zeros_like(peaks).astype(bool)
for n in np.arange(fit_npeaks)+1:
#find highest peak within 'tol' of integer multiple (that hasn't been used)
close = (np.absolute(peaks - n*period) < (tol*n*period)) & ((peaks-last) > 0.3*period)
if close.sum()==0:
fit_npeaks = n-1
break
#raise NoPeakError('No peak found near {}*{:.2f}={:.2f} (tol={})'.format(n,period,n*period,tol))
ind = np.argmax(hts[close])
last = peaks[close][ind]
fit_peaks.append(peaks[close][ind])
fit_lphs.append(lphs[close][ind])
fit_hts.append(hts[close][ind])
#used[close][ind] = True
logging.debug('{}: {}, {}'.format(n*period,peaks[close],peaks[close][ind]))
#logging.debug(used)
#ind = np.argmin(np.absolute(peaks - n*period)) #closest peak
#fit_peaks.append(peaks[ind])
#fit_lphs.append(lphs[ind])
logging.debug('fitting peaks: {}'.format(fit_peaks))
if fit_npeaks < 3:
return peaks,-1, fit_peaks, fit_lphs, fit_hts
x = np.arange(fit_npeaks + 1)
y = np.concatenate([np.array([0]),fit_peaks])
#x = np.arange(fit_npeaks) + 1
#y = fit_peaks
def fn(x,a,b):
return a*x + b
fit,cov = curve_fit(fn, x, y, p0=(period,0))
if return_peaks:
return fit[0],cov[0][0],fit_peaks,fit_lphs,fit_hts
else:
return fit[0],cov[0][0]
def periodogram(self,pmin=0.5,pmax=60,npts=2000,
recalc=False):
pers = np.logspace(np.log10(pmin),np.log10(pmax),npts)
if np.array_equal(pers,self._pers) and not recalc:
pgram = self._pgram
else:
freqs = (2*np.pi)/(pers)
t = self.t[~self.mask]
f = self.f[~self.mask]
pgram = lombscargle(t.astype('float64'),
f.astype('float64'),
freqs.astype('float64'))
self._pgram = pgram
self._pers = pers
return pers,pgram
def pgram_peaks(self, npeaks=10, lookahead=5, **kwargs):
pers,pgram = self.periodogram(**kwargs)
maxes,mins = peakdetect(pgram,pers,lookahead=lookahead)
maxes = np.array(maxes)
inds = np.argsort(maxes[:,1])
pks,hts = maxes[inds,0][-npeaks:][::-1],maxes[inds,1][-npeaks:][::-1]
return pks,hts
def save_hdf(self, filename, path=''):
"""Writes data to file, along with acorr and pgram info.
"""
data = pd.DataFrame({'t':self.t,
'f':self.f,
'mask':self.mask})
lag, ac = self.acorr(days=False)
acorr = pd.DataFrame({'lag':lag,
'ac':ac})
pks, lphs = self.acorr_peaks()
acorr_peaks = pd.DataFrame({'lag':pks,
'lph':lphs})
pers,pg = self.periodogram()
pgram = pd.DataFrame({'period':pers,
'pgram':pg})
pks, hts = self.pgram_peaks()
pgram_peaks = pd.DataFrame({'P':pks,
'height':hts})
data.to_hdf(filename,'{}/data'.format(path))
acorr.to_hdf(filename,'{}/acorr'.format(path))
acorr_peaks.to_hdf(filename,'{}/acorr_peaks'.format(path))
pgram.to_hdf(filename,'{}/pgram'.format(path))
pgram_peaks.to_hdf(filename,'{}/pgram_peaks'.format(path))
if hasattr(self,'subseries'):
for name in self.subseries:
self.subseries[name].save_hdf(filename, path=name)
def make_chunks(self, nchunks, chunksize=300, step=100):
tmin, tmax = (self.t.min(), self.t.max())
tlist = [(t, t+chunksize) for t in np.arange(tmin, tmax+step, step)]
logging.debug('(start, stop) tlist: {}'.format(tlist))
self.make_subseries(tlist)
def make_subseries(self, tlist, names=None):
"""Splits up timeseries into chunks, according to tlist
tlist is a list of (tstart,tstop) tuples. If names is provided,
those names will be used; otherwise 'sub1', 'sub2', etc.
"""
if names is None:
names = ['sub{}'.format(i) for i in 1+np.arange(len(tlist))]
self.subseries = {}
for (tlo,thi),name in zip(tlist,names):
tok = (self.t > tlo) & (self.t < thi)
t = self.t[tok]
f = self.f[tok]
mask = self.mask[tok]
self.subseries[name] = TimeSeries(t, f, mask=mask,
default_maxlag_days=self.default_maxlag_days)
@classmethod
def load_hdf(cls, filename, path=''):
data = pd.read_hdf(filename, '{}/data'.format(path))
t = np.array(data['t'])
f = np.array(data['f'])
mask = np.array(data['mask'])
new = cls(t,f,mask=mask)
acorr = pd.read_hdf(filename, '{}/acorr'.format(path))
new._lag = np.array(acorr['lag'])
new._ac = np.array(acorr['ac'])
pgram = pd.read_hdf(filename, '{}/pgram'.format(path))
new._pers = np.array(pgram['period'])
new._pgram = np.array(pgram['pgram'])
#store.close()
i=1
has_sub = True
new.subseries = {}
while has_sub:
try:
name = 'sub{}'.format(i)
new.subseries[name] = cls.load_hdf(filename, path='{}/{}'.format(path,name))
except KeyError:
has_sub = False
i += 1
return new
class NoPeakError(Exception):
pass
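# Illustrative usage sketch (synthetic data; the 10-day period, cadence and
# noise level are arbitrary choices for demonstration):
def _timeseries_example():
    t = np.arange(0, 400, 0.02)                      # ~400 days at ~30-min cadence
    f = np.sin(2 * np.pi * t / 10.) + 0.1 * np.random.randn(len(t))
    ts = TimeSeries(t, f)
    period, period_err = ts.acorr_period_fit()       # rotation-period estimate
    pers, pgram = ts.periodogram(pmin=0.5, pmax=60)  # Lomb-Scargle periodogram
    return period, period_err, pers, pgram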
| StarcoderdataPython |
179732 | from setuptools import setup, find_packages
import datetime
import os
YEAR = datetime.date.today().year
__author__ = "<NAME>"
__version__ = "0.3.16"
__license__ = "MIT"
__copyright__ = u'%s, <NAME>' % YEAR
# Add Travis build id if not deploying a release (tag)
if os.environ.get("TRAVIS", "") == "true":
build_id = os.environ["TRAVIS_BUILD_NUMBER"]
tag = os.environ.get("TRAVIS_TAG", "")
if tag:
if __version__ != tag:
raise RuntimeError(
"tag != version: {0}, {1}".format(tag, __version__))
else:
__version__ = "{0}.{1}".format(__version__, build_id)
setup(
name='docker-inside',
version=__version__,
description='Run a docker container with you workspace and user',
long_description_markdown_filename='README.md',
url="https://github.com/boon-code/docker-inside",
license=__license__,
author=__author__,
author_email='<EMAIL>',
classifiers=["Development Status :: 3 - Alpha",
"License :: OSI Approved :: MIT License",
"Programming Language :: Python :: 3 :: Only",
"Topic :: System :: Systems Administration"],
package_dir={
'': 'src'
},
packages=find_packages(where='./src'),
entry_points={
'console_scripts': [
'din = dockerinside.__init__:main',
'docker-inside = dockerinside.__init__:main',
'docker_inside = dockerinside.__init__:main',
'dockerinside = dockerinside.__init__:main',
'din-setup = dockerinside.setup.__init__:setup_main',
'docker-inside-setup = dockerinside.setup.__init__:setup_main',
'docker_inside_setup = dockerinside.setup.__init__:setup_main',
]
},
install_requires=[
"argparse>=1.4.0",
"argcomplete>=1.4.1",
"docker>=2.7.0",
"dockerpty>=0.4.1"
],
setup_requires=['setuptools-markdown'],
)
| StarcoderdataPython |
8227 | import os
import sys
import random
def get_next_wallpaper(curr_path):
    lst_dir = os.listdir(curr_path)  # list the wallpaper directory, not the current working directory
rand_index = random.randint(0, len(lst_dir) - 1)
return lst_dir[rand_index]
def get_wall_dir():
return "/Users/MYOUNG/Pictures/mmt"
def main():
script = "osascript -e 'tell application \"Finder\" to set desktop picture to POSIX file '"
path = get_wall_dir()
file = get_next_wallpaper(path)
# print("FILE = ", file)
script = script + path + "/" + file
# print("SCRIPT = ", script)
os.system(script)
main()
| StarcoderdataPython |
3364767 | <gh_stars>10-100
#!/usr/bin/env python
import rospy
import sys
import moveit_commander
from geometry_msgs.msg import PoseStamped, Pose
from moveit_commander import MoveGroupCommander, PlanningSceneInterface
from moveit_msgs.msg import PlanningScene, ObjectColor
from moveit_msgs.msg import Grasp, GripperTranslation, MoveItErrorCodes
from trajectory_msgs.msg import JointTrajectory, JointTrajectoryPoint
from tf.transformations import quaternion_from_euler
from copy import deepcopy
from tms_msg_db.msg import TmsdbStamped, Tmsdb
from tms_msg_db.srv import *
from tms_msg_rp.srv import *
GROUP_NAME_ARM = 'l_arm'
GROUP_NAME_GRIPPER = 'l_gripper'
GRIPPER_FRAME = 'l_end_effector_link'
GRIPPER_JOINT_NAMES = ['l_gripper_thumb_joint']
GRIPPER_EFFORT = [1.0]
REFERENCE_FRAME = 'world_link'
GRIPPER_OPEN_VAL = [-1.0]
class SubTaskRelease:
def __init__(self):
# Initialize the move_group API
moveit_commander.roscpp_initialize(sys.argv)
rospy.init_node('subtask_release')
rospy.on_shutdown(self.shutdown)
self.release_srv = rospy.Service('subtask_release', rp_release, self.ReleaseSrvCallback)
def ReleaseSrvCallback(self, req):
rospy.loginfo("Received the service call!")
rospy.loginfo(req)
arm = MoveGroupCommander(GROUP_NAME_ARM)
gripper = MoveGroupCommander(GROUP_NAME_GRIPPER)
scene = PlanningSceneInterface()
rospy.sleep(0.1)
result = None
gripper.set_joint_value_target(GRIPPER_OPEN_VAL)
result = gripper.go()
arm.detach_object()
gripper.detach_object()
scene.remove_attached_object(GRIPPER_FRAME, str(req.object_id))
scene.remove_world_object(str(req.object_id))
result = True
ret = rp_releaseResponse()
ret.result = result
return ret
def shutdown(self):
rospy.loginfo("Stopping the node")
# Shut down MoveIt cleanly and exit the script
moveit_commander.roscpp_shutdown()
moveit_commander.os._exit(0)
if __name__ == "__main__":
try:
SubTaskRelease()
rospy.spin()
except rospy.ROSInterruptException:
rospy.loginfo("subtask_release node terminated.")
| StarcoderdataPython |
161128 | <reponame>LR-POR/tools
from conllu import parse
import joblib
DEPRELS = ['nsubj','obj','iobj','xcomp','ccomp','csubj','expl','nsubj:pass','aux:pass']
## Auxiliary functions
def binary_search(x,l):
""" Esse algorítmo é o algorítmo de busca binária, mas ele retorna
qual o índice o qual devo colocar o elemento para que a lista
permaneça ordenada.
Input: elemento x e lista l
Output: Índice em que o elemento deve ser inserido para manter a ordenação da lista
"""
lo = 0 # Cota inferior inicial (Lower bound)
up = len(l) # Cota superior inicial (Upper bound)
while lo < up:
mid = int((lo+up)/2) #Ponto Médio
if l[mid] < x:
lo = mid + 1
else:
up = mid
return up
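# Quick illustration: binary_search returns the insertion index that keeps the
# list sorted (the values below are illustrative).
def _binary_search_example():
    assert binary_search(3, [1, 2, 4, 5]) == 2   # 3 would go between 2 and 4
    assert binary_search(0, [1, 2, 4, 5]) == 0   # smaller than everything
    assert binary_search(6, [1, 2, 4, 5]) == 4   # appended at the end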
def convert_token_to_relation(sentence,token):
""" Converte um objeto do tipo token da biblioteca conllu em um objeto
do tipo Relation criado aqui
"""
return Relation(sentence,token)
class Relation:
""" Objeto do tipo relation, tem basicamente as informações do token e
também de seus filhos que nos interessam (case e mark)
"""
def __init__(self,sentence,token):
self.token = token['form']
self.lemma = token['lemma']
self.deprel = token['deprel']
self.upos = token['upos']
self.feats=token['feats']
self.relation = []
son_tokens = get_son_tokens(sentence,token)
if son_tokens != []:
for son_token in son_tokens:
if son_token['deprel'] in ('case','mark'):
self.relation.append(convert_token_to_relation(sentence,son_token))
def __str__(self):
return f'[{str(self.lemma)},{str(self.deprel)},{str(self.upos)}]'
def __repr__(self):
return f'[{str(self.lemma)},{str(self.deprel)},{str(self.upos)}]'
class Valence:
""" Objeto do tipo valencia, que guarda as infos específicas de um
determinado verbo em uma determinada sentença, como seu xcomp,
ccomp, obj etc. caso haja. Possui um método print para imprimir
as informaçoes da valência do verbo.
"""
def __init__(self,
token,
lemma=None,
feats=None,
xcomp=None,
ccomp=None,
obj=None,
iobj=None,
expl=None,
nsubj=None,
csubj=None,
nsubj_pass=None,
aux_pass = None,
example=None,
rel_set = None):
self.token = token
self.lemma = lemma
self.feats = feats
self.xcomp = xcomp
self.ccomp = ccomp
self.obj = obj
self.iobj = iobj
self.expl = expl
self.nsubj = nsubj
self.csubj = csubj
self.nsubj_pass = nsubj_pass
self.aux_pass = aux_pass
self.example = example
self.rel_set = []
rel_string = ''
if self.obj is not None:
obj = 'obj'
obj_complement = ''
if len(self.obj.keys()) > 0:
val = list(self.obj.keys())[0]
rel = self.obj[val]
if len(rel.relation) > 0:
for relation in rel.relation:
if relation.upos == 'ADP':
obj_complement += f':{relation.lemma}'
break
obj += obj_complement
self.rel_set.append(obj)
if self.iobj is not None:
iobj = 'iobj'
iobj_complement = ''
if len(self.iobj.keys()) > 0:
val = list(self.iobj.keys())[0]
rel = self.iobj[val]
if len(rel.relation) > 0:
for relation in rel.relation:
if relation.upos == 'ADP':
iobj_complement += f':{relation.lemma}'
iobj += iobj_complement
self.rel_set.append(iobj)
if self.ccomp is not None:
ccomp = 'ccomp'
ccomp_complement = ''
if len(self.ccomp.keys()) > 0:
val = list(self.ccomp.keys())[0]
rel = self.ccomp[val]
rel.relation.sort(key = lambda x: x.lemma)
for relation in rel.relation:
if relation.upos == 'SCONJ':
ccomp_complement += f'+{relation.lemma}'
break
if val == 'VERB':
if 'Mood' in rel.feats.keys():
ccomp_complement += f"+{rel.feats['Mood']}"
if len(ccomp_complement) > 0 and ccomp_complement[0] == '+':
ccomp_complement = ':' + ccomp_complement[1:]
ccomp+=ccomp_complement
self.rel_set.append(ccomp)
if self.xcomp is not None:
xcomp = 'xcomp'
xcomp_complement = ''
if len(self.xcomp.keys()) > 0:
val = list(self.xcomp.keys())[0]
rel = self.xcomp[val]
rel.relation.sort(key = lambda x: x.lemma)
for relation in rel.relation:
if relation.upos == 'SCONJ':
xcomp_complement += f'+{relation.lemma}'
if val == 'VERB':
if 'VerbForm' in rel.feats.keys():
xcomp_complement += f'+{rel.feats["VerbForm"]}'
if len(xcomp_complement) > 0 and xcomp_complement[0] == '+':
xcomp_complement = ':' + xcomp_complement[1:]
xcomp += xcomp_complement
self.rel_set.append(xcomp)
if self.csubj is not None:
self.rel_set.append('csubj')
if self.expl is not None:
self.rel_set.append('expl')
if self.aux_pass is not None or self.nsubj_pass is not None or (self.feats is not None and 'Voice' in self.feats.keys() and self.feats['Voice'] == 'Pass'):
verb_state = 'VERB:pass'
else:
verb_state = 'VERB:act'
self.rel_set.sort()
if self.nsubj_pass is not None or self.nsubj is not None:
self.rel_set = ['nsubj'] + self.rel_set
self.rel_set = [verb_state] + self.rel_set
s = '<'
for string in self.rel_set:
s+=string+','
s = s[:-1] + '>'
self.valence_category = s
def __getitem__(self,item):
return self.rel_set[item]
def __repr__(self):
return self.valence_category
def __str__(self):
return self.valence_category
def print(self):
verb = f'{self.lemma}'
mdata = ''
for key in ['Mood','Number','Person','Tense','VerbForm']:
if key in self.feats:
mdata+=f'+{key}:{self.feats[key]}'
mdata = mdata + " "
verb+=mdata
if self.xcomp is not None:
val = list(self.xcomp.keys())[0]
if self.xcomp[val].upos in ('VERB'):
xcomp = f"xcomp {val}+{self.xcomp[val].lemma}"
for key in ['Mood','Number','Person','Tense','VerbForm']:
if key in self.xcomp[val].feats:
xcomp += f'+{key}:{self.xcomp[val].feats[key]}'
xcomp += ' '
for t in self.xcomp[val].relation:
xcomp += f'{t.deprel}+{t.upos}+{t.lemma} '
else:
xcomp = f"xcomp "
verb+=xcomp
#if self.ccomp is not None:
#val = list(self.ccomp.keys())[0]
#ccomp = f'ccomp {val}+{self.ccomp[val].deprel}+{self.ccomp[val].upos}+{self.ccomp[val].lemma} '
#else:
#ccomp = f'ccomp '
#verb+=ccomp
if self.ccomp is not None:
val = list(self.ccomp.keys())[0]
if self.ccomp[val].upos in ('VERB'):
ccomp = f"ccomp {val}+{self.ccomp[val].lemma}"
for key in ['Mood','Number','Person','Tense','VerbForm']:
if key in self.ccomp[val].feats:
ccomp += f'+{key}:{self.ccomp[val].feats[key]}'
ccomp += ' '
for t in self.ccomp[val].relation:
ccomp += f'{t.deprel}+{t.upos}+{t.lemma} '
else:
ccomp = f"ccomp "
verb+=ccomp
if self.obj is not None:
if 'ADP' in self.obj.keys():
if self.obj['ADP'].deprel == 'case':
verb += f"obj case+ADP+{self.obj['ADP'].lemma} "
else:
verb += "obj "
else:
verb += "obj "
if self.iobj is not None:
if 'ADP' in self.iobj.keys():
if self.iobj['ADP'].deprel == 'case':
verb += f"iobj case+ADP+{self.iobj['ADP'].lemma} "
else:
verb += 'iobj '
else:
verb += 'iobj '
if self.nsubj is not None:
verb += f'nsubj '
if self.csubj is not None:
verb += 'csubj '
if self.nsubj_pass is not None:
verb += 'nsubj:pass '
if self.aux_pass is not None:
if 'AUX' in self.aux_pass.keys():
aux = f"aux:pass:{self.aux_pass['AUX'].lemma}"
for key in ['Mood','Number','Person','Tense','VerbForm']:
if key in self.aux_pass['AUX'].feats:
aux += f"+{key}:{self.aux_pass['AUX'].feats[key]}"
aux += ' '
else:
                aux = 'aux:pass '
verb += aux
if self.expl is not None:
if 'PRON' in self.expl.keys():
expl = f"PRON+{self.expl['PRON'].token}+"
for key in ['Case','Gender','Number','Person','PronType']:
if key in self.expl['PRON'].feats:
expl+=f"{self.expl['PRON'].feats[key]}+"
expl = expl[:-1]
verb += expl
return verb
class Verb:
    def __init__(self, lemma=None, valences=None):
        self.lemma = lemma
        # Use None as the default to avoid sharing one mutable list across Verb instances.
        self.valences = valences if valences is not None else []
def __repr__(self):
return self.lemma
def __str__(self):
return self.lemma
def add_valence(self,valence):
if valence.lemma != self.lemma:
raise TypeError("Not same lemma")
self.valences.append(valence)
def print(self):
print_output = []
for valence in self.valences:
print_output.append(valence.print())
print_output = list(set(print_output))
s = ''
for t in print_output:
s+=t + "\n"
return s
#def __repr__(self):
#s = f"{lemma}:{self.feats['mood']}+{self.feats['Number']}+{self.feats['Person']}+{self.feats['Tense']}+{self.feats['VerbForm']}\n"
#if rels in self.rel.keys():
#return None
## BASIC FUNCTIONS
def get_root_index(sentence):
for token in sentence:
if token['deprel'] == 'root':
return sentence.index(token)
def get_verbs_index(sentence):
return [sentence.index(x) for x in sentence if x['upos'] == 'VERB']
def get_son_tokens(sentence,
token):
token_id = token['id']
tokens = [t for t in sentence if t['head'] == token_id]
return tokens
def recover_verbs_valences(sentence,
with_lemmas=False):
verbs = get_verbs_index(sentence)
if with_lemmas:
dic = {sentence[x]['lemma']:[(y['deprel'],y['lemma']) for y in get_son_tokens(sentence,sentence[x]) if y['deprel'] not in DEPRELS] for x in verbs}
return dic
dic = {sentence[x]['lemma']:[y['deprel'] for y in get_son_tokens(sentence,sentence[x]) if y['deprel'] in DEPRELS] for x in verbs}
return dic
def get_rel_set(sentence,token):
tokens = get_son_tokens(sentence,token)
rel_set = [x['deprel'] for x in tokens if x['deprel'] in DEPRELS and x['deprel'] != []]
rel_set_aux = [x['id'] for x in tokens if x['deprel'] in DEPRELS and x['deprel'] != []]
token_id = token['id']
i = binary_search(token_id,rel_set_aux)
rel_set = rel_set[:i] + ['VERB'] + rel_set[i:]
return rel_set
## EXTRACTION FUNCTIONS
def get_deprel(sentence,token,deprel):
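    # Returns a dict mapping the UPOS of each child of `token` that bears `deprel`
    # to its Relation wrapper, or None when the token has no child with that relation.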
son_tokens = get_son_tokens(sentence,token)
son_tokens_deprel = [x['deprel'] for x in son_tokens]
result_dic = {}
if deprel not in son_tokens_deprel:
return None
else:
deprels = [x for x in son_tokens if x['deprel'] == deprel]
for deprel_ in deprels:
result_dic[deprel_['upos']] = Relation(sentence,deprel_)
return result_dic
def get_valence(sentence,token):
obj = get_deprel(sentence,token,'obj')
iobj = get_deprel(sentence,token,'iobj')
ccomp = get_deprel(sentence,token,'ccomp')
xcomp = get_deprel(sentence,token,'xcomp')
expl = get_deprel(sentence,token,'expl')
nsubj = get_deprel(sentence,token,'nsubj')
csubj = get_deprel(sentence,token,'csubj')
aux_pass = get_deprel(sentence,token,'aux:pass')
nsubj_pass = get_deprel(sentence,token,'nsubj:pass')
if obj is None and iobj is None and ccomp is None and xcomp is None and expl is None:
return None
return Valence(token = token['form'],
lemma=token['lemma'],
feats=token['feats'],
xcomp=xcomp,
ccomp=ccomp,
obj=obj,
iobj=iobj,
expl=expl,
nsubj = nsubj,
csubj=csubj,
nsubj_pass = nsubj_pass,
aux_pass = aux_pass,
example=sentence.metadata['text'],
rel_set = get_rel_set(sentence,token))
def main():
verbs = {}
with open("pt_bosque-ud-train.conllu") as arq:
bosque = parse(arq.read())
for sentence in bosque:
for verb_index in get_verbs_index(sentence):
verb_lemma = sentence[verb_index]['lemma']
if verb_lemma not in verbs.keys():
verbs[verb_lemma] = Verb(lemma=verb_lemma,valences = [])
valence = get_valence(sentence,sentence[verb_index])
if valence is None:
continue
else:
verbs[verb_lemma].add_valence(valence)
print(f'Done {(bosque.index(sentence)+1)*100/len(bosque):.3f}',end='\r')
print("Done first part...")
joblib.dump(verbs,'verbs_dict.joblib')
d = verbs
g = {}
i=0
for verb in d.keys():
for valence in d[verb].valences:
if str(valence) not in g.keys():
g[str(valence)] = []
if d[verb] not in g[str(valence)]:
g[str(valence)].append(d[verb])
i+=1
print(f'Done {100*i/len(d.keys()):.2f}',end='\r')
joblib.dump(g,'valences_dict.joblib')
print('Done second part...')
def extract_example(valences, valence_category,lemma):
examples=[]
for verb in valences[valence_category]:
if verb.lemma == lemma:
for valence in verb.valences:
if valence.valence_category == valence_category:
examples.append(valence.example)
return examples
def extract_valences(file_path):
verbs = {}
with open(file_path) as arq:
corpus = parse(arq.read())
for sentence in corpus:
for verb_index in get_verbs_index(sentence):
verb_lemma = sentence[verb_index]['lemma']
if verb_lemma not in verbs.keys():
verbs[verb_lemma] = Verb(lemma=verb_lemma,valences = [])
valence = get_valence(sentence,sentence[verb_index])
if valence is None:
continue
else:
verbs[verb_lemma].add_valence(valence)
g = {}
for verb in verbs.keys():
for valence in verbs[verb].valences:
if str(valence) not in g.keys():
g[str(valence)] = []
if verbs[verb] not in g[str(valence)]:
g[str(valence)].append(verbs[verb])
return g
def dump(d,out):
with open(out, 'w') as f:
for val in d.keys():
for v in d[val]:
print(val,v, file = f)
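# A minimal usage sketch; the helper name `demo_valences` and the output file name
# 'valences.txt' are illustrative only, while the CoNLL-U path is the one main() already uses.
def demo_valences():
    valences = extract_valences("pt_bosque-ud-train.conllu")
    dump(valences, "valences.txt")
    return valences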
if __name__ == "__main__":
main()
| StarcoderdataPython |
3390183 | <reponame>fer-moreira/MYQT<filename>MYQT_Application/assets/UI/Scripts/ConnectorWindow.py
# -*- coding: utf-8 -*-
# Form implementation generated from reading ui file '.\assets\UI\Layout\ConnectorWindow.ui'
#
# Created by: PyQt5 UI code generator 5.12.1
#
# WARNING! All changes made in this file will be lost!
from PyQt5 import QtCore, QtGui, QtWidgets
class Ui_Connector(object):
def setupUi(self, Connector):
Connector.setObjectName("Connector")
Connector.setWindowModality(QtCore.Qt.ApplicationModal)
Connector.resize(302, 368)
Connector.setMinimumSize(QtCore.QSize(302, 368))
Connector.setMaximumSize(QtCore.QSize(302, 368))
font = QtGui.QFont()
font.setBold(False)
font.setWeight(50)
Connector.setFont(font)
Connector.setStyleSheet("")
Connector.setIconSize(QtCore.QSize(32, 32))
Connector.setDockNestingEnabled(False)
Connector.setDockOptions(QtWidgets.QMainWindow.AllowTabbedDocks|QtWidgets.QMainWindow.AnimatedDocks|QtWidgets.QMainWindow.VerticalTabs)
self.centralwidget = QtWidgets.QWidget(Connector)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Preferred, QtWidgets.QSizePolicy.Preferred)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.centralwidget.sizePolicy().hasHeightForWidth())
self.centralwidget.setSizePolicy(sizePolicy)
self.centralwidget.setObjectName("centralwidget")
self.dbType = QtWidgets.QComboBox(self.centralwidget)
self.dbType.setGeometry(QtCore.QRect(10, 14, 281, 40))
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Fixed)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.dbType.sizePolicy().hasHeightForWidth())
self.dbType.setSizePolicy(sizePolicy)
self.dbType.setMinimumSize(QtCore.QSize(101, 40))
font = QtGui.QFont()
font.setFamily("Arial")
font.setPointSize(10)
font.setBold(True)
font.setWeight(75)
self.dbType.setFont(font)
self.dbType.setCursor(QtGui.QCursor(QtCore.Qt.PointingHandCursor))
self.dbType.setContextMenuPolicy(QtCore.Qt.CustomContextMenu)
self.dbType.setAutoFillBackground(False)
self.dbType.setEditable(False)
self.dbType.setMaxCount(3)
self.dbType.setIconSize(QtCore.QSize(32, 32))
self.dbType.setFrame(False)
self.dbType.setObjectName("dbType")
self.dbType.addItem("")
self.dbType.addItem("")
self.dbType.addItem("")
self.remember = QtWidgets.QCheckBox(self.centralwidget)
self.remember.setEnabled(True)
self.remember.setGeometry(QtCore.QRect(160, 255, 130, 40))
self.remember.setMaximumSize(QtCore.QSize(130, 40))
font = QtGui.QFont()
font.setFamily("Arial")
font.setPointSize(8)
font.setBold(False)
font.setItalic(True)
font.setWeight(50)
self.remember.setFont(font)
self.remember.setCursor(QtGui.QCursor(QtCore.Qt.PointingHandCursor))
self.remember.setAutoFillBackground(True)
self.remember.setIconSize(QtCore.QSize(32, 32))
self.remember.setCheckable(True)
self.remember.setChecked(True)
self.remember.setAutoRepeat(False)
self.remember.setAutoExclusive(False)
self.remember.setTristate(False)
self.remember.setObjectName("remember")
self.pass_in = QtWidgets.QLineEdit(self.centralwidget)
self.pass_in.setGeometry(QtCore.QRect(10, 204, 281, 41))
self.pass_in.setMinimumSize(QtCore.QSize(281, 41))
font = QtGui.QFont()
font.setFamily("Arial")
font.setPointSize(10)
font.setBold(False)
font.setWeight(50)
self.pass_in.setFont(font)
self.pass_in.setStyleSheet("padding:7;")
self.pass_in.setText("")
self.pass_in.setFrame(False)
self.pass_in.setEchoMode(QtWidgets.QLineEdit.Password)
self.pass_in.setCursorMoveStyle(QtCore.Qt.LogicalMoveStyle)
self.pass_in.setClearButtonEnabled(False)
self.pass_in.setObjectName("pass_in")
self.connect = QtWidgets.QPushButton(self.centralwidget)
self.connect.setGeometry(QtCore.QRect(10, 305, 281, 51))
self.connect.setMinimumSize(QtCore.QSize(281, 51))
self.connect.setMaximumSize(QtCore.QSize(281, 51))
font = QtGui.QFont()
font.setFamily("Arial")
font.setPointSize(14)
self.connect.setFont(font)
self.connect.setStyleSheet("")
self.connect.setAutoDefault(False)
self.connect.setDefault(False)
self.connect.setFlat(False)
self.connect.setObjectName("connect")
self.user_in = QtWidgets.QLineEdit(self.centralwidget)
self.user_in.setGeometry(QtCore.QRect(10, 157, 281, 41))
self.user_in.setMinimumSize(QtCore.QSize(281, 41))
font = QtGui.QFont()
font.setFamily("Arial")
font.setPointSize(10)
font.setBold(False)
font.setWeight(50)
self.user_in.setFont(font)
self.user_in.setStyleSheet("padding:7;")
self.user_in.setText("")
self.user_in.setFrame(False)
self.user_in.setClearButtonEnabled(False)
self.user_in.setObjectName("user_in")
self.buffered = QtWidgets.QCheckBox(self.centralwidget)
self.buffered.setEnabled(True)
self.buffered.setGeometry(QtCore.QRect(10, 255, 140, 40))
self.buffered.setMaximumSize(QtCore.QSize(140, 40))
font = QtGui.QFont()
font.setFamily("Arial")
font.setPointSize(8)
font.setBold(False)
font.setItalic(True)
font.setWeight(50)
self.buffered.setFont(font)
self.buffered.setCursor(QtGui.QCursor(QtCore.Qt.PointingHandCursor))
self.buffered.setLayoutDirection(QtCore.Qt.LeftToRight)
self.buffered.setAutoFillBackground(True)
self.buffered.setIconSize(QtCore.QSize(32, 32))
self.buffered.setCheckable(True)
self.buffered.setChecked(True)
self.buffered.setAutoRepeat(False)
self.buffered.setAutoExclusive(False)
self.buffered.setTristate(False)
self.buffered.setObjectName("buffered")
self.host_in = QtWidgets.QLineEdit(self.centralwidget)
self.host_in.setGeometry(QtCore.QRect(10, 60, 281, 41))
self.host_in.setMinimumSize(QtCore.QSize(131, 41))
font = QtGui.QFont()
font.setFamily("Arial")
font.setPointSize(10)
font.setBold(False)
font.setWeight(50)
self.host_in.setFont(font)
self.host_in.setStyleSheet("padding:7;")
self.host_in.setFrame(False)
self.host_in.setAlignment(QtCore.Qt.AlignLeading|QtCore.Qt.AlignLeft|QtCore.Qt.AlignVCenter)
self.host_in.setClearButtonEnabled(False)
self.host_in.setObjectName("host_in")
self.port_in = QtWidgets.QLineEdit(self.centralwidget)
self.port_in.setGeometry(QtCore.QRect(10, 110, 280, 41))
self.port_in.setMinimumSize(QtCore.QSize(141, 41))
font = QtGui.QFont()
font.setFamily("Arial")
font.setPointSize(10)
font.setBold(False)
font.setWeight(50)
self.port_in.setFont(font)
self.port_in.setStyleSheet("padding:7;")
self.port_in.setText("")
self.port_in.setFrame(False)
self.port_in.setAlignment(QtCore.Qt.AlignLeading|QtCore.Qt.AlignLeft|QtCore.Qt.AlignVCenter)
self.port_in.setClearButtonEnabled(False)
self.port_in.setObjectName("port_in")
Connector.setCentralWidget(self.centralwidget)
self.retranslateUi(Connector)
QtCore.QMetaObject.connectSlotsByName(Connector)
Connector.setTabOrder(self.dbType, self.host_in)
Connector.setTabOrder(self.host_in, self.port_in)
Connector.setTabOrder(self.port_in, self.user_in)
Connector.setTabOrder(self.user_in, self.pass_in)
Connector.setTabOrder(self.pass_in, self.buffered)
Connector.setTabOrder(self.buffered, self.remember)
Connector.setTabOrder(self.remember, self.connect)
def retranslateUi(self, Connector):
_translate = QtCore.QCoreApplication.translate
Connector.setWindowTitle(_translate("Connector", "SQL Connector"))
self.dbType.setItemText(0, _translate("Connector", "MY SQL"))
self.dbType.setItemText(1, _translate("Connector", "Microsoft Server"))
self.dbType.setItemText(2, _translate("Connector", "Postgre SQL "))
self.remember.setToolTip(_translate("Connector", "<html><head/><body><p>Trusted Connection</p></body></html>"))
self.remember.setText(_translate("Connector", "Remember me"))
self.pass_in.setToolTip(_translate("Connector", "<html><head/><body><p>Password</p></body></html>"))
self.pass_in.setPlaceholderText(_translate("Connector", "Password:"))
self.connect.setText(_translate("Connector", "CONNECT"))
self.connect.setShortcut(_translate("Connector", "Enter"))
self.user_in.setToolTip(_translate("Connector", "<html><head/><body><p>Username</p></body></html>"))
self.user_in.setPlaceholderText(_translate("Connector", "User:"))
self.buffered.setToolTip(_translate("Connector", "<html><head/><body><p>Buffered Connection</p></body></html>"))
self.buffered.setText(_translate("Connector", "Buffered"))
self.host_in.setToolTip(_translate("Connector", "<html><head/><body><p>Server / IP</p></body></html>"))
self.host_in.setText(_translate("Connector", "localhost"))
self.host_in.setPlaceholderText(_translate("Connector", "Server/IP:"))
self.port_in.setToolTip(_translate("Connector", "<html><head/><body><p>Server PORT</p></body></html>"))
self.port_in.setPlaceholderText(_translate("Connector", "Port:"))
if __name__ == "__main__":
import sys
app = QtWidgets.QApplication(sys.argv)
Connector = QtWidgets.QMainWindow()
ui = Ui_Connector()
ui.setupUi(Connector)
Connector.show()
sys.exit(app.exec_())
| StarcoderdataPython |
4832201 | <filename>D_linknet34.py
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Sun Sep 2 20:31:14 2018
@author: pooh
"""
from torch import nn
import torch
from torchvision import models
import torchvision
from torch.nn import functional as F
def conv3x3(in_, out):
return nn.Conv2d(in_, out, 3, padding=1)
class ConvRelu(nn.Module):
def __init__(self, in_: int, out: int):
super(ConvRelu, self).__init__()
self.conv = conv3x3(in_, out)
self.activation = nn.ReLU(inplace=True)
def forward(self, x):
x = self.conv(x)
x = self.activation(x)
return x
class DecoderBlock(nn.Module):
"""
    Parameters for the deconvolution were chosen to avoid checkerboard artifacts, following
link https://distill.pub/2016/deconv-checkerboard/
"""
def __init__(self, in_channels, middle_channels, out_channels, is_deconv=True):
super(DecoderBlock, self).__init__()
self.in_channels = in_channels
if is_deconv:
self.block = nn.Sequential(
ConvRelu(in_channels, middle_channels),
nn.ConvTranspose2d(middle_channels, out_channels, kernel_size=4, stride=2,
padding=1),
nn.ReLU(inplace=True)
)
else:
self.block = nn.Sequential(
nn.Upsample(scale_factor=2, mode='bilinear'),
ConvRelu(in_channels, middle_channels),
ConvRelu(middle_channels, out_channels),
)
def forward(self, x):
return self.block(x)
class UNet11(nn.Module):
def __init__(self, num_classes=1, num_filters=32, pretrained=False):
"""
:param num_classes:
:param num_filters:
:param pretrained:
False - no pre-trained network used
True - encoder pre-trained with VGG11
"""
super().__init__()
self.pool = nn.MaxPool2d(2, 2)
self.num_classes = num_classes
self.encoder = models.vgg11(pretrained=pretrained).features
self.relu = nn.ReLU(inplace=True)
self.conv1 = nn.Sequential(self.encoder[0],
self.relu)
self.conv2 = nn.Sequential(self.encoder[3],
self.relu)
self.conv3 = nn.Sequential(
self.encoder[6],
self.relu,
self.encoder[8],
self.relu,
)
self.conv4 = nn.Sequential(
self.encoder[11],
self.relu,
self.encoder[13],
self.relu,
)
self.conv5 = nn.Sequential(
self.encoder[16],
self.relu,
self.encoder[18],
self.relu,
)
self.center = DecoderBlock(256 + num_filters * 8, num_filters * 8 * 2, num_filters * 8, is_deconv=True)
self.dec5 = DecoderBlock(512 + num_filters * 8, num_filters * 8 * 2, num_filters * 8, is_deconv=True)
self.dec4 = DecoderBlock(512 + num_filters * 8, num_filters * 8 * 2, num_filters * 4, is_deconv=True)
self.dec3 = DecoderBlock(256 + num_filters * 4, num_filters * 4 * 2, num_filters * 2, is_deconv=True)
self.dec2 = DecoderBlock(128 + num_filters * 2, num_filters * 2 * 2, num_filters, is_deconv=True)
self.dec1 = ConvRelu(64 + num_filters, num_filters)
self.final = nn.Conv2d(num_filters, num_classes, kernel_size=1)
def forward(self, x):
conv1 = self.conv1(x)
conv2 = self.conv2(self.pool(conv1))
conv3 = self.conv3(self.pool(conv2))
conv4 = self.conv4(self.pool(conv3))
conv5 = self.conv5(self.pool(conv4))
center = self.center(self.pool(conv5))
dec5 = self.dec5(torch.cat([center, conv5], 1))
dec4 = self.dec4(torch.cat([dec5, conv4], 1))
dec3 = self.dec3(torch.cat([dec4, conv3], 1))
dec2 = self.dec2(torch.cat([dec3, conv2], 1))
dec1 = self.dec1(torch.cat([dec2, conv1], 1))
if self.num_classes > 1:
x_out = F.log_softmax(self.final(dec1), dim=1)
else:
x_out = self.final(dec1)
return x_out
class UNet16(nn.Module):
def __init__(self, num_classes=1, num_filters=32, pretrained=False):
"""
:param num_classes:
:param num_filters:
:param pretrained:
False - no pre-trained network used
                True - encoder pre-trained with VGG16
"""
super().__init__()
self.num_classes = num_classes
self.pool = nn.MaxPool2d(2, 2)
self.encoder = torchvision.models.vgg16(pretrained=pretrained).features
self.relu = nn.ReLU(inplace=True)
self.conv1 = nn.Sequential(self.encoder[0],
self.relu,
self.encoder[2],
self.relu)
self.conv2 = nn.Sequential(self.encoder[5],
self.relu,
self.encoder[7],
self.relu)
self.conv3 = nn.Sequential(self.encoder[10],
self.relu,
self.encoder[12],
self.relu,
self.encoder[14],
self.relu)
self.conv4 = nn.Sequential(self.encoder[17],
self.relu,
self.encoder[19],
self.relu,
self.encoder[21],
self.relu)
self.conv5 = nn.Sequential(self.encoder[24],
self.relu,
self.encoder[26],
self.relu,
self.encoder[28],
self.relu)
self.center = DecoderBlock(512, num_filters * 8 * 2, num_filters * 8)
self.dec5 = DecoderBlock(512 + num_filters * 8, num_filters * 8 * 2, num_filters * 8)
self.dec4 = DecoderBlock(512 + num_filters * 8, num_filters * 8 * 2, num_filters * 8)
self.dec3 = DecoderBlock(256 + num_filters * 8, num_filters * 4 * 2, num_filters * 2)
self.dec2 = DecoderBlock(128 + num_filters * 2, num_filters * 2 * 2, num_filters)
self.dec1 = ConvRelu(64 + num_filters, num_filters)
self.final = nn.Conv2d(num_filters, num_classes, kernel_size=1)
def forward(self, x):
conv1 = self.conv1(x)
conv2 = self.conv2(self.pool(conv1))
conv3 = self.conv3(self.pool(conv2))
conv4 = self.conv4(self.pool(conv3))
conv5 = self.conv5(self.pool(conv4))
center = self.center(self.pool(conv5))
dec5 = self.dec5(torch.cat([center, conv5], 1))
dec4 = self.dec4(torch.cat([dec5, conv4], 1))
dec3 = self.dec3(torch.cat([dec4, conv3], 1))
dec2 = self.dec2(torch.cat([dec3, conv2], 1))
dec1 = self.dec1(torch.cat([dec2, conv1], 1))
if self.num_classes > 1:
x_out = F.log_softmax(self.final(dec1), dim=1)
else:
x_out = self.final(dec1)
return x_out
class DecoderBlockLinkNet(nn.Module):
def __init__(self, in_channels, n_filters):
super().__init__()
self.relu = nn.ReLU(inplace=True)
# B, C, H, W -> B, C/4, H, W
self.conv1 = nn.Conv2d(in_channels, in_channels // 4, 1)
self.norm1 = nn.BatchNorm2d(in_channels // 4)
# B, C/4, H, W -> B, C/4, 2 * H, 2 * W
self.deconv2 = nn.ConvTranspose2d(in_channels // 4, in_channels // 4, kernel_size=4,
stride=2, padding=1, output_padding=0)
self.norm2 = nn.BatchNorm2d(in_channels // 4)
# B, C/4, H, W -> B, C, H, W
self.conv3 = nn.Conv2d(in_channels // 4, n_filters, 1)
self.norm3 = nn.BatchNorm2d(n_filters)
def forward(self, x):
x = self.conv1(x)
x = self.norm1(x)
x = self.relu(x)
x = self.deconv2(x)
x = self.norm2(x)
x = self.relu(x)
x = self.conv3(x)
x = self.norm3(x)
x = self.relu(x)
return x
class CenterBlockLinkNet(nn.Module):
def __init__(self, in_channels):
super().__init__()
self.relu = nn.ReLU(inplace=True)
# B, C, H, W -> B, C, H, W
        self.norm1 = nn.BatchNorm2d(in_channels)
        # Two 3x3 convolutions that keep the spatial size: the second one uses
        # dilation=2 to enlarge the receptive field of the center block.
        self.conv1 = nn.Conv2d(in_channels, in_channels, 3, 1, padding=1)
        self.conv2 = nn.Conv2d(in_channels, in_channels, 3, 1, padding=2, dilation=2)
#self.conv3 = nn.Conv2d(in_channels, in_channels, 3,1, padding=4, dilation=4)
#self.conv4 = nn.Conv2d(in_channels, in_channels, 3,1, padding=8, dilation=8)
        # B, C, H, W -> B, C, H, W (the center block preserves the spatial size)
def forward(self, x):
#######3#########
x2 = self.conv1(x)
x2 = self.norm1(x2)
x2 = self.relu(x2)
x2 = self.conv2(x2)
x2 = self.norm1(x2)
x2 = self.relu(x2)
#x2 = self.conv3(x2)
#x2 = self.norm1(x2)
#x2 = self.relu(x2)
##################
x3 = self.conv1(x)
x3 = self.norm1(x3)
x3 = self.relu(x3)
#x3 = self.conv2(x3)
#x3 = self.norm1(x3)
#x3 = self.relu(x3)
###################
#x4 = self.conv1(x)
#x4 = self.norm1(x4)
#x4 = self.relu(x4)
y=x2+x3+x
return y
class LinkNet34(nn.Module):
def __init__(self, num_classes=1, num_channels=3, pretrained=True):
super().__init__()
assert num_channels == 3
self.num_classes = num_classes
filters = [64, 128, 256, 512]
resnet = models.resnet34(pretrained=pretrained)
self.firstconv = resnet.conv1
self.firstbn = resnet.bn1
self.firstrelu = resnet.relu
self.firstmaxpool = resnet.maxpool
self.encoder1 = resnet.layer1
self.encoder2 = resnet.layer2
self.encoder3 = resnet.layer3
self.encoder4 = resnet.layer4
# center dilation
self.center = CenterBlockLinkNet(filters[3])
# Decoder
self.decoder4 = DecoderBlockLinkNet(filters[3], filters[2])
self.decoder3 = DecoderBlockLinkNet(filters[2], filters[1])
self.decoder2 = DecoderBlockLinkNet(filters[1], filters[0])
self.decoder1 = DecoderBlockLinkNet(filters[0], filters[0])
# Final Classifier
self.finaldeconv1 = nn.ConvTranspose2d(filters[0], 32, 3, stride=2)
self.finalrelu1 = nn.ReLU(inplace=True)
self.finalconv2 = nn.Conv2d(32, 32, 3)
self.finalrelu2 = nn.ReLU(inplace=True)
self.finalconv3 = nn.Conv2d(32, num_classes, 2, padding=1)
# noinspection PyCallingNonCallable
def forward(self, x):
# Encoder
x = self.firstconv(x)
x = self.firstbn(x)
x = self.firstrelu(x)
x = self.firstmaxpool(x)
e1 = self.encoder1(x)
e2 = self.encoder2(e1)
e3 = self.encoder3(e2)
e4 = self.encoder4(e3)
# Center with dilation
c = self.center(e4)
# Decoder with Skip Connections
d4 = self.decoder4(c) + e3
d3 = self.decoder3(d4) + e2
d2 = self.decoder2(d3) + e1
d1 = self.decoder1(d2)
# Final Classification
f1 = self.finaldeconv1(d1)
f2 = self.finalrelu1(f1)
f3 = self.finalconv2(f2)
f4 = self.finalrelu2(f3)
f5 = self.finalconv3(f4)
if self.num_classes > 1:
x_out = F.log_softmax(f5, dim=1)
else:
x_out = f5
return x_out
class Conv3BN(nn.Module):
def __init__(self, in_: int, out: int, bn=False):
super().__init__()
self.conv = conv3x3(in_, out)
self.bn = nn.BatchNorm2d(out) if bn else None
self.activation = nn.ReLU(inplace=True)
def forward(self, x):
x = self.conv(x)
if self.bn is not None:
x = self.bn(x)
x = self.activation(x)
return x
class UNetModule(nn.Module):
def __init__(self, in_: int, out: int):
super().__init__()
self.l1 = Conv3BN(in_, out)
self.l2 = Conv3BN(out, out)
def forward(self, x):
x = self.l1(x)
x = self.l2(x)
return x
class UNet(nn.Module):
"""
Vanilla UNet.
Implementation from https://github.com/lopuhin/mapillary-vistas-2017/blob/master/unet_models.py
"""
output_downscaled = 1
module = UNetModule
def __init__(self,
input_channels: int = 3,
filters_base: int = 32,
down_filter_factors=(1, 2, 4, 8, 16),
up_filter_factors=(1, 2, 4, 8, 16),
bottom_s=4,
num_classes=1,
add_output=True):
super().__init__()
self.num_classes = num_classes
assert len(down_filter_factors) == len(up_filter_factors)
assert down_filter_factors[-1] == up_filter_factors[-1]
down_filter_sizes = [filters_base * s for s in down_filter_factors]
up_filter_sizes = [filters_base * s for s in up_filter_factors]
self.down, self.up = nn.ModuleList(), nn.ModuleList()
self.down.append(self.module(input_channels, down_filter_sizes[0]))
for prev_i, nf in enumerate(down_filter_sizes[1:]):
self.down.append(self.module(down_filter_sizes[prev_i], nf))
for prev_i, nf in enumerate(up_filter_sizes[1:]):
self.up.append(self.module(
down_filter_sizes[prev_i] + nf, up_filter_sizes[prev_i]))
pool = nn.MaxPool2d(2, 2)
pool_bottom = nn.MaxPool2d(bottom_s, bottom_s)
upsample = nn.Upsample(scale_factor=2)
upsample_bottom = nn.Upsample(scale_factor=bottom_s)
self.downsamplers = [None] + [pool] * (len(self.down) - 1)
self.downsamplers[-1] = pool_bottom
self.upsamplers = [upsample] * len(self.up)
self.upsamplers[-1] = upsample_bottom
self.add_output = add_output
if add_output:
self.conv_final = nn.Conv2d(up_filter_sizes[0], num_classes, 1)
def forward(self, x):
xs = []
for downsample, down in zip(self.downsamplers, self.down):
x_in = x if downsample is None else downsample(xs[-1])
x_out = down(x_in)
xs.append(x_out)
x_out = xs[-1]
for x_skip, upsample, up in reversed(
list(zip(xs[:-1], self.upsamplers, self.up))):
x_out = upsample(x_out)
x_out = up(torch.cat([x_out, x_skip], 1))
if self.add_output:
x_out = self.conv_final(x_out)
if self.num_classes > 1:
x_out = F.log_softmax(x_out, dim=1)
return x_out
class AlbuNet(nn.Module):
"""
UNet (https://arxiv.org/abs/1505.04597) with Resnet34(https://arxiv.org/abs/1512.03385) encoder
Proposed by <NAME>: https://www.linkedin.com/in/al-buslaev/
"""
def __init__(self, num_classes=1, num_filters=32, pretrained=False, is_deconv=False):
"""
:param num_classes:
:param num_filters:
:param pretrained:
False - no pre-trained network is used
True - encoder is pre-trained with resnet34
:is_deconv:
False: bilinear interpolation is used in decoder
True: deconvolution is used in decoder
"""
super().__init__()
self.num_classes = num_classes
self.pool = nn.MaxPool2d(2, 2)
self.encoder = torchvision.models.resnet34(pretrained=pretrained)
self.relu = nn.ReLU(inplace=True)
self.conv1 = nn.Sequential(self.encoder.conv1,
self.encoder.bn1,
self.encoder.relu,
self.pool)
self.conv2 = self.encoder.layer1
self.conv3 = self.encoder.layer2
self.conv4 = self.encoder.layer3
self.conv5 = self.encoder.layer4
self.center = DecoderBlock(512, num_filters * 8 * 2, num_filters * 8, is_deconv)
self.dec5 = DecoderBlock(512 + num_filters * 8, num_filters * 8 * 2, num_filters * 8, is_deconv)
self.dec4 = DecoderBlock(256 + num_filters * 8, num_filters * 8 * 2, num_filters * 8, is_deconv)
self.dec3 = DecoderBlock(128 + num_filters * 8, num_filters * 4 * 2, num_filters * 2, is_deconv)
self.dec2 = DecoderBlock(64 + num_filters * 2, num_filters * 2 * 2, num_filters * 2 * 2, is_deconv)
self.dec1 = DecoderBlock(num_filters * 2 * 2, num_filters * 2 * 2, num_filters, is_deconv)
self.dec0 = ConvRelu(num_filters, num_filters)
self.final = nn.Conv2d(num_filters, num_classes, kernel_size=1)
def forward(self, x):
conv1 = self.conv1(x)
conv2 = self.conv2(conv1)
conv3 = self.conv3(conv2)
conv4 = self.conv4(conv3)
conv5 = self.conv5(conv4)
center = self.center(self.pool(conv5))
dec5 = self.dec5(torch.cat([center, conv5], 1))
dec4 = self.dec4(torch.cat([dec5, conv4], 1))
dec3 = self.dec3(torch.cat([dec4, conv3], 1))
dec2 = self.dec2(torch.cat([dec3, conv2], 1))
dec1 = self.dec1(dec2)
dec0 = self.dec0(dec1)
if self.num_classes > 1:
x_out = F.log_softmax(self.final(dec0), dim=1)
else:
x_out = self.final(dec0)
return x_out
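# A minimal smoke-test sketch, assuming a random 256x256 RGB input and pretrained=False
# so that no torchvision weights have to be downloaded; for num_classes=1 the expected
# output is a (1, 1, 256, 256) tensor of mask logits.
if __name__ == '__main__':
    model = LinkNet34(num_classes=1, pretrained=False)
    model.eval()
    dummy = torch.randn(1, 3, 256, 256)
    with torch.no_grad():
        out = model(dummy)
    print(out.shape)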
| StarcoderdataPython |
3399498 | <filename>opencv/misc/simple_demo/BGRvsHSV/BGRvsHSV.py
import cv2
import numpy as np
def nothing(x):
pass
bgr_img = np.zeros((500, 300, 3), np.uint8)
hsv_img = np.copy(bgr_img)
cv2.namedWindow('BGR')
cv2.namedWindow('HSV')
cv2.createTrackbar('B', 'BGR', 0, 255, nothing)
cv2.createTrackbar('G', 'BGR', 0, 255, nothing)
cv2.createTrackbar('R', 'BGR', 0, 255, nothing)
# create trackbars for color change
cv2.createTrackbar('H', 'HSV', 0, 179, nothing)
cv2.createTrackbar('S', 'HSV', 0, 255, nothing)
cv2.createTrackbar('V', 'HSV', 0, 255, nothing)
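# Note: OpenCV stores hue in the range [0, 179] for 8-bit images,
# which is why the 'H' trackbar above is capped at 179 while S and V go up to 255.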
while(1):
cv2.imshow('BGR', bgr_img)
cv2.imshow('HSV', hsv_img)
k = cv2.waitKey(1) & 0xFF
if k == 27:
break
# get current positions of four trackbars
r = cv2.getTrackbarPos('R', 'BGR')
g = cv2.getTrackbarPos('G', 'BGR')
b = cv2.getTrackbarPos('B', 'BGR')
h = cv2.getTrackbarPos('H', 'HSV')
s = cv2.getTrackbarPos('S', 'HSV')
v = cv2.getTrackbarPos('V', 'HSV')
# print r, g, b, h, s, v
bgr_hsv = cv2.cvtColor(np.array([[[h, s, v]]], np.uint8), cv2.COLOR_HSV2BGR)
# s = cv2.cvtColor(cv2.merge((np.array([h]), np.array([s]), np.array([v])), cv2.COLOR_HSV2BGR)
bgr_img[:] = [b, g, r]
hsv_img[:] = bgr_hsv
cv2.destroyAllWindows()
| StarcoderdataPython |
38052 | <filename>crawlers/news/items.py<gh_stars>1-10
# -*- coding: utf-8 -*-
# Define here the models for your scraped items
#
# See documentation in:
# http://doc.scrapy.org/en/latest/topics/items.html
from scrapy import Item, Field
from scrapy.loader.processors import TakeFirst, Join
class NewsItem(Item):
url = Field(
output_processor=TakeFirst()
)
type = Field(
output_processor=TakeFirst()
)
source = Field(
output_processor=TakeFirst()
)
title = Field(
output_processor=TakeFirst()
)
article = Field(
output_processor=TakeFirst()
)
create_date = Field(
output_processor=TakeFirst()
)
image_urls = Field()
class SinaCaptchaItem(Item):
image_urls = Field()
images = Field()
class SinaStarItem(Item):
name = Field(
output_processor=TakeFirst()
)
avatar_url = Field(
output_processor=TakeFirst()
)
weibo_url = Field(
output_processor=TakeFirst()
    )
 | StarcoderdataPython |
3294999 |
import maya.cmds as cmds
def multObjImport():
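    """Prompt for one or more .obj files, import each one, and rename the imported
    nodes after the file name (without the .obj extension)."""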
files_to_import = cmds.fileDialog2(fileFilter = '*.obj', dialogStyle = 2, caption = 'import multiple object files', fileMode = 4)
for file_to_import in files_to_import:
names_list = file_to_import.split('/')
object_name = names_list[-1].replace('.obj', '')
returnedNodes = cmds.file('%s' % file_to_import, i = True, type = "OBJ", rnn=True, ignoreVersion = True, options = "mo=0", loadReferenceDepth = "all" )
cmds.rename( returnedNodes[0], object_name)
for nd in returnedNodes:
if '|' in nd and 'Shape' not in nd:
cmds.rename(nd, object_name)
multObjImport()
| StarcoderdataPython |